diff options
Diffstat (limited to 'apps/couch/src')
55 files changed, 17533 insertions, 0 deletions
diff --git a/apps/couch/src/couch.app.src b/apps/couch/src/couch.app.src new file mode 100644 index 00000000..2e25d55a --- /dev/null +++ b/apps/couch/src/couch.app.src @@ -0,0 +1,25 @@ +{application, couch, [ + {description, "Apache CouchDB"}, + {vsn, "1.0.1"}, + {registered, [ + couch_config, + couch_db_update, + couch_db_update_notifier_sup, + couch_external_manager, + couch_httpd, + couch_log, + couch_primary_services, + couch_query_servers, + couch_rep_sup, + couch_secondary_services, + couch_server, + couch_server_sup, + couch_stats_aggregator, + couch_stats_collector, + couch_task_status, + couch_view + ]}, + {mod, {couch_app, []}}, + {applications, [kernel, stdlib, crypto, sasl, inets, oauth, ibrowse, + mochiweb, ssl]} +]}. diff --git a/apps/couch/src/couch.erl b/apps/couch/src/couch.erl new file mode 100644 index 00000000..956e9489 --- /dev/null +++ b/apps/couch/src/couch.erl @@ -0,0 +1,39 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch). + +-compile(export_all). + +start() -> + application:start(couch). + +stop() -> + application:stop(couch). + +restart() -> + case stop() of + ok -> + start(); + {error, {not_started,couch}} -> + start(); + {error, Reason} -> + {error, Reason} + end. + +reload() -> + case supervisor:terminate_child(couch_server_sup, couch_config) of + ok -> + supervisor:restart_child(couch_server_sup, couch_config); + {error, Reason} -> + {error, Reason} + end. 
diff --git a/apps/couch/src/couch_app.erl b/apps/couch/src/couch_app.erl new file mode 100644 index 00000000..232953d9 --- /dev/null +++ b/apps/couch/src/couch_app.erl @@ -0,0 +1,56 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_app). + +-behaviour(application). + +-include("couch_db.hrl"). + +-export([start/2, stop/1]). + +start(_Type, DefaultIniFiles) -> + IniFiles = get_ini_files(DefaultIniFiles), + case start_apps([crypto, public_key, sasl, inets, oauth, ssl, ibrowse, mochiweb]) of + ok -> + couch_server_sup:start_link(IniFiles); + {error, Reason} -> + {error, Reason} + end. + +stop(_) -> + ok. + +get_ini_files(Default) -> + case init:get_argument(couch_ini) of + error -> + Default; + {ok, [[]]} -> + Default; + {ok, [Values]} -> + Values + end. + +start_apps([]) -> + ok; +start_apps([App|Rest]) -> + case application:start(App) of + ok -> + start_apps(Rest); + {error, {already_started, App}} -> + start_apps(Rest); + {error, _Reason} when App =:= public_key -> + % ignore on R12B5 + start_apps(Rest); + {error, _Reason} -> + {error, {app_would_not_start, App}} + end. diff --git a/apps/couch/src/couch_auth_cache.erl b/apps/couch/src/couch_auth_cache.erl new file mode 100644 index 00000000..078bfcc1 --- /dev/null +++ b/apps/couch/src/couch_auth_cache.erl @@ -0,0 +1,410 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_auth_cache). +-behaviour(gen_server). + +% public API +-export([get_user_creds/1]). + +% gen_server API +-export([start_link/0, init/1, handle_call/3, handle_info/2, handle_cast/2]). +-export([code_change/3, terminate/2]). + +-include("couch_db.hrl"). +-include("couch_js_functions.hrl"). + +-define(STATE, auth_state_ets). +-define(BY_USER, auth_by_user_ets). +-define(BY_ATIME, auth_by_atime_ets). + +-record(state, { + max_cache_size = 0, + cache_size = 0, + db_notifier = nil +}). + + +-spec get_user_creds(UserName::string() | binary()) -> + Credentials::list() | nil. + +get_user_creds(UserName) when is_list(UserName) -> + get_user_creds(?l2b(UserName)); + +get_user_creds(UserName) -> + UserCreds = case couch_config:get("admins", ?b2l(UserName)) of + "-hashed-" ++ HashedPwdAndSalt -> + % the name is an admin, now check to see if there is a user doc + % which has a matching name, salt, and password_sha + [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","), + case get_from_cache(UserName) of + nil -> + [{<<"roles">>, [<<"_admin">>]}, + {<<"salt">>, ?l2b(Salt)}, + {<<"password_sha">>, ?l2b(HashedPwd)}]; + UserProps when is_list(UserProps) -> + DocRoles = couch_util:get_value(<<"roles">>, UserProps), + [{<<"roles">>, [<<"_admin">> | DocRoles]}, + {<<"salt">>, ?l2b(Salt)}, + {<<"password_sha">>, ?l2b(HashedPwd)}] + end; + _Else -> + get_from_cache(UserName) + end, + validate_user_creds(UserCreds). 
+ + +get_from_cache(UserName) -> + exec_if_auth_db( + fun(_AuthDb) -> + maybe_refresh_cache(), + case ets:lookup(?BY_USER, UserName) of + [] -> + gen_server:call(?MODULE, {fetch, UserName}, infinity); + [{UserName, {Credentials, _ATime}}] -> + couch_stats_collector:increment({couchdb, auth_cache_hits}), + gen_server:cast(?MODULE, {cache_hit, UserName}), + Credentials + end + end, + nil + ). + + +validate_user_creds(nil) -> + nil; +validate_user_creds(UserCreds) -> + case couch_util:get_value(<<"_conflicts">>, UserCreds) of + undefined -> + ok; + _ConflictList -> + throw({unauthorized, + <<"User document conflicts must be resolved before the document", + " is used for authentication purposes.">> + }) + end, + UserCreds. + + +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + + +init(_) -> + ?STATE = ets:new(?STATE, [set, protected, named_table]), + ?BY_USER = ets:new(?BY_USER, [set, protected, named_table]), + ?BY_ATIME = ets:new(?BY_ATIME, [ordered_set, private, named_table]), + AuthDbName = couch_config:get("couch_httpd_auth", "authentication_db"), + true = ets:insert(?STATE, {auth_db_name, ?l2b(AuthDbName)}), + true = ets:insert(?STATE, {auth_db, open_auth_db()}), + process_flag(trap_exit, true), + ok = couch_config:register( + fun("couch_httpd_auth", "auth_cache_size", SizeList) -> + Size = list_to_integer(SizeList), + ok = gen_server:call(?MODULE, {new_max_cache_size, Size}, infinity) + end + ), + ok = couch_config:register( + fun("couch_httpd_auth", "authentication_db", DbName) -> + ok = gen_server:call(?MODULE, {new_auth_db, ?l2b(DbName)}, infinity) + end + ), + {ok, Notifier} = couch_db_update_notifier:start_link(fun handle_db_event/1), + State = #state{ + db_notifier = Notifier, + max_cache_size = list_to_integer( + couch_config:get("couch_httpd_auth", "auth_cache_size", "50") + ) + }, + {ok, State}. 
+ + +handle_db_event({Event, DbName}) -> + [{auth_db_name, AuthDbName}] = ets:lookup(?STATE, auth_db_name), + case DbName =:= AuthDbName of + true -> + case Event of + deleted -> gen_server:call(?MODULE, auth_db_deleted, infinity); + created -> gen_server:call(?MODULE, auth_db_created, infinity); + _Else -> ok + end; + false -> + ok + end. + + +handle_call({new_auth_db, AuthDbName}, _From, State) -> + NewState = clear_cache(State), + true = ets:insert(?STATE, {auth_db_name, AuthDbName}), + true = ets:insert(?STATE, {auth_db, open_auth_db()}), + {reply, ok, NewState}; + +handle_call(auth_db_deleted, _From, State) -> + NewState = clear_cache(State), + true = ets:insert(?STATE, {auth_db, nil}), + {reply, ok, NewState}; + +handle_call(auth_db_created, _From, State) -> + NewState = clear_cache(State), + true = ets:insert(?STATE, {auth_db, open_auth_db()}), + {reply, ok, NewState}; + +handle_call({new_max_cache_size, NewSize}, _From, State) -> + case NewSize >= State#state.cache_size of + true -> + ok; + false -> + lists:foreach( + fun(_) -> + LruTime = ets:last(?BY_ATIME), + [{LruTime, UserName}] = ets:lookup(?BY_ATIME, LruTime), + true = ets:delete(?BY_ATIME, LruTime), + true = ets:delete(?BY_USER, UserName) + end, + lists:seq(1, State#state.cache_size - NewSize) + ) + end, + NewState = State#state{ + max_cache_size = NewSize, + cache_size = erlang:min(NewSize, State#state.cache_size) + }, + {reply, ok, NewState}; + +handle_call({fetch, UserName}, _From, State) -> + {Credentials, NewState} = case ets:lookup(?BY_USER, UserName) of + [{UserName, {Creds, ATime}}] -> + couch_stats_collector:increment({couchdb, auth_cache_hits}), + cache_hit(UserName, Creds, ATime), + {Creds, State}; + [] -> + couch_stats_collector:increment({couchdb, auth_cache_misses}), + Creds = get_user_props_from_db(UserName), + State1 = add_cache_entry(UserName, Creds, erlang:now(), State), + {Creds, State1} + end, + {reply, Credentials, NewState}; + +handle_call(refresh, _From, State) -> + 
exec_if_auth_db(fun refresh_entries/1), + {reply, ok, State}. + + +handle_cast({cache_hit, UserName}, State) -> + case ets:lookup(?BY_USER, UserName) of + [{UserName, {Credentials, ATime}}] -> + cache_hit(UserName, Credentials, ATime); + _ -> + ok + end, + {noreply, State}. + + +handle_info(_Msg, State) -> + {noreply, State}. + + +terminate(_Reason, #state{db_notifier = Notifier}) -> + couch_db_update_notifier:stop(Notifier), + exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end), + true = ets:delete(?BY_USER), + true = ets:delete(?BY_ATIME), + true = ets:delete(?STATE). + + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + + +clear_cache(State) -> + exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end), + true = ets:delete_all_objects(?BY_USER), + true = ets:delete_all_objects(?BY_ATIME), + State#state{cache_size = 0}. + + +add_cache_entry(UserName, Credentials, ATime, State) -> + case State#state.cache_size >= State#state.max_cache_size of + true -> + free_mru_cache_entry(); + false -> + ok + end, + true = ets:insert(?BY_ATIME, {ATime, UserName}), + true = ets:insert(?BY_USER, {UserName, {Credentials, ATime}}), + State#state{cache_size = couch_util:get_value(size, ets:info(?BY_USER))}. + + +free_mru_cache_entry() -> + case ets:last(?BY_ATIME) of + '$end_of_table' -> + ok; % empty cache + LruTime -> + [{LruTime, UserName}] = ets:lookup(?BY_ATIME, LruTime), + true = ets:delete(?BY_ATIME, LruTime), + true = ets:delete(?BY_USER, UserName) + end. + + +cache_hit(UserName, Credentials, ATime) -> + NewATime = erlang:now(), + true = ets:delete(?BY_ATIME, ATime), + true = ets:insert(?BY_ATIME, {NewATime, UserName}), + true = ets:insert(?BY_USER, {UserName, {Credentials, NewATime}}). 
+ + +refresh_entries(AuthDb) -> + case reopen_auth_db(AuthDb) of + nil -> + ok; + AuthDb2 -> + case AuthDb2#db.update_seq > AuthDb#db.update_seq of + true -> + {ok, _, _} = couch_db:enum_docs_since( + AuthDb2, + AuthDb#db.update_seq, + fun(DocInfo, _, _) -> refresh_entry(AuthDb2, DocInfo) end, + AuthDb#db.update_seq, + [] + ), + true = ets:insert(?STATE, {auth_db, AuthDb2}); + false -> + ok + end + end. + + +refresh_entry(Db, #doc_info{high_seq = DocSeq} = DocInfo) -> + case is_user_doc(DocInfo) of + {true, UserName} -> + case ets:lookup(?BY_USER, UserName) of + [] -> + ok; + [{UserName, {_OldCreds, ATime}}] -> + {ok, Doc} = couch_db:open_doc(Db, DocInfo, [conflicts, deleted]), + NewCreds = user_creds(Doc), + true = ets:insert(?BY_USER, {UserName, {NewCreds, ATime}}) + end; + false -> + ok + end, + {ok, DocSeq}. + + +user_creds(#doc{deleted = true}) -> + nil; +user_creds(#doc{} = Doc) -> + {Creds} = couch_query_servers:json_doc(Doc), + Creds. + + +is_user_doc(#doc_info{id = <<"org.couchdb.user:", UserName/binary>>}) -> + {true, UserName}; +is_user_doc(_) -> + false. + + +maybe_refresh_cache() -> + case cache_needs_refresh() of + true -> + ok = gen_server:call(?MODULE, refresh, infinity); + false -> + ok + end. + + +cache_needs_refresh() -> + exec_if_auth_db( + fun(AuthDb) -> + case reopen_auth_db(AuthDb) of + nil -> + false; + AuthDb2 -> + AuthDb2#db.update_seq > AuthDb#db.update_seq + end + end, + false + ). + + +reopen_auth_db(AuthDb) -> + case (catch gen_server:call(AuthDb#db.main_pid, get_db, infinity)) of + {ok, AuthDb2} -> + AuthDb2; + _ -> + nil + end. + + +exec_if_auth_db(Fun) -> + exec_if_auth_db(Fun, ok). + +exec_if_auth_db(Fun, DefRes) -> + case ets:lookup(?STATE, auth_db) of + [{auth_db, #db{} = AuthDb}] -> + Fun(AuthDb); + _ -> + DefRes + end. + + +open_auth_db() -> + [{auth_db_name, DbName}] = ets:lookup(?STATE, auth_db_name), + {ok, AuthDb} = ensure_users_db_exists(DbName, [sys_db]), + AuthDb. 
+ + +get_user_props_from_db(UserName) -> + exec_if_auth_db( + fun(AuthDb) -> + Db = reopen_auth_db(AuthDb), + DocId = <<"org.couchdb.user:", UserName/binary>>, + try + {ok, Doc} = couch_db:open_doc(Db, DocId, [conflicts]), + {DocProps} = couch_query_servers:json_doc(Doc), + DocProps + catch + _:_Error -> + nil + end + end, + nil + ). + +ensure_users_db_exists(DbName, Options) -> + Options1 = [{user_ctx, #user_ctx{roles=[<<"_admin">>]}} | Options], + case couch_db:open(DbName, Options1) of + {ok, Db} -> + ensure_auth_ddoc_exists(Db, <<"_design/_auth">>), + {ok, Db}; + _Error -> + {ok, Db} = couch_db:create(DbName, Options1), + ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>), + {ok, Db} + end. + +ensure_auth_ddoc_exists(Db, DDocId) -> + case couch_db:open_doc(Db, DDocId) of + {not_found, _Reason} -> + {ok, AuthDesign} = auth_design_doc(DDocId), + {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []); + _ -> + ok + end, + ok. + +auth_design_doc(DocId) -> + DocProps = [ + {<<"_id">>, DocId}, + {<<"language">>,<<"javascript">>}, + {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION} + ], + {ok, couch_doc:from_json_obj({DocProps})}. diff --git a/apps/couch/src/couch_btree.erl b/apps/couch/src/couch_btree.erl new file mode 100644 index 00000000..0e47bac7 --- /dev/null +++ b/apps/couch/src/couch_btree.erl @@ -0,0 +1,676 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_btree). + +-export([open/2, open/3, query_modify/4, add/2, add_remove/3]). 
+-export([fold/4, full_reduce/1, final_reduce/2, foldl/3, foldl/4]). +-export([fold_reduce/4, lookup/2, get_state/1, set_options/2]). + +-define(CHUNK_THRESHOLD, 16#4ff). + +-record(btree, + {fd, + root, + extract_kv = fun({Key, Value}) -> {Key, Value} end, + assemble_kv = fun(Key, Value) -> {Key, Value} end, + less = fun(A, B) -> A < B end, + reduce = nil + }). + +extract(#btree{extract_kv=Extract}, Value) -> + Extract(Value). + +assemble(#btree{assemble_kv=Assemble}, Key, Value) -> + Assemble(Key, Value). + +less(#btree{less=Less}, A, B) -> + Less(A, B). + +% pass in 'nil' for State if a new Btree. +open(State, Fd) -> + {ok, #btree{root=State, fd=Fd}}. + +set_options(Bt, []) -> + Bt; +set_options(Bt, [{split, Extract}|Rest]) -> + set_options(Bt#btree{extract_kv=Extract}, Rest); +set_options(Bt, [{join, Assemble}|Rest]) -> + set_options(Bt#btree{assemble_kv=Assemble}, Rest); +set_options(Bt, [{less, Less}|Rest]) -> + set_options(Bt#btree{less=Less}, Rest); +set_options(Bt, [{reduce, Reduce}|Rest]) -> + set_options(Bt#btree{reduce=Reduce}, Rest). + +open(State, Fd, Options) -> + {ok, set_options(#btree{root=State, fd=Fd}, Options)}. + +get_state(#btree{root=Root}) -> + Root. + +final_reduce(#btree{reduce=Reduce}, Val) -> + final_reduce(Reduce, Val); +final_reduce(Reduce, {[], []}) -> + Reduce(reduce, []); +final_reduce(_Bt, {[], [Red]}) -> + Red; +final_reduce(Reduce, {[], Reductions}) -> + Reduce(rereduce, Reductions); +final_reduce(Reduce, {KVs, Reductions}) -> + Red = Reduce(reduce, KVs), + final_reduce(Reduce, {[], [Red | Reductions]}). 
+ +fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) -> + Dir = couch_util:get_value(dir, Options, fwd), + StartKey = couch_util:get_value(start_key, Options), + EndKey = couch_util:get_value(end_key, Options), + KeyGroupFun = couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end), + {StartKey2, EndKey2} = + case Dir of + rev -> {EndKey, StartKey}; + fwd -> {StartKey, EndKey} + end, + try + {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} = + reduce_stream_node(Bt, Dir, Root, StartKey2, EndKey2, undefined, [], [], + KeyGroupFun, Fun, Acc), + if GroupedKey2 == undefined -> + {ok, Acc2}; + true -> + case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of + {ok, Acc3} -> {ok, Acc3}; + {stop, Acc3} -> {ok, Acc3} + end + end + catch + throw:{stop, AccDone} -> {ok, AccDone} + end. + +full_reduce(#btree{root=nil,reduce=Reduce}) -> + {ok, Reduce(reduce, [])}; +full_reduce(#btree{root={_P, Red}}) -> + {ok, Red}. + +% wraps a 2 arity function with the proper 3 arity function +convert_fun_arity(Fun) when is_function(Fun, 2) -> + fun(KV, _Reds, AccIn) -> Fun(KV, AccIn) end; +convert_fun_arity(Fun) when is_function(Fun, 3) -> + Fun. % Already arity 3 + +make_key_in_end_range_function(#btree{less=Less}, fwd, Options) -> + case couch_util:get_value(end_key_gt, Options) of + undefined -> + case couch_util:get_value(end_key, Options) of + undefined -> + fun(_Key) -> true end; + LastKey -> + fun(Key) -> not Less(LastKey, Key) end + end; + EndKey -> + fun(Key) -> Less(Key, EndKey) end + end; +make_key_in_end_range_function(#btree{less=Less}, rev, Options) -> + case couch_util:get_value(end_key_gt, Options) of + undefined -> + case couch_util:get_value(end_key, Options) of + undefined -> + fun(_Key) -> true end; + LastKey -> + fun(Key) -> not Less(Key, LastKey) end + end; + EndKey -> + fun(Key) -> Less(EndKey, Key) end + end. + + +foldl(Bt, Fun, Acc) -> + fold(Bt, Fun, Acc, []). + +foldl(Bt, Fun, Acc, Options) -> + fold(Bt, Fun, Acc, Options). 
+ + +fold(#btree{root=nil}, _Fun, Acc, _Options) -> + {ok, {[], []}, Acc}; +fold(#btree{root=Root}=Bt, Fun, Acc, Options) -> + Dir = couch_util:get_value(dir, Options, fwd), + InRange = make_key_in_end_range_function(Bt, Dir, Options), + Result = + case couch_util:get_value(start_key, Options) of + undefined -> + stream_node(Bt, [], Bt#btree.root, InRange, Dir, + convert_fun_arity(Fun), Acc); + StartKey -> + stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir, + convert_fun_arity(Fun), Acc) + end, + case Result of + {ok, Acc2}-> + {_P, FullReduction} = Root, + {ok, {[], [FullReduction]}, Acc2}; + {stop, LastReduction, Acc2} -> + {ok, LastReduction, Acc2} + end. + +add(Bt, InsertKeyValues) -> + add_remove(Bt, InsertKeyValues, []). + +add_remove(Bt, InsertKeyValues, RemoveKeys) -> + {ok, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys), + {ok, Bt2}. + +query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) -> + #btree{root=Root} = Bt, + InsertActions = lists:map( + fun(KeyValue) -> + {Key, Value} = extract(Bt, KeyValue), + {insert, Key, Value} + end, InsertValues), + RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys], + FetchActions = [{fetch, Key, nil} || Key <- LookupKeys], + SortFun = + fun({OpA, A, _}, {OpB, B, _}) -> + case A == B of + % A and B are equal, sort by op. + true -> op_order(OpA) < op_order(OpB); + false -> + less(Bt, A, B) + end + end, + Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])), + {ok, KeyPointers, QueryResults, Bt2} = modify_node(Bt, Root, Actions, []), + {ok, NewRoot, Bt3} = complete_root(Bt2, KeyPointers), + {ok, QueryResults, Bt3#btree{root=NewRoot}}. + +% for ordering different operations with the same key. +% fetch < remove < insert +op_order(fetch) -> 1; +op_order(remove) -> 2; +op_order(insert) -> 3. 
+ +lookup(#btree{root=Root, less=Less}=Bt, Keys) -> + SortedKeys = lists:sort(Less, Keys), + {ok, SortedResults} = lookup(Bt, Root, SortedKeys), + % We want to return the results in the same order as the keys were input + % but we may have changed the order when we sorted. So we need to put the + % order back into the results. + couch_util:reorder_results(Keys, SortedResults). + +lookup(_Bt, nil, Keys) -> + {ok, [{Key, not_found} || Key <- Keys]}; +lookup(Bt, {Pointer, _Reds}, Keys) -> + {NodeType, NodeList} = get_node(Bt, Pointer), + case NodeType of + kp_node -> + lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []); + kv_node -> + lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, []) + end. + +lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) -> + {ok, lists:reverse(Output)}; +lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound -> + {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])}; +lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Output) -> + N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), FirstLookupKey), + {Key, PointerInfo} = element(N, NodeTuple), + SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end, + case lists:splitwith(SplitFun, LookupKeys) of + {[], GreaterQueries} -> + lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output); + {LessEqQueries, GreaterQueries} -> + {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries), + lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output)) + end. 
+ + +lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) -> + {ok, lists:reverse(Output)}; +lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound -> + % keys not found + {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])}; +lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) -> + N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey), + {Key, Value} = element(N, NodeTuple), + case less(Bt, LookupKey, Key) of + true -> + % LookupKey is less than Key + lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]); + false -> + case less(Bt, Key, LookupKey) of + true -> + % LookupKey is greater than Key + lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]); + false -> + % LookupKey is equal to Key + lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output]) + end + end. + + +complete_root(Bt, []) -> + {ok, nil, Bt}; +complete_root(Bt, [{_Key, PointerInfo}])-> + {ok, PointerInfo, Bt}; +complete_root(Bt, KPs) -> + {ok, ResultKeyPointers, Bt2} = write_node(Bt, kp_node, KPs), + complete_root(Bt2, ResultKeyPointers). + +%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%% +% It is inaccurate as it does not account for compression when blocks are +% written. Plus with the "case byte_size(term_to_binary(InList)) of" code +% it's probably really inefficient. + +chunkify(InList) -> + case byte_size(term_to_binary(InList)) of + Size when Size > ?CHUNK_THRESHOLD -> + NumberOfChunksLikely = ((Size div ?CHUNK_THRESHOLD) + 1), + ChunkThreshold = Size div NumberOfChunksLikely, + chunkify(InList, ChunkThreshold, [], 0, []); + _Else -> + [InList] + end. 
+ +chunkify([], _ChunkThreshold, [], 0, OutputChunks) -> + lists:reverse(OutputChunks); +chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) -> + lists:reverse([lists:reverse(OutList) | OutputChunks]); +chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) -> + case byte_size(term_to_binary(InElement)) of + Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] -> + chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]); + Size -> + chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks) + end. + +modify_node(Bt, RootPointerInfo, Actions, QueryOutput) -> + case RootPointerInfo of + nil -> + NodeType = kv_node, + NodeList = []; + {Pointer, _Reds} -> + {NodeType, NodeList} = get_node(Bt, Pointer) + end, + NodeTuple = list_to_tuple(NodeList), + + {ok, NewNodeList, QueryOutput2, Bt2} = + case NodeType of + kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput); + kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput) + end, + case NewNodeList of + [] -> % no nodes remain + {ok, [], QueryOutput2, Bt2}; + NodeList -> % nothing changed + {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple), + {ok, [{LastKey, RootPointerInfo}], QueryOutput2, Bt2}; + _Else2 -> + {ok, ResultList, Bt3} = write_node(Bt2, NodeType, NewNodeList), + {ok, ResultList, QueryOutput2, Bt3} + end. + +reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) -> + []; +reduce_node(#btree{reduce=R}, kp_node, NodeList) -> + R(rereduce, [Red || {_K, {_P, Red}} <- NodeList]); +reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) -> + R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]). + + +get_node(#btree{fd = Fd}, NodePos) -> + {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos), + {NodeType, NodeList}. 
+ +write_node(Bt, NodeType, NodeList) -> + % split up nodes into smaller sizes + NodeListList = chunkify(NodeList), + % now write out each chunk and return the KeyPointer pairs for those nodes + ResultList = [ + begin + {ok, Pointer} = couch_file:append_term(Bt#btree.fd, {NodeType, ANodeList}), + {LastKey, _} = lists:last(ANodeList), + {LastKey, {Pointer, reduce_node(Bt, NodeType, ANodeList)}} + end + || + ANodeList <- NodeListList + ], + {ok, ResultList, Bt}. + +modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) -> + modify_node(Bt, nil, Actions, QueryOutput); +modify_kpnode(Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) -> + {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, + tuple_size(NodeTuple), [])), QueryOutput, Bt}; +modify_kpnode(Bt, NodeTuple, LowerBound, + [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) -> + Sz = tuple_size(NodeTuple), + N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey), + case N =:= Sz of + true -> + % perform remaining actions on last node + {_, PointerInfo} = element(Sz, NodeTuple), + {ok, ChildKPs, QueryOutput2, Bt2} = + modify_node(Bt, PointerInfo, Actions, QueryOutput), + NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, + Sz - 1, ChildKPs)), + {ok, NodeList, QueryOutput2, Bt2}; + false -> + {NodeKey, PointerInfo} = element(N, NodeTuple), + SplitFun = fun({_ActionType, ActionKey, _ActionValue}) -> + not less(Bt, NodeKey, ActionKey) + end, + {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions), + {ok, ChildKPs, QueryOutput2, Bt2} = + modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput), + ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple, + LowerBound, N - 1, ResultNode)), + modify_kpnode(Bt2, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2) + end. 
+ +bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End -> + Tail; +bounded_tuple_to_revlist(Tuple, Start, End, Tail) -> + bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]). + +bounded_tuple_to_list(Tuple, Start, End, Tail) -> + bounded_tuple_to_list2(Tuple, Start, End, [], Tail). + +bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End -> + lists:reverse(Acc, Tail); +bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) -> + bounded_tuple_to_list2(Tuple, Start + 1, End, [element(Start, Tuple) | Acc], Tail). + +find_first_gteq(_Bt, _Tuple, Start, End, _Key) when Start == End -> + End; +find_first_gteq(Bt, Tuple, Start, End, Key) -> + Mid = Start + ((End - Start) div 2), + {TupleKey, _} = element(Mid, Tuple), + case less(Bt, TupleKey, Key) of + true -> + find_first_gteq(Bt, Tuple, Mid+1, End, Key); + false -> + find_first_gteq(Bt, Tuple, Start, Mid, Key) + end. + +modify_kvnode(Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) -> + {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput, Bt}; +modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) -> + case ActionType of + insert -> + modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput); + remove -> + % just drop the action + modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput); + fetch -> + % the key/value must not exist in the tree + modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput]) + end; +modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) -> + N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey), + {Key, Value} = element(N, NodeTuple), + ResultNode = 
bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode), + case less(Bt, ActionKey, Key) of + true -> + case ActionType of + insert -> + % ActionKey is less than the Key, so insert + modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput); + remove -> + % ActionKey is less than the Key, just drop the action + modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput); + fetch -> + % ActionKey is less than the Key, the key/value must not exist in the tree + modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput]) + end; + false -> + % ActionKey and Key are maybe equal. + case less(Bt, Key, ActionKey) of + false -> + case ActionType of + insert -> + modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput); + remove -> + modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput); + fetch -> + % ActionKey is equal to the Key, insert into the QueryOutput, but re-process the node + % since an identical action key can follow it. + modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput]) + end; + true -> + modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput) + end + end. 
+ + +reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _KeyEnd, GroupedKey, GroupedKVsAcc, + GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) -> + {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey}; +reduce_stream_node(Bt, Dir, {P, _R}, KeyStart, KeyEnd, GroupedKey, GroupedKVsAcc, + GroupedRedsAcc, KeyGroupFun, Fun, Acc) -> + case get_node(Bt, P) of + {kp_node, NodeList} -> + reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, KeyEnd, GroupedKey, + GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc); + {kv_node, KVs} -> + reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, KeyEnd, GroupedKey, + GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) + end. + +reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, KeyEnd, + GroupedKey, GroupedKVsAcc, GroupedRedsAcc, + KeyGroupFun, Fun, Acc) -> + + GTEKeyStartKVs = + case KeyStart of + undefined -> + KVs; + _ -> + lists:dropwhile(fun({Key,_}) -> less(Bt, Key, KeyStart) end, KVs) + end, + KVs2 = + case KeyEnd of + undefined -> + GTEKeyStartKVs; + _ -> + lists:takewhile( + fun({Key,_}) -> + not less(Bt, KeyEnd, Key) + end, GTEKeyStartKVs) + end, + reduce_stream_kv_node2(Bt, adjust_dir(Dir, KVs2), GroupedKey, GroupedKVsAcc, GroupedRedsAcc, + KeyGroupFun, Fun, Acc). 
% Walk the in-range KVs, grouping consecutive keys with KeyGroupFun and
% invoking Fun each time a group closes. A {stop, Acc} from Fun is raised
% as a throw so the surrounding fold unwinds immediately.
reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
        _KeyGroupFun, _Fun, Acc) ->
    {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
reduce_stream_kv_node2(Bt, [{Key, Value} | RestKVs], GroupedKey, GroupedKVsAcc,
        GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
    case GroupedKey of
    undefined ->
        % first key seen opens the initial group
        reduce_stream_kv_node2(Bt, RestKVs, Key,
            [assemble(Bt, Key, Value)], [], KeyGroupFun, Fun, Acc);
    _ ->
        case KeyGroupFun(GroupedKey, Key) of
        true ->
            % same group: accumulate the assembled KV
            reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
                [assemble(Bt, Key, Value) | GroupedKVsAcc], GroupedRedsAcc,
                KeyGroupFun, Fun, Acc);
        false ->
            % group boundary: flush the finished group through Fun
            case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
            {ok, Acc2} ->
                reduce_stream_kv_node2(Bt, RestKVs, Key,
                    [assemble(Bt, Key, Value)], [], KeyGroupFun, Fun, Acc2);
            {stop, Acc2} ->
                throw({stop, Acc2})
            end
        end
    end.

% Select the kp-node children that can hold keys in [KeyStart, KeyEnd] and
% stream them. The first child whose split key is >= KeyEnd is kept as
% well, because its subtree may still contain keys =< KeyEnd.
reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, KeyEnd,
        GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
        KeyGroupFun, Fun, Acc) ->
    FromStart =
        case KeyStart of
        undefined ->
            NodeList;
        _ ->
            lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end,
                NodeList)
        end,
    NodesInRange =
        case KeyEnd of
        undefined ->
            FromStart;
        _ ->
            {Definite, Maybe} = lists:splitwith(
                fun({Key, _}) -> less(Bt, Key, KeyEnd) end, FromStart),
            case Maybe of
            [] -> Definite;
            [FirstMaybe | _] -> Definite ++ [FirstMaybe]
            end
        end,
    reduce_stream_kp_node2(Bt, Dir, adjust_dir(Dir, NodesInRange), KeyStart,
        KeyEnd, GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun,
        Acc).
% Stream kp-node children. While no group is open (GroupedKey == undefined)
% every child must be descended into. Once a group exists, children whose
% split key still belongs to the open group contribute their stored
% reduction wholesale — except the last such child, which may also contain
% keys of the next group and therefore must be descended into.
reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart,
        KeyEnd, undefined, [], [], KeyGroupFun, Fun, Acc) ->
    {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
        reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, KeyEnd, undefined,
            [], [], KeyGroupFun, Fun, Acc),
    reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, KeyEnd, GroupedKey2,
        GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, KeyEnd,
        GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
    {Grouped0, Ungrouped0} = lists:splitwith(
        fun({Key, _}) -> KeyGroupFun(GroupedKey, Key) end, NodeList),
    {GroupedNodes, UngroupedNodes} =
        case Grouped0 of
        [] ->
            {Grouped0, Ungrouped0};
        _ ->
            % the last in-group child may straddle two groups: demote it to
            % the "descend into" list instead of using its reduction as-is
            [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
            {RestGrouped, [FirstGrouped | Ungrouped0]}
        end,
    GroupedReds = [R || {_, {_, R}} <- GroupedNodes],
    case UngroupedNodes of
    [{_Key, NodeInfo} | RestNodes] ->
        {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
            reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, KeyEnd, GroupedKey,
                GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun,
                Fun, Acc),
        reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, KeyEnd,
            GroupedKey2, GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun,
            Acc2);
    [] ->
        {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
    end.

% Forward traversals use node lists as stored on disk; reverse flips them.
adjust_dir(fwd, List) -> List;
adjust_dir(rev, List) -> lists:reverse(List).

% Stream a node starting at StartKey, dispatching on the node type read
% from disk.
stream_node(Bt, Reds, {Pointer, _Reds}, StartKey, InRange, Dir, Fun, Acc) ->
    {NodeType, NodeList} = get_node(Bt, Pointer),
    AdjList = adjust_dir(Dir, NodeList),
    case NodeType of
    kp_node ->
        stream_kp_node(Bt, Reds, AdjList, StartKey, InRange, Dir, Fun, Acc);
    kv_node ->
        stream_kv_node(Bt, Reds, AdjList, StartKey, InRange, Dir, Fun, Acc)
    end.
% Stream every KV under a node pointer (no start key), dispatching on the
% node type read from disk.
stream_node(Bt, Reds, {Pointer, _Reds}, InRange, Dir, Fun, Acc) ->
    {NodeType, NodeList} = get_node(Bt, Pointer),
    AdjList = adjust_dir(Dir, NodeList),
    case NodeType of
    kp_node ->
        stream_kp_node(Bt, Reds, AdjList, InRange, Dir, Fun, Acc);
    kv_node ->
        stream_kv_node2(Bt, Reds, [], AdjList, InRange, Dir, Fun, Acc)
    end.

% Stream all children of a kp node in order, accumulating each child's
% stored reduction once that child has been fully streamed.
stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
    {ok, Acc};
stream_kp_node(Bt, Reds, [{_Key, {Pointer, Red}} | Rest], InRange, Dir, Fun,
        Acc) ->
    case stream_node(Bt, Reds, {Pointer, Red}, InRange, Dir, Fun, Acc) of
    {ok, Acc2} ->
        stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2);
    {stop, LastReds, Acc2} ->
        {stop, LastReds, Acc2}
    end.

% Drop leading children whose split key sorts before StartKey, collecting
% their reductions; the first remaining child may contain StartKey.
drop_nodes(_Bt, Reds, _StartKey, []) ->
    {Reds, []};
drop_nodes(Bt, Reds, StartKey, [{NodeKey, {Pointer, Red}} | RestKPs]) ->
    case less(Bt, NodeKey, StartKey) of
    true ->
        drop_nodes(Bt, [Red | Reds], StartKey, RestKPs);
    false ->
        {Reds, [{NodeKey, {Pointer, Red}} | RestKPs]}
    end.

% Stream a kp node starting at StartKey, in either direction.
stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
    {NewReds, NodesToStream} =
        case Dir of
        fwd ->
            % drop all nodes sorting before the key
            drop_nodes(Bt, Reds, StartKey, KPs);
        rev ->
            % keep all nodes sorting before the key, AND the first node to
            % sort after
            RevKPs = lists:reverse(KPs),
            case lists:splitwith(fun({Key, _Pointer}) ->
                    less(Bt, Key, StartKey) end, RevKPs) of
            {_RevsBefore, []} ->
                % everything sorts before it
                {Reds, KPs};
            {RevBefore, [FirstAfter | Drop]} ->
                {[Red || {_K, {_P, Red}} <- Drop] ++ Reds,
                    [FirstAfter | lists:reverse(RevBefore)]}
            end
        end,
    case NodesToStream of
    [] ->
        {ok, Acc};
    [{_Key, {Pointer, Red}} | Rest] ->
        case stream_node(Bt, NewReds, {Pointer, Red}, StartKey, InRange, Dir,
                Fun, Acc) of
        {ok, Acc2} ->
            stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
        {stop, LastReds, Acc2} ->
            {stop, LastReds, Acc2}
        end
    end.
% Stream a kv node starting at StartKey: split off the keys sorting before
% the start point (kept, assembled, as "previous KVs" context) and stream
% the remainder.
stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
    DropFun =
        case Dir of
        fwd -> fun({Key, _}) -> less(Bt, Key, StartKey) end;
        rev -> fun({Key, _}) -> less(Bt, StartKey, Key) end
        end,
    {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
    AssembledLTKVs = [assemble(Bt, K, V) || {K, V} <- LTKVs],
    stream_kv_node2(Bt, Reds, AssembledLTKVs, GTEKVs, InRange, Dir, Fun, Acc).

% Feed assembled KVs to Fun until the range predicate fails or Fun stops;
% on stop, hand back the already-streamed KVs and reductions so the caller
% can compute a final reduce value.
stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
    {ok, Acc};
stream_kv_node2(Bt, Reds, PrevKVs, [{K, V} | RestKVs], InRange, Dir, Fun, Acc) ->
    case InRange(K) of
    false ->
        {stop, {PrevKVs, Reds}, Acc};
    true ->
        AssembledKV = assemble(Bt, K, V),
        case Fun(AssembledKV, {PrevKVs, Reds}, Acc) of
        {ok, Acc2} ->
            stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs,
                InRange, Dir, Fun, Acc2);
        {stop, Acc2} ->
            {stop, {PrevKVs, Reds}, Acc2}
        end
    end.
diff --git a/apps/couch/src/couch_changes.erl b/apps/couch/src/couch_changes.erl
new file mode 100644
index 00000000..3a5bc4f8
--- /dev/null
+++ b/apps/couch/src/couch_changes.erl
@@ -0,0 +1,270 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

-module(couch_changes).
-include("couch_db.hrl").

-export([handle_changes/3]).
%% @type Req -> #httpd{} | {json_req, JsonObj()}
%% Build a fun that, given a results Callback, streams the db's changes
%% feed. Continuous/longpoll feeds subscribe to db update notifications and
%% keep going; every other feed sends a single batch and finishes.
handle_changes(#changes_args{style=Style}=Args1, Req, Db) ->
    Args = Args1#changes_args{
        filter = make_filter_fun(Args1#changes_args.filter, Style, Req, Db)},
    StartSeq =
        case Args#changes_args.dir of
        rev -> couch_db:get_update_seq(Db);
        fwd -> Args#changes_args.since
        end,
    Feed = Args#changes_args.feed,
    case Feed == "continuous" orelse Feed == "longpoll" of
    true ->
        fun(Callback) ->
            Self = self(),
            {ok, Notify} = couch_db_update_notifier:start_link(
                fun({_, DbName}) when DbName == Db#db.name ->
                    Self ! db_updated;
                (_) ->
                    ok
                end),
            start_sending_changes(Callback, Feed),
            {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
            try
                keep_sending_changes(Args, Callback, Db, StartSeq, <<"">>,
                    Timeout, TimeoutFun)
            after
                couch_db_update_notifier:stop(Notify),
                % clean out any remaining update messages
                get_rest_db_updated()
            end
        end;
    false ->
        fun(Callback) ->
            start_sending_changes(Callback, Feed),
            {ok, {_, LastSeq, _Prepend, _, _, _, _, _}} =
                send_changes(Args#changes_args{feed="normal"}, Callback, Db,
                    StartSeq, <<"">>),
            end_sending_changes(Callback, LastSeq, Feed)
        end
    end.
%% @type Req -> #httpd{} | {json_req, JsonObj()}
%% Resolve a filter name ("" or "designdoc/filtername") into a fun mapping a
%% #doc_info{} to the list of {[{<<"rev">>, ...}]} rows that pass the filter.
make_filter_fun(FilterName, Style, Req, Db) ->
    case [list_to_binary(couch_httpd:unquote(Part))
            || Part <- string:tokens(FilterName, "/")] of
    [] ->
        % no filter: emit rev rows according to the requested style
        fun(#doc_info{revs=[#rev_info{rev=Rev}|_]=Revs}) ->
            case Style of
            main_only ->
                [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
            all_docs ->
                [{[{<<"rev">>, couch_doc:rev_to_str(R)}]}
                    || #rev_info{rev=R} <- Revs]
            end
        end;
    [DName, FName] ->
        DesignId = <<"_design/", DName/binary>>,
        DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, []),
        % validate that the ddoc has the filter fun
        #doc{body={Props}} = DDoc,
        couch_util:get_nested_json_value({Props}, [<<"filters">>, FName]),
        fun(DocInfo) ->
            DocInfos =
                case Style of
                main_only ->
                    [DocInfo];
                all_docs ->
                    [DocInfo#doc_info{revs=[Rev]}
                        || Rev <- DocInfo#doc_info.revs]
                end,
            Docs = [Doc || {ok, Doc} <- [
                couch_db:open_doc(Db, DocInfo2, [deleted, conflicts])
                    || DocInfo2 <- DocInfos]],
            {ok, Passes} = couch_query_servers:filter_docs(
                Req, Db, DDoc, FName, Docs),
            [{[{<<"rev">>, couch_doc:rev_to_str({RevPos, RevId})}]}
                || {Pass, #doc{revs={RevPos, [RevId|_]}}}
                    <- lists:zip(Passes, Docs), Pass == true]
        end;
    _Else ->
        throw({bad_request,
            "filter parameter must be of the form `designname/filtername`"})
    end.

%% Compute the {Timeout, TimeoutFun} pair for a feed. A heartbeat re-arms
%% the wait (TimeoutFun returns ok after notifying the callback); a plain
%% timeout stops it (returns stop). The configured httpd/changes_timeout
%% caps any user-supplied value.
get_changes_timeout(Args, Callback) ->
    #changes_args{
        heartbeat = Heartbeat,
        timeout = Timeout,
        feed = ResponseType
    } = Args,
    DefaultTimeout = list_to_integer(
        couch_config:get("httpd", "changes_timeout", "60000")),
    case Heartbeat of
    undefined ->
        case Timeout of
        undefined ->
            {DefaultTimeout, fun() -> stop end};
        infinity ->
            {infinity, fun() -> stop end};
        _ ->
            {lists:min([DefaultTimeout, Timeout]), fun() -> stop end}
        end;
    true ->
        % heartbeat=true (no interval given): use the default interval
        {DefaultTimeout, fun() -> Callback(timeout, ResponseType), ok end};
    _ ->
        {lists:min([DefaultTimeout, Heartbeat]),
            fun() -> Callback(timeout, ResponseType), ok end}
    end.
% "continuous" responses have no preamble; other feed types do.
start_sending_changes(_Callback, "continuous") ->
    ok;
start_sending_changes(Callback, ResponseType) ->
    Callback(start, ResponseType).

% Fold changes_enumerator/2 over changes since StartSeq. The 8-tuple is the
% enumerator's threaded state.
send_changes(Args, Callback, Db, StartSeq, Prepend) ->
    #changes_args{
        style = Style,
        include_docs = IncludeDocs,
        limit = Limit,
        feed = ResponseType,
        dir = Dir,
        filter = FilterFun
    } = Args,
    couch_db:changes_since(Db, Style, StartSeq, fun changes_enumerator/2,
        [{dir, Dir}],
        {Db, StartSeq, Prepend, FilterFun, Callback, ResponseType, Limit,
            IncludeDocs}).

% Loop for longpoll/continuous feeds: send what is available, then wait for
% a db_updated notification (or heartbeat/timeout) and re-open the db to
% resume at the last sequence.
keep_sending_changes(Args, Callback, Db, StartSeq, Prepend, Timeout,
        TimeoutFun) ->
    #changes_args{feed=ResponseType, limit=Limit} = Args,
    {ok, {_, EndSeq, Prepend2, _, _, _, NewLimit, _}} = send_changes(
        Args#changes_args{dir=fwd}, Callback, Db, StartSeq, Prepend),
    couch_db:close(Db),
    if Limit > NewLimit, ResponseType == "longpoll" ->
        % longpoll delivered at least one row: done
        end_sending_changes(Callback, EndSeq, ResponseType);
    true ->
        case wait_db_updated(Timeout, TimeoutFun) of
        updated ->
            % reopen with the same user context; the db may have been
            % deleted while we were waiting
            case couch_db:open(Db#db.name, [{user_ctx, Db#db.user_ctx}]) of
            {ok, Db2} ->
                keep_sending_changes(Args#changes_args{limit=NewLimit},
                    Callback, Db2, EndSeq, Prepend2, Timeout, TimeoutFun);
            _Else ->
                end_sending_changes(Callback, EndSeq, ResponseType)
            end;
        stop ->
            end_sending_changes(Callback, EndSeq, ResponseType)
        end
    end.

end_sending_changes(Callback, EndSeq, ResponseType) ->
    Callback({stop, EndSeq}, ResponseType).
% Enumerator state:
%   {Db, Seq, Prepend, FilterFun, Callback, ResponseType, Limit, IncludeDocs}
% The "continuous" clause never emits a row separator; other feeds prepend
% ",\n" between rows.
changes_enumerator(DocInfo, {Db, _, _, FilterFun, Callback, "continuous",
        Limit, IncludeDocs}) ->
    #doc_info{id=Id, high_seq=Seq,
        revs=[#rev_info{deleted=Del, rev=Rev} | _]} = DocInfo,
    Results = [Result || Result <- FilterFun(DocInfo), Result /= null],
    Go = if Limit =< 1 -> stop; true -> ok end,
    case Results of
    [] ->
        {Go, {Db, Seq, nil, FilterFun, Callback, "continuous", Limit,
            IncludeDocs}};
    _ ->
        ChangesRow = changes_row(Db, Seq, Id, Del, Results, Rev, IncludeDocs),
        Callback({change, ChangesRow, <<"">>}, "continuous"),
        {Go, {Db, Seq, nil, FilterFun, Callback, "continuous", Limit - 1,
            IncludeDocs}}
    end;
changes_enumerator(DocInfo, {Db, _, Prepend, FilterFun, Callback, ResponseType,
        Limit, IncludeDocs}) ->
    #doc_info{id=Id, high_seq=Seq,
        revs=[#rev_info{deleted=Del, rev=Rev} | _]} = DocInfo,
    Results = [Result || Result <- FilterFun(DocInfo), Result /= null],
    Go = if Limit =< 1 -> stop; true -> ok end,
    case Results of
    [] ->
        {Go, {Db, Seq, Prepend, FilterFun, Callback, ResponseType, Limit,
            IncludeDocs}};
    _ ->
        ChangesRow = changes_row(Db, Seq, Id, Del, Results, Rev, IncludeDocs),
        Callback({change, ChangesRow, Prepend}, ResponseType),
        {Go, {Db, Seq, <<",\n">>, FilterFun, Callback, ResponseType,
            Limit - 1, IncludeDocs}}
    end.

% Build one JSON changes row; the final boolean selects inlining the doc.
changes_row(Db, Seq, Id, Del, Results, Rev, true) ->
    {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
        deleted_item(Del) ++ couch_httpd_view:doc_member(Db, {Id, Rev})};
changes_row(_, Seq, Id, Del, Results, _, false) ->
    {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
        deleted_item(Del)}.

deleted_item(true) -> [{deleted, true}];
deleted_item(_) -> [].

% waits for a db_updated msg, if there are multiple msgs, collects them.
% Wait for a db_updated message; on timeout consult TimeoutFun: ok re-arms
% the wait (heartbeat), stop gives up.
wait_db_updated(Timeout, TimeoutFun) ->
    receive
    db_updated ->
        get_rest_db_updated()
    after Timeout ->
        case TimeoutFun() of
        ok -> wait_db_updated(Timeout, TimeoutFun);
        stop -> stop
        end
    end.

% Drain any queued db_updated messages so bursts of notifications coalesce
% into a single 'updated' result.
get_rest_db_updated() ->
    receive
    db_updated ->
        get_rest_db_updated()
    after 0 ->
        updated
    end.
diff --git a/apps/couch/src/couch_config.erl b/apps/couch/src/couch_config.erl
new file mode 100644
index 00000000..be53e3a3
--- /dev/null
+++ b/apps/couch/src/couch_config.erl
@@ -0,0 +1,243 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

% Reads CouchDB's ini files and gets queried for configuration parameters.
% This module is initialized with a list of ini files that it consecutively
% reads Key/Value pairs from and saves them in an ets table. If more than one
% ini file is specified, the last one is used to write changes that are made
% with store/2 back to that ini file.

-module(couch_config).
-behaviour(gen_server).

-include("couch_db.hrl").


-export([start_link/1, stop/0]).
-export([all/0, get/1, get/2, get/3, set/3, set/4, delete/2, delete/3]).
-export([register/1, register/2]).
-export([parse_ini_file/1]).

-export([init/1, terminate/2, code_change/3]).
-export([handle_call/3, handle_cast/2, handle_info/2]).

% notify_funs: [{RegistrantPid, fun/4}] change subscribers.
% write_filename: the ini file persisted changes are written back to.
-record(config, {
    notify_funs=[],
    write_filename=undefined
}).


start_link(IniFiles) ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, IniFiles, []).

stop() ->
    gen_server:cast(?MODULE, stop).
% Return every {{Section, Key}, Value} pair, sorted.
all() ->
    lists:sort(gen_server:call(?MODULE, all, infinity)).


% All {Key, Value} pairs in a section; binary section names are accepted
% and converted to lists.
get(Section) when is_binary(Section) ->
    ?MODULE:get(?b2l(Section));
get(Section) ->
    Matches = ets:match(?MODULE, {{Section, '$1'}, '$2'}),
    [{Key, Value} || [Key, Value] <- Matches].

get(Section, Key) ->
    ?MODULE:get(Section, Key, undefined).

get(Section, Key, Default) when is_binary(Section) and is_binary(Key) ->
    ?MODULE:get(?b2l(Section), ?b2l(Key), Default);
get(Section, Key, Default) ->
    case ets:lookup(?MODULE, {Section, Key}) of
    [] -> Default;
    [{_, Match}] -> Match
    end.

% set/3 persists by default; Persist=false keeps the change in memory only.
set(Section, Key, Value) ->
    ?MODULE:set(Section, Key, Value, true).

set(Section, Key, Value, Persist) when is_binary(Section) and is_binary(Key) ->
    ?MODULE:set(?b2l(Section), ?b2l(Key), Value, Persist);
set(Section, Key, Value, Persist) ->
    gen_server:call(?MODULE, {set, Section, Key, Value, Persist}).


delete(Section, Key) when is_binary(Section) and is_binary(Key) ->
    delete(?b2l(Section), ?b2l(Key));
delete(Section, Key) ->
    delete(Section, Key, true).

delete(Section, Key, Persist) when is_binary(Section) and is_binary(Key) ->
    delete(?b2l(Section), ?b2l(Key), Persist);
delete(Section, Key, Persist) ->
    gen_server:call(?MODULE, {delete, Section, Key, Persist}).


% Subscribe Fun (arity 1..4) to configuration changes; the subscription is
% dropped automatically when Pid dies (see handle_info 'DOWN').
register(Fun) ->
    ?MODULE:register(Fun, self()).

register(Fun, Pid) ->
    gen_server:call(?MODULE, {register, Fun, Pid}).


% gen_server callback: load every ini file into a protected named ets
% table; the last file listed becomes the write target for persisted
% changes.
init(IniFiles) ->
    ets:new(?MODULE, [named_table, set, protected]),
    lists:foreach(fun(IniFile) ->
        {ok, ParsedIniValues} = parse_ini_file(IniFile),
        ets:insert(?MODULE, ParsedIniValues)
    end, IniFiles),
    WriteFile =
        case IniFiles of
        [_ | _] -> lists:last(IniFiles);
        _ -> undefined
        end,
    {ok, #config{write_filename=WriteFile}}.


terminate(_Reason, _State) ->
    ok.
% 'all' is served straight from the ets table. 'set'/'delete' update ets,
% optionally persist via couch_config_writer, then notify subscribers from
% a spawned process which also delivers the deferred gen_server reply.
handle_call(all, _From, Config) ->
    Resp = lists:sort(ets:tab2list(?MODULE)),
    {reply, Resp, Config};
handle_call({set, Sec, Key, Val, Persist}, From, Config) ->
    true = ets:insert(?MODULE, {{Sec, Key}, Val}),
    case {Persist, Config#config.write_filename} of
    {true, undefined} ->
        ok;
    {true, FileName} ->
        couch_config_writer:save_to_file({{Sec, Key}, Val}, FileName);
    _ ->
        ok
    end,
    spawn_link(fun() ->
        [catch F(Sec, Key, Val, Persist)
            || {_Pid, F} <- Config#config.notify_funs],
        gen_server:reply(From, ok)
    end),
    {noreply, Config};
handle_call({delete, Sec, Key, Persist}, From, Config) ->
    true = ets:delete(?MODULE, {Sec, Key}),
    case {Persist, Config#config.write_filename} of
    {true, undefined} ->
        ok;
    {true, FileName} ->
        couch_config_writer:save_to_file({{Sec, Key}, ""}, FileName);
    _ ->
        ok
    end,
    spawn_link(fun() ->
        [catch F(Sec, Key, deleted, Persist)
            || {_Pid, F} <- Config#config.notify_funs],
        gen_server:reply(From, ok)
    end),
    {noreply, Config};
handle_call({register, Fun, Pid}, _From, #config{notify_funs=PidFuns}=Config) ->
    erlang:monitor(process, Pid),
    % normalize arity 1..3 subscriber funs to the internal arity-4 shape
    Fun2 =
        case Fun of
        _ when is_function(Fun, 1) ->
            fun(Section, _Key, _Value, _Persist) -> Fun(Section) end;
        _ when is_function(Fun, 2) ->
            fun(Section, Key, _Value, _Persist) -> Fun(Section, Key) end;
        _ when is_function(Fun, 3) ->
            fun(Section, Key, Value, _Persist) -> Fun(Section, Key, Value) end;
        _ when is_function(Fun, 4) ->
            Fun
        end,
    {reply, ok, Config#config{notify_funs=[{Pid, Fun2} | PidFuns]}}.


handle_cast(stop, State) ->
    {stop, normal, State};
handle_cast(_Msg, State) ->
    {noreply, State}.

% Drop subscriptions registered by a process that has died.
handle_info({'DOWN', _, _, DownPid, _}, #config{notify_funs=PidFuns}=Config) ->
    FilteredPidFuns = [{Pid, Fun} || {Pid, Fun} <- PidFuns, Pid /= DownPid],
    {noreply, Config#config{notify_funs=FilteredPidFuns}}.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.


% Parse an ini file into [{{Section, Key}, Value}]. Handles [section]
% headers, ';' comment lines, "key = value" lines, continuation lines
% (leading whitespace), trailing " ;"/"\t;" comments, and bare "key ="
% lines, which delete the key from the ets table.
parse_ini_file(IniFile) ->
    IniFilename = couch_util:abs_pathname(IniFile),
    IniBin =
        case file:read_file(IniFilename) of
        {ok, IniBin0} ->
            IniBin0;
        {error, enoent} ->
            Fmt = "Couldn't find server configuration file ~s.",
            Msg = ?l2b(io_lib:format(Fmt, [IniFilename])),
            ?LOG_ERROR("~s~n", [Msg]),
            throw({startup_error, Msg})
        end,

    Lines = re:split(IniBin, "\r\n|\n|\r|\032", [{return, list}]),
    {_, ParsedIniValues} =
        lists:foldl(fun(Line, {AccSectionName, AccValues}) ->
            case string:strip(Line) of
            "[" ++ Rest ->
                case re:split(Rest, "\\]", [{return, list}]) of
                [NewSectionName, ""] ->
                    {NewSectionName, AccValues};
                _Else -> % end bracket not at end, ignore this line
                    {AccSectionName, AccValues}
                end;
            ";" ++ _Comment ->
                {AccSectionName, AccValues};
            Line2 ->
                case re:split(Line2, "\s?=\s?", [{return, list}]) of
                [Value] ->
                    % no '=': possibly a continuation of the previous
                    % value (continuation lines start with a space
                    % followed by non-whitespace)
                    MultiLineValuePart =
                        case re:run(Line, "^ \\S", []) of
                        {match, _} -> true;
                        _ -> false
                        end,
                    case {MultiLineValuePart, AccValues} of
                    {true, [{{_, ValueName}, PrevValue} | AccValuesRest]} ->
                        % strip any trailing comment, then append to the
                        % previous key's value
                        case re:split(Value, " ;|\t;", [{return, list}]) of
                        [[]] ->
                            % empty line
                            {AccSectionName, AccValues};
                        [LineValue | _Rest] ->
                            E = {{AccSectionName, ValueName},
                                PrevValue ++ " " ++ LineValue},
                            {AccSectionName, [E | AccValuesRest]}
                        end;
                    _ ->
                        {AccSectionName, AccValues}
                    end;
                ["" | _LineValues] -> % line begins with "=", ignore
                    {AccSectionName, AccValues};
                [ValueName | LineValues] -> % got a "key = value" line
                    RemainingLine = couch_util:implode(LineValues, "="),
                    % strip any trailing comment
                    case re:split(RemainingLine, " ;|\t;",
                            [{return, list}]) of
                    [[]] ->
                        % empty value means delete this key
                        ets:delete(?MODULE, {AccSectionName, ValueName}),
                        {AccSectionName, AccValues};
                    [LineValue | _Rest] ->
                        {AccSectionName,
                            [{{AccSectionName, ValueName}, LineValue}
                                | AccValues]}
                    end
                end
            end
        end, {"", []}, Lines),
    {ok, ParsedIniValues}.

diff --git a/apps/couch/src/couch_config_writer.erl b/apps/couch/src/couch_config_writer.erl
new file mode 100644
index 00000000..c8691d79
--- /dev/null
+++ b/apps/couch/src/couch_config_writer.erl
@@ -0,0 +1,79 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

%% @doc Saves a Key/Value pair to an ini file. The Key consists of a Section
%%      and Option combination. If that combination is found in the ini file
%%      the new value replaces the old value. If only the Section is found the
%%      Option and value combination is appended to the Section. If the
%%      Section does not yet exist in the ini file, it is added and the
%%      Option/Value pair is appended.
%% @see couch_config

-module(couch_config_writer).

-export([save_to_file/2]).
%% @spec save_to_file(
%%           Config::{{Section::string(), Option::string()}, Value::string()},
%%           File::filename()) -> ok
%% @doc Saves a Section/Key/Value triple to the ini file File::filename()
save_to_file({{Section, Key}, Value}, File) ->
    {ok, OldFileContents} = file:read_file(File),
    Lines = re:split(OldFileContents, "\r\n|\n|\r|\032", [{return, list}]),

    SectionLine = "[" ++ Section ++ "]",
    % matches either "Key =" at line start, or any "[section]" header
    {ok, Pattern} = re:compile(["^(", Key, "\\s*=)|\\[[a-zA-Z0-9\_-]*\\]"]),

    NewLines = process_file_lines(Lines, [], SectionLine, Pattern, Key, Value),
    NewFileContents =
        reverse_and_add_newline(strip_empty_lines(NewLines), []),
    ok = file:write_file(File, NewFileContents).


% Scan for the target [Section] header, accumulating seen lines in reverse;
% once found, hand off to process_section_lines. If the file ends without
% the section, append the section header and the option.
process_file_lines([Section | Rest], SeenLines, Section, Pattern, Key, Value) ->
    process_section_lines(Rest, [Section | SeenLines], Pattern, Key, Value);

process_file_lines([Line | Rest], SeenLines, Section, Pattern, Key, Value) ->
    process_file_lines(Rest, [Line | SeenLines], Section, Pattern, Key, Value);

process_file_lines([], SeenLines, Section, _Pattern, Key, Value) ->
    % Section wasn't found. Append it with the option here.
    [Key ++ " = " ++ Value, Section, "" | strip_empty_lines(SeenLines)].


% Inside the target section (lines accumulated in reverse): replace the
% option if present, or append it just before the next section header / at
% end of file.
process_section_lines([Line | Rest], SeenLines, Pattern, Key, Value) ->
    case re:run(Line, Pattern, [{capture, all_but_first}]) of
    nomatch ->
        % Found nothing interesting. Move on.
        process_section_lines(Rest, [Line | SeenLines], Pattern, Key, Value);
    {match, []} ->
        % Found another section. Append the option here.
        lists:reverse(Rest) ++
            [Line, "", Key ++ " = " ++ Value | strip_empty_lines(SeenLines)];
    {match, _} ->
        % Found the option itself. Replace it.
        lists:reverse(Rest) ++ [Key ++ " = " ++ Value | SeenLines]
    end;

process_section_lines([], SeenLines, _Pattern, Key, Value) ->
    % Found end of file within the section. Append the option here.
    [Key ++ " = " ++ Value | strip_empty_lines(SeenLines)].
% Reverse the accumulated line list, interleaving "\n" after each line to
% build a nested iolist suitable for file:write_file/2.
reverse_and_add_newline([Line | Rest], Content) ->
    reverse_and_add_newline(Rest, [Line, "\n", Content]);

reverse_and_add_newline([], Content) ->
    Content.


% Drop leading empty strings (blank lines) from a line list.
strip_empty_lines(["" | Rest]) ->
    strip_empty_lines(Rest);

strip_empty_lines(All) ->
    All.
diff --git a/apps/couch/src/couch_db.erl b/apps/couch/src/couch_db.erl
new file mode 100644
index 00000000..7678f6ca
--- /dev/null
+++ b/apps/couch/src/couch_db.erl
@@ -0,0 +1,1195 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

-module(couch_db).
-behaviour(gen_server).

-export([open/2,open_int/2,close/1,create/2,start_compact/1,get_db_info/1,get_design_docs/1]).
-export([open_ref_counted/2,is_idle/1,monitor/1,count_changes_since/2]).
-export([update_doc/3,update_doc/4,update_docs/4,update_docs/2,update_docs/3,delete_doc/3]).
-export([get_doc_info/2,open_doc/2,open_doc/3,open_doc_revs/4]).
-export([set_revs_limit/2,get_revs_limit/1]).
-export([get_missing_revs/2,name/1,doc_to_tree/1,get_update_seq/1,get_committed_update_seq/1]).
-export([enum_docs/4,enum_docs_since/5]).
-export([enum_docs_since_reduce_to_count/1,enum_docs_reduce_to_count/1]).
-export([increment_update_seq/1,get_purge_seq/1,purge_docs/2,get_last_purged/1]).
-export([start_link/3,open_doc_int/3,ensure_full_commit/1]).
-export([set_security/2,get_security/1]).
-export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
-export([changes_since/5,changes_since/6,read_doc/2,new_revid/1]).
-export([check_is_admin/1, check_is_reader/1]).

-include("couch_db.hrl").


% Open the db file and start the couch_db gen_server for it. The fd is
% unlinked afterwards so this process's exit does not take the file down.
start_link(DbName, Filepath, Options) ->
    case open_db_file(Filepath, Options) of
    {ok, Fd} ->
        StartResult = gen_server:start_link(couch_db,
            {DbName, Filepath, Fd, Options}, []),
        unlink(Fd),
        StartResult;
    Else ->
        Else
    end.

% Open the db file, falling back to a leftover .compact file (renamed into
% place) when the primary file is missing. This can happen if we crashed
% during the compaction file switch.
open_db_file(Filepath, Options) ->
    case couch_file:open(Filepath, Options) of
    {ok, Fd} ->
        {ok, Fd};
    {error, enoent} ->
        case couch_file:open(Filepath ++ ".compact") of
        {ok, Fd} ->
            ?LOG_INFO("Found ~s~s compaction file, using as primary storage.",
                [Filepath, ".compact"]),
            ok = file:rename(Filepath ++ ".compact", Filepath),
            ok = couch_file:sync(Fd),
            {ok, Fd};
        {error, enoent} ->
            {not_found, no_db_file}
        end;
    Error ->
        Error
    end.


create(DbName, Options) ->
    couch_server:create(DbName, Options).

% this is for opening a database for internal purposes like the replicator
% or the view indexer. it never throws a reader error.
open_int(DbName, Options) ->
    couch_server:open(DbName, Options).

% this should be called anytime an http request opens the database.
% it ensures that the http userCtx is a valid reader
open(DbName, Options) ->
    case couch_server:open(DbName, Options) of
    {ok, Db} ->
        try
            check_is_reader(Db),
            {ok, Db}
        catch
        throw:Error ->
            close(Db),
            throw(Error)
        end;
    Else ->
        Else
    end.

ensure_full_commit(#db{update_pid=UpdatePid, instance_start_time=StartTime}) ->
    ok = gen_server:call(UpdatePid, full_commit, infinity),
    {ok, StartTime}.

close(#db{fd_ref_counter=RefCntr}) ->
    couch_ref_counter:drop(RefCntr).

open_ref_counted(MainPid, OpenedPid) ->
    gen_server:call(MainPid, {open_ref_count, OpenedPid}).

is_idle(MainPid) ->
    gen_server:call(MainPid, is_idle).

monitor(#db{main_pid=MainPid}) ->
    erlang:monitor(process, MainPid).
start_compact(#db{update_pid=Pid}) ->
    gen_server:call(Pid, start_compact).

% Delete by writing, for each given revision, a stub doc with deleted=true.
delete_doc(Db, Id, Revisions) ->
    DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
    {ok, [Result]} = update_docs(Db, DeletedDocs, []),
    {ok, Result}.

open_doc(Db, IdOrDocInfo) ->
    open_doc(Db, IdOrDocInfo, []).

% Open a doc by id or doc_info. Deleted docs are only returned when the
% 'deleted' option is present.
open_doc(Db, Id, Options) ->
    increment_stat(Db, {couchdb, database_reads}),
    case open_doc_int(Db, Id, Options) of
    {ok, #doc{deleted=true}=Doc} ->
        case lists:member(deleted, Options) of
        true ->
            apply_open_options({ok, Doc}, Options);
        false ->
            {not_found, deleted}
        end;
    Else ->
        apply_open_options(Else, Options)
    end.

apply_open_options({ok, Doc}, Options) ->
    apply_open_options2(Doc, Options);
apply_open_options(Else, _Options) ->
    Else.

% atts_since: replace attachment data with stubs for attachments that were
% already present at (or before) a known ancestor revision.
apply_open_options2(Doc, []) ->
    {ok, Doc};
apply_open_options2(#doc{atts=Atts, revs=Revs}=Doc,
        [{atts_since, PossibleAncestors} | Rest]) ->
    RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors),
    apply_open_options2(Doc#doc{atts=[A#att{data=
        if AttPos > RevPos -> Data; true -> stub end}
        || #att{revpos=AttPos, data=Data}=A <- Atts]}, Rest);
apply_open_options2(Doc, [_ | Rest]) ->
    apply_open_options2(Doc, Rest).


% Highest rev position along the doc's rev path that appears in
% AttsSinceRevs, or 0 when there is no common ancestor.
find_ancestor_rev_pos({_, []}, _AttsSinceRevs) ->
    0;
find_ancestor_rev_pos(_DocRevs, []) ->
    0;
find_ancestor_rev_pos({RevPos, [RevId | Rest]}, AttsSinceRevs) ->
    case lists:member({RevPos, RevId}, AttsSinceRevs) of
    true ->
        RevPos;
    false ->
        find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
    end.

open_doc_revs(Db, Id, Revs, Options) ->
    increment_stat(Db, {couchdb, database_reads}),
    [{ok, Results}] = open_doc_revs_int(Db, [{Id, Revs}], Options),
    {ok, [apply_open_options(Result, Options) || Result <- Results]}.

% Each returned result is a list of tuples:
% {Id, MissingRevs, PossibleAncestors}
% if no revs are missing, it's omitted from the results.
% Determine which of the requested revs are absent from the db, along with
% possible ancestor revs a replicator could use.
get_missing_revs(Db, IdRevsList) ->
    Results = get_full_doc_infos(Db, [Id1 || {Id1, _Revs} <- IdRevsList]),
    {ok, find_missing(IdRevsList, Results)}.

find_missing([], []) ->
    [];
find_missing([{Id, Revs} | RestIdRevs], [{ok, FullInfo} | RestLookupInfo]) ->
    case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of
    [] ->
        find_missing(RestIdRevs, RestLookupInfo);
    MissingRevs ->
        #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
        LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
        % Find the revs that are possible parents of this rev: a leaf is a
        % "possible ancestor" of the missing revs when its position is
        % less than that of any of the missing revs.
        PossibleAncestors =
            lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
                case lists:any(fun({MissingPos, _}) ->
                        LeafPos < MissingPos end, MissingRevs) of
                true ->
                    [{LeafPos, LeafRevId} | Acc];
                false ->
                    Acc
                end
            end, [], LeafRevs),
        [{Id, MissingRevs, PossibleAncestors}
            | find_missing(RestIdRevs, RestLookupInfo)]
    end;
find_missing([{Id, Revs} | RestIdRevs], [not_found | RestLookupInfo]) ->
    [{Id, Revs, []} | find_missing(RestIdRevs, RestLookupInfo)].

get_doc_info(Db, Id) ->
    case get_full_doc_info(Db, Id) of
    {ok, DocInfo} ->
        {ok, couch_doc:to_doc_info(DocInfo)};
    Else ->
        Else
    end.

% returns {ok, DocInfo} or not_found
get_full_doc_info(Db, Id) ->
    [Result] = get_full_doc_infos(Db, [Id]),
    Result.

get_full_doc_infos(Db, Ids) ->
    couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids).

increment_update_seq(#db{update_pid=UpdatePid}) ->
    gen_server:call(UpdatePid, increment_update_seq).

purge_docs(#db{update_pid=UpdatePid}, IdsRevs) ->
    gen_server:call(UpdatePid, {purge_docs, IdsRevs}).

get_committed_update_seq(#db{committed_update_seq=Seq}) ->
    Seq.

get_update_seq(#db{update_seq=Seq}) ->
    Seq.

get_purge_seq(#db{header=#db_header{purge_seq=PurgeSeq}}) ->
    PurgeSeq.

%% Return the id/rev pairs removed by the most recent purge, read from
%% the pointer stored in the db header ([] if nothing was ever purged).
get_last_purged(#db{header=#db_header{purged_docs=nil}}) ->
    {ok, []};
get_last_purged(#db{fd=Fd, header=#db_header{purged_docs=PurgedPointer}}) ->
    couch_file:pread_term(Fd, PurgedPointer).

%% Assemble the info proplist served for GET /dbname.
get_db_info(Db) ->
    #db{fd=Fd,
        header=#db_header{disk_version=DiskVersion},
        compactor_pid=Compactor,
        update_seq=SeqNum,
        name=Name,
        fulldocinfo_by_id_btree=FullDocBtree,
        instance_start_time=StartTime,
        committed_update_seq=CommittedUpdateSeq} = Db,
    {ok, Size} = couch_file:bytes(Fd),
    % the id btree's reduction carries {DocCount, DeletedDocCount}
    {ok, {Count, DelCount}} = couch_btree:full_reduce(FullDocBtree),
    InfoList = [
        {db_name, Name},
        {doc_count, Count},
        {doc_del_count, DelCount},
        {update_seq, SeqNum},
        {purge_seq, couch_db:get_purge_seq(Db)},
        {compact_running, Compactor/=nil},
        {disk_size, Size},
        {instance_start_time, StartTime},
        {disk_format_version, DiskVersion},
        {committed_update_seq, CommittedUpdateSeq}
    ],
    {ok, InfoList}.

%% Collect every _design/* doc by folding the id btree over the design
%% doc key range; the fold stops at the first id past that range.
get_design_docs(#db{fulldocinfo_by_id_btree=Btree}=Db) ->
    {ok,_, Docs} = couch_btree:fold(Btree,
        fun(#full_doc_info{id= <<"_design/",_/binary>>}=FullDocInfo, _Reds, AccDocs) ->
            {ok, Doc} = couch_db:open_doc_int(Db, FullDocInfo, []),
            {ok, [Doc | AccDocs]};
        (_, _Reds, AccDocs) ->
            {stop, AccDocs}
        end,
        [], [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}]),
    {ok, Docs}.

%% Throws {unauthorized, _} unless the user ctx carries the _admin role,
%% one of the db's admin roles, or one of the db's admin names.
check_is_admin(#db{user_ctx=#user_ctx{name=Name,roles=Roles}}=Db) ->
    {Admins} = get_admins(Db),
    AdminRoles = [<<"_admin">> | couch_util:get_value(<<"roles">>, Admins, [])],
    AdminNames = couch_util:get_value(<<"names">>, Admins,[]),
    % `--' leaves the left list unchanged exactly when no element matched
    case AdminRoles -- Roles of
    AdminRoles -> % same list, not an admin role
        case AdminNames -- [Name] of
        AdminNames -> % same names, not an admin
            throw({unauthorized, <<"You are not a db or server admin.">>});
        _ ->
            ok
        end;
    _ ->
        ok
    end.

%% Throws {unauthorized, _} unless the user is a db/server admin or
%% matches the readers list. An empty readers list means public access.
check_is_reader(#db{user_ctx=#user_ctx{name=Name,roles=Roles}=UserCtx}=Db) ->
    case (catch check_is_admin(Db)) of
    ok -> ok;
    _ ->
        {Readers} = get_readers(Db),
        ReaderRoles = couch_util:get_value(<<"roles">>, Readers,[]),
        WithAdminRoles = [<<"_admin">> | ReaderRoles],
        ReaderNames = couch_util:get_value(<<"names">>, Readers,[]),
        case ReaderRoles ++ ReaderNames of
        [] -> ok; % no readers == public access
        _Else ->
            case WithAdminRoles -- Roles of
            WithAdminRoles -> % same list, not a reader role
                case ReaderNames -- [Name] of
                ReaderNames -> % same names, not a reader
                    ?LOG_DEBUG("Not a reader: UserCtx ~p vs Names ~p Roles ~p",[UserCtx, ReaderNames, WithAdminRoles]),
                    throw({unauthorized, <<"You are not authorized to access this db.">>});
                _ ->
                    ok
                end;
            _ ->
                ok
            end
        end
    end.

%% Security accessors; admins/readers default to an empty JSON object.
get_admins(#db{security=SecProps}) ->
    couch_util:get_value(<<"admins">>, SecProps, {[]}).

get_readers(#db{security=SecProps}) ->
    couch_util:get_value(<<"readers">>, SecProps, {[]}).

get_security(#db{security=SecProps}) ->
    {SecProps}.

%% Admin-only: validate and persist a new _security object, then force a
%% full commit so the change is durable immediately.
set_security(#db{update_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
    check_is_admin(Db),
    ok = validate_security_object(NewSecProps),
    ok = gen_server:call(Pid, {set_security, NewSecProps}, infinity),
    {ok, _} = ensure_full_commit(Db),
    ok;
set_security(_, _) ->
    throw(bad_request).

%% Check both the admins and readers sub-objects of a _security object.
validate_security_object(SecProps) ->
    Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
    Readers = couch_util:get_value(<<"readers">>, SecProps, {[]}),
    ok = validate_names_and_roles(Admins),
    ok = validate_names_and_roles(Readers),
    ok.

% validate user input
%% Ensure "names" and "roles" in a security sub-object are JSON lists of
%% strings (binaries); throws a descriptive message otherwise.
validate_names_and_roles({Props}) when is_list(Props) ->
    case couch_util:get_value(<<"names">>,Props,[]) of
    Ns when is_list(Ns) ->
        % comprehension used only for its throw on the first non-binary
        [throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)],
        Ns;
    _ -> throw("names must be a JSON list of strings")
    end,
    case couch_util:get_value(<<"roles">>,Props,[]) of
    Rs when is_list(Rs) ->
        [throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)],
        Rs;
    _ -> throw("roles must be a JSON list of strings")
    end,
    ok.

get_revs_limit(#db{revs_limit=Limit}) ->
    Limit.

%% Admin-only: cap the length of stored rev histories (must be > 0).
set_revs_limit(#db{update_pid=Pid}=Db, Limit) when Limit > 0 ->
    check_is_admin(Db),
    gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
set_revs_limit(_Db, _Limit) ->
    throw(invalid_revs_limit).

name(#db{name=Name}) ->
    Name.

update_doc(Db, Doc, Options) ->
    update_doc(Db, Doc, Options, interactive_edit).

%% Single-doc wrapper around update_docs/4; unwraps the one result or
%% throws the per-doc error (e.g. conflict).
update_doc(Db, Doc, Options, UpdateType) ->
    case update_docs(Db, [Doc], Options, UpdateType) of
    {ok, [{ok, NewRev}]} ->
        {ok, NewRev};
    {ok, [{{_Id, _Rev}, Error}]} ->
        throw(Error);
    {ok, [Error]} ->
        throw(Error);
    {ok, []} ->
        % replication success
        {Pos, [RevId | _]} = Doc#doc.revs,
        {ok, {Pos, RevId}}
    end.

update_docs(Db, Docs) ->
    update_docs(Db, Docs, []).

% group_alike_docs groups the sorted documents into sublist buckets, by id.
% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
group_alike_docs(Docs) ->
    % NOTE(review): sorting with `<' is not a stable ordering for equal
    % ids, so relative order of same-id docs within a bucket is
    % unspecified -- confirm callers do not depend on input order.
    Sorted = lists:sort(fun(#doc{id=A},#doc{id=B})-> A < B end, Docs),
    group_alike_docs(Sorted, []).

%% Second phase of group_alike_docs/1: Docs are sorted by id, so equal
%% ids are adjacent and each either joins the head bucket or opens a new one.
group_alike_docs([], Buckets) ->
    lists:reverse(Buckets);
group_alike_docs([Doc|Rest], []) ->
    group_alike_docs(Rest, [[Doc]]);
group_alike_docs([Doc|Rest], [Bucket|RestBuckets]) ->
    [#doc{id=BucketId}|_] = Bucket,
    case Doc#doc.id == BucketId of
    true ->
        % add to existing bucket
        group_alike_docs(Rest, [[Doc|Bucket]|RestBuckets]);
    false ->
        % add to new bucket
        group_alike_docs(Rest, [[Doc]|[Bucket|RestBuckets]])
    end.

%% Run the db's validate_doc_update funs against a new doc. Design docs
%% require admin rights; _local docs and dbs without validation funs skip
%% validation. Returns ok or the first failing fun's error term.
validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}, _GetDiskDocFun) ->
    catch check_is_admin(Db);
validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
    ok;
validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
    ok;
validate_doc_update(Db, Doc, GetDiskDocFun) ->
    DiskDoc = GetDiskDocFun(),
    JsonCtx = couch_util:json_user_ctx(Db),
    SecObj = get_security(Db),
    try [case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
            ok -> ok;
            Error -> throw(Error)
        end || Fun <- Db#db.validate_doc_funs],
        ok
    catch
        throw:Error ->
            Error
    end.


%% Validate one interactive edit against the doc's current leaf revs.
%% Returns {ok | ValidationError | conflict, PreparedDoc}; stubs are
%% merged from the on-disk doc when the parent rev is a known leaf.
prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc,
        OldFullDocInfo, LeafRevsDict, AllowConflict) ->
    case Revs of
    [PrevRev|_] ->
        case dict:find({RevStart, PrevRev}, LeafRevsDict) of
        {ok, {Deleted, DiskSp, DiskRevs}} ->
            case couch_doc:has_stubs(Doc) of
            true ->
                DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
                Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
                {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
            false ->
                LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end,
                {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
            end;
        error when AllowConflict ->
            couch_doc:merge_stubs(Doc, #doc{}), % will generate error if
                                                % there are stubs
            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
        error ->
            {conflict, Doc}
        end;
    [] ->
        % new doc, and we have existing revs.
        % reuse existing deleted doc
        if OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
            {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
        true ->
            {conflict, Doc}
        end
    end.



%% Validate a batch of interactive edits bucket-by-bucket. DocBuckets and
%% the lookup results line up positionally; yields prepared buckets plus
%% the accumulated fatal per-doc errors.
prep_and_validate_updates(_Db, [], [], _AllowConflict, AccPrepped,
        AccFatalErrors) ->
    {AccPrepped, AccFatalErrors};
prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups],
        AllowConflict, AccPrepped, AccErrors) ->
    [#doc{id=Id}|_]=DocBucket,
    % no existing revs are known,
    {PreppedBucket, AccErrors3} = lists:foldl(
        fun(#doc{revs=Revs}=Doc, {AccBucket, AccErrors2}) ->
            case couch_doc:has_stubs(Doc) of
            true ->
                couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
            false -> ok
            end,
            case Revs of
            {0, []} ->
                case validate_doc_update(Db, Doc, fun() -> nil end) of
                ok ->
                    {[Doc | AccBucket], AccErrors2};
                Error ->
                    {AccBucket, [{{Id, {0, []}}, Error} | AccErrors2]}
                end;
            _ ->
                % old revs specified but none exist, a conflict
                {AccBucket, [{{Id, Revs}, conflict} | AccErrors2]}
            end
        end,
        {[], AccErrors}, DocBucket),

    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
        [PreppedBucket | AccPrepped], AccErrors3);
prep_and_validate_updates(Db, [DocBucket|RestBuckets],
        [{ok, #full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo}|RestLookups],
        AllowConflict, AccPrepped, AccErrors) ->
    % index the existing leaf revs so each doc's parent rev can be found
    Leafs = couch_key_tree:get_all_leafs(OldRevTree),
    LeafRevsDict = dict:from_list([{{Start, RevId}, {Deleted, Sp, Revs}} ||
        {{Deleted, Sp, _Seq}, {Start, [RevId|_]}=Revs} <- Leafs]),
    {PreppedBucket, AccErrors3} = lists:foldl(
        fun(Doc, {Docs2Acc, AccErrors2}) ->
            case prep_and_validate_update(Db, Doc, OldFullDocInfo,
                    LeafRevsDict, AllowConflict) of
            {ok, Doc2} ->
                {[Doc2 | Docs2Acc], AccErrors2};
            {Error, #doc{id=Id,revs=Revs}} ->
                % Record the error
                {Docs2Acc, [{{Id, Revs}, Error} |AccErrors2]}
            end
        end,
        {[], AccErrors}, DocBucket),
    prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
        [PreppedBucket | AccPrepped], AccErrors3).


update_docs(#db{update_pid=UpdatePid}=Db, Docs, Options) ->
    % NOTE(review): the record re-match in the call below is redundant;
    % `update_docs(Db, Docs, Options, interactive_edit)' is equivalent.
    update_docs(#db{update_pid=UpdatePid}=Db, Docs, Options, interactive_edit).


%% Validate replicated (non-interactive) updates. Docs that are not leafs
%% after merging into the rev tree (i.e. already known) are silently
%% treated as successes. Errors are reported as {{Id, Rev}, Error}.
prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
    Errors2 = [{{Id, {Pos, Rev}}, Error} ||
        {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
    {lists:reverse(AccPrepped), lists:reverse(Errors2)};
prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
    case OldInfo of
    not_found ->
        % brand new doc id: validate each incoming doc against nothing
        {ValidatedBucket, AccErrors3} = lists:foldl(
            fun(Doc, {AccPrepped2, AccErrors2}) ->
                case couch_doc:has_stubs(Doc) of
                true ->
                    couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
                false -> ok
                end,
                case validate_doc_update(Db, Doc, fun() -> nil end) of
                ok ->
                    {[Doc | AccPrepped2], AccErrors2};
                Error ->
                    {AccPrepped2, [{Doc, Error} | AccErrors2]}
                end
            end,
            {[], AccErrors}, Bucket),
        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
    {ok, #full_doc_info{rev_tree=OldTree}} ->
        % merge the incoming rev paths into the existing tree, then keep
        % only docs that end up as leafs of the merged tree
        NewRevTree = lists:foldl(
            fun(NewDoc, AccTree) ->
                {NewTree, _} = couch_key_tree:merge(AccTree, [couch_db:doc_to_tree(NewDoc)]),
                NewTree
            end,
            OldTree, Bucket),
        Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
        LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
        {ValidatedBucket, AccErrors3} =
        lists:foldl(
            fun(#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, {AccValidated, AccErrors2}) ->
                case dict:find({Pos, RevId}, LeafRevsFullDict) of
                {ok, {Start, Path}} ->
                    % our unflushed doc is a leaf node. Go back on the path
                    % to find the previous rev that's on disk.

                    LoadPrevRevFun = fun() ->
                        make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
                    end,

                    case couch_doc:has_stubs(Doc) of
                    true ->
                        DiskDoc = LoadPrevRevFun(),
                        Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
                        GetDiskDocFun = fun() -> DiskDoc end;
                    false ->
                        Doc2 = Doc,
                        GetDiskDocFun = LoadPrevRevFun
                    end,

                    case validate_doc_update(Db, Doc2, GetDiskDocFun) of
                    ok ->
                        {[Doc2 | AccValidated], AccErrors2};
                    Error ->
                        {AccValidated, [{Doc, Error} | AccErrors2]}
                    end;
                _ ->
                    % this doc isn't a leaf or already exists in the tree.
                    % ignore but consider it a success.
                    {AccValidated, AccErrors2}
                end
            end,
            {[], AccErrors}, Bucket),
        prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo,
            [ValidatedBucket | AccPrepped], AccErrors3)
    end.



%% Deterministic new rev id: md5 over the deleted flag, parent rev, body
%% and per-attachment md5s. Falls back to a random id when any old-style
%% attachment lacks an md5 (pre-md5 disk format).
new_revid(#doc{body=Body,revs={OldStart,OldRevs},
        atts=Atts,deleted=Deleted}) ->
    case [{N, T, M} || #att{name=N,type=T,md5=M} <- Atts, M =/= <<>>] of
    Atts2 when length(Atts) =/= length(Atts2) ->
        % We must have old style non-md5 attachments
        ?l2b(integer_to_list(couch_util:rand32()));
    Atts2 ->
        OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end,
        couch_util:md5(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2]))
    end.

%% Assign a new rev id to every doc, returning the rewritten buckets plus
%% an {Id, OldRevs} -> {ok, NewRev} accumulator for the caller's reply.
new_revs([], OutBuckets, IdRevsAcc) ->
    {lists:reverse(OutBuckets), IdRevsAcc};
new_revs([Bucket|RestBuckets], OutBuckets, IdRevsAcc) ->
    {NewBucket, IdRevsAcc3} = lists:mapfoldl(
        fun(#doc{id=Id,revs={Start, RevIds}}=Doc, IdRevsAcc2)->
            NewRevId = new_revid(Doc),
            {Doc#doc{revs={Start+1, [NewRevId | RevIds]}},
                [{{Id, {Start, RevIds}}, {ok, {Start+1, NewRevId}}} | IdRevsAcc2]}
        end, IdRevsAcc, Bucket),
    new_revs(RestBuckets, [NewBucket|OutBuckets], IdRevsAcc3).

%% Throws bad_request if the doc carries two attachments with one name;
%% sorting by name makes duplicates adjacent for the pairwise scan.
check_dup_atts(#doc{atts=Atts}=Doc) ->
    Atts2 = lists:sort(fun(#att{name=N1}, #att{name=N2}) -> N1 < N2 end, Atts),
    check_dup_atts2(Atts2),
    Doc.

%% Scan name-sorted attachments for an adjacent duplicate pair.
check_dup_atts2([#att{name=N}, #att{name=N} | _]) ->
    throw({bad_request, <<"Duplicate attachments">>});
check_dup_atts2([_ | Rest]) ->
    check_dup_atts2(Rest);
check_dup_atts2(_) ->
    ok.


%% Replicated updates: rev ids arrive with the docs. Validation (and the
%% btree lookup it needs) only runs when the db has validate funs, a
%% design doc is in the batch, or attachments must be flushed. Writes
%% always merge conflicts instead of rejecting them.
update_docs(Db, Docs, Options, replicated_changes) ->
    increment_stat(Db, {couchdb, database_writes}),
    DocBuckets = group_alike_docs(Docs),

    case (Db#db.validate_doc_funs /= []) orelse
        lists:any(
            fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
            (#doc{atts=Atts}) ->
                Atts /= []
            end, Docs) of
    true ->
        Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
        ExistingDocs = get_full_doc_infos(Db, Ids),

        {DocBuckets2, DocErrors} =
            prep_and_validate_replicated_updates(Db, DocBuckets, ExistingDocs, [], []),
        DocBuckets3 = [Bucket || [_|_]=Bucket <- DocBuckets2]; % remove empty buckets
    false ->
        DocErrors = [],
        DocBuckets3 = DocBuckets
    end,
    DocBuckets4 = [[doc_flush_atts(check_dup_atts(Doc), Db#db.fd)
        || Doc <- Bucket] || Bucket <- DocBuckets3],
    {ok, []} = write_and_commit(Db, DocBuckets4, [], [merge_conflicts | Options]),
    {ok, DocErrors};

%% Interactive edits: validate, generate new rev ids, and write. With
%% all_or_nothing, any pre-commit failure aborts the whole batch;
%% otherwise per-doc results are returned in input order.
update_docs(Db, Docs, Options, interactive_edit) ->
    increment_stat(Db, {couchdb, database_writes}),
    AllOrNothing = lists:member(all_or_nothing, Options),
    % go ahead and generate the new revision ids for the documents.
    % separate out the NonRep documents from the rest of the documents
    {Docs2, NonRepDocs} = lists:foldl(
        fun(#doc{id=Id}=Doc, {DocsAcc, NonRepDocsAcc}) ->
            case Id of
            <<?LOCAL_DOC_PREFIX, _/binary>> ->
                {DocsAcc, [Doc | NonRepDocsAcc]};
            Id->
                {[Doc | DocsAcc], NonRepDocsAcc}
            end
        end, {[], []}, Docs),

    DocBuckets = group_alike_docs(Docs2),

    case (Db#db.validate_doc_funs /= []) orelse
        lists:any(
            fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) ->
                true;
            (#doc{atts=Atts}) ->
                Atts /= []
            end, Docs2) of
    true ->
        % lookup the doc by id and get the most recent
        Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
        ExistingDocInfos = get_full_doc_infos(Db, Ids),

        {DocBucketsPrepped, PreCommitFailures} = prep_and_validate_updates(Db,
            DocBuckets, ExistingDocInfos, AllOrNothing, [], []),

        % strip out any empty buckets
        DocBuckets2 = [Bucket || [_|_] = Bucket <- DocBucketsPrepped];
    false ->
        PreCommitFailures = [],
        DocBuckets2 = DocBuckets
    end,

    if (AllOrNothing) and (PreCommitFailures /= []) ->
        % normalize error keys to {Id, {Pos, RevId}} for the reply
        {aborted, lists:map(
            fun({{Id,{Pos, [RevId|_]}}, Error}) ->
                {{Id, {Pos, RevId}}, Error};
            ({{Id,{0, []}}, Error}) ->
                {{Id, {0, <<>>}}, Error}
            end, PreCommitFailures)};
    true ->
        Options2 = if AllOrNothing -> [merge_conflicts];
            true -> [] end ++ Options,
        DocBuckets3 = [[
            doc_flush_atts(set_new_att_revpos(
                check_dup_atts(Doc)), Db#db.fd)
            || Doc <- B] || B <- DocBuckets2],
        {DocBuckets4, IdRevs} = new_revs(DocBuckets3, [], []),

        {ok, CommitResults} = write_and_commit(Db, DocBuckets4, NonRepDocs, Options2),

        % reassemble one result per input doc, keyed by {Id, OldRevs}
        ResultsDict = dict:from_list(IdRevs ++ CommitResults ++ PreCommitFailures),
        {ok, lists:map(
            fun(#doc{id=Id,revs={Pos, RevIds}}) ->
                {ok, Result} = dict:find({Id, {Pos, RevIds}}, ResultsDict),
                Result
            end, Docs)}
    end.

% Returns the first available document on disk. Input list is a full rev path
% for the doc.

%% Walk the rev path from newest to oldest and materialize the first rev
%% whose body is actually on disk; nil when none is.
make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
    nil;
make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) ->
    make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
make_first_doc_on_disk(Db, Id, Pos, [{_Rev, {IsDel, Sp, _Seq}} |_]=DocPath) ->
    Revs = [Rev || {Rev, _} <- DocPath],
    make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).

%% Decide whether this write must fsync: an explicit full_commit or
%% delay_commit option from the caller wins; otherwise the global
%% "delayed_commits" config setting decides.
set_commit_option(Options) ->
    CommitSettings = {
        [true || O <- Options, O==full_commit orelse O==delay_commit],
        couch_config:get("couchdb", "delayed_commits", "false")
    },
    case CommitSettings of
    {[true], _} ->
        Options; % user requested explicit commit setting, do not change it
    {_, "true"} ->
        Options; % delayed commits are enabled, do nothing
    {_, "false"} ->
        [full_commit|Options];
    {_, Else} ->
        ?LOG_ERROR("[couchdb] delayed_commits setting must be true/false, not ~p",
            [Else]),
        [full_commit|Options]
    end.

%% Gather streamed per-doc results from the updater until done/retry; a
%% 'DOWN' on our monitor re-raises the updater's exit reason here.
collect_results(UpdatePid, MRef, ResultsAcc) ->
    receive
    {result, UpdatePid, Result} ->
        collect_results(UpdatePid, MRef, [Result | ResultsAcc]);
    {done, UpdatePid} ->
        {ok, ResultsAcc};
    {retry, UpdatePid} ->
        retry;
    {'DOWN', MRef, _, _, Reason} ->
        exit(Reason)
    end.

%% Hand the prepared buckets to the updater process and wait for results.
%% On `retry' (the db file was swapped out by compaction mid-write) the
%% attachments are re-flushed against the current fd and the write is
%% attempted exactly once more.
write_and_commit(#db{update_pid=UpdatePid, user_ctx=Ctx}=Db, DocBuckets,
        NonRepDocs, Options0) ->
    Options = set_commit_option(Options0),
    MergeConflicts = lists:member(merge_conflicts, Options),
    FullCommit = lists:member(full_commit, Options),
    MRef = erlang:monitor(process, UpdatePid),
    try
        UpdatePid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts, FullCommit},
        case collect_results(UpdatePid, MRef, []) of
        {ok, Results} -> {ok, Results};
        retry ->
            % This can happen if the db file we wrote to was swapped out by
            % compaction. Retry by reopening the db and writing to the current file
            {ok, Db2} = open_ref_counted(Db#db.main_pid, Ctx),
            DocBuckets2 = [[doc_flush_atts(Doc, Db2#db.fd) || Doc <- Bucket] || Bucket <- DocBuckets],
            % We only retry once
            close(Db2),
            UpdatePid ! {update_docs, self(), DocBuckets2, NonRepDocs, MergeConflicts, FullCommit},
            case collect_results(UpdatePid, MRef, []) of
            {ok, Results} -> {ok, Results};
            retry -> throw({update_error, compaction_retry})
            end
        end
    after
        erlang:demonitor(MRef, [flush])
    end.


%% Stamp not-yet-written attachments with the doc's next rev position;
%% attachments already on disk keep their existing revpos.
set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts}=Doc) ->
    Doc#doc{atts= lists:map(fun(#att{data={_Fd,_Sp}}=Att) ->
            % already commited to disk, do not set new rev
            Att;
        (Att) ->
            Att#att{revpos=RevPos+1}
        end, Atts)}.


doc_flush_atts(Doc, Fd) ->
    Doc#doc{atts=[flush_att(Fd, Att) || Att <- Doc#doc.atts]}.

%% Compare md5 signatures; an empty expected value means "no md5, skip".
check_md5(_NewSig, <<>>) -> ok;
check_md5(Sig1, Sig2) when Sig1 == Sig2 -> ok;
check_md5(_, _) -> throw(md5_mismatch).

%% Write one attachment's data to Fd, whatever form the data is in:
%% already on this fd, a stream on another fd, an in-memory binary, or a
%% chunk-producing fun (with or without a known length).
flush_att(Fd, #att{data={Fd0, _}}=Att) when Fd0 == Fd ->
    % already written to our file, nothing to write
    Att;

flush_att(Fd, #att{data={OtherFd,StreamPointer}, md5=InMd5}=Att) ->
    {NewStreamData, Len, _IdentityLen, Md5, IdentityMd5} =
        couch_stream:copy_to_new_stream(OtherFd, StreamPointer, Fd),
    check_md5(IdentityMd5, InMd5),
    Att#att{data={Fd, NewStreamData}, md5=Md5, att_len=Len, disk_len=Len};

flush_att(Fd, #att{data=Data}=Att) when is_binary(Data) ->
    with_stream(Fd, Att, fun(OutputStream) ->
        couch_stream:write(OutputStream, Data)
    end);

flush_att(Fd, #att{data=Fun,att_len=undefined}=Att) when is_function(Fun) ->
    % chunked upload of unknown length; a Content-MD5 trailer, if any,
    % arrives with the zero-length final chunk
    with_stream(Fd, Att, fun(OutputStream) ->
        % Fun(MaxChunkSize, WriterFun) must call WriterFun
        % once for each chunk of the attachment,
        Fun(4096,
            % WriterFun({Length, Binary}, State)
            % WriterFun({0, _Footers}, State)
            % Called with Length == 0 on the last time.
            % WriterFun returns NewState.
            fun({0, Footers}, _) ->
                F = mochiweb_headers:from_binary(Footers),
                case mochiweb_headers:get_value("Content-MD5", F) of
                undefined ->
                    ok;
                Md5 ->
                    {md5, base64:decode(Md5)}
                end;
            ({_Length, Chunk}, _) ->
                couch_stream:write(OutputStream, Chunk)
            end, ok)
    end);

flush_att(Fd, #att{data=Fun,att_len=AttLen}=Att) when is_function(Fun) ->
    with_stream(Fd, Att, fun(OutputStream) ->
        write_streamed_attachment(OutputStream, Fun, AttLen)
    end).

% From RFC 2616 3.6.1 - Chunked Transfer Coding
%
% In other words, the origin server is willing to accept
% the possibility that the trailer fields might be silently
% discarded along the path to the client.
%
% I take this to mean that if "Trailers: Content-MD5\r\n"
% is present in the request, but there is no Content-MD5
% trailer, we're free to ignore this inconsistency and
% pretend that no Content-MD5 exists.
%% Open a (possibly gzip-compressing) stream on Fd, run Fun against it,
%% verify the identity md5 and return the #att{} updated with the final
%% stream pointer, lengths and encoding.
with_stream(Fd, #att{md5=InMd5,type=Type,encoding=Enc}=Att, Fun) ->
    {ok, OutputStream} = case (Enc =:= identity) andalso
            couch_util:compressible_att_type(Type) of
    true ->
        CompLevel = list_to_integer(
            couch_config:get("attachments", "compression_level", "0")
        ),
        couch_stream:open(Fd, gzip, [{compression_level, CompLevel}]);
    _ ->
        couch_stream:open(Fd)
    end,
    % a Content-MD5 trailer may supply the md5 only discovered at the end
    ReqMd5 = case Fun(OutputStream) of
        {md5, FooterMd5} ->
            case InMd5 of
            md5_in_footer -> FooterMd5;
            _ -> InMd5
            end;
        _ ->
            InMd5
    end,
    {StreamInfo, Len, IdentityLen, Md5, IdentityMd5} =
        couch_stream:close(OutputStream),
    check_md5(IdentityMd5, ReqMd5),
    {AttLen, DiskLen, NewEnc} = case Enc of
    identity ->
        % equal md5s mean the stream did not actually compress the data
        case {Md5, IdentityMd5} of
        {Same, Same} ->
            {Len, IdentityLen, identity};
        _ ->
            {Len, IdentityLen, gzip}
        end;
    gzip ->
        case {Att#att.att_len, Att#att.disk_len} of
        {AL, DL} when AL =:= undefined orelse DL =:= undefined ->
            % Compressed attachment uploaded through the standalone API.
            {Len, Len, gzip};
        {AL, DL} ->
            % This case is used for efficient push-replication, where a
            % compressed attachment is located in the body of multipart
            % content-type request.
            {AL, DL, gzip}
        end
    end,
    Att#att{
        data={Fd,StreamInfo},
        att_len=AttLen,
        disk_len=DiskLen,
        md5=Md5,
        encoding=NewEnc
    }.


%% Pump exactly LenLeft bytes from the reader fun F into Stream.
write_streamed_attachment(_Stream, _F, 0) ->
    ok;
write_streamed_attachment(Stream, F, LenLeft) ->
    Bin = F(),
    ok = couch_stream:write(Stream, Bin),
    write_streamed_attachment(Stream, F, LenLeft - size(Bin)).

enum_docs_since_reduce_to_count(Reds) ->
    couch_btree:final_reduce(
        fun couch_db_updater:btree_by_seq_reduce/2, Reds).

enum_docs_reduce_to_count(Reds) ->
    {Count, _DelCount} = couch_btree:final_reduce(
        fun couch_db_updater:btree_by_id_reduce/2, Reds),
    Count.

changes_since(Db, Style, StartSeq, Fun, Acc) ->
    changes_since(Db, Style, StartSeq, Fun, [], Acc).

%% Fold Fun over doc_infos updated after StartSeq. `all_docs' style keeps
%% every rev updated past StartSeq; `main_only' keeps just the winner.
changes_since(Db, Style, StartSeq, Fun, Options, Acc) ->
    Wrapper = fun(DocInfo, _Offset, Acc2) ->
        #doc_info{revs=Revs} = DocInfo,
        DocInfo2 =
        case Style of
        main_only ->
            DocInfo;
        all_docs ->
            % remove revs before the seq
            DocInfo#doc_info{revs=[RevInfo ||
                #rev_info{seq=RevSeq}=RevInfo <- Revs, StartSeq < RevSeq]}
        end,
        Fun(DocInfo2, Acc2)
    end,
    {ok, _LastReduction, AccOut} = couch_btree:fold(Db#db.docinfo_by_seq_btree,
        Wrapper, Acc, [{start_key, StartSeq + 1}] ++ Options),
    {ok, AccOut}.

%% Count changes after SinceSeq using the seq btree's stored reductions
%% (no per-doc traversal).
count_changes_since(Db, SinceSeq) ->
    {ok, Changes} =
    couch_btree:fold_reduce(Db#db.docinfo_by_seq_btree,
        fun(_SeqStart, PartialReds, 0) ->
            {ok, couch_btree:final_reduce(Db#db.docinfo_by_seq_btree, PartialReds)}
        end,
        0, [{start_key, SinceSeq + 1}]),
    Changes.

%% Fold InFun over doc_infos after SinceSeq, also returning the total
%% count from the reductions up to the fold's stopping point.
enum_docs_since(Db, SinceSeq, InFun, Acc, Options) ->
    {ok, LastReduction, AccOut} = couch_btree:fold(Db#db.docinfo_by_seq_btree, InFun, Acc, [{start_key, SinceSeq + 1} | Options]),
    {ok, enum_docs_since_reduce_to_count(LastReduction), AccOut}.

%% Fold InFun over the by-id btree, also returning the non-deleted doc
%% count from the reductions up to the fold's stopping point.
enum_docs(Db, InFun, InAcc, Options) ->
    {ok, LastReduce, OutAcc} = couch_btree:fold(Db#db.fulldocinfo_by_id_btree, InFun, InAcc, Options),
    {ok, enum_docs_reduce_to_count(LastReduce), OutAcc}.

% server functions

%% gen_server init for the couch_db process: spawn the updater, fetch the
%% initial #db{} state from it, and take a reference on the fd counter.
init({DbName, Filepath, Fd, Options}) ->
    {ok, UpdaterPid} = gen_server:start_link(couch_db_updater, {self(), DbName, Filepath, Fd, Options}, []),
    {ok, #db{fd_ref_counter=RefCntr}=Db} = gen_server:call(UpdaterPid, get_db),
    couch_ref_counter:add(RefCntr),
    case lists:member(sys_db, Options) of
    true ->
        ok;
    false ->
        % system dbs are not counted in the open_databases stat
        couch_stats_collector:track_process_count({couchdb, open_databases})
    end,
    process_flag(trap_exit, true),
    {ok, Db}.

terminate(_Reason, Db) ->
    couch_util:shutdown_sync(Db#db.update_pid),
    ok.

handle_call({open_ref_count, OpenerPid}, _, #db{fd_ref_counter=RefCntr}=Db) ->
    ok = couch_ref_counter:add(RefCntr, OpenerPid),
    {reply, {ok, Db}, Db};
handle_call(is_idle, _From, #db{fd_ref_counter=RefCntr, compactor_pid=Compact,
        waiting_delayed_commit=Delay}=Db) ->
    % Idle means no referrers. Unless in the middle of a compaction file switch,
    % there are always at least 2 referrers, couch_db_updater and us.
    {reply, (Delay == nil) andalso (Compact == nil) andalso (couch_ref_counter:count(RefCntr) == 2), Db};
handle_call({db_updated, NewDb}, _From, #db{fd_ref_counter=OldRefCntr}) ->
    #db{fd_ref_counter=NewRefCntr}=NewDb,
    % swap fd references when compaction switched the underlying file
    case NewRefCntr =:= OldRefCntr of
    true -> ok;
    false ->
        couch_ref_counter:add(NewRefCntr),
        couch_ref_counter:drop(OldRefCntr)
    end,
    {reply, ok, NewDb};
handle_call(get_db, _From, Db) ->
    {reply, {ok, Db}, Db}.


%% No casts are part of this server's protocol; treat any as fatal.
handle_cast(Msg, Db) ->
    ?LOG_ERROR("Bad cast message received for db ~s: ~p", [Db#db.name, Msg]),
    exit({error, Msg}).

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% Linked-process exits: normal ones are ignored (trap_exit is set in
%% init); abnormal ones take this server down with the same reason.
handle_info({'EXIT', _Pid, normal}, Db) ->
    {noreply, Db};
handle_info({'EXIT', _Pid, Reason}, Server) ->
    {stop, Reason, Server};
handle_info(Msg, Db) ->
    ?LOG_ERROR("Bad message received for db ~s: ~p", [Db#db.name, Msg]),
    exit({error, Msg}).


%%% Internal function %%%
%% Open each requested rev (or all leaf revs) for every {Id, Revs} pair.
%% Results line up positionally with IdRevs; each entry is {ok, Results}
%% where unknown revs appear as {{not_found, missing}, Rev}.
open_doc_revs_int(Db, IdRevs, Options) ->
    Ids = [Id || {Id, _Revs} <- IdRevs],
    LookupResults = get_full_doc_infos(Db, Ids),
    lists:zipwith(
        fun({Id, Revs}, Lookup) ->
            case Lookup of
            {ok, #full_doc_info{rev_tree=RevTree}} ->
                {FoundRevs, MissingRevs} =
                case Revs of
                all ->
                    {couch_key_tree:get_all_leafs(RevTree), []};
                _ ->
                    % `latest' resolves each requested rev to its
                    % descendant leaf(s) instead of the rev itself
                    case lists:member(latest, Options) of
                    true ->
                        couch_key_tree:get_key_leafs(RevTree, Revs);
                    false ->
                        couch_key_tree:get(RevTree, Revs)
                    end
                end,
                FoundResults =
                lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) ->
                    case Value of
                    ?REV_MISSING ->
                        % we have the rev in our list but know nothing about it
                        {{not_found, missing}, {Pos, Rev}};
                    {IsDeleted, SummaryPtr, _UpdateSeq} ->
                        {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
                    end
                end, FoundRevs),
                Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
                {ok, Results};
            not_found when Revs == all ->
                {ok, []};
            not_found ->
                {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
            end
        end,
        IdRevs, LookupResults).

%% Open a doc given an id, a #doc_info{}, or a #full_doc_info{}. _local
%% docs live in their own btree and use a bare integer revision counter,
%% converted here to a binary rev id for a uniform #doc{} shape.
open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, _Options) ->
    case couch_btree:lookup(Db#db.local_docs_btree, [Id]) of
    [{ok, {_, {Rev, BodyData}}}] ->
        {ok, #doc{id=Id, revs={0, [list_to_binary(integer_to_list(Rev))]}, body=BodyData}};
    [not_found] ->
        {not_found, missing}
    end;
open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) ->
    % doc_info carries no rev tree, so meta is computed with an empty one
    #rev_info{deleted=IsDeleted,rev={Pos,RevId},body_sp=Bp} = RevInfo,
    Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
    {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}};
open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
    % winning rev first in the doc_info; fetch its full rev path
    #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
        DocInfo = couch_doc:to_doc_info(FullDocInfo),
    {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
    Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
    {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}};
open_doc_int(Db, Id, Options) ->
    case get_full_doc_info(Db, Id) of
    {ok, FullDocInfo} ->
        open_doc_int(Db, FullDocInfo, Options);
    not_found ->
        {not_found, missing}
    end.

%% Build the #doc.meta proplist for whichever of the revs_info,
%% conflicts, deleted_conflicts and local_seq options were requested.
doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTree, Options) ->
    case lists:member(revs_info, Options) of
    false -> [];
    true ->
        {[{Pos, RevPath}],[]} =
            couch_key_tree:get_full_key_paths(RevTree, [Rev]),

        [{revs_info, Pos, lists:map(
            fun({Rev1, {true, _Sp, _UpdateSeq}}) ->
                {Rev1, deleted};
            ({Rev1, {false, _Sp, _UpdateSeq}}) ->
                {Rev1, available};
            ({Rev1, ?REV_MISSING}) ->
                {Rev1, missing}
            end, RevPath)}]
    end ++
    case lists:member(conflicts, Options) of
    false -> [];
    true ->
        % RestInfo holds every non-winning rev; live ones are conflicts
        case [Rev1 || #rev_info{rev=Rev1,deleted=false} <- RestInfo] of
        [] -> [];
        ConflictRevs -> [{conflicts, ConflictRevs}]
        end
    end ++
    case lists:member(deleted_conflicts, Options) of
    false -> [];
    true ->
        case [Rev1 || #rev_info{rev=Rev1,deleted=true} <- RestInfo] of
        [] -> [];
        DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
        end
    end ++
    case lists:member(local_seq, Options) of
    false -> [];
    true -> [{local_seq, Seq}]
    end.

read_doc(#db{fd=Fd}, OldStreamPointer) when is_tuple(OldStreamPointer) ->
    % 09 UPGRADE CODE
    couch_stream:old_read_term(Fd, OldStreamPointer);
read_doc(#db{fd=Fd}, Pos) ->
    couch_file:pread_term(Fd, Pos).


%% Convert a doc's rev path into a single-branch key tree rooted at the
%% oldest rev the path names.
doc_to_tree(#doc{revs={Start, RevIds}}=Doc) ->
    [Tree] = doc_to_tree_simple(Doc, lists:reverse(RevIds)),
    {Start - length(RevIds) + 1, Tree}.


%% Only the newest rev holds the doc; ancestors are ?REV_MISSING stubs.
doc_to_tree_simple(Doc, [RevId]) ->
    [{RevId, Doc, []}];
doc_to_tree_simple(Doc, [RevId | Rest]) ->
    [{RevId, ?REV_MISSING, doc_to_tree_simple(Doc, Rest)}].


%% Materialize a #doc{} from its on-disk body pointer Bp, upgrading the
%% two older attachment tuple formats to the current #att{} shape.
make_doc(#db{fd=Fd}=Db, Id, Deleted, Bp, RevisionPath) ->
    {BodyData, Atts} =
    case Bp of
    nil ->
        {[], []};
    _ ->
        {ok, {BodyData0, Atts0}} = read_doc(Db, Bp),
        {BodyData0,
            lists:map(
                fun({Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
                    % current 8-tuple format with encoding field
                    #att{name=Name,
                        type=Type,
                        att_len=AttLen,
                        disk_len=DiskLen,
                        md5=Md5,
                        revpos=RevPos,
                        data={Fd,Sp},
                        encoding=
                        case Enc of
                        true ->
                            % 0110 UPGRADE CODE
                            gzip;
                        false ->
                            % 0110 UPGRADE CODE
                            identity;
                        _ ->
                            Enc
                        end
                    };
                ({Name,Type,Sp,AttLen,RevPos,Md5}) ->
                    % pre-encoding 6-tuple format
                    #att{name=Name,
                        type=Type,
                        att_len=AttLen,
                        disk_len=AttLen,
                        md5=Md5,
                        revpos=RevPos,
                        data={Fd,Sp}};
                ({Name,{Type,Sp,AttLen}}) ->
                    % oldest format: no md5, no revpos
                    #att{name=Name,
                        type=Type,
                        att_len=AttLen,
                        disk_len=AttLen,
                        md5= <<>>,
                        revpos=0,
                        data={Fd,Sp}}
                end, Atts0)}
    end,
    #doc{
        id = Id,
        revs = RevisionPath,
        body = BodyData,
        atts = Atts,
        deleted = Deleted
    }.


%% System dbs are excluded from stats collection.
increment_stat(#db{is_sys_db = true}, _Stat) ->
    ok;
increment_stat(#db{}, Stat) ->
    couch_stats_collector:increment(Stat).
diff --git a/apps/couch/src/couch_db_update_notifier.erl b/apps/couch/src/couch_db_update_notifier.erl
new file mode 100644
index 00000000..150eb31b
--- /dev/null
+++ b/apps/couch/src/couch_db_update_notifier.erl
@@ -0,0 +1,73 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

%
% This causes an OS process to be spawned and it is notified every time a database
% is updated.
%
% The notifications are in the form of the database name sent as a line of
% text to the OS processes stdout.
%

-module(couch_db_update_notifier).

-behaviour(gen_event).

-export([start_link/1, notify/1]).
-export([init/1, terminate/2, handle_event/2, handle_call/2, handle_info/2, code_change/3,stop/1]).

-include("couch_db.hrl").

%% Install a notifier handler under the couch_db_update event manager.
%% Exec may be an external command string, a fun/1, or a {fun/2, Acc} pair.
start_link(Exec) ->
    couch_event_sup:start_link(couch_db_update, {couch_db_update_notifier, make_ref()}, Exec).

notify(Event) ->
    gen_event:notify(couch_db_update, Event).

stop(Pid) ->
    couch_event_sup:stop(Pid).

%% Handler state is either a spawned OS process pid (for a command
%% string) or the fun / {fun, Acc} term passed straight through.
init(Exec) when is_list(Exec) -> % an exe
    couch_os_process:start_link(Exec, []);
init(Else) ->
    {ok, Else}.

terminate(_Reason, Pid) when is_pid(Pid) ->
    couch_os_process:stop(Pid),
    ok;
terminate(_Reason, _State) ->
    ok.

%% Dispatch an event to the handler state: call a fun/1, thread the
%% accumulator through a fun/2, or feed an OS process one JSON object.
handle_event(Event, Fun) when is_function(Fun, 1) ->
    Fun(Event),
    {ok, Fun};
handle_event(Event, {Fun, FunAcc}) ->
    FunAcc2 = Fun(Event, FunAcc),
    {ok, {Fun, FunAcc2}};
handle_event({EventAtom, DbName}, Pid) ->
    Obj = {[{type, list_to_binary(atom_to_list(EventAtom))}, {db, DbName}]},
    ok = couch_os_process:send(Pid, Obj),
    {ok, Pid}.

handle_call(_Request, State) ->
    {reply, ok, State}.

%% Our own OS process dying removes this handler; other EXITs are ignored.
handle_info({'EXIT', Pid, Reason}, Pid) ->
    ?LOG_ERROR("Update notification process ~p died: ~p", [Pid, Reason]),
    remove_handler;
handle_info({'EXIT', _, _}, Pid) ->
    %% the db_update event manager traps exits and forwards this message to all
    %% its handlers. Just ignore as it wasn't our os_process that exited.
    {ok, Pid}.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
diff --git a/apps/couch/src/couch_db_update_notifier_sup.erl b/apps/couch/src/couch_db_update_notifier_sup.erl
new file mode 100644
index 00000000..4d730fc7
--- /dev/null
+++ b/apps/couch/src/couch_db_update_notifier_sup.erl
@@ -0,0 +1,63 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License.
% You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

%
% This causes an OS process to be spawned and it is notified every time a database
% is updated.
%
% The notifications are in the form of the database name sent as a line of
% text to the OS process's stdout.
%

-module(couch_db_update_notifier_sup).

-behaviour(supervisor).

-export([start_link/0,init/1]).

start_link() ->
    supervisor:start_link({local, couch_db_update_notifier_sup},
        couch_db_update_notifier_sup, []).

% Start one couch_db_update_notifier child per entry in the
% [update_notification] config section, and register a config-change
% callback so edits to that section take effect immediately via
% reload_config/2.
init([]) ->
    ok = couch_config:register(
        fun("update_notification", Key, Value) -> reload_config(Key, Value) end
    ),

    UpdateNotifierExes = couch_config:get("update_notification"),

    {ok,
        {{one_for_one, 10, 3600},
            lists:map(fun({Name, UpdateNotifierExe}) ->
                {Name,
                {couch_db_update_notifier, start_link, [UpdateNotifierExe]},
                    permanent,
                    1000,
                    supervisor,
                    [couch_db_update_notifier]}
                end, UpdateNotifierExes)}}.

%% @doc when update_notification configuration changes, terminate the process
%% for that notifier and start a new one with the updated config
%%
%% NOTE(review): the child spec declares type `supervisor' although the
%% child started by couch_db_update_notifier:start_link/1 is a
%% couch_event_sup bridge -- confirm whether `worker' was intended.
%% terminate/delete return values are deliberately ignored so a child that
%% does not exist yet doesn't prevent starting the new one.
reload_config(Id, Exe) ->
    ChildSpec = {
        Id,
        {couch_db_update_notifier, start_link, [Exe]},
        permanent,
        1000,
        supervisor,
        [couch_db_update_notifier]
    },
    supervisor:terminate_child(couch_db_update_notifier_sup, Id),
    supervisor:delete_child(couch_db_update_notifier_sup, Id),
    supervisor:start_child(couch_db_update_notifier_sup, ChildSpec).
+ diff --git a/apps/couch/src/couch_db_updater.erl b/apps/couch/src/couch_db_updater.erl new file mode 100644 index 00000000..19a4c165 --- /dev/null +++ b/apps/couch/src/couch_db_updater.erl @@ -0,0 +1,879 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_db_updater). +-behaviour(gen_server). + +-export([btree_by_id_reduce/2,btree_by_seq_reduce/2]). +-export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]). + +-include("couch_db.hrl"). + + +init({MainPid, DbName, Filepath, Fd, Options}) -> + process_flag(trap_exit, true), + case lists:member(create, Options) of + true -> + % create a new header and writes it to the file + Header = #db_header{}, + ok = couch_file:write_header(Fd, Header), + % delete any old compaction files that might be hanging around + RootDir = couch_config:get("couchdb", "database_dir", "."), + couch_file:delete(RootDir, Filepath ++ ".compact"); + false -> + ok = couch_file:upgrade_old_header(Fd, <<$g, $m, $k, 0>>), % 09 UPGRADE CODE + case couch_file:read_header(Fd) of + {ok, Header} -> + ok; + no_valid_header -> + % create a new header and writes it to the file + Header = #db_header{}, + ok = couch_file:write_header(Fd, Header), + % delete any old compaction files that might be hanging around + file:delete(Filepath ++ ".compact") + end + end, + + Db = init_db(DbName, Filepath, Fd, Header), + Db2 = refresh_validate_doc_funs(Db), + {ok, Db2#db{main_pid = MainPid, is_sys_db = lists:member(sys_db, 
Options)}}. + + +terminate(_Reason, Db) -> + couch_file:close(Db#db.fd), + couch_util:shutdown_sync(Db#db.compactor_pid), + couch_util:shutdown_sync(Db#db.fd_ref_counter), + ok. + +handle_call(get_db, _From, Db) -> + {reply, {ok, Db}, Db}; +handle_call(full_commit, _From, #db{waiting_delayed_commit=nil}=Db) -> + {reply, ok, Db}; % no data waiting, return ok immediately +handle_call(full_commit, _From, Db) -> + {reply, ok, commit_data(Db)}; % commit the data and return ok +handle_call(increment_update_seq, _From, Db) -> + Db2 = commit_data(Db#db{update_seq=Db#db.update_seq+1}), + ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}), + couch_db_update_notifier:notify({updated, Db#db.name}), + {reply, {ok, Db2#db.update_seq}, Db2}; + +handle_call({set_security, NewSec}, _From, Db) -> + {ok, Ptr} = couch_file:append_term(Db#db.fd, NewSec), + Db2 = commit_data(Db#db{security=NewSec, security_ptr=Ptr, + update_seq=Db#db.update_seq+1}), + ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}), + {reply, ok, Db2}; + +handle_call({set_revs_limit, Limit}, _From, Db) -> + Db2 = commit_data(Db#db{revs_limit=Limit, + update_seq=Db#db.update_seq+1}), + ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}), + {reply, ok, Db2}; + +handle_call({purge_docs, _IdRevs}, _From, + #db{compactor_pid=Pid}=Db) when Pid /= nil -> + {reply, {error, purge_during_compaction}, Db}; +handle_call({purge_docs, IdRevs}, _From, Db) -> + #db{ + fd=Fd, + fulldocinfo_by_id_btree = DocInfoByIdBTree, + docinfo_by_seq_btree = DocInfoBySeqBTree, + update_seq = LastSeq, + header = Header = #db_header{purge_seq=PurgeSeq} + } = Db, + DocLookups = couch_btree:lookup(DocInfoByIdBTree, + [Id || {Id, _Revs} <- IdRevs]), + + NewDocInfos = lists:zipwith( + fun({_Id, Revs}, {ok, #full_doc_info{rev_tree=Tree}=FullDocInfo}) -> + case couch_key_tree:remove_leafs(Tree, Revs) of + {_, []=_RemovedRevs} -> % no change + nil; + {NewTree, RemovedRevs} -> + {FullDocInfo#full_doc_info{rev_tree=NewTree},RemovedRevs} + 
end; + (_, not_found) -> + nil + end, + IdRevs, DocLookups), + + SeqsToRemove = [Seq + || {#full_doc_info{update_seq=Seq},_} <- NewDocInfos], + + FullDocInfoToUpdate = [FullInfo + || {#full_doc_info{rev_tree=Tree}=FullInfo,_} + <- NewDocInfos, Tree /= []], + + IdRevsPurged = [{Id, Revs} + || {#full_doc_info{id=Id}, Revs} <- NewDocInfos], + + {DocInfoToUpdate, NewSeq} = lists:mapfoldl( + fun(#full_doc_info{rev_tree=Tree}=FullInfo, SeqAcc) -> + Tree2 = couch_key_tree:map_leafs( fun(RevInfo) -> + RevInfo#rev_info{seq=SeqAcc + 1} + end, Tree), + {couch_doc:to_doc_info(FullInfo#full_doc_info{rev_tree=Tree2}), + SeqAcc + 1} + end, LastSeq, FullDocInfoToUpdate), + + IdsToRemove = [Id || {#full_doc_info{id=Id,rev_tree=[]},_} + <- NewDocInfos], + + {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree, + DocInfoToUpdate, SeqsToRemove), + {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree, + FullDocInfoToUpdate, IdsToRemove), + {ok, Pointer} = couch_file:append_term(Fd, IdRevsPurged), + + Db2 = commit_data( + Db#db{ + fulldocinfo_by_id_btree = DocInfoByIdBTree2, + docinfo_by_seq_btree = DocInfoBySeqBTree2, + update_seq = NewSeq + 1, + header=Header#db_header{purge_seq=PurgeSeq+1, purged_docs=Pointer}}), + + ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}), + couch_db_update_notifier:notify({updated, Db#db.name}), + {reply, {ok, (Db2#db.header)#db_header.purge_seq, IdRevsPurged}, Db2}; +handle_call(start_compact, _From, Db) -> + case Db#db.compactor_pid of + nil -> + ?LOG_INFO("Starting compaction for db \"~s\"", [Db#db.name]), + Pid = spawn_link(fun() -> start_copy_compact(Db) end), + Db2 = Db#db{compactor_pid=Pid}, + ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}), + {reply, ok, Db2}; + _ -> + % compact currently running, this is a no-op + {reply, ok, Db} + end. 
% A compaction pass finished writing CompactFilepath. If the compacted
% file has caught up with the live db's update_seq: copy the _local docs
% into it (they are not carried over by the by-seq copy), commit, delete
% the old file and rename the compact file into place. Otherwise more
% writes landed while compaction ran, so spawn another compaction pass to
% copy the remaining updates.
handle_cast({compact_done, CompactFilepath}, #db{filepath=Filepath}=Db) ->
    {ok, NewFd} = couch_file:open(CompactFilepath),
    {ok, NewHeader} = couch_file:read_header(NewFd),
    #db{update_seq=NewSeq} = NewDb =
        init_db(Db#db.name, Filepath, NewFd, NewHeader),
    unlink(NewFd),
    case Db#db.update_seq == NewSeq of
    true ->
        % suck up all the local docs into memory and write them to the new db
        {ok, _, LocalDocs} = couch_btree:foldl(Db#db.local_docs_btree,
                fun(Value, _Offset, Acc) -> {ok, [Value | Acc]} end, []),
        {ok, NewLocalBtree} = couch_btree:add(NewDb#db.local_docs_btree, LocalDocs),

        % carry over fields that belong to the live db, not the header
        NewDb2 = commit_data(NewDb#db{
            local_docs_btree = NewLocalBtree,
            main_pid = Db#db.main_pid,
            filepath = Filepath,
            instance_start_time = Db#db.instance_start_time,
            revs_limit = Db#db.revs_limit
        }),

        ?LOG_DEBUG("CouchDB swapping files ~s and ~s.",
                [Filepath, CompactFilepath]),
        RootDir = couch_config:get("couchdb", "database_dir", "."),
        couch_file:delete(RootDir, Filepath),
        ok = file:rename(CompactFilepath, Filepath),
        close_db(Db),
        ok = gen_server:call(Db#db.main_pid, {db_updated, NewDb2}),
        ?LOG_INFO("Compaction for db \"~s\" completed.", [Db#db.name]),
        {noreply, NewDb2#db{compactor_pid=nil}};
    false ->
        ?LOG_INFO("Compaction file still behind main file "
            "(update seq=~p. compact update seq=~p). Retrying.",
            [Db#db.update_seq, NewSeq]),
        close_db(NewDb),
        Pid = spawn_link(fun() -> start_copy_compact(Db) end),
        Db2 = Db#db{compactor_pid=Pid},
        {noreply, Db2}
    end.
% An update_docs request from a client process. When the batch has no
% _local docs, opportunistically drain compatible update_docs messages
% already queued in the mailbox (collect_updates/4) so a single btree
% write can serve several clients. On success every waiting client gets
% {done, self()}; a thrown 'retry' (e.g. the file was swapped during
% compaction) sends {retry, self()} instead and leaves the db unchanged.
handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
        FullCommit}, Db) ->
    GroupedDocs2 = [[{Client, D} || D <- DocGroup] || DocGroup <- GroupedDocs],
    if NonRepDocs == [] ->
        {GroupedDocs3, Clients, FullCommit2} = collect_updates(GroupedDocs2,
                [Client], MergeConflicts, FullCommit);
    true ->
        GroupedDocs3 = GroupedDocs2,
        FullCommit2 = FullCommit,
        Clients = [Client]
    end,
    NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs],
    try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts,
                FullCommit2) of
    {ok, Db2} ->
        ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}),
        if Db2#db.update_seq /= Db#db.update_seq ->
            couch_db_update_notifier:notify({updated, Db2#db.name});
        true -> ok
        end,
        % catch: a client may have died while waiting
        [catch(ClientPid ! {done, self()}) || ClientPid <- Clients],
        {noreply, Db2}
    catch
        throw: retry ->
            [catch(ClientPid ! {retry, self()}) || ClientPid <- Clients],
            {noreply, Db}
    end;
handle_info(delayed_commit, #db{waiting_delayed_commit=nil}=Db) ->
    %no outstanding delayed commits, ignore
    {noreply, Db};
% Delayed-commit timer fired: flush now. commit_data/1 returns the db
% unchanged when there is nothing new to write, so only notify the main
% process when the record actually changed.
handle_info(delayed_commit, Db) ->
    case commit_data(Db) of
    Db ->
        {noreply, Db};
    Db2 ->
        ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
        {noreply, Db2}
    end;
handle_info({'EXIT', _Pid, normal}, Db) ->
    {noreply, Db};
handle_info({'EXIT', _Pid, Reason}, Db) ->
    {stop, Reason, Db}.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.


% Merge two lists of update groups, each sorted by doc id, into a single
% sorted list; groups for the same id are concatenated so that all
% pending updates to one document are applied together.
merge_updates([], RestB, AccOutGroups) ->
    lists:reverse(AccOutGroups, RestB);
merge_updates(RestA, [], AccOutGroups) ->
    lists:reverse(AccOutGroups, RestA);
merge_updates([[{_, #doc{id=IdA}}|_]=GroupA | RestA],
        [[{_, #doc{id=IdB}}|_]=GroupB | RestB], AccOutGroups) ->
    if IdA == IdB ->
        merge_updates(RestA, RestB, [GroupA ++ GroupB | AccOutGroups]);
    IdA < IdB ->
        merge_updates(RestA, [GroupB | RestB], [GroupA | AccOutGroups]);
    true ->
        merge_updates([GroupA | RestA], RestB, [GroupB | AccOutGroups])
    end.
% Drain queued update_docs messages (same MergeConflicts flag, no _local
% docs) from the mailbox so they can be written in one batch. Returns the
% merged doc groups, all waiting clients, and the OR of all FullCommit
% flags. 'after 0' means we never block -- we only take what is already
% waiting.
collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts, FullCommit) ->
    receive
    % Only collect updates with the same MergeConflicts flag and without
    % local docs. It's easier to just avoid multiple _local doc
    % updaters than deal with their possible conflicts, and local docs
    % writes are relatively rare. Can be optimized later if really needed.
    {update_docs, Client, GroupedDocs, [], MergeConflicts, FullCommit2} ->
        GroupedDocs2 = [[{Client, Doc} || Doc <- DocGroup]
                || DocGroup <- GroupedDocs],
        GroupedDocsAcc2 =
            merge_updates(GroupedDocsAcc, GroupedDocs2, []),
        collect_updates(GroupedDocsAcc2, [Client | ClientsAcc],
                MergeConflicts, (FullCommit or FullCommit2))
    after 0 ->
        {GroupedDocsAcc, ClientsAcc, FullCommit}
    end.


% by_seq btree on-disk format: key is the doc's highest update seq, value
% separates the rev infos into live and deleted lists.
btree_by_seq_split(#doc_info{id=Id, high_seq=KeySeq, revs=Revs}) ->
    RevInfos = [{Rev, Seq, Bp} ||
        #rev_info{rev=Rev,seq=Seq,deleted=false,body_sp=Bp} <- Revs],
    DeletedRevInfos = [{Rev, Seq, Bp} ||
        #rev_info{rev=Rev,seq=Seq,deleted=true,body_sp=Bp} <- Revs],
    {KeySeq,{Id, RevInfos, DeletedRevInfos}}.

% Inverse of btree_by_seq_split/1 (current format).
btree_by_seq_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
    #doc_info{
        id = Id,
        high_seq=KeySeq,
        revs =
            [#rev_info{rev=Rev,seq=Seq,deleted=false,body_sp = Bp} ||
                {Rev, Seq, Bp} <- RevInfos]
            ++
            [#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp = Bp} ||
                {Rev, Seq, Bp} <- DeletedRevInfos]};
btree_by_seq_join(KeySeq,{Id, Rev, Bp, Conflicts, DelConflicts, Deleted}) ->
    % 09 UPGRADE CODE
    % this is the 0.9.0 and earlier by_seq record. It's missing the body pointers
    % and individual seq nums for conflicts that are currently in the index,
    % meaning the filtered _changes api will not work except for on main docs.
    % Simply compact a 0.9.0 database to upgrade the index.
    #doc_info{
        id=Id,
        high_seq=KeySeq,
        revs = [#rev_info{rev=Rev,seq=KeySeq,deleted=Deleted,body_sp=Bp}] ++
            [#rev_info{rev=Rev1,seq=KeySeq,deleted=false} || Rev1 <- Conflicts] ++
            [#rev_info{rev=Rev2,seq=KeySeq,deleted=true} || Rev2 <- DelConflicts]}.

% by_id btree on-disk format: deleted flags are stored as 0/1 integers,
% and the rev tree values become {DeletedFlag, BodyPointer, UpdateSeq}.
btree_by_id_split(#full_doc_info{id=Id, update_seq=Seq,
        deleted=Deleted, rev_tree=Tree}) ->
    DiskTree =
        couch_key_tree:map(
            fun(_RevId, {IsDeleted, BodyPointer, UpdateSeq}) ->
                {if IsDeleted -> 1; true -> 0 end, BodyPointer, UpdateSeq};
            (_RevId, ?REV_MISSING) ->
                ?REV_MISSING
            end, Tree),
    {Id, {Seq, if Deleted -> 1; true -> 0 end, DiskTree}}.

% Inverse of btree_by_id_split/1, with an upgrade path for 0.9-era
% two-element rev values that carried no per-rev seq.
btree_by_id_join(Id, {HighSeq, Deleted, DiskTree}) ->
    Tree =
        couch_key_tree:map(
            fun(_RevId, {IsDeleted, BodyPointer, UpdateSeq}) ->
                {IsDeleted == 1, BodyPointer, UpdateSeq};
            (_RevId, ?REV_MISSING) ->
                ?REV_MISSING;
            (_RevId, {IsDeleted, BodyPointer}) ->
                % 09 UPGRADE CODE
                % this is the 0.9.0 and earlier rev info record. It's missing the seq
                % nums, which means couchdb will sometimes reexamine unchanged
                % documents with the _changes API.
                % This is fixed by compacting the database.
                {IsDeleted == 1, BodyPointer, HighSeq}
            end, DiskTree),

    #full_doc_info{id=Id, update_seq=HighSeq, deleted=Deleted==1, rev_tree=Tree}.

% by_id reduction: {NotDeletedCount, DeletedCount}.
btree_by_id_reduce(reduce, FullDocInfos) ->
    % count the number of not deleted documents
    {length([1 || #full_doc_info{deleted=false} <- FullDocInfos]),
        length([1 || #full_doc_info{deleted=true} <- FullDocInfos])};
btree_by_id_reduce(rereduce, Reds) ->
    {lists:sum([Count || {Count,_} <- Reds]),
        lists:sum([DelCount || {_, DelCount} <- Reds])}.

% by_seq reduction: plain count of entries.
btree_by_seq_reduce(reduce, DocInfos) ->
    % count the number of documents
    length(DocInfos);
btree_by_seq_reduce(rereduce, Reds) ->
    lists:sum(Reds).
+ +simple_upgrade_record(Old, New) when tuple_size(Old) =:= tuple_size(New) -> + Old; +simple_upgrade_record(Old, New) when tuple_size(Old) < tuple_size(New) -> + OldSz = tuple_size(Old), + NewValuesTail = + lists:sublist(tuple_to_list(New), OldSz + 1, tuple_size(New) - OldSz), + list_to_tuple(tuple_to_list(Old) ++ NewValuesTail). + + +init_db(DbName, Filepath, Fd, Header0) -> + Header1 = simple_upgrade_record(Header0, #db_header{}), + Header = + case element(2, Header1) of + 1 -> Header1#db_header{unused = 0, security_ptr = nil}; % 0.9 + 2 -> Header1#db_header{unused = 0, security_ptr = nil}; % post 0.9 and pre 0.10 + 3 -> Header1#db_header{security_ptr = nil}; % post 0.9 and pre 0.10 + 4 -> Header1#db_header{security_ptr = nil}; % 0.10 and pre 0.11 + ?LATEST_DISK_VERSION -> Header1; + _ -> throw({database_disk_version_error, "Incorrect disk header version"}) + end, + + {ok, FsyncOptions} = couch_util:parse_term( + couch_config:get("couchdb", "fsync_options", + "[before_header, after_header, on_file_open]")), + + case lists:member(on_file_open, FsyncOptions) of + true -> ok = couch_file:sync(Fd); + _ -> ok + end, + + {ok, IdBtree} = couch_btree:open(Header#db_header.fulldocinfo_by_id_btree_state, Fd, + [{split, fun(X) -> btree_by_id_split(X) end}, + {join, fun(X,Y) -> btree_by_id_join(X,Y) end}, + {reduce, fun(X,Y) -> btree_by_id_reduce(X,Y) end}]), + {ok, SeqBtree} = couch_btree:open(Header#db_header.docinfo_by_seq_btree_state, Fd, + [{split, fun(X) -> btree_by_seq_split(X) end}, + {join, fun(X,Y) -> btree_by_seq_join(X,Y) end}, + {reduce, fun(X,Y) -> btree_by_seq_reduce(X,Y) end}]), + {ok, LocalDocsBtree} = couch_btree:open(Header#db_header.local_docs_btree_state, Fd), + case Header#db_header.security_ptr of + nil -> + Security = [], + SecurityPtr = nil; + SecurityPtr -> + {ok, Security} = couch_file:pread_term(Fd, SecurityPtr) + end, + % convert start time tuple to microsecs and store as a binary string + {MegaSecs, Secs, MicroSecs} = now(), + StartTime = 
?l2b(io_lib:format("~p", + [(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])), + {ok, RefCntr} = couch_ref_counter:start([Fd]), + #db{ + update_pid=self(), + fd=Fd, + fd_ref_counter = RefCntr, + header=Header, + fulldocinfo_by_id_btree = IdBtree, + docinfo_by_seq_btree = SeqBtree, + local_docs_btree = LocalDocsBtree, + committed_update_seq = Header#db_header.update_seq, + update_seq = Header#db_header.update_seq, + name = DbName, + filepath = Filepath, + security = Security, + security_ptr = SecurityPtr, + instance_start_time = StartTime, + revs_limit = Header#db_header.revs_limit, + fsync_options = FsyncOptions + }. + + +close_db(#db{fd_ref_counter = RefCntr}) -> + couch_ref_counter:drop(RefCntr). + + +refresh_validate_doc_funs(Db) -> + {ok, DesignDocs} = couch_db:get_design_docs(Db), + ProcessDocFuns = lists:flatmap( + fun(DesignDoc) -> + case couch_doc:get_validate_doc_fun(DesignDoc) of + nil -> []; + Fun -> [Fun] + end + end, DesignDocs), + Db#db{validate_doc_funs=ProcessDocFuns}. + +% rev tree functions + +flush_trees(_Db, [], AccFlushedTrees) -> + {ok, lists:reverse(AccFlushedTrees)}; +flush_trees(#db{fd=Fd,header=Header}=Db, + [InfoUnflushed | RestUnflushed], AccFlushed) -> + #full_doc_info{update_seq=UpdateSeq, rev_tree=Unflushed} = InfoUnflushed, + Flushed = couch_key_tree:map( + fun(_Rev, Value) -> + case Value of + #doc{atts=Atts,deleted=IsDeleted}=Doc -> + % this node value is actually an unwritten document summary, + % write to disk. + % make sure the Fd in the written bins is the same Fd we are + % and convert bins, removing the FD. + % All bins should have been written to disk already. + DiskAtts = + case Atts of + [] -> []; + [#att{data={BinFd, _Sp}} | _ ] when BinFd == Fd -> + [{N,T,P,AL,DL,R,M,E} + || #att{name=N,type=T,data={_,P},md5=M,revpos=R, + att_len=AL,disk_len=DL,encoding=E} + <- Atts]; + _ -> + % BinFd must not equal our Fd. 
This can happen when a database + % is being switched out during a compaction + ?LOG_DEBUG("File where the attachments are written has" + " changed. Possibly retrying.", []), + throw(retry) + end, + {ok, NewSummaryPointer} = + case Header#db_header.disk_version < 4 of + true -> + couch_file:append_term(Fd, {Doc#doc.body, DiskAtts}); + false -> + couch_file:append_term_md5(Fd, {Doc#doc.body, DiskAtts}) + end, + {IsDeleted, NewSummaryPointer, UpdateSeq}; + _ -> + Value + end + end, Unflushed), + flush_trees(Db, RestUnflushed, [InfoUnflushed#full_doc_info{rev_tree=Flushed} | AccFlushed]). + + +send_result(Client, Id, OriginalRevs, NewResult) -> + % used to send a result to the client + catch(Client ! {result, self(), {{Id, OriginalRevs}, NewResult}}). + +merge_rev_trees(_MergeConflicts, [], [], AccNewInfos, AccRemoveSeqs, AccSeq) -> + {ok, lists:reverse(AccNewInfos), AccRemoveSeqs, AccSeq}; +merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList], + [OldDocInfo|RestOldInfo], AccNewInfos, AccRemoveSeqs, AccSeq) -> + #full_doc_info{id=Id,rev_tree=OldTree,deleted=OldDeleted,update_seq=OldSeq} + = OldDocInfo, + NewRevTree = lists:foldl( + fun({Client, #doc{revs={Pos,[_Rev|PrevRevs]}}=NewDoc}, AccTree) -> + if not MergeConflicts -> + case couch_key_tree:merge(AccTree, [couch_db:doc_to_tree(NewDoc)]) of + {_NewTree, conflicts} when (not OldDeleted) -> + send_result(Client, Id, {Pos-1,PrevRevs}, conflict), + AccTree; + {NewTree, conflicts} when PrevRevs /= [] -> + % Check to be sure if prev revision was specified, it's + % a leaf node in the tree + Leafs = couch_key_tree:get_all_leafs(AccTree), + IsPrevLeaf = lists:any(fun({_, {LeafPos, [LeafRevId|_]}}) -> + {LeafPos, LeafRevId} == {Pos-1, hd(PrevRevs)} + end, Leafs), + if IsPrevLeaf -> + NewTree; + true -> + send_result(Client, Id, {Pos-1,PrevRevs}, conflict), + AccTree + end; + {NewTree, no_conflicts} when AccTree == NewTree -> + % the tree didn't change at all + % meaning we are saving a rev that's already + % been editted 
again. + if (Pos == 1) and OldDeleted -> + % this means we are recreating a brand new document + % into a state that already existed before. + % put the rev into a subsequent edit of the deletion + #doc_info{revs=[#rev_info{rev={OldPos,OldRev}}|_]} = + couch_doc:to_doc_info(OldDocInfo), + NewRevId = couch_db:new_revid( + NewDoc#doc{revs={OldPos, [OldRev]}}), + NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}}, + {NewTree2, _} = couch_key_tree:merge(AccTree, + [couch_db:doc_to_tree(NewDoc2)]), + % we changed the rev id, this tells the caller we did + send_result(Client, Id, {Pos-1,PrevRevs}, + {ok, {OldPos + 1, NewRevId}}), + NewTree2; + true -> + send_result(Client, Id, {Pos-1,PrevRevs}, conflict), + AccTree + end; + {NewTree, _} -> + NewTree + end; + true -> + {NewTree, _} = couch_key_tree:merge(AccTree, + [couch_db:doc_to_tree(NewDoc)]), + NewTree + end + end, + OldTree, NewDocs), + if NewRevTree == OldTree -> + % nothing changed + merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo, AccNewInfos, + AccRemoveSeqs, AccSeq); + true -> + % we have updated the document, give it a new seq # + NewInfo = #full_doc_info{id=Id,update_seq=AccSeq+1,rev_tree=NewRevTree}, + RemoveSeqs = case OldSeq of + 0 -> AccRemoveSeqs; + _ -> [OldSeq | AccRemoveSeqs] + end, + merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo, + [NewInfo|AccNewInfos], RemoveSeqs, AccSeq+1) + end. + + + +new_index_entries([], AccById, AccBySeq) -> + {AccById, AccBySeq}; +new_index_entries([FullDocInfo|RestInfos], AccById, AccBySeq) -> + #doc_info{revs=[#rev_info{deleted=Deleted}|_]} = DocInfo = + couch_doc:to_doc_info(FullDocInfo), + new_index_entries(RestInfos, + [FullDocInfo#full_doc_info{deleted=Deleted}|AccById], + [DocInfo|AccBySeq]). + + +stem_full_doc_infos(#db{revs_limit=Limit}, DocInfos) -> + [Info#full_doc_info{rev_tree=couch_key_tree:stem(Tree, Limit)} || + #full_doc_info{rev_tree=Tree}=Info <- DocInfos]. 
% Apply a batch of updates inside the updater process. DocsList is a list
% of groups of {Client, #doc{}} where each group shares a doc id;
% NonRepDocs are _local (non-replicating) docs handled separately.
% Steps: merge new revisions into the existing rev trees, stem to
% revs_limit, write the _local docs, flush the doc bodies to disk, update
% the by_id and by_seq indexes, refresh validation funs if any design doc
% was touched, and commit (delayed unless FullCommit).
update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
    #db{
        fulldocinfo_by_id_btree = DocInfoByIdBTree,
        docinfo_by_seq_btree = DocInfoBySeqBTree,
        update_seq = LastSeq
        } = Db,
    Ids = [Id || [{_Client, #doc{id=Id}}|_] <- DocsList],
    % lookup up the old documents, if they exist.
    OldDocLookups = couch_btree:lookup(DocInfoByIdBTree, Ids),
    OldDocInfos = lists:zipwith(
        fun(_Id, {ok, FullDocInfo}) ->
            FullDocInfo;
        (Id, not_found) ->
            % no prior doc: start from an empty full_doc_info
            #full_doc_info{id=Id}
        end,
        Ids, OldDocLookups),
    % Merge the new docs into the revision trees.
    {ok, NewDocInfos0, RemoveSeqs, NewSeq} = merge_rev_trees(
            MergeConflicts, DocsList, OldDocInfos, [], [], LastSeq),

    NewFullDocInfos = stem_full_doc_infos(Db, NewDocInfos0),

    % All documents are now ready to write.

    {ok, Db2} = update_local_docs(Db, NonRepDocs),

    % Write out the document summaries (the bodies are stored in the nodes of
    % the trees, the attachments are already written to disk)
    {ok, FlushedFullDocInfos} = flush_trees(Db2, NewFullDocInfos, []),

    {IndexFullDocInfos, IndexDocInfos} =
        new_index_entries(FlushedFullDocInfos, [], []),

    % and the indexes
    {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree, IndexFullDocInfos, []),
    {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree, IndexDocInfos, RemoveSeqs),

    Db3 = Db2#db{
        fulldocinfo_by_id_btree = DocInfoByIdBTree2,
        docinfo_by_seq_btree = DocInfoBySeqBTree2,
        update_seq = NewSeq},

    % Check if we just updated any design documents, and update the validation
    % funs if we did.
    case [1 || <<"_design/",_/binary>> <- Ids] of
    [] ->
        Db4 = Db3;
    _ ->
        Db4 = refresh_validate_doc_funs(Db3)
    end,

    {ok, commit_data(Db4, not FullCommit)}.
% Write _local (non-replicating) docs directly into the local-docs btree.
% Local docs use plain integer revisions carried as {0, [RevStr]}; an
% update or delete is accepted only when the supplied rev matches the
% stored one, otherwise the waiting client is sent 'conflict'. Clients
% are answered here via send_result/4; the caller only gets the new db.
update_local_docs(#db{local_docs_btree=Btree}=Db, Docs) ->
    Ids = [Id || {_Client, #doc{id=Id}} <- Docs],
    OldDocLookups = couch_btree:lookup(Btree, Ids),
    BtreeEntries = lists:zipwith(
        fun({Client, #doc{id=Id,deleted=Delete,revs={0,PrevRevs},body=Body}}, OldDocLookup) ->
            % client-supplied rev, 0 when the doc is claimed to be new
            case PrevRevs of
            [RevStr|_] ->
                PrevRev = list_to_integer(?b2l(RevStr));
            [] ->
                PrevRev = 0
            end,
            OldRev =
            case OldDocLookup of
                {ok, {_, {OldRev0, _}}} -> OldRev0;
                not_found -> 0
            end,
            case OldRev == PrevRev of
            true ->
                case Delete of
                    false ->
                        send_result(Client, Id, {0, PrevRevs}, {ok,
                                {0, ?l2b(integer_to_list(PrevRev + 1))}}),
                        {update, {Id, {PrevRev + 1, Body}}};
                    true ->
                        send_result(Client, Id, {0, PrevRevs},
                                {ok, {0, <<"0">>}}),
                        {remove, Id}
                end;
            false ->
                send_result(Client, Id, {0, PrevRevs}, conflict),
                ignore
            end
        end, Docs, OldDocLookups),

    BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
    BtreeIdsUpdate = [{Key, Val} || {update, {Key, Val}} <- BtreeEntries],

    {ok, Btree2} =
        couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),

    {ok, Db#db{local_docs_btree = Btree2}}.


% Commit immediately (no delayed-commit batching).
commit_data(Db) ->
    commit_data(Db, false).

% Capture the current btree states and db fields into a header record
% ready to be written to the file.
db_to_header(Db, Header) ->
    Header#db_header{
        update_seq = Db#db.update_seq,
        docinfo_by_seq_btree_state = couch_btree:get_state(Db#db.docinfo_by_seq_btree),
        fulldocinfo_by_id_btree_state = couch_btree:get_state(Db#db.fulldocinfo_by_id_btree),
        local_docs_btree_state = couch_btree:get_state(Db#db.local_docs_btree),
        security_ptr = Db#db.security_ptr,
        revs_limit = Db#db.revs_limit}.
+ +commit_data(#db{waiting_delayed_commit=nil} = Db, true) -> + Db#db{waiting_delayed_commit=erlang:send_after(1000,self(),delayed_commit)}; +commit_data(Db, true) -> + Db; +commit_data(Db, _) -> + #db{ + fd = Fd, + filepath = Filepath, + header = OldHeader, + fsync_options = FsyncOptions, + waiting_delayed_commit = Timer + } = Db, + if is_reference(Timer) -> erlang:cancel_timer(Timer); true -> ok end, + case db_to_header(Db, OldHeader) of + OldHeader -> + Db#db{waiting_delayed_commit=nil}; + Header -> + case lists:member(before_header, FsyncOptions) of + true -> ok = couch_file:sync(Filepath); + _ -> ok + end, + + ok = couch_file:write_header(Fd, Header), + + case lists:member(after_header, FsyncOptions) of + true -> ok = couch_file:sync(Filepath); + _ -> ok + end, + + Db#db{waiting_delayed_commit=nil, + header=Header, + committed_update_seq=Db#db.update_seq} + end. + + +copy_doc_attachments(#db{fd=SrcFd}=SrcDb, {Pos,_RevId}, SrcSp, DestFd) -> + {ok, {BodyData, BinInfos}} = couch_db:read_doc(SrcDb, SrcSp), + % copy the bin values + NewBinInfos = lists:map( + fun({Name, {Type, BinSp, AttLen}}) when is_tuple(BinSp) orelse BinSp == null -> + % 09 UPGRADE CODE + {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} = + couch_stream:old_copy_to_new_stream(SrcFd, BinSp, AttLen, DestFd), + {Name, Type, NewBinSp, AttLen, AttLen, Pos, Md5, identity}; + ({Name, {Type, BinSp, AttLen}}) -> + % 09 UPGRADE CODE + {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} = + couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd), + {Name, Type, NewBinSp, AttLen, AttLen, Pos, Md5, identity}; + ({Name, Type, BinSp, AttLen, _RevPos, <<>>}) when + is_tuple(BinSp) orelse BinSp == null -> + % 09 UPGRADE CODE + {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} = + couch_stream:old_copy_to_new_stream(SrcFd, BinSp, AttLen, DestFd), + {Name, Type, NewBinSp, AttLen, AttLen, AttLen, Md5, identity}; + ({Name, Type, BinSp, AttLen, RevPos, Md5}) -> + % 010 UPGRADE CODE + {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} = 
+ couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd), + {Name, Type, NewBinSp, AttLen, AttLen, RevPos, Md5, identity}; + ({Name, Type, BinSp, AttLen, DiskLen, RevPos, Md5, Enc1}) -> + {NewBinSp, AttLen, _, Md5, _IdentityMd5} = + couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd), + Enc = case Enc1 of + true -> + % 0110 UPGRADE CODE + gzip; + false -> + % 0110 UPGRADE CODE + identity; + _ -> + Enc1 + end, + {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, Md5, Enc} + end, BinInfos), + {BodyData, NewBinInfos}. + +copy_rev_tree_attachments(SrcDb, DestFd, Tree) -> + couch_key_tree:map( + fun(Rev, {IsDel, Sp, Seq}, leaf) -> + DocBody = copy_doc_attachments(SrcDb, Rev, Sp, DestFd), + {IsDel, DocBody, Seq}; + (_, _, branch) -> + ?REV_MISSING + end, Tree). + + +copy_docs(Db, #db{fd=DestFd}=NewDb, InfoBySeq, Retry) -> + Ids = [Id || #doc_info{id=Id} <- InfoBySeq], + LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids), + + % write out the attachments + NewFullDocInfos0 = lists:map( + fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) -> + Info#full_doc_info{rev_tree=copy_rev_tree_attachments(Db, DestFd, RevTree)} + end, LookupResults), + % write out the docs + % we do this in 2 stages so the docs are written out contiguously, making + % view indexing and replication faster. + NewFullDocInfos1 = lists:map( + fun(#full_doc_info{rev_tree=RevTree}=Info) -> + Info#full_doc_info{rev_tree=couch_key_tree:map_leafs( + fun(_Key, {IsDel, DocBody, Seq}) -> + {ok, Pos} = couch_file:append_term_md5(DestFd, DocBody), + {IsDel, Pos, Seq} + end, RevTree)} + end, NewFullDocInfos0), + + NewFullDocInfos = stem_full_doc_infos(Db, NewFullDocInfos1), + NewDocInfos = [couch_doc:to_doc_info(Info) || Info <- NewFullDocInfos], + RemoveSeqs = + case Retry of + false -> + []; + true -> + % We are retrying a compaction, meaning the documents we are copying may + % already exist in our file and must be removed from the by_seq index. 
+ Existing = couch_btree:lookup(NewDb#db.fulldocinfo_by_id_btree, Ids), + [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing] + end, + + {ok, DocInfoBTree} = couch_btree:add_remove( + NewDb#db.docinfo_by_seq_btree, NewDocInfos, RemoveSeqs), + {ok, FullDocInfoBTree} = couch_btree:add_remove( + NewDb#db.fulldocinfo_by_id_btree, NewFullDocInfos, []), + NewDb#db{ fulldocinfo_by_id_btree=FullDocInfoBTree, + docinfo_by_seq_btree=DocInfoBTree}. + + + +copy_compact(Db, NewDb0, Retry) -> + FsyncOptions = [Op || Op <- NewDb0#db.fsync_options, Op == before_header], + NewDb = NewDb0#db{fsync_options=FsyncOptions}, + TotalChanges = couch_db:count_changes_since(Db, NewDb#db.update_seq), + EnumBySeqFun = + fun(#doc_info{high_seq=Seq}=DocInfo, _Offset, {AccNewDb, AccUncopied, TotalCopied}) -> + couch_task_status:update("Copied ~p of ~p changes (~p%)", + [TotalCopied, TotalChanges, (TotalCopied*100) div TotalChanges]), + if TotalCopied rem 1000 =:= 0 -> + NewDb2 = copy_docs(Db, AccNewDb, lists:reverse([DocInfo | AccUncopied]), Retry), + if TotalCopied rem 10000 =:= 0 -> + {ok, {commit_data(NewDb2#db{update_seq=Seq}), [], TotalCopied + 1}}; + true -> + {ok, {NewDb2#db{update_seq=Seq}, [], TotalCopied + 1}} + end; + true -> + {ok, {AccNewDb, [DocInfo | AccUncopied], TotalCopied + 1}} + end + end, + + couch_task_status:set_update_frequency(500), + + {ok, _, {NewDb2, Uncopied, TotalChanges}} = + couch_btree:foldl(Db#db.docinfo_by_seq_btree, EnumBySeqFun, + {NewDb, [], 0}, + [{start_key, NewDb#db.update_seq + 1}]), + + couch_task_status:update("Flushing"), + + NewDb3 = copy_docs(Db, NewDb2, lists:reverse(Uncopied), Retry), + + % copy misc header values + if NewDb3#db.security /= Db#db.security -> + {ok, Ptr} = couch_file:append_term(NewDb3#db.fd, Db#db.security), + NewDb4 = NewDb3#db{security=Db#db.security, security_ptr=Ptr}; + true -> + NewDb4 = NewDb3 + end, + + commit_data(NewDb4#db{update_seq=Db#db.update_seq}). 
+ +start_copy_compact(#db{name=Name,filepath=Filepath}=Db) -> + CompactFile = Filepath ++ ".compact", + ?LOG_DEBUG("Compaction process spawned for db \"~s\"", [Name]), + case couch_file:open(CompactFile) of + {ok, Fd} -> + couch_task_status:add_task(<<"Database Compaction">>, <<Name/binary, " retry">>, <<"Starting">>), + Retry = true, + {ok, Header} = couch_file:read_header(Fd); + {error, enoent} -> + couch_task_status:add_task(<<"Database Compaction">>, Name, <<"Starting">>), + {ok, Fd} = couch_file:open(CompactFile, [create]), + Retry = false, + ok = couch_file:write_header(Fd, Header=#db_header{}) + end, + NewDb = init_db(Name, CompactFile, Fd, Header), + unlink(Fd), + NewDb2 = copy_compact(Db, NewDb, Retry), + close_db(NewDb2), + gen_server:cast(Db#db.update_pid, {compact_done, CompactFile}). + diff --git a/apps/couch/src/couch_doc.erl b/apps/couch/src/couch_doc.erl new file mode 100644 index 00000000..d15cd7de --- /dev/null +++ b/apps/couch/src/couch_doc.erl @@ -0,0 +1,508 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_doc). + +-export([to_doc_info/1,to_doc_info_path/1,parse_rev/1,parse_revs/1,rev_to_str/1,revs_to_strs/1]). +-export([att_foldl/3,att_foldl_decode/3,get_validate_doc_fun/1]). +-export([from_json_obj/1,to_json_obj/2,has_stubs/1, merge_stubs/2]). +-export([validate_docid/1]). +-export([doc_from_multi_part_stream/2]). +-export([doc_to_multi_part_stream/5, len_doc_to_multi_part_stream/4]). + +-include("couch_db.hrl"). 

%% ---------------------------------------------------------------------------
%% JSON serialization helpers used by to_json_obj/2.
%% ---------------------------------------------------------------------------

% "_rev" member for the winning revision; omitted for a brand-new doc.
to_json_rev(0, []) ->
    [];
to_json_rev(Start, [WinningRevId | _]) ->
    RevStr = list_to_binary([integer_to_list(Start), "-", revid_to_str(WinningRevId)]),
    [{<<"_rev">>, RevStr}].

% Body proplist, with "_deleted":true appended for deleted docs.
to_json_body(true, {BodyProps}) ->
    BodyProps ++ [{<<"_deleted">>, true}];
to_json_body(false, {BodyProps}) ->
    BodyProps.

% "_revisions" member (full revision path), only when the revs option is set.
to_json_revisions(Options, Start, RevIds) ->
    case lists:member(revs, Options) of
    true ->
        [{<<"_revisions">>, {[
            {<<"start">>, Start},
            {<<"ids">>, [revid_to_str(R) || R <- RevIds]}
        ]}}];
    false ->
        []
    end.

% Deterministic (16-byte md5) revids render as hex; anything else passes
% through unchanged.
revid_to_str(RevId) when byte_size(RevId) =:= 16 ->
    list_to_binary(couch_util:to_hex(RevId));
revid_to_str(RevId) ->
    RevId.

% <<"Pos-RevId">> rendering of a single {Pos, RevId} revision.
rev_to_str({Pos, RevId}) ->
    list_to_binary([integer_to_list(Pos), "-", revid_to_str(RevId)]).


revs_to_strs(Revs) ->
    [rev_to_str(Rev) || Rev <- Revs].

% Optional "_revs_info", "_local_seq", "_conflicts" and "_deleted_conflicts"
% members derived from open_doc meta entries.
to_json_meta(Meta) ->
    lists:map(
        fun({revs_info, Start, RevsInfo}) ->
            % positions count down from Start along the revision path
            {JsonRevsInfo, _FinalPos} = lists:mapfoldl(
                fun({RevId, Status}, Pos) ->
                    Obj = {[{<<"rev">>, rev_to_str({Pos, RevId})},
                        {<<"status">>, list_to_binary(atom_to_list(Status))}]},
                    {Obj, Pos - 1}
                end, Start, RevsInfo),
            {<<"_revs_info">>, JsonRevsInfo};
        ({local_seq, Seq}) ->
            {<<"_local_seq">>, Seq};
        ({conflicts, Conflicts}) ->
            {<<"_conflicts">>, revs_to_strs(Conflicts)};
        ({deleted_conflicts, DConflicts}) ->
            {<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}
        end, Meta).

% Expand the serialization options into the explicit flags taken by the
% 4-ary worker clause.
to_json_attachments(Atts, Options) ->
    OutputData = lists:member(attachments, Options),
    DataToFollow = lists:member(follows, Options),
    ShowEncInfo = lists:member(att_encoding_info, Options),
    to_json_attachments(Atts, OutputData, DataToFollow, ShowEncInfo).

% Serialize the attachment list as the "_attachments" JSON member.
% Flags (set by to_json_attachments/2): OutputData — inline the bytes;
% DataToFollow — advertise the data as a following MIME part instead;
% ShowEncInfo — expose encoding/encoded_length for non-identity encodings.
to_json_attachments([], _OutputData, _DataToFollow, _ShowEncInfo) ->
    [];
to_json_attachments(Atts, OutputData, DataToFollow, ShowEncInfo) ->
    AttProps = lists:map(
        fun(#att{disk_len=DiskLen, att_len=AttLen, encoding=Enc}=Att) ->
            {Att#att.name, {[
                {<<"content_type">>, Att#att.type},
                {<<"revpos">>, Att#att.revpos}
            ] ++
            % "length" is always the decoded (disk_len) size
            if not OutputData orelse Att#att.data == stub ->
                [{<<"length">>, DiskLen}, {<<"stub">>, true}];
            true ->
                if DataToFollow ->
                    [{<<"length">>, DiskLen}, {<<"follows">>, true}];
                true ->
                    % inline: gzip data is decoded before base64-encoding
                    AttData = case Enc of
                    gzip ->
                        zlib:gunzip(att_to_bin(Att));
                    identity ->
                        att_to_bin(Att)
                    end,
                    [{<<"data">>, base64:encode(AttData)}]
                end
            end ++
            case {ShowEncInfo, Enc} of
            {false, _} ->
                [];
            {true, identity} ->
                [];
            {true, _} ->
                [
                    {<<"encoding">>, couch_util:to_binary(Enc)},
                    {<<"encoded_length">>, AttLen}
                ]
            end
            }}
        end, Atts),
    [{<<"_attachments">>, {AttProps}}].

% Full JSON rendering of a #doc: id, rev, body, then the optional members
% in a fixed order.
to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
        meta=Meta}=Doc,Options)->
    {[{<<"_id">>, Id}]
        ++ to_json_rev(Start, RevIds)
        ++ to_json_body(Del, Body)
        ++ to_json_revisions(Options, Start, RevIds)
        ++ to_json_meta(Meta)
        ++ to_json_attachments(Doc#doc.atts, Options)
    }.

% Build a #doc from a decoded JSON object; any other term is a client error.
from_json_obj({Props}) ->
    transfer_fields(Props, #doc{body=[]});

from_json_obj(_Other) ->
    throw({bad_request, "Document must be a JSON object"}).

% Parse a revision id: a 32-char hex string (binary or list) is packed into
% its 16-byte integer form; other shapes pass through unchanged
% (NOTE(review): presumably legacy-format revids — confirm).
parse_revid(RevId) when size(RevId) =:= 32 ->
    RevInt = erlang:list_to_integer(?b2l(RevId), 16),
    <<RevInt:128>>;
parse_revid(RevId) when length(RevId) =:= 32 ->
    RevInt = erlang:list_to_integer(RevId, 16),
    <<RevInt:128>>;
parse_revid(RevId) when is_binary(RevId) ->
    RevId;
parse_revid(RevId) when is_list(RevId) ->
    ?l2b(RevId).


% Parse a "Pos-RevId" string/binary into {Pos, RevId}; throws bad_request
% on any malformed input.
parse_rev(Rev) when is_binary(Rev) ->
    parse_rev(?b2l(Rev));
parse_rev(Rev) when is_list(Rev) ->
    SplitRev = lists:splitwith(fun($-) -> false; (_) -> true end, Rev),
    case SplitRev of
    {Pos, [$- | RevId]} -> {list_to_integer(Pos), parse_revid(RevId)};
    _Else -> throw({bad_request, <<"Invalid rev format">>})
    end;
parse_rev(_BadRev) ->
    throw({bad_request, <<"Invalid rev format">>}).

parse_revs([]) ->
    [];
parse_revs([Rev | Rest]) ->
    [parse_rev(Rev) | parse_revs(Rest)].


% Ids must be binaries; a leading underscore is reserved except for the
% _design/ and _local/ namespaces.
validate_docid(Id) when is_binary(Id) ->
    case Id of
    <<"_design/", _/binary>> -> ok;
    <<"_local/", _/binary>> -> ok;
    <<"_", _/binary>> ->
        throw({bad_request, <<"Only reserved document ids may start with underscore.">>});
    _Else -> ok
    end;
validate_docid(Id) ->
    ?LOG_DEBUG("Document id is not a string: ~p", [Id]),
    throw({bad_request, <<"Document id must be a string">>}).

% Fold the JSON members into a #doc, one clause per special "_..." field;
% ordinary fields accumulate (reversed) into the body.
transfer_fields([], #doc{body=Fields}=Doc) ->
    % convert fields back to json object
    Doc#doc{body={lists:reverse(Fields)}};

transfer_fields([{<<"_id">>, Id} | Rest], Doc) ->
    validate_docid(Id),
    transfer_fields(Rest, Doc#doc{id=Id});

transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc) ->
    {Pos, RevId} = parse_rev(Rev),
    transfer_fields(Rest,
        Doc#doc{revs={Pos, [RevId]}});

transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc) ->
    % we already got the rev from the _revisions
    transfer_fields(Rest,Doc);

transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) ->
    % each attachment is a stub, a "follows" placeholder, or inline base64
    Atts = lists:map(fun({Name, {BinProps}}) ->
        case couch_util:get_value(<<"stub">>, BinProps) of
        true ->
            Type = couch_util:get_value(<<"content_type">>, BinProps),
            RevPos = couch_util:get_value(<<"revpos">>, BinProps, nil),
            DiskLen = couch_util:get_value(<<"length">>, BinProps),
            {Enc, EncLen} = att_encoding_info(BinProps),
            #att{name=Name, data=stub, type=Type, att_len=EncLen,
                disk_len=DiskLen, encoding=Enc, revpos=RevPos};
        _ ->
            Type = couch_util:get_value(<<"content_type">>, BinProps,
                    ?DEFAULT_ATTACHMENT_CONTENT_TYPE),
            RevPos = couch_util:get_value(<<"revpos">>, BinProps, 0),
            case couch_util:get_value(<<"follows">>, BinProps) of
            true ->
                % data arrives separately as a MIME part
                DiskLen = couch_util:get_value(<<"length">>, BinProps),
                {Enc, EncLen} = att_encoding_info(BinProps),
                #att{name=Name, data=follows, type=Type, encoding=Enc,
                    att_len=EncLen, disk_len=DiskLen, revpos=RevPos};
            _ ->
                Value = couch_util:get_value(<<"data">>, BinProps),
                Bin = base64:decode(Value),
                LenBin = size(Bin),
                #att{name=Name, data=Bin, type=Type, att_len=LenBin,
                    disk_len=LenBin, revpos=RevPos}
            end
        end
    end, JsonBins),
    transfer_fields(Rest, Doc#doc{atts=Atts});

transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
    RevIds = couch_util:get_value(<<"ids">>, Props),
    Start = couch_util:get_value(<<"start">>, Props),
    if not is_integer(Start) ->
        throw({doc_validation, "_revisions.start isn't an integer."});
    not is_list(RevIds) ->
        % NOTE(review): grammar typo in this user-visible message
        % ("a array"); left untouched here since it is runtime text
        throw({doc_validation, "_revisions.ids isn't a array."});
    true ->
        ok
    end,
    [throw({doc_validation, "RevId isn't a string"}) ||
        RevId <- RevIds, not is_binary(RevId)],
    RevIds2 = [parse_revid(RevId) || RevId <- RevIds],
    transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}});

transfer_fields([{<<"_deleted">>, B} | Rest], Doc) when is_boolean(B) ->
    transfer_fields(Rest, Doc#doc{deleted=B});

% ignored fields
transfer_fields([{<<"_revs_info">>, _} | Rest], Doc) ->
    transfer_fields(Rest, Doc);
transfer_fields([{<<"_local_seq">>, _} | Rest], Doc) ->
    transfer_fields(Rest, Doc);
transfer_fields([{<<"_conflicts">>, _} | Rest], Doc) ->
    transfer_fields(Rest, Doc);
transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc) ->
    transfer_fields(Rest, Doc);

% unknown special field
transfer_fields([{<<"_",Name/binary>>, _} | _], _) ->
    throw({doc_validation,
        ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});

transfer_fields([Field | Rest], #doc{body=Fields}=Doc) ->
    transfer_fields(Rest, Doc#doc{body=[Field|Fields]}).

% {Encoding, EncodedLength} for an attachment's JSON properties; identity
% when no "encoding" member is present. list_to_existing_atom guards the
% atom table against arbitrary client-supplied encodings.
att_encoding_info(BinProps) ->
    DiskLen = couch_util:get_value(<<"length">>, BinProps),
    case couch_util:get_value(<<"encoding">>, BinProps) of
    undefined ->
        {identity, DiskLen};
    Enc ->
        EncodedLen = couch_util:get_value(<<"encoded_length">>, BinProps, DiskLen),
        {list_to_existing_atom(?b2l(Enc)), EncodedLen}
    end.

to_doc_info(FullDocInfo) ->
    {DocInfo, _Path} = to_doc_info_path(FullDocInfo),
    DocInfo.

% Highest update seq among the rev infos (0 for an empty list).
max_seq([], Max) ->
    Max;
max_seq([#rev_info{seq=Seq}|Rest], Max) ->
    max_seq(Rest, if Max > Seq -> Max; true -> Seq end).

% Flatten a rev tree into a #doc_info plus the path of the winning
% revision. The winner is the first entry after sorting leaves descending
% by {not deleted, rev}: live revisions beat deleted ones, then higher revs
% win.
to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree}) ->
    RevInfosAndPath =
        [{#rev_info{deleted=Del,body_sp=Bp,seq=Seq,rev={Pos,RevId}}, Path} ||
            {{Del, Bp, Seq},{Pos, [RevId|_]}=Path} <-
            couch_key_tree:get_all_leafs(Tree)],
    SortedRevInfosAndPath = lists:sort(
        fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA},
            {#rev_info{deleted=DeletedB,rev=RevB}, _PathB}) ->
            % sort descending by {not deleted, rev}
            {not DeletedA, RevA} > {not DeletedB, RevB}
        end, RevInfosAndPath),
    [{_RevInfo, WinPath}|_] = SortedRevInfosAndPath,
    RevInfos = [RevInfo || {RevInfo, _Path} <- SortedRevInfosAndPath],
    {#doc_info{id=Id, high_seq=max_seq(RevInfos, 0), revs=RevInfos}, WinPath}.




% Fold Fun over an attachment's data chunks, dispatching on where the data
% lives: in-memory binary, legacy stream, md5-verified stream, or a
% receiver fun producing Len bytes.
att_foldl(#att{data=Bin}, Fun, Acc) when is_binary(Bin) ->
    Fun(Bin, Acc);
att_foldl(#att{data={Fd,Sp},att_len=Len}, Fun, Acc) when is_tuple(Sp) orelse Sp == null ->
    % 09 UPGRADE CODE
    couch_stream:old_foldl(Fd, Sp, Len, Fun, Acc);
att_foldl(#att{data={Fd,Sp},md5=Md5}, Fun, Acc) ->
    couch_stream:foldl(Fd, Sp, Md5, Fun, Acc);
att_foldl(#att{data=DataFun,att_len=Len}, Fun, Acc) when is_function(DataFun) ->
    fold_streamed_data(DataFun, Len, Fun, Acc).

% Like att_foldl/3 but decoding (e.g. gunzipping) stream data on the fly;
% fun-sourced data is only supported for identity encoding.
att_foldl_decode(#att{data={Fd,Sp},md5=Md5,encoding=Enc}, Fun, Acc) ->
    couch_stream:foldl_decode(Fd, Sp, Md5, Enc, Fun, Acc);
att_foldl_decode(#att{data=Fun2,att_len=Len, encoding=identity}, Fun, Acc) ->
    fold_streamed_data(Fun2, Len, Fun, Acc).

% Materialize an attachment's (possibly streamed) data as one binary.
att_to_bin(#att{data=Bin}) when is_binary(Bin) ->
    Bin;
att_to_bin(#att{data=IoList}) when is_list(IoList) ->
    iolist_to_binary(IoList);
att_to_bin(#att{data={_Fd, _StreamPointer}} = Att) ->
    % collect stream chunks in reverse, then flatten once at the end
    Chunks = att_foldl(Att, fun(Chunk, Acc) -> [Chunk | Acc] end, []),
    iolist_to_binary(lists:reverse(Chunks));
att_to_bin(#att{data=RcvFun, att_len=Len}) when is_function(RcvFun) ->
    Chunks = fold_streamed_data(RcvFun, Len,
        fun(Chunk, Acc) -> [Chunk | Acc] end, []),
    iolist_to_binary(lists:reverse(Chunks)).

% nil when the design doc defines no validate_doc_update; otherwise a
% closure that delegates validation to the query server.
get_validate_doc_fun(#doc{body={Props}}=DDoc) ->
    case couch_util:get_value(<<"validate_doc_update">>, Props) of
    undefined ->
        nil;
    _DefinedSource ->
        fun(EditDoc, DiskDoc, Ctx, SecObj) ->
            couch_query_servers:validate_doc_update(
                DDoc, EditDoc, DiskDoc, Ctx, SecObj)
        end
    end.


% True when a #doc (or a raw attachment list) contains any stub attachment.
has_stubs(#doc{atts=Atts}) ->
    has_stubs(Atts);
has_stubs(Atts) when is_list(Atts) ->
    lists:any(fun(#att{data=stub}) -> true; (_) -> false end, Atts).

% Replace each stub in StubsDoc with the matching on-disk attachment.
% A stub only matches when its revpos agrees with the disk copy (or the
% stub carries no revpos at all); otherwise the caller gets missing_stub.
merge_stubs(#doc{id=Id, atts=StubAtts}=StubsDoc, #doc{atts=DiskAtts}) ->
    OnDisk = dict:from_list(
        [{Name, Att} || #att{name=Name}=Att <- DiskAtts]),
    Merged = lists:map(
        fun(#att{name=Name, data=stub, revpos=StubRevPos}) ->
            case dict:find(Name, OnDisk) of
            {ok, #att{revpos=DiskRevPos}=DiskAtt}
                    when DiskRevPos == StubRevPos orelse StubRevPos == nil ->
                DiskAtt;
            _ ->
                throw({missing_stub,
                    <<"id:", Id/binary, ", name:", Name/binary>>})
            end;
        (Att) ->
            Att
        end, StubAtts),
    StubsDoc#doc{atts=Merged}.

% Pull exactly LenLeft bytes from RcvFun (a zero-arity chunk producer) and
% fold Fun over the chunks.
fold_streamed_data(_RcvFun, 0, _Fun, Acc) ->
    Acc;
fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0 ->
    Chunk = RcvFun(),
    fold_streamed_data(RcvFun, LenLeft - size(Chunk), Fun, Fun(Chunk, Acc)).

% Compute {ContentType, TotalByteLength} for a doc that will be sent with
% doc_to_multi_part_stream/5 using the same Boundary and flags. With no
% non-stub attachments the doc goes out as plain application/json.
len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) ->
    AttsSize = lists:foldl(fun(#att{data=Data} = Att, AccAttsSize) ->
            case Data of
            stub ->
                AccAttsSize;
            _ ->
                AccAttsSize +
                4 + % "\r\n\r\n"
                case SendEncodedAtts of
                true ->
                    Att#att.att_len;
                _ ->
                    Att#att.disk_len
                end +
                4 + % "\r\n--"
                size(Boundary)
            end
        end, 0, Atts),
    if AttsSize == 0 ->
        {<<"application/json">>, iolist_size(JsonBytes)};
    true ->
        {<<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
            2 + % "--"
            size(Boundary) +
            36 + % "\r\ncontent-type: application/json\r\n\r\n"
            iolist_size(JsonBytes) +
            4 + % "\r\n--"
            size(Boundary) +
            + AttsSize +
            2 % "--"
            }
    end.

% Write the doc as multipart/related: JSON part first, then one part per
% non-stub attachment. Falls back to bare JSON when everything is a stub.
% NOTE(review): part sizes here must stay in sync with
% len_doc_to_multi_part_stream/4 above.
doc_to_multi_part_stream(Boundary, JsonBytes, Atts, WriteFun,
    SendEncodedAtts) ->
    case lists:any(fun(#att{data=Data})-> Data /= stub end, Atts) of
    true ->
        WriteFun([<<"--", Boundary/binary,
                "\r\ncontent-type: application/json\r\n\r\n">>,
            JsonBytes, <<"\r\n--", Boundary/binary>>]),
        atts_to_mp(Atts, Boundary, WriteFun, SendEncodedAtts);
    false ->
        WriteFun(JsonBytes)
    end.

% Emit each non-stub attachment as a MIME part; the final "--" closes the
% multipart body.
atts_to_mp([], _Boundary, WriteFun, _SendEncAtts) ->
    WriteFun(<<"--">>);
atts_to_mp([#att{data=stub} | RestAtts], Boundary, WriteFun,
        SendEncodedAtts) ->
    atts_to_mp(RestAtts, Boundary, WriteFun, SendEncodedAtts);
atts_to_mp([Att | RestAtts], Boundary, WriteFun,
        SendEncodedAtts)  ->
    WriteFun(<<"\r\n\r\n">>),
    % decode on the way out unless the caller wants the encoded bytes
    AttFun = case SendEncodedAtts of
    false ->
        fun att_foldl_decode/3;
    true ->
        fun att_foldl/3
    end,
    AttFun(Att, fun(Data, _) -> WriteFun(Data) end, ok),
    WriteFun(<<"\r\n--", Boundary/binary>>),
    atts_to_mp(RestAtts, Boundary, WriteFun, SendEncodedAtts).


% Parse a multipart/related request into a #doc. A linked parser process
% feeds us the JSON part; 'follows' attachments get a reader fun that pulls
% body chunks from the parser on demand.
% NOTE(review): the receives here have no 'after' clause — if the parser
% dies without replying, the caller blocks forever (the link normally
% converts that into an exit; confirm callers trap appropriately).
doc_from_multi_part_stream(ContentType, DataFun) ->
    Self = self(),
    Parser = spawn_link(fun() ->
        couch_httpd:parse_multipart_request(ContentType, DataFun,
            fun(Next)-> mp_parse_doc(Next, []) end),
        unlink(Self)
        end),
    Parser ! {get_doc_bytes, self()},
    receive
    {doc_bytes, DocBytes} ->
        Doc = from_json_obj(?JSON_DECODE(DocBytes)),
        % go through the attachments looking for 'follows' in the data,
        % replace with function that reads the data from MIME stream.
        ReadAttachmentDataFun = fun() ->
            Parser ! {get_bytes, self()},
            receive {bytes, Bytes} -> Bytes end
        end,
        Atts2 = lists:map(
            fun(#att{data=follows}=A) ->
                A#att{data=ReadAttachmentDataFun};
            (A) ->
                A
            end, Doc#doc.atts),
        {ok, Doc#doc{atts=Atts2}}
    end.

% Multipart parser continuation for the leading JSON part: require an
% application/json content-type, accumulate body chunks, hand the reversed
% bytes to whoever asked, then switch to attachment parsing.
mp_parse_doc({headers, H}, []) ->
    case couch_util:get_value("content-type", H) of
    {"application/json", _} ->
        fun (Next) ->
            mp_parse_doc(Next, [])
        end
    end;
mp_parse_doc({body, Bytes}, AccBytes) ->
    fun (Next) ->
        mp_parse_doc(Next, [Bytes | AccBytes])
    end;
mp_parse_doc(body_end, AccBytes) ->
    receive {get_doc_bytes, From} ->
        From ! {doc_bytes, lists:reverse(AccBytes)}
    end,
    fun (Next) ->
        mp_parse_atts(Next)
    end.

% Continuation for attachment parts: forward each body chunk to the next
% {get_bytes, From} requester; headers and part boundaries are skipped.
mp_parse_atts(eof) ->
    ok;
mp_parse_atts({headers, _H}) ->
    fun (Next) ->
        mp_parse_atts(Next)
    end;
mp_parse_atts({body, Bytes}) ->
    receive {get_bytes, From} ->
        From ! {bytes, Bytes}
    end,
    fun (Next) ->
        mp_parse_atts(Next)
    end;
mp_parse_atts(body_end) ->
    fun (Next) ->
        mp_parse_atts(Next)
    end.


diff --git a/apps/couch/src/couch_event_sup.erl b/apps/couch/src/couch_event_sup.erl
new file mode 100644
index 00000000..6fd6963a
--- /dev/null
+++ b/apps/couch/src/couch_event_sup.erl
@@ -0,0 +1,69 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% License for the specific language governing permissions and limitations under
% the License.

%% The purpose of this module is to allow event handlers to participate in
%% Erlang supervisor trees. It provides a monitorable process that crashes
%% if the event handler fails. The process, when shutdown, deregisters the
%% event handler.

-module(couch_event_sup).
-behaviour(gen_server).

-export([start_link/3, start_link/4, stop/1]).
-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,
    code_change/3]).

%
% Instead of calling
%   ok = gen_event:add_sup_handler(error_logger, my_log, Args)
%
% do this:
%   {ok, LinkedPid} = couch_event_sup:start_link(error_logger, my_log, Args)
%
% The benefit is the event handler is now part of the process tree, and can
% be started, restarted and shutdown consistently like the rest of the
% server components.
%
% And now if the "event" crashes, the supervisor is notified and can restart
% the event handler.
%
% Use this form to register a named process:
%   {ok, LinkedPid} = couch_event_sup:start_link({local, my_log}, error_logger, my_log, Args)
%

% Start an anonymous supervisor bridge for EventHandler on EventMgr.
start_link(EventMgr, EventHandler, Args) ->
    gen_server:start_link(couch_event_sup, {EventMgr, EventHandler, Args}, []).

% Same, but registered under ServerName (e.g. {local, Name}).
start_link(ServerName, EventMgr, EventHandler, Args) ->
    gen_server:start_link(ServerName, couch_event_sup, {EventMgr, EventHandler, Args}, []).

% Asynchronous shutdown; terminating the server deregisters the handler
% (gen_event removes sup handlers when their supervising process exits).
stop(Pid) ->
    gen_server:cast(Pid, stop).

% Install the handler with supervision; a handler crash or removal is
% delivered to us as a gen_event_EXIT message (see handle_info/2).
init({EventMgr, EventHandler, Args}) ->
    ok = gen_event:add_sup_handler(EventMgr, EventHandler, Args),
    {ok, {EventMgr, EventHandler}}.

terminate(_Reason, _State) ->
    ok.

% BUG FIX: this previously returned {ok, State}, which is not a valid
% gen_server handle_call/3 return value — any synchronous call would crash
% this server and leave the caller hanging until timeout. Reply ok instead.
handle_call(_Whatever, _From, State) ->
    {reply, ok, State}.

handle_cast(stop, State) ->
    {stop, normal, State}.

% The supervised handler exited: propagate its reason as our exit reason so
% our own supervisor can react (restart, escalate, ...).
handle_info({gen_event_EXIT, _Handler, Reason}, State) ->
    {stop, Reason, State}.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
diff --git a/apps/couch/src/couch_external_manager.erl b/apps/couch/src/couch_external_manager.erl
new file mode 100644
index 00000000..7e401389
--- /dev/null
+++ b/apps/couch/src/couch_external_manager.erl
@@ -0,0 +1,101 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

% Singleton registry of external (os-process) request handlers, keyed by
% the "external" config section; handlers are started lazily on first use.
-module(couch_external_manager).
-behaviour(gen_server).

-export([start_link/0, execute/2, config_change/2]).
-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2]).

-include("couch_db.hrl").

start_link() ->
    gen_server:start_link({local, couch_external_manager},
        couch_external_manager, [], []).

% Run JsonReq against the handler registered for UrlName. When no server is
% configured, the {unknown_external_server, Msg} error term itself is
% returned to the caller.
execute(UrlName, JsonReq) ->
    Pid = gen_server:call(couch_external_manager, {get, UrlName}),
    case Pid of
    {error, Reason} ->
        Reason;
    _ ->
        couch_external_server:execute(Pid, JsonReq)
    end.

% couch_config change callback: drop the cached handler so the next request
% reboots it with the new command.
% NOTE(review): no catch-all clause — assumes couch_config only invokes
% this for the "external" section; confirm against couch_config:register.
config_change("external", UrlName) ->
    gen_server:call(couch_external_manager, {config, UrlName}).

% gen_server API

init([]) ->
    process_flag(trap_exit, true),
    Handlers = ets:new(couch_external_manager_handlers, [set, private]),
    couch_config:register(fun config_change/2),
    {ok, Handlers}.

% Stop every live handler on shutdown.
terminate(_Reason, Handlers) ->
    ets:foldl(fun({_UrlName, Pid}, nil) ->
        couch_external_server:stop(Pid),
        nil
    end, nil, Handlers),
    ok.

% Look up (or lazily start) the handler process for UrlName; replies with
% the pid, or an {error, _} tuple when nothing is configured.
handle_call({get, UrlName}, _From, Handlers) ->
    case ets:lookup(Handlers, UrlName) of
    [] ->
        case couch_config:get("external", UrlName, nil) of
        nil ->
            Msg = lists:flatten(
                io_lib:format("No server configured for ~p.", [UrlName])),
            {reply, {error, {unknown_external_server, ?l2b(Msg)}}, Handlers};
        Command ->
            {ok, NewPid} = couch_external_server:start_link(UrlName, Command),
            true = ets:insert(Handlers, {UrlName, NewPid}),
            {reply, NewPid, Handlers}
        end;
    [{UrlName, Pid}] ->
        {reply, Pid, Handlers}
    end;
handle_call({config, UrlName}, _From, Handlers) ->
    % A newly added handler and a handler that had its command
    % changed are treated exactly the same.

    % Shutdown the old handler.
    case ets:lookup(Handlers, UrlName) of
    [{UrlName, Pid}] ->
        couch_external_server:stop(Pid);
    [] ->
        ok
    end,
    % Wait for next request to boot the handler.
    {reply, ok, Handlers}.

handle_cast(_Whatever, State) ->
    {noreply, State}.

handle_info({'EXIT', Pid, normal}, Handlers) ->
    ?LOG_INFO("EXTERNAL: Server ~p terminated normally", [Pid]),
    % The process terminated normally without us asking - Remove Pid from
    % the handlers table so we don't attempt to reuse it
    ets:match_delete(Handlers, {'_', Pid}),
    {noreply, Handlers};

% NOTE(review): an abnormal handler exit stops the whole manager with
% reason normal, so a supervisor using a transient restart strategy would
% not restart it — confirm this is intentional.
handle_info({'EXIT', Pid, Reason}, Handlers) ->
    ?LOG_INFO("EXTERNAL: Server ~p died. (reason: ~p)", [Pid, Reason]),
    % Remove Pid from the handlers table so we don't try closing
    % it a second time in terminate/2.
    ets:match_delete(Handlers, {'_', Pid}),
    {stop, normal, Handlers}.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

diff --git a/apps/couch/src/couch_external_server.erl b/apps/couch/src/couch_external_server.erl
new file mode 100644
index 00000000..045fcee9
--- /dev/null
+++ b/apps/couch/src/couch_external_server.erl
@@ -0,0 +1,69 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License.
You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

% Wrapper gen_server around a single couch_os_process handling one
% configured external command; owned/cached by couch_external_manager.
-module(couch_external_server).
-behaviour(gen_server).

-export([start_link/2, stop/1, execute/2]).
-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).

-include("couch_db.hrl").

% External API

start_link(Name, Command) ->
    gen_server:start_link(couch_external_server, [Name, Command], []).

stop(Pid) ->
    gen_server:cast(Pid, stop).

% Synchronously prompt the OS process; infinity because the os process
% enforces its own configurable timeout.
execute(Pid, JsonReq) ->
    gen_server:call(Pid, {execute, JsonReq}, infinity).

% Gen Server Handlers

% Spawn the OS process with the configured couchdb/os_process_timeout and
% re-apply the timeout live whenever that config key changes.
init([Name, Command]) ->
    ?LOG_INFO("EXTERNAL: Starting process for: ~s", [Name]),
    ?LOG_INFO("COMMAND: ~s", [Command]),
    process_flag(trap_exit, true),
    Timeout = list_to_integer(couch_config:get("couchdb", "os_process_timeout",
        "5000")),
    {ok, Pid} = couch_os_process:start_link(Command, [{timeout, Timeout}]),
    couch_config:register(fun("couchdb", "os_process_timeout", NewTimeout) ->
        couch_os_process:set_timeout(Pid, list_to_integer(NewTimeout))
    end),
    {ok, {Name, Command, Pid}}.

terminate(_Reason, {_Name, _Command, Pid}) ->
    couch_os_process:stop(Pid),
    ok.

handle_call({execute, JsonReq}, _From, {Name, Command, Pid}) ->
    {reply, couch_os_process:prompt(Pid, JsonReq), {Name, Command, Pid}}.

handle_info({'EXIT', _Pid, normal}, State) ->
    {noreply, State};
% our os process died: propagate its exit reason as ours
handle_info({'EXIT', Pid, Reason}, {Name, Command, Pid}) ->
    ?LOG_INFO("EXTERNAL: Process for ~s exiting. (reason: ~w)", [Name, Reason]),
    {stop, Reason, {Name, Command, Pid}}.

handle_cast(stop, {Name, Command, Pid}) ->
    ?LOG_INFO("EXTERNAL: Shutting down ~s", [Name]),
    % the exit signal reaches the linked os process; terminate/2 also
    % issues an explicit couch_os_process:stop/1
    exit(Pid, normal),
    {stop, normal, {Name, Command, Pid}};
handle_cast(_Whatever, State) ->
    {noreply, State}.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.


diff --git a/apps/couch/src/couch_file.erl b/apps/couch/src/couch_file.erl
new file mode 100644
index 00000000..0a891712
--- /dev/null
+++ b/apps/couch/src/couch_file.erl
@@ -0,0 +1,588 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

% Append-only file server: all reads/writes go through a gen_server that
% owns the raw fd; data is laid out in 4KB blocks with length-prefixed
% (optionally md5-prefixed) terms and block-aligned headers.
-module(couch_file).
-behaviour(gen_server).

-include("couch_db.hrl").

-define(SIZE_BLOCK, 4096).

-record(file, {
    fd,                    % raw file descriptor (file:open in raw/binary mode)
    tail_append_begin = 0, % 09 UPGRADE CODE: offset where new-format appends begin
    eof = 0                % cached end-of-file offset; next append position
    }).

-export([open/1, open/2, close/1, bytes/1, sync/1, append_binary/2,old_pread/3]).
-export([append_term/2, pread_term/2, pread_iolist/2, write_header/2]).
-export([pread_binary/2, read_header/1, truncate/2, upgrade_old_header/2]).
-export([append_term_md5/2,append_binary_md5/2]).
-export([init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
-export([delete/2,delete/3,init_delete_dir/1]).

%%----------------------------------------------------------------------
%% Args:   Valid Options are [create] and [create,overwrite].
%%  Files are opened in read/write mode.
%% Returns: On success, {ok, Fd}
%%  or {error, Reason} if the file could not be opened.
%%----------------------------------------------------------------------

open(Filepath) ->
    open(Filepath, []).

% Start the fd-owning gen_server. If init fails it returns 'ignore' and
% posts the real error tagged with our Ref; when the caller traps exits we
% also consume the matching 'EXIT' so no stray signal leaks.
open(Filepath, Options) ->
    case gen_server:start_link(couch_file,
            {Filepath, Options, self(), Ref = make_ref()}, []) of
    {ok, Fd} ->
        {ok, Fd};
    ignore ->
        % get the error
        receive
        {Ref, Pid, Error} ->
            case process_info(self(), trap_exit) of
            {trap_exit, true} -> receive {'EXIT', Pid, _} -> ok end;
            {trap_exit, false} -> ok
            end,
            Error
        end;
    Error ->
        Error
    end.


%%----------------------------------------------------------------------
%% Purpose: To append an Erlang term to the end of the file.
%% Args:    Erlang term to serialize and append to the file.
%% Returns: {ok, Pos} where Pos is the file offset to the beginning the
%%  serialized  term. Use pread_term to read the term back.
%%  or {error, Reason}.
%%----------------------------------------------------------------------

append_term(Fd, Term) ->
    append_binary(Fd, term_to_binary(Term)).

append_term_md5(Fd, Term) ->
    append_binary_md5(Fd, term_to_binary(Term)).


%%----------------------------------------------------------------------
%% Purpose: To append an Erlang binary to the end of the file.
%% Args:    Erlang term to serialize and append to the file.
%% Returns: {ok, Pos} where Pos is the file offset to the beginning the
%%  serialized  term. Use pread_term to read the term back.
%%  or {error, Reason}.
%%----------------------------------------------------------------------

% 32-bit prefix: high bit 0 = plain length-prefixed payload.
append_binary(Fd, Bin) ->
    Size = iolist_size(Bin),
    gen_server:call(Fd, {append_bin,
            [<<0:1/integer,Size:31/integer>>, Bin]}, infinity).

% High bit 1 = payload preceded by its md5; verified on read.
append_binary_md5(Fd, Bin) ->
    Size = iolist_size(Bin),
    gen_server:call(Fd, {append_bin,
            [<<1:1/integer,Size:31/integer>>, couch_util:md5(Bin), Bin]}, infinity).


%%----------------------------------------------------------------------
%% Purpose: Reads a term from a file that was written with append_term
%% Args:    Pos, the offset into the file where the term is serialized.
%% Returns: {ok, Term}
%%  or {error, Reason}.
%%----------------------------------------------------------------------


pread_term(Fd, Pos) ->
    {ok, Bin} = pread_binary(Fd, Pos),
    {ok, binary_to_term(Bin)}.


%%----------------------------------------------------------------------
%% Purpose: Reads a binary from a file that was written with append_binary
%% Args:    Pos, the offset into the file where the term is serialized.
%% Returns: {ok, Term}
%%  or {error, Reason}.
%%----------------------------------------------------------------------

pread_binary(Fd, Pos) ->
    {ok, L} = pread_iolist(Fd, Pos),
    {ok, iolist_to_binary(L)}.

% Raw iolist read (no flattening); md5-prefixed writes are verified by the
% server side before the data is returned.
pread_iolist(Fd, Pos) ->
    gen_server:call(Fd, {pread_iolist, Pos}, infinity).

%%----------------------------------------------------------------------
%% Purpose: The length of a file, in bytes.
%% Returns: {ok, Bytes}
%%  or {error, Reason}.
%%----------------------------------------------------------------------

% length in bytes
bytes(Fd) ->
    gen_server:call(Fd, bytes, infinity).

%%----------------------------------------------------------------------
%% Purpose: Truncate a file to the number of bytes.
%% Returns: ok
%%  or {error, Reason}.
%%----------------------------------------------------------------------

truncate(Fd, Pos) ->
    gen_server:call(Fd, {truncate, Pos}, infinity).

%%----------------------------------------------------------------------
%% Purpose: Ensure all bytes written to the file are flushed to disk.
%% Returns: ok
%%  or {error, Reason}.
%%----------------------------------------------------------------------

% Path form opens a throwaway fd just to fsync; pid form asks the server.
sync(Filepath) when is_list(Filepath) ->
    {ok, Fd} = file:open(Filepath, [append, raw]),
    try file:sync(Fd) after file:close(Fd) end;
sync(Fd) ->
    gen_server:call(Fd, sync, infinity).

%%----------------------------------------------------------------------
%% Purpose: Close the file.
%% Returns: ok
%%----------------------------------------------------------------------
% Monitor + shutdown exit signal, then wait for the server to actually die
% so callers can safely reopen/delete the path afterwards.
close(Fd) ->
    MRef = erlang:monitor(process, Fd),
    try
        catch unlink(Fd),
        catch exit(Fd, shutdown),
        receive
        {'DOWN', MRef, _, _, _} ->
            ok
        end
    after
        erlang:demonitor(MRef, [flush])
    end.


delete(RootDir, Filepath) ->
    delete(RootDir, Filepath, true).

% Deletion via atomic rename into RootDir/.delete; the actual unlink can
% then run asynchronously (default) without racing a reopen of Filepath.
delete(RootDir, Filepath, Async) ->
    DelFile = filename:join([RootDir,".delete", ?b2l(couch_uuids:random())]),
    case file:rename(Filepath, DelFile) of
    ok ->
        if (Async) ->
            spawn(file, delete, [DelFile]),
            ok;
        true ->
            file:delete(DelFile)
        end;
    Error ->
        Error
    end.


% Create RootDir/.delete and purge any leftovers from a previous run.
init_delete_dir(RootDir) ->
    Dir = filename:join(RootDir,".delete"),
    % note: ensure_dir requires an actual filename component, which is the
    % reason for "foo".
    filelib:ensure_dir(filename:join(Dir,"foo")),
    filelib:fold_files(Dir, ".*", true,
        fun(Filename, _) ->
            ok = file:delete(Filename)
        end, ok).


% 09 UPGRADE CODE
old_pread(Fd, Pos, Len) ->
    {ok, <<RawBin:Len/binary>>, false} = gen_server:call(Fd, {pread, Pos, Len}, infinity),
    {ok, RawBin}.

% 09 UPGRADE CODE
upgrade_old_header(Fd, Sig) ->
    gen_server:call(Fd, {upgrade_old_header, Sig}, infinity).

% Locate and deserialize the most recent valid header block.
read_header(Fd) ->
    case gen_server:call(Fd, find_header, infinity) of
    {ok, Bin} ->
        {ok, binary_to_term(Bin)};
    Else ->
        Else
    end.

% Serialize Data and write it as an md5-protected, block-aligned header.
write_header(Fd, Data) ->
    Bin = term_to_binary(Data),
    Md5 = couch_util:md5(Bin),
    % now we assemble the final header binary and write to disk
    FinalBin = <<Md5/binary, Bin/binary>>,
    gen_server:call(Fd, {write_header, FinalBin}, infinity).




% Deliver the real open error to the caller (tagged with its Ref) and make
% gen_server:start_link return 'ignore'; see open/2 for the receive side.
init_status_error(ReturnPid, Ref, Error) ->
    ReturnPid ! {Ref, self(), Error},
    ignore.

% server functions

init({Filepath, Options, ReturnPid, Ref}) ->
    process_flag(trap_exit, true),
    case lists:member(create, Options) of
    true ->
        filelib:ensure_dir(Filepath),
        case file:open(Filepath, [read, append, raw, binary]) of
        {ok, Fd} ->
            {ok, Length} = file:position(Fd, eof),
            case Length > 0 of
            true ->
                % this means the file already exists and has data.
                % FYI: We don't differentiate between empty files and
                % non-existent files here.
                case lists:member(overwrite, Options) of
                true ->
                    {ok, 0} = file:position(Fd, 0),
                    ok = file:truncate(Fd),
                    ok = file:sync(Fd),
                    maybe_track_open_os_files(Options),
                    {ok, #file{fd=Fd}};
                false ->
                    ok = file:close(Fd),
                    init_status_error(ReturnPid, Ref, file_exists)
                end;
            false ->
                maybe_track_open_os_files(Options),
                {ok, #file{fd=Fd}}
            end;
        Error ->
            init_status_error(ReturnPid, Ref, Error)
        end;
    false ->
        % open in read mode first, so we don't create the file if it
        % doesn't exist.
        case file:open(Filepath, [read, raw]) of
        {ok, Fd_Read} ->
            {ok, Fd} = file:open(Filepath, [read, append, raw, binary]),
            ok = file:close(Fd_Read),
            maybe_track_open_os_files(Options),
            {ok, Length} = file:position(Fd, eof),
            {ok, #file{fd=Fd, eof=Length}};
        Error ->
            init_status_error(ReturnPid, Ref, Error)
        end
    end.

% System dbs (sys_db) are excluded from the open-os-files stats counter.
maybe_track_open_os_files(FileOptions) ->
    case lists:member(sys_db, FileOptions) of
    true ->
        ok;
    false ->
        couch_stats_collector:track_process_count({couchdb, open_os_files})
    end.

terminate(_Reason, _Fd) ->
    ok.


% Read the 4-byte length prefix at Pos; the high bit selects md5-verified
% vs plain payload (see append_binary/append_binary_md5).
handle_call({pread_iolist, Pos}, _From, File) ->
    {LenIolist, NextPos} = read_raw_iolist_int(File, Pos, 4),
    case iolist_to_binary(LenIolist) of
    <<1:1/integer,Len:31/integer>> -> % an MD5-prefixed term
        {Md5AndIoList, _} = read_raw_iolist_int(File, NextPos, Len+16),
        {Md5, IoList} = extract_md5(Md5AndIoList),
        case couch_util:md5(IoList) of
        Md5 ->
            {reply, {ok, IoList}, File};
        _ ->
            % corruption is fatal for the fd server, not just this read
            {stop, file_corruption, {error,file_corruption}, File}
        end;
    <<0:1/integer,Len:31/integer>> ->
        {Iolist, _} = read_raw_iolist_int(File, NextPos, Len),
        {reply, {ok, Iolist}, File}
    end;
% raw pread; third element tells 09-upgrade callers whether the range is
% past the upgrade boundary
handle_call({pread, Pos, Bytes}, _From, #file{fd=Fd,tail_append_begin=TailAppendBegin}=File) ->
    {ok, Bin} = file:pread(Fd, Pos, Bytes),
    {reply, {ok, Bin, Pos >= TailAppendBegin}, File};
handle_call(bytes, _From, #file{eof=Length}=File) ->
    {reply, {ok, Length}, File};
handle_call(sync, _From, #file{fd=Fd}=File) ->
    {reply, file:sync(Fd), File};
handle_call({truncate, Pos}, _From, #file{fd=Fd}=File) ->
    {ok, Pos} = file:position(Fd, Pos),
    case file:truncate(Fd) of
    ok ->
        {reply, ok, File#file{eof=Pos}};
    Error ->
        {reply, Error, File}
    end;
% append at eof, splitting the payload across 4KB block boundaries
handle_call({append_bin, Bin}, _From, #file{fd=Fd, eof=Pos}=File) ->
    Blocks = make_blocks(Pos rem ?SIZE_BLOCK, Bin),
    case file:write(Fd, Blocks) of
    ok ->
        {reply, {ok, Pos}, File#file{eof=Pos+iolist_size(Blocks)}};
    Error ->
        {reply, Error, File}
    end;
% headers are padded to the next block boundary and tagged with a leading
% <<1>> marker byte plus a 32-bit size (hence make_blocks offset 5)
handle_call({write_header, Bin}, _From, #file{fd=Fd, eof=Pos}=File) ->
    BinSize = size(Bin),
    case Pos rem ?SIZE_BLOCK of
    0 ->
        Padding = <<>>;
    BlockOffset ->
        Padding = <<0:(8*(?SIZE_BLOCK-BlockOffset))>>
    end,
    FinalBin = [Padding, <<1, BinSize:32/integer>> | make_blocks(5, [Bin])],
    case file:write(Fd, FinalBin) of
    ok ->
        {reply, ok, File#file{eof=Pos+iolist_size(FinalBin)}};
    Error ->
        {reply, Error, File}
    end;


% 09 UPGRADE: rewrite an old doubly-written header in the new block format
% and stamp the old slots with "upgraded" so a re-run is a no-op.
handle_call({upgrade_old_header, Prefix}, _From, #file{fd=Fd}=File) ->
    case (catch read_old_header(Fd, Prefix)) of
    {ok, Header} ->
        TailAppendBegin = File#file.eof,
        Bin = term_to_binary(Header),
        Md5 = couch_util:md5(Bin),
        % now we assemble the final header binary and write to disk
        FinalBin = <<Md5/binary, Bin/binary>>,
        % direct self-call of the clause above (not via gen_server)
        {reply, ok, _} = handle_call({write_header, FinalBin}, ok, File),
        ok = write_old_header(Fd, <<"upgraded">>, TailAppendBegin),
        {reply, ok, File#file{tail_append_begin=TailAppendBegin}};
    _Error ->
        case (catch read_old_header(Fd, <<"upgraded">>)) of
        {ok, TailAppendBegin} ->
            {reply, ok, File#file{tail_append_begin = TailAppendBegin}};
        _Error2 ->
            {reply, ok, File}
        end
    end;


handle_call(find_header, _From, #file{fd=Fd, eof=Pos}=File) ->
    {reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}.

% 09 UPGRADE CODE
-define(HEADER_SIZE, 2048). % size of each segment of the doubly written header

% 09 UPGRADE CODE
% Read the old doubly-written 2KB header pair; prefer the primary copy and
% fall back to the secondary, logging any single-copy corruption. A
% pointer_to_header_data result redirects to an appended term.
read_old_header(Fd, Prefix) ->
    {ok, Bin} = file:pread(Fd, 0, 2*(?HEADER_SIZE)),
    <<Bin1:(?HEADER_SIZE)/binary, Bin2:(?HEADER_SIZE)/binary>> = Bin,
    Result =
    % read the first header
    case extract_header(Prefix, Bin1) of
    {ok, Header1} ->
        case extract_header(Prefix, Bin2) of
        {ok, Header2} ->
            case Header1 == Header2 of
            true ->
                % Everything is completely normal!
                {ok, Header1};
            false ->
                % To get here we must have two different header versions
                % with signatures intact. It's weird but possible (a commit
                % failure right at the 2k boundary). Log it and take the
                % first.
                ?LOG_INFO("Header version differences.~nPrimary Header: ~p~nSecondary Header: ~p", [Header1, Header2]),
                {ok, Header1}
            end;
        Error ->
            % error reading second header. It's ok, but log it.
            ?LOG_INFO("Secondary header corruption (error: ~p). Using primary header.", [Error]),
            {ok, Header1}
        end;
    Error ->
        % error reading primary header
        case extract_header(Prefix, Bin2) of
        {ok, Header2} ->
            % log corrupt primary header. It's ok since the secondary is
            % still good.
            ?LOG_INFO("Primary header corruption (error: ~p). Using secondary header.", [Error]),
            {ok, Header2};
        _ ->
            % error reading secondary header too
            % return the error, no need to log anything as the caller will
            % be responsible for dealing with the error.
            Error
        end
    end,
    case Result of
    {ok, {pointer_to_header_data, Ptr}} ->
        pread_term(Fd, Ptr);
    _ ->
        Result
    end.

% 09 UPGRADE CODE
% Validate one 2KB header segment: fixed prefix, then term bytes, then a
% trailing 16-byte md5 over the term bytes.
extract_header(Prefix, Bin) ->
    SizeOfPrefix = size(Prefix),
    SizeOfTermBin = ?HEADER_SIZE -
        SizeOfPrefix -
        16, % md5 sig

    <<HeaderPrefix:SizeOfPrefix/binary, TermBin:SizeOfTermBin/binary, Sig:16/binary>> = Bin,

    % check the header prefix
    case HeaderPrefix of
    Prefix ->
        % check the integrity signature
        case couch_util:md5(TermBin) == Sig of
        true ->
            Header = binary_to_term(TermBin),
            {ok, Header};
        false ->
            header_corrupt
        end;
    _ ->
        unknown_header_type
    end.


% 09 UPGRADE CODE
% Write Data into both 2KB header slots (doubly-written for crash safety).
% Oversized terms are appended to the file and a pointer record is stored
% in the slots instead.
write_old_header(Fd, Prefix, Data) ->
    TermBin = term_to_binary(Data),
    % the size of all the bytes written to the header, including the md5
    % signature (16 bytes)
    FilledSize = byte_size(Prefix) + byte_size(TermBin) + 16,
    {TermBin2, FilledSize2} =
    case FilledSize > ?HEADER_SIZE of
    true ->
        % too big!
        {ok, Pos} = append_binary(Fd, TermBin),
        PtrBin = term_to_binary({pointer_to_header_data, Pos}),
        {PtrBin, byte_size(Prefix) + byte_size(PtrBin) + 16};
    false ->
        {TermBin, FilledSize}
    end,
    ok = file:sync(Fd),
    % pad out the header with zeros, then take the md5 hash
    PadZeros = <<0:(8*(?HEADER_SIZE - FilledSize2))>>,
    Sig = couch_util:md5([TermBin2, PadZeros]),
    % now we assemble the final header binary and write to disk
    WriteBin = <<Prefix/binary, TermBin2/binary, PadZeros/binary, Sig/binary>>,
    ?HEADER_SIZE = size(WriteBin), % sanity check
    DblWriteBin = [WriteBin, WriteBin],
    ok = file:pwrite(Fd, 0, DblWriteBin),
    ok = file:sync(Fd).


handle_cast(close, Fd) ->
    {stop,normal,Fd}.


code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
+
+% A linked process exited normally; keep serving the file.
+handle_info({'EXIT', _, normal}, Fd) ->
+    {noreply, Fd};
+% Abnormal exit of a linked process takes this server down with the same
+% reason.
+handle_info({'EXIT', _, Reason}, Fd) ->
+    {stop, Reason, Fd}.
+
+
+% Scan backwards, block by block, for the newest block that begins with a
+% valid header (see load_header/2). Block is a block index, not a byte offset.
+find_header(_Fd, -1) ->
+    no_valid_header;
+find_header(Fd, Block) ->
+    % load_header/2 fails with badmatch on anything that is not a valid,
+    % md5-verified header; the failure is caught and the scan continues.
+    case (catch load_header(Fd, Block)) of
+    {ok, Bin} ->
+        {ok, Bin};
+    _Error ->
+        find_header(Fd, Block -1)
+    end.
+
+% Read one header candidate at the start of the given block.
+% On-disk layout (written by the {write_header, Bin} handle_call clause):
+% a <<1>> marker byte, a 32-bit big length, then md5 (16 bytes) ++ header
+% binary, with a block-prefix byte interleaved at every ?SIZE_BLOCK boundary.
+% Every pattern match here doubles as an integrity assertion: a wrong marker,
+% short read, or md5 mismatch raises badmatch (caught by find_header/2).
+load_header(Fd, Block) ->
+    {ok, <<1>>} = file:pread(Fd, Block*?SIZE_BLOCK, 1),
+    {ok, <<HeaderLen:32/integer>>} = file:pread(Fd, (Block*?SIZE_BLOCK) + 1, 4),
+    TotalBytes = calculate_total_read_len(1, HeaderLen),
+    {ok, <<RawBin:TotalBytes/binary>>} =
+        file:pread(Fd, (Block*?SIZE_BLOCK) + 5, TotalBytes),
+    <<Md5Sig:16/binary, HeaderBin/binary>> =
+        iolist_to_binary(remove_block_prefixes(1, RawBin)),
+    % Recompute the md5 and assert equality via pattern match.
+    Md5Sig = couch_util:md5(HeaderBin),
+    {ok, HeaderBin}.
+
+% Read Len logical bytes starting at byte offset Pos, stripping the
+% block-prefix bytes that the append path inserts. Returns the data as an
+% iolist together with the position just past the raw bytes consumed.
+-spec read_raw_iolist_int(#file{}, Pos::non_neg_integer(), Len::non_neg_integer()) ->
+    {Data::iolist(), CurPos::non_neg_integer()}.
+read_raw_iolist_int(#file{fd=Fd, tail_append_begin=TAB}, Pos, Len) ->
+    BlockOffset = Pos rem ?SIZE_BLOCK,
+    TotalBytes = calculate_total_read_len(BlockOffset, Len),
+    {ok, <<RawBin:TotalBytes/binary>>} = file:pread(Fd, Pos, TotalBytes),
+    if Pos >= TAB ->
+        {remove_block_prefixes(BlockOffset, RawBin), Pos + TotalBytes};
+    true ->
+        % 09 UPGRADE CODE
+        % Data written before tail_append_begin predates the block-prefix
+        % format, so it is returned verbatim without prefix stripping.
+        <<ReturnBin:Len/binary, _/binary>> = RawBin,
+        {[ReturnBin], Pos + Len}
+    end.
+
+% Split the leading 16-byte md5 signature off an iolist, returning it as a
+% binary alongside the remaining payload iolist.
+-spec extract_md5(iolist()) -> {binary(), iolist()}.
+extract_md5(FullIoList) ->
+    {Md5List, IoList} = split_iolist(FullIoList, 16, []),
+    {iolist_to_binary(Md5List), IoList}.
+
+% How many raw file bytes must be read to obtain FinalLen logical bytes,
+% accounting for the 1-byte block prefix at every ?SIZE_BLOCK boundary.
+calculate_total_read_len(0, FinalLen) ->
+    % Starting exactly on a block boundary: one extra prefix byte up front.
+    calculate_total_read_len(1, FinalLen) + 1;
+calculate_total_read_len(BlockOffset, FinalLen) ->
+    case ?SIZE_BLOCK - BlockOffset of
+    BlockLeft when BlockLeft >= FinalLen ->
+        FinalLen;
+    BlockLeft ->
+        % One prefix byte per additional (?SIZE_BLOCK - 1)-byte data span,
+        % rounding up unless the data ends exactly on a span boundary.
+        FinalLen + ((FinalLen - BlockLeft) div (?SIZE_BLOCK -1)) +
+            if ((FinalLen - BlockLeft) rem (?SIZE_BLOCK -1)) =:= 0 -> 0;
+                true -> 1 end
+    end.
+
+% Strip the 1-byte prefix found at the start of every ?SIZE_BLOCK-sized block
+% from raw file data, returning the payload as an iolist. BlockOffset is the
+% offset within the current block at which Bin begins.
+remove_block_prefixes(_BlockOffset, <<>>) ->
+    [];
+remove_block_prefixes(0, <<_BlockPrefix,Rest/binary>>) ->
+    remove_block_prefixes(1, Rest);
+remove_block_prefixes(BlockOffset, Bin) ->
+    BlockBytesAvailable = ?SIZE_BLOCK - BlockOffset,
+    % byte_size/1 rather than the generic size/1: Bin is always a binary here.
+    case byte_size(Bin) of
+    Size when Size > BlockBytesAvailable ->
+        <<DataBlock:BlockBytesAvailable/binary,Rest/binary>> = Bin,
+        [DataBlock | remove_block_prefixes(0, Rest)];
+    _Size ->
+        [Bin]
+    end.
+
+% Inverse of remove_block_prefixes/2: insert a <<0>> prefix byte at every
+% ?SIZE_BLOCK boundary of IoList, starting BlockOffset bytes into the current
+% block.
+make_blocks(_BlockOffset, []) ->
+    [];
+make_blocks(0, IoList) ->
+    [<<0>> | make_blocks(1, IoList)];
+make_blocks(BlockOffset, IoList) ->
+    case split_iolist(IoList, (?SIZE_BLOCK - BlockOffset), []) of
+    {Begin, End} ->
+        [Begin | make_blocks(0, End)];
+    _SplitRemaining ->
+        % Fewer than (?SIZE_BLOCK - BlockOffset) bytes remain, so no block
+        % boundary is crossed and no prefix is needed.
+        IoList
+    end.
+
+%% @doc Returns a tuple where the first element contains the leading SplitAt
+%% bytes of the original iolist, and the 2nd element is the tail. If SplitAt
+%% is larger than byte_size(IoList), return the difference.
+-spec split_iolist(IoList::iolist(), SplitAt::non_neg_integer(), Acc::list()) ->
+    {iolist(), iolist()} | non_neg_integer().
+split_iolist(List, 0, BeginAcc) ->
+    {lists:reverse(BeginAcc), List};
+split_iolist([], SplitAt, _BeginAcc) ->
+    % Ran out of data: report how many bytes are still owed.
+    SplitAt;
+split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) when SplitAt > byte_size(Bin) ->
+    split_iolist(Rest, SplitAt - byte_size(Bin), [Bin | BeginAcc]);
+split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) ->
+    % The split point falls inside this binary.
+    <<Begin:SplitAt/binary,End/binary>> = Bin,
+    split_iolist([End | Rest], 0, [Begin | BeginAcc]);
+split_iolist([Sublist| Rest], SplitAt, BeginAcc) when is_list(Sublist) ->
+    case split_iolist(Sublist, SplitAt, BeginAcc) of
+    {Begin, End} ->
+        {Begin, [End | Rest]};
+    SplitRemaining ->
+        % Sublist was consumed entirely and SplitRemaining bytes are still
+        % needed from Rest. (The original spelled this amount as
+        % SplitAt - (SplitAt - SplitRemaining), which is the same value.)
+        split_iolist(Rest, SplitRemaining, [Sublist | BeginAcc])
+    end;
+split_iolist([Byte | Rest], SplitAt, BeginAcc) when is_integer(Byte) ->
+    split_iolist(Rest, SplitAt - 1, [Byte | BeginAcc]).
diff --git a/apps/couch/src/couch_httpd.erl b/apps/couch/src/couch_httpd.erl new file mode 100644 index 00000000..8a5c699a --- /dev/null +++ b/apps/couch/src/couch_httpd.erl @@ -0,0 +1,988 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_httpd). +-include("couch_db.hrl"). + +-export([start_link/0, stop/0, handle_request/7]). + +-export([header_value/2,header_value/3,qs_value/2,qs_value/3,qs/1,path/1,absolute_uri/2,body_length/1]). +-export([verify_is_server_admin/1,unquote/1,quote/1,recv/2,recv_chunked/4,error_info/1]). +-export([make_fun_spec_strs/1, make_arity_1_fun/1]). +-export([parse_form/1,json_body/1,json_body_obj/1,body/1,doc_etag/1, make_etag/1, etag_respond/3]). +-export([primary_header_value/2,partition/1,serve_file/3,serve_file/4, server_header/0]). +-export([start_chunked_response/3,send_chunk/2,log_request/2]). +-export([start_response_length/4, send/2]). +-export([start_json_response/2, start_json_response/3, end_json_response/1]). +-export([send_response/4,send_method_not_allowed/2,send_error/4, send_redirect/2,send_chunked_error/2]). +-export([send_json/2,send_json/3,send_json/4,last_chunk/1,parse_multipart_request/3]). +-export([accepted_encodings/1,handle_request_int/5,validate_referer/1,validate_ctype/2]). + +start_link() -> + % read config and register for configuration changes + + % just stop if one of the config settings change. couch_server_sup + % will restart us and then we will pick up the new settings. 
+ + BindAddress = couch_config:get("httpd", "bind_address", any), + Port = couch_config:get("httpd", "port", "5984"), + MaxConnections = couch_config:get("httpd", "max_connections", "2048"), + VirtualHosts = couch_config:get("vhosts"), + VhostGlobals = re:split( + couch_config:get("httpd", "vhost_global_handlers", ""), + ", ?", + [{return, list}] + ), + DefaultSpec = "{couch_httpd_db, handle_request}", + DefaultFun = make_arity_1_fun( + couch_config:get("httpd", "default_handler", DefaultSpec) + ), + + UrlHandlersList = lists:map( + fun({UrlKey, SpecStr}) -> + {?l2b(UrlKey), make_arity_1_fun(SpecStr)} + end, couch_config:get("httpd_global_handlers")), + + DbUrlHandlersList = lists:map( + fun({UrlKey, SpecStr}) -> + {?l2b(UrlKey), make_arity_2_fun(SpecStr)} + end, couch_config:get("httpd_db_handlers")), + + DesignUrlHandlersList = lists:map( + fun({UrlKey, SpecStr}) -> + {?l2b(UrlKey), make_arity_3_fun(SpecStr)} + end, couch_config:get("httpd_design_handlers")), + + UrlHandlers = dict:from_list(UrlHandlersList), + DbUrlHandlers = dict:from_list(DbUrlHandlersList), + DesignUrlHandlers = dict:from_list(DesignUrlHandlersList), + Loop = fun(Req)-> + apply(?MODULE, handle_request, [ + Req, DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers, + VirtualHosts, VhostGlobals + ]) + end, + + % and off we go + + {ok, Pid} = case mochiweb_http:start([ + {loop, Loop}, + {name, ?MODULE}, + {ip, BindAddress}, + {port, Port}, + {max, MaxConnections} + ]) of + {ok, MochiPid} -> {ok, MochiPid}; + {error, Reason} -> + io:format("Failure to start Mochiweb: ~s~n",[Reason]), + throw({error, Reason}) + end, + + ok = couch_config:register( + fun("httpd", "bind_address") -> + ?MODULE:stop(); + ("httpd", "port") -> + ?MODULE:stop(); + ("httpd", "max_connections") -> + ?MODULE:stop(); + ("httpd", "default_handler") -> + ?MODULE:stop(); + ("httpd_global_handlers", _) -> + ?MODULE:stop(); + ("httpd_db_handlers", _) -> + ?MODULE:stop(); + ("vhosts", _) -> + ?MODULE:stop() + end, Pid), + + 
{ok, Pid}. + +% SpecStr is a string like "{my_module, my_fun}" +% or "{my_module, my_fun, <<"my_arg">>}" +make_arity_1_fun(SpecStr) -> + case couch_util:parse_term(SpecStr) of + {ok, {Mod, Fun, SpecArg}} -> + fun(Arg) -> Mod:Fun(Arg, SpecArg) end; + {ok, {Mod, Fun}} -> + fun(Arg) -> Mod:Fun(Arg) end + end. + +make_arity_2_fun(SpecStr) -> + case couch_util:parse_term(SpecStr) of + {ok, {Mod, Fun, SpecArg}} -> + fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end; + {ok, {Mod, Fun}} -> + fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end + end. + +make_arity_3_fun(SpecStr) -> + case couch_util:parse_term(SpecStr) of + {ok, {Mod, Fun, SpecArg}} -> + fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end; + {ok, {Mod, Fun}} -> + fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end + end. + +% SpecStr is "{my_module, my_fun}, {my_module2, my_fun2}" +make_fun_spec_strs(SpecStr) -> + re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]). + +stop() -> + mochiweb_http:stop(?MODULE). + +%% + +% if there's a vhost definition that matches the request, redirect internally +redirect_to_vhost(MochiReq, DefaultFun, + UrlHandlers, DbUrlHandlers, DesignUrlHandlers, VhostTarget) -> + + Path = MochiReq:get(raw_path), + Target = VhostTarget ++ Path, + ?LOG_DEBUG("Vhost Target: '~p'~n", [Target]), + % build a new mochiweb request + MochiReq1 = mochiweb_request:new(MochiReq:get(socket), + MochiReq:get(method), + Target, + MochiReq:get(version), + MochiReq:get(headers)), + % cleanup, It force mochiweb to reparse raw uri. + MochiReq1:cleanup(), + + handle_request_int(MochiReq1, DefaultFun, + UrlHandlers, DbUrlHandlers, DesignUrlHandlers). 
+ +handle_request(MochiReq, DefaultFun, + UrlHandlers, DbUrlHandlers, DesignUrlHandlers, VirtualHosts, VhostGlobals) -> + + % grab Host from Req + Vhost = MochiReq:get_header_value("Host"), + + % find Vhost in config + case couch_util:get_value(Vhost, VirtualHosts) of + undefined -> % business as usual + handle_request_int(MochiReq, DefaultFun, + UrlHandlers, DbUrlHandlers, DesignUrlHandlers); + VhostTarget -> + case vhost_global(VhostGlobals, MochiReq) of + true ->% global handler for vhosts + handle_request_int(MochiReq, DefaultFun, + UrlHandlers, DbUrlHandlers, DesignUrlHandlers); + _Else -> + % do rewrite + redirect_to_vhost(MochiReq, DefaultFun, + UrlHandlers, DbUrlHandlers, DesignUrlHandlers, VhostTarget) + end + end. + + +handle_request_int(MochiReq, DefaultFun, + UrlHandlers, DbUrlHandlers, DesignUrlHandlers) -> + Begin = now(), + AuthenticationSrcs = make_fun_spec_strs( + couch_config:get("httpd", "authentication_handlers")), + % for the path, use the raw path with the query string and fragment + % removed, but URL quoting left intact + RawUri = MochiReq:get(raw_path), + {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri), + + HandlerKey = + case mochiweb_util:partition(Path, "/") of + {"", "", ""} -> + <<"/">>; % Special case the root url handler + {FirstPart, _, _} -> + list_to_binary(FirstPart) + end, + ?LOG_DEBUG("~p ~s ~p~nHeaders: ~p", [ + MochiReq:get(method), + RawUri, + MochiReq:get(version), + mochiweb_headers:to_list(MochiReq:get(headers)) + ]), + + Method1 = + case MochiReq:get(method) of + % already an atom + Meth when is_atom(Meth) -> Meth; + + % Non standard HTTP verbs aren't atoms (COPY, MOVE etc) so convert when + % possible (if any module references the atom, then it's existing). 
+ Meth -> couch_util:to_existing_atom(Meth) + end, + increment_method_stats(Method1), + + % allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-METHOD-OVERRIDE header + MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"), + Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "COPY"]) of + true -> + ?LOG_INFO("MethodOverride: ~s (real method was ~s)", [MethodOverride, Method1]), + case Method1 of + 'POST' -> couch_util:to_existing_atom(MethodOverride); + _ -> + % Ignore X-HTTP-Method-Override when the original verb isn't POST. + % I'd like to send a 406 error to the client, but that'd require a nasty refactor. + % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>}) + Method1 + end; + _ -> Method1 + end, + + % alias HEAD to GET as mochiweb takes care of stripping the body + Method = case Method2 of + 'HEAD' -> 'GET'; + Other -> Other + end, + + HttpReq = #httpd{ + mochi_req = MochiReq, + peer = MochiReq:get(peer), + method = Method, + path_parts = [list_to_binary(couch_httpd:unquote(Part)) + || Part <- string:tokens(Path, "/")], + db_url_handlers = DbUrlHandlers, + design_url_handlers = DesignUrlHandlers, + default_fun = DefaultFun, + url_handlers = UrlHandlers + }, + + HandlerFun = couch_util:dict_find(HandlerKey, UrlHandlers, DefaultFun), + + {ok, Resp} = + try + case authenticate_request(HttpReq, AuthenticationSrcs) of + #httpd{} = Req -> + HandlerFun(Req); + Response -> + Response + end + catch + throw:{http_head_abort, Resp0} -> + {ok, Resp0}; + throw:{invalid_json, S} -> + ?LOG_ERROR("attempted upload of invalid JSON (set log_level to debug to log it)", []), + ?LOG_DEBUG("Invalid JSON: ~p",[S]), + send_error(HttpReq, {bad_request, "invalid UTF-8 JSON"}); + throw:unacceptable_encoding -> + ?LOG_ERROR("unsupported encoding method for the response", []), + send_error(HttpReq, {not_acceptable, "unsupported encoding"}); + 
throw:bad_accept_encoding_value -> + ?LOG_ERROR("received invalid Accept-Encoding header", []), + send_error(HttpReq, bad_request); + exit:normal -> + exit(normal); + throw:Error -> + ?LOG_DEBUG("Minor error in HTTP request: ~p",[Error]), + ?LOG_DEBUG("Stacktrace: ~p",[erlang:get_stacktrace()]), + send_error(HttpReq, Error); + error:badarg -> + ?LOG_ERROR("Badarg error in HTTP request",[]), + ?LOG_INFO("Stacktrace: ~p",[erlang:get_stacktrace()]), + send_error(HttpReq, badarg); + error:function_clause -> + ?LOG_ERROR("function_clause error in HTTP request",[]), + ?LOG_INFO("Stacktrace: ~p",[erlang:get_stacktrace()]), + send_error(HttpReq, function_clause); + Tag:Error -> + ?LOG_ERROR("Uncaught error in HTTP request: ~p",[{Tag, Error}]), + ?LOG_INFO("Stacktrace: ~p",[erlang:get_stacktrace()]), + send_error(HttpReq, Error) + end, + RequestTime = round(timer:now_diff(now(), Begin)/1000), + couch_stats_collector:record({couchdb, request_time}, RequestTime), + couch_stats_collector:increment({httpd, requests}), + {ok, Resp}. + +% Try authentication handlers in order until one sets a user_ctx +% the auth funs also have the option of returning a response +% move this to couch_httpd_auth? +authenticate_request(#httpd{user_ctx=#user_ctx{}} = Req, _AuthSrcs) -> + Req; +authenticate_request(#httpd{} = Req, []) -> + case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of + "true" -> + throw({unauthorized, <<"Authentication required.">>}); + "false" -> + Req#httpd{user_ctx=#user_ctx{}} + end; +authenticate_request(#httpd{} = Req, [AuthSrc|Rest]) -> + AuthFun = make_arity_1_fun(AuthSrc), + R = case AuthFun(Req) of + #httpd{user_ctx=#user_ctx{}=UserCtx}=Req2 -> + Req2#httpd{user_ctx=UserCtx#user_ctx{handler=?l2b(AuthSrc)}}; + Else -> Else + end, + authenticate_request(R, Rest); +authenticate_request(Response, _AuthSrcs) -> + Response. + +increment_method_stats(Method) -> + couch_stats_collector:increment({httpd_request_methods, Method}). 
+
+% Returns true when the first path segment of the request matches one of the
+% configured vhost_global_handlers; if so, then it will not be rewritten, but
+% will run as a normal couchdb request. Normally you'd use this for _uuids,
+% _utils and a few of the others you want to keep available on vhosts. You
+% can also use it to make databases 'global'.
+vhost_global(VhostGlobals, MochiReq) ->
+    "/" ++ Path = MochiReq:get(path),
+    Front = case partition(Path) of
+    {"", "", ""} ->
+        "/"; % Special case the root url handler
+    {FirstPart, _, _} ->
+        FirstPart
+    end,
+    % true iff any V in VhostGlobals equals Front.
+    [true] == [true||V <- VhostGlobals, V == Front].
+
+% Require a Referer header whose host matches this request's host (as
+% computed by host_for_request/1, which honors X-Forwarded-Host); throws
+% bad_request otherwise. NOTE(review): presumably a cross-site request
+% guard for form-accepting handlers — confirm with callers.
+validate_referer(Req) ->
+    Host = host_for_request(Req),
+    Referer = header_value(Req, "Referer", fail),
+    case Referer of
+    fail ->
+        throw({bad_request, <<"Referer header required.">>});
+    Referer ->
+        {_,RefererHost,_,_,_} = mochiweb_util:urlsplit(Referer),
+        if
+            RefererHost =:= Host -> ok;
+            true -> throw({bad_request, <<"Referer header must match host.">>})
+        end
+    end.
+
+% Assert the request's Content-Type equals Ctype. A single ";"-delimited
+% parameter suffix (e.g. a charset) is tolerated; anything else (missing
+% header, different type, multiple parameters) throws {bad_ctype, _}.
+validate_ctype(Req, Ctype) ->
+    case couch_httpd:header_value(Req, "Content-Type") of
+    undefined ->
+        throw({bad_ctype, "Content-Type must be "++Ctype});
+    ReqCtype ->
+        % ?LOG_ERROR("Ctype ~p ReqCtype ~p",[Ctype,ReqCtype]),
+        case re:split(ReqCtype, ";", [{return, list}]) of
+        [Ctype] -> ok;
+        [Ctype, _Rest] -> ok;
+        _Else ->
+            throw({bad_ctype, "Content-Type must be "++Ctype})
+        end
+    end.
+
+% Utilities
+
+% Split Path at the first "/": {Before, Separator, After} (mochiweb semantics).
+partition(Path) ->
+    mochiweb_util:partition(Path, "/").
+
+% Raw request-header lookup; returns undefined when the header is absent.
+header_value(#httpd{mochi_req=MochiReq}, Key) ->
+    MochiReq:get_header_value(Key).
+
+% Header lookup with an explicit default for the missing case.
+header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
+    case MochiReq:get_header_value(Key) of
+    undefined -> Default;
+    Value -> Value
+    end.
+
+% Header value with any ";parameter" portion stripped (mochiweb primary value).
+primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
+    MochiReq:get_primary_header_value(Key).
+
+% Content codings acceptable to this client, restricted to gzip/identity.
+% Throws if the Accept-Encoding header is malformed or rules out both.
+accepted_encodings(#httpd{mochi_req=MochiReq}) ->
+    case MochiReq:accepted_encodings(["gzip", "identity"]) of
+    bad_accept_encoding_value ->
+        throw(bad_accept_encoding_value);
+    [] ->
+        throw(unacceptable_encoding);
+    EncList ->
+        EncList
+    end.
+ +serve_file(Req, RelativePath, DocumentRoot) -> + serve_file(Req, RelativePath, DocumentRoot, []). + +serve_file(#httpd{mochi_req=MochiReq}=Req, RelativePath, DocumentRoot, ExtraHeaders) -> + {ok, MochiReq:serve_file(RelativePath, DocumentRoot, + server_header() ++ couch_httpd_auth:cookie_auth_header(Req, []) ++ ExtraHeaders)}. + +qs_value(Req, Key) -> + qs_value(Req, Key, undefined). + +qs_value(Req, Key, Default) -> + couch_util:get_value(Key, qs(Req), Default). + +qs(#httpd{mochi_req=MochiReq}) -> + MochiReq:parse_qs(). + +path(#httpd{mochi_req=MochiReq}) -> + MochiReq:get(path). + +host_for_request(#httpd{mochi_req=MochiReq}) -> + XHost = couch_config:get("httpd", "x_forwarded_host", "X-Forwarded-Host"), + case MochiReq:get_header_value(XHost) of + undefined -> + case MochiReq:get_header_value("Host") of + undefined -> + {ok, {Address, Port}} = inet:sockname(MochiReq:get(socket)), + inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port); + Value1 -> + Value1 + end; + Value -> Value + end. + +absolute_uri(#httpd{mochi_req=MochiReq}=Req, Path) -> + Host = host_for_request(Req), + XSsl = couch_config:get("httpd", "x_forwarded_ssl", "X-Forwarded-Ssl"), + Scheme = case MochiReq:get_header_value(XSsl) of + "on" -> "https"; + _ -> + XProto = couch_config:get("httpd", "x_forwarded_proto", "X-Forwarded-Proto"), + case MochiReq:get_header_value(XProto) of + % Restrict to "https" and "http" schemes only + "https" -> "https"; + _ -> "http" + end + end, + Scheme ++ "://" ++ Host ++ Path. + +unquote(UrlEncodedString) -> + mochiweb_util:unquote(UrlEncodedString). + +quote(UrlDecodedString) -> + mochiweb_util:quote_plus(UrlDecodedString). + +parse_form(#httpd{mochi_req=MochiReq}) -> + mochiweb_multipart:parse_form(MochiReq). + +recv(#httpd{mochi_req=MochiReq}, Len) -> + MochiReq:recv(Len). 
+ +recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) -> + % Fun is called once with each chunk + % Fun({Length, Binary}, State) + % called with Length == 0 on the last time. + MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState). + +body_length(Req) -> + case header_value(Req, "Transfer-Encoding") of + undefined -> + case header_value(Req, "Content-Length") of + undefined -> undefined; + Length -> list_to_integer(Length) + end; + "chunked" -> chunked; + Unknown -> {unknown_transfer_encoding, Unknown} + end. + +body(#httpd{mochi_req=MochiReq, req_body=ReqBody}) -> + case ReqBody of + undefined -> + % Maximum size of document PUT request body (4GB) + MaxSize = list_to_integer( + couch_config:get("couchdb", "max_document_size", "4294967296")), + MochiReq:recv_body(MaxSize); + _Else -> + ReqBody + end. + +json_body(Httpd) -> + ?JSON_DECODE(body(Httpd)). + +json_body_obj(Httpd) -> + case json_body(Httpd) of + {Props} -> {Props}; + _Else -> + throw({bad_request, "Request body must be a JSON object"}) + end. + + + +doc_etag(#doc{revs={Start, [DiskRev|_]}}) -> + "\"" ++ ?b2l(couch_doc:rev_to_str({Start, DiskRev})) ++ "\"". + +make_etag(Term) -> + <<SigInt:128/integer>> = couch_util:md5(term_to_binary(Term)), + list_to_binary("\"" ++ lists:flatten(io_lib:format("~.36B",[SigInt])) ++ "\""). + +etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) -> + etag_match(Req, binary_to_list(CurrentEtag)); + +etag_match(Req, CurrentEtag) -> + EtagsToMatch = string:tokens( + couch_httpd:header_value(Req, "If-None-Match", ""), ", "), + lists:member(CurrentEtag, EtagsToMatch). + +etag_respond(Req, CurrentEtag, RespFun) -> + case etag_match(Req, CurrentEtag) of + true -> + % the client has this in their cache. + couch_httpd:send_response(Req, 304, [{"Etag", CurrentEtag}], <<>>); + false -> + % Run the function. + RespFun() + end. 
+ +verify_is_server_admin(#httpd{user_ctx=UserCtx}) -> + verify_is_server_admin(UserCtx); +verify_is_server_admin(#user_ctx{roles=Roles}) -> + case lists:member(<<"_admin">>, Roles) of + true -> ok; + false -> throw({unauthorized, <<"You are not a server admin.">>}) + end. + +log_request(#httpd{mochi_req=MochiReq,peer=Peer}, Code) -> + ?LOG_INFO("~s - - ~p ~s ~B", [ + Peer, + couch_util:to_existing_atom(MochiReq:get(method)), + MochiReq:get(raw_path), + couch_util:to_integer(Code) + ]). + + +start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Length) -> + log_request(Req, Code), + couch_stats_collector:increment({httpd_status_codes, Code}), + Resp = MochiReq:start_response_length({Code, Headers ++ server_header() ++ couch_httpd_auth:cookie_auth_header(Req, Headers), Length}), + case MochiReq:get(method) of + 'HEAD' -> throw({http_head_abort, Resp}); + _ -> ok + end, + {ok, Resp}. + +send(Resp, Data) -> + Resp:send(Data), + {ok, Resp}. + +no_resp_conn_header([]) -> + true; +no_resp_conn_header([{Hdr, _}|Rest]) -> + case string:to_lower(Hdr) of + "connection" -> false; + _ -> no_resp_conn_header(Rest) + end. + +http_1_0_keep_alive(Req, Headers) -> + KeepOpen = Req:should_close() == false, + IsHttp10 = Req:get(version) == {1, 0}, + NoRespHeader = no_resp_conn_header(Headers), + case KeepOpen andalso IsHttp10 andalso NoRespHeader of + true -> [{"Connection", "Keep-Alive"} | Headers]; + false -> Headers + end. + +start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) -> + log_request(Req, Code), + couch_stats_collector:increment({httpd_status_codes, Code}), + Headers2 = http_1_0_keep_alive(MochiReq, Headers), + Resp = MochiReq:respond({Code, Headers2 ++ server_header() ++ couch_httpd_auth:cookie_auth_header(Req, Headers2), chunked}), + case MochiReq:get(method) of + 'HEAD' -> throw({http_head_abort, Resp}); + _ -> ok + end, + {ok, Resp}. 
+ +send_chunk(Resp, Data) -> + case iolist_size(Data) of + 0 -> ok; % do nothing + _ -> Resp:write_chunk(Data) + end, + {ok, Resp}. + +last_chunk(Resp) -> + Resp:write_chunk([]), + {ok, Resp}. + +send_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) -> + log_request(Req, Code), + couch_stats_collector:increment({httpd_status_codes, Code}), + Headers2 = http_1_0_keep_alive(MochiReq, Headers), + if Code >= 400 -> + ?LOG_DEBUG("httpd ~p error response:~n ~s", [Code, Body]); + true -> ok + end, + {ok, MochiReq:respond({Code, Headers2 ++ server_header() ++ couch_httpd_auth:cookie_auth_header(Req, Headers2), Body})}. + +send_method_not_allowed(Req, Methods) -> + send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>, ?l2b("Only " ++ Methods ++ " allowed")). + +send_json(Req, Value) -> + send_json(Req, 200, Value). + +send_json(Req, Code, Value) -> + send_json(Req, Code, [], Value). + +send_json(Req, Code, Headers, Value) -> + DefaultHeaders = [ + {"Content-Type", negotiate_content_type(Req)}, + {"Cache-Control", "must-revalidate"} + ], + Body = list_to_binary( + [start_jsonp(Req), ?JSON_ENCODE(Value), end_jsonp(), $\n] + ), + send_response(Req, Code, DefaultHeaders ++ Headers, Body). + +start_json_response(Req, Code) -> + start_json_response(Req, Code, []). + +start_json_response(Req, Code, Headers) -> + DefaultHeaders = [ + {"Content-Type", negotiate_content_type(Req)}, + {"Cache-Control", "must-revalidate"} + ], + start_jsonp(Req), % Validate before starting chunked. + %start_chunked_response(Req, Code, DefaultHeaders ++ Headers). + {ok, Resp} = start_chunked_response(Req, Code, DefaultHeaders ++ Headers), + case start_jsonp(Req) of + [] -> ok; + Start -> send_chunk(Resp, Start) + end, + {ok, Resp}. + +end_json_response(Resp) -> + send_chunk(Resp, end_jsonp() ++ [$\n]), + last_chunk(Resp). 
+
+% Emit the opening of a JSONP wrapper ("Callback(") when a 'callback' query
+% parameter is present AND httpd/allow_jsonp is enabled; otherwise [].
+% The decision is cached in the process dictionary under the key 'jsonp' so
+% end_jsonp/0 can emit the matching close. NOTE(review): relies on
+% start_jsonp/end_jsonp running in the same request process.
+start_jsonp(Req) ->
+    case get(jsonp) of
+        undefined -> put(jsonp, qs_value(Req, "callback", no_jsonp));
+        _ -> ok
+    end,
+    case get(jsonp) of
+        no_jsonp -> [];
+        [] -> [];
+        CallBack ->
+            try
+                % make sure jsonp is configured on (default off)
+                case couch_config:get("httpd", "allow_jsonp", "false") of
+                "true" ->
+                    validate_callback(CallBack),
+                    CallBack ++ "(";
+                _Else ->
+                    % this could throw an error message, but instead we just
+                    % ignore the jsonp parameter
+                    % throw({bad_request, <<"JSONP must be configured before using.">>})
+                    put(jsonp, no_jsonp),
+                    []
+                end
+            catch
+                Error ->
+                    % Reset the flag so end_jsonp/0 won't emit a closer, then
+                    % re-throw (validate_callback throws bad_request).
+                    put(jsonp, no_jsonp),
+                    throw(Error)
+            end
+    end.
+
+% Emit the JSONP closer ");" iff start_jsonp/1 opened a wrapper, then clear
+% the process-dictionary flag for the next request handled by this process.
+end_jsonp() ->
+    Resp = case get(jsonp) of
+        no_jsonp -> [];
+        [] -> [];
+        _ -> ");"
+    end,
+    put(jsonp, undefined),
+    Resp.
+
+% Restrict callback names to the characters [A-Za-z0-9._[]] so the reflected
+% value is safe to emit into the response; anything else throws bad_request.
+validate_callback(CallBack) when is_binary(CallBack) ->
+    validate_callback(binary_to_list(CallBack));
+validate_callback([]) ->
+    ok;
+validate_callback([Char | Rest]) ->
+    case Char of
+        _ when Char >= $a andalso Char =< $z -> ok;
+        _ when Char >= $A andalso Char =< $Z -> ok;
+        _ when Char >= $0 andalso Char =< $9 -> ok;
+        _ when Char == $. -> ok;
+        _ when Char == $_ -> ok;
+        _ when Char == $[ -> ok;
+        _ when Char == $] -> ok;
+        _ ->
+            throw({bad_request, invalid_callback})
+    end,
+    validate_callback(Rest).
+
+
+% Map an internal error term to {HttpStatusCode, ErrorName, Reason} (the
+% latter two as binaries) used to build the JSON error body. Clause order
+% matters: list reasons are normalized to binaries first, specific terms are
+% matched next, and the final two clauses are catch-alls producing a 500.
+error_info({Error, Reason}) when is_list(Reason) ->
+    error_info({Error, ?l2b(Reason)});
+error_info(bad_request) ->
+    {400, <<"bad_request">>, <<>>};
+error_info({bad_request, Reason}) ->
+    {400, <<"bad_request">>, Reason};
+error_info({query_parse_error, Reason}) ->
+    {400, <<"query_parse_error">>, Reason};
+% Prior art for md5 mismatch resulting in a 400 is from AWS S3
+error_info(md5_mismatch) ->
+    {400, <<"content_md5_mismatch">>, <<"Possible message corruption.">>};
+error_info(not_found) ->
+    {404, <<"not_found">>, <<"missing">>};
+error_info({not_found, Reason}) ->
+    {404, <<"not_found">>, Reason};
+error_info({not_acceptable, Reason}) ->
+    {406, <<"not_acceptable">>, Reason};
+error_info(conflict) ->
+    {409, <<"conflict">>, <<"Document update conflict.">>};
+error_info({forbidden, Msg}) ->
+    {403, <<"forbidden">>, Msg};
+error_info({unauthorized, Msg}) ->
+    {401, <<"unauthorized">>, Msg};
+error_info(file_exists) ->
+    {412, <<"file_exists">>, <<"The database could not be "
+        "created, the file already exists.">>};
+error_info({bad_ctype, Reason}) ->
+    {415, <<"bad_content_type">>, Reason};
+error_info({error, illegal_database_name}) ->
+    {400, <<"illegal_database_name">>, <<"Only lowercase characters (a-z), "
+        "digits (0-9), and any of the characters _, $, (, ), +, -, and / "
+        "are allowed. Must begin with a letter.">>};
+error_info({missing_stub, Reason}) ->
+    {412, <<"missing_stub">>, Reason};
+% Catch-all for unrecognized {Error, Reason} pairs.
+error_info({Error, Reason}) ->
+    {500, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
+% Catch-all for any other term.
+error_info(Error) ->
+    {500, <<"unknown_error">>, couch_util:to_binary(Error)}.
+ +error_headers(#httpd{mochi_req=MochiReq}=Req, Code, ErrorStr, ReasonStr) -> + if Code == 401 -> + % this is where the basic auth popup is triggered + case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of + undefined -> + case couch_config:get("httpd", "WWW-Authenticate", nil) of + nil -> + % If the client is a browser and the basic auth popup isn't turned on + % redirect to the session page. + case ErrorStr of + <<"unauthorized">> -> + case couch_config:get("couch_httpd_auth", "authentication_redirect", nil) of + nil -> {Code, []}; + AuthRedirect -> + case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of + "true" -> + % send the browser popup header no matter what if we are require_valid_user + {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]}; + _False -> + % if the accept header matches html, then do the redirect. else proceed as usual. + Accepts = case MochiReq:get_header_value("Accept") of + undefined -> + % According to the HTTP 1.1 spec, if the Accept + % header is missing, it means the client accepts + % all media types. + "html"; + Else -> + Else + end, + case re:run(Accepts, "\\bhtml\\b", + [{capture, none}, caseless]) of + nomatch -> + {Code, []}; + match -> + AuthRedirectBin = ?l2b(AuthRedirect), + UrlReturn = ?l2b(couch_util:url_encode(MochiReq:get(path))), + UrlReason = ?l2b(couch_util:url_encode(ReasonStr)), + {302, [{"Location", couch_httpd:absolute_uri(Req, <<AuthRedirectBin/binary,"?return=",UrlReturn/binary,"&reason=",UrlReason/binary>>)}]} + end + end + end; + _Else -> + {Code, []} + end; + Type -> + {Code, [{"WWW-Authenticate", Type}]} + end; + Type -> + {Code, [{"WWW-Authenticate", Type}]} + end; + true -> + {Code, []} + end. + +send_error(_Req, {already_sent, Resp, _Error}) -> + {ok, Resp}; + +send_error(Req, Error) -> + {Code, ErrorStr, ReasonStr} = error_info(Error), + {Code1, Headers} = error_headers(Req, Code, ErrorStr, ReasonStr), + send_error(Req, Code1, Headers, ErrorStr, ReasonStr). 
%% @doc Send a JSON error response with no extra headers.
send_error(Req, Code, ErrorStr, ReasonStr) ->
    send_error(Req, Code, [], ErrorStr, ReasonStr).

%% @doc Send the canonical {"error": ..., "reason": ...} JSON error body.
send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
    ErrorBody = {[{<<"error">>, ErrorStr},
                  {<<"reason">>, ReasonStr}]},
    send_json(Req, Code, Headers, ErrorBody).

%% @doc Emit an error onto an already-started chunked response.
%% List functions may supply a pre-rendered (e.g. HTML) body; honor it raw.
send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
    send_chunk(Resp, Reason),
    last_chunk(Resp);

send_chunked_error(Resp, Error) ->
    {Code, ErrorStr, ReasonStr} = error_info(Error),
    ErrObj = {[{<<"code">>, Code},
               {<<"error">>, ErrorStr},
               {<<"reason">>, ReasonStr}]},
    send_chunk(Resp, ?l2b([$\n, ?JSON_ENCODE(ErrObj), $\n])),
    last_chunk(Resp).

%% @doc Permanent redirect to Path (made absolute against the request host).
send_redirect(Req, Path) ->
    Location = couch_httpd:absolute_uri(Req, Path),
    send_response(Req, 301, [{"Location", Location}], <<>>).

%% Determine the appropriate Content-Type header for a JSON response
%% depending on the Accept header in the request. A request that explicitly
%% lists the correct JSON MIME type will get that type, otherwise the
%% response will have the generic MIME type "text/plain"
negotiate_content_type(#httpd{mochi_req=MochiReq}) ->
    Accepted = case MochiReq:get_header_value("Accept") of
        undefined -> [];
        AcceptHeader -> string:tokens(AcceptHeader, ", ")
    end,
    WantsJson = lists:member("application/json", Accepted),
    if
        WantsJson -> "application/json";
        true -> "text/plain;charset=utf-8"
    end.

%% @doc Build the Server response header, e.g. "CouchDB/1.0.1 (Erlang OTP/R14B)".
server_header() ->
    OtpRelease = "R" ++ integer_to_list(erlang:system_info(compat_rel)) ++ "B",
    ServerValue = lists:concat(["CouchDB/", couch_server:get_version(),
                                " (Erlang OTP/", OtpRelease, ")"]),
    [{"Server", ServerValue}].


% Parser state for streaming multipart bodies: the boundary pattern, the
% unconsumed input buffer, a fun producing more input, and the user callback.
-record(mp, {boundary, buffer, data_fun, callback}).
%% @doc Stream-parse a multipart request body. ContentType carries the
%% boundary; DataFun yields {Data, NextDataFun} chunks; Callback receives
%% {headers, Headers}, {body, Data}, body_end, and eof events, returning the
%% next callback each time (continuation-passing style).
%% Returns {RemainingBuffer, RemainingDataFun, FinalCallback}.
parse_multipart_request(ContentType, DataFun, Callback) ->
    Boundary0 = iolist_to_binary(get_boundary(ContentType)),
    % Parts are separated by CRLF + "--" + boundary; the very first boundary
    % has no leading CRLF, hence the separate pattern passed to read_until.
    Boundary = <<"\r\n--", Boundary0/binary>>,
    Mp = #mp{boundary= Boundary,
             buffer= <<>>,
             data_fun=DataFun,
             callback=Callback},
    % Discard everything up to the first boundary (preamble).
    {Mp2, _NilCallback} = read_until(Mp, <<"--", Boundary0/binary>>,
        fun(Next)-> nil_callback(Next) end),
    #mp{buffer=Buffer, data_fun=DataFun2, callback=Callback2} =
        parse_part_header(Mp2),
    {Buffer, DataFun2, Callback2}.

%% @doc A callback that ignores all data; used while skipping the preamble.
nil_callback(_Data)->
    fun(Next) -> nil_callback(Next) end.

%% @doc Extract the boundary string from a parsed (or raw) content type.
%% Deliberately crashes (case_clause / badmatch) when the boundary parameter
%% is missing or not a string — malformed multipart requests fail fast.
get_boundary({"multipart/" ++ _, Opts}) ->
    case couch_util:get_value("boundary", Opts) of
        S when is_list(S) ->
            S
    end;
get_boundary(ContentType) ->
    {"multipart/" ++ _ , Opts} = mochiweb_util:parse_header(ContentType),
    get_boundary({"multipart/", Opts}).



%% @doc Split one "Name: Value" header line into a one-element proplist
%% [{lowercased_name, ParsedValue}]; an empty line yields [].
%% NOTE: a line with no colon causes a badmatch — intentional fail-fast on
%% malformed part headers.
split_header(<<>>) ->
    [];
split_header(Line) ->
    {Name, [$: | Value]} = lists:splitwith(fun (C) -> C =/= $: end,
                                           binary_to_list(Line)),
    [{string:to_lower(string:strip(Name)),
     mochiweb_util:parse_header(Value)}].
%% @doc Feed Callback with data from the buffer/data_fun until Pattern is
%% found. find_in_binary/2 distinguishes a complete match ({exact, Skip})
%% from a possible match cut off at the buffer's end ({partial, Skip}), in
%% which case more input is pulled before deciding.
%% Returns {MpWithBufferPastPattern, FinalCallback}.
read_until(#mp{data_fun=DataFun, buffer=Buffer}=Mp, Pattern, Callback) ->
    case find_in_binary(Pattern, Buffer) of
    not_found ->
        % Whole buffer is safe to hand to the callback; refill and continue.
        Callback2 = Callback(Buffer),
        {Buffer2, DataFun2} = DataFun(),
        Buffer3 = iolist_to_binary(Buffer2),
        read_until(Mp#mp{data_fun=DataFun2,buffer=Buffer3}, Pattern, Callback2);
    {partial, 0} ->
        % Buffer starts with a prefix of Pattern; need more data to decide.
        {NewData, DataFun2} = DataFun(),
        read_until(Mp#mp{data_fun=DataFun2,
                         buffer= iolist_to_binary([Buffer,NewData])},
                   Pattern, Callback);
    {partial, Skip} ->
        % Data before the possible match is definitely not part of Pattern;
        % emit it, keep the ambiguous tail, and read more.
        <<DataChunk:Skip/binary, Rest/binary>> = Buffer,
        Callback2 = Callback(DataChunk),
        {NewData, DataFun2} = DataFun(),
        read_until(Mp#mp{data_fun=DataFun2,
                         buffer= iolist_to_binary([Rest | NewData])},
                   Pattern, Callback2);
    {exact, 0} ->
        % Pattern sits at the start: consume it, nothing to emit.
        PatternLen = size(Pattern),
        <<_:PatternLen/binary, Rest/binary>> = Buffer,
        {Mp#mp{buffer= Rest}, Callback};
    {exact, Skip} ->
        % Emit the data before the pattern, then consume the pattern.
        PatternLen = size(Pattern),
        <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer,
        Callback2 = Callback(DataChunk),
        {Mp#mp{buffer= Rest}, Callback2}
    end.


%% @doc Read up to the blank line ending a part's headers, parse them, notify
%% the user callback with {headers, Headers}, then parse the part body.
parse_part_header(#mp{callback=UserCallBack}=Mp) ->
    {Mp2, AccCallback} = read_until(Mp, <<"\r\n\r\n">>,
        fun(Next) -> acc_callback(Next, []) end),
    HeaderData = AccCallback(get_data),
    Headers =
        lists:foldl(fun(Line, Acc) ->
                split_header(Line) ++ Acc
            end, [], re:split(HeaderData,<<"\r\n">>, [])),
    NextCallback = UserCallBack({headers, Headers}),
    parse_part_body(Mp2#mp{callback=NextCallback}).

%% @doc Stream a part's body to the callback until the next boundary, then
%% emit body_end and either eof (last part, boundary followed by "--") or
%% recurse into the next part's headers.
parse_part_body(#mp{boundary=Prefix, callback=Callback}=Mp) ->
    {Mp2, WrappedCallback} = read_until(Mp, Prefix,
        fun(Data) -> body_callback_wrapper(Data, Callback) end),
    Callback2 = WrappedCallback(get_callback),
    Callback3 = Callback2(body_end),
    % The Callback3 in the {last, ...} pattern re-matches the bound value,
    % asserting check_for_last did not alter the callback field.
    case check_for_last(Mp2#mp{callback=Callback3}) of
    {last, #mp{callback=Callback3}=Mp3} ->
        Mp3#mp{callback=Callback3(eof)};
    {more, Mp3} ->
        parse_part_header(Mp3)
    end.
%% @doc Accumulating callback: each data chunk is prepended to Acc; sending
%% the get_data sentinel returns the accumulated chunks as one binary
%% (reversed back into arrival order).
acc_callback(get_data, Acc)->
    iolist_to_binary(lists:reverse(Acc));
acc_callback(Data, Acc)->
    fun(Next) -> acc_callback(Next, [Data | Acc]) end.

%% @doc Wrap a user callback so plain chunks become {body, Data} events; the
%% get_callback sentinel unwraps and returns the current user callback.
body_callback_wrapper(get_callback, Callback) ->
    Callback;
body_callback_wrapper(Data, Callback) ->
    Callback2 = Callback({body, Data}),
    fun(Next) -> body_callback_wrapper(Next, Callback2) end.


%% @doc After a boundary: "--" means this was the terminating boundary
%% ({last, Mp}); anything else (two or more other bytes) means another part
%% follows ({more, Mp}). With fewer than two bytes buffered, pull more data.
check_for_last(#mp{buffer=Buffer, data_fun=DataFun}=Mp) ->
    case Buffer of
    <<"--",_/binary>> -> {last, Mp};
    <<_, _, _/binary>> -> {more, Mp};
    _ -> % not long enough
        {Data, DataFun2} = DataFun(),
        check_for_last(Mp#mp{buffer= <<Buffer/binary, Data/binary>>,
                             data_fun = DataFun2})
    end.

%% @doc Search for binary B inside Data.
%% Returns {exact, Offset} when B occurs whole at Offset, {partial, Offset}
%% when a proper prefix of B ends exactly at the end of Data (caller must
%% fetch more input to decide), or not_found.
%% Uses byte_size/1 rather than the ambiguous size/1 BIF: the arguments here
%% are always binaries, and byte_size is the precise, guard-safe choice.
find_in_binary(B, Data) when byte_size(B) > 0 ->
    case byte_size(Data) - byte_size(B) of
    Last when Last < 0 ->
        % Data is shorter than B; only a partial match is possible.
        partial_find(B, Data, 0, byte_size(Data));
    Last ->
        find_in_binary(B, byte_size(B), Data, 0, Last)
    end.

%% Scan all full-match positions 0..Last; past Last, fall back to looking for
%% a prefix of B at the tail of D.
find_in_binary(B, BS, D, N, Last) when N =< Last->
    case D of
    <<_:N/binary, B:BS/binary, _/binary>> ->
        {exact, N};
    _ ->
        find_in_binary(B, BS, D, 1 + N, Last)
    end;
find_in_binary(B, BS, D, N, Last) when N =:= 1 + Last ->
    partial_find(B, D, N, BS - 1).

%% @doc Try successively shorter prefixes of B (length K down to 1) as a
%% suffix of D starting at offset N.
partial_find(_B, _D, _N, 0) ->
    not_found;
partial_find(B, D, N, K) ->
    <<B1:K/binary, _/binary>> = B,
    case D of
    <<_Skip:N/binary, B1/binary>> ->
        {partial, N};
    _ ->
        partial_find(B, D, 1 + N, K - 1)
    end.
% You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

-module(couch_httpd_auth).
-include("couch_db.hrl").

-export([default_authentication_handler/1,special_test_authentication_handler/1]).
-export([cookie_authentication_handler/1]).
-export([null_authentication_handler/1]).
-export([proxy_authentification_handler/1]).
-export([cookie_auth_header/2]).
-export([handle_session_req/1]).

-import(couch_httpd, [header_value/2, send_json/2,send_json/4, send_method_not_allowed/2]).

%% @doc Authentication handler used only by the JavaScript test suite.
%% Credentials travel in a non-standard "X-Couch-Test-Auth" scheme inside the
%% WWW-Authenticate request header. A request without them gets full admin
%% rights so the suite can restore the real authentication setup afterwards.
special_test_authentication_handler(Req) ->
    case header_value(Req, "WWW-Authenticate") of
    "X-Couch-Test-Auth " ++ NamePass ->
        % NamePass is a colon separated string: "joe schmoe:a password".
        [Name, Pass] = re:split(NamePass, ":", [{return, list}]),
        % Fixed table of test accounts; anything else is rejected.
        KnownUsers = [{"Jan Lehnardt", "apple"},
                      {"Christopher Lenz", "dog food"},
                      {"Noah Slater", "biggiesmalls endian"},
                      {"Chris Anderson", "mp3"},
                      {"Damien Katz", "pecan pie"}],
        case lists:member({Name, Pass}, KnownUsers) of
        true ->
            Req#httpd{user_ctx=#user_ctx{name=?l2b(Name)}};
        false ->
            throw({unauthorized, <<"Name or password is incorrect.">>})
        end;
    _ ->
        % No X-Couch-Test-Auth credentials sent, give admin access so the
        % previous authentication can be restored after the test
        Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
    end.
%% @doc Extract {User, Pass} from an HTTP Basic Authorization header, or nil
%% when no usable credentials are present. The special pair "_:_" is the
%% logout sentinel and also yields nil.
%%
%% Fix: split the decoded value only on the FIRST colon. The previous code
%% used string:tokens/2, which splits on every colon and therefore rejected
%% any password containing one — RFC 2617 explicitly allows colons in the
%% password (only the user-id may not contain one).
basic_name_pw(Req) ->
    case header_value(Req, "Authorization") of
    "Basic " ++ Base64Value ->
        Decoded = ?b2l(base64:decode(Base64Value)),
        case string:chr(Decoded, $:) of
        0 ->
            % No colon at all: a non-empty value is a user with an empty
            % password (previous behavior); an empty value is no credential.
            case Decoded of
                [] -> nil;
                User -> {User, ""}
            end;
        Idx ->
            User = string:substr(Decoded, 1, Idx - 1),
            Pass = string:substr(Decoded, Idx + 1),
            case {User, Pass} of
            {"_", "_"} ->
                % special name and pass to be logged out
                nil;
            UserPass ->
                % NOTE(review): an empty user (":pass") now passes through
                % verbatim instead of being misparsed; the lookup below will
                % simply fail for it.
                UserPass
            end
        end;
    _ ->
        nil
    end.

%% @doc Validate Basic credentials against the auth cache. With no
%% credentials: anonymous access, unless admins exist and require_valid_user
%% is set. Uses couch_util:verify/2 for a constant-time hash comparison.
default_authentication_handler(Req) ->
    case basic_name_pw(Req) of
    {User, Pass} ->
        case couch_auth_cache:get_user_creds(User) of
        nil ->
            throw({unauthorized, <<"Name or password is incorrect.">>});
        UserProps ->
            UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<>>),
            PasswordHash = hash_password(?l2b(Pass), UserSalt),
            ExpectedHash = couch_util:get_value(<<"password_sha">>, UserProps, nil),
            case couch_util:verify(ExpectedHash, PasswordHash) of
            true ->
                Req#httpd{user_ctx=#user_ctx{
                    name=?l2b(User),
                    roles=couch_util:get_value(<<"roles">>, UserProps, [])
                }};
            _Else ->
                throw({unauthorized, <<"Name or password is incorrect.">>})
            end
        end;
    nil ->
        case couch_server:has_admins() of
        true ->
            Req;
        false ->
            case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
            "true" -> Req;
            % If no admins, and no user required, then everyone is admin!
            % Yay, admin party!
            _ -> Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
            end
        end
    end.

%% @doc No-op handler: every request runs with server-admin rights.
null_authentication_handler(Req) ->
    Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}.
% By default they are:
%
%   * X-Auth-CouchDB-UserName : contains the username (x_auth_username in
%     the couch_httpd_auth section)
%   * X-Auth-CouchDB-Roles : contains the user roles, a comma separated list
%     (x_auth_roles in the couch_httpd_auth section)
%   * X-Auth-CouchDB-Token : token authenticating the request (x_auth_token
%     in the couch_httpd_auth section). It is an hmac-sha1 computed from the
%     secret key and the username; the secret key must be the same on the
%     client and the couchdb node (the "secret" key of the couch_httpd_auth
%     section). The token is optional unless proxy_use_secret is true.
%
%% @doc Proxy authentication handler: builds a user_ctx from headers set by a
%% trusted upstream proxy. (The misspelled exported name is kept for
%% backwards compatibility.)
proxy_authentification_handler(Req) ->
    case proxy_auth_user(Req) of
        nil -> Req;
        Req2 -> Req2
    end.

%% @doc Read the proxy headers and return a request with a populated
%% user_ctx, or nil when the username header is absent or the token check
%% fails.
proxy_auth_user(Req) ->
    XHeaderUserName = couch_config:get("couch_httpd_auth", "x_auth_username",
                                       "X-Auth-CouchDB-UserName"),
    XHeaderRoles = couch_config:get("couch_httpd_auth", "x_auth_roles",
                                    "X-Auth-CouchDB-Roles"),
    XHeaderToken = couch_config:get("couch_httpd_auth", "x_auth_token",
                                    "X-Auth-CouchDB-Token"),
    case header_value(Req, XHeaderUserName) of
    undefined -> nil;
    UserName ->
        Roles = case header_value(Req, XHeaderRoles) of
        undefined -> [];
        Else ->
            [?l2b(R) || R <- string:tokens(Else, ",")]
        end,
        case couch_config:get("couch_httpd_auth", "proxy_use_secret", "false") of
        "true" ->
            case couch_config:get("couch_httpd_auth", "secret", nil) of
            nil ->
                % No secret configured: cannot verify, trust the proxy.
                Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}};
            Secret ->
                ExpectedToken = couch_util:to_hex(crypto:sha_mac(Secret, UserName)),
                case header_value(Req, XHeaderToken) of
                undefined -> nil;
                Token ->
                    % Fix: compare with couch_util:verify/2 (constant time)
                    % instead of ==, so the token cannot be recovered via a
                    % timing side channel; consistent with the password
                    % checks elsewhere in this module.
                    case couch_util:verify(ExpectedToken, Token) of
                    true ->
                        Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName),
                                                     roles=Roles}};
                    _ -> nil
                    end
                end
            end;
        _ ->
            Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}}
        end
    end.
%% @doc Authenticate a request from its AuthSession cookie. The cookie is
%% base64url("User:HexTimestamp:Hash") where Hash = HMAC-SHA1 over
%% "User:HexTimestamp" keyed with server secret ++ user salt. On any failure
%% (missing/garbled cookie, unknown user, expired timestamp, bad hash) the
%% request passes through unauthenticated; a malformed cookie is a 400.
cookie_authentication_handler(#httpd{mochi_req=MochiReq}=Req) ->
    case MochiReq:get_cookie_value("AuthSession") of
    undefined -> Req;
    [] -> Req;
    Cookie ->
        % The inner match asserts at least "user:time:..." shape; any decode
        % or match failure is reported as a bad_request to the client.
        [User, TimeStr | HashParts] = try
            AuthSession = couch_util:decodeBase64Url(Cookie),
            [_A, _B | _Cs] = string:tokens(?b2l(AuthSession), ":")
        catch
            _:_Error ->
                Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>,
                throw({bad_request, Reason})
        end,
        % Verify expiry and hash
        CurrentTime = make_cookie_time(),
        case couch_config:get("couch_httpd_auth", "secret", nil) of
        nil ->
            ?LOG_ERROR("cookie auth secret is not set",[]),
            Req;
        SecretStr ->
            Secret = ?l2b(SecretStr),
            case couch_auth_cache:get_user_creds(User) of
            nil -> Req;
            UserProps ->
                UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>),
                FullSecret = <<Secret/binary, UserSalt/binary>>,
                ExpectedHash = crypto:sha_mac(FullSecret, User ++ ":" ++ TimeStr),
                % The hash itself may have contained colons; rejoin the parts.
                Hash = ?l2b(string:join(HashParts, ":")),
                Timeout = to_int(couch_config:get("couch_httpd_auth", "timeout", 600)),
                ?LOG_DEBUG("timeout ~p", [Timeout]),
                case (catch erlang:list_to_integer(TimeStr, 16)) of
                TimeStamp when CurrentTime < TimeStamp + Timeout ->
                    % Timestamp parses and the session has not expired.
                    case couch_util:verify(ExpectedHash, Hash) of
                    true ->
                        TimeLeft = TimeStamp + Timeout - CurrentTime,
                        ?LOG_DEBUG("Successful cookie auth as: ~p", [User]),
                        % auth carries the secret and whether the cookie is
                        % old enough (past 10% of the timeout) to be refreshed
                        % by cookie_auth_header/2.
                        Req#httpd{user_ctx=#user_ctx{
                            name=?l2b(User),
                            roles=couch_util:get_value(<<"roles">>, UserProps, [])
                        }, auth={FullSecret, TimeLeft < Timeout*0.9}};
                    _Else ->
                        Req
                    end;
                _Else ->
                    % Expired, or TimeStr was not valid hex (catch result).
                    Req
                end
            end
        end
    end.

%% @doc Produce a refreshed AuthSession Set-Cookie header when appropriate.
cookie_auth_header(#httpd{user_ctx=#user_ctx{name=null}}, _Headers) -> [];
cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}, Headers) ->
    % Note: we only set the AuthSession cookie if:
    %  * a valid AuthSession cookie has been received
    %  * we are outside a 10% timeout window
    %  * and if an AuthSession cookie hasn't already been set e.g. by a login
    %    or logout handler.
    % The login and logout handlers need to set the AuthSession cookie
    % themselves.
    CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""),
    Cookies = mochiweb_cookies:parse_cookie(CookieHeader),
    AuthSession = couch_util:get_value("AuthSession", Cookies),
    if AuthSession == undefined ->
        TimeStamp = make_cookie_time(),
        [cookie_auth_cookie(?b2l(User), Secret, TimeStamp)];
    true ->
        []
    end;
cookie_auth_header(_Req, _Headers) -> [].

%% @doc Build the AuthSession cookie: base64url of "User:HexTime:HMAC".
cookie_auth_cookie(User, Secret, TimeStamp) ->
    SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
    Hash = crypto:sha_mac(Secret, SessionData),
    mochiweb_cookies:cookie("AuthSession",
        couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)),
        [{path, "/"}, {http_only, true}]). % TODO add {secure, true} when SSL is detected

%% @doc Hex-encoded SHA-1 of password ++ salt, as stored in the users db.
hash_password(Password, Salt) ->
    ?l2b(couch_util:to_hex(crypto:sha(<<Password/binary, Salt/binary>>))).

%% @doc Return the configured cookie-auth secret, generating and persisting
%% a random one on first use.
ensure_cookie_auth_secret() ->
    case couch_config:get("couch_httpd_auth", "secret", nil) of
    nil ->
        NewSecret = ?b2l(couch_uuids:random()),
        couch_config:set("couch_httpd_auth", "secret", NewSecret),
        NewSecret;
    Secret -> Secret
    end.
% session handlers
% Login handler with user db
% TODO this should also allow a JSON POST
%% @doc POST /_session — log in with form-encoded name/password. On success
%% sets the AuthSession cookie (or redirects when ?next= is given); on
%% failure clears it and answers 401.
handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req) ->
    ReqBody = MochiReq:recv_body(),
    Form = case MochiReq:get_primary_header_value("content-type") of
        % content type should be json
        "application/x-www-form-urlencoded" ++ _ ->
            mochiweb_util:parse_qs(ReqBody);
        _ ->
            []
    end,
    UserName = ?l2b(couch_util:get_value("name", Form, "")),
    Password = ?l2b(couch_util:get_value("password", Form, "")),
    ?LOG_DEBUG("Attempt Login: ~s",[UserName]),
    User = case couch_auth_cache:get_user_creds(UserName) of
        nil -> [];
        Result -> Result
    end,
    % For an unknown user this hashes against an empty salt and compares to
    % nil, which fails in couch_util:verify/2 below.
    UserSalt = couch_util:get_value(<<"salt">>, User, <<>>),
    PasswordHash = hash_password(Password, UserSalt),
    ExpectedHash = couch_util:get_value(<<"password_sha">>, User, nil),
    case couch_util:verify(ExpectedHash, PasswordHash) of
    true ->
        % setup the session cookie
        Secret = ?l2b(ensure_cookie_auth_secret()),
        CurrentTime = make_cookie_time(),
        % Cookie HMAC key is server secret ++ user salt, matching
        % cookie_authentication_handler/1.
        Cookie = cookie_auth_cookie(?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime),
        % TODO document the "next" feature in Futon
        {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
        nil ->
            {200, [Cookie]};
        Redirect ->
            {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
        end,
        send_json(Req#httpd{req_body=ReqBody}, Code, Headers,
            {[
                {ok, true},
                {name, couch_util:get_value(<<"name">>, User, null)},
                {roles, couch_util:get_value(<<"roles">>, User, [])}
            ]});
    _Else ->
        % clear the session
        Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}, {http_only, true}]),
        send_json(Req, 401, [Cookie], {[{error, <<"unauthorized">>},{reason, <<"Name or password is incorrect.">>}]})
    end;
% get user info
% GET /_session
%% @doc GET /_session — report the current userCtx and server auth info.
%% ?basic=true forces a 401 for anonymous users (triggers the login popup).
handle_session_req(#httpd{method='GET', user_ctx=UserCtx}=Req) ->
    Name = UserCtx#user_ctx.name,
    ForceLogin = couch_httpd:qs_value(Req, "basic", "false"),
    case {Name, ForceLogin} of
    {null, "true"} ->
        throw({unauthorized, <<"Please login.">>});
    {Name, _} ->
        send_json(Req, {[
            % remove this ok
            {ok, true},
            {<<"userCtx">>, {[
                {name, Name},
                {roles, UserCtx#user_ctx.roles}
            ]}},
            {info, {[
                {authentication_db, ?l2b(couch_config:get("couch_httpd_auth", "authentication_db"))},
                {authentication_handlers, [auth_name(H) || H <- couch_httpd:make_fun_spec_strs(
                    couch_config:get("httpd", "authentication_handlers"))]}
            ] ++ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
                    auth_name(?b2l(Handler))
                end)}}
        ]})
    end;
% logout by deleting the session
handle_session_req(#httpd{method='DELETE'}=Req) ->
    % Clearing the cookie value logs the client out; honors ?next= redirects.
    Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}, {http_only, true}]),
    {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
    nil ->
        {200, [Cookie]};
    Redirect ->
        {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
    end,
    send_json(Req, Code, Headers, {[{ok, true}]});
handle_session_req(Req) ->
    send_method_not_allowed(Req, "GET,HEAD,POST,DELETE").

%% @doc Wrap Fun(Value) as [{Key, ...}] unless Value is undefined.
maybe_value(_Key, undefined, _Fun) -> [];
maybe_value(Key, Else, Fun) ->
    [{Key, Fun(Else)}].

%% @doc Extract the short handler name from a "{module, function}"-style
%% spec string: the sixth non-word-separated token, e.g.
%% "{couch_httpd_auth, cookie_authentication_handler}" -> <<"cookie">>.
auth_name(String) when is_list(String) ->
    [_,_,_,_,_,Name|_] = re:split(String, "[\\W_]", [{return, list}]),
    ?l2b(Name).

%% @doc Coerce a config value (binary, list or integer) to an integer.
to_int(Value) when is_binary(Value) ->
    to_int(?b2l(Value));
to_int(Value) when is_list(Value) ->
    list_to_integer(Value);
to_int(Value) when is_integer(Value) ->
    Value.

%% @doc Wall-clock seconds used for cookie timestamps.
%% NOTE(review): erlang:now/0 is deprecated in modern OTP; at this code's
%% vintage it was the standard call — revisit if the OTP floor is raised.
make_cookie_time() ->
    {NowMS, NowS, _} = erlang:now(),
    NowMS * 1000000 + NowS.
% You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

-module(couch_httpd_db).
-include("couch_db.hrl").

-export([handle_request/1, handle_compact_req/2, handle_design_req/2,
    db_req/2, couch_doc_open/4,handle_changes_req/2,
    update_doc_result_to_json/1, update_doc_result_to_json/2,
    handle_design_info_req/3, handle_view_cleanup_req/2]).

-import(couch_httpd,
    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
    start_json_response/2,start_json_response/3,
    send_chunk/2,last_chunk/1,end_json_response/1,
    start_chunked_response/3, absolute_uri/2, send/2,
    start_response_length/4]).

% Parsed per-document query options (?rev=, ?open_revs=, ?atts_since=, ...)
% shared by the document request handlers.
-record(doc_query_args, {
    options = [],
    rev = nil,
    open_revs = [],
    update_type = interactive_edit,
    atts_since = nil
}).

% Database request handlers
%% @doc Top-level dispatch for /dbname/... requests: create/delete the
%% database itself, or open it and hand off to db_req/2 (or a registered
%% handler for the second path segment, e.g. _design, _view_cleanup).
handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
        db_url_handlers=DbUrlHandlers}=Req)->
    case {Method, RestParts} of
    {'PUT', []} ->
        create_db_req(Req, DbName);
    {'DELETE', []} ->
        % if we get ?rev=... the user is using a faulty script where the
        % document id is empty by accident. Let them recover safely.
        case couch_httpd:qs_value(Req, "rev", false) of
        false -> delete_db_req(Req, DbName);
        _Rev -> throw({bad_request,
            "You tried to DELETE a database with a ?=rev parameter. "
            ++ "Did you mean to DELETE a document instead?"})
        end;
    {_, []} ->
        do_db_req(Req, fun db_req/2);
    {_, [SecondPart|_]} ->
        Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2),
        do_db_req(Req, Handler)
    end.
%% @doc GET /db/_changes. Builds a per-feed-type callback that formats
%% change rows, then drives it through couch_changes:handle_changes/3.
%% "normal" feeds are ETag-cached against the db info; "longpoll" and
%% "continuous" stream without an ETag.
handle_changes_req(#httpd{method='GET'}=Req, Db) ->
    MakeCallback = fun(Resp) ->
        % The callback is called with (Event, FeedType); continuous feeds
        % emit one JSON object per line, others build a single JSON document.
        fun({change, Change, _}, "continuous") ->
            send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]);
        ({change, Change, Prepend}, _) ->
            send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]);
        (start, "continuous") ->
            ok;
        (start, _) ->
            send_chunk(Resp, "{\"results\":[\n");
        ({stop, EndSeq}, "continuous") ->
            send_chunk(
                Resp,
                [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
            ),
            end_json_response(Resp);
        ({stop, EndSeq}, _) ->
            send_chunk(
                Resp,
                io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq])
            ),
            end_json_response(Resp);
        (timeout, _) ->
            % Heartbeat newline keeps idle long-lived connections open.
            send_chunk(Resp, "\n")
        end
    end,
    ChangesArgs = parse_changes_query(Req),
    ChangesFun = couch_changes:handle_changes(ChangesArgs, Req, Db),
    WrapperFun = case ChangesArgs#changes_args.feed of
    "normal" ->
        {ok, Info} = couch_db:get_db_info(Db),
        CurrentEtag = couch_httpd:make_etag(Info),
        fun(FeedChangesFun) ->
            couch_httpd:etag_respond(
                Req,
                CurrentEtag,
                fun() ->
                    {ok, Resp} = couch_httpd:start_json_response(
                        Req, 200, [{"Etag", CurrentEtag}]
                    ),
                    FeedChangesFun(MakeCallback(Resp))
                end
            )
        end;
    _ ->
        % "longpoll" or "continuous"
        {ok, Resp} = couch_httpd:start_json_response(Req, 200),
        fun(FeedChangesFun) ->
            FeedChangesFun(MakeCallback(Resp))
        end
    end,
    couch_stats_collector:track_process_count(
        {httpd, clients_requesting_changes}
    ),
    WrapperFun(ChangesFun);

handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
    send_method_not_allowed(Req, "GET,HEAD").
%% @doc POST /db/_compact[/designname]. The three-or-more-segment clause
%% (view index compaction) must stay above the plain database-compaction
%% clause, since the latter's pattern also matches longer paths.
%% Both require db admin rights and return 202 (work continues in
%% background).
handle_compact_req(#httpd{method='POST',path_parts=[DbName,_,Id|_]}=Req, Db) ->
    ok = couch_db:check_is_admin(Db),
    couch_httpd:validate_ctype(Req, "application/json"),
    ok = couch_view_compactor:start_compact(DbName, Id),
    send_json(Req, 202, {[{ok, true}]});

handle_compact_req(#httpd{method='POST'}=Req, Db) ->
    ok = couch_db:check_is_admin(Db),
    couch_httpd:validate_ctype(Req, "application/json"),
    ok = couch_db:start_compact(Db),
    send_json(Req, 202, {[{ok, true}]});

handle_compact_req(Req, _Db) ->
    send_method_not_allowed(Req, "POST").

%% @doc POST /db/_view_cleanup — remove index files for deleted design docs.
handle_view_cleanup_req(#httpd{method='POST'}=Req, Db) ->
    % delete unreferenced index files
    ok = couch_db:check_is_admin(Db),
    couch_httpd:validate_ctype(Req, "application/json"),
    ok = couch_view:cleanup_index_files(Db),
    send_json(Req, 202, {[{ok, true}]});

handle_view_cleanup_req(Req, _Db) ->
    send_method_not_allowed(Req, "POST").


%% @doc Dispatch /db/_design/name/_action/... requests: load the design doc
%% and hand it to the handler registered for the "_action" segment.
handle_design_req(#httpd{
        path_parts=[_DbName, _Design, DesignName, <<"_",_/binary>> = Action | _Rest],
        design_url_handlers = DesignUrlHandlers
    }=Req, Db) ->
    % load ddoc
    DesignId = <<"_design/", DesignName/binary>>,
    DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, []),
    Handler = couch_util:dict_find(Action, DesignUrlHandlers, fun(_, _, _) ->
        throw({not_found, <<"missing handler: ", Action/binary>>})
    end),
    Handler(Req, Db, DDoc);

handle_design_req(Req, Db) ->
    % Not an _action sub-path: treat as an ordinary document request.
    db_req(Req, Db).

%% @doc GET /db/_design/name/_info — view group index status for a ddoc.
handle_design_info_req(#httpd{
        method='GET',
        path_parts=[_DbName, _Design, DesignName, _]
    }=Req, Db, _DDoc) ->
    DesignId = <<"_design/", DesignName/binary>>,
    {ok, GroupInfoList} = couch_view:get_group_info(Db, DesignId),
    send_json(Req, 200, {[
        {name, DesignName},
        {view_index, {GroupInfoList}}
    ]});

handle_design_info_req(Req, _Db, _DDoc) ->
    send_method_not_allowed(Req, "GET").
%% @doc PUT /dbname — create the database (server admin only), answer 201
%% with a Location header.
create_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
    ok = couch_httpd:verify_is_server_admin(Req),
    case couch_server:create(DbName, [{user_ctx, UserCtx}]) of
    {ok, Db} ->
        couch_db:close(Db),
        DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
        send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]});
    Error ->
        throw(Error)
    end.

%% @doc DELETE /dbname — delete the database (server admin only).
delete_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
    ok = couch_httpd:verify_is_server_admin(Req),
    case couch_server:delete(DbName, [{user_ctx, UserCtx}]) of
    ok ->
        send_json(Req, 200, {[{ok, true}]});
    Error ->
        throw(Error)
    end.

%% @doc Open the database named in the path and run Fun(Req, Db), always
%% closing the db afterwards (close errors are deliberately swallowed).
do_db_req(#httpd{user_ctx=UserCtx,path_parts=[DbName|_]}=Req, Fun) ->
    case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
    {ok, Db} ->
        try
            Fun(Req, Db)
        after
            catch couch_db:close(Db)
        end;
    Error ->
        throw(Error)
    end.

%% @doc Request dispatch inside an open database. Clause order encodes the
%% routing: db-level endpoints first, then the _-prefixed special documents,
%% with plain document/attachment access as the final fallback.

% GET /db — database info.
db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
    {ok, DbInfo} = couch_db:get_db_info(Db),
    send_json(Req, {DbInfo});

% POST /db — create a document with a server-assigned id if none given;
% ?batch=ok queues the write asynchronously and answers 202.
db_req(#httpd{method='POST',path_parts=[DbName]}=Req, Db) ->
    couch_httpd:validate_ctype(Req, "application/json"),
    Doc = couch_doc:from_json_obj(couch_httpd:json_body(Req)),
    Doc2 = case Doc#doc.id of
        <<"">> ->
            Doc#doc{id=couch_uuids:new(), revs={0, []}};
        _ ->
            Doc
    end,
    DocId = Doc2#doc.id,
    case couch_httpd:qs_value(Req, "batch") of
    "ok" ->
        % async_batching
        spawn(fun() ->
                case catch(couch_db:update_doc(Db, Doc2, [])) of
                {ok, _} -> ok;
                Error ->
                    ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
                end
            end),
        send_json(Req, 202, [], {[
            {ok, true},
            {id, DocId}
        ]});
    _Normal ->
        % normal
        {ok, NewRev} = couch_db:update_doc(Db, Doc2, []),
        DocUrl = absolute_uri(
            Req, binary_to_list(<<"/",DbName/binary,"/", DocId/binary>>)),
        send_json(Req, 201, [{"Location", DocUrl}], {[
            {ok, true},
            {id, DocId},
            {rev, couch_doc:rev_to_str(NewRev)}
        ]})
    end;

db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");

% POST /db/_ensure_full_commit — fsync; ?seq= lets the caller skip the
% commit when everything up to that sequence is already on disk.
db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) ->
    couch_httpd:validate_ctype(Req, "application/json"),
    UpdateSeq = couch_db:get_update_seq(Db),
    CommittedSeq = couch_db:get_committed_update_seq(Db),
    {ok, StartTime} =
    case couch_httpd:qs_value(Req, "seq") of
    undefined ->
        couch_db:ensure_full_commit(Db);
    RequiredStr ->
        RequiredSeq = list_to_integer(RequiredStr),
        if RequiredSeq > UpdateSeq ->
            throw({bad_request,
                "can't do a full commit ahead of current update_seq"});
        RequiredSeq > CommittedSeq ->
            couch_db:ensure_full_commit(Db);
        true ->
            {ok, Db#db.instance_start_time}
        end
    end,
    send_json(Req, 201, {[
        {ok, true},
        {instance_start_time, StartTime}
    ]});

db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
    send_method_not_allowed(Req, "POST");

% POST /db/_bulk_docs — batched updates. new_edits=true (the default) is
% interactive editing (conflicts reported per doc); new_edits=false is
% replication-style (revisions taken as given).
db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
    couch_stats_collector:increment({httpd, bulk_requests}),
    couch_httpd:validate_ctype(Req, "application/json"),
    {JsonProps} = couch_httpd:json_body_obj(Req),
    DocsArray = couch_util:get_value(<<"docs">>, JsonProps),
    case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
    "true" ->
        Options = [full_commit];
    "false" ->
        Options = [delay_commit];
    _ ->
        Options = []
    end,
    case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
    true ->
        Docs = lists:map(
            fun({ObjProps} = JsonObj) ->
                Doc = couch_doc:from_json_obj(JsonObj),
                validate_attachment_names(Doc),
                Id = case Doc#doc.id of
                    <<>> -> couch_uuids:new();
                    Id0 -> Id0
                end,
                case couch_util:get_value(<<"_rev">>, ObjProps) of
                undefined ->
                    Revs = {0, []};
                Rev ->
                    {Pos, RevId} = couch_doc:parse_rev(Rev),
                    Revs = {Pos, [RevId]}
                end,
                Doc#doc{id=Id,revs=Revs}
            end,
            DocsArray),
        Options2 =
        case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
        true -> [all_or_nothing|Options];
        _ -> Options
        end,
        case couch_db:update_docs(Db, Docs, Options2) of
        {ok, Results} ->
            % output the results
            DocResults = lists:zipwith(fun update_doc_result_to_json/2,
                Docs, Results),
            send_json(Req, 201, DocResults);
        {aborted, Errors} ->
            % all_or_nothing failed: 417 Expectation Failed.
            ErrorsJson =
                lists:map(fun update_doc_result_to_json/1, Errors),
            send_json(Req, 417, ErrorsJson)
        end;
    false ->
        Docs = lists:map(fun(JsonObj) ->
                Doc = couch_doc:from_json_obj(JsonObj),
                validate_attachment_names(Doc),
                Doc
            end, DocsArray),
        {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes),
        ErrorsJson =
            lists:map(fun update_doc_result_to_json/1, Errors),
        send_json(Req, 201, ErrorsJson)
    end;

db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
    send_method_not_allowed(Req, "POST");

% POST /db/_purge — permanently remove the given id/rev pairs.
db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
    couch_httpd:validate_ctype(Req, "application/json"),
    {IdsRevs} = couch_httpd:json_body_obj(Req),
    IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
    case couch_db:purge_docs(Db, IdsRevs2) of
    {ok, PurgeSeq, PurgedIdsRevs} ->
        PurgedIdsRevs2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- PurgedIdsRevs],
        send_json(Req, 200, {[{<<"purge_seq">>, PurgeSeq}, {<<"purged">>, {PurgedIdsRevs2}}]});
    Error ->
        throw(Error)
    end;

db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
    send_method_not_allowed(Req, "POST");

% GET/POST /db/_all_docs — the built-in by-id view; POST supplies "keys".
db_req(#httpd{method='GET',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
    all_docs_view(Req, Db, nil);

db_req(#httpd{method='POST',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
    {Fields} = couch_httpd:json_body_obj(Req),
    case couch_util:get_value(<<"keys">>, Fields, nil) of
    nil ->
        ?LOG_DEBUG("POST to _all_docs with no keys member.", []),
        all_docs_view(Req, Db, nil);
    Keys when is_list(Keys) ->
        all_docs_view(Req, Db, Keys);
    _ ->
        throw({bad_request, "`keys` member must be a array."})
    end;

db_req(#httpd{path_parts=[_,<<"_all_docs">>]}=Req, _Db) ->
    send_method_not_allowed(Req, "GET,HEAD,POST");

% POST /db/_missing_revs — replicator support: which of these revs are
% unknown here.
db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
    {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
    JsonDocIdRevs2 = [{Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]} || {Id, RevStrs} <- JsonDocIdRevs],
    {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
    Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs, _} <- Results],
    send_json(Req, {[
        {missing_revs, {Results2}}
    ]});

db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
    send_method_not_allowed(Req, "POST");

% POST /db/_revs_diff — like _missing_revs but also reports possible
% ancestors the source can use to shorten the revision history sent.
db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
    {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
    JsonDocIdRevs2 =
        [{Id, couch_doc:parse_revs(RevStrs)} || {Id, RevStrs} <- JsonDocIdRevs],
    {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
    Results2 =
        lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
            {Id,
                {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
                    if PossibleAncestors == [] ->
                        [];
                    true ->
                        [{possible_ancestors,
                            couch_doc:revs_to_strs(PossibleAncestors)}]
                    end}}
        end, Results),
    send_json(Req, {Results2});

db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
    send_method_not_allowed(Req, "POST");

% PUT/GET /db/_security — the database security object.
db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>]}=Req, Db) ->
    SecObj = couch_httpd:json_body(Req),
    ok = couch_db:set_security(Db, SecObj),
    send_json(Req, {[{<<"ok">>, true}]});

db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
    send_json(Req, couch_db:get_security(Db));

db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
    send_method_not_allowed(Req, "PUT,GET");

% PUT/GET /db/_revs_limit — how much revision history is retained.
db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>]}=Req,
        Db) ->
    Limit = couch_httpd:json_body(Req),
    ok = couch_db:set_revs_limit(Db, Limit),
    send_json(Req, {[{<<"ok">>, true}]});

db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
    send_json(Req, couch_db:get_revs_limit(Db));

db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
    send_method_not_allowed(Req, "PUT,GET");

% Special case to enable using an unencoded slash in the URL of design docs,
% as slashes in document IDs must otherwise be URL encoded.
db_req(#httpd{method='GET',mochi_req=MochiReq, path_parts=[DbName,<<"_design/",_/binary>>|_]}=Req, _Db) ->
    PathFront = "/" ++ couch_httpd:quote(binary_to_list(DbName)) ++ "/",
    [_|PathTail] = re:split(MochiReq:get(raw_path), "_design%2F",
        [{return, list}]),
    couch_httpd:send_redirect(Req, PathFront ++ "_design/" ++
        mochiweb_util:join(PathTail, "_design%2F"));

db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
    db_doc_req(Req, Db, <<"_design/",Name/binary>>);

db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
    db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);


% Special case to allow for accessing local documents without %2F
% encoding the docid. Throws out requests that don't have the second
% path part or that specify an attachment name.
db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
    throw({bad_request, <<"Invalid _local document id.">>});

db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
    throw({bad_request, <<"Invalid _local document id.">>});

db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
    db_doc_req(Req, Db, <<"_local/", Name/binary>>);

db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
    throw({bad_request, <<"_local documents do not accept attachments.">>});

% Fallback: plain document and attachment access.
db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
    db_doc_req(Req, Db, DocId);

db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
    db_attachment_req(Req, Db, DocId, FileNameParts).

% Render the _all_docs index for a database.
% Keys == nil streams every non-deleted document in id order via
% couch_db:enum_docs/4; a list of Keys performs one point lookup per key
% instead, preserving the requested key order (reversed fold for dir=rev).
% The whole response is wrapped in an ETag check based on the db info.
all_docs_view(Req, Db, Keys) ->
    #view_query_args{
        start_key = StartKey,
        start_docid = StartDocId,
        end_key = EndKey,
        end_docid = EndDocId,
        limit = Limit,
        skip = SkipCount,
        direction = Dir,
        inclusive_end = Inclusive
    } = QueryArgs = couch_httpd_view:parse_view_params(Req, Keys, map),
    {ok, Info} = couch_db:get_db_info(Db),
    CurrentEtag = couch_httpd:make_etag(Info),
    couch_httpd:etag_respond(Req, CurrentEtag, fun() ->

        TotalRowCount = couch_util:get_value(doc_count, Info),
        % For _all_docs the row key IS the doc id, so a binary startkey /
        % endkey doubles as the start/end doc id.
        StartId = if is_binary(StartKey) -> StartKey;
        true -> StartDocId
        end,
        EndId = if is_binary(EndKey) -> EndKey;
        true -> EndDocId
        end,
        FoldAccInit = {Limit, SkipCount, undefined, []},
        UpdateSeq = couch_db:get_update_seq(Db),
        JsonParams = case couch_httpd:qs_value(Req, "update_seq") of
        "true" ->
            [{update_seq, UpdateSeq}];
        _Else ->
            []
        end,
        case Keys of
        nil ->
            FoldlFun = couch_httpd_view:make_view_fold_fun(Req, QueryArgs, CurrentEtag, Db, UpdateSeq,
                TotalRowCount, #view_fold_helper_funs{
                    reduce_count = fun couch_db:enum_docs_reduce_to_count/1
                }),
            % Adapt full_doc_info records to view rows; deleted docs are
            % skipped rather than emitted.
            AdapterFun = fun(#full_doc_info{id=Id}=FullDocInfo, Offset, Acc) ->
                case couch_doc:to_doc_info(FullDocInfo) of
                #doc_info{revs=[#rev_info{deleted=false, rev=Rev}|_]} ->
                    FoldlFun({{Id, Id}, {[{rev, couch_doc:rev_to_str(Rev)}]}}, Offset, Acc);
                #doc_info{revs=[#rev_info{deleted=true}|_]} ->
                    {ok, Acc}
                end
            end,
            {ok, LastOffset, FoldResult} = couch_db:enum_docs(Db,
                AdapterFun, FoldAccInit, [{start_key, StartId}, {dir, Dir},
                    {if Inclusive -> end_key; true -> end_key_gt end, EndId}]),
            couch_httpd_view:finish_view_fold(Req, TotalRowCount, LastOffset, FoldResult, JsonParams);
        _ ->
            FoldlFun = couch_httpd_view:make_view_fold_fun(Req, QueryArgs, CurrentEtag, Db, UpdateSeq,
                TotalRowCount, #view_fold_helper_funs{
                    reduce_count = fun(Offset) -> Offset end
                }),
            KeyFoldFun = case Dir of
            fwd ->
                fun lists:foldl/3;
            rev ->
                fun lists:foldr/3
            end,
            FoldResult = KeyFoldFun(
                fun(Key, FoldAcc) ->
                    % get_doc_info may crash on a malformed key; the catch
                    % turns that into a term matched below.
                    DocInfo = (catch couch_db:get_doc_info(Db, Key)),
                    Doc = case DocInfo of
                    {ok, #doc_info{id=Id, revs=[#rev_info{deleted=false, rev=Rev}|_]}} ->
                        {{Id, Id}, {[{rev, couch_doc:rev_to_str(Rev)}]}};
                    {ok, #doc_info{id=Id, revs=[#rev_info{deleted=true, rev=Rev}|_]}} ->
                        {{Id, Id}, {[{rev, couch_doc:rev_to_str(Rev)}, {deleted, true}]}};
                    not_found ->
                        {{Key, error}, not_found};
                    _ ->
                        ?LOG_ERROR("Invalid DocInfo: ~p", [DocInfo]),
                        throw({error, invalid_doc_info})
                    end,
                    {_, FoldAcc2} = FoldlFun(Doc, 0, FoldAcc),
                    FoldAcc2
                end, FoldAccInit, Keys),
            couch_httpd_view:finish_view_fold(Req, TotalRowCount, 0, FoldResult, JsonParams)
        end
    end).

% Document-level request dispatch: one clause per HTTP method.

% DELETE /db/docid — delete by writing a {"_deleted":true} stub; the rev
% comes from the query string (or, via couch_doc_from_req, from If-Match).
db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
    % check for the existence of the doc to handle the 404 case.
    couch_doc_open(Db, DocId, nil, []),
    case couch_httpd:qs_value(Req, "rev") of
    undefined ->
        update_doc(Req, Db, DocId,
            couch_doc_from_req(Req, DocId, {[{<<"_deleted">>,true}]}));
    Rev ->
        update_doc(Req, Db, DocId,
            couch_doc_from_req(Req, DocId,
                {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}))
    end;

% GET /db/docid — either a single rev (possibly with atts_since filtering of
% attachment bodies) or, with open_revs, a JSON array / multipart/mixed
% stream of several leaf revisions.
db_doc_req(#httpd{method='GET'}=Req, Db, DocId) ->
    #doc_query_args{
        rev = Rev,
        open_revs = Revs,
        options = Options,
        atts_since = AttsSince
    } = parse_doc_query(Req),
    case Revs of
    [] ->
        Options2 =
        if AttsSince /= nil ->
            [{atts_since, AttsSince}, attachments | Options];
        true -> Options
        end,
        Doc = couch_doc_open(Db, DocId, Rev, Options2),
        send_doc(Req, Doc, Options2);
    _ ->
        {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
        % NOTE(review): splitting the Accept header on "; " is a crude
        % tokenization, not real media-range parsing — confirm acceptable.
        AcceptedTypes = case couch_httpd:header_value(Req, "Accept") of
            undefined -> [];
            AcceptHeader -> string:tokens(AcceptHeader, "; ")
        end,
        case lists:member("multipart/mixed", AcceptedTypes) of
        false ->
            {ok, Resp} = start_json_response(Req, 200),
            send_chunk(Resp, "["),
            % We loop through the docs. The first time through the separator
            % is whitespace, then a comma on subsequent iterations.
            lists:foldl(
                fun(Result, AccSeparator) ->
                    case Result of
                    {ok, Doc} ->
                        JsonDoc = couch_doc:to_json_obj(Doc, Options),
                        Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
                        send_chunk(Resp, AccSeparator ++ Json);
                    {{not_found, missing}, RevId} ->
                        RevStr = couch_doc:rev_to_str(RevId),
                        Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
                        send_chunk(Resp, AccSeparator ++ Json)
                    end,
                    "," % AccSeparator now has a comma
                end,
                "", Results),
            send_chunk(Resp, "]"),
            end_json_response(Resp);
        true ->
            send_docs_multipart(Req, Results, Options)
        end
    end;

% POST /db/docid with multipart/form-data — the HTML-form attachment upload
% path. Either a full "_doc" JSON body or a "_rev" of an existing doc is
% required; uploaded "_attachments" parts replace same-named attachments.
db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
    couch_httpd:validate_referer(Req),
    couch_doc:validate_docid(DocId),
    couch_httpd:validate_ctype(Req, "multipart/form-data"),
    Form = couch_httpd:parse_form(Req),
    case proplists:is_defined("_doc", Form) of
    true ->
        Json = ?JSON_DECODE(couch_util:get_value("_doc", Form)),
        Doc = couch_doc_from_req(Req, DocId, Json);
    false ->
        Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
        {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], [])
    end,
    UpdatedAtts = [
        #att{name=validate_attachment_name(Name),
            type=list_to_binary(ContentType),
            data=Content} ||
        {Name, {ContentType, _}, Content} <-
        proplists:get_all_values("_attachments", Form)
    ],
    #doc{atts=OldAtts} = Doc,
    OldAtts2 = lists:flatmap(
        fun(#att{name=OldName}=Att) ->
            case [1 || A <- UpdatedAtts, A#att.name == OldName] of
            [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
            _ -> [] % the attachment was in the UpdatedAtts, drop it
            end
        end, OldAtts),
    NewDoc = Doc#doc{
        atts = UpdatedAtts ++ OldAtts2
    },
    {ok, NewRev} = couch_db:update_doc(Db, NewDoc, []),

    send_json(Req, 201, [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewRev)) ++ "\""}], {[
        {ok, true},
        {id, DocId},
        {rev, couch_doc:rev_to_str(NewRev)}
    ]});

% PUT /db/docid — create/update. Accepts either a multipart/related stream
% (doc JSON + attachment bodies), a ?batch=ok fire-and-forget write (202,
% performed in a spawned process), or a plain JSON body.
db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
    #doc_query_args{
        update_type = UpdateType
    } = parse_doc_query(Req),
    couch_doc:validate_docid(DocId),

    Loc = absolute_uri(Req, "/" ++ ?b2l(Db#db.name) ++ "/" ++ ?b2l(DocId)),
    RespHeaders = [{"Location", Loc}],
    case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
    ("multipart/related;" ++ _) = ContentType ->
        {ok, Doc0} = couch_doc:doc_from_multi_part_stream(ContentType,
            fun() -> receive_request_data(Req) end),
        Doc = couch_doc_from_req(Req, DocId, Doc0),
        update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType);
    _Else ->
        case couch_httpd:qs_value(Req, "batch") of
        "ok" ->
            % batch: reply 202 immediately, write asynchronously; errors
            % are only logged, never reported to the client.
            Doc = couch_doc_from_req(Req, DocId, couch_httpd:json_body(Req)),

            spawn(fun() ->
                    case catch(couch_db:update_doc(Db, Doc, [])) of
                    {ok, _} -> ok;
                    Error ->
                        ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
                    end
                end),
            send_json(Req, 202, [], {[
                {ok, true},
                {id, DocId}
            ]});
        _Normal ->
            % normal
            Body = couch_httpd:json_body(Req),
            Doc = couch_doc_from_req(Req, DocId, Body),
            update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType)
        end
    end;

% COPY /db/docid — re-save the source doc (at the rev from ?rev/If-Match,
% or the latest) under the id/rev taken from the Destination header.
db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
    SourceRev =
    case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
        missing_rev -> nil;
        Rev -> Rev
    end,
    {TargetDocId, TargetRevs} = parse_copy_destination_header(Req),
    % open old doc
    Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
    % save new doc
    {ok, NewTargetRev} = couch_db:update_doc(Db,
        Doc#doc{id=TargetDocId, revs=TargetRevs}, []),
    % respond
    send_json(Req, 201,
        [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}],
        update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}));

% Any other method on a document.
db_doc_req(Req, _Db, _DocId) ->
    send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").


% Send a single document. When the doc carries no meta (revs_info,
% conflicts, ...) its rev is a valid cache key, so wrap the response in an
% ETag check; with meta present the body is not purely rev-derived and no
% ETag is emitted.
send_doc(Req, Doc, Options) ->
    case Doc#doc.meta of
    [] ->
        DiskEtag = couch_httpd:doc_etag(Doc),
        % output etag only when we have no meta
        couch_httpd:etag_respond(Req, DiskEtag, fun() ->
            send_doc_efficiently(Req, Doc, [{"Etag", DiskEtag}], Options)
        end);
    _ ->
        send_doc_efficiently(Req, Doc, [], Options)
    end.


% Choose the cheapest wire format: plain JSON when there are no attachments
% (or they weren't requested), multipart/related when attachments are
% included AND the client advertises multipart/related in Accept — that
% avoids base64-inflating attachment bodies inside the JSON.
send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
    send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
send_doc_efficiently(Req, #doc{atts=Atts}=Doc, Headers, Options) ->
    case lists:member(attachments, Options) of
    true ->
        % NOTE(review): tokenizing Accept on ", " ignores q-values and
        % parameters ("multipart/related;q=0.9" will not match) — confirm
        % this looseness is acceptable.
        AcceptedTypes = case couch_httpd:header_value(Req, "Accept") of
            undefined -> [];
            AcceptHeader -> string:tokens(AcceptHeader, ", ")
        end,
        case lists:member("multipart/related", AcceptedTypes) of
        false ->
            send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
        true ->
            Boundary = couch_uuids:random(),
            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
                [attachments, follows|Options])),
            % Length is pre-computed so the response can use Content-Length
            % instead of chunked encoding.
            {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
                Boundary,JsonBytes, Atts,false),
            CType = {<<"Content-Type">>, ContentType},
            {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
            couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
                fun(Data) -> couch_httpd:send(Resp, Data) end, false)
        end;
    false ->
        send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
    end.

% Stream several open_revs results as a chunked multipart/mixed response.
% Each found rev becomes a nested multipart/related part (doc JSON plus
% attachments); each missing rev becomes a small JSON error part.
send_docs_multipart(Req, Results, Options) ->
    OuterBoundary = couch_uuids:random(),
    InnerBoundary = couch_uuids:random(),
    CType = {"Content-Type",
        "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
    {ok, Resp} = start_chunked_response(Req, 200, [CType]),
    couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
    lists:foreach(
        fun({ok, #doc{atts=Atts}=Doc}) ->
            JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
                [attachments,follows|Options])),
            % Only the inner part's Content-Type is needed here; its length
            % is irrelevant because the outer response is chunked.
            {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
                InnerBoundary, JsonBytes, Atts, false),
            couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
                ContentType/binary, "\r\n\r\n">>),
            couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
                fun(Data) -> couch_httpd:send_chunk(Resp, Data)
                end, false),
            couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
        ({{not_found, missing}, RevId}) ->
            RevStr = couch_doc:rev_to_str(RevId),
            Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
            couch_httpd:send_chunk(Resp,
                [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
                Json,
                <<"\r\n--", OuterBoundary/binary>>])
        end, Results),
    % Terminate the outer multipart ("--Boundary--") and the chunked body.
    couch_httpd:send_chunk(Resp, <<"--">>),
    couch_httpd:last_chunk(Resp).

% Lazy request-body reader: returns the next chunk plus a continuation fun,
% the shape couch_doc:doc_from_multi_part_stream/2 consumes.
receive_request_data(Req) ->
    {couch_httpd:recv(Req, 0), fun() -> receive_request_data(Req) end}.

% Render one _bulk_docs / update result as a JSON row.
% Arity 1: an {{Id, Rev}, Error} tuple from an aborted batch.
update_doc_result_to_json({{Id, Rev}, Error}) ->
    {_Code, Err, Msg} = couch_httpd:error_info(Error),
    {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
        {error, Err}, {reason, Msg}]}.

% Arity 2: pair a doc id with its per-doc result (ok or error).
update_doc_result_to_json(#doc{id=DocId}, Result) ->
    update_doc_result_to_json(DocId, Result);
update_doc_result_to_json(DocId, {ok, NewRev}) ->
    {[{id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
update_doc_result_to_json(DocId, Error) ->
    {_Code, ErrorStr, Reason} = couch_httpd:error_info(Error),
    {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.


% Convenience wrapper: update with no extra response headers.
update_doc(Req, Db, DocId, Doc) ->
    update_doc(Req, Db, DocId, Doc, []).

% Convenience wrapper: default to an interactive (conflict-checked) edit.
update_doc(Req, Db, DocId, Doc, Headers) ->
    update_doc(Req, Db, DocId, Doc, Headers, interactive_edit).

% Write Doc and send the JSON response. The X-Couch-Full-Commit header maps
% to full_commit/delay_commit; deletions answer 200, other writes 201.
update_doc(Req, Db, DocId, #doc{deleted=Deleted}=Doc, Headers, UpdateType) ->
    case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
    "true" ->
        Options = [full_commit];
    "false" ->
        Options = [delay_commit];
    _ ->
        Options = []
    end,
    {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType),
    NewRevStr = couch_doc:rev_to_str(NewRev),
    ResponseHeaders = [{"Etag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers,
    send_json(Req, if Deleted -> 200; true -> 201 end,
        ResponseHeaders, {[
            {ok, true},
            {id, DocId},
            {rev, NewRevStr}]}).

% Build the #doc to write from the request: reconcile the rev embedded in
% the body with ?rev / If-Match (they must agree if both are present), and
% stamp the doc id from the URL. Non-record input is JSON-decoded first.
couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc) ->
    validate_attachment_names(Doc),
    ExplicitDocRev =
    case Revs of
        {Start,[RevId|_]} -> {Start, RevId};
        _ -> undefined
    end,
    case extract_header_rev(Req, ExplicitDocRev) of
    missing_rev ->
        Revs2 = {0, []};
    ExplicitDocRev ->
        % Header/body agreed on the body's rev: keep the full revs list.
        Revs2 = Revs;
    {Pos, Rev} ->
        Revs2 = {Pos, [Rev]}
    end,
    Doc#doc{id=DocId, revs=Revs2};
couch_doc_from_req(Req, DocId, Json) ->
    couch_doc_from_req(Req, DocId, couch_doc:from_json_obj(Json)).


% Useful for debugging
% couch_doc_open(Db, DocId) ->
%   couch_doc_open(Db, DocId, nil, []).

% Open DocId at Rev (nil = latest) or throw the error term so the HTTP
% layer can turn it into the proper status (404 etc.).
couch_doc_open(Db, DocId, Rev, Options) ->
    case Rev of
    nil -> % open most recent rev
        case couch_db:open_doc(Db, DocId, Options) of
        {ok, Doc} ->
            Doc;
        Error ->
            throw(Error)
        end;
    _ -> % open a specific rev (deletions come back as stubs)
        case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
        {ok, [{ok, Doc}]} ->
            Doc;
        {ok, [{{not_found, missing}, Rev}]} ->
            throw(not_found);
        {ok, [Else]} ->
            throw(Else)
        end
    end.

% Attachment request handlers

% GET /db/docid/attname — stream one attachment, negotiating the stored
% content-encoding against the client's Accept-Encoding.
db_attachment_req(#httpd{method='GET'}=Req, Db, DocId, FileNameParts) ->
    % Remaining path segments are re-joined with "/" to form the att name.
    FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts),"/")),
    #doc_query_args{
        rev=Rev,
        options=Options
    } = parse_doc_query(Req),
    #doc{
        atts=Atts
    } = Doc = couch_doc_open(Db, DocId, Rev, Options),
    case [A || A <- Atts, A#att.name == FileName] of
    [] ->
        throw({not_found, "Document is missing attachment"});
    [#att{type=Type, encoding=Enc, disk_len=DiskLen, att_len=AttLen}=Att] ->
        Etag = couch_httpd:doc_etag(Doc),
        ReqAcceptsAttEnc = lists:member(
            atom_to_list(Enc),
            couch_httpd:accepted_encodings(Req)
        ),
        Headers = [
            {"ETag", Etag},
            {"Cache-Control", "must-revalidate"},
            {"Content-Type", binary_to_list(Type)}
        ] ++ case ReqAcceptsAttEnc of
        true ->
            [{"Content-Encoding", atom_to_list(Enc)}];
        _ ->
            []
        end,
        % Decide the Content-Length to advertise; undefined forces a
        % chunked response below.
        Len = case {Enc, ReqAcceptsAttEnc} of
        {identity, _} ->
            % stored and served in identity form
            DiskLen;
        {_, false} when DiskLen =/= AttLen ->
            % Stored encoded, but client doesn't accept the encoding we used,
            % so we need to decode on the fly. DiskLen is the identity length
            % of the attachment.
            DiskLen;
        {_, true} ->
            % Stored and served encoded. AttLen is the encoded length.
            AttLen;
        _ ->
            % We received an encoded attachment and stored it as such, so we
            % don't know the identity length. The client doesn't accept the
            % encoding, and since we cannot serve a correct Content-Length
            % header we'll fall back to a chunked response.
            undefined
        end,
        AttFun = case ReqAcceptsAttEnc of
        false ->
            fun couch_doc:att_foldl_decode/3;
        true ->
            fun couch_doc:att_foldl/3
        end,
        couch_httpd:etag_respond(
            Req,
            Etag,
            fun() ->
                case Len of
                undefined ->
                    {ok, Resp} = start_chunked_response(Req, 200, Headers),
                    AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, ok),
                    last_chunk(Resp);
                _ ->
                    {ok, Resp} = start_response_length(Req, 200, Headers, Len),
                    AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, ok)
                end
            end
        )
    end;


% PUT/DELETE /db/docid/attname — replace or remove one attachment, writing
% a new revision of the document. PUT builds an #att whose data field is
% either an inline binary or a reader fun for streamed bodies.
db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileNameParts)
        when (Method == 'PUT') or (Method == 'DELETE') ->
    FileName = validate_attachment_name(
        mochiweb_util:join(
            lists:map(fun binary_to_list/1,
                FileNameParts),"/")),

    NewAtt = case Method of
    'DELETE' ->
        [];
    _ ->
        [#att{
            name = FileName,
            type = case couch_httpd:header_value(Req,"Content-Type") of
            undefined ->
                % We could throw an error here or guess by the FileName.
                % Currently, just giving it a default.
                <<"application/octet-stream">>;
            CType ->
                list_to_binary(CType)
            end,
            % data is a binary for empty/absent bodies, otherwise a fun the
            % storage layer pulls bytes from.
            data = case couch_httpd:body_length(Req) of
            undefined ->
                <<"">>;
            {unknown_transfer_encoding, Unknown} ->
                exit({unknown_transfer_encoding, Unknown});
            chunked ->
                fun(MaxChunkSize, ChunkFun, InitState) ->
                    couch_httpd:recv_chunked(Req, MaxChunkSize,
                        ChunkFun, InitState)
                end;
            0 ->
                <<"">>;
            Length when is_integer(Length) ->
                Expect = case couch_httpd:header_value(Req, "expect") of
                undefined ->
                    undefined;
                Value when is_list(Value) ->
                    string:to_lower(Value)
                end,
                % Honor "Expect: 100-continue" before the client will send
                % the body.
                case Expect of
                "100-continue" ->
                    MochiReq:start_raw_response({100, gb_trees:empty()});
                _Else ->
                    ok
                end,


                fun() -> couch_httpd:recv(Req, 0) end;
            Length ->
                exit({length_not_integer, Length})
            end,
            att_len = case couch_httpd:header_value(Req,"Content-Length") of
            undefined ->
                undefined;
            Length ->
                list_to_integer(Length)
            end,
            md5 = get_md5_header(Req),
            encoding = case string:to_lower(string:strip(
                couch_httpd:header_value(Req,"Content-Encoding","identity")
            )) of
            "identity" ->
                identity;
            "gzip" ->
                gzip;
            _ ->
                throw({
                    bad_ctype,
                    "Only gzip and identity content-encodings are supported"
                })
            end
        }]
    end,

    Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
    missing_rev -> % make the new doc
        couch_doc:validate_docid(DocId),
        #doc{id=DocId};
    Rev ->
        case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
        {ok, [{ok, Doc0}]} -> Doc0;
        {ok, [Error]} -> throw(Error)
        end
    end,

    % Drop any existing attachment with the same name, then prepend the new
    % one (empty list for DELETE).
    #doc{atts=Atts} = Doc,
    DocEdited = Doc#doc{
        atts = NewAtt ++ [A || A <- Atts, A#att.name /= FileName]
    },
    {ok, UpdatedRev} = couch_db:update_doc(Db, DocEdited, []),
    #db{name=DbName} = Db,

    {Status, Headers} = case Method of
    'DELETE' ->
        {200, []};
    _ ->
        {201, [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(UpdatedRev)) ++ "\""},
            {"Location", absolute_uri(Req, "/" ++
                binary_to_list(DbName) ++ "/" ++
                binary_to_list(DocId) ++ "/" ++
                binary_to_list(FileName)
            )}]}
    end,
    send_json(Req,Status, Headers, {[
        {ok, true},
        {id, DocId},
        {rev, couch_doc:rev_to_str(UpdatedRev)}
    ]});

db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
    send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").


% Extract the expected MD5 of the request body: a decoded Content-MD5
% header value, the atom md5_in_footer when a chunked request promises the
% header in its trailer, or <<>> when no checksum was supplied.
get_md5_header(Req) ->
    ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
    Length = couch_httpd:body_length(Req),
    Trailer = couch_httpd:header_value(Req, "Trailer"),
    case {ContentMD5, Length, Trailer} of
    _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
        base64:decode(ContentMD5);
    {_, chunked, undefined} ->
        <<>>;
    {_, chunked, _} ->
        case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
        {match, _} ->
            md5_in_footer;
        _ ->
            <<>>
        end;
    _ ->
        <<>>
    end.

% Fold the query string into a #doc_query_args record; unrecognized
% parameters are silently ignored.
parse_doc_query(Req) ->
    lists:foldl(fun({Key,Value}, Args) ->
        case {Key, Value} of
        {"attachments", "true"} ->
            Options = [attachments | Args#doc_query_args.options],
            Args#doc_query_args{options=Options};
        {"meta", "true"} ->
            % meta=true is shorthand for revs_info+conflicts+deleted_conflicts.
            Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
            Args#doc_query_args{options=Options};
        {"revs", "true"} ->
            Options = [revs | Args#doc_query_args.options],
            Args#doc_query_args{options=Options};
        {"local_seq", "true"} ->
            Options = [local_seq | Args#doc_query_args.options],
            Args#doc_query_args{options=Options};
        {"revs_info", "true"} ->
            Options = [revs_info | Args#doc_query_args.options],
            Args#doc_query_args{options=Options};
        {"conflicts", "true"} ->
            Options = [conflicts | Args#doc_query_args.options],
            Args#doc_query_args{options=Options};
        {"deleted_conflicts", "true"} ->
            Options = [deleted_conflicts | Args#doc_query_args.options],
            Args#doc_query_args{options=Options};
        {"rev", Rev} ->
            Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
        {"open_revs", "all"} ->
            Args#doc_query_args{open_revs=all};
        {"open_revs", RevsJsonStr} ->
            JsonArray = ?JSON_DECODE(RevsJsonStr),
            Args#doc_query_args{open_revs=couch_doc:parse_revs(JsonArray)};
        {"atts_since", RevsJsonStr} ->
            JsonArray = ?JSON_DECODE(RevsJsonStr),
            Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
        {"new_edits", "false"} ->
            Args#doc_query_args{update_type=replicated_changes};
        {"new_edits", "true"} ->
            Args#doc_query_args{update_type=interactive_edit};
        {"att_encoding_info", "true"} ->
            Options = [att_encoding_info | Args#doc_query_args.options],
            Args#doc_query_args{options=Options};
        _Else -> % unknown key value pair, ignore.
            Args
        end
    end, #doc_query_args{}, couch_httpd:qs(Req)).

% Fold the query string into a #changes_args record; unrecognized
% parameters are silently ignored.
parse_changes_query(Req) ->
    lists:foldl(fun({Key, Value}, Args) ->
        case {Key, Value} of
        {"feed", _} ->
            Args#changes_args{feed=Value};
        {"descending", "true"} ->
            Args#changes_args{dir=rev};
        {"since", _} ->
            Args#changes_args{since=list_to_integer(Value)};
        {"limit", _} ->
            Args#changes_args{limit=list_to_integer(Value)};
        {"style", _} ->
            Args#changes_args{style=list_to_existing_atom(Value)};
        {"heartbeat", "true"} ->
            Args#changes_args{heartbeat=true};
        {"heartbeat", _} ->
            Args#changes_args{heartbeat=list_to_integer(Value)};
        {"timeout", _} ->
            Args#changes_args{timeout=list_to_integer(Value)};
        {"include_docs", "true"} ->
            Args#changes_args{include_docs=true};
        {"filter", _} ->
            Args#changes_args{filter=Value};
        _Else -> % unknown key value pair, ignore.
            Args
        end
    end, #changes_args{}, couch_httpd:qs(Req)).

% Reconcile an explicitly supplied rev (query string or doc body) with the
% If-Match header. Returns missing_rev when neither is present, the agreed
% rev otherwise, and throws bad_request when the two disagree.
extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
    extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
extract_header_rev(Req, ExplicitRev) ->
    Etag = case couch_httpd:header_value(Req, "If-Match") of
        undefined -> undefined;
        Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
    end,
    case {ExplicitRev, Etag} of
    {undefined, undefined} -> missing_rev;
    {_, undefined} -> ExplicitRev;
    {undefined, _} -> Etag;
    _ when ExplicitRev == Etag -> Etag;
    _ ->
        throw({bad_request, "Document rev and etag have different values"})
    end.


% Parse the COPY Destination header into {TargetDocId, TargetRevs}.
% "docid" targets a new doc ({0, []}); "docid?rev=N-Hash" targets an
% existing revision. A missing header is now reported as a 400 bad_request
% instead of crashing with a function_clause inside re:run/3.
parse_copy_destination_header(Req) ->
    case couch_httpd:header_value(Req, "Destination") of
    undefined ->
        throw({bad_request, "Destination header is mandatory for COPY."});
    Destination ->
        case re:run(Destination, "\\?", [{capture, none}]) of
        nomatch ->
            {list_to_binary(Destination), {0, []}};
        match ->
            [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
            [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
            {Pos, RevId} = couch_doc:parse_rev(Rev),
            {list_to_binary(DocId), {Pos, [RevId]}}
        end
    end.

% Validate every attachment name on a doc; throws bad_request on failure.
validate_attachment_names(Doc) ->
    lists:foreach(fun(#att{name=Name}) ->
        validate_attachment_name(Name)
    end, Doc#doc.atts).

% An attachment name must not start with '_' and must pass is_valid_utf8/1.
validate_attachment_name(Name) when is_list(Name) ->
    validate_attachment_name(list_to_binary(Name));
validate_attachment_name(<<"_",_/binary>>) ->
    throw({bad_request, <<"Attachment name can't start with '_'">>});
validate_attachment_name(Name) ->
    case is_valid_utf8(Name) of
    true -> Name;
    false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
    end.

%% borrowed from mochijson2:json_bin_is_safe()
% NOTE(review): despite the name, this accepts only a JSON-safe ASCII
% subset — the `C >= 16#7f` clause rejects every byte >= 0x7F, so any
% multi-byte UTF-8 name fails validation. Confirm whether real UTF-8
% acceptance is intended before changing it.
is_valid_utf8(<<>>) ->
    true;
is_valid_utf8(<<C, Rest/binary>>) ->
    case C of
    $\" ->
        false;
    $\\ ->
        false;
    $\b ->
        false;
    $\f ->
        false;
    $\n ->
        false;
    $\r ->
        false;
    $\t ->
        false;
    C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
        false;
    C when C < 16#7f ->
        is_valid_utf8(Rest);
    _ ->
        false
    end.
diff --git a/apps/couch/src/couch_httpd_external.erl b/apps/couch/src/couch_httpd_external.erl
new file mode 100644
index 00000000..07202934
--- /dev/null
+++ b/apps/couch/src/couch_httpd_external.erl
@@ -0,0 +1,162 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

% Bridge between couch_httpd and external OS processes managed by
% couch_external_manager: serializes the request to a JSON object, hands it
% to the external server, and relays its response.
-module(couch_httpd_external).

-export([handle_external_req/2, handle_external_req/3]).
-export([send_external_response/2, json_req_obj/2, json_req_obj/3]).
-export([default_or_content_type/2, parse_external_response/1]).

-import(couch_httpd,[send_error/4]).

-include("couch_db.hrl").

% handle_external_req/2
% for the old type of config usage:
% _external = {couch_httpd_external, handle_external_req}
% with urls like
% /db/_external/action/design/name
handle_external_req(#httpd{
        path_parts=[_DbName, _External, UrlName | _Path]
    }=HttpReq, Db) ->
    process_external_req(HttpReq, Db, UrlName);
% Fewer than three path segments: the external server name is missing.
handle_external_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
    send_error(Req, 404, <<"external_server_error">>, <<"No server name specified.">>);
handle_external_req(Req, _) ->
    send_error(Req, 404, <<"external_server_error">>, <<"Broken assumption">>).

% handle_external_req/3
% for this type of config usage:
% _action = {couch_httpd_external, handle_external_req, <<"action">>}
% with urls like
% /db/_action/design/name
handle_external_req(HttpReq, Db, Name) ->
    process_external_req(HttpReq, Db, Name).

% Execute the named external server with the JSON-encoded request and relay
% its answer; an unknown server name maps to a 404.
process_external_req(HttpReq, Db, Name) ->

    Response = couch_external_manager:execute(binary_to_list(Name),
        json_req_obj(HttpReq, Db)),

    case Response of
    {unknown_external_server, Msg} ->
        send_error(HttpReq, 404, <<"external_server_error">>, Msg);
    _ ->
        send_external_response(HttpReq, Response)
    end.

% Serialize the request (method, path, query, headers, body, peer, form,
% cookies, user context, db info) into the JSON object handed to external
% processes. DocId defaults to null.
json_req_obj(Req, Db) -> json_req_obj(Req, Db, null).
json_req_obj(#httpd{mochi_req=Req,
        method=Method,
        path_parts=Path,
        req_body=ReqBody
    }, Db, DocId) ->
    % Body may have been read already by an earlier handler; only receive
    % it from the socket when it has not.
    Body = case ReqBody of
        undefined -> Req:recv_body();
        Else -> Else
    end,
    ParsedForm = case Req:get_primary_header_value("content-type") of
        "application/x-www-form-urlencoded" ++ _ ->
            mochiweb_util:parse_qs(Body);
        _ ->
            []
    end,
    Headers = Req:get(headers),
    Hlist = mochiweb_headers:to_list(Headers),
    {ok, Info} = couch_db:get_db_info(Db),
    % add headers...
    {[{<<"info">>, {Info}},
        {<<"id">>, DocId},
        {<<"uuid">>, couch_uuids:new()},
        {<<"method">>, Method},
        {<<"path">>, Path},
        {<<"query">>, json_query_keys(to_json_terms(Req:parse_qs()))},
        {<<"headers">>, to_json_terms(Hlist)},
        {<<"body">>, Body},
        {<<"peer">>, ?l2b(Req:get(peer))},
        {<<"form">>, to_json_terms(ParsedForm)},
        {<<"cookie">>, to_json_terms(Req:parse_cookie())},
        {<<"userCtx">>, couch_util:json_user_ctx(Db)}]}.

% Convert a proplist with atom-or-string keys and string values into an
% EJSON object (binary keys/values), preserving order.
to_json_terms(Data) ->
    to_json_terms(Data, []).

to_json_terms([], Acc) ->
    {lists:reverse(Acc)};
to_json_terms([{Key, Value} | Rest], Acc) when is_atom(Key) ->
    to_json_terms(Rest, [{list_to_binary(atom_to_list(Key)), list_to_binary(Value)} | Acc]);
to_json_terms([{Key, Value} | Rest], Acc) ->
    to_json_terms(Rest, [{list_to_binary(Key), list_to_binary(Value)} | Acc]).

% JSON-decode the values of the view-key query parameters (startkey,
% endkey, key) so the external server sees structured terms.
json_query_keys({Json}) ->
    json_query_keys(Json, []).
% Accumulator clauses for json_query_keys/1: decode the values of the three
% view-key parameters, pass everything else through unchanged.
json_query_keys([], Acc) ->
    {lists:reverse(Acc)};
json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
    json_query_keys(Rest, [{<<"startkey">>, couch_util:json_decode(Value)}|Acc]);
json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
    json_query_keys(Rest, [{<<"endkey">>, couch_util:json_decode(Value)}|Acc]);
json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
    json_query_keys(Rest, [{<<"key">>, couch_util:json_decode(Value)}|Acc]);
json_query_keys([Term | Rest], Acc) ->
    json_query_keys(Rest, [Term|Acc]).

% Turn the external server's parsed response into an actual HTTP response
% on the mochiweb connection.
send_external_response(#httpd{mochi_req=MochiReq}=Req, Response) ->
    #extern_resp_args{
        code = Code,
        data = Data,
        ctype = CType,
        headers = Headers
    } = parse_external_response(Response),
    couch_httpd:log_request(Req, Code),
    Resp = MochiReq:respond({Code,
        default_or_content_type(CType, Headers ++ couch_httpd:server_header()), Data}),
    {ok, Resp}.

% Fold the EJSON object returned by an external server into an
% #extern_resp_args record. Exactly one of json/body/base64 provides the
% body (each sets a matching default content type); unknown keys throw.
parse_external_response({Response}) ->
    lists:foldl(fun({Key,Value}, Args) ->
        case {Key, Value} of
        % NOTE(review): this clause matches the string "" but decoded JSON
        % keys are binaries, so it appears unreachable — confirm.
        {"", _} ->
            Args;
        {<<"code">>, Value} ->
            Args#extern_resp_args{code=Value};
        {<<"stop">>, true} ->
            Args#extern_resp_args{stop=true};
        {<<"json">>, Value} ->
            Args#extern_resp_args{
                data=?JSON_ENCODE(Value),
                ctype="application/json"};
        {<<"body">>, Value} ->
            Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
        {<<"base64">>, Value} ->
            Args#extern_resp_args{
                data=base64:decode(Value),
                ctype="application/binary"
            };
        {<<"headers">>, {Headers}} ->
            NewHeaders = lists:map(fun({Header, HVal}) ->
                {binary_to_list(Header), binary_to_list(HVal)}
            end, Headers),
            Args#extern_resp_args{headers=NewHeaders};
        _ -> % unknown key
            Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
            throw({external_response_error, Msg})
        end
    end, #extern_resp_args{}, Response).

% Prepend a Content-Type header only when the list doesn't already contain
% one (case-insensitive match on the header name).
default_or_content_type(DefaultContentType, Headers) ->
    IsContentType = fun({X, _}) -> string:to_lower(X) == "content-type" end,
    case lists:any(IsContentType, Headers) of
    false ->
        [{"Content-Type", DefaultContentType} | Headers];
    true ->
        Headers
    end.
diff --git a/apps/couch/src/couch_httpd_misc_handlers.erl b/apps/couch/src/couch_httpd_misc_handlers.erl
new file mode 100644
index 00000000..0a6f4a42
--- /dev/null
+++ b/apps/couch/src/couch_httpd_misc_handlers.erl
@@ -0,0 +1,219 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

% Handlers for the server-global HTTP endpoints that don't belong to a
% database: /, /favicon.ico, /_utils, /_all_dbs, /_replicate, /_restart,
% /_uuids, /_config, /_log, /_active_tasks.
-module(couch_httpd_misc_handlers).

-export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2,
    handle_all_dbs_req/1,handle_replicate_req/1,handle_restart_req/1,
    handle_uuids_req/1,handle_config_req/1,handle_log_req/1,
    handle_task_status_req/1]).

-export([increment_update_seq_req/2]).


-include("couch_db.hrl").

-import(couch_httpd,
    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
    start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
    start_chunked_response/3, send_error/4]).

% httpd global handlers

% GET / — the configurable welcome message plus the server version.
handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
    send_json(Req, {[
        {couchdb, WelcomeMessage},
        {version, list_to_binary(couch_server:get_version())}
    ]});
handle_welcome_req(Req, _) ->
    send_method_not_allowed(Req, "GET,HEAD").
% GET /favicon.ico : serve the icon from DocumentRoot with one-year
% client-side caching headers.
handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
    {{Year,Month,Day},Time} = erlang:localtime(),
    OneYearFromNow = {{Year+1,Month,Day},Time},
    CachingHeaders = [
        %favicon should expire a year from now
        {"Cache-Control", "public, max-age=31536000"},
        {"Expires", httpd_util:rfc1123_date(OneYearFromNow)}
    ],
    couch_httpd:serve_file(Req, "favicon.ico", DocumentRoot, CachingHeaders);

handle_favicon_req(Req, _) ->
    send_method_not_allowed(Req, "GET,HEAD").

% GET /_utils[/...] : serve static files from DocumentRoot; a request for
% the bare /_utils path is redirected to /_utils/ so relative links work.
handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
    "/" ++ UrlPath = couch_httpd:path(Req),
    case couch_httpd:partition(UrlPath) of
        {_ActionKey, "/", RelativePath} ->
            % GET /_utils/path or GET /_utils/
            couch_httpd:serve_file(Req, RelativePath, DocumentRoot);
        {_ActionKey, "", _RelativePath} ->
            % GET /_utils
            RedirectPath = couch_httpd:path(Req) ++ "/",
            couch_httpd:send_redirect(Req, RedirectPath)
    end;
handle_utils_dir_req(Req, _) ->
    send_method_not_allowed(Req, "GET,HEAD").

% GET /_all_dbs : JSON array of every database name on this server.
handle_all_dbs_req(#httpd{method='GET'}=Req) ->
    {ok, DbNames} = couch_server:all_databases(),
    send_json(Req, DbNames);
handle_all_dbs_req(Req) ->
    send_method_not_allowed(Req, "GET,HEAD").


% GET /_active_tasks : admin-only dump of running tasks.
handle_task_status_req(#httpd{method='GET'}=Req) ->
    ok = couch_httpd:verify_is_server_admin(Req),
    % convert the list of prop lists to a list of json objects
    send_json(Req, [{Props} || Props <- couch_task_status:all()]);
handle_task_status_req(Req) ->
    send_method_not_allowed(Req, "GET,HEAD").
% POST /_replicate : kick off (or cancel) a replication described by the
% JSON body.  Continuous and cancelled replications report 202/200 with a
% _local_id; a completed one-shot replication returns its stats inline.
handle_replicate_req(#httpd{method='POST'}=Req) ->
    couch_httpd:validate_ctype(Req, "application/json"),
    PostBody = couch_httpd:json_body_obj(Req),
    try couch_rep:replicate(PostBody, Req#httpd.user_ctx) of
        {ok, {continuous, RepId}} ->
            send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
        {ok, {cancelled, RepId}} ->
            send_json(Req, 200, {[{ok, true}, {<<"_local_id">>, RepId}]});
        {ok, {JsonResults}} ->
            send_json(Req, {[{ok, true} | JsonResults]});
        {error, {Type, Details}} ->
            send_json(Req, 500, {[{error, Type}, {reason, Details}]});
        {error, not_found} ->
            send_json(Req, 404, {[{error, not_found}]});
        {error, Reason} ->
            send_json(Req, 500, {[{error, Reason}]})
    catch
        throw:{db_not_found, Msg} ->
            send_json(Req, 404, {[{error, db_not_found}, {reason, Msg}]})
    end;
handle_replicate_req(Req) ->
    send_method_not_allowed(Req, "POST").


% POST /_restart : admin-only restart of the core server processes.
handle_restart_req(#httpd{method='POST'}=Req) ->
    couch_httpd:validate_ctype(Req, "application/json"),
    ok = couch_httpd:verify_is_server_admin(Req),
    couch_server_sup:restart_core_server(),
    send_json(Req, 200, {[{ok, true}]});
handle_restart_req(Req) ->
    send_method_not_allowed(Req, "POST").


% GET /_uuids?count=N : return N fresh UUIDs.  The response carries an
% ETag plus aggressive anti-caching headers so proxies never replay it.
handle_uuids_req(#httpd{method='GET'}=Req) ->
    Count = list_to_integer(couch_httpd:qs_value(Req, "count", "1")),
    UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
    Etag = couch_httpd:make_etag(UUIDs),
    couch_httpd:etag_respond(Req, Etag, fun() ->
        CacheBustingHeaders = [
            {"Date", httpd_util:rfc1123_date()},
            {"Cache-Control", "no-cache"},
            % Past date, ON PURPOSE!
            {"Expires", "Fri, 01 Jan 1990 00:00:00 GMT"},
            {"Pragma", "no-cache"},
            {"ETag", Etag}
        ],
        send_json(Req, 200, CacheBustingHeaders, {[{<<"uuids">>, UUIDs}]})
    end);
handle_uuids_req(Req) ->
    send_method_not_allowed(Req, "GET").


% Config request handler


% GET /_config/
% GET /_config
% Admin-only: dump the whole server config, grouped by section, as a JSON
% object of objects.
handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
    ok = couch_httpd:verify_is_server_admin(Req),
    Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
        case dict:is_key(Section, Acc) of
        true ->
            dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
        false ->
            dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
        end
    end, dict:new(), couch_config:all()),
    KVs = dict:fold(fun(Section, Values, Acc) ->
        [{list_to_binary(Section), {Values}} | Acc]
    end, [], Grouped),
    send_json(Req, 200, {KVs});
% GET /_config/Section : all key/value pairs in one section.
handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) ->
    ok = couch_httpd:verify_is_server_admin(Req),
    KVs = [{list_to_binary(Key), list_to_binary(Value)}
            || {Key, Value} <- couch_config:get(Section)],
    send_json(Req, 200, {KVs});
% PUT /_config/Section/Key
% "value"
% Sets the value and replies with the previous one.
handle_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req) ->
    ok = couch_httpd:verify_is_server_admin(Req),
    Value = couch_httpd:json_body(Req),
    % persist to the ini file unless X-Couch-Persist is literally "false"
    Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false",
    OldValue = couch_config:get(Section, Key, ""),
    % NOTE(review): ?b2l(Value) assumes the JSON body decoded to a binary
    % (i.e. a JSON string); other JSON types would crash here -- confirm.
    ok = couch_config:set(Section, Key, ?b2l(Value), Persist),
    send_json(Req, 200, list_to_binary(OldValue));
% GET /_config/Section/Key : single value, 404 if unset.
handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) ->
    ok = couch_httpd:verify_is_server_admin(Req),
    case couch_config:get(Section, Key, null) of
    null ->
        throw({not_found, unknown_config_value});
    Value ->
        send_json(Req, 200, list_to_binary(Value))
    end;
% DELETE /_config/Section/Key : remove the value, reply with the old one.
handle_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req) ->
    ok = couch_httpd:verify_is_server_admin(Req),
    Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false",
    case couch_config:get(Section, Key, null) of
    null ->
        throw({not_found, unknown_config_value});
    OldValue ->
        couch_config:delete(Section, Key, Persist),
        send_json(Req, 200, list_to_binary(OldValue))
    end;
handle_config_req(Req) ->
    send_method_not_allowed(Req, "GET,PUT,DELETE").


% httpd db handlers

% POST only: bump the database's update sequence and return the new value.
increment_update_seq_req(#httpd{method='POST'}=Req, Db) ->
    couch_httpd:validate_ctype(Req, "application/json"),
    {ok, NewSeq} = couch_db:increment_update_seq(Db),
    send_json(Req, {[{ok, true},
        {update_seq, NewSeq}
    ]});
increment_update_seq_req(Req, _Db) ->
    send_method_not_allowed(Req, "POST").

% httpd log handlers

% GET /_log?bytes=N&offset=M : admin-only read of N bytes of the server
% log starting M bytes from the end, streamed as text/plain.
handle_log_req(#httpd{method='GET'}=Req) ->
    ok = couch_httpd:verify_is_server_admin(Req),
    Bytes = list_to_integer(couch_httpd:qs_value(Req, "bytes", "1000")),
    Offset = list_to_integer(couch_httpd:qs_value(Req, "offset", "0")),
    Chunk = couch_log:read(Bytes, Offset),
    {ok, Resp} = start_chunked_response(Req, 200, [
        % send a plaintext response
        {"Content-Type", "text/plain; charset=utf-8"},
        % NOTE(review): a Content-Length header on a chunked response is
        % unusual -- verify clients/proxies tolerate the combination.
        {"Content-Length", integer_to_list(length(Chunk))}
    ]),
    send_chunk(Resp, Chunk),
    last_chunk(Resp);
handle_log_req(Req) ->
    send_method_not_allowed(Req, "GET").


diff --git a/apps/couch/src/couch_httpd_oauth.erl b/apps/couch/src/couch_httpd_oauth.erl
new file mode 100644
index 00000000..05ee10e2
--- /dev/null
+++ b/apps/couch/src/couch_httpd_oauth.erl
@@ -0,0 +1,176 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

-module(couch_httpd_oauth).
-include("couch_db.hrl").

-export([oauth_authentication_handler/1, handle_oauth_req/1, consumer_lookup/2]).

% OAuth auth handler using per-node user db.
% Verifies the request signature against the secret stored for the
% presented oauth_token; on success attaches a user context, on signature
% failure the request falls through unauthenticated (other handlers may
% still authenticate it).
oauth_authentication_handler(#httpd{mochi_req=MochiReq}=Req) ->
    serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
        AccessToken = couch_util:get_value("oauth_token", Params),
        case couch_config:get("oauth_token_secrets", AccessToken) of
            undefined ->
                couch_httpd:send_error(Req, 400, <<"invalid_token">>,
                    <<"Invalid OAuth token.">>);
            TokenSecret ->
                ?LOG_DEBUG("OAuth URL is: ~p", [URL]),
                case oauth:verify(Signature, atom_to_list(MochiReq:get(method)), URL, Params, Consumer, TokenSecret) of
                    true ->
                        set_user_ctx(Req, AccessToken);
                    false ->
                        % signature mismatch: pass the request on unchanged
                        Req
                end
        end
    end, true).

% Look up the consumer key and get the roles to give the consumer.
% Maps the access token to a user name via config, then loads that user's
% roles from the auth cache; an unknown token is a bad_request, an unknown
% user leaves the request untouched.
set_user_ctx(Req, AccessToken) ->
    % TODO move to db storage
    Name = case couch_config:get("oauth_token_users", AccessToken) of
        undefined -> throw({bad_request, unknown_oauth_token});
        Value -> ?l2b(Value)
    end,
    case couch_auth_cache:get_user_creds(Name) of
        nil -> Req;
        User ->
            Roles = couch_util:get_value(<<"roles">>, User, []),
            Req#httpd{user_ctx=#user_ctx{name=Name, roles=Roles}}
    end.

% OAuth request_token
% Dispatcher for the three-legged OAuth endpoints
% (/_oauth/request_token, /_oauth/authorize, /_oauth/access_token).
% The request/access token values here are fixed test strings.
handle_oauth_req(#httpd{path_parts=[_OAuth, <<"request_token">>], method=Method}=Req) ->
    serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
        AccessToken = couch_util:get_value("oauth_token", Params),
        % NOTE(review): an unknown token makes TokenSecret 'undefined',
        % which is then fed straight to oauth:verify/6 -- confirm that is
        % handled gracefully by the oauth library.
        TokenSecret = couch_config:get("oauth_token_secrets", AccessToken),
        case oauth:verify(Signature, atom_to_list(Method), URL, Params, Consumer, TokenSecret) of
            true ->
                ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
            false ->
                invalid_signature(Req)
        end
    end, false);
handle_oauth_req(#httpd{path_parts=[_OAuth, <<"authorize">>]}=Req) ->
    {ok, serve_oauth_authorize(Req)};
handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>], method='GET'}=Req) ->
    serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
        case oauth:token(Params) of
            "requestkey" ->
                case oauth:verify(Signature, "GET", URL, Params, Consumer, "requestsecret") of
                    true ->
                        ok(Req, <<"oauth_token=accesskey&oauth_token_secret=accesssecret">>);
                    false ->
                        invalid_signature(Req)
                end;
            _ ->
                couch_httpd:send_error(Req, 400, <<"invalid_token">>, <<"Invalid OAuth token.">>)
        end
    end, false);
handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>]}=Req) ->
    couch_httpd:send_method_not_allowed(Req, "GET").

% 400 response used for every signature-verification failure above.
invalid_signature(Req) ->
    couch_httpd:send_error(Req, 400, <<"invalid_signature">>, <<"Invalid signature value.">>).

% This needs to be protected i.e. force user to login using HTTP Basic Auth or form-based login.
% /_oauth/authorize handler.  GET asks the user to confirm the consumer,
% POST would redirect back to the consumer with a verification code; both
% currently answer with fixed request-token strings after verifying the
% signature.
serve_oauth_authorize(#httpd{method=Method}=Req) ->
    case Method of
        'GET' ->
            % Confirm with the User that they want to authenticate the Consumer
            serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
                AccessToken = couch_util:get_value("oauth_token", Params),
                TokenSecret = couch_config:get("oauth_token_secrets", AccessToken),
                case oauth:verify(Signature, "GET", URL, Params, Consumer, TokenSecret) of
                    true ->
                        ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
                    false ->
                        invalid_signature(Req)
                end
            end, false);
        'POST' ->
            % If the User has confirmed, we direct the User back to the Consumer with a verification code
            serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
                AccessToken = couch_util:get_value("oauth_token", Params),
                TokenSecret = couch_config:get("oauth_token_secrets", AccessToken),
                case oauth:verify(Signature, "POST", URL, Params, Consumer, TokenSecret) of
                    true ->
                        %redirect(oauth_callback, oauth_token, oauth_verifier),
                        ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
                    false ->
                        invalid_signature(Req)
                end
            end, false);
        _ ->
            couch_httpd:send_method_not_allowed(Req, "GET,POST")
    end.

% Collect OAuth parameters from the request and, if a consumer can be
% resolved, call Fun(URL, Params, Consumer, Signature).  FailSilently
% controls whether a missing consumer key returns the request unchanged
% (auth-handler mode) or a 400 error (endpoint mode).
serve_oauth(#httpd{mochi_req=MochiReq}=Req, Fun, FailSilently) ->
    % 1. In the HTTP Authorization header as defined in OAuth HTTP Authorization Scheme.
    % 2. As the HTTP POST request body with a content-type of application/x-www-form-urlencoded.
    % 3. Added to the URLs in the query part (as defined by [RFC3986] section 3).
    AuthHeader = case MochiReq:get_header_value("authorization") of
        undefined ->
            "";
        Else ->
            % only honour an Authorization header of scheme "OAuth"
            [Head | Tail] = re:split(Else, "\\s", [{parts, 2}, {return, list}]),
            case [string:to_lower(Head) | Tail] of
                ["oauth", Rest] -> Rest;
                _ -> ""
            end
    end,
    HeaderParams = oauth_uri:params_from_header_string(AuthHeader),
    %Realm = couch_util:get_value("realm", HeaderParams),
    % header params (minus realm) are merged with the query string params
    Params = proplists:delete("realm", HeaderParams) ++ MochiReq:parse_qs(),
    ?LOG_DEBUG("OAuth Params: ~p", [Params]),
    case couch_util:get_value("oauth_version", Params, "1.0") of
        "1.0" ->
            case couch_util:get_value("oauth_consumer_key", Params, undefined) of
                undefined ->
                    case FailSilently of
                        true -> Req;
                        false -> couch_httpd:send_error(Req, 400, <<"invalid_consumer">>, <<"Invalid consumer.">>)
                    end;
                ConsumerKey ->
                    SigMethod = couch_util:get_value("oauth_signature_method", Params),
                    case consumer_lookup(ConsumerKey, SigMethod) of
                        none ->
                            couch_httpd:send_error(Req, 400, <<"invalid_consumer">>, <<"Invalid consumer (key or signature method).">>);
                        Consumer ->
                            Signature = couch_util:get_value("oauth_signature", Params),
                            URL = couch_httpd:absolute_uri(Req, MochiReq:get(raw_path)),
                            % the signature itself is excluded from the
                            % params used to verify it
                            Fun(URL, proplists:delete("oauth_signature", Params),
                                Consumer, Signature)
                    end
            end;
        _ ->
            couch_httpd:send_error(Req, 400, <<"invalid_oauth_version">>, <<"Invalid OAuth version.">>)
    end.

% Resolve a consumer key + signature-method string into a
% {Key, Secret, Method} consumer tuple, or 'none' if either the method is
% unsupported or no secret is configured for the key.
consumer_lookup(Key, MethodStr) ->
    SignatureMethod = case MethodStr of
        "PLAINTEXT" -> plaintext;
        "HMAC-SHA1" -> hmac_sha1;
        %"RSA-SHA1" -> rsa_sha1;
        _Else -> undefined
    end,
    case SignatureMethod of
        undefined -> none;
        _SupportedMethod ->
            case couch_config:get("oauth_consumer_secrets", Key, undefined) of
                undefined -> none;
                Secret -> {Key, Secret, SignatureMethod}
            end
    end.

% Plain 200 response with the given body and no extra headers.
ok(#httpd{mochi_req=MochiReq}, Body) ->
    {ok, MochiReq:respond({200, [], Body})}.
diff --git a/apps/couch/src/couch_httpd_rewrite.erl b/apps/couch/src/couch_httpd_rewrite.erl new file mode 100644 index 00000000..ca4ac1f0 --- /dev/null +++ b/apps/couch/src/couch_httpd_rewrite.erl @@ -0,0 +1,425 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. +% +% bind_path is based on bind method from Webmachine + + +%% @doc Module for URL rewriting by pattern matching. + +-module(couch_httpd_rewrite). +-export([handle_rewrite_req/3]). +-include("couch_db.hrl"). + +-define(SEPARATOR, $\/). +-define(MATCH_ALL, {bind, <<"*">>}). + + +%% doc The http rewrite handler. All rewriting is done from +%% /dbname/_design/ddocname/_rewrite by default. +%% +%% each rules should be in rewrites member of the design doc. +%% Ex of a complete rule : +%% +%% { +%% .... +%% "rewrites": [ +%% { +%% "from": "", +%% "to": "index.html", +%% "method": "GET", +%% "query": {} +%% } +%% ] +%% } +%% +%% from: is the path rule used to bind current uri to the rule. It +%% use pattern matching for that. +%% +%% to: rule to rewrite an url. It can contain variables depending on binding +%% variables discovered during pattern matching and query args (url args and from +%% the query member.) +%% +%% method: method to bind the request method to the rule. by default "*" +%% query: query args you want to define they can contain dynamic variable +%% by binding the key to the bindings +%% +%% +%% to and from are path with patterns. pattern can be string starting with ":" or +%% "*". 
ex: +%% /somepath/:var/* +%% +%% This path is converted in erlang list by splitting "/". Each var are +%% converted in atom. "*" is converted to '*' atom. The pattern matching is done +%% by splitting "/" in request url in a list of token. A string pattern will +%% match equal token. The star atom ('*' in single quotes) will match any number +%% of tokens, but may only be present as the last pathtern in a pathspec. If all +%% tokens are matched and all pathterms are used, then the pathspec matches. It works +%% like webmachine. Each identified token will be reused in to rule and in query +%% +%% The pattern matching is done by first matching the request method to a rule. by +%% default all methods match a rule. (method is equal to "*" by default). Then +%% It will try to match the path to one rule. If no rule match, then a 404 error +%% is displayed. +%% +%% Once a rule is found we rewrite the request url using the "to" and +%% "query" members. The identified token are matched to the rule and +%% will replace var. if '*' is found in the rule it will contain the remaining +%% part if it exists. 
%%
%% Examples:
%%
%% Dispatch rule            URL             TO                  Tokens
%%
%% {"from": "/a/b",         /a/b?k=v        /some/b?k=v         var =:= b
%% "to": "/some/"}                                              k = v
%%
%% {"from": "/a/b",         /a/b            /some/b?var=b       var =:= b
%% "to": "/some/:var"}
%%
%% {"from": "/a",           /a              /some
%% "to": "/some/*"}
%%
%% {"from": "/a/*",         /a/b/c          /some/b/c
%% "to": "/some/*"}
%%
%% {"from": "/a",           /a              /some
%% "to": "/some/*"}
%%
%% {"from": "/a/:foo/*",    /a/b/c          /some/b/c?foo=b     foo =:= b
%% "to": "/some/:foo/*"}
%%
%% {"from": "/a/:foo",      /a/b            /some/?k=b&foo=b    foo =:= b
%% "to": "/some",
%% "query": {
%% "k": ":foo"
%% }}
%%
%% {"from": "/a",           /a?foo=b        /some/b             foo =:= b
%% "to": "/some/:foo",
%% }}



% Entry point for /db/_design/ddoc/_rewrite/...: match the remaining path
% against the design doc's "rewrites" rules, build the rewritten raw path,
% and re-dispatch the request through couch_httpd with a fresh mochiweb
% request object.
handle_rewrite_req(#httpd{
        path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts],
        method=Method,
        mochi_req=MochiReq}=Req, _Db, DDoc) ->

    % we are in a design handler
    DesignId = <<"_design/", DesignName/binary>>,
    Prefix = <<"/", DbName/binary, "/", DesignId/binary>>,
    QueryList = couch_httpd:qs(Req),
    QueryList1 = [{to_binding(K), V} || {K, V} <- QueryList],

    #doc{body={Props}} = DDoc,

    % get rules from ddoc
    case couch_util:get_value(<<"rewrites">>, Props) of
        undefined ->
            couch_httpd:send_error(Req, 404, <<"rewrite_error">>,
                <<"Invalid path.">>);
        Rules ->
            % create dispatch list from rules
            DispatchList = [make_rule(Rule) || {Rule} <- Rules],

            %% get raw path by matching url to a rule.
            RawPath = case try_bind_path(DispatchList, couch_util:to_binary(Method), PathParts,
                    QueryList1) of
                no_dispatch_path ->
                    throw(not_found);
                {NewPathParts, Bindings} ->
                    Parts = [quote_plus(X) || X <- NewPathParts],

                    % build new path, reencode query args, eventually convert
                    % them to json
                    Path = lists:append(
                        string:join(Parts, [?SEPARATOR]),
                        case Bindings of
                            [] -> [];
                            _ -> [$?, encode_query(Bindings)]
                        end),

                    % if path is relative detect it and rewrite path
                    case mochiweb_util:safe_relative_path(Path) of
                        undefined ->
                            ?b2l(Prefix) ++ "/" ++ Path;
                        P1 ->
                            ?b2l(Prefix) ++ "/" ++ P1
                    end

            end,

            % normalize final path (fix levels "." and "..")
            RawPath1 = ?b2l(iolist_to_binary(normalize_path(RawPath))),

            ?LOG_DEBUG("rewrite to ~p ~n", [RawPath1]),

            % build a new mochiweb request
            MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
                MochiReq:get(method),
                RawPath1,
                MochiReq:get(version),
                MochiReq:get(headers)),

            % cleanup, It force mochiweb to reparse raw uri.
            MochiReq1:cleanup(),

            #httpd{
                db_url_handlers = DbUrlHandlers,
                design_url_handlers = DesignUrlHandlers,
                default_fun = DefaultFun,
                url_handlers = UrlHandlers
            } = Req,
            couch_httpd:handle_request_int(MochiReq1, DefaultFun,
                UrlHandlers, DbUrlHandlers, DesignUrlHandlers)
    end.

% URL-encode one path segment; a still-unbound {bind, X} placeholder is
% encoded from its inner value.
quote_plus({bind, X}) ->
    mochiweb_util:quote_plus(X);
quote_plus(X) ->
    mochiweb_util:quote_plus(X).

%% @doc Try to find a rule matching current url.
%% If none is found
%% 404 error not_found is raised
try_bind_path([], _Method, _PathParts, _QueryList) ->
    no_dispatch_path;
try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
    [{PathParts1, Method1}, RedirectPath, QueryArgs] = Dispatch,
    case bind_method(Method1, Method) of
        true ->
            case bind_path(PathParts1, PathParts, []) of
                {ok, Remaining, Bindings} ->
                    Bindings1 = Bindings ++ QueryList,
                    % we parse query args from the rule and fill
                    % it eventually with bindings vars
                    QueryArgs1 = make_query_list(QueryArgs, Bindings1, []),
                    % remove params in QueryLists1 that are already in
                    % QueryArgs1
                    Bindings2 = lists:foldl(fun({K, V}, Acc) ->
                        K1 = to_binding(K),
                        KV = case couch_util:get_value(K1, QueryArgs1) of
                            undefined -> [{K1, V}];
                            _V1 -> []
                        end,
                        Acc ++ KV
                    end, [], Bindings1),

                    FinalBindings = Bindings2 ++ QueryArgs1,
                    NewPathParts = make_new_path(RedirectPath, FinalBindings,
                        Remaining, []),
                    {NewPathParts, FinalBindings};
                fail ->
                    % path didn't match this rule; try the next one
                    try_bind_path(Rest, Method, PathParts, QueryList)
            end;
        false ->
            try_bind_path(Rest, Method, PathParts, QueryList)
    end.

%% rewriting dynamically the quey list given as query member in
%% rewrites. Each value is replaced by one binding or an argument
%% passed in url.
make_query_list([], _Bindings, Acc) ->
    Acc;
make_query_list([{Key, {Value}}|Rest], Bindings, Acc) ->
    % nested JSON object: re-encode it as a JSON string value
    Value1 = to_json({Value}),
    make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]);
make_query_list([{Key, Value}|Rest], Bindings, Acc) when is_binary(Value) ->
    Value1 = replace_var(Key, Value, Bindings),
    make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]);
make_query_list([{Key, Value}|Rest], Bindings, Acc) when is_list(Value) ->
    Value1 = replace_var(Key, Value, Bindings),
    make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]);
make_query_list([{Key, Value}|Rest], Bindings, Acc) ->
    make_query_list(Rest, Bindings, [{to_binding(Key), Value}|Acc]).

% Substitute ":var" references in a query value with their bound values.
% A binary ":var" is replaced directly; a list is substituted element-wise
% and re-encoded as JSON; the key-related parameters (key/startkey/endkey)
% are JSON-encoded, everything else falls through to plain JSON encoding.
replace_var(Key, Value, Bindings) ->
    case Value of
        <<":", Var/binary>> ->
            get_var(Var, Bindings, Value);
        _ when is_list(Value) ->
            Value1 = lists:foldr(fun(V, Acc) ->
                V1 = case V of
                    <<":", VName/binary>> ->
                        case get_var(VName, Bindings, V) of
                            V2 when is_list(V2) ->
                                iolist_to_binary(V2);
                            V2 -> V2
                        end;
                    _ ->

                        V
                end,
                [V1|Acc]
            end, [], Value),
            to_json(Value1);
        _ when is_binary(Value) ->
            Value;
        _ ->
            case Key of
                <<"key">> -> to_json(Value);
                <<"startkey">> -> to_json(Value);
                <<"endkey">> -> to_json(Value);
                _ ->
                    lists:flatten(?JSON_ENCODE(Value))
            end
    end.


% Look up a named binding, falling back to Default when unbound.
get_var(VarName, Props, Default) ->
    VarName1 = to_binding(VarName),
    couch_util:get_value(VarName1, Props, Default).

%% doc: build new patch from bindings. bindings are query args
%% (+ dynamic query rewritten if needed) and bindings found in
%% bind_path step.
make_new_path([], _Bindings, _Remaining, Acc) ->
    lists:reverse(Acc);
make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
    % trailing '*' absorbs whatever path segments were left unmatched
    Acc1 = lists:reverse(Acc) ++ Remaining,
    Acc1;
make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
    Acc1 = lists:reverse(Acc) ++ Remaining,
    Acc1;
make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
    P2 = case couch_util:get_value({bind, P}, Bindings) of
        % NOTE(review): the literal << "undefined">> below carries a
        % leading space inside the binary -- confirm that is intentional.
        undefined -> << "undefined">>;
        P1 -> P1
    end,
    make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
make_new_path([P|Rest], Bindings, Remaining, Acc) ->
    make_new_path(Rest, Bindings, Remaining, [P|Acc]).


%% @doc If method of the query fith the rule method. If the
%% method rule is '*', which is the default, all
%% request method will bind. It allows us to make rules
%% depending on HTTP method.
bind_method(?MATCH_ALL, _Method) ->
    true;
bind_method({bind, Method}, Method) ->
    true;
bind_method(_, _) ->
    false.


%% @doc bind path.
%% Using the rule from we try to bind variables given
%% to the current url by pattern matching
bind_path([], [], Bindings) ->
    {ok, [], Bindings};
bind_path([?MATCH_ALL], Rest, Bindings) when is_list(Rest) ->
    % trailing '*' matches any remaining segments; return them as Remaining
    {ok, Rest, Bindings};
bind_path(_, [], _) ->
    fail;
bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
    % named variable: record the binding and keep matching
    bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
    % literal segment must match exactly
    bind_path(RestToken, RestMatch, Bindings);
bind_path(_, _, _) ->
    fail.


%% normalize path.
normalize_path(Path) ->
    "/" ++ string:join(normalize_path1(string:tokens(Path,
        "/"), []), [?SEPARATOR]).


% Resolve "." and ".." segments; a ".." at the top is preserved rather
% than dropped.
normalize_path1([], Acc) ->
    lists:reverse(Acc);
normalize_path1([".."|Rest], Acc) ->
    Acc1 = case Acc of
        [] -> [".."|Acc];
        [T|_] when T =:= ".." -> [".."|Acc];
        [_|R] -> R
    end,
    normalize_path1(Rest, Acc1);
normalize_path1(["."|Rest], Acc) ->
    normalize_path1(Rest, Acc);
normalize_path1([Path|Rest], Acc) ->
    normalize_path1(Rest, [Path|Acc]).


%% @doc transform json rule in erlang for pattern matching
make_rule(Rule) ->
    Method = case couch_util:get_value(<<"method">>, Rule) of
        undefined -> ?MATCH_ALL;
        M -> to_binding(M)
    end,
    QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
        undefined -> [];
        {Args} -> Args
    end,
    FromParts = case couch_util:get_value(<<"from">>, Rule) of
        undefined -> [?MATCH_ALL];
        From ->
            parse_path(From)
    end,
    ToParts = case couch_util:get_value(<<"to">>, Rule) of
        undefined ->
            % "to" is the only mandatory member of a rule
            throw({error, invalid_rewrite_target});
        To ->
            parse_path(To)
    end,
    [{FromParts, Method}, ToParts, QueryArgs].

% Split a path on "/" and convert it to the internal token list.
parse_path(Path) ->
    {ok, SlashRE} = re:compile(<<"\\/">>),
    path_to_list(re:split(Path, SlashRE), [], 0).

%% @doc convert a path rule (from or to) to an erlang list
%% * and path variable starting by ":" are converted
%% in erlang atom.
% Convert split path segments to internal tokens: "*" becomes ?MATCH_ALL,
% ":name" becomes a {bind, Name} placeholder.  More than two ".." segments
% are rejected unless httpd/secure_rewrites is disabled.
path_to_list([], Acc, _DotDotCount) ->
    lists:reverse(Acc);
path_to_list([<<>>|R], Acc, DotDotCount) ->
    % empty segments (leading slash, "//") are dropped
    path_to_list(R, Acc, DotDotCount);
path_to_list([<<"*">>|R], Acc, DotDotCount) ->
    path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
    case couch_config:get("httpd", "secure_rewrites", "true") of
        "false" ->
            path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
        _Else ->
            ?LOG_INFO("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
            throw({insecure_rewrite_rule, "too many ../.. segments"})
    end;
path_to_list([<<"..">>|R], Acc, DotDotCount) ->
    path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
path_to_list([P|R], Acc, DotDotCount) ->
    P1 = case P of
        <<":", Var/binary>> ->
            to_binding(Var);
        _ -> P
    end,
    path_to_list(R, [P1|Acc], DotDotCount).

% URL-encode the final bindings into a query string; non-string values
% (numbers) are quoted first.
encode_query(Props) ->
    Props1 = lists:foldl(fun ({{bind, K}, V}, Acc) ->
        V1 = case is_list(V) orelse is_binary(V) of
            true -> V;
            false ->
                % probably it's a number
                quote_plus(V)
        end,
        [{K, V1} | Acc]
    end, [], Props),
    lists:flatten(mochiweb_util:urlencode(Props1)).

% Wrap a name in a {bind, Binary} placeholder (idempotent).
to_binding({bind, V}) ->
    {bind, V};
to_binding(V) when is_list(V) ->
    to_binding(?l2b(V));
to_binding(V) ->
    {bind, V}.

% Encode an EJSON term as a JSON binary.
to_json(V) ->
    iolist_to_binary(?JSON_ENCODE(V)).
diff --git a/apps/couch/src/couch_httpd_show.erl b/apps/couch/src/couch_httpd_show.erl
new file mode 100644
index 00000000..d50ca83a
--- /dev/null
+++ b/apps/couch/src/couch_httpd_show.erl
@@ -0,0 +1,399 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the
% License for the specific language governing permissions and limitations under
% the License.

-module(couch_httpd_show).

-export([handle_doc_show_req/3, handle_doc_update_req/3, handle_view_list_req/3,
    handle_view_list/6, get_fun_key/3]).

-include("couch_db.hrl").

-import(couch_httpd,
    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
    start_json_response/2,send_chunk/2,last_chunk/1,send_chunked_error/2,
    start_chunked_response/3, send_error/4]).


% /db/_design/foo/_show/bar/docid
% show converts a json doc to a response of any content-type.
% it looks up the doc an then passes it to the query server.
% then it sends the response from the query server to the http client.

% Open a doc (with conflicts), mapping "not found" / "deleted" to nil so
% the show function still runs for missing docs.
maybe_open_doc(Db, DocId) ->
    case catch couch_httpd_db:couch_doc_open(Db, DocId, nil, [conflicts]) of
        {not_found, missing} -> nil;
        {not_found,deleted} -> nil;
        Doc -> Doc
    end.
% Route the _show request: simple docid, slash-containing docid
% (remaining path parts re-joined with "/"), or no docid at all.
handle_doc_show_req(#httpd{
        path_parts=[_, _, _, _, ShowName, DocId]
    }=Req, Db, DDoc) ->

    % open the doc
    Doc = maybe_open_doc(Db, DocId),

    % we don't handle revs here b/c they are an internal api
    % returns 404 if there is no doc with DocId
    handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);

handle_doc_show_req(#httpd{
        path_parts=[_, _, _, _, ShowName, DocId|Rest]
    }=Req, Db, DDoc) ->

    DocParts = [DocId|Rest],
    DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),

    % open the doc
    Doc = maybe_open_doc(Db, DocId1),

    % we don't handle revs here b/c they are an internal api
    % pass 404 docs to the show function
    handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);

handle_doc_show_req(#httpd{
        path_parts=[_, _, _, _, ShowName]
    }=Req, Db, DDoc) ->
    % with no docid the doc is nil
    handle_doc_show(Req, Db, DDoc, ShowName, nil);

handle_doc_show_req(Req, _Db, _DDoc) ->
    send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).

% Convenience wrapper: no docid in the URL, so pass null downstream.
handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
    handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).

% Run the named show function in the query server against Doc and stream
% its response, honouring/setting the computed ETag.
handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
    % get responder for ddoc/showname
    CurrentEtag = show_etag(Req, Doc, DDoc, []),
    couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
        JsonReq = couch_httpd_external:json_req_obj(Req, Db, DocId),
        JsonDoc = couch_query_servers:json_doc(Doc),
        [<<"resp">>, ExternalResp] =
            couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName], [JsonDoc, JsonReq]),
        JsonResp = apply_etag(ExternalResp, CurrentEtag),
        couch_httpd_external:send_external_response(Req, JsonResp)
    end).



% ETag for a show response: derived from the ddoc rev, the doc rev (when
% present), the Accept header and the caller's roles.
show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
    Accept = couch_httpd:header_value(Req, "Accept"),
    DocPart = case Doc of
        nil -> nil;
        Doc -> couch_httpd:doc_etag(Doc)
    end,
    couch_httpd:make_etag({couch_httpd:doc_etag(DDoc), DocPart, Accept, UserCtx#user_ctx.roles, More}).

% Fetch {Language, Source} for a named function (e.g. shows/foo) from a
% design doc body.
get_fun_key(DDoc, Type, Name) ->
    #doc{body={Props}} = DDoc,
    Lang = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
    Src = couch_util:get_nested_json_value({Props}, [Type, Name]),
    {Lang, Src}.

% /db/_design/foo/update/bar/docid
% updates a doc based on a request
% handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
%     % anything but GET
%     send_method_not_allowed(Req, "POST,PUT,DELETE,ETC");

handle_doc_update_req(#httpd{
        path_parts=[_, _, _, _, UpdateName, DocId]
    }=Req, Db, DDoc) ->
    % a missing doc is passed to the update function as nil
    Doc = try couch_httpd_db:couch_doc_open(Db, DocId, nil, [conflicts])
    catch
        _ -> nil
    end,
    send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);

handle_doc_update_req(#httpd{
        path_parts=[_, _, _, _, UpdateName]
    }=Req, Db, DDoc) ->
    send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);

handle_doc_update_req(Req, _Db, _DDoc) ->
    send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).

% Run the named update function in the query server.  A ["up", NewDoc,
% Resp] result writes the new doc (201, with the new rev in a response
% header); any other doc slot means "no write" (200).
send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
    JsonReq = couch_httpd_external:json_req_obj(Req, Db, DocId),
    JsonDoc = couch_query_servers:json_doc(Doc),
    {Code, JsonResp1} = case couch_query_servers:ddoc_prompt(DDoc,
        [<<"updates">>, UpdateName], [JsonDoc, JsonReq]) of
        [<<"up">>, {NewJsonDoc}, {JsonResp}] ->
            Options = case couch_httpd:header_value(Req, "X-Couch-Full-Commit",
                "false") of
            "true" ->
                [full_commit];
            _ ->
                []
            end,
            NewDoc = couch_doc:from_json_obj({NewJsonDoc}),
            {ok, NewRev} = couch_db:update_doc(Db, NewDoc, Options),
            NewRevStr = couch_doc:rev_to_str(NewRev),
            % expose the new revision to the client via a response header
            JsonRespWithRev = {[{<<"headers">>,
                {[{<<"X-Couch-Update-NewRev">>, NewRevStr}]}} | JsonResp]},
            {201, JsonRespWithRev};
        [<<"up">>, _Other, JsonResp] ->
            {200, JsonResp}
    end,

    JsonResp2 = couch_util:json_apply_field({<<"code">>, Code}, JsonResp1),
    % todo set location field
    couch_httpd_external:send_external_response(Req, JsonResp2).


% view-list request with view and list from same design doc.
handle_view_list_req(#httpd{method='GET',
        path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
    handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, nil);

% view-list request with view and list from different design docs.
handle_view_list_req(#httpd{method='GET',
        path_parts=[_, _, _, _, ListName, ViewDesignName, ViewName]}=Req, Db, DDoc) ->
    handle_view_list(Req, Db, DDoc, ListName, {ViewDesignName, ViewName}, nil);

handle_view_list_req(#httpd{method='GET'}=Req, _Db, _DDoc) ->
    send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);

% POST variants accept a {"keys": [...]} body; the raw body is kept on
% the request record for the query server.
handle_view_list_req(#httpd{method='POST',
        path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
    % {Props2} = couch_httpd:json_body(Req),
    ReqBody = couch_httpd:body(Req),
    {Props2} = ?JSON_DECODE(ReqBody),
    Keys = couch_util:get_value(<<"keys">>, Props2, nil),
    handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName, {DesignName, ViewName}, Keys);

handle_view_list_req(#httpd{method='POST',
        path_parts=[_, _, _, _, ListName, ViewDesignName, ViewName]}=Req, Db, DDoc) ->
    % {Props2} = couch_httpd:json_body(Req),
    ReqBody = couch_httpd:body(Req),
    {Props2} = ?JSON_DECODE(ReqBody),
    Keys = couch_util:get_value(<<"keys">>, Props2, nil),
    handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName, {ViewDesignName, ViewName}, Keys);

handle_view_list_req(#httpd{method='POST'}=Req, _Db, _DDoc) ->
    send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);

handle_view_list_req(Req, _Db, _DDoc) ->
    send_method_not_allowed(Req, "GET,POST,HEAD").

% Load the referenced view (possibly from another design doc) and stream
% the _list output, guarded by an ETag over the view group state.
handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) ->
    ViewDesignId = <<"_design/", ViewDesignName/binary>>,
    {ViewType, View, Group, QueryArgs} = couch_httpd_view:load_view(Req, Db, {ViewDesignId, ViewName}, Keys),
    Etag = list_etag(Req, Db, Group, {couch_httpd:doc_etag(DDoc), Keys}),
    couch_httpd:etag_respond(Req, Etag, fun() ->
        output_list(ViewType, Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group)
    end).

% ETag for a list response: view group state plus ddoc rev, keys, Accept
% header and caller roles.
list_etag(#httpd{user_ctx=UserCtx}=Req, Db, Group, More) ->
    Accept = couch_httpd:header_value(Req, "Accept"),
    couch_httpd_view:view_group_etag(Group, Db, {More, Accept, UserCtx#user_ctx.roles}).
+
+%% Dispatch on the view type resolved by load_view/4.
+output_list(map, Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group) ->
+    output_map_list(Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group);
+output_list(reduce, Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group) ->
+    output_reduce_list(Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group).
+
+% next step:
+% use with_ddoc_proc/2 to make this simpler
+%% Stream a _list rendering of a map view. With Keys =:= nil a single fold
+%% over the key range is done; otherwise the fold is repeated per key with
+%% start_key = end_key = Key, threading the fold accumulator through so
+%% limit/skip apply across the whole multi-key result.
+output_map_list(Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group) ->
+    #view_query_args{
+        limit = Limit,
+        skip = SkipCount
+    } = QueryArgs,
+
+    FoldAccInit = {Limit, SkipCount, undefined, []},
+    {ok, RowCount} = couch_view:get_row_count(View),
+
+    couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
+
+        % note the nested bind: StartListRespFun is captured here while
+        % also being stored in the helper-funs record, so finish_list can
+        % reuse it to render a head for an empty result set.
+        ListFoldHelpers = #view_fold_helper_funs{
+            reduce_count = fun couch_view:reduce_to_count/1,
+            start_response = StartListRespFun = make_map_start_resp_fun(QServer, Db, LName),
+            send_row = make_map_send_row_fun(QServer)
+        },
+        CurrentSeq = Group#group.current_seq,
+
+        {ok, _, FoldResult} = case Keys of
+        nil ->
+            FoldlFun = couch_httpd_view:make_view_fold_fun(Req, QueryArgs, Etag, Db, CurrentSeq, RowCount, ListFoldHelpers),
+            couch_view:fold(View, FoldlFun, FoldAccInit,
+                couch_httpd_view:make_key_options(QueryArgs));
+        Keys ->
+            lists:foldl(
+                fun(Key, {ok, _, FoldAcc}) ->
+                    QueryArgs2 = QueryArgs#view_query_args{
+                        start_key = Key,
+                        end_key = Key
+                    },
+                    FoldlFun = couch_httpd_view:make_view_fold_fun(Req, QueryArgs2, Etag, Db, CurrentSeq, RowCount, ListFoldHelpers),
+                    couch_view:fold(View, FoldlFun, FoldAcc,
+                        couch_httpd_view:make_key_options(QueryArgs2))
+                end, {ok, nil, FoldAccInit}, Keys)
+        end,
+        finish_list(Req, QServer, Etag, FoldResult, StartListRespFun, CurrentSeq, RowCount)
+    end).
+
+
+%% Stream a _list rendering of a reduce view, mirroring output_map_list:
+%% single fold_reduce for Keys =:= nil, otherwise one fold_reduce per key
+%% with the accumulator threaded through. TotalRows is null for reduce
+%% output (see finish_list / render_head_for_empty_list).
+output_reduce_list(Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group) ->
+    #view_query_args{
+        limit = Limit,
+        skip = SkipCount,
+        group_level = GroupLevel
+    } = QueryArgs,
+
+    CurrentSeq = Group#group.current_seq,
+
+    couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
+        StartListRespFun = make_reduce_start_resp_fun(QServer, Db, LName),
+        SendListRowFun = make_reduce_send_row_fun(QServer, Db),
+        {ok, GroupRowsFun, RespFun} = couch_httpd_view:make_reduce_fold_funs(Req,
+            GroupLevel, QueryArgs, Etag, CurrentSeq,
+            #reduce_fold_helper_funs{
+                start_response = StartListRespFun,
+                send_row = SendListRowFun
+            }),
+        FoldAccInit = {Limit, SkipCount, undefined, []},
+        {ok, FoldResult} = case Keys of
+        nil ->
+            couch_view:fold_reduce(View, RespFun, FoldAccInit, [{key_group_fun, GroupRowsFun} |
+                couch_httpd_view:make_key_options(QueryArgs)]);
+        Keys ->
+            lists:foldl(
+                fun(Key, {ok, FoldAcc}) ->
+                    couch_view:fold_reduce(View, RespFun, FoldAcc,
+                        [{key_group_fun, GroupRowsFun} |
+                            couch_httpd_view:make_key_options(
+                                QueryArgs#view_query_args{start_key=Key, end_key=Key})]
+                    )
+                end, {ok, FoldAccInit}, Keys)
+        end,
+        finish_list(Req, QServer, Etag, FoldResult, StartListRespFun, CurrentSeq, null)
+    end).
+
+
+%% Start-response callback for map lists: builds the JSON head object
+%% (total_rows/offset/update_seq) and kicks off the list function.
+make_map_start_resp_fun(QueryServer, Db, LName) ->
+    fun(Req, Etag, TotalRows, Offset, _Acc, UpdateSeq) ->
+        Head = {[{<<"total_rows">>, TotalRows}, {<<"offset">>, Offset}, {<<"update_seq">>, UpdateSeq}]},
+        start_list_resp(QueryServer, LName, Req, Db, Head, Etag)
+    end.
+
+%% Start-response callback for reduce lists: head carries update_seq only
+%% (reduce results have no total_rows/offset).
+make_reduce_start_resp_fun(QueryServer, Db, LName) ->
+    fun(Req2, Etag, _Acc, UpdateSeq) ->
+        start_list_resp(QueryServer, LName, Req2, Db, {[{<<"update_seq">>, UpdateSeq}]}, Etag)
+    end.
+
+%% Prompt the list function with the head object; it replies
+%% [<<"start">>, Chunks, ResponseObject]. The response object (with our
+%% ETag/Vary applied) determines status code and headers of the chunked
+%% HTTP response; the initial chunks are returned to the caller as the
+%% first body fragment. ?b2l(?l2b(...)) flattens the chunk iolist.
+start_list_resp(QServer, LName, Req, Db, Head, Etag) ->
+    JsonReq = couch_httpd_external:json_req_obj(Req, Db),
+    [<<"start">>,Chunks,JsonResp] = couch_query_servers:ddoc_proc_prompt(QServer,
+        [<<"lists">>, LName], [Head, JsonReq]),
+    JsonResp2 = apply_etag(JsonResp, Etag),
+    #extern_resp_args{
+        code = Code,
+        ctype = CType,
+        headers = ExtHeaders
+    } = couch_httpd_external:parse_external_response(JsonResp2),
+    JsonHeaders = couch_httpd_external:default_or_content_type(CType, ExtHeaders),
+    {ok, Resp} = start_chunked_response(Req, Code, JsonHeaders),
+    {ok, Resp, ?b2l(?l2b(Chunks))}.
+
+%% Per-row callback for map lists (receives the include_docs flag).
+make_map_send_row_fun(QueryServer) ->
+    fun(Resp, Db, Row, IncludeDocs, RowFront) ->
+        send_list_row(Resp, QueryServer, Db, Row, RowFront, IncludeDocs)
+    end.
+
+%% Per-row callback for reduce lists (docs are never included).
+make_reduce_send_row_fun(QueryServer, Db) ->
+    fun(Resp, Row, RowFront) ->
+        send_list_row(Resp, QueryServer, Db, Row, RowFront, false)
+    end.
+
+%% Feed one row to the list function and stream whatever it emits.
+%% <<"chunks">> means keep folding; <<"end">> means the list function is
+%% done and the fold should stop. A thrown error is sent down the already
+%% open chunked response, then re-thrown as {already_sent, ...} so callers
+%% know not to start a second response.
+send_list_row(Resp, QueryServer, Db, Row, RowFront, IncludeDoc) ->
+    try
+        [Go,Chunks] = prompt_list_row(QueryServer, Db, Row, IncludeDoc),
+        Chunk = RowFront ++ ?b2l(?l2b(Chunks)),
+        send_non_empty_chunk(Resp, Chunk),
+        case Go of
+        <<"chunks">> ->
+            {ok, ""};
+        <<"end">> ->
+            {stop, stop}
+        end
+    catch
+        throw:Error ->
+            send_chunked_error(Resp, Error),
+            throw({already_sent, Resp, Error})
+    end.
+
+
+%% Map rows ({{Key, DocId}, Value}) are rendered through the shared view
+%% row builder (which can attach the doc); reduce rows ({Key, Value}) are
+%% encoded directly.
+prompt_list_row({Proc, _DDocId}, Db, {{Key, DocId}, Value}, IncludeDoc) ->
+    JsonRow = couch_httpd_view:view_row_obj(Db, {{Key, DocId}, Value}, IncludeDoc),
+    couch_query_servers:proc_prompt(Proc, [<<"list_row">>, JsonRow]);
+
+prompt_list_row({Proc, _DDocId}, _, {Key, Value}, _IncludeDoc) ->
+    JsonRow = {[{key, Key}, {value, Value}]},
+    couch_query_servers:proc_prompt(Proc, [<<"list_row">>, JsonRow]).
+
+%% Avoid sending a zero-length chunk, which would terminate the HTTP
+%% chunked encoding prematurely.
+send_non_empty_chunk(Resp, Chunk) ->
+    case Chunk of
+    [] -> ok;
+    _ -> send_chunk(Resp, Chunk)
+    end.
+
+%% Close out a list response. Map folds carry a 4-tuple accumulator and
+%% reduce folds a 5-tuple; the first case normalizes both to 5 elements.
+%% Then: response never started -> render the head for an empty list and
+%% emit list_end output; fold stopped early (RowAcc =:= stop) -> the list
+%% function already ended itself; otherwise -> prompt list_end and flush
+%% its chunks. All three branches bind Resp, so last_chunk/1 can use it
+%% after the case.
+finish_list(Req, {Proc, _DDocId}, Etag, FoldResult, StartFun, CurrentSeq, TotalRows) ->
+    FoldResult2 = case FoldResult of
+        {Limit, SkipCount, Response, RowAcc} ->
+            {Limit, SkipCount, Response, RowAcc, nil};
+        Else ->
+            Else
+    end,
+    case FoldResult2 of
+        {_, _, undefined, _, _} ->
+            {ok, Resp, BeginBody} =
+                render_head_for_empty_list(StartFun, Req, Etag, CurrentSeq, TotalRows),
+            [<<"end">>, Chunks] = couch_query_servers:proc_prompt(Proc, [<<"list_end">>]),
+            Chunk = BeginBody ++ ?b2l(?l2b(Chunks)),
+            send_non_empty_chunk(Resp, Chunk);
+        {_, _, Resp, stop, _} ->
+            ok;
+        {_, _, Resp, _, _} ->
+            [<<"end">>, Chunks] = couch_query_servers:proc_prompt(Proc, [<<"list_end">>]),
+            send_non_empty_chunk(Resp, ?b2l(?l2b(Chunks)))
+    end,
+    last_chunk(Resp).
+
+
+%% Reduce start-resp funs take 4 args (no TotalRows/Offset); map ones take
+%% 6. TotalRows =:= null identifies the reduce case.
+render_head_for_empty_list(StartListRespFun, Req, Etag, CurrentSeq, null) ->
+    StartListRespFun(Req, Etag, [], CurrentSeq); % for reduce
+render_head_for_empty_list(StartListRespFun, Req, Etag, CurrentSeq, TotalRows) ->
+    StartListRespFun(Req, Etag, TotalRows, null, [], CurrentSeq).
+
+%% Force our ETag and Vary: Accept headers onto an external response
+%% object, creating a headers member if the function supplied none.
+apply_etag({ExternalResponse}, CurrentEtag) ->
+    % Here we embark on the delicate task of replacing or creating the
+    % headers on the JsonResponse object. We need to control the Etag and
+    % Vary headers. If the external function controls the Etag, we'd have to
+    % run it to check for a match, which sort of defeats the purpose.
+    case couch_util:get_value(<<"headers">>, ExternalResponse, nil) of
+    nil ->
+        % no JSON headers
+        % add our Etag and Vary headers to the response
+        {[{<<"headers">>, {[{<<"Etag">>, CurrentEtag}, {<<"Vary">>, <<"Accept">>}]}} | ExternalResponse]};
+    JsonHeaders ->
+        % JsonHeaders is already bound, so this clause matches only the
+        % actual headers field; every other field passes through as-is.
+        {[case Field of
+        {<<"headers">>, JsonHeaders} -> % add our headers
+            JsonHeadersEtagged = couch_util:json_apply_field({<<"Etag">>, CurrentEtag}, JsonHeaders),
+            JsonHeadersVaried = couch_util:json_apply_field({<<"Vary">>, <<"Accept">>}, JsonHeadersEtagged),
+            {<<"headers">>, JsonHeadersVaried};
+        _ -> % skip non-header fields
+            Field
+        end || Field <- ExternalResponse]}
+    end.
+
diff --git a/apps/couch/src/couch_httpd_stats_handlers.erl b/apps/couch/src/couch_httpd_stats_handlers.erl
new file mode 100644
index 00000000..41aeaed0
--- /dev/null
+++ b/apps/couch/src/couch_httpd_stats_handlers.erl
@@ -0,0 +1,56 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_stats_handlers).
+-include("couch_db.hrl").
+
+-export([handle_stats_req/1]).
+-import(couch_httpd, [
+    send_json/2, send_json/3, send_json/4, send_method_not_allowed/2,
+    start_json_response/2, send_chunk/2, end_json_response/1,
+    start_chunked_response/3, send_error/4
+]).
+
+%% GET /_stats — optionally flush the aggregator, then return every
+%% collected statistic over the requested range.
+handle_stats_req(#httpd{method='GET', path_parts=[_]}=Req) ->
+    flush(Req),
+    send_json(Req, couch_stats_aggregator:all(range(Req)));
+
+%% GET /_stats/Mod — a stat name needs both a module and a key part.
+%% (Message fixed: previously read "exactly to parts".)
+handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod]}) ->
+    throw({bad_request, <<"Stat names must have exactly two parts.">>});
+
+%% GET /_stats/Mod/Key — return a single statistic, wrapped as
+%% {Mod: {Key: Stats}}.
+handle_stats_req(#httpd{method='GET', path_parts=[_, Mod, Key]}=Req) ->
+    flush(Req),
+    % NOTE(review): list_to_atom/1 on client-supplied path segments can
+    % grow the atom table without bound (atoms are never GC'd); consider
+    % list_to_existing_atom/1 since valid stat names already exist as atoms.
+    Stats = couch_stats_aggregator:get_json({list_to_atom(binary_to_list(Mod)),
+        list_to_atom(binary_to_list(Key))}, range(Req)),
+    send_json(Req, {[{Mod, {[{Key, Stats}]}}]});
+
+%% More than two name parts is equally malformed.
+handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod, _Key | _Extra]}) ->
+    throw({bad_request, <<"Stat names must have exactly two parts.">>});
+
+%% Only GET is supported.
+handle_stats_req(Req) ->
+    send_method_not_allowed(Req, "GET").
+
+%% Parse the ?range= query parameter as an integer; defaults to 0.
+range(Req) ->
+    case couch_util:get_value("range", couch_httpd:qs(Req)) of
+    undefined ->
+        0;
+    Value ->
+        list_to_integer(Value)
+    end.
+
+%% ?flush=true forces the aggregator to collect a fresh sample before
+%% the stats are read; any other value is a no-op.
+flush(Req) ->
+    case couch_util:get_value("flush", couch_httpd:qs(Req)) of
+    "true" ->
+        couch_stats_aggregator:collect_sample();
+    _Else ->
+        ok
+    end.
diff --git a/apps/couch/src/couch_httpd_view.erl b/apps/couch/src/couch_httpd_view.erl
new file mode 100644
index 00000000..e1a0dfad
--- /dev/null
+++ b/apps/couch/src/couch_httpd_view.erl
@@ -0,0 +1,692 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_view).
+-include("couch_db.hrl").
+
+-export([handle_view_req/3,handle_temp_view_req/2]).
+
+-export([get_stale_type/1, get_reduce_type/1, parse_view_params/3]).
+-export([make_view_fold_fun/7, finish_view_fold/4, finish_view_fold/5, view_row_obj/3]).
+-export([view_group_etag/2, view_group_etag/3, make_reduce_fold_funs/6]).
+-export([design_doc_view/5, parse_bool_param/1, doc_member/2]).
+-export([make_key_options/1, load_view/4]).
+
+-import(couch_httpd,
+    [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,send_chunk/2,
+    start_json_response/2, start_json_response/3, end_json_response/1,
+    send_chunked_error/2]).
+
+-import(couch_db,[get_update_seq/1]).
+
+%% Serve a view from design doc DName. Tries the name as a map view
+%% first; if not found, as a reduce view — which is rendered as its
+%% underlying map view when ?reduce=false. Bumps the httpd view_reads
+%% stat once per request.
+design_doc_view(Req, Db, DName, ViewName, Keys) ->
+    DesignId = <<"_design/", DName/binary>>,
+    Stale = get_stale_type(Req),
+    Reduce = get_reduce_type(Req),
+    Result = case couch_view:get_map_view(Db, DesignId, ViewName, Stale) of
+    {ok, View, Group} ->
+        QueryArgs = parse_view_params(Req, Keys, map),
+        output_map_view(Req, View, Group, Db, QueryArgs, Keys);
+    {not_found, Reason} ->
+        case couch_view:get_reduce_view(Db, DesignId, ViewName, Stale) of
+        {ok, ReduceView, Group} ->
+            case Reduce of
+            false ->
+                % reduce view queried with reduce=false: render the
+                % map half, validated with the red_map param rules
+                QueryArgs = parse_view_params(Req, Keys, red_map),
+                MapView = couch_view:extract_map_view(ReduceView),
+                output_map_view(Req, MapView, Group, Db, QueryArgs, Keys);
+            _ ->
+                QueryArgs = parse_view_params(Req, Keys, reduce),
+                output_reduce_view(Req, Db, ReduceView, Group, QueryArgs, Keys)
+            end;
+        _ ->
+            % report the original map-view lookup failure
+            throw({not_found, Reason})
+        end
+    end,
+    couch_stats_collector:increment({httpd, view_reads}),
+    Result.
+
+%% GET: plain view query, no multi-key fetch.
+handle_view_req(#httpd{method='GET',
+        path_parts=[_, _, DName, _, ViewName]}=Req, Db, _DDoc) ->
+    design_doc_view(Req, Db, DName, ViewName, nil);
+
+%% POST: JSON body may carry a `keys` array for multi-key fetch; a body
+%% without `keys` is treated like a GET.
+handle_view_req(#httpd{method='POST',
+        path_parts=[_, _, DName, _, ViewName]}=Req, Db, _DDoc) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    {Fields} = couch_httpd:json_body_obj(Req),
+    case couch_util:get_value(<<"keys">>, Fields, nil) of
+    nil ->
+        Fmt = "POST to view ~p/~p in database ~p with no keys member.",
+        ?LOG_DEBUG(Fmt, [DName, ViewName, Db]),
+        design_doc_view(Req, Db, DName, ViewName, nil);
+    Keys when is_list(Keys) ->
+        design_doc_view(Req, Db, DName, ViewName, Keys);
+    _ ->
+        throw({bad_request, "`keys` member must be a array."})
+    end;
+
+handle_view_req(Req, _Db, _DDoc) ->
+    send_method_not_allowed(Req, "GET,POST,HEAD").
+
+%% POST /db/_temp_view — admin-only ad-hoc view. The body supplies
+%% language, options, map source and optionally reduce source and keys.
+%% A reduce member (unless ?reduce=false) selects the reduce path.
+handle_temp_view_req(#httpd{method='POST'}=Req, Db) ->
+    couch_httpd:validate_ctype(Req, "application/json"),
+    ok = couch_db:check_is_admin(Db),
+    couch_stats_collector:increment({httpd, temporary_view_reads}),
+    {Props} = couch_httpd:json_body_obj(Req),
+    Language = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
+    {DesignOptions} = couch_util:get_value(<<"options">>, Props, {[]}),
+    MapSrc = couch_util:get_value(<<"map">>, Props),
+    Keys = couch_util:get_value(<<"keys">>, Props, nil),
+    Reduce = get_reduce_type(Req),
+    case couch_util:get_value(<<"reduce">>, Props, null) of
+    null ->
+        QueryArgs = parse_view_params(Req, Keys, map),
+        {ok, View, Group} = couch_view:get_temp_map_view(Db, Language,
+            DesignOptions, MapSrc),
+        output_map_view(Req, View, Group, Db, QueryArgs, Keys);
+    _ when Reduce =:= false ->
+        QueryArgs = parse_view_params(Req, Keys, red_map),
+        {ok, View, Group} = couch_view:get_temp_map_view(Db, Language,
+            DesignOptions, MapSrc),
+        output_map_view(Req, View, Group, Db, QueryArgs, Keys);
+    RedSrc ->
+        QueryArgs = parse_view_params(Req, Keys, reduce),
+        {ok, View, Group} = couch_view:get_temp_reduce_view(Db, Language,
+            DesignOptions, MapSrc, RedSrc),
+        output_reduce_view(Req, Db, View, Group, QueryArgs, Keys)
+    end;
+
+handle_temp_view_req(Req, _Db) ->
+    send_method_not_allowed(Req, "POST").
+
+%% Single-range map view output: one fold over the key range, inside an
+%% ETag guard derived from the view group.
+output_map_view(Req, View, Group, Db, QueryArgs, nil) ->
+    #view_query_args{
+        limit = Limit,
+        skip = SkipCount
+    } = QueryArgs,
+    CurrentEtag = view_group_etag(Group, Db),
+    couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+        {ok, RowCount} = couch_view:get_row_count(View),
+        FoldlFun = make_view_fold_fun(Req, QueryArgs, CurrentEtag, Db, Group#group.current_seq, RowCount, #view_fold_helper_funs{reduce_count=fun couch_view:reduce_to_count/1}),
+        FoldAccInit = {Limit, SkipCount, undefined, []},
+        {ok, LastReduce, FoldResult} = couch_view:fold(View,
+            FoldlFun, FoldAccInit, make_key_options(QueryArgs)),
+        finish_view_fold(Req, RowCount,
+            couch_view:reduce_to_count(LastReduce), FoldResult)
+    end);
+
+%% Multi-key map view output: fold once per key with start_key = end_key,
+%% threading the accumulator so limit/skip span the whole key set. The
+%% Keys are folded into the ETag so different key sets don't collide.
+output_map_view(Req, View, Group, Db, QueryArgs, Keys) ->
+    #view_query_args{
+        limit = Limit,
+        skip = SkipCount
+    } = QueryArgs,
+    CurrentEtag = view_group_etag(Group, Db, Keys),
+    couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+        {ok, RowCount} = couch_view:get_row_count(View),
+        FoldAccInit = {Limit, SkipCount, undefined, []},
+        {LastReduce, FoldResult} = lists:foldl(fun(Key, {_, FoldAcc}) ->
+            FoldlFun = make_view_fold_fun(Req, QueryArgs#view_query_args{},
+                CurrentEtag, Db, Group#group.current_seq, RowCount,
+                #view_fold_helper_funs{
+                    reduce_count = fun couch_view:reduce_to_count/1
+                }),
+            {ok, LastReduce, FoldResult} = couch_view:fold(View, FoldlFun,
+                FoldAcc, make_key_options(
+                    QueryArgs#view_query_args{start_key=Key, end_key=Key})),
+            {LastReduce, FoldResult}
+        end, {{[],[]}, FoldAccInit}, Keys),
+        finish_view_fold(Req, RowCount, couch_view:reduce_to_count(LastReduce),
+            FoldResult, [{update_seq,Group#group.current_seq}])
+    end).
+
+%% Single-range reduce view output: one fold_reduce over the key range,
+%% grouped per the group_level rows function, inside an ETag guard.
+output_reduce_view(Req, Db, View, Group, QueryArgs, nil) ->
+    #view_query_args{
+        limit = Limit,
+        skip = Skip,
+        group_level = GroupLevel
+    } = QueryArgs,
+    CurrentEtag = view_group_etag(Group, Db),
+    couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+        {ok, GroupRowsFun, RespFun} = make_reduce_fold_funs(Req, GroupLevel,
+            QueryArgs, CurrentEtag, Group#group.current_seq,
+            #reduce_fold_helper_funs{}),
+        FoldAccInit = {Limit, Skip, undefined, []},
+        {ok, {_, _, Resp, _}} = couch_view:fold_reduce(View,
+            RespFun, FoldAccInit, [{key_group_fun, GroupRowsFun} |
+                make_key_options(QueryArgs)]),
+        finish_reduce_fold(Req, Resp)
+    end);
+
+%% Multi-key reduce view output: fold_reduce once per key, with limit and
+%% skip reapplied for each key; the response handle and row accumulator
+%% are threaded through so the output forms one JSON rows array.
+output_reduce_view(Req, Db, View, Group, QueryArgs, Keys) ->
+    #view_query_args{
+        limit = Limit,
+        skip = Skip,
+        group_level = GroupLevel
+    } = QueryArgs,
+    CurrentEtag = view_group_etag(Group, Db, Keys),
+    couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+        {ok, GroupRowsFun, RespFun} = make_reduce_fold_funs(Req, GroupLevel,
+            QueryArgs, CurrentEtag, Group#group.current_seq,
+            #reduce_fold_helper_funs{}),
+        {Resp, _RedAcc3} = lists:foldl(
+            fun(Key, {Resp, RedAcc}) ->
+                % run the reduce once for each key in keys, with limit etc
+                % reapplied for each key
+                FoldAccInit = {Limit, Skip, Resp, RedAcc},
+                {_, {_, _, Resp2, RedAcc2}} = couch_view:fold_reduce(View,
+                    RespFun, FoldAccInit, [{key_group_fun, GroupRowsFun} |
+                        make_key_options(QueryArgs#view_query_args{
+                            start_key=Key, end_key=Key})]),
+                % Switch to comma
+                {Resp2, RedAcc2}
+            end,
+        {undefined, []}, Keys), % Start with no comma
+        finish_reduce_fold(Req, Resp, [{update_seq,Group#group.current_seq}])
+    end).
+
+%% When a fold direction is reversed, the sentinel doc-id bounds must be
+%% swapped too; ordinary keys pass through unchanged.
+reverse_key_default(?MIN_STR) -> ?MAX_STR;
+reverse_key_default(?MAX_STR) -> ?MIN_STR;
+reverse_key_default(Key) -> Key.
+
+%% ?stale= parsed via list_to_existing_atom, so arbitrary client strings
+%% cannot mint new atoms; absent -> the atom nil.
+get_stale_type(Req) ->
+    list_to_existing_atom(couch_httpd:qs_value(Req, "stale", "nil")).
+
+%% ?reduce= parsed the same way; absent -> the atom true.
+get_reduce_type(Req) ->
+    list_to_existing_atom(couch_httpd:qs_value(Req, "reduce", "true")).
+
+%% Resolve a view for _list rendering: returns {map|reduce, View, Group,
+%% QueryArgs}. Mirrors design_doc_view's map-then-reduce lookup.
+%% NOTE(review): the reduce=false branch here passes the atom map_red to
+%% parse_view_params, while design_doc_view uses red_map for the same
+%% situation; both atoms fall through the reduce-specific validation
+%% identically, but the inconsistent spelling looks accidental — confirm.
+load_view(Req, Db, {ViewDesignId, ViewName}, Keys) ->
+    Stale = get_stale_type(Req),
+    Reduce = get_reduce_type(Req),
+    case couch_view:get_map_view(Db, ViewDesignId, ViewName, Stale) of
+    {ok, View, Group} ->
+        QueryArgs = parse_view_params(Req, Keys, map),
+        {map, View, Group, QueryArgs};
+    {not_found, _Reason} ->
+        case couch_view:get_reduce_view(Db, ViewDesignId, ViewName, Stale) of
+        {ok, ReduceView, Group} ->
+            case Reduce of
+            false ->
+                QueryArgs = parse_view_params(Req, Keys, map_red),
+                MapView = couch_view:extract_map_view(ReduceView),
+                {map, MapView, Group, QueryArgs};
+            _ ->
+                QueryArgs = parse_view_params(Req, Keys, reduce),
+                {reduce, ReduceView, Group, QueryArgs}
+            end;
+        {not_found, Reason} ->
+            throw({not_found, Reason})
+        end
+    end.
+
+% query_parse_error could be removed
+% we wouldn't need to pass the view type, it'd just parse params.
+% I'm not sure what to do about the error handling, but
+% it might simplify things to have a parse_view_params function
+% that doesn't throw().
+%% Turn the query string into a #view_query_args record: each raw
+%% parameter is expanded by parse_view_param/2, then validated and folded
+%% into the record by validate_view_query/3 (in query-string order). The
+%% trailing case only enforces that a multi-key reduce fetch must use
+%% group=true; its value is discarded and QueryArgs is returned.
+parse_view_params(Req, Keys, ViewType) ->
+    QueryList = couch_httpd:qs(Req),
+    QueryParams =
+    lists:foldl(fun({K, V}, Acc) ->
+        parse_view_param(K, V) ++ Acc
+    end, [], QueryList),
+    IsMultiGet = (Keys =/= nil),
+    Args = #view_query_args{
+        view_type=ViewType,
+        multi_get=IsMultiGet
+    },
+    QueryArgs = lists:foldl(fun({K, V}, Args2) ->
+        validate_view_query(K, V, Args2)
+    end, Args, lists:reverse(QueryParams)), % Reverse to match QS order.
+
+    GroupLevel = QueryArgs#view_query_args.group_level,
+    case {ViewType, GroupLevel, IsMultiGet} of
+    {reduce, exact, true} ->
+        QueryArgs;
+    {reduce, _, false} ->
+        QueryArgs;
+    {reduce, _, _} ->
+        % we can simplify code if we just drop this error message.
+        Msg = <<"Multi-key fetchs for reduce "
+                "view must include `group=true`">>,
+        throw({query_parse_error, Msg});
+    _ ->
+        QueryArgs
+    end,
+    QueryArgs.
+
+%% Expand one raw query-string pair into zero or more {field, Value}
+%% tuples for validate_view_query/3. Unknown parameters become
+%% {extra, {Key, Value}} and are ignored later; removed legacy
+%% parameters (count, update, stale=/=ok) raise query_parse_error.
+parse_view_param("", _) ->
+    [];
+parse_view_param("key", Value) ->
+    % key=K is shorthand for start_key=K&end_key=K
+    JsonKey = ?JSON_DECODE(Value),
+    [{start_key, JsonKey}, {end_key, JsonKey}];
+parse_view_param("startkey_docid", Value) ->
+    [{start_docid, ?l2b(Value)}];
+parse_view_param("endkey_docid", Value) ->
+    [{end_docid, ?l2b(Value)}];
+parse_view_param("startkey", Value) ->
+    [{start_key, ?JSON_DECODE(Value)}];
+parse_view_param("endkey", Value) ->
+    [{end_key, ?JSON_DECODE(Value)}];
+parse_view_param("limit", Value) ->
+    [{limit, parse_positive_int_param(Value)}];
+parse_view_param("count", _Value) ->
+    throw({query_parse_error, <<"Query parameter 'count' is now 'limit'.">>});
+parse_view_param("stale", "ok") ->
+    [{stale, ok}];
+parse_view_param("stale", _Value) ->
+    throw({query_parse_error, <<"stale only available as stale=ok">>});
+parse_view_param("update", _Value) ->
+    throw({query_parse_error, <<"update=false is now stale=ok">>});
+parse_view_param("descending", Value) ->
+    [{descending, parse_bool_param(Value)}];
+parse_view_param("skip", Value) ->
+    [{skip, parse_int_param(Value)}];
+parse_view_param("group", Value) ->
+    % group=true means exact grouping; group=false means no grouping
+    case parse_bool_param(Value) of
+    true -> [{group_level, exact}];
+    false -> [{group_level, 0}]
+    end;
+parse_view_param("group_level", Value) ->
+    [{group_level, parse_positive_int_param(Value)}];
+parse_view_param("inclusive_end", Value) ->
+    [{inclusive_end, parse_bool_param(Value)}];
+parse_view_param("reduce", Value) ->
+    [{reduce, parse_bool_param(Value)}];
+parse_view_param("include_docs", Value) ->
+    [{include_docs, parse_bool_param(Value)}];
+parse_view_param("list", Value) ->
+    [{list, ?l2b(Value)}];
+parse_view_param("callback", _) ->
+    []; % Verified in the JSON response functions
+parse_view_param(Key, Value) ->
+    [{extra, {Key, Value}}].
+
+%% Fold one parsed parameter into #view_query_args, rejecting
+%% combinations that make no sense: key ranges with a multi-key POST,
+%% group/group_level on non-reduce views, reduce= on plain map views,
+%% include_docs on reduce views.
+validate_view_query(start_key, Value, Args) ->
+    case Args#view_query_args.multi_get of
+    true ->
+        Msg = <<"Query parameter `start_key` is "
+                "not compatible with multi-get">>,
+        throw({query_parse_error, Msg});
+    _ ->
+        Args#view_query_args{start_key=Value}
+    end;
+validate_view_query(start_docid, Value, Args) ->
+    Args#view_query_args{start_docid=Value};
+validate_view_query(end_key, Value, Args) ->
+    case Args#view_query_args.multi_get of
+    true->
+        Msg = <<"Query parameter `end_key` is "
+                "not compatible with multi-get">>,
+        throw({query_parse_error, Msg});
+    _ ->
+        Args#view_query_args{end_key=Value}
+    end;
+validate_view_query(end_docid, Value, Args) ->
+    Args#view_query_args{end_docid=Value};
+validate_view_query(limit, Value, Args) ->
+    Args#view_query_args{limit=Value};
+validate_view_query(list, Value, Args) ->
+    Args#view_query_args{list=Value};
+validate_view_query(stale, _, Args) ->
+    Args;
+validate_view_query(descending, true, Args) ->
+    case Args#view_query_args.direction of
+    rev -> Args; % Already reversed
+    fwd ->
+        % flip the direction and swap the sentinel docid bounds
+        Args#view_query_args{
+            direction = rev,
+            start_docid =
+                reverse_key_default(Args#view_query_args.start_docid),
+            end_docid =
+                reverse_key_default(Args#view_query_args.end_docid)
+        }
+    end;
+validate_view_query(descending, false, Args) ->
+    Args; % Ignore default condition
+validate_view_query(skip, Value, Args) ->
+    Args#view_query_args{skip=Value};
+validate_view_query(group_level, Value, Args) ->
+    case Args#view_query_args.view_type of
+    reduce ->
+        Args#view_query_args{group_level=Value};
+    _ ->
+        Msg = <<"Invalid URL parameter 'group' or "
+                " 'group_level' for non-reduce view.">>,
+        throw({query_parse_error, Msg})
+    end;
+validate_view_query(inclusive_end, Value, Args) ->
+    Args#view_query_args{inclusive_end=Value};
+validate_view_query(reduce, _, Args) ->
+    case Args#view_query_args.view_type of
+    map ->
+        Msg = <<"Invalid URL parameter `reduce` for map view.">>,
+        throw({query_parse_error, Msg});
+    _ ->
+        Args
+    end;
+validate_view_query(include_docs, true, Args) ->
+    case Args#view_query_args.view_type of
+    reduce ->
+        Msg = <<"Query parameter `include_docs` "
+                "is invalid for reduce views.">>,
+        throw({query_parse_error, Msg});
+    _ ->
+        Args#view_query_args{include_docs=true}
+    end;
+% Use the view_query_args record's default value
+validate_view_query(include_docs, _Value, Args) ->
+    Args;
+validate_view_query(extra, _Value, Args) ->
+    Args.
+
+%% Build the fold fun used by couch_view:fold/4 for map views. The
+%% accumulator is {RowsLeft, SkipLeft, Resp, RowFunAcc}: rows are skipped
+%% while SkipLeft > 0, the response is started lazily on the first
+%% emitted row (Resp =:= undefined), and the fold stops once RowsLeft
+%% reaches 0. Offset is derived from the reductions seen so far.
+make_view_fold_fun(Req, QueryArgs, Etag, Db, UpdateSeq, TotalViewCount, HelperFuns) ->
+    #view_fold_helper_funs{
+        start_response = StartRespFun,
+        send_row = SendRowFun,
+        reduce_count = ReduceCountFun
+    } = apply_default_helper_funs(HelperFuns),
+
+    #view_query_args{
+        include_docs = IncludeDocs
+    } = QueryArgs,
+
+    fun({{Key, DocId}, Value}, OffsetReds,
+            {AccLimit, AccSkip, Resp, RowFunAcc}) ->
+        case {AccLimit, AccSkip, Resp} of
+        {0, _, _} ->
+            % we've done "limit" rows, stop foldling
+            {stop, {0, 0, Resp, RowFunAcc}};
+        {_, AccSkip, _} when AccSkip > 0 ->
+            % just keep skipping
+            {ok, {AccLimit, AccSkip - 1, Resp, RowFunAcc}};
+        {_, _, undefined} ->
+            % rendering the first row, first we start the response
+            Offset = ReduceCountFun(OffsetReds),
+            {ok, Resp2, RowFunAcc0} = StartRespFun(Req, Etag,
+                TotalViewCount, Offset, RowFunAcc, UpdateSeq),
+            {Go, RowFunAcc2} = SendRowFun(Resp2, Db, {{Key, DocId}, Value},
+                IncludeDocs, RowFunAcc0),
+            {Go, {AccLimit - 1, 0, Resp2, RowFunAcc2}};
+        {AccLimit, _, Resp} when (AccLimit > 0) ->
+            % rendering all other rows
+            {Go, RowFunAcc2} = SendRowFun(Resp, Db, {{Key, DocId}, Value},
+                IncludeDocs, RowFunAcc),
+            {Go, {AccLimit - 1, 0, Resp, RowFunAcc2}}
+        end
+    end.
+
+%% Build the two funs couch_view:fold_reduce/4 needs for reduce views:
+%% GroupRowsFun decides whether two keys fall in the same group
+%% (everything groups together at level 0; list keys compare on their
+%% first GroupLevel elements; otherwise exact equality), and RespFun
+%% emits rows with the same lazy-start/skip/limit accumulator protocol
+%% as make_view_fold_fun/7. Emitted keys are truncated to GroupLevel,
+%% or replaced by null when grouping is off.
+make_reduce_fold_funs(Req, GroupLevel, _QueryArgs, Etag, UpdateSeq, HelperFuns) ->
+    #reduce_fold_helper_funs{
+        start_response = StartRespFun,
+        send_row = SendRowFun
+    } = apply_default_helper_funs(HelperFuns),
+
+    GroupRowsFun =
+        fun({_Key1,_}, {_Key2,_}) when GroupLevel == 0 ->
+            true;
+        ({Key1,_}, {Key2,_})
+                when is_integer(GroupLevel) and is_list(Key1) and is_list(Key2) ->
+            lists:sublist(Key1, GroupLevel) == lists:sublist(Key2, GroupLevel);
+        ({Key1,_}, {Key2,_}) ->
+            Key1 == Key2
+        end,
+
+    RespFun = fun
+    (_Key, _Red, {AccLimit, AccSkip, Resp, RowAcc}) when AccSkip > 0 ->
+        % keep skipping
+        {ok, {AccLimit, AccSkip - 1, Resp, RowAcc}};
+    (_Key, _Red, {0, _AccSkip, Resp, RowAcc}) ->
+        % we've exhausted limit rows, stop
+        {stop, {0, _AccSkip, Resp, RowAcc}};
+
+    (_Key, Red, {AccLimit, 0, undefined, RowAcc0}) when GroupLevel == 0 ->
+        % we haven't started responding yet and group=false
+        {ok, Resp2, RowAcc} = StartRespFun(Req, Etag, RowAcc0, UpdateSeq),
+        {Go, RowAcc2} = SendRowFun(Resp2, {null, Red}, RowAcc),
+        {Go, {AccLimit - 1, 0, Resp2, RowAcc2}};
+    (_Key, Red, {AccLimit, 0, Resp, RowAcc}) when GroupLevel == 0 ->
+        % group=false but we've already started the response
+        {Go, RowAcc2} = SendRowFun(Resp, {null, Red}, RowAcc),
+        {Go, {AccLimit - 1, 0, Resp, RowAcc2}};
+
+    (Key, Red, {AccLimit, 0, undefined, RowAcc0})
+            when is_integer(GroupLevel), is_list(Key) ->
+        % group_level and we haven't responded yet
+        {ok, Resp2, RowAcc} = StartRespFun(Req, Etag, RowAcc0, UpdateSeq),
+        {Go, RowAcc2} = SendRowFun(Resp2,
+            {lists:sublist(Key, GroupLevel), Red}, RowAcc),
+        {Go, {AccLimit - 1, 0, Resp2, RowAcc2}};
+    (Key, Red, {AccLimit, 0, Resp, RowAcc})
+            when is_integer(GroupLevel), is_list(Key) ->
+        % group_level and we've already started the response
+        {Go, RowAcc2} = SendRowFun(Resp,
+            {lists:sublist(Key, GroupLevel), Red}, RowAcc),
+        {Go, {AccLimit - 1, 0, Resp, RowAcc2}};
+
+    (Key, Red, {AccLimit, 0, undefined, RowAcc0}) ->
+        % group=true and we haven't responded yet
+        {ok, Resp2, RowAcc} = StartRespFun(Req, Etag, RowAcc0, UpdateSeq),
+        {Go, RowAcc2} = SendRowFun(Resp2, {Key, Red}, RowAcc),
+        {Go, {AccLimit - 1, 0, Resp2, RowAcc2}};
+    (Key, Red, {AccLimit, 0, Resp, RowAcc}) ->
+        % group=true and we've already started the response
+        {Go, RowAcc2} = SendRowFun(Resp, {Key, Red}, RowAcc),
+        {Go, {AccLimit - 1, 0, Resp, RowAcc2}}
+    end,
+    {ok, GroupRowsFun, RespFun}.
+
+%% Fill in the default JSON start-response/send-row callbacks wherever a
+%% helper-funs record left them undefined.
+apply_default_helper_funs(
+    #view_fold_helper_funs{
+        start_response = StartResp,
+        send_row = SendRow
+    }=Helpers) ->
+    StartResp2 = case StartResp of
+    undefined -> fun json_view_start_resp/6;
+    _ -> StartResp
+    end,
+
+    SendRow2 = case SendRow of
+    undefined -> fun send_json_view_row/5;
+    _ -> SendRow
+    end,
+
+    Helpers#view_fold_helper_funs{
+        start_response = StartResp2,
+        send_row = SendRow2
+    };
+
+
+apply_default_helper_funs(
+    #reduce_fold_helper_funs{
+        start_response = StartResp,
+        send_row = SendRow
+    }=Helpers) ->
+    StartResp2 = case StartResp of
+    undefined -> fun json_reduce_start_resp/4;
+    _ -> StartResp
+    end,
+
+    SendRow2 = case SendRow of
+    undefined -> fun send_json_reduce_row/3;
+    _ -> SendRow
+    end,
+
+    Helpers#reduce_fold_helper_funs{
+        start_response = StartResp2,
+        send_row = SendRow2
+    }.
+
+%% Translate #view_query_args bounds into the option list couch_view
+%% fold functions take: direction plus optional start/end key options.
+make_key_options(#view_query_args{direction = Dir}=QueryArgs) ->
+    [{dir,Dir} | make_start_key_option(QueryArgs) ++
+        make_end_key_option(QueryArgs)].
+
+make_start_key_option(
+        #view_query_args{
+            start_key = StartKey,
+            start_docid = StartDocId}) ->
+    if StartKey == undefined ->
+        [];
+    true ->
+        [{start_key, {StartKey, StartDocId}}]
+    end.
+
+%% inclusive_end=false uses end_key_gt with the docid bound reversed so
+%% the end key itself is excluded from the fold.
+make_end_key_option(#view_query_args{end_key = undefined}) ->
+    [];
+make_end_key_option(
+        #view_query_args{end_key = EndKey,
+            end_docid = EndDocId,
+            inclusive_end = true}) ->
+    [{end_key, {EndKey, EndDocId}}];
+make_end_key_option(
+        #view_query_args{
+            end_key = EndKey,
+            end_docid = EndDocId,
+            inclusive_end = false}) ->
+    [{end_key_gt, {EndKey,reverse_key_default(EndDocId)}}].
+
+%% Default start-response for map views: open a 200 chunked JSON response
+%% with the ETag header and return the opening of the JSON object
+%% (total_rows/offset, plus update_seq when ?update_seq=true).
+json_view_start_resp(Req, Etag, TotalViewCount, Offset, _Acc, UpdateSeq) ->
+    {ok, Resp} = start_json_response(Req, 200, [{"Etag", Etag}]),
+    BeginBody = case couch_httpd:qs_value(Req, "update_seq") of
+    "true" ->
+        io_lib:format(
+            "{\"total_rows\":~w,\"update_seq\":~w,"
+            "\"offset\":~w,\"rows\":[\r\n",
+            [TotalViewCount, UpdateSeq, Offset]);
+    _Else ->
+        io_lib:format(
+            "{\"total_rows\":~w,\"offset\":~w,\"rows\":[\r\n",
+            [TotalViewCount, Offset])
+    end,
+    {ok, Resp, BeginBody}.
+
+%% Default send-row for map views: RowFront is the separator carried in
+%% the row accumulator — empty/initial for the first row, ",\r\n" after.
+send_json_view_row(Resp, Db, {{Key, DocId}, Value}, IncludeDocs, RowFront) ->
+    JsonObj = view_row_obj(Db, {{Key, DocId}, Value}, IncludeDocs),
+    send_chunk(Resp, RowFront ++ ?JSON_ENCODE(JsonObj)),
+    {ok, ",\r\n"}.
+
+%% Default start-response for reduce views (no total_rows/offset).
+json_reduce_start_resp(Req, Etag, _Acc0, UpdateSeq) ->
+    {ok, Resp} = start_json_response(Req, 200, [{"Etag", Etag}]),
+    case couch_httpd:qs_value(Req, "update_seq") of
+    "true" ->
+        {ok, Resp, io_lib:format("{\"update_seq\":~w,\"rows\":[\r\n",[UpdateSeq])};
+    _Else ->
+        {ok, Resp, "{\"rows\":[\r\n"}
+    end.
+
+%% Default send-row for reduce views.
+send_json_reduce_row(Resp, {Key, Value}, RowFront) ->
+    send_chunk(Resp, RowFront ++ ?JSON_ENCODE({[{key, Key}, {value, Value}]})),
+    {ok, ",\r\n"}.
+
+view_group_etag(Group, Db) ->
+    view_group_etag(Group, Db, nil).
+
+%% ETag for a view group: index signature + last indexed seq + Extra
+%% (e.g. POSTed keys). See the caveat below about granularity.
+view_group_etag(#group{sig=Sig,current_seq=CurrentSeq}, _Db, Extra) ->
+    % ?LOG_ERROR("Group ~p",[Group]),
+    % This is not as granular as it could be.
+    % If there are updates to the db that do not effect the view index,
+    % they will change the Etag. For more granular Etags we'd need to keep
+    % track of the last Db seq that caused an index change.
+    couch_httpd:make_etag({Sig, CurrentSeq, Extra}).
% Render a single view row as an EJSON object.  Clause order matters:
% error rows first, then the include_docs=true variants, then the plain row.

% the view row has an error
view_row_obj(_Db, {{Key, error}, Value}, _IncludeDocs) ->
    {[{key, Key}, {error, Value}]};
% include docs in the view output: if the emitted value carries _id/_rev
% props the row can point at another doc (or a specific rev) to embed
view_row_obj(Db, {{Key, DocId}, {Props}}, true) ->
    Rev = case couch_util:get_value(<<"_rev">>, Props) of
    undefined ->
        nil;
    Rev0 ->
        couch_doc:parse_rev(Rev0)
    end,
    IncludeId = couch_util:get_value(<<"_id">>, Props, DocId),
    view_row_with_doc(Db, {{Key, DocId}, {Props}}, {IncludeId, Rev});
view_row_obj(Db, {{Key, DocId}, Value}, true) ->
    view_row_with_doc(Db, {{Key, DocId}, Value}, {DocId, nil});
% the normal case for rendering a view row
view_row_obj(_Db, {{Key, DocId}, Value}, _IncludeDocs) ->
    {[{id, DocId}, {key, Key}, {value, Value}]}.

% A row plus its embedded "doc" member (or "doc":null if the open failed).
view_row_with_doc(Db, {{Key, DocId}, Value}, IdRev) ->
    {[{id, DocId}, {key, Key}, {value, Value}] ++ doc_member(Db, IdRev)}.

% Open the doc (optionally at a specific rev) and return it as a one-entry
% proplist; any failure (missing, deleted, ...) degrades to "doc":null
% rather than aborting the whole response.
doc_member(Db, {DocId, Rev}) ->
    ?LOG_DEBUG("Include Doc: ~p ~p", [DocId, Rev]),
    case (catch couch_httpd_db:couch_doc_open(Db, DocId, Rev, [])) of
    #doc{} = Doc ->
        JsonDoc = couch_doc:to_json_obj(Doc, []),
        [{doc, JsonDoc}];
    _Else ->
        [{doc, null}]
    end.

% Terminate a map-view response.  The fold accumulator's third element is
% the response object; 'undefined' means no row was ever sent, so emit a
% complete empty result instead of closing a chunked body.
finish_view_fold(Req, TotalRows, Offset, FoldResult) ->
    finish_view_fold(Req, TotalRows, Offset, FoldResult, []).

finish_view_fold(Req, TotalRows, Offset, FoldResult, Fields) ->
    case FoldResult of
    {_, _, undefined, _} ->
        % nothing found in the view or keys, nothing has been returned
        % send empty view
        send_json(Req, 200, {[
            {total_rows, TotalRows},
            {offset, Offset},
            {rows, []}
        ] ++ Fields});
    {_, _, Resp, _} ->
        % end the view
        send_chunk(Resp, "\r\n]}"),
        end_json_response(Resp)
    end.

% Terminate a reduce-view response; same undefined-means-empty convention.
finish_reduce_fold(Req, Resp) ->
    finish_reduce_fold(Req, Resp, []).

finish_reduce_fold(Req, Resp, Fields) ->
    case Resp of
    undefined ->
        send_json(Req, 200, {[
            {rows, []}
        ] ++ Fields});
    Resp ->
        send_chunk(Resp, "\r\n]}"),
        end_json_response(Resp)
    end.
% Parse a query-string boolean; anything but "true"/"false" (case
% insensitive) is rejected with a 400-style query_parse_error throw.
parse_bool_param(Val) ->
    case string:to_lower(Val) of
    "true" -> true;
    "false" -> false;
    _ ->
        Msg = io_lib:format("Invalid boolean parameter: ~p", [Val]),
        throw({query_parse_error, ?l2b(Msg)})
    end.

% Parse a query-string integer, rejecting non-numeric input.
parse_int_param(Val) ->
    case (catch list_to_integer(Val)) of
    IntVal when is_integer(IntVal) ->
        IntVal;
    _ ->
        Msg = io_lib:format("Invalid value for integer parameter: ~p", [Val]),
        throw({query_parse_error, ?l2b(Msg)})
    end.

% Parse a non-negative integer.  NOTE: despite the name, zero is accepted
% (the guard is >= 0).
parse_positive_int_param(Val) ->
    case parse_int_param(Val) of
    IntVal when IntVal >= 0 ->
        IntVal;
    _ ->
        Fmt = "Invalid value for positive integer parameter: ~p",
        Msg = io_lib:format(Fmt, [Val]),
        throw({query_parse_error, ?l2b(Msg)})
    end.

% diff --git a/apps/couch/src/couch_js_functions.hrl b/apps/couch/src/couch_js_functions.hrl
% new file mode 100644
% index 00000000..1f314f6e
% --- /dev/null
% +++ b/apps/couch/src/couch_js_functions.hrl
% @@ -0,0 +1,97 @@

% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
% JavaScript validate_doc_update function installed in the _users database.
% The JS source is a runtime binary literal shipped verbatim to the query
% server — do not edit its text casually; it enforces:
%   * only admins (or the user themselves) may delete user docs
%   * docs must have type 'user', a name, an array of roles, and an _id of
%     the form org.couchdb.user:<name>
%   * usernames are immutable; password_sha requires a salt
%   * non-admins may only edit their own doc and may not change roles
%   * no roles or usernames starting with '_' (reserved for the system)
-define(AUTH_DB_DOC_VALIDATE_FUNCTION, <<"
    function(newDoc, oldDoc, userCtx) {
        if (newDoc._deleted === true) {
            // allow deletes by admins and matching users
            // without checking the other fields
            if ((userCtx.roles.indexOf('_admin') !== -1) ||
                (userCtx.name == oldDoc.name)) {
                return;
            } else {
                throw({forbidden: 'Only admins may delete other user docs.'});
            }
        }

        if ((oldDoc && oldDoc.type !== 'user') || newDoc.type !== 'user') {
            throw({forbidden : 'doc.type must be user'});
        } // we only allow user docs for now

        if (!newDoc.name) {
            throw({forbidden: 'doc.name is required'});
        }

        if (!(newDoc.roles && (typeof newDoc.roles.length !== 'undefined'))) {
            throw({forbidden: 'doc.roles must be an array'});
        }

        if (newDoc._id !== ('org.couchdb.user:' + newDoc.name)) {
            throw({
                forbidden: 'Doc ID must be of the form org.couchdb.user:name'
            });
        }

        if (oldDoc) { // validate all updates
            if (oldDoc.name !== newDoc.name) {
                throw({forbidden: 'Usernames can not be changed.'});
            }
        }

        if (newDoc.password_sha && !newDoc.salt) {
            throw({
                forbidden: 'Users with password_sha must have a salt.' +
                    'See /_utils/script/couch.js for example code.'
            });
        }

        if (userCtx.roles.indexOf('_admin') === -1) {
            if (oldDoc) { // validate non-admin updates
                if (userCtx.name !== newDoc.name) {
                    throw({
                        forbidden: 'You may only update your own user document.'
                    });
                }
                // validate role updates
                var oldRoles = oldDoc.roles.sort();
                var newRoles = newDoc.roles.sort();

                if (oldRoles.length !== newRoles.length) {
                    throw({forbidden: 'Only _admin may edit roles'});
                }

                for (var i = 0; i < oldRoles.length; i++) {
                    if (oldRoles[i] !== newRoles[i]) {
                        throw({forbidden: 'Only _admin may edit roles'});
                    }
                }
            } else if (newDoc.roles.length > 0) {
                throw({forbidden: 'Only _admin may set roles'});
            }
        }

        // no system roles in users db
        for (var i = 0; i < newDoc.roles.length; i++) {
            if (newDoc.roles[i][0] === '_') {
                throw({
                    forbidden:
                        'No system roles (starting with underscore) in users db.'
                });
            }
        }

        // no system names as names
        if (newDoc.name[0] === '_') {
            throw({forbidden: 'Username may not start with underscore.'});
        }
    }
">>).

% diff --git a/apps/couch/src/couch_key_tree.erl b/apps/couch/src/couch_key_tree.erl
% new file mode 100644
% index 00000000..4fe09bf3
% --- /dev/null
% +++ b/apps/couch/src/couch_key_tree.erl
% @@ -0,0 +1,329 @@

% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

% Revision-tree manipulation: CouchDB stores document revision histories as
% lists of {StartPos, Tree} stemmed trees and merges/queries them here.
-module(couch_key_tree).

-export([merge/2, find_missing/2, get_key_leafs/2, get_full_key_paths/2, get/2]).
-export([map/2, get_all_leafs/1, count_leafs/1, remove_leafs/2,
    get_all_leafs_full/1,stem/2,map_leafs/2]).
% a key tree looks like this:
%  Tree -> [] or [{Key, Value, ChildTree} | SiblingTree]
%  ChildTree -> Tree
%  SiblingTree -> [] or [{SiblingKey, Value, Tree} | Tree]
%  And each Key < SiblingKey

% partial trees arranged by how much they are cut off.

% Merge the list of {StartPos, Tree} trees B into the forest A.  Returns
% {MergedForest, conflicts | no_conflicts}.  A merge that neither grew into
% A nor was wholly contained by B (forest sizes differ from both inputs)
% counts as a conflict, as does any conflict detected inside merge_one.
merge(A, B) ->
    {Merged, HasConflicts} =
    lists:foldl(
        fun(InsertTree, {AccTrees, AccConflicts}) ->
            {ok, Merged, Conflicts} = merge_one(AccTrees, InsertTree, [], false),
            {Merged, Conflicts or AccConflicts}
        end,
        {A, false}, B),
    if HasConflicts or
            ((length(Merged) =/= length(A)) and (length(Merged) =/= length(B))) ->
        Conflicts = conflicts;
    true ->
        Conflicts = no_conflicts
    end,
    {lists:sort(Merged), Conflicts}.

% Try to merge one {StartPos, Tree} into each existing tree in turn.  The
% tree starting earlier becomes the base (A); the later one is grafted in
% at depth StartB - StartA.  If no existing tree accepts the insert it is
% kept as a new sibling root.
merge_one([], Insert, OutAcc, ConflictsAcc) ->
    {ok, [Insert | OutAcc], ConflictsAcc};
merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, OutAcc, ConflictsAcc) ->
    if Start =< StartInsert ->
        StartA = Start,
        StartB = StartInsert,
        TreeA = Tree,
        TreeB = TreeInsert;
    true ->
        StartB = Start,
        StartA = StartInsert,
        TreeB = Tree,
        TreeA = TreeInsert
    end,
    case merge_at([TreeA], StartB - StartA, TreeB) of
    {ok, [CombinedTrees], Conflicts} ->
        % merged: the combined tree becomes the new insert candidate for
        % the remaining trees (it may still merge with another one)
        merge_one(Rest, {StartA, CombinedTrees}, OutAcc, Conflicts or ConflictsAcc);
    no ->
        merge_one(Rest, {StartB, TreeB}, [{StartA, TreeA} | OutAcc], ConflictsAcc)
    end.
% Descend Place levels into the sibling list and merge Insert at that
% depth.  Returns {ok, NewSiblings, Conflicts} or 'no' when no node at the
% target depth shares Insert's key.
merge_at([], _Place, _Insert) ->
    no;
merge_at([{Key, Value, SubTree}|Sibs], 0, {InsertKey, InsertValue, InsertSubTree}) ->
    if Key == InsertKey ->
        {Merge, Conflicts} = merge_simple(SubTree, InsertSubTree),
        {ok, [{Key, Value, Merge} | Sibs], Conflicts};
    true ->
        % not this sibling: try the rest at the same depth
        case merge_at(Sibs, 0, {InsertKey, InsertValue, InsertSubTree}) of
        {ok, Merged, Conflicts} ->
            {ok, [{Key, Value, SubTree} | Merged], Conflicts};
        no ->
            no
        end
    end;
merge_at([{Key, Value, SubTree}|Sibs], Place, Insert) ->
    case merge_at(SubTree, Place - 1,Insert) of
    {ok, Merged, Conflicts} ->
        {ok, [{Key, Value, Merged} | Sibs], Conflicts};
    no ->
        case merge_at(Sibs, Place, Insert) of
        {ok, Merged, Conflicts} ->
            {ok, [{Key, Value, SubTree} | Merged], Conflicts};
        no ->
            no
        end
    end.

% key tree functions

% Merge two sibling lists that are aligned at the same depth.  A new
% sibling appearing on either side (key mismatch) marks a conflict; equal
% keys recurse into children.  Assumes siblings are sorted by key.
merge_simple([], B) ->
    {B, false};
merge_simple(A, []) ->
    {A, false};
merge_simple([ATree | ANextTree], [BTree | BNextTree]) ->
    {AKey, AValue, ASubTree} = ATree,
    {BKey, _BValue, BSubTree} = BTree,
    if
    AKey == BKey ->
        %same key
        {MergedSubTree, Conflict1} = merge_simple(ASubTree, BSubTree),
        {MergedNextTree, Conflict2} = merge_simple(ANextTree, BNextTree),
        {[{AKey, AValue, MergedSubTree} | MergedNextTree], Conflict1 or Conflict2};
    AKey < BKey ->
        {MTree, _} = merge_simple(ANextTree, [BTree | BNextTree]),
        {[ATree | MTree], true};
    true ->
        {MTree, _} = merge_simple([ATree | ANextTree], BNextTree),
        {[BTree | MTree], true}
    end.

% Of the requested {Pos, Key} pairs, return those NOT present in the
% forest.  Keys at a position shallower than a tree's start can never be in
% that tree ("impossible") and are carried over to the next root.
find_missing(_Tree, []) ->
    [];
find_missing([], SeachKeys) ->
    SeachKeys;
find_missing([{Start, {Key, Value, SubTree}} | RestTree], SeachKeys) ->
    PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos >= Start],
    ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos < Start],
    Missing = find_missing_simple(Start, [{Key, Value, SubTree}], PossibleKeys),
    find_missing(RestTree, ImpossibleKeys ++ Missing).
% Recursive helper for find_missing/2: remove {Pos, Key} from the search
% set whenever a matching node is found; whatever survives is missing.
find_missing_simple(_Pos, _Tree, []) ->
    [];
find_missing_simple(_Pos, [], SeachKeys) ->
    SeachKeys;
find_missing_simple(Pos, [{Key, _, SubTree} | RestTree], SeachKeys) ->
    PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos >= Pos],
    ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos < Pos],

    SrcKeys2 = PossibleKeys -- [{Pos, Key}],
    SrcKeys3 = find_missing_simple(Pos + 1, SubTree, SrcKeys2),
    ImpossibleKeys ++ find_missing_simple(Pos, RestTree, SrcKeys3).


% Partition flattened leaf paths into those to keep and the {Pos, Key}
% pairs that were actually removed.  Every path produced by
% get_all_leafs_full/1 has the {Pos, [{LeafKey, _} | _]} shape matched here.
filter_leafs([], _Keys, FilteredAcc, RemovedKeysAcc) ->
    {FilteredAcc, RemovedKeysAcc};
filter_leafs([{Pos, [{LeafKey, _}|_]} = Path |Rest], Keys, FilteredAcc, RemovedKeysAcc) ->
    FilteredKeys = lists:delete({Pos, LeafKey}, Keys),
    if FilteredKeys == Keys ->
        % this leaf is not a key we are looking to remove
        filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc);
    true ->
        % this did match a key, remove both the node and the input key
        filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc])
    end.

% Removes any branches from the tree whose leaf node(s) are in the Keys
remove_leafs(Trees, Keys) ->
    % flatten each branch in a tree into a tree path
    Paths = get_all_leafs_full(Trees),

    % filter out any that are in the keys list.
    {FilteredPaths, RemovedKeys} = filter_leafs(Paths, Keys, [], []),

    % convert paths back to trees
    NewTree = lists:foldl(
        fun({PathPos, Path},TreeAcc) ->
            % rebuild a single-branch tree from the leaf-first path,
            % then merge it back into the accumulated forest
            [SingleTree] = lists:foldl(
                fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
            {NewTrees, _} = merge(TreeAcc, [{PathPos + 1 - length(Path), SingleTree}]),
            NewTrees
        end, [], FilteredPaths),
    {NewTree, RemovedKeys}.


% get the leafs in the tree matching the keys. The matching key nodes can be
% leafs or an inner nodes. If an inner node, then the leafs for that node
% are returned.
get_key_leafs(Tree, Keys) ->
    get_key_leafs(Tree, Keys, []).
% Accumulating worker for get_key_leafs/2: fold each {StartPos, Tree} root,
% collecting the leafs reachable from each requested {Pos, Key} node.
% Returns {LeafsFound, KeysNotFound}.
get_key_leafs(_, [], Acc) ->
    {Acc, []};
get_key_leafs([], Keys, Acc) ->
    {Acc, Keys};
get_key_leafs([{Pos, Tree}|Rest], Keys, Acc) ->
    {Gotten, RemainingKeys} = get_key_leafs_simple(Pos, [Tree], Keys, []),
    get_key_leafs(Rest, RemainingKeys, Gotten ++ Acc).

% Recursive search for the requested keys.  When a node matches, every
% leaf beneath it (itself included, if it is a leaf) is returned.
get_key_leafs_simple(_Pos, _Tree, [], _KeyPathAcc) ->
    {[], []};
get_key_leafs_simple(_Pos, [], KeysToGet, _KeyPathAcc) ->
    {[], KeysToGet};
get_key_leafs_simple(Pos, [{Key, _Value, SubTree}=Tree | RestTree], KeysToGet, KeyPathAcc) ->
    case lists:delete({Pos, Key}, KeysToGet) of
    KeysToGet -> % same list, key not found
        {LeafsFound, KeysToGet2} = get_key_leafs_simple(Pos + 1, SubTree, KeysToGet, [Key | KeyPathAcc]),
        {RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet2, KeyPathAcc),
        {LeafsFound ++ RestLeafsFound, KeysRemaining};
    KeysToGet2 ->
        % This node matched.  Collect all leafs below it; any other
        % requested key that is itself one of those leafs is satisfied
        % too, so it must be dropped from the remaining set.
        LeafsFound = get_all_leafs_simple(Pos, [Tree], KeyPathAcc),
        % Leafs have the shape {Value, {LeafPos, [LeafKey | _RevPath]}};
        % rebuild the {Pos, Key} form used by KeysToGet.  (The previous
        % code extracted the leaf *values* instead, and then wrote
        % `KeysToGet2 = KeysToGet2 -- LeafKeysFound` — a match against the
        % already-bound KeysToGet2, guaranteed to badmatch whenever a
        % requested key was a leaf under this node.)
        LeafKeysFound = [{LeafPos, LeafKey} ||
            {_LeafValue, {LeafPos, [LeafKey | _]}} <- LeafsFound],
        KeysToGet3 = KeysToGet2 -- LeafKeysFound,
        {RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet3, KeyPathAcc),
        {LeafsFound ++ RestLeafsFound, KeysRemaining}
    end.

% Fetch the values and key paths for the requested {Pos, Key} nodes.
% Returns {[{Value, {Pos, ReversedKeyPath}}], KeysNotFound}.
get(Tree, KeysToGet) ->
    {KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet),
    FixedResults = [ {Value, {Pos, [Key0 || {Key0, _} <- Path]}} || {Pos, [{_Key, Value}|_]=Path} <- KeyPaths],
    {FixedResults, KeysNotFound}.

% Like get/2 but returns the full {Key, Value} path from each matched node
% back to its root.
get_full_key_paths(Tree, Keys) ->
    get_full_key_paths(Tree, Keys, []).

get_full_key_paths(_, [], Acc) ->
    {Acc, []};
get_full_key_paths([], Keys, Acc) ->
    {Acc, Keys};
get_full_key_paths([{Pos, Tree}|Rest], Keys, Acc) ->
    {Gotten, RemainingKeys} = get_full_key_paths(Pos, [Tree], Keys, []),
    get_full_key_paths(Rest, RemainingKeys, Gotten ++ Acc).
% Recursive worker for get_full_key_paths/3: every matched node contributes
% the {Key, Value} path from itself back up to the root.
get_full_key_paths(_Pos, _Tree, [], _KeyPathAcc) ->
    {[], []};
get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) ->
    {[], KeysToGet};
get_full_key_paths(Pos, [{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPathAcc) ->
    KeysToGet2 = KeysToGet -- [{Pos, KeyId}],
    CurrentNodeResult =
    case length(KeysToGet2) =:= length(KeysToGet) of
    true -> % not in the key list.
        [];
    false -> % this node is the key list. return it
        [{Pos, [{KeyId, Value} | KeyPathAcc]}]
    end,
    {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [{KeyId, Value} | KeyPathAcc]),
    {KeysGotten2, KeysRemaining2} = get_full_key_paths(Pos, RestTree, KeysRemaining, KeyPathAcc),
    {CurrentNodeResult ++ KeysGotten ++ KeysGotten2, KeysRemaining2}.

% Flatten the forest into leaf paths with values:
% [{LeafPos, [{Key, Value} | ...up to root]}].
get_all_leafs_full(Tree) ->
    get_all_leafs_full(Tree, []).

get_all_leafs_full([], Acc) ->
    Acc;
get_all_leafs_full([{Pos, Tree} | Rest], Acc) ->
    get_all_leafs_full(Rest, get_all_leafs_full_simple(Pos, [Tree], []) ++ Acc).

get_all_leafs_full_simple(_Pos, [], _KeyPathAcc) ->
    [];
get_all_leafs_full_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
    % leaf: emit the accumulated (leaf-first) path
    [{Pos, [{KeyId, Value} | KeyPathAcc]} | get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc)];
get_all_leafs_full_simple(Pos, [{KeyId, Value, SubTree} | RestTree], KeyPathAcc) ->
    get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++ get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc).

% Flatten the forest into [{LeafValue, {LeafPos, [Key | ...up to root]}}].
get_all_leafs(Trees) ->
    get_all_leafs(Trees, []).

get_all_leafs([], Acc) ->
    Acc;
get_all_leafs([{Pos, Tree}|Rest], Acc) ->
    get_all_leafs(Rest, get_all_leafs_simple(Pos, [Tree], []) ++ Acc).
% Worker for get_all_leafs/1: walk one sibling list, emitting
% {Value, {Depth, [Key | ...path to root]}} for every leaf.
get_all_leafs_simple(_Depth, [], _PathAcc) ->
    [];
get_all_leafs_simple(Depth, [{Key, Value, []} | Siblings], PathAcc) ->
    Leaf = {Value, {Depth, [Key | PathAcc]}},
    [Leaf | get_all_leafs_simple(Depth, Siblings, PathAcc)];
get_all_leafs_simple(Depth, [{Key, _Value, Children} | Siblings], PathAcc) ->
    Below = get_all_leafs_simple(Depth + 1, Children, [Key | PathAcc]),
    Below ++ get_all_leafs_simple(Depth, Siblings, PathAcc).


% Total number of leaf nodes across every {Pos, Tree} root in the forest.
count_leafs(Trees) ->
    lists:foldl(
        fun({_Pos, Tree}, Total) ->
            Total + count_leafs_simple([Tree])
        end, 0, Trees).

% Count leafs in a single sibling list (a node with no children is a leaf).
count_leafs_simple([]) ->
    0;
count_leafs_simple([{_Key, _Value, Children} | Siblings]) ->
    Here = case Children of
        [] -> 1;
        _ -> count_leafs_simple(Children)
    end,
    Here + count_leafs_simple(Siblings).


% Rewrite every node's value with Fun.  Fun may take either
% ({Depth, Key}, Value) or ({Depth, Key}, Value, leaf | branch);
% arity-2 funs are adapted by dropping the node-type argument.
map(_Fun, []) ->
    [];
map(Fun, [{Pos, Tree}|Rest]) ->
    Mapper = case erlang:fun_info(Fun, arity) of
        {arity, 2} ->
            fun(KeyPos, Val, _NodeType) -> Fun(KeyPos, Val) end;
        {arity, 3} ->
            Fun
    end,
    [NewTree] = map_simple(Mapper, Pos, [Tree]),
    [{Pos, NewTree} | map(Fun, Rest)].

% Depth-first value rewrite over one sibling list; Fun sees whether the
% node is a leaf or a branch.
map_simple(_Fun, _Depth, []) ->
    [];
map_simple(Fun, Depth, [{Key, Value, Children} | Siblings]) ->
    NodeType = case Children of
        [] -> leaf;
        _ -> branch
    end,
    NewValue = Fun({Depth, Key}, Value, NodeType),
    [{Key, NewValue, map_simple(Fun, Depth + 1, Children)}
        | map_simple(Fun, Depth, Siblings)].


% Like map/2 but Fun({Depth, Key}, Value) is applied to leaf values only;
% branch values pass through untouched.
map_leafs(Fun, Trees) ->
    [begin
        [Mapped] = map_leafs_simple(Fun, Pos, [Tree]),
        {Pos, Mapped}
     end || {Pos, Tree} <- Trees].

map_leafs_simple(_Fun, _Depth, []) ->
    [];
map_leafs_simple(Fun, Depth, [{Key, Value, []} | Siblings]) ->
    [{Key, Fun({Depth, Key}, Value), []}
        | map_leafs_simple(Fun, Depth, Siblings)];
map_leafs_simple(Fun, Depth, [{Key, Value, Children} | Siblings]) ->
    [{Key, Value, map_leafs_simple(Fun, Depth + 1, Children)}
        | map_leafs_simple(Fun, Depth, Siblings)].
% Stem the revision forest so no path keeps more than Limit ancestors:
% flatten to leaf-first paths, truncate each to Limit entries, then merge
% the truncated branches back into a forest.
stem(Trees, Limit) ->
    % flatten each branch in a tree into a tree path
    Paths = get_all_leafs_full(Trees),

    Paths2 = [{Pos, lists:sublist(Path, Limit)} || {Pos, Path} <- Paths],

    % convert paths back to trees
    lists:foldl(
        fun({PathPos, Path},TreeAcc) ->
            [SingleTree] = lists:foldl(
                fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
            {NewTrees, _} = merge(TreeAcc, [{PathPos + 1 - length(Path), SingleTree}]),
            NewTrees
        end, [], Paths2).

% Tests moved to test/etap/06?-*.t

% diff --git a/apps/couch/src/couch_log.erl b/apps/couch/src/couch_log.erl
% new file mode 100644
% index 00000000..2d62cbb5
% --- /dev/null
% +++ b/apps/couch/src/couch_log.erl
% @@ -0,0 +1,151 @@

% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

% CouchDB's log writer: a gen_event handler attached to error_logger that
% filters by level and appends to the configured log file.
-module(couch_log).
-behaviour(gen_event).

-export([start_link/0,stop/0]).
-export([debug_on/0,info_on/0,get_level/0,get_level_integer/0, set_level/1]).
-export([init/1, handle_event/2, terminate/2, code_change/3, handle_info/2, handle_call/2]).
-export([read/2]).

% Numeric severities; higher is more severe, so "level =< threshold"
% comparisons select everything at or above the configured verbosity.
-define(LEVEL_ERROR, 3).
-define(LEVEL_INFO, 2).
-define(LEVEL_DEBUG, 1).
-define(LEVEL_TMI, 0).

% Atom -> integer; unrecognized atoms are treated as 'error'.
level_integer(error)    -> ?LEVEL_ERROR;
level_integer(info)     -> ?LEVEL_INFO;
level_integer(debug)    -> ?LEVEL_DEBUG;
level_integer(tmi)      -> ?LEVEL_TMI;
level_integer(_Else)    -> ?LEVEL_ERROR. % anything else default to ERROR level

% Integer -> atom; crashes (function_clause) on out-of-range input.
level_atom(?LEVEL_ERROR) -> error;
level_atom(?LEVEL_INFO) -> info;
level_atom(?LEVEL_DEBUG) -> debug;
level_atom(?LEVEL_TMI) -> tmi.
% Attach this module as a gen_event handler on the standard error_logger
% event manager, supervised through couch_event_sup.
start_link() ->
    couch_event_sup:start_link({local, couch_log}, error_logger, couch_log, []).

stop() ->
    couch_event_sup:stop(couch_log).

init([]) ->
    % read config and register for configuration changes

    % just stop if one of the config settings change. couch_server_sup
    % will restart us and then we will pick up the new settings.
    ok = couch_config:register(
        fun("log", "file") ->
            ?MODULE:stop();
        ("log", "level") ->
            ?MODULE:stop();
        ("log", "include_sasl") ->
            ?MODULE:stop()
        end),

    Filename = couch_config:get("log", "file", "couchdb.log"),
    % NOTE(review): list_to_atom on a config string — fine for a local,
    % trusted ini file, but would be unsafe on untrusted input.
    Level = level_integer(list_to_atom(couch_config:get("log", "level", "info"))),
    Sasl = list_to_atom(couch_config:get("log", "include_sasl", "true")),

    % cache the level in a named ETS table so get_level_integer/0 can be
    % called without a round-trip through the event manager
    case ets:info(?MODULE) of
    undefined -> ets:new(?MODULE, [named_table]);
    _ -> ok
    end,
    ets:insert(?MODULE, {level, Level}),

    % badmatch (and thus failed handler install) if the file can't be opened
    {ok, Fd} = file:open(Filename, [append]),
    {ok, {Fd, Level, Sasl}}.

% true when the current level admits debug messages.
debug_on() ->
    get_level_integer() =< ?LEVEL_DEBUG.

% true when the current level admits info messages.
info_on() ->
    get_level_integer() =< ?LEVEL_INFO.

set_level(LevelAtom) ->
    set_level_integer(level_integer(LevelAtom)).

get_level() ->
    level_atom(get_level_integer()).

% Read the cached level; defaults to ERROR if the table doesn't exist yet
% (e.g. called before init/1 has run).
get_level_integer() ->
    try
        ets:lookup_element(?MODULE, level, 2)
    catch error:badarg ->
        ?LEVEL_ERROR
    end.

% Route the update through the event manager so the handler state and the
% ETS cache change together (see handle_call/2).
set_level_integer(Int) ->
    gen_event:call(error_logger, couch_log, {set_level_integer, Int}).
% Event dispatch.  couch_* events come from the ?LOG_* macros; error/info
% messages below the configured threshold fall through to the final
% catch-all and are dropped.  SASL reports are logged unless include_sasl
% is false, and at TMI level every remaining event is logged.
handle_event({Pid, couch_error, {Format, Args}}, {Fd, _LogLevel, _Sasl}=State) ->
    log(Fd, Pid, error, Format, Args),
    {ok, State};
handle_event({Pid, couch_info, {Format, Args}}, {Fd, LogLevel, _Sasl}=State)
when LogLevel =< ?LEVEL_INFO ->
    log(Fd, Pid, info, Format, Args),
    {ok, State};
handle_event({Pid, couch_debug, {Format, Args}}, {Fd, LogLevel, _Sasl}=State)
when LogLevel =< ?LEVEL_DEBUG ->
    log(Fd, Pid, debug, Format, Args),
    {ok, State};
handle_event({error_report, _, {Pid, _, _}}=Event, {Fd, _LogLevel, Sasl}=State)
when Sasl =/= false ->
    log(Fd, Pid, error, "~p", [Event]),
    {ok, State};
handle_event({error, _, {Pid, Format, Args}}, {Fd, _LogLevel, Sasl}=State)
when Sasl =/= false ->
    log(Fd, Pid, error, Format, Args),
    {ok, State};
handle_event({_, _, {Pid, _, _}}=Event, {Fd, LogLevel, _Sasl}=State)
when LogLevel =< ?LEVEL_TMI ->
    % log every remaining event if tmi!
    log(Fd, Pid, tmi, "~p", [Event]),
    {ok, State};
handle_event(_Event, State) ->
    {ok, State}.

% Keep the ETS cache and handler state in sync when the level changes.
handle_call({set_level_integer, NewLevel}, {Fd, _LogLevel, Sasl}) ->
    ets:insert(?MODULE, {level, NewLevel}),
    {ok, ok, {Fd, NewLevel, Sasl}}.

handle_info(_Info, State) ->
    {ok, State}.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

terminate(_Arg, {Fd, _LoggingLevel, _Sasl}) ->
    file:close(Fd).

% Write one entry to both the console and the log file; file output gets
% an RFC1123 timestamp and all newlines normalized to CRLF.
log(Fd, Pid, Level, Format, Args) ->
    Msg = io_lib:format(Format, Args),
    ok = io:format("[~s] [~p] ~s~n", [Level, Pid, Msg]), % dump to console too
    Msg2 = re:replace(lists:flatten(Msg),"\\r\\n|\\r|\\n", "\r\n",
        [global, {return, list}]),
    ok = io:format(Fd, "[~s] [~s] [~p] ~s\r~n\r~n", [httpd_util:rfc1123_date(), Level, Pid, Msg2]).
% Read (up to) the last Bytes bytes of the configured log file, shifted
% forward by Offset.  Used by the _log HTTP handler.
%
% Fixes over the original: the pread length is Bytes (the original asked
% for LogFileSize bytes on every call, i.e. the whole file), and the file
% descriptor is now always closed (the original leaked one Fd per request).
read(Bytes, Offset) ->
    LogFileName = couch_config:get("log", "file"),
    LogFileSize = couch_util:file_read_size(LogFileName),

    {ok, Fd} = file:open(LogFileName, [read]),
    Start = lists:max([LogFileSize - Bytes, 0]) + Offset,

    % TODO: truncate chopped first line
    % TODO: make streaming
    try
        % pread returns at most the remaining bytes, so requesting Bytes
        % yields the same chunk as before without over-allocating
        {ok, Chunk} = file:pread(Fd, Start, Bytes),
        Chunk
    after
        ok = file:close(Fd)
    end.

% diff --git a/apps/couch/src/couch_native_process.erl b/apps/couch/src/couch_native_process.erl
% new file mode 100644
% index 00000000..b512f712
% --- /dev/null
% +++ b/apps/couch/src/couch_native_process.erl
% @@ -0,0 +1,402 @@

% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
%
% You may obtain a copy of the License at
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing,
% software distributed under the License is distributed on an
% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
% either express or implied.
%
% See the License for the specific language governing permissions
% and limitations under the License.
%
% This file drew much inspiration from erlview, which was written by and
% copyright Michael McDaniel [http://autosys.us], and is also under APL 2.0
%
%
% This module provides the smallest possible native view-server.
% With this module in-place, you can add the following to your couch INI files:
%  [native_query_servers]
%  erlang={couch_native_process, start_link, []}
%
% Which will then allow following example map function to be used:
%
%  fun({Doc}) ->
%    % Below, we emit a single record - the _id as key, null as value
%    DocId = couch_util:get_value(Doc, <<"_id">>, null),
%    Emit(DocId, null)
%  end.
%
% which should be roughly the same as the javascript:
%    emit(doc._id, null);
%
% This module exposes enough functions such that a native erlang server can
% act as a fully-fleged view server, but no 'helper' functions specifically
% for simplifying your erlang view code. It is expected other third-party
% extensions will evolve which offer useful layers on top of this view server
% to help simplify your view code.
-module(couch_native_process).
-behaviour(gen_server).

-export([start_link/0,init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,
         handle_info/2]).
-export([set_timeout/2, prompt/2]).

-define(STATE, native_proc_state).
% ddocs: cache of design docs; funs: map funs added via add_fun;
% list_pid: pid of an in-flight list-function process (nil when idle).
-record(evstate, {ddocs, funs=[], query_config=[], list_pid=nil, timeout=5000}).

-include("couch_db.hrl").

start_link() ->
    gen_server:start_link(?MODULE, [], []).

% this is a bit messy, see also couch_query_servers handle_info
% stop(_Pid) ->
%     ok.

set_timeout(Pid, TimeOut) ->
    gen_server:call(Pid, {set_timeout, TimeOut}).

% Data is a decoded query-server command, e.g. [<<"map_doc">>, Doc].
prompt(Pid, Data) when is_list(Data) ->
    gen_server:call(Pid, {prompt, Data}).

% gen_server callbacks
init([]) ->
    {ok, #evstate{ddocs=dict:new()}}.

handle_call({set_timeout, TimeOut}, _From, State) ->
    {reply, ok, State#evstate{timeout=TimeOut}};

% Run one query-server command; thrown {error, Why} is converted into the
% protocol's ["error", ...] triple, and a "fatal" response stops the server.
handle_call({prompt, Data}, _From, State) ->
    ?LOG_DEBUG("Prompt native qs: ~s",[?JSON_ENCODE(Data)]),
    {NewState, Resp} = try run(State, to_binary(Data)) of
        {S, R} -> {S, R}
        catch
            throw:{error, Why} ->
                {State, [<<"error">>, Why, Why]}
        end,

    case Resp of
        {error, Reason} ->
            Msg = io_lib:format("couch native server error: ~p", [Reason]),
            {reply, [<<"error">>, <<"native_query_server">>, list_to_binary(Msg)], NewState};
        [<<"error">> | Rest] ->
            % Msg = io_lib:format("couch native server error: ~p", [Rest]),
            % TODO: markh? (jan)
            {reply, [<<"error">> | Rest], NewState};
        [<<"fatal">> | Rest] ->
            % Msg = io_lib:format("couch native server error: ~p", [Rest]),
            % TODO: markh? (jan)
            {stop, fatal, [<<"error">> | Rest], NewState};
        Resp ->
            {reply, Resp, NewState}
    end.

handle_cast(foo, State) -> {noreply, State}.
handle_info({'EXIT',_,normal}, State) -> {noreply, State};
handle_info({'EXIT',_,Reason}, State) ->
    {stop, Reason, State}.
terminate(_Reason, _State) -> ok.
code_change(_OldVersion, State, _Extra) -> {ok, State}.

% run/2 implements the query-server command protocol.  While a list
% function is active (list_pid is a pid), only list_row/list_end are legal;
% rows are relayed to the list process and its chunks relayed back.
run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
    Pid ! {self(), list_row, Row},
    receive
        {Pid, chunks, Data} ->
            {State, [<<"chunks">>, Data]};
        {Pid, list_end, Data} ->
            % wait for the trapped exit of the list process, then restore
            % the previous trap_exit flag saved in the process dictionary
            receive
                {'EXIT', Pid, normal} -> ok
            after State#evstate.timeout ->
                throw({timeout, list_cleanup})
            end,
            process_flag(trap_exit, erlang:get(do_trap)),
            {State#evstate{list_pid=nil}, [<<"end">>, Data]}
    after State#evstate.timeout ->
        throw({timeout, list_row})
    end;
run(#evstate{list_pid=Pid}=State, [<<"list_end">>]) when is_pid(Pid) ->
    Pid ! {self(), list_end},
    Resp =
    receive
        {Pid, list_end, Data} ->
            receive
                {'EXIT', Pid, normal} -> ok
            after State#evstate.timeout ->
                throw({timeout, list_cleanup})
            end,
            [<<"end">>, Data]
    after State#evstate.timeout ->
        throw({timeout, list_end})
    end,
    process_flag(trap_exit, erlang:get(do_trap)),
    {State#evstate{list_pid=nil}, Resp};
run(#evstate{list_pid=Pid}=State, _Command) when is_pid(Pid) ->
    {State, [<<"error">>, list_error, list_error]};
% reset clears the added funs but keeps the ddoc cache
run(#evstate{ddocs=DDocs}, [<<"reset">>]) ->
    {#evstate{ddocs=DDocs}, true};
run(#evstate{ddocs=DDocs}, [<<"reset">>, QueryConfig]) ->
    {#evstate{ddocs=DDocs, query_config=QueryConfig}, true};
run(#evstate{funs=Funs}=State, [<<"add_fun">> , BinFunc]) ->
    FunInfo = makefun(State, BinFunc),
    {State#evstate{funs=Funs ++ [FunInfo]}, true};
% map_doc: each added fun emits via the process dictionary (keyed by its
% signature); collect and reverse the emitted [Key, Value] pairs per fun
run(State, [<<"map_doc">> , Doc]) ->
    Resp = lists:map(fun({Sig, Fun}) ->
        erlang:put(Sig, []),
        Fun(Doc),
        lists:reverse(erlang:get(Sig))
    end, State#evstate.funs),
    {State, Resp};
run(State, [<<"reduce">>, Funs, KVs]) ->
    % split [[K,V], ...] into parallel key and value lists
    {Keys, Vals} =
    lists:foldl(fun([K, V], {KAcc, VAcc}) ->
        {[K | KAcc], [V | VAcc]}
    end, {[], []}, KVs),
    Keys2 = lists:reverse(Keys),
    Vals2 = lists:reverse(Vals),
    {State, catch reduce(State, Funs, Keys2, Vals2, false)};
run(State, [<<"rereduce">>, Funs, Vals]) ->
    {State, catch reduce(State, Funs, null, Vals, true)};
run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
    DDocs2 = store_ddoc(DDocs, DDocId, DDoc),
    {State#evstate{ddocs=DDocs2}, true};
run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, DDocId | Rest]) ->
    DDoc = load_ddoc(DDocs, DDocId),
    ddoc(State, DDoc, Rest);
run(_, Unknown) ->
    ?LOG_ERROR("Native Process: Unknown command: ~p~n", [Unknown]),
    throw({error, unknown_command}).

% Resolve FunPath (e.g. [<<"shows">>, <<"name">>]) inside the ddoc to the
% function source, compile it, and dispatch to the per-section ddoc/4.
ddoc(State, {DDoc}, [FunPath, Args]) ->
    % load fun from the FunPath
    BFun = lists:foldl(fun
        (Key, {Props}) when is_list(Props) ->
            couch_util:get_value(Key, Props, nil);
        (_Key, Fun) when is_binary(Fun) ->
            Fun;
        (_Key, nil) ->
            throw({error, not_found});
        (_Key, _Fun) ->
            throw({error, malformed_ddoc})
        end, {DDoc}, FunPath),
    ddoc(State, makefun(State, BFun, {DDoc}), FunPath, Args).
% Execute a compiled ddoc function; one clause per ddoc section, each
% shaping the result to the query-server wire protocol.
ddoc(State, {_, Fun}, [<<"validate_doc_update">>], Args) ->
    {State, (catch apply(Fun, Args))};
ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) ->
    % one strict boolean per doc: anything but 'true' filters the doc out
    Resp = lists:map(fun(Doc) -> (catch Fun(Doc, Req)) =:= true end, Docs),
    {State, [true, Resp]};
ddoc(State, {_, Fun}, [<<"shows">>|_], Args) ->
    Resp = case (catch apply(Fun, Args)) of
        FunResp when is_list(FunResp) ->
            FunResp;
        {FunResp} ->
            % a bare EJSON object is wrapped as a "resp" message
            [<<"resp">>, {FunResp}];
        FunResp ->
            FunResp
    end,
    {State, Resp};
ddoc(State, {_, Fun}, [<<"updates">>|_], Args) ->
    % only the [Doc, Resp] shape is handled; anything else raises case_clause
    Resp = case (catch apply(Fun, Args)) of
        [JsonDoc, JsonResp]  ->
            [<<"up">>, JsonDoc, JsonResp]
    end,
    {State, Resp};
ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
    % list functions run in a linked child process so GetRow can block on
    % rows relayed from the gen_server (see run/2 list_row handling)
    Self = self(),
    SpawnFun = fun() ->
        LastChunk = (catch apply(Fun, Args)),
        case start_list_resp(Self, Sig) of
            started ->
                % the fun never called GetRow: drain one row/end message
                receive
                    {Self, list_row, _Row} -> ignore;
                    {Self, list_end} -> ignore
                after State#evstate.timeout ->
                    throw({timeout, list_cleanup_pid})
                end;
            _ ->
                ok
        end,
        LastChunks =
        case erlang:get(Sig) of
            undefined -> [LastChunk];
            OtherChunks -> [LastChunk | OtherChunks]
        end,
        Self ! {self(), list_end, lists:reverse(LastChunks)}
    end,
    % remember the previous trap_exit flag so run/2 can restore it
    erlang:put(do_trap, process_flag(trap_exit, true)),
    Pid = spawn_link(SpawnFun),
    Resp =
    receive
        {Pid, start, Chunks, JsonResp} ->
            [<<"start">>, Chunks, JsonResp]
    after State#evstate.timeout ->
        throw({timeout, list_start})
    end,
    {State#evstate{list_pid=Pid}, Resp}.

% ddoc cache helpers
store_ddoc(DDocs, DDocId, DDoc) ->
    dict:store(DDocId, DDoc, DDocs).
load_ddoc(DDocs, DDocId) ->
    try dict:fetch(DDocId, DDocs) of
        {DDoc} -> {DDoc}
    catch
        % missing or malformed entry -> protocol-level error
        _:_Else -> throw({error, ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s",[DDocId]))})
    end.

bindings(State, Sig) ->
    bindings(State, Sig, nil).
% Build the variable bindings injected into user funs: Log, Emit (map),
% Start/Send/GetRow/FoldRows (list functions), plus DDoc when available.
% Emitted rows and sent chunks accumulate in the process dictionary under
% the fun's signature Sig.
bindings(State, Sig, DDoc) ->
    Self = self(),

    Log = fun(Msg) ->
        ?LOG_INFO(Msg, [])
    end,

    Emit = fun(Id, Value) ->
        Curr = erlang:get(Sig),
        erlang:put(Sig, [[Id, Value] | Curr])
    end,

    Start = fun(Headers) ->
        erlang:put(list_headers, Headers)
    end,

    Send = fun(Chunk) ->
        Curr =
        case erlang:get(Sig) of
            undefined -> [];
            Else -> Else
        end,
        erlang:put(Sig, [Chunk | Curr])
    end,

    GetRow = fun() ->
        % First call sends the "start" message; later calls flush pending
        % chunks, then block until the parent relays a row or the end.
        case start_list_resp(Self, Sig) of
            started ->
                ok;
            _ ->
                Chunks =
                case erlang:get(Sig) of
                    undefined -> [];
                    CurrChunks -> CurrChunks
                end,
                Self ! {self(), chunks, lists:reverse(Chunks)}
        end,
        erlang:put(Sig, []),
        receive
            {Self, list_row, Row} -> Row;
            {Self, list_end} -> nil
        after State#evstate.timeout ->
            throw({timeout, list_pid_getrow})
        end
    end,

    FoldRows = fun(Fun, Acc) -> foldrows(GetRow, Fun, Acc) end,

    Bindings = [
        {'Log', Log},
        {'Emit', Emit},
        {'Start', Start},
        {'Send', Send},
        {'GetRow', GetRow},
        {'FoldRows', FoldRows}
    ],
    case DDoc of
        {_Props} ->
            Bindings ++ [{'DDoc', DDoc}];
        _Else -> Bindings
    end.

% thanks to erlview, via:
% http://erlang.org/pipermail/erlang-questions/2003-November/010544.html
% Compile a fun from its source binary; the MD5 of the source serves as
% the signature under which its emitted rows are accumulated.
makefun(State, Source) ->
    Sig = couch_util:md5(Source),
    BindFuns = bindings(State, Sig),
    {Sig, makefun(State, Source, BindFuns)}.
% Variant with an attached design doc: the signature covers both the source
% and the ddoc body, so editing the ddoc produces a fresh compilation.
makefun(State, Source, {DDoc}) ->
    Sig = couch_util:md5(lists:flatten([Source, term_to_binary(DDoc)])),
    BindFuns = bindings(State, Sig, {DDoc}),
    {Sig, makefun(State, Source, BindFuns)};
% Scan/parse/evaluate the source string under the injected bindings and
% return the resulting fun.  Syntax errors are reported to stderr and then
% re-thrown so the caller can surface them.
makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
    FunStr = binary_to_list(Source),
    {ok, Tokens, _} = erl_scan:string(FunStr),
    Form = case (catch erl_parse:parse_exprs(Tokens)) of
        {ok, [ParsedForm]} ->
            ParsedForm;
        {error, {LineNum, _Mod, [Mesg, Params]}}=Error ->
            io:format(standard_error, "Syntax error on line: ~p~n", [LineNum]),
            io:format(standard_error, "~s~p~n", [Mesg, Params]),
            throw(Error)
    end,
    Bindings = lists:foldl(fun({Name, Fun}, Acc) ->
        erl_eval:add_binding(Name, Fun, Acc)
    end, erl_eval:new_bindings(), BindFuns),
    {value, Fun, _} = erl_eval:expr(Form, Bindings),
    Fun.

% Compile (if needed) and apply one or more reduce funs over Keys/Vals.
% ReReduce distinguishes first-pass reduce from rereduce; the [true, Reds]
% shape matches the query-server wire protocol.
reduce(State, BinFuns, Keys, Vals, ReReduce) ->
    Funs = case is_list(BinFuns) of
        true ->
            lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
        _ ->
            [makefun(State, BinFuns)]
    end,
    Reds = lists:map(fun({_Sig, Fun}) ->
        Fun(Keys, Vals, ReReduce)
    end, Funs),
    [true, Reds].

% Fold ProcRow over rows pulled from GetRow until it returns nil (no more
% rows) or ProcRow signals {stop, Acc}.
foldrows(GetRow, ProcRow, Acc) ->
    case GetRow() of
        nil ->
            {ok, Acc};
        Row ->
            case (catch ProcRow(Row, Acc)) of
                {ok, Acc2} ->
                    foldrows(GetRow, ProcRow, Acc2);
                {stop, Acc2} ->
                    {ok, Acc2}
            end
    end.

% Send the one-time list `start' message (headers + any chunks buffered so
% far) to the parent.  Idempotent: subsequent calls return ok, guarded by
% the list_started flag in the process dictionary.
start_list_resp(Self, Sig) ->
    case erlang:get(list_started) of
        undefined ->
            Headers =
                case erlang:get(list_headers) of
                    undefined -> {[{<<"headers">>, {[]}}]};
                    CurrHdrs -> CurrHdrs
                end,
            Chunks =
                case erlang:get(Sig) of
                    undefined -> [];
                    CurrChunks -> CurrChunks
                end,
            Self ! {self(), start, lists:reverse(Chunks), Headers},
            erlang:put(list_started, true),
            erlang:put(Sig, []),
            started;
        _ ->
            ok
    end.
+ +to_binary({Data}) -> + Pred = fun({Key, Value}) -> + {to_binary(Key), to_binary(Value)} + end, + {lists:map(Pred, Data)}; +to_binary(Data) when is_list(Data) -> + [to_binary(D) || D <- Data]; +to_binary(null) -> + null; +to_binary(true) -> + true; +to_binary(false) -> + false; +to_binary(Data) when is_atom(Data) -> + list_to_binary(atom_to_list(Data)); +to_binary(Data) -> + Data. diff --git a/apps/couch/src/couch_os_process.erl b/apps/couch/src/couch_os_process.erl new file mode 100644 index 00000000..5776776b --- /dev/null +++ b/apps/couch/src/couch_os_process.erl @@ -0,0 +1,185 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_os_process). +-behaviour(gen_server). + +-export([start_link/1, start_link/2, start_link/3, stop/1]). +-export([set_timeout/2, prompt/2]). +-export([send/2, writeline/2, readline/1, writejson/2, readjson/1]). +-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]). + +-include("couch_db.hrl"). + +-define(PORT_OPTIONS, [stream, {line, 1024}, binary, exit_status, hide]). + +-record(os_proc, + {command, + port, + writer, + reader, + timeout=5000 + }). + +start_link(Command) -> + start_link(Command, []). +start_link(Command, Options) -> + start_link(Command, Options, ?PORT_OPTIONS). +start_link(Command, Options, PortOptions) -> + gen_server:start_link(couch_os_process, [Command, Options, PortOptions], []). + +stop(Pid) -> + gen_server:cast(Pid, stop). 
% Read/Write API

% Change the per-request read timeout (milliseconds) of the OS process.
set_timeout(Pid, TimeOut) when is_integer(TimeOut) ->
    ok = gen_server:call(Pid, {set_timeout, TimeOut}).

% Used by couch_db_update_notifier.erl
% Fire-and-forget write to the OS process; no response is read.
send(Pid, Data) ->
    gen_server:cast(Pid, {send, Data}).

% Synchronous request/response round trip.  Any non-{ok, _} reply is logged
% and re-thrown so callers see the query-server error.
prompt(Pid, Data) ->
    case gen_server:call(Pid, {prompt, Data}, infinity) of
        {ok, Result} ->
            Result;
        Error ->
            ?LOG_ERROR("OS Process Error ~p :: ~p",[Pid,Error]),
            throw(Error)
    end.

% Utility functions for reading and writing
% in custom functions

% Write one newline-terminated line to the port.
writeline(OsProc, Data) when is_record(OsProc, os_proc) ->
    port_command(OsProc#os_proc.port, Data ++ "\n").

% Read one full line from the port, reassembling {noeol, _} fragments
% produced by the {line, N} port option.  Closes the port and throws on
% port errors or timeout.
readline(#os_proc{} = OsProc) ->
    readline(OsProc, []).
readline(#os_proc{port = Port} = OsProc, Acc) ->
    receive
        {Port, {data, {noeol, Data}}} ->
            readline(OsProc, [Data|Acc]);
        {Port, {data, {eol, Data}}} ->
            lists:reverse(Acc, Data);
        {Port, Err} ->
            catch port_close(Port),
            throw({os_process_error, Err})
    after OsProc#os_proc.timeout ->
        catch port_close(Port),
        throw({os_process_error, "OS process timed out."})
    end.

% Standard JSON functions

% Encode Data as JSON and write it as one line.
writejson(OsProc, Data) when is_record(OsProc, os_proc) ->
    JsonData = ?JSON_ENCODE(Data),
    ?LOG_DEBUG("OS Process ~p Input :: ~s", [OsProc#os_proc.port, JsonData]),
    true = writeline(OsProc, JsonData).

% Read one JSON line, handling the query-server side-channel messages:
% ["log", Msg] lines are logged and skipped; ["error"|...] and
% ["fatal"|...] are converted to throws (fatal additionally logged).
readjson(OsProc) when is_record(OsProc, os_proc) ->
    Line = readline(OsProc),
    ?LOG_DEBUG("OS Process ~p Output :: ~s", [OsProc#os_proc.port, Line]),
    case ?JSON_DECODE(Line) of
    [<<"log">>, Msg] when is_binary(Msg) ->
        % we got a message to log. Log it and continue
        ?LOG_INFO("OS Process ~p Log :: ~s", [OsProc#os_proc.port, Msg]),
        readjson(OsProc);
    [<<"error">>, Id, Reason] ->
        throw({couch_util:to_existing_atom(Id),Reason});
    [<<"fatal">>, Id, Reason] ->
        ?LOG_INFO("OS Process ~p Fatal Error :: ~s ~p",[OsProc#os_proc.port, Id, Reason]),
        throw({couch_util:to_existing_atom(Id),Reason});
    Result ->
        Result
    end.
% gen_server API

% Spawn the external command via the couchspawnkillable wrapper, which
% first prints the shell command needed to kill the real OS process.
init([Command, Options, PortOptions]) ->
    process_flag(trap_exit, true),
    PrivDir = couch_util:priv_dir(),
    Spawnkiller = filename:join(PrivDir, "couchspawnkillable"),
    BaseProc = #os_proc{
        command=Command,
        port=open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
        writer=fun writejson/2,
        reader=fun readjson/1
    },
    % First line from the wrapper is the kill command for the child.
    KillCmd = readline(BaseProc),
    Pid = self(),
    ?LOG_DEBUG("OS Process Start :: ~p", [BaseProc#os_proc.port]),
    spawn(fun() ->
            % this ensures the real os process is killed when this process dies.
            erlang:monitor(process, Pid),
            receive _ -> ok end,
            os:cmd(?b2l(KillCmd))
        end),
    % Allow callers to override the writer/reader codecs and the timeout.
    OsProc =
    lists:foldl(fun(Opt, Proc) ->
        case Opt of
        {writer, Writer} when is_function(Writer) ->
            Proc#os_proc{writer=Writer};
        {reader, Reader} when is_function(Reader) ->
            Proc#os_proc{reader=Reader};
        {timeout, TimeOut} when is_integer(TimeOut) ->
            Proc#os_proc{timeout=TimeOut}
        end
    end, BaseProc, Options),
    {ok, OsProc}.

terminate(_Reason, #os_proc{port=Port}) ->
    catch port_close(Port),
    ok.

handle_call({set_timeout, TimeOut}, _From, OsProc) ->
    {reply, ok, OsProc#os_proc{timeout=TimeOut}};
% One request/response exchange.  Recoverable query-server errors are
% replied to the caller; fatal or unexpected errors stop this server
% (reason `normal' so the supervisor does not log a crash).
handle_call({prompt, Data}, _From, OsProc) ->
    #os_proc{writer=Writer, reader=Reader} = OsProc,
    try
        Writer(OsProc, Data),
        {reply, {ok, Reader(OsProc)}, OsProc}
    catch
        throw:{error, OsError} ->
            {reply, OsError, OsProc};
        throw:{fatal, OsError} ->
            {stop, normal, OsError, OsProc};
        throw:OtherError ->
            {stop, normal, OtherError, OsProc}
    end.

% Async write; a failed write tears the server down.
handle_cast({send, Data}, #os_proc{writer=Writer}=OsProc) ->
    try
        Writer(OsProc, Data),
        {noreply, OsProc}
    catch
        throw:OsError ->
            ?LOG_ERROR("Failed sending data: ~p -> ~p", [Data, OsError]),
            {stop, normal, OsProc}
    end;
handle_cast(stop, OsProc) ->
    {stop, normal, OsProc};
handle_cast(Msg, OsProc) ->
    ?LOG_DEBUG("OS Proc: Unknown cast: ~p", [Msg]),
    {noreply, OsProc}.
+ +handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) -> + ?LOG_INFO("OS Process terminated normally", []), + {stop, normal, OsProc}; +handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) -> + ?LOG_ERROR("OS Process died with status: ~p", [Status]), + {stop, {exit_status, Status}, OsProc}; +handle_info(Msg, OsProc) -> + ?LOG_DEBUG("OS Proc: Unknown info: ~p", [Msg]), + {noreply, OsProc}. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + diff --git a/apps/couch/src/couch_query_servers.erl b/apps/couch/src/couch_query_servers.erl new file mode 100644 index 00000000..c4f1bf0b --- /dev/null +++ b/apps/couch/src/couch_query_servers.erl @@ -0,0 +1,485 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_query_servers). +-behaviour(gen_server). + +-export([start_link/0]). + +-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]). +-export([start_doc_map/2, map_docs/2, stop_doc_map/1]). +-export([reduce/3, rereduce/3,validate_doc_update/5]). +-export([filter_docs/5]). + +-export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]). + +% -export([test/0]). + +-include("couch_db.hrl"). + +-record(proc, { + pid, + lang, + ddoc_keys = [], + prompt_fun, + set_timeout_fun, + stop_fun +}). + +start_link() -> + gen_server:start_link({local, couch_query_servers}, couch_query_servers, [], []). 
% Check out an OS process for Lang and load it with the given map function
% sources; the caller owns the returned Proc until stop_doc_map/1.
start_doc_map(Lang, Functions) ->
    Proc = get_os_process(Lang),
    lists:foreach(fun(FunctionSource) ->
        true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
    end, Functions),
    {ok, Proc}.

% Run every loaded map fun over every doc; returns one result list per doc.
map_docs(Proc, Docs) ->
    % send the documents
    Results = lists:map(
        fun(Doc) ->
            Json = couch_doc:to_json_obj(Doc, []),

            FunsResults = proc_prompt(Proc, [<<"map_doc">>, Json]),
            % the results are a json array of function map yields like this:
            % [FunResults1, FunResults2 ...]
            % where FunResults are json arrays of key value pairs:
            % [[Key1, Value1], [Key2, Value2]]
            % Convert the key, value pairs to tuples like
            % [{Key1, Value1}, {Key2, Value2}]
            lists:map(
                fun(FunRs) ->
                    [list_to_tuple(FunResult) || FunResult <- FunRs]
                end,
            FunsResults)
        end,
        Docs),
    {ok, Results}.


% Return the OS process to the pool; nil means no process was checked out.
stop_doc_map(nil) ->
    ok;
stop_doc_map(Proc) ->
    ok = ret_os_process(Proc).

% Transpose a list of per-node reduction lists into per-function groups:
% [[a1,b1],[a2,b2]] -> [[a1,a2],[b1,b2]] (inner order may differ; only
% grouping by position matters to the callers).
group_reductions_results([]) ->
    [];
group_reductions_results(List) ->
    {Heads, Tails} = lists:foldl(
        fun([H|T], {HAcc,TAcc}) ->
            {[H|HAcc], [T|TAcc]}
        end, {[], []}, List),
    case Tails of
    [[]|_] -> % no tails left
        [Heads];
    _ ->
        [Heads | group_reductions_results(Tails)]
    end.

% Rereduce previously reduced values, one reduce source at a time.
% Builtin reductions (sources starting with "_") run in Erlang; everything
% else goes to the external query server.
rereduce(_Lang, [], _ReducedValues) ->
    {ok, []};
rereduce(Lang, RedSrcs, ReducedValues) ->
    Grouped = group_reductions_results(ReducedValues),
    Results = lists:zipwith(
        fun
        (<<"_", _/binary>> = FunSrc, Values) ->
            % builtins expect [[Key, Value], ...]; keys are irrelevant here
            {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
            Result;
        (FunSrc, Values) ->
            os_rereduce(Lang, [FunSrc], Values)
        end, RedSrcs, Grouped),
    {ok, Results}.

% First-pass reduce: split sources into builtin vs OS-executed, run each
% batch, then re-interleave the results in the original order.
reduce(_Lang, [], _KVs) ->
    {ok, []};
reduce(Lang, RedSrcs, KVs) ->
    {OsRedSrcs, BuiltinReds} = lists:partition(fun
        (<<"_", _/binary>>) -> false;
        (_OsFun) -> true
    end, RedSrcs),
    {ok, OsResults} = os_reduce(Lang, OsRedSrcs, KVs),
    {ok, BuiltinResults} = builtin_reduce(reduce, BuiltinReds, KVs, []),
    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, []).
% Re-interleave OS and builtin reduce results back into RedSrcs order.
% Builtin sources start with "_"; each clause consumes the next result from
% the matching list.
recombine_reduce_results([], [], [], Acc) ->
    {ok, lists:reverse(Acc)};
recombine_reduce_results([<<"_", _/binary>>|RedSrcs], OsResults, [BRes|BuiltinResults], Acc) ->
    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes|Acc]);
recombine_reduce_results([_OsFun|RedSrcs], [OsR|OsResults], BuiltinResults, Acc) ->
    recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR|Acc]).

% Run the non-builtin reduce sources in an external query server.
% Returns {ok, Reductions}; the process is always checked back in via the
% after clause, even if the prompt throws.
os_reduce(_Lang, [], _KVs) ->
    {ok, []};
os_reduce(Lang, OsRedSrcs, KVs) ->
    Proc = get_os_process(Lang),
    OsResults = try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
        [true, Reductions] -> Reductions
    after
        ok = ret_os_process(Proc)
    end,
    {ok, OsResults}.

% Rereduce a single OS-defined function over already-reduced values and
% return the bare reduction value (rereduce/3 uses the result directly,
% without an {ok, _} wrapper).
% FIX: the empty-source clause used to return {ok, []}, which contradicted
% the bare-value return shape of the main clause.  The clause is not
% reachable from rereduce/3 (which always passes a one-element source
% list), so aligning it to [] is behavior-safe for existing callers while
% making the function's contract consistent across clauses.
os_rereduce(_Lang, [], _KVs) ->
    [];
os_rereduce(Lang, OsRedSrcs, KVs) ->
    Proc = get_os_process(Lang),
    try proc_prompt(Proc, [<<"rereduce">>, OsRedSrcs, KVs]) of
        [true, [Reduction]] -> Reduction
    after
        ok = ret_os_process(Proc)
    end.


% Evaluate the builtin "_sum" / "_count" / "_stats" reductions in Erlang.
% `Re' is `reduce' for the first pass over [[Key, Value], ...] rows and
% `rereduce' for combining previous reduction values.
builtin_reduce(_Re, [], _KVs, Acc) ->
    {ok, lists:reverse(Acc)};
builtin_reduce(Re, [<<"_sum",_/binary>>|BuiltinReds], KVs, Acc) ->
    Sum = builtin_sum_rows(KVs),
    builtin_reduce(Re, BuiltinReds, KVs, [Sum|Acc]);
builtin_reduce(reduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
    Count = length(KVs),
    builtin_reduce(reduce, BuiltinReds, KVs, [Count|Acc]);
builtin_reduce(rereduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
    % rereduce of _count sums the partial counts carried in the value slot
    Count = builtin_sum_rows(KVs),
    builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]);
builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) ->
    Stats = builtin_stats(Re, KVs),
    builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]).

% Sum the value slot of [[Key, Value], ...] rows; non-numeric values are a
% user error surfaced as invalid_value.
builtin_sum_rows(KVs) ->
    lists:foldl(fun
        ([_Key, Value], Acc) when is_number(Value) ->
            Acc + Value;
        (_Else, _Acc) ->
            throw({invalid_value, <<"builtin _sum function requires map values to be numbers">>})
    end, 0, KVs).
% First-pass _stats: fold {Sum, Count, Min, Max, SumSqr} over numeric map
% values, seeded from the first row (guard ensures at least one numeric
% value; an empty KV list is a function_clause by design).
builtin_stats(reduce, [[_,First]|Rest]) when is_number(First) ->
    Stats = lists:foldl(fun([_K,V], {S,C,Mi,Ma,Sq}) when is_number(V) ->
        {S+V, C+1, erlang:min(Mi,V), erlang:max(Ma,V), Sq+(V*V)};
        (_, _) ->
            throw({invalid_value,
                <<"builtin _stats function requires map values to be numbers">>})
    end, {First,1,First,First,First*First}, Rest),
    {Sum, Cnt, Min, Max, Sqr} = Stats,
    {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]};

% Rereduce _stats: each value is already a stats object; combine them
% field-wise, seeded from the first object.
builtin_stats(rereduce, [[_,First]|Rest]) ->
    {[{sum,Sum0}, {count,Cnt0}, {min,Min0}, {max,Max0}, {sumsqr,Sqr0}]} = First,
    Stats = lists:foldl(fun([_K,Red], {S,C,Mi,Ma,Sq}) ->
        {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]} = Red,
        {Sum+S, Cnt+C, erlang:min(Min,Mi), erlang:max(Max,Ma), Sqr+Sq}
    end, {Sum0,Cnt0,Min0,Max0,Sqr0}, Rest),
    {Sum, Cnt, Min, Max, Sqr} = Stats,
    {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]}.

% use the function stored in ddoc.validate_doc_update to test an update.
% A result of 1 means accepted (query-server protocol); a forbidden or
% unauthorized object becomes the corresponding throw; anything else
% crashes the case (let-it-crash on a malformed validator result).
validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
    JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]),
    JsonDiskDoc = json_doc(DiskDoc),
    case ddoc_prompt(DDoc, [<<"validate_doc_update">>], [JsonEditDoc, JsonDiskDoc, Ctx, SecObj]) of
        1 ->
            ok;
        {[{<<"forbidden">>, Message}]} ->
            throw({forbidden, Message});
        {[{<<"unauthorized">>, Message}]} ->
            throw({unauthorized, Message})
    end.

% EJSON form of a doc, mapping the no-doc sentinel nil to JSON null.
json_doc(nil) -> null;
json_doc(Doc) ->
    couch_doc:to_json_obj(Doc, [revs]).

% Apply the named ddoc filter to Docs; returns {ok, [bool()]} flags in doc
% order.  Req may be a pre-built {json_req, Obj} or a live #httpd{} request.
filter_docs(Req, Db, DDoc, FName, Docs) ->
    JsonReq = case Req of
    {json_req, JsonObj} ->
        JsonObj;
    #httpd{} = HttpReq ->
        couch_httpd_external:json_req_obj(HttpReq, Db)
    end,
    JsonDocs = [couch_doc:to_json_obj(Doc, [revs]) || Doc <- Docs],
    [true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName], [JsonDocs, JsonReq]),
    {ok, Passes}.

% Prompt a proc that already knows the ddoc (see with_ddoc_proc/2).
ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
    proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args]).
+ +ddoc_prompt(DDoc, FunPath, Args) -> + with_ddoc_proc(DDoc, fun({Proc, DDocId}) -> + proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args]) + end). + +with_ddoc_proc(#doc{id=DDocId,revs={Start, [DiskRev|_]}}=DDoc, Fun) -> + Rev = couch_doc:rev_to_str({Start, DiskRev}), + DDocKey = {DDocId, Rev}, + Proc = get_ddoc_process(DDoc, DDocKey), + try Fun({Proc, DDocId}) + after + ok = ret_os_process(Proc) + end. + +init([]) -> + % read config and register for configuration changes + + % just stop if one of the config settings change. couch_server_sup + % will restart us and then we will pick up the new settings. + + ok = couch_config:register( + fun("query_servers" ++ _, _) -> + supervisor:terminate_child(couch_secondary_services, query_servers), + supervisor:restart_child(couch_secondary_services, query_servers) + end), + ok = couch_config:register( + fun("native_query_servers" ++ _, _) -> + supervisor:terminate_child(couch_secondary_services, query_servers), + [supervisor:restart_child(couch_secondary_services, query_servers)] + end), + + Langs = ets:new(couch_query_server_langs, [set, private]), + PidProcs = ets:new(couch_query_server_pid_langs, [set, private]), + LangProcs = ets:new(couch_query_server_procs, [set, private]), + % 'query_servers' specifies an OS command-line to execute. + lists:foreach(fun({Lang, Command}) -> + true = ets:insert(Langs, {?l2b(Lang), + couch_os_process, start_link, [Command]}) + end, couch_config:get("query_servers")), + % 'native_query_servers' specifies a {Module, Func, Arg} tuple. + lists:foreach(fun({Lang, SpecStr}) -> + {ok, {Mod, Fun, SpecArg}} = couch_util:parse_term(SpecStr), + true = ets:insert(Langs, {?l2b(Lang), + Mod, Fun, SpecArg}) + end, couch_config:get("native_query_servers")), + process_flag(trap_exit, true), + {ok, {Langs, % Keyed by language name, value is {Mod,Func,Arg} + PidProcs, % Keyed by PID, valus is a #proc record. + LangProcs % Keyed by language name, value is a #proc record + }}. 
% Synchronously shut down every pooled query-server process on exit.
terminate(_Reason, {_Langs, PidProcs, _LangProcs}) ->
    [couch_util:shutdown_sync(P) || {P,_} <- ets:tab2list(PidProcs)],
    ok.

% Check out a process for a specific design doc.  A checked-out proc is
% removed from the per-language available list (LangProcs) and handed to
% the caller together with the current query-server config; it comes back
% via {ret_proc, Proc}.
handle_call({get_proc, #doc{body={Props}}=DDoc, DDocKey}, _From, {Langs, PidProcs, LangProcs}=Server) ->
    % Note to future self. Add max process limit.
    Lang = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
    case ets:lookup(LangProcs, Lang) of
    [{Lang, [P|Rest]}] ->
        % find a proc in the set that has the DDoc
        case proc_with_ddoc(DDoc, DDocKey, [P|Rest]) of
        {ok, Proc} ->
            rem_from_list(LangProcs, Lang, Proc),
            {reply, {ok, Proc, get_query_server_config()}, Server};
        Error ->
            {reply, Error, Server}
        end;
    _ ->
        % no idle proc for this language: spawn one and teach it the ddoc
        case (catch new_process(Langs, Lang)) of
        {ok, Proc} ->
            add_value(PidProcs, Proc#proc.pid, Proc),
            case proc_with_ddoc(DDoc, DDocKey, [Proc]) of
            {ok, Proc2} ->
                {reply, {ok, Proc2, get_query_server_config()}, Server};
            Error ->
                {reply, Error, Server}
            end;
        Error ->
            {reply, Error, Server}
        end
    end;
% Check out any process for a language (no ddoc involved).
handle_call({get_proc, Lang}, _From, {Langs, PidProcs, LangProcs}=Server) ->
    % Note to future self. Add max process limit.
    case ets:lookup(LangProcs, Lang) of
    [{Lang, [Proc|_]}] ->
        rem_from_list(LangProcs, Lang, Proc),
        {reply, {ok, Proc, get_query_server_config()}, Server};
    _ ->
        case (catch new_process(Langs, Lang)) of
        {ok, Proc} ->
            add_value(PidProcs, Proc#proc.pid, Proc),
            {reply, {ok, Proc, get_query_server_config()}, Server};
        Error ->
            {reply, Error, Server}
        end
    end;
% Hand link ownership of the proc to the caller (see get_os_process/1).
handle_call({unlink_proc, Pid}, _From, {_, PidProcs, _}=Server) ->
    rem_value(PidProcs, Pid),
    unlink(Pid),
    {reply, ok, Server};
% Check a process back into the pool and re-link it to this server.
handle_call({ret_proc, Proc}, _From, {_, PidProcs, LangProcs}=Server) ->
    % Along with max process limit, here we should check
    % if we're over the limit and discard when we are.
    add_value(PidProcs, Proc#proc.pid, Proc),
    add_to_list(LangProcs, Proc#proc.lang, Proc),
    link(Proc#proc.pid),
    {reply, true, Server}.
handle_cast(_Whatever, Server) ->
    {noreply, Server}.

% A linked query-server process exited.  Known pids are purged from both
% tables; an EXIT from an unknown pid with an abnormal reason takes this
% server down (it is a real fault, not a pooled proc dying).
handle_info({'EXIT', Pid, Status}, {_, PidProcs, LangProcs}=Server) ->
    case ets:lookup(PidProcs, Pid) of
    [{Pid, Proc}] ->
        case Status of
        normal -> ok;
        _ -> ?LOG_DEBUG("Linked process died abnormally: ~p (reason: ~p)", [Pid, Status])
        end,
        rem_value(PidProcs, Pid),
        catch rem_from_list(LangProcs, Proc#proc.lang, Proc),
        {noreply, Server};
    [] ->
        case Status of
        normal ->
            {noreply, Server};
        _ ->
            {stop, Status, Server}
        end
    end.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

% Private API

% Config snapshot handed to each proc on reset.
% NOTE(review): list_to_atom on a config value -- safe only because the
% value comes from the trusted local config ("true"/"false").
get_query_server_config() ->
    ReduceLimit = list_to_atom(
        couch_config:get("query_server_config","reduce_limit","true")),
    {[{<<"reduce_limit">>, ReduceLimit}]}.

% Start a fresh query-server process via the {Mod, Func, Arg} registered
% for Lang; returns {unknown_query_language, Lang} for unregistered ones.
new_process(Langs, Lang) ->
    case ets:lookup(Langs, Lang) of
    [{Lang, Mod, Func, Arg}] ->
        {ok, Pid} = apply(Mod, Func, Arg),
        {ok, #proc{lang=Lang,
                   pid=Pid,
                   % Called via proc_prompt, proc_set_timeout, and proc_stop
                   prompt_fun={Mod, prompt},
                   set_timeout_fun={Mod, set_timeout},
                   stop_fun={Mod, stop}}};
    _ ->
        {unknown_query_language, Lang}
    end.

% Prefer a proc that already knows DDocKey; otherwise teach the first
% available proc the ddoc.  Callers guarantee LangProcs is non-empty.
proc_with_ddoc(DDoc, DDocKey, LangProcs) ->
    DDocProcs = lists:filter(fun(#proc{ddoc_keys=Keys}) ->
            lists:any(fun(Key) ->
                Key == DDocKey
            end, Keys)
        end, LangProcs),
    case DDocProcs of
    [DDocProc|_] ->
        ?LOG_DEBUG("DDocProc found for DDocKey: ~p",[DDocKey]),
        {ok, DDocProc};
    [] ->
        [TeachProc|_] = LangProcs,
        ?LOG_DEBUG("Teach ddoc to new proc ~p with DDocKey: ~p",[TeachProc, DDocKey]),
        {ok, SmartProc} = teach_ddoc(DDoc, DDocKey, TeachProc),
        {ok, SmartProc}
    end.

% Thin dispatchers over the per-language callback module recorded in #proc{}.
proc_prompt(Proc, Args) ->
    {Mod, Func} = Proc#proc.prompt_fun,
    apply(Mod, Func, [Proc#proc.pid, Args]).

proc_stop(Proc) ->
    {Mod, Func} = Proc#proc.stop_fun,
    apply(Mod, Func, [Proc#proc.pid]).

proc_set_timeout(Proc, Timeout) ->
    {Mod, Func} = Proc#proc.set_timeout_fun,
    apply(Mod, Func, [Proc#proc.pid, Timeout]).
% Send the ddoc to the proc and record the key in its ddoc_keys.
teach_ddoc(DDoc, {DDocId, _Rev}=DDocKey, #proc{ddoc_keys=Keys}=Proc) ->
    % send ddoc over the wire
    % we only share the rev with the client we know to update code
    % but it only keeps the latest copy, per each ddoc, around.
    true = proc_prompt(Proc, [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]),
    % we should remove any other ddocs keys for this docid
    % because the query server overwrites without the rev
    Keys2 = [{D,R} || {D,R} <- Keys, D /= DDocId],
    % add ddoc to the proc
    {ok, Proc#proc{ddoc_keys=[DDocKey|Keys2]}}.

% Check out a proc that knows DDocKey, reset it, and transfer the link from
% the pool server to the caller.  If the reset prompt fails the proc is
% stopped and the whole checkout is retried recursively.
get_ddoc_process(#doc{} = DDoc, DDocKey) ->
    % remove this case statement
    case gen_server:call(couch_query_servers, {get_proc, DDoc, DDocKey}) of
    {ok, Proc, QueryConfig} ->
        % process knows the ddoc
        case (catch proc_prompt(Proc, [<<"reset">>, QueryConfig])) of
        true ->
            proc_set_timeout(Proc, list_to_integer(couch_config:get(
                "couchdb", "os_process_timeout", "5000"))),
            % link before asking the server to unlink, so the proc is never
            % momentarily unsupervised
            link(Proc#proc.pid),
            gen_server:call(couch_query_servers, {unlink_proc, Proc#proc.pid}),
            Proc;
        _ ->
            catch proc_stop(Proc),
            get_ddoc_process(DDoc, DDocKey)
        end;
    Error ->
        throw(Error)
    end.

% Same checkout/reset/link-transfer dance, keyed only by language.
get_os_process(Lang) ->
    case gen_server:call(couch_query_servers, {get_proc, Lang}) of
    {ok, Proc, QueryConfig} ->
        case (catch proc_prompt(Proc, [<<"reset">>, QueryConfig])) of
        true ->
            proc_set_timeout(Proc, list_to_integer(couch_config:get(
                "couchdb", "os_process_timeout", "5000"))),
            link(Proc#proc.pid),
            gen_server:call(couch_query_servers, {unlink_proc, Proc#proc.pid}),
            Proc;
        _ ->
            catch proc_stop(Proc),
            get_os_process(Lang)
        end;
    Error ->
        throw(Error)
    end.

% Return a proc to the pool; the server re-links it, so drop our own link.
ret_os_process(Proc) ->
    true = gen_server:call(couch_query_servers, {ret_proc, Proc}),
    catch unlink(Proc#proc.pid),
    ok.

% ETS helpers: single-value slots keyed by pid.
add_value(Tid, Key, Value) ->
    true = ets:insert(Tid, {Key, Value}).

rem_value(Tid, Key) ->
    true = ets:delete(Tid, Key).
+ +add_to_list(Tid, Key, Value) -> + case ets:lookup(Tid, Key) of + [{Key, Vals}] -> + true = ets:insert(Tid, {Key, [Value|Vals]}); + [] -> + true = ets:insert(Tid, {Key, [Value]}) + end. + +rem_from_list(Tid, Key, Value) when is_record(Value, proc)-> + Pid = Value#proc.pid, + case ets:lookup(Tid, Key) of + [{Key, Vals}] -> + % make a new values list that doesn't include the Value arg + NewValues = [Val || #proc{pid=P}=Val <- Vals, P /= Pid], + ets:insert(Tid, {Key, NewValues}); + [] -> ok + end; +rem_from_list(Tid, Key, Value) -> + case ets:lookup(Tid, Key) of + [{Key, Vals}] -> + % make a new values list that doesn't include the Value arg + NewValues = [Val || Val <- Vals, Val /= Value], + ets:insert(Tid, {Key, NewValues}); + [] -> ok + end. diff --git a/apps/couch/src/couch_ref_counter.erl b/apps/couch/src/couch_ref_counter.erl new file mode 100644 index 00000000..5a111ab6 --- /dev/null +++ b/apps/couch/src/couch_ref_counter.erl @@ -0,0 +1,111 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_ref_counter). +-behaviour(gen_server). + +-export([start/1, init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]). +-export([drop/1,drop/2,add/1,add/2,count/1]). + +start(ChildProcs) -> + gen_server:start(couch_ref_counter, {self(), ChildProcs}, []). + + +drop(RefCounterPid) -> + drop(RefCounterPid, self()). + +drop(RefCounterPid, Pid) -> + gen_server:call(RefCounterPid, {drop, Pid}). 
% Register the calling process as a referrer.
add(RefCounterPid) ->
    add(RefCounterPid, self()).

add(RefCounterPid, Pid) ->
    gen_server:call(RefCounterPid, {add, Pid}).

% Number of distinct monitored referrers (not the sum of their ref counts).
count(RefCounterPid) ->
    gen_server:call(RefCounterPid, count).

% server functions

-record(srv,
    {
    referrers=dict:new(), % a dict of each ref counting proc.
    child_procs=[]
    }).

% Link the guarded child processes and seed the referrer dict with the
% creating process (monitored, count 1).
init({Pid, ChildProcs}) ->
    [link(ChildProc) || ChildProc <- ChildProcs],
    Referrers = dict:from_list([{Pid, {erlang:monitor(process, Pid), 1}}]),
    {ok, #srv{referrers=Referrers, child_procs=ChildProcs}}.


% When the counter dies, take the guarded children down with it.
terminate(_Reason, #srv{child_procs=ChildProcs}) ->
    [couch_util:shutdown_sync(Pid) || Pid <- ChildProcs],
    ok.


% add: first reference from a pid installs a monitor; later ones just bump
% its count.
handle_call({add, Pid},_From, #srv{referrers=Referrers}=Srv) ->
    Referrers2 =
    case dict:find(Pid, Referrers) of
    error ->
        dict:store(Pid, {erlang:monitor(process, Pid), 1}, Referrers);
    {ok, {MonRef, RefCnt}} ->
        dict:store(Pid, {MonRef, RefCnt + 1}, Referrers)
    end,
    {reply, ok, Srv#srv{referrers=Referrers2}};
handle_call(count, _From, Srv) ->
    {monitors, Monitors} = process_info(self(), monitors),
    {reply, length(Monitors), Srv};
% drop: the last reference from a pid demonitors it; dropping an unknown
% pid is a no-op.  When no monitored referrers remain, stop (which shuts
% down the children via terminate/2).
handle_call({drop, Pid}, _From, #srv{referrers=Referrers}=Srv) ->
    Referrers2 =
    case dict:find(Pid, Referrers) of
    {ok, {MonRef, 1}} ->
        erlang:demonitor(MonRef, [flush]),
        dict:erase(Pid, Referrers);
    {ok, {MonRef, Num}} ->
        dict:store(Pid, {MonRef, Num-1}, Referrers);
    error ->
        Referrers
    end,
    Srv2 = Srv#srv{referrers=Referrers2},
    case should_close() of
    true ->
        {stop,normal,ok,Srv2};
    false ->
        {reply, ok, Srv2}
    end.

handle_cast(Msg, _Srv)->
    exit({unknown_msg,Msg}).


code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

% A referrer died: forget it entirely (regardless of its remaining count)
% and stop once no monitored referrers are left.
handle_info({'DOWN', MonRef, _, Pid, _}, #srv{referrers=Referrers}=Srv) ->
    {ok, {MonRef, _RefCount}} = dict:find(Pid, Referrers),
    Srv2 = Srv#srv{referrers=dict:erase(Pid, Referrers)},
    case should_close() of
    true ->
        {stop,normal,Srv2};
    false ->
        {noreply,Srv2}
    end.
+ + +should_close() -> + case process_info(self(), monitors) of + {monitors, []} -> true; + _ -> false + end. diff --git a/apps/couch/src/couch_rep.erl b/apps/couch/src/couch_rep.erl new file mode 100644 index 00000000..65573e8c --- /dev/null +++ b/apps/couch/src/couch_rep.erl @@ -0,0 +1,748 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_rep). +-behaviour(gen_server). +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3]). + +-export([replicate/2, checkpoint/1]). + +-include("couch_db.hrl"). + +-record(state, { + changes_feed, + missing_revs, + reader, + writer, + + source, + target, + continuous, + create_target, + init_args, + checkpoint_scheduled = nil, + + start_seq, + history, + source_log, + target_log, + rep_starttime, + src_starttime, + tgt_starttime, + checkpoint_history = nil, + + listeners = [], + complete = false, + committed_seq = 0, + + stats = nil, + doc_ids = nil +}). 
+ +%% convenience function to do a simple replication from the shell +replicate(Source, Target) when is_list(Source) -> + replicate(?l2b(Source), Target); +replicate(Source, Target) when is_binary(Source), is_list(Target) -> + replicate(Source, ?l2b(Target)); +replicate(Source, Target) when is_binary(Source), is_binary(Target) -> + replicate({[{<<"source">>, Source}, {<<"target">>, Target}]}, #user_ctx{}); + +%% function handling POST to _replicate +replicate({Props}=PostBody, UserCtx) -> + {BaseId, Extension} = make_replication_id(PostBody, UserCtx), + Replicator = {BaseId ++ Extension, + {gen_server, start_link, [?MODULE, [BaseId, PostBody, UserCtx], []]}, + temporary, + 1, + worker, + [?MODULE] + }, + + case couch_util:get_value(<<"cancel">>, Props, false) of + true -> + case supervisor:terminate_child(couch_rep_sup, BaseId ++ Extension) of + {error, not_found} -> + {error, not_found}; + ok -> + ok = supervisor:delete_child(couch_rep_sup, BaseId ++ Extension), + {ok, {cancelled, ?l2b(BaseId)}} + end; + false -> + Server = start_replication_server(Replicator), + + case couch_util:get_value(<<"continuous">>, Props, false) of + true -> + {ok, {continuous, ?l2b(BaseId)}}; + false -> + get_result(Server, PostBody, UserCtx) + end + end. + +checkpoint(Server) -> + gen_server:cast(Server, do_checkpoint). + +get_result(Server, PostBody, UserCtx) -> + try gen_server:call(Server, get_result, infinity) of + retry -> replicate(PostBody, UserCtx); + Else -> Else + catch + exit:{noproc, {gen_server, call, [Server, get_result , infinity]}} -> + %% oops, this replication just finished -- restart it. + replicate(PostBody, UserCtx); + exit:{normal, {gen_server, call, [Server, get_result , infinity]}} -> + %% we made the call during terminate + replicate(PostBody, UserCtx) + end. + +init(InitArgs) -> + try do_init(InitArgs) + catch throw:{db_not_found, DbUrl} -> {stop, {db_not_found, DbUrl}} end. 
+ +do_init([RepId, {PostProps}, UserCtx] = InitArgs) -> + process_flag(trap_exit, true), + + SourceProps = couch_util:get_value(<<"source">>, PostProps), + TargetProps = couch_util:get_value(<<"target">>, PostProps), + + DocIds = couch_util:get_value(<<"doc_ids">>, PostProps, nil), + Continuous = couch_util:get_value(<<"continuous">>, PostProps, false), + CreateTarget = couch_util:get_value(<<"create_target">>, PostProps, false), + + ProxyParams = parse_proxy_params( + couch_util:get_value(<<"proxy">>, PostProps, [])), + Source = open_db(SourceProps, UserCtx, ProxyParams), + Target = open_db(TargetProps, UserCtx, ProxyParams, CreateTarget), + + SourceInfo = dbinfo(Source), + TargetInfo = dbinfo(Target), + + case DocIds of + List when is_list(List) -> + % Fast replication using only a list of doc IDs to replicate. + % Replication sessions, checkpoints and logs are not created + % since the update sequence number of the source DB is not used + % for determining which documents are copied into the target DB. + SourceLog = nil, + TargetLog = nil, + + StartSeq = nil, + History = nil, + + ChangesFeed = nil, + MissingRevs = nil, + + {ok, Reader} = + couch_rep_reader:start_link(self(), Source, DocIds, PostProps); + + _ -> + % Replication using the _changes API (DB sequence update numbers). 
+ SourceLog = open_replication_log(Source, RepId), + TargetLog = open_replication_log(Target, RepId), + + {StartSeq, History} = compare_replication_logs(SourceLog, TargetLog), + + {ok, ChangesFeed} = + couch_rep_changes_feed:start_link(self(), Source, StartSeq, PostProps), + {ok, MissingRevs} = + couch_rep_missing_revs:start_link(self(), Target, ChangesFeed, PostProps), + {ok, Reader} = + couch_rep_reader:start_link(self(), Source, MissingRevs, PostProps) + end, + + {ok, Writer} = + couch_rep_writer:start_link(self(), Target, Reader, PostProps), + + Stats = ets:new(replication_stats, [set, private]), + ets:insert(Stats, {total_revs,0}), + ets:insert(Stats, {missing_revs, 0}), + ets:insert(Stats, {docs_read, 0}), + ets:insert(Stats, {docs_written, 0}), + ets:insert(Stats, {doc_write_failures, 0}), + + {ShortId, _} = lists:split(6, RepId), + couch_task_status:add_task("Replication", io_lib:format("~s: ~s -> ~s", + [ShortId, dbname(Source), dbname(Target)]), "Starting"), + + State = #state{ + changes_feed = ChangesFeed, + missing_revs = MissingRevs, + reader = Reader, + writer = Writer, + + source = Source, + target = Target, + continuous = Continuous, + create_target = CreateTarget, + init_args = InitArgs, + stats = Stats, + checkpoint_scheduled = nil, + + start_seq = StartSeq, + history = History, + source_log = SourceLog, + target_log = TargetLog, + rep_starttime = httpd_util:rfc1123_date(), + src_starttime = couch_util:get_value(instance_start_time, SourceInfo), + tgt_starttime = couch_util:get_value(instance_start_time, TargetInfo), + doc_ids = DocIds + }, + {ok, State}. + +handle_call(get_result, From, #state{complete=true, listeners=[]} = State) -> + {stop, normal, State#state{listeners=[From]}}; +handle_call(get_result, From, State) -> + Listeners = State#state.listeners, + {noreply, State#state{listeners=[From|Listeners]}}. + +handle_cast(do_checkpoint, State) -> + {noreply, do_checkpoint(State)}; + +handle_cast(_Msg, State) -> + {noreply, State}. 
+ +handle_info({missing_revs_checkpoint, SourceSeq}, State) -> + couch_task_status:update("MR Processed source update #~p", [SourceSeq]), + {noreply, schedule_checkpoint(State#state{committed_seq = SourceSeq})}; + +handle_info({writer_checkpoint, SourceSeq}, #state{committed_seq=N} = State) + when SourceSeq > N -> + MissingRevs = State#state.missing_revs, + ok = gen_server:cast(MissingRevs, {update_committed_seq, SourceSeq}), + couch_task_status:update("W Processed source update #~p", [SourceSeq]), + {noreply, schedule_checkpoint(State#state{committed_seq = SourceSeq})}; +handle_info({writer_checkpoint, _}, State) -> + {noreply, State}; + +handle_info({update_stats, Key, N}, State) -> + ets:update_counter(State#state.stats, Key, N), + {noreply, State}; + +handle_info({'DOWN', _, _, _, _}, State) -> + ?LOG_INFO("replication terminating because local DB is shutting down", []), + timer:cancel(State#state.checkpoint_scheduled), + {stop, shutdown, State}; + +handle_info({'EXIT', Writer, normal}, #state{writer=Writer} = State) -> + case State#state.listeners of + [] -> + {noreply, State#state{complete = true}}; + _Else -> + {stop, normal, State} + end; + +handle_info({'EXIT', _, normal}, State) -> + {noreply, State}; +handle_info({'EXIT', _Pid, {Err, Reason}}, State) when Err == source_error; + Err == target_error -> + ?LOG_INFO("replication terminating due to ~p: ~p", [Err, Reason]), + timer:cancel(State#state.checkpoint_scheduled), + {stop, shutdown, State}; +handle_info({'EXIT', _Pid, Reason}, State) -> + {stop, Reason, State}. 
+ +terminate(normal, #state{checkpoint_scheduled=nil} = State) -> + do_terminate(State); + +terminate(normal, State) -> + timer:cancel(State#state.checkpoint_scheduled), + do_terminate(do_checkpoint(State)); + +terminate(Reason, State) -> + #state{ + listeners = Listeners, + source = Source, + target = Target, + stats = Stats + } = State, + [gen_server:reply(L, {error, Reason}) || L <- Listeners], + ets:delete(Stats), + close_db(Target), + close_db(Source). + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +% internal funs + +start_replication_server(Replicator) -> + RepId = element(1, Replicator), + case supervisor:start_child(couch_rep_sup, Replicator) of + {ok, Pid} -> + ?LOG_INFO("starting new replication ~p at ~p", [RepId, Pid]), + Pid; + {error, already_present} -> + case supervisor:restart_child(couch_rep_sup, RepId) of + {ok, Pid} -> + ?LOG_INFO("starting replication ~p at ~p", [RepId, Pid]), + Pid; + {error, running} -> + %% this error occurs if multiple replicators are racing + %% each other to start and somebody else won. Just grab + %% the Pid by calling start_child again. + {error, {already_started, Pid}} = + supervisor:start_child(couch_rep_sup, Replicator), + ?LOG_DEBUG("replication ~p already running at ~p", [RepId, Pid]), + Pid; + {error, {db_not_found, DbUrl}} -> + throw({db_not_found, <<"could not open ", DbUrl/binary>>}) + end; + {error, {already_started, Pid}} -> + ?LOG_DEBUG("replication ~p already running at ~p", [RepId, Pid]), + Pid; + {error, {{db_not_found, DbUrl}, _}} -> + throw({db_not_found, <<"could not open ", DbUrl/binary>>}) + end. 
+ +compare_replication_logs(SrcDoc, TgtDoc) -> + #doc{body={RepRecProps}} = SrcDoc, + #doc{body={RepRecPropsTgt}} = TgtDoc, + case couch_util:get_value(<<"session_id">>, RepRecProps) == + couch_util:get_value(<<"session_id">>, RepRecPropsTgt) of + true -> + % if the records have the same session id, + % then we have a valid replication history + OldSeqNum = couch_util:get_value(<<"source_last_seq">>, RepRecProps, 0), + OldHistory = couch_util:get_value(<<"history">>, RepRecProps, []), + {OldSeqNum, OldHistory}; + false -> + SourceHistory = couch_util:get_value(<<"history">>, RepRecProps, []), + TargetHistory = couch_util:get_value(<<"history">>, RepRecPropsTgt, []), + ?LOG_INFO("Replication records differ. " + "Scanning histories to find a common ancestor.", []), + ?LOG_DEBUG("Record on source:~p~nRecord on target:~p~n", + [RepRecProps, RepRecPropsTgt]), + compare_rep_history(SourceHistory, TargetHistory) + end. + +compare_rep_history(S, T) when S =:= [] orelse T =:= [] -> + ?LOG_INFO("no common ancestry -- performing full replication", []), + {0, []}; +compare_rep_history([{S}|SourceRest], [{T}|TargetRest]=Target) -> + SourceId = couch_util:get_value(<<"session_id">>, S), + case has_session_id(SourceId, Target) of + true -> + RecordSeqNum = couch_util:get_value(<<"recorded_seq">>, S, 0), + ?LOG_INFO("found a common replication record with source_seq ~p", + [RecordSeqNum]), + {RecordSeqNum, SourceRest}; + false -> + TargetId = couch_util:get_value(<<"session_id">>, T), + case has_session_id(TargetId, SourceRest) of + true -> + RecordSeqNum = couch_util:get_value(<<"recorded_seq">>, T, 0), + ?LOG_INFO("found a common replication record with source_seq ~p", + [RecordSeqNum]), + {RecordSeqNum, TargetRest}; + false -> + compare_rep_history(SourceRest, TargetRest) + end + end. + +close_db(#http_db{}) -> + ok; +close_db(Db) -> + couch_db:close(Db). + +dbname(#http_db{url = Url}) -> + strip_password(Url); +dbname(#db{name = Name}) -> + Name. 
+ +strip_password(Url) -> + re:replace(Url, + "http(s)?://([^:]+):[^@]+@(.*)$", + "http\\1://\\2:*****@\\3", + [{return, list}]). + +dbinfo(#http_db{} = Db) -> + {DbProps} = couch_rep_httpc:request(Db), + [{list_to_existing_atom(?b2l(K)), V} || {K,V} <- DbProps]; +dbinfo(Db) -> + {ok, Info} = couch_db:get_db_info(Db), + Info. + +do_terminate(#state{doc_ids=DocIds} = State) when is_list(DocIds) -> + #state{ + listeners = Listeners, + rep_starttime = ReplicationStartTime, + stats = Stats + } = State, + + RepByDocsJson = {[ + {<<"start_time">>, ?l2b(ReplicationStartTime)}, + {<<"end_time">>, ?l2b(httpd_util:rfc1123_date())}, + {<<"docs_read">>, ets:lookup_element(Stats, docs_read, 2)}, + {<<"docs_written">>, ets:lookup_element(Stats, docs_written, 2)}, + {<<"doc_write_failures">>, + ets:lookup_element(Stats, doc_write_failures, 2)} + ]}, + + terminate_cleanup(State), + [gen_server:reply(L, {ok, RepByDocsJson}) || L <- lists:reverse(Listeners)]; + +do_terminate(State) -> + #state{ + checkpoint_history = CheckpointHistory, + committed_seq = NewSeq, + listeners = Listeners, + source = Source, + continuous = Continuous, + source_log = #doc{body={OldHistory}} + } = State, + + NewRepHistory = case CheckpointHistory of + nil -> + {[{<<"no_changes">>, true} | OldHistory]}; + _Else -> + CheckpointHistory + end, + + %% reply to original requester + OtherListeners = case Continuous of + true -> + []; % continuous replications have no listeners + _ -> + [Original|Rest] = lists:reverse(Listeners), + gen_server:reply(Original, {ok, NewRepHistory}), + Rest + end, + + %% maybe trigger another replication. If this replicator uses a local + %% source Db, changes to that Db since we started will not be included in + %% this pass. + case up_to_date(Source, NewSeq) of + true -> + [gen_server:reply(R, {ok, NewRepHistory}) || R <- OtherListeners]; + false -> + [gen_server:reply(R, retry) || R <- OtherListeners] + end, + terminate_cleanup(State). 
+ +terminate_cleanup(#state{source=Source, target=Target, stats=Stats}) -> + couch_task_status:update("Finishing"), + close_db(Target), + close_db(Source), + ets:delete(Stats). + +has_session_id(_SessionId, []) -> + false; +has_session_id(SessionId, [{Props} | Rest]) -> + case couch_util:get_value(<<"session_id">>, Props, nil) of + SessionId -> + true; + _Else -> + has_session_id(SessionId, Rest) + end. + +maybe_append_options(Options, Props) -> + lists:foldl(fun(Option, Acc) -> + Acc ++ + case couch_util:get_value(Option, Props, false) of + true -> + "+" ++ ?b2l(Option); + false -> + "" + end + end, [], Options). + +make_replication_id({Props}, UserCtx) -> + %% funky algorithm to preserve backwards compatibility + {ok, HostName} = inet:gethostname(), + % Port = mochiweb_socket_server:get(couch_httpd, port), + Src = get_rep_endpoint(UserCtx, couch_util:get_value(<<"source">>, Props)), + Tgt = get_rep_endpoint(UserCtx, couch_util:get_value(<<"target">>, Props)), + Base = [HostName, Src, Tgt] ++ + case couch_util:get_value(<<"filter">>, Props) of + undefined -> + case couch_util:get_value(<<"doc_ids">>, Props) of + undefined -> + []; + DocIds -> + [DocIds] + end; + Filter -> + [Filter, couch_util:get_value(<<"query_params">>, Props, {[]})] + end, + Extension = maybe_append_options( + [<<"continuous">>, <<"create_target">>], Props), + {couch_util:to_hex(couch_util:md5(term_to_binary(Base))), Extension}. + +maybe_add_trailing_slash(Url) -> + re:replace(Url, "[^/]$", "&/", [{return, list}]). 
+ +get_rep_endpoint(_UserCtx, {Props}) -> + Url = maybe_add_trailing_slash(couch_util:get_value(<<"url">>, Props)), + {BinHeaders} = couch_util:get_value(<<"headers">>, Props, {[]}), + {Auth} = couch_util:get_value(<<"auth">>, Props, {[]}), + case couch_util:get_value(<<"oauth">>, Auth) of + undefined -> + {remote, Url, [{?b2l(K),?b2l(V)} || {K,V} <- BinHeaders]}; + {OAuth} -> + {remote, Url, [{?b2l(K),?b2l(V)} || {K,V} <- BinHeaders], OAuth} + end; +get_rep_endpoint(_UserCtx, <<"http://",_/binary>>=Url) -> + {remote, maybe_add_trailing_slash(Url), []}; +get_rep_endpoint(_UserCtx, <<"https://",_/binary>>=Url) -> + {remote, maybe_add_trailing_slash(Url), []}; +get_rep_endpoint(UserCtx, <<DbName/binary>>) -> + {local, DbName, UserCtx}. + +open_replication_log(#http_db{}=Db, RepId) -> + DocId = ?LOCAL_DOC_PREFIX ++ RepId, + Req = Db#http_db{resource=couch_util:url_encode(DocId)}, + case couch_rep_httpc:request(Req) of + {[{<<"error">>, _}, {<<"reason">>, _}]} -> + ?LOG_DEBUG("didn't find a replication log for ~s", [Db#http_db.url]), + #doc{id=?l2b(DocId)}; + Doc -> + ?LOG_DEBUG("found a replication log for ~s", [Db#http_db.url]), + couch_doc:from_json_obj(Doc) + end; +open_replication_log(Db, RepId) -> + DocId = ?l2b(?LOCAL_DOC_PREFIX ++ RepId), + case couch_db:open_doc(Db, DocId, []) of + {ok, Doc} -> + ?LOG_DEBUG("found a replication log for ~s", [Db#db.name]), + Doc; + _ -> + ?LOG_DEBUG("didn't find a replication log for ~s", [Db#db.name]), + #doc{id=DocId} + end. + +open_db(Props, UserCtx, ProxyParams) -> + open_db(Props, UserCtx, ProxyParams, false). 
+ +open_db({Props}, _UserCtx, ProxyParams, CreateTarget) -> + Url = maybe_add_trailing_slash(couch_util:get_value(<<"url">>, Props)), + {AuthProps} = couch_util:get_value(<<"auth">>, Props, {[]}), + {BinHeaders} = couch_util:get_value(<<"headers">>, Props, {[]}), + Headers = [{?b2l(K),?b2l(V)} || {K,V} <- BinHeaders], + DefaultHeaders = (#http_db{})#http_db.headers, + Db1 = #http_db{ + url = Url, + auth = AuthProps, + headers = lists:ukeymerge(1, Headers, DefaultHeaders) + }, + Db = Db1#http_db{options = Db1#http_db.options ++ ProxyParams}, + couch_rep_httpc:db_exists(Db, CreateTarget); +open_db(<<"http://",_/binary>>=Url, _, ProxyParams, CreateTarget) -> + open_db({[{<<"url">>,Url}]}, [], ProxyParams, CreateTarget); +open_db(<<"https://",_/binary>>=Url, _, ProxyParams, CreateTarget) -> + open_db({[{<<"url">>,Url}]}, [], ProxyParams, CreateTarget); +open_db(<<DbName/binary>>, UserCtx, _ProxyParams, CreateTarget) -> + case CreateTarget of + true -> + ok = couch_httpd:verify_is_server_admin(UserCtx), + couch_server:create(DbName, [{user_ctx, UserCtx}]); + false -> ok + end, + + case couch_db:open(DbName, [{user_ctx, UserCtx}]) of + {ok, Db} -> + couch_db:monitor(Db), + Db; + {not_found, no_db_file} -> throw({db_not_found, DbName}) + end. + +schedule_checkpoint(#state{checkpoint_scheduled = nil} = State) -> + Server = self(), + case timer:apply_after(5000, couch_rep, checkpoint, [Server]) of + {ok, TRef} -> + State#state{checkpoint_scheduled = TRef}; + Error -> + ?LOG_ERROR("tried to schedule a checkpoint but got ~p", [Error]), + State + end; +schedule_checkpoint(State) -> + State. 
%% Records a checkpoint document on both source and target, but only when
%% both databases still report the instance start times captured at init
%% (i.e. neither side restarted). Otherwise the worker pipeline is torn down
%% and the replication restarts from the last recorded checkpoint.
do_checkpoint(State) ->
    #state{
        source = Source,
        target = Target,
        committed_seq = NewSeqNum,
        start_seq = StartSeqNum,
        history = OldHistory,
        source_log = SourceLog,
        target_log = TargetLog,
        rep_starttime = ReplicationStartTime,
        src_starttime = SrcInstanceStartTime,
        tgt_starttime = TgtInstanceStartTime,
        stats = Stats
    } = State,
    case commit_to_both(Source, Target, NewSeqNum) of
    {SrcInstanceStartTime, TgtInstanceStartTime} ->
        ?LOG_INFO("recording a checkpoint for ~s -> ~s at source update_seq ~p",
            [dbname(Source), dbname(Target), NewSeqNum]),
        SessionId = couch_uuids:random(),
        NewHistoryEntry = {[
            {<<"session_id">>, SessionId},
            {<<"start_time">>, list_to_binary(ReplicationStartTime)},
            {<<"end_time">>, list_to_binary(httpd_util:rfc1123_date())},
            {<<"start_last_seq">>, StartSeqNum},
            {<<"end_last_seq">>, NewSeqNum},
            {<<"recorded_seq">>, NewSeqNum},
            {<<"missing_checked">>, ets:lookup_element(Stats, total_revs, 2)},
            {<<"missing_found">>, ets:lookup_element(Stats, missing_revs, 2)},
            {<<"docs_read">>, ets:lookup_element(Stats, docs_read, 2)},
            {<<"docs_written">>, ets:lookup_element(Stats, docs_written, 2)},
            {<<"doc_write_failures">>,
                ets:lookup_element(Stats, doc_write_failures, 2)}
        ]},
        % limit history to 50 entries
        NewRepHistory = {[
            {<<"session_id">>, SessionId},
            {<<"source_last_seq">>, NewSeqNum},
            {<<"history">>, lists:sublist([NewHistoryEntry | OldHistory], 50)}
        ]},

        try
            {SrcRevPos,SrcRevId} =
                update_local_doc(Source, SourceLog#doc{body=NewRepHistory}),
            {TgtRevPos,TgtRevId} =
                update_local_doc(Target, TargetLog#doc{body=NewRepHistory}),
            State#state{
                checkpoint_scheduled = nil,
                checkpoint_history = NewRepHistory,
                source_log = SourceLog#doc{revs={SrcRevPos, [SrcRevId]}},
                target_log = TargetLog#doc{revs={TgtRevPos, [TgtRevId]}}
            }
        catch throw:conflict ->
            ?LOG_ERROR("checkpoint failure: conflict (are you replicating to "
                "yourself?)", []),
            State
        end;
    _Else ->
        ?LOG_INFO("rebooting ~s -> ~s from last known replication checkpoint",
            [dbname(Source), dbname(Target)]),
        #state{
            changes_feed = CF,
            missing_revs = MR,
            reader = Reader,
            writer = Writer
        } = State,
        Pids = [Writer, Reader, MR, CF],
        [unlink(Pid) || Pid <- Pids],
        [exit(Pid, shutdown) || Pid <- Pids],
        close_db(Target),
        close_db(Source),
        {ok, NewState} = init(State#state.init_args),
        NewState#state{listeners=State#state.listeners}
    end.

%% Commits the source asynchronously while committing the target in-line,
%% then returns the {SourceStartTime, TargetStartTime} pair that
%% do_checkpoint/1 matches against the values captured at init.
commit_to_both(Source, Target, RequiredSeq) ->
    % commit the src async
    ParentPid = self(),
    SrcCommitPid = spawn_link(fun() ->
        ParentPid ! {self(), ensure_full_commit(Source, RequiredSeq)} end),

    % commit tgt sync
    TargetStartTime = ensure_full_commit(Target),

    SourceStartTime =
    receive
    {SrcCommitPid, Timestamp} ->
        Timestamp;
    {'EXIT', SrcCommitPid, {http_request_failed, _}} ->
        exit(replication_link_failure)
    end,
    {SourceStartTime, TargetStartTime}.

%% Forces a full commit on the target and returns its instance start time.
ensure_full_commit(#http_db{headers = Headers} = Target) ->
    Req = Target#http_db{
        resource = "_ensure_full_commit",
        method = post,
        headers = couch_util:proplist_apply_field(
            {"Content-Type", "application/json"}, Headers)
    },
    {ResultProps} = couch_rep_httpc:request(Req),
    true = couch_util:get_value(<<"ok">>, ResultProps),
    couch_util:get_value(<<"instance_start_time">>, ResultProps);
ensure_full_commit(Target) ->
    {ok, NewDb} = couch_db:open_int(Target#db.name, []),
    % NOTE(review): the update seq is read from the replicator's original
    % handle while the committed seq comes from a freshly opened one --
    % confirm this asymmetry is intentional.
    UpdateSeq = couch_db:get_update_seq(Target),
    CommitSeq = couch_db:get_committed_update_seq(NewDb),
    InstanceStartTime = NewDb#db.instance_start_time,
    couch_db:close(NewDb),
    if UpdateSeq > CommitSeq ->
        ?LOG_DEBUG("target needs a full commit: update ~p commit ~p",
            [UpdateSeq, CommitSeq]),
        {ok, DbStartTime} = couch_db:ensure_full_commit(Target),
        DbStartTime;
    true ->
        ?LOG_DEBUG("target doesn't need a full commit", []),
        InstanceStartTime
    end.
+ +ensure_full_commit(#http_db{headers = Headers} = Source, RequiredSeq) -> + Req = Source#http_db{ + resource = "_ensure_full_commit", + method = post, + qs = [{seq, RequiredSeq}], + headers = couch_util:proplist_apply_field({"Content-Type", "application/json"}, Headers) + }, + {ResultProps} = couch_rep_httpc:request(Req), + case couch_util:get_value(<<"ok">>, ResultProps) of + true -> + couch_util:get_value(<<"instance_start_time">>, ResultProps); + undefined -> nil end; +ensure_full_commit(Source, RequiredSeq) -> + {ok, NewDb} = couch_db:open_int(Source#db.name, []), + CommitSeq = couch_db:get_committed_update_seq(NewDb), + InstanceStartTime = NewDb#db.instance_start_time, + couch_db:close(NewDb), + if RequiredSeq > CommitSeq -> + ?LOG_DEBUG("source needs a full commit: required ~p committed ~p", + [RequiredSeq, CommitSeq]), + {ok, DbStartTime} = couch_db:ensure_full_commit(Source), + DbStartTime; + true -> + ?LOG_DEBUG("source doesn't need a full commit", []), + InstanceStartTime + end. + +update_local_doc(#http_db{} = Db, #doc{id=DocId} = Doc) -> + Req = Db#http_db{ + resource = couch_util:url_encode(DocId), + method = put, + body = couch_doc:to_json_obj(Doc, [attachments]), + headers = [{"x-couch-full-commit", "false"} | Db#http_db.headers] + }, + {ResponseMembers} = couch_rep_httpc:request(Req), + Rev = couch_util:get_value(<<"rev">>, ResponseMembers), + couch_doc:parse_rev(Rev); +update_local_doc(Db, Doc) -> + {ok, Result} = couch_db:update_doc(Db, Doc, [delay_commit]), + Result. + +up_to_date(#http_db{}, _Seq) -> + true; +up_to_date(Source, Seq) -> + {ok, NewDb} = couch_db:open_int(Source#db.name, []), + T = NewDb#db.update_seq == Seq, + couch_db:close(NewDb), + T. 
+ +parse_proxy_params(ProxyUrl) when is_binary(ProxyUrl) -> + parse_proxy_params(?b2l(ProxyUrl)); +parse_proxy_params([]) -> + []; +parse_proxy_params(ProxyUrl) -> + {url, _, Base, Port, User, Passwd, _Path, _Proto} = + ibrowse_lib:parse_url(ProxyUrl), + [{proxy_host, Base}, {proxy_port, Port}] ++ + case is_list(User) andalso is_list(Passwd) of + false -> + []; + true -> + [{proxy_user, User}, {proxy_password, Passwd}] + end. diff --git a/apps/couch/src/couch_rep_att.erl b/apps/couch/src/couch_rep_att.erl new file mode 100644 index 00000000..28b8945c --- /dev/null +++ b/apps/couch/src/couch_rep_att.erl @@ -0,0 +1,120 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_rep_att). + +-export([convert_stub/2, cleanup/0]). + +-include("couch_db.hrl"). + +convert_stub(#att{data=stub, name=Name} = Attachment, + {#http_db{} = Db, Id, Rev}) -> + {Pos, [RevId|_]} = Rev, + Request = Db#http_db{ + resource = lists:flatten([couch_util:url_encode(Id), "/", + couch_util:url_encode(Name)]), + qs = [{rev, couch_doc:rev_to_str({Pos,RevId})}] + }, + Ref = make_ref(), + RcvFun = fun() -> attachment_receiver(Ref, Request) end, + Attachment#att{data=RcvFun}. + +cleanup() -> + receive + {ibrowse_async_response, _, _} -> + %% TODO maybe log, didn't expect to have data here + cleanup(); + {ibrowse_async_response_end, _} -> + cleanup(); + {ibrowse_async_headers, _, _, _} -> + cleanup() + after 0 -> + erase(), + ok + end. 
+ +% internal funs + +attachment_receiver(Ref, Request) -> + try case get(Ref) of + undefined -> + {ReqId, ContentEncoding} = start_http_request(Request), + put(Ref, {ReqId, ContentEncoding}), + receive_data(Ref, ReqId, ContentEncoding); + {ReqId, ContentEncoding} -> + receive_data(Ref, ReqId, ContentEncoding) + end + catch + throw:{attachment_request_failed, _} -> + case {Request#http_db.retries, Request#http_db.pause} of + {0, _} -> + ?LOG_INFO("request for ~p failed", [Request#http_db.resource]), + throw({attachment_request_failed, max_retries_reached}); + {N, Pause} when N > 0 -> + ?LOG_INFO("request for ~p timed out, retrying in ~p seconds", + [Request#http_db.resource, Pause/1000]), + timer:sleep(Pause), + cleanup(), + attachment_receiver(Ref, Request#http_db{retries = N-1}) + end + end. + +receive_data(Ref, ReqId, ContentEncoding) -> + receive + {ibrowse_async_response, ReqId, {chunk_start,_}} -> + receive_data(Ref, ReqId, ContentEncoding); + {ibrowse_async_response, ReqId, chunk_end} -> + receive_data(Ref, ReqId, ContentEncoding); + {ibrowse_async_response, ReqId, {error, Err}} -> + ?LOG_ERROR("streaming attachment ~p failed with ~p", [ReqId, Err]), + throw({attachment_request_failed, Err}); + {ibrowse_async_response, ReqId, Data} -> + % ?LOG_DEBUG("got ~p bytes for ~p", [size(Data), ReqId]), + Data; + {ibrowse_async_response_end, ReqId} -> + ?LOG_ERROR("streaming att. ended but more data requested ~p", [ReqId]), + throw({attachment_request_failed, premature_end}) + after 31000 -> + throw({attachment_request_failed, timeout}) + end. 
%% Starts the attachment HTTP request from the receiving process so the
%% ibrowse stream_to option targets the right pid.
start_http_request(Req) ->
    %% set stream_to here because self() has changed
    Req2 = Req#http_db{options = [{stream_to,self()} | Req#http_db.options]},
    {ibrowse_req_id, ReqId} = couch_rep_httpc:request(Req2),
    receive {ibrowse_async_headers, ReqId, Code, Headers} ->
        case validate_headers(Req2, list_to_integer(Code), Headers) of
        {ok, ContentEncoding} ->
            {ReqId, ContentEncoding};
        {ok, ContentEncoding, NewReqId} ->
            {NewReqId, ContentEncoding}
        end
    after 10000 ->
        throw({attachment_request_failed, timeout})
    end.

%% Accepts a 200, follows 3xx redirects, and throws on anything else.
validate_headers(_Req, 200, Headers) ->
    MochiHeaders = mochiweb_headers:make(Headers),
    {ok, mochiweb_headers:get_value("Content-Encoding", MochiHeaders)};
validate_headers(Req, Code, Headers) when Code > 299, Code < 400 ->
    Url = mochiweb_headers:get_value("Location",mochiweb_headers:make(Headers)),
    NewReq = couch_rep_httpc:redirected_request(Req, Url),
    {ibrowse_req_id, ReqId} = couch_rep_httpc:request(NewReq),
    receive {ibrowse_async_headers, ReqId, NewCode, NewHeaders} ->
        {ok, Encoding} = validate_headers(NewReq, list_to_integer(NewCode),
            NewHeaders)
    end,
    {ok, Encoding, ReqId};
validate_headers(Req, Code, _Headers) ->
    #http_db{url=Url, resource=Resource} = Req,
    ?LOG_ERROR("got ~p for ~s~s", [Code, Url, Resource]),
    throw({attachment_request_failed, {bad_code, Code}}).
diff --git a/apps/couch/src/couch_rep_changes_feed.erl b/apps/couch/src/couch_rep_changes_feed.erl
new file mode 100644
index 00000000..66696912
--- /dev/null
+++ b/apps/couch/src/couch_rep_changes_feed.erl
@@ -0,0 +1,386 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
%   http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

%% Buffers rows from a source database's changes feed (remote _changes over
%% HTTP, local couch_changes, or legacy _all_docs_by_seq) and serves them to
%% the replicator via next/1.
-module(couch_rep_changes_feed).
-behaviour(gen_server).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
    code_change/3]).

-export([start_link/4, next/1, stop/1]).

-define(BUFFER_SIZE, 1000).

-include("couch_db.hrl").
-include_lib("ibrowse/include/ibrowse.hrl").

-record (state, {
    changes_from = nil,     % producer parked when the buffer is full
    changes_loop = nil,     % pid of the local-changes / by_seq loop
    init_args,
    last_seq,
    conn = nil,             % ibrowse worker pid (remote feeds)
    reqid = nil,            % ibrowse request id (remote feeds)
    complete = false,
    count = 0,              % number of buffered rows
    partial_chunk = <<>>,   % trailing bytes of an incomplete JSON line
    reply_to = nil,         % consumer parked in next/1 awaiting rows
    rows = queue:new()
}).

start_link(Parent, Source, StartSeq, PostProps) ->
    gen_server:start_link(?MODULE, [Parent, Source, StartSeq, PostProps], []).

%% Returns the next batch of change rows, or the atom `complete`.
next(Server) ->
    gen_server:call(Server, next_changes, infinity).

stop(Server) ->
    gen_server:call(Server, stop).
%% Remote source: stream the _changes feed over HTTP, following one
%% redirect and falling back to _all_docs_by_seq for servers without
%% a _changes endpoint.
init([_Parent, #http_db{}=Source, Since, PostProps] = Args) ->
    process_flag(trap_exit, true),
    Feed = case couch_util:get_value(<<"continuous">>, PostProps, false) of
    false ->
        normal;
    true ->
        continuous
    end,
    BaseQS = [
        {"style", all_docs},
        {"heartbeat", 10000},
        {"since", Since},
        {"feed", Feed}
    ],
    QS = case couch_util:get_value(<<"filter">>, PostProps) of
    undefined ->
        BaseQS;
    FilterName ->
        % user-supplied query_params may not shadow the reserved keys
        {Params} = couch_util:get_value(<<"query_params">>, PostProps, {[]}),
        lists:foldr(
            fun({K, V}, QSAcc) ->
                Ks = couch_util:to_list(K),
                case proplists:is_defined(Ks, QSAcc) of
                true ->
                    QSAcc;
                false ->
                    [{Ks, V} | QSAcc]
                end
            end,
            [{"filter", FilterName} | BaseQS],
            Params
        )
    end,
    Pid = couch_rep_httpc:spawn_link_worker_process(Source),
    Req = Source#http_db{
        resource = "_changes",
        qs = QS,
        conn = Pid,
        options = [{stream_to, {self(), once}}, {response_format, binary}],
        headers = Source#http_db.headers -- [{"Accept-Encoding", "gzip"}]
    },
    {ibrowse_req_id, ReqId} = couch_rep_httpc:request(Req),

    receive
    {ibrowse_async_headers, ReqId, "200", _} ->
        ibrowse:stream_next(ReqId),
        {ok, #state{conn=Pid, last_seq=Since, reqid=ReqId, init_args=Args}};
    {ibrowse_async_headers, ReqId, Code, Hdrs} when Code=="301"; Code=="302" ->
        catch ibrowse:stop_worker_process(Pid),
        Url2 = mochiweb_headers:get_value("Location", mochiweb_headers:make(Hdrs)),
        %% TODO use couch_httpc:request instead of start_http_request
        {Pid2, ReqId2} = start_http_request(Url2),
        receive {ibrowse_async_headers, ReqId2, "200", _} ->
            {ok, #state{conn=Pid2, last_seq=Since, reqid=ReqId2, init_args=Args}}
        after 30000 ->
            {stop, changes_timeout}
        end;
    {ibrowse_async_headers, ReqId, "404", _} ->
        catch ibrowse:stop_worker_process(Pid),
        ?LOG_INFO("source doesn't have _changes, trying _all_docs_by_seq", []),
        Self = self(),
        BySeqPid = spawn_link(fun() -> by_seq_loop(Self, Source, Since) end),
        {ok, #state{last_seq=Since, changes_loop=BySeqPid, init_args=Args}};
    {ibrowse_async_headers, ReqId, Code, _} ->
        {stop, {changes_error_code, list_to_integer(Code)}}
    after 10000 ->
        {stop, changes_timeout}
    end;

%% Local source: run couch_changes in a linked process that pushes each row
%% to this server via add_change calls.
init([_Parent, Source, Since, PostProps] = InitArgs) ->
    process_flag(trap_exit, true),
    Server = self(),
    ChangesArgs = #changes_args{
        style = all_docs,
        since = Since,
        filter = ?b2l(couch_util:get_value(<<"filter">>, PostProps, <<>>)),
        feed = case couch_util:get_value(<<"continuous">>, PostProps, false) of
        true ->
            "continuous";
        false ->
            "normal"
        end,
        timeout = infinity
    },
    ChangesPid = spawn_link(fun() ->
        ChangesFeedFun = couch_changes:handle_changes(
            ChangesArgs,
            {json_req, filter_json_req(Source, PostProps)},
            Source
        ),
        ChangesFeedFun(fun({change, Change, _}, _) ->
            gen_server:call(Server, {add_change, Change}, infinity);
        (_, _) ->
            ok
        end)
    end),
    {ok, #state{changes_loop=ChangesPid, init_args=InitArgs}}.

%% Builds a fake _changes request object so filter functions see the same
%% request shape they would receive over HTTP.
filter_json_req(Db, PostProps) ->
    case couch_util:get_value(<<"filter">>, PostProps) of
    undefined ->
        {[]};
    FilterName ->
        {Query} = couch_util:get_value(<<"query_params">>, PostProps, {[]}),
        {ok, Info} = couch_db:get_db_info(Db),
        % simulate a request to db_name/_changes
        {[
            {<<"info">>, {Info}},
            {<<"id">>, null},
            {<<"method">>, 'GET'},
            {<<"path">>, [couch_db:name(Db), <<"_changes">>]},
            {<<"query">>, {[{<<"filter">>, FilterName} | Query]}},
            {<<"headers">>, []},
            {<<"body">>, []},
            {<<"peer">>, <<"replicator">>},
            {<<"form">>, []},
            {<<"cookie">>, []},
            {<<"userCtx">>, couch_util:json_user_ctx(Db)}
        ]}
    end.

handle_call({add_change, Row}, From, State) ->
    handle_add_change(Row, From, State);

handle_call(next_changes, From, State) ->
    handle_next_changes(From, State);

handle_call(stop, _From, State) ->
    {stop, normal, ok, State}.

handle_cast(_Msg, State) ->
    {noreply, State}.
+ +handle_info({ibrowse_async_headers, Id, Code, Hdrs}, #state{reqid=Id}=State) -> + handle_headers(list_to_integer(Code), Hdrs, State); + +handle_info({ibrowse_async_response, Id, {error,connection_closed}}, + #state{reqid=Id}=State) -> + handle_retry(State); + +handle_info({ibrowse_async_response, Id, {error,E}}, #state{reqid=Id}=State) -> + {stop, {error, E}, State}; + +handle_info({ibrowse_async_response, Id, Chunk}, #state{reqid=Id}=State) -> + Messages = [M || M <- re:split(Chunk, ",?\n", [trim]), M =/= <<>>], + handle_messages(Messages, State); + +handle_info({ibrowse_async_response_end, Id}, #state{reqid=Id} = State) -> + handle_feed_completion(State); + +handle_info({'EXIT', From, normal}, #state{changes_loop=From} = State) -> + handle_feed_completion(State); + +handle_info({'EXIT', From, Reason}, #state{changes_loop=From} = State) -> + ?LOG_ERROR("changes_loop died with reason ~p", [Reason]), + {stop, changes_loop_died, State}; + +handle_info({'EXIT', _From, normal}, State) -> + {noreply, State}; + +handle_info(Msg, State) -> + ?LOG_DEBUG("unexpected message at changes_feed ~p", [Msg]), + {noreply, State}. + +terminate(_Reason, State) -> + #state{ + changes_loop = ChangesPid, + conn = Conn + } = State, + if is_pid(ChangesPid) -> exit(ChangesPid, stop); true -> ok end, + if is_pid(Conn) -> catch ibrowse:stop_worker_process(Conn); true -> ok end, + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%internal funs + +handle_add_change(Row, From, #state{reply_to=nil} = State) -> + #state{ + count = Count, + rows = Rows + } = State, + NewState = State#state{count=Count+1, rows=queue:in(Row,Rows)}, + if Count < ?BUFFER_SIZE -> + {reply, ok, NewState}; + true -> + {noreply, NewState#state{changes_from=From}} + end; +handle_add_change(Row, _From, #state{count=0} = State) -> + gen_server:reply(State#state.reply_to, [Row]), + {reply, ok, State#state{reply_to=nil}}. 
+ +handle_next_changes(From, #state{count=0}=State) -> + if State#state.complete -> + {stop, normal, complete, State}; + true -> + {noreply, State#state{reply_to=From}} + end; +handle_next_changes(_From, State) -> + #state{ + changes_from = ChangesFrom, + rows = Rows + } = State, + NewState = State#state{count=0, changes_from=nil, rows=queue:new()}, + maybe_stream_next(NewState), + if ChangesFrom =/= nil -> gen_server:reply(ChangesFrom, ok); true -> ok end, + {reply, queue:to_list(Rows), NewState}. + +handle_headers(200, _, State) -> + maybe_stream_next(State), + {noreply, State}; +handle_headers(301, Hdrs, State) -> + catch ibrowse:stop_worker_process(State#state.conn), + Url = mochiweb_headers:get_value("Location", mochiweb_headers:make(Hdrs)), + %% TODO use couch_httpc:request instead of start_http_request + {Pid, ReqId} = start_http_request(Url), + {noreply, State#state{conn=Pid, reqid=ReqId}}; +handle_headers(Code, Hdrs, State) -> + ?LOG_ERROR("replicator changes feed failed with code ~s and Headers ~n~p", + [Code,Hdrs]), + {stop, {error, Code}, State}. 
+ +handle_messages([], State) -> + maybe_stream_next(State), + {noreply, State}; +handle_messages([<<"{\"results\":[">>|Rest], State) -> + handle_messages(Rest, State); +handle_messages([<<"]">>, <<"\"last_seq\":", _/binary>>], State) -> + handle_feed_completion(State); +handle_messages([<<"{\"last_seq\":", _/binary>>], State) -> + handle_feed_completion(State); +handle_messages([Chunk|Rest], State) -> + #state{ + count = Count, + partial_chunk = Partial, + rows = Rows + } = State, + NewState = try + Row = {Props} = decode_row(<<Partial/binary, Chunk/binary>>), + case State of + #state{reply_to=nil} -> + State#state{ + count = Count+1, + last_seq = couch_util:get_value(<<"seq">>, Props), + partial_chunk = <<>>, + rows=queue:in(Row,Rows) + }; + #state{count=0, reply_to=From}-> + gen_server:reply(From, [Row]), + State#state{reply_to = nil, partial_chunk = <<>>} + end + catch + throw:{invalid_json, Bad} -> + State#state{partial_chunk = Bad} + end, + handle_messages(Rest, NewState). + +handle_feed_completion(#state{reply_to=nil} = State)-> + {noreply, State#state{complete=true}}; +handle_feed_completion(#state{count=0} = State) -> + gen_server:reply(State#state.reply_to, complete), + {stop, normal, State}. + +handle_retry(State) -> + ?LOG_DEBUG("retrying changes feed because our connection closed", []), + #state{ + count = Count, + init_args = [_, Source, _, PostProps], + last_seq = Since, + reply_to = ReplyTo, + rows = Rows + } = State, + case init([nil, Source, Since, PostProps]) of + {ok, State1} -> + MergedState = State1#state{ + count = Count, + reply_to = ReplyTo, + rows = Rows + }, + {noreply, MergedState}; + _ -> + {stop, {error, connection_closed}, State} + end. 

%% Fallback for sources without _changes: page through _all_docs_by_seq in
%% batches of 1000 rows, translating each row into a _changes-shaped record
%% and pushing it to the changes server. Exits 'normal' when no rows remain.
by_seq_loop(Server, Source, StartSeq) ->
    Req = Source#http_db{
        resource = "_all_docs_by_seq",
        qs = [{limit, 1000}, {startkey, StartSeq}]
    },
    {Results} = couch_rep_httpc:request(Req),
    Rows = couch_util:get_value(<<"rows">>, Results),
    if Rows =:= [] -> exit(normal); true -> ok end,
    EndSeq = lists:foldl(fun({RowInfoList}, _) ->
        Id = couch_util:get_value(<<"id">>, RowInfoList),
        Seq = couch_util:get_value(<<"key">>, RowInfoList),
        {RowProps} = couch_util:get_value(<<"value">>, RowInfoList),
        %% Fold winning rev + conflicts + deleted conflicts into one list so
        %% the change record carries every leaf revision.
        RawRevs = [
            couch_util:get_value(<<"rev">>, RowProps),
            couch_util:get_value(<<"conflicts">>, RowProps, []),
            couch_util:get_value(<<"deleted_conflicts">>, RowProps, [])
        ],
        ParsedRevs = couch_doc:parse_revs(lists:flatten(RawRevs)),
        Change = {[
            {<<"seq">>, Seq},
            {<<"id">>, Id},
            {<<"changes">>, [{[{<<"rev">>,R}]} || R <- ParsedRevs]}
        ]},
        gen_server:call(Server, {add_change, Change}, infinity),
        Seq
    end, 0, Rows),
    by_seq_loop(Server, Source, EndSeq).

%% Rows in the feed are separated by commas; strip a leading one, then
%% decode (throws {invalid_json, _} on incomplete data).
decode_row(<<",", Rest/binary>>) ->
    decode_row(Rest);
decode_row(Row) ->
    ?JSON_DECODE(Row).

%% Flow control: only request the next chunk from ibrowse while the buffer
%% is below ?BUFFER_SIZE and the feed is live. (Re)arms a 31s watchdog kept
%% in the process dictionary that exits this process if the feed stalls.
maybe_stream_next(#state{reqid=nil}) ->
    ok;
maybe_stream_next(#state{complete=false, count=N} = S) when N < ?BUFFER_SIZE ->
    timer:cancel(get(timeout)),
    {ok, Timeout} = timer:exit_after(31000, changes_timeout),
    put(timeout, Timeout),
    ibrowse:stream_next(S#state.reqid);
maybe_stream_next(_) ->
    %% Buffer full or feed complete: pause streaming and disarm the watchdog.
    timer:cancel(get(timeout)).

%% Open a dedicated ibrowse worker and issue an asynchronous streaming GET;
%% chunks are delivered one at a time via {stream_to, {self(), once}}.
start_http_request(RawUrl) ->
    Url = ibrowse_lib:parse_url(RawUrl),
    {ok, Pid} = ibrowse:spawn_link_worker_process(Url#url.host, Url#url.port),
    Opts = [
        {stream_to, {self(), once}},
        {inactivity_timeout, 31000},
        {response_format, binary}
    ],
    {ibrowse_req_id, Id} =
        ibrowse:send_req_direct(Pid, RawUrl, [], get, [], Opts, infinity),
    {Pid, Id}.
diff --git a/apps/couch/src/couch_rep_httpc.erl b/apps/couch/src/couch_rep_httpc.erl new file mode 100644 index 00000000..aaa38106 --- /dev/null +++ b/apps/couch/src/couch_rep_httpc.erl @@ -0,0 +1,245 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_rep_httpc). +-include("couch_db.hrl"). +-include_lib("ibrowse/include/ibrowse.hrl"). + +-export([db_exists/1, db_exists/2, full_url/1, request/1, redirected_request/2, + spawn_worker_process/1, spawn_link_worker_process/1]). + +request(#http_db{} = Req) -> + do_request(Req). + +do_request(#http_db{url=Url} = Req) when is_binary(Url) -> + do_request(Req#http_db{url = ?b2l(Url)}); + +do_request(Req) -> + #http_db{ + auth = Auth, + body = B, + conn = Conn, + headers = Headers0, + method = Method, + options = Opts, + qs = QS + } = Req, + Url = full_url(Req), + Headers = case couch_util:get_value(<<"oauth">>, Auth) of + undefined -> + Headers0; + {OAuthProps} -> + [oauth_header(Url, QS, Method, OAuthProps) | Headers0] + end, + Body = case B of + {Fun, InitialState} when is_function(Fun) -> + {Fun, InitialState}; + nil -> + []; + _Else -> + iolist_to_binary(?JSON_ENCODE(B)) + end, + Resp = case Conn of + nil -> + ibrowse:send_req(Url, Headers, Method, Body, Opts, infinity); + _ -> + ibrowse:send_req_direct(Conn, Url, Headers, Method, Body, Opts, infinity) + end, + process_response(Resp, Req). + +db_exists(Req) -> + db_exists(Req, Req#http_db.url). 

%% A boolean second argument means "create the DB if missing"; any other
%% second argument is the canonical URL to record in the result on success.
db_exists(Req, true) ->
    db_exists(Req, Req#http_db.url, true);

db_exists(Req, false) ->
    db_exists(Req, Req#http_db.url, false);

db_exists(Req, CanonicalUrl) ->
    db_exists(Req, CanonicalUrl, false).

%% HEAD the database URL (optionally PUT-creating it first), following one
%% level of 301/302 redirect. Returns Req with the resolved URL, or throws
%% {db_not_found, Url}.
db_exists(Req, CanonicalUrl, CreateDB) ->
    #http_db{
        auth = Auth,
        headers = Headers0,
        url = Url
    } = Req,
    %% OAuth signatures depend on the HTTP method, so headers are built per
    %% request method.
    HeadersFun = fun(Method) ->
        case couch_util:get_value(<<"oauth">>, Auth) of
        undefined ->
            Headers0;
        {OAuthProps} ->
            [oauth_header(Url, [], Method, OAuthProps) | Headers0]
        end
    end,
    case CreateDB of
    true ->
        %% Best effort; a failed create surfaces via the HEAD check below.
        catch ibrowse:send_req(Url, HeadersFun(put), put);
    _Else -> ok
    end,
    case catch ibrowse:send_req(Url, HeadersFun(head), head) of
    {ok, "200", _, _} ->
        Req#http_db{url = CanonicalUrl};
    {ok, "301", RespHeaders, _} ->
        %% Permanent redirect: adopt the target as the new canonical URL.
        RedirectUrl = redirect_url(RespHeaders, Req#http_db.url),
        db_exists(Req#http_db{url = RedirectUrl}, RedirectUrl);
    {ok, "302", RespHeaders, _} ->
        %% Temporary redirect: follow it but keep the original canonical URL.
        RedirectUrl = redirect_url(RespHeaders, Req#http_db.url),
        db_exists(Req#http_db{url = RedirectUrl}, CanonicalUrl);
    Error ->
        ?LOG_DEBUG("DB at ~s could not be found because ~p", [Url, Error]),
        throw({db_not_found, ?l2b(Url)})
    end.

%% Build the redirect target from the Location header, carrying over any
%% user:password credentials embedded in the original URL.
redirect_url(RespHeaders, OrigUrl) ->
    MochiHeaders = mochiweb_headers:make(RespHeaders),
    RedUrl = mochiweb_headers:get_value("Location", MochiHeaders),
    {url, _, Base, Port, _, _, Path, Proto} = ibrowse_lib:parse_url(RedUrl),
    {url, _, _, _, User, Passwd, _, _} = ibrowse_lib:parse_url(OrigUrl),
    Creds = case is_list(User) andalso is_list(Passwd) of
    true ->
        User ++ ":" ++ Passwd ++ "@";
    false ->
        []
    end,
    atom_to_list(Proto) ++ "://" ++ Creds ++ Base ++ ":" ++
        integer_to_list(Port) ++ Path.

%% Expand an #http_db{} into a full request URL, rendering the query string.
full_url(#http_db{url=Url} = Req) when is_binary(Url) ->
    full_url(Req#http_db{url = ?b2l(Url)});

full_url(#http_db{qs=[]} = Req) ->
    Req#http_db.url ++ Req#http_db.resource;

full_url(Req) ->
    #http_db{
        url = Url,
        resource = Resource,
        qs = QS
    } = Req,
    QStr = lists:map(fun({K,V}) -> io_lib:format("~s=~s",
        [couch_util:to_list(K), couch_util:to_list(V)]) end, QS),
    lists:flatten([Url, Resource, "?", string:join(QStr, "&")]).

%% Interpret an ibrowse response on behalf of do_request/1:
%%   200/201     -> decoded JSON body
%%   301/302     -> follow the redirect and retry
%%   409         -> throw conflict
%%   other 4xx   -> decoded JSON error body (caller decides what to do)
%%   500/502/503 -> sleep, then retry with exponential backoff
%% Transport errors also retry with backoff until #http_db.retries runs out.
process_response({ok, Status, Headers, Body}, Req) ->
    Code = list_to_integer(Status),
    if Code =:= 200; Code =:= 201 ->
        ?JSON_DECODE(maybe_decompress(Headers, Body));
    Code =:= 301; Code =:= 302 ->
        RedirectUrl = redirect_url(Headers, Req#http_db.url),
        do_request(redirected_request(Req, RedirectUrl));
    Code =:= 409 ->
        throw(conflict);
    Code >= 400, Code < 500 ->
        ?JSON_DECODE(maybe_decompress(Headers, Body));
    Code =:= 500; Code =:= 502; Code =:= 503 ->
        #http_db{pause = Pause, retries = Retries} = Req,
        ?LOG_INFO("retrying couch_rep_httpc request in ~p seconds "
            "due to remote server error: ~p Body ~s",
            [Pause/1000, Code, Body]),
        timer:sleep(Pause),
        do_request(Req#http_db{retries = Retries-1, pause = 2*Pause});
    true ->
        exit({http_request_failed, ?l2b(["unhandled response code ", Status])})
    end;

process_response({ibrowse_req_id, Id}, _Req) ->
    %% Asynchronous request; the caller consumes the streamed response.
    {ibrowse_req_id, Id};

%% Retries exhausted: give up. NOTE(review): the "10 retries" in this log
%% message reflects the default retry count, not necessarily the configured
%% value for this request.
process_response({error, _Reason}, #http_db{url=Url, retries=0}) ->
    ?LOG_ERROR("couch_rep_httpc request failed after 10 retries: ~s", [Url]),
    exit({http_request_failed, ?l2b(["failed to replicate ", Url])});
process_response({error, Reason}, Req) ->
    #http_db{
        method = Method,
        retries = Retries,
        pause = Pause
    } = Req,
    %% Compress a few common exit shapes into short atoms for readable logs.
    ShortReason = case Reason of
    connection_closed ->
        connection_closed;
    {'EXIT', {noproc, _}} ->
        noproc;
    {'EXIT', {normal, _}} ->
        normal;
    Else ->
        Else
    end,
    ?LOG_DEBUG("retrying couch_rep_httpc ~p request in ~p seconds due to "
        "{error, ~p}", [Method, Pause/1000, ShortReason]),
    timer:sleep(Pause),
    if Reason == worker_is_dead ->
        %% The dedicated connection process died; open a fresh linked worker.
        C = spawn_link_worker_process(Req),
        do_request(Req#http_db{retries = Retries-1, pause = 2*Pause, conn=C});
    true ->
        do_request(Req#http_db{retries = Retries-1, pause = 2*Pause})
    end.

%% Rebuild the request record against a redirect target, splitting the new
%% URL into base + query string. A stale OAuth Authorization header is
%% dropped because it was signed for the old URL.
redirected_request(Req, RedirectUrl) ->
    {Base, QStr, _} = mochiweb_util:urlsplit_path(RedirectUrl),
    QS = mochiweb_util:parse_qs(QStr),
    Hdrs = case couch_util:get_value(<<"oauth">>, Req#http_db.auth) of
    undefined ->
        Req#http_db.headers;
    _Else ->
        lists:keydelete("Authorization", 1, Req#http_db.headers)
    end,
    Req#http_db{url=Base, resource="", qs=QS, headers=Hdrs}.

%% Start an unlinked ibrowse connection dedicated to this #http_db{}.
spawn_worker_process(Req) ->
    Url = ibrowse_lib:parse_url(Req#http_db.url),
    {ok, Pid} = ibrowse_http_client:start(Url),
    Pid.

%% Like spawn_worker_process/1, but linked to the caller.
spawn_link_worker_process(Req) ->
    Url = ibrowse_lib:parse_url(Req#http_db.url),
    {ok, Pid} = ibrowse_http_client:start_link(Url),
    Pid.

%% Transparently gunzip bodies served with Content-Encoding: gzip.
maybe_decompress(Headers, Body) ->
    MochiHeaders = mochiweb_headers:make(Headers),
    case mochiweb_headers:get_value("Content-Encoding", MochiHeaders) of
    "gzip" ->
        zlib:gunzip(Body);
    _ ->
        Body
    end.
+ +oauth_header(Url, QS, Action, Props) -> + % erlang-oauth doesn't like iolists + QSL = [{couch_util:to_list(K), ?b2l(?l2b(couch_util:to_list(V)))} || + {K,V} <- QS], + ConsumerKey = ?b2l(couch_util:get_value(<<"consumer_key">>, Props)), + Token = ?b2l(couch_util:get_value(<<"token">>, Props)), + TokenSecret = ?b2l(couch_util:get_value(<<"token_secret">>, Props)), + ConsumerSecret = ?b2l(couch_util:get_value(<<"consumer_secret">>, Props)), + SignatureMethodStr = ?b2l(couch_util:get_value(<<"signature_method">>, Props, <<"HMAC-SHA1">>)), + SignatureMethodAtom = case SignatureMethodStr of + "PLAINTEXT" -> + plaintext; + "HMAC-SHA1" -> + hmac_sha1; + "RSA-SHA1" -> + rsa_sha1 + end, + Consumer = {ConsumerKey, ConsumerSecret, SignatureMethodAtom}, + Method = case Action of + get -> "GET"; + post -> "POST"; + put -> "PUT"; + head -> "HEAD" + end, + Params = oauth:signed_params(Method, Url, QSL, Consumer, Token, TokenSecret) + -- QSL, + {"Authorization", "OAuth " ++ oauth_uri:params_to_header_string(Params)}. diff --git a/apps/couch/src/couch_rep_missing_revs.erl b/apps/couch/src/couch_rep_missing_revs.erl new file mode 100644 index 00000000..1eff6774 --- /dev/null +++ b/apps/couch/src/couch_rep_missing_revs.erl @@ -0,0 +1,198 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_rep_missing_revs). +-behaviour(gen_server). +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3]). + +-export([start_link/4, next/1, stop/1]). 

-define(BUFFER_SIZE, 1000).

-include("couch_db.hrl").

%% changes_loop       - pid of the helper pulling rows from the changes feed
%% changes_from       - 'From' of a changes_loop call parked on a full buffer
%% target             - target db (#http_db{} for remote, else local handle)
%% parent             - replication manager pid (gets stats/checkpoint msgs)
%% complete           - true once the changes feed is exhausted
%% count / rows       - size and queue of buffered missing-rev entries
%% reply_to           - 'From' of a consumer parked on an empty buffer
%% high_source_seq    - highest source sequence seen
%% high_missing_seq   - highest sequence whose missing revs were recorded
%% high_committed_seq - highest sequence known committed downstream
-record (state, {
    changes_loop,
    changes_from = nil,
    target,
    parent,
    complete = false,
    count = 0,
    reply_to = nil,
    rows = queue:new(),
    high_source_seq = 0,
    high_missing_seq = 0,
    high_committed_seq = 0
}).

start_link(Parent, Target, ChangesFeed, PostProps) ->
    gen_server:start_link(?MODULE, [Parent, Target, ChangesFeed, PostProps], []).

%% Blocking fetch of the next batch; returns {HighSeq, Revs} or 'complete'.
next(Server) ->
    gen_server:call(Server, next_missing_revs, infinity).

stop(Server) ->
    gen_server:call(Server, stop).

init([Parent, Target, ChangesFeed, _PostProps]) ->
    %% Trap exits so the linked changes_loop's death reaches handle_info.
    process_flag(trap_exit, true),
    Self = self(),
    Pid = spawn_link(fun() -> changes_loop(Self, ChangesFeed, Target) end),
    {ok, #state{changes_loop=Pid, target=Target, parent=Parent}}.

handle_call({add_missing_revs, {HighSeq, Revs}}, From, State) ->
    %% Producer side: report stats, then buffer or hand off the revs.
    State#state.parent ! {update_stats, missing_revs, length(Revs)},
    handle_add_missing_revs(HighSeq, Revs, From, State);

handle_call(next_missing_revs, From, State) ->
    handle_next_missing_revs(From, State).

handle_cast({update_committed_seq, N}, State) ->
    if State#state.high_committed_seq < N ->
        ?LOG_DEBUG("missing_revs updating committed seq to ~p", [N]);
    true -> ok end,
    {noreply, State#state{high_committed_seq=N}}.

handle_info({'EXIT', Pid, Reason}, #state{changes_loop=Pid} = State) ->
    handle_changes_loop_exit(Reason, State);

handle_info(Msg, State) ->
    ?LOG_INFO("unexpected message ~p", [Msg]),
    {noreply, State}.

terminate(_Reason, #state{changes_loop=Pid}) when is_pid(Pid) ->
    %% Take the producer loop down with us.
    exit(Pid, shutdown),
    ok;
terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%internal funs

%% Producer handler. No missing revs at this seq: just advance the source
%% seq and maybe record a checkpoint.
handle_add_missing_revs(HighSeq, [], _From, State) ->
    NewState = State#state{high_source_seq=HighSeq},
    maybe_checkpoint(NewState),
    {reply, ok, NewState};
handle_add_missing_revs(HighSeq, Revs, From, #state{reply_to=nil} = State) ->
    %% No consumer waiting: append to the buffer; block the producer (defer
    %% its reply) once the buffer reaches ?BUFFER_SIZE.
    #state{rows=Rows, count=Count} = State,
    NewState = State#state{
        rows = queue:join(Rows, queue:from_list(Revs)),
        count = Count + length(Revs),
        high_source_seq = HighSeq,
        high_missing_seq = HighSeq
    },
    if NewState#state.count < ?BUFFER_SIZE ->
        {reply, ok, NewState};
    true ->
        {noreply, NewState#state{changes_from=From}}
    end;
handle_add_missing_revs(HighSeq, Revs, _From, #state{count=0} = State) ->
    %% A consumer is blocked on an empty buffer: hand the batch straight over.
    gen_server:reply(State#state.reply_to, {HighSeq, Revs}),
    NewState = State#state{
        high_source_seq = HighSeq,
        high_missing_seq = HighSeq,
        reply_to = nil
    },
    {reply, ok, NewState}.

%% Consumer handler. Empty buffer: finish if the feed is done, else park.
handle_next_missing_revs(From, #state{count=0} = State) ->
    if State#state.complete ->
        {stop, normal, complete, State};
    true ->
        {noreply, State#state{reply_to=From}}
    end;
handle_next_missing_revs(_From, State) ->
    #state{
        changes_from = ChangesFrom,
        high_missing_seq = HighSeq,
        rows = Rows
    } = State,
    %% Unblock a producer that was waiting for buffer space.
    if ChangesFrom =/= nil -> gen_server:reply(ChangesFrom, ok); true -> ok end,
    NewState = State#state{count=0, changes_from=nil, rows=queue:new()},
    {reply, {HighSeq, queue:to_list(Rows)}, NewState}.

%% The changes_loop finished. Normal exit means the feed is exhausted; any
%% other reason propagates as our own stop reason.
handle_changes_loop_exit(normal, State) ->
    if State#state.reply_to =/= nil ->
        gen_server:reply(State#state.reply_to, complete),
        {stop, normal, State};
    true ->
        {noreply, State#state{complete=true, changes_loop=nil}}
    end;
handle_changes_loop_exit(Reason, State) ->
    {stop, Reason, State#state{changes_loop=nil}}.

%% Producer loop: pull batches from the changes feed, compute which revs
%% the target is missing, and push them into our gen_server. Exits 'normal'
%% when the feed reports 'complete'.
changes_loop(OurServer, SourceChangesServer, Target) ->
    case couch_rep_changes_feed:next(SourceChangesServer) of
    complete ->
        exit(normal);
    Changes ->
        MissingRevs = get_missing_revs(Target, Changes),
        gen_server:call(OurServer, {add_missing_revs, MissingRevs}, infinity)
    end,
    changes_loop(OurServer, SourceChangesServer, Target).

%% Remote target: POST the id->revs map to _missing_revs and parse the
%% reply. Returns {HighSeq, [{Id, Seq, MissingRevs}]}.
get_missing_revs(#http_db{}=Target, Changes) ->
    Transform = fun({Props}) ->
        C = couch_util:get_value(<<"changes">>, Props),
        Id = couch_util:get_value(<<"id">>, Props),
        {Id, [R || {[{<<"rev">>, R}]} <- C]}
    end,
    IdRevsList = [Transform(Change) || Change <- Changes],
    SeqDict = changes_dictionary(Changes),
    %% Changes arrive in feed order, so the last row carries the batch's
    %% highest sequence.
    {LastProps} = lists:last(Changes),
    HighSeq = couch_util:get_value(<<"seq">>, LastProps),
    Request = Target#http_db{
        resource = "_missing_revs",
        method = post,
        body = {IdRevsList}
    },
    {Resp} = couch_rep_httpc:request(Request),
    case couch_util:get_value(<<"missing_revs">>, Resp) of
    {MissingRevs} ->
        X = [{Id, dict:fetch(Id, SeqDict), couch_doc:parse_revs(RevStrs)} ||
            {Id,RevStrs} <- MissingRevs],
        {HighSeq, X};
    _ ->
        exit({target_error, couch_util:get_value(<<"error">>, Resp)})
    end;

%% Local target: ask couch_db directly.
get_missing_revs(Target, Changes) ->
    Transform = fun({Props}) ->
        C = couch_util:get_value(<<"changes">>, Props),
        Id = couch_util:get_value(<<"id">>, Props),
        {Id, [couch_doc:parse_rev(R) || {[{<<"rev">>, R}]} <- C]}
    end,
    IdRevsList = [Transform(Change) || Change <- Changes],
    SeqDict = changes_dictionary(Changes),
    {LastProps} = lists:last(Changes),
    HighSeq = couch_util:get_value(<<"seq">>, LastProps),
    {ok, Results} = couch_db:get_missing_revs(Target, IdRevsList),
    {HighSeq, [{Id, dict:fetch(Id, SeqDict), Revs} || {Id, Revs, _} <- Results]}.

%% Map doc id -> source seq for the rows in this batch.
changes_dictionary(ChangeList) ->
    KVs = [{couch_util:get_value(<<"id">>,C), couch_util:get_value(<<"seq">>,C)}
        || {C} <- ChangeList],
    dict:from_list(KVs).
+ +%% save a checkpoint if no revs are missing on target so we don't +%% rescan metadata unnecessarily +maybe_checkpoint(#state{high_missing_seq=N, high_committed_seq=N} = State) -> + #state{ + parent = Parent, + high_source_seq = SourceSeq + } = State, + Parent ! {missing_revs_checkpoint, SourceSeq}; +maybe_checkpoint(_State) -> + ok. diff --git a/apps/couch/src/couch_rep_reader.erl b/apps/couch/src/couch_rep_reader.erl new file mode 100644 index 00000000..8722f3f5 --- /dev/null +++ b/apps/couch/src/couch_rep_reader.erl @@ -0,0 +1,340 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_rep_reader). +-behaviour(gen_server). +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3]). + +-export([start_link/4, next/1]). + +-import(couch_util, [url_encode/1]). + +-define (BUFFER_SIZE, 1000). +-define (MAX_CONCURRENT_REQUESTS, 100). +-define (MAX_CONNECTIONS, 20). +-define (MAX_PIPELINE_SIZE, 50). + +-include("couch_db.hrl"). +-include_lib("ibrowse/include/ibrowse.hrl"). + +-record (state, { + parent, + source, + missing_revs, + reader_loop, + reader_from = [], + count = 0, + docs = queue:new(), + reply_to = nil, + complete = false, + monitor_count = 0, + pending_doc_request = nil, + requested_seqs = [], + opened_seqs = [] +}). + +start_link(Parent, Source, MissingRevs_or_DocIds, PostProps) -> + gen_server:start_link( + ?MODULE, [Parent, Source, MissingRevs_or_DocIds, PostProps], [] + ). 

%% Blocking fetch of the next batch of docs; returns {HighSeq, Docs} or
%% {complete, HighSeq}.
next(Pid) ->
    gen_server:call(Pid, next_docs, infinity).

init([Parent, Source, MissingRevs_or_DocIds, _PostProps]) ->
    process_flag(trap_exit, true),
    %% Remote source: widen ibrowse's per-host connection/pipeline limits so
    %% concurrent doc requests are not serialized.
    if is_record(Source, http_db) ->
        #url{host=Host, port=Port} = ibrowse_lib:parse_url(Source#http_db.url),
        ibrowse:set_max_sessions(Host, Port, ?MAX_CONNECTIONS),
        ibrowse:set_max_pipeline_size(Host, Port, ?MAX_PIPELINE_SIZE);
    true -> ok end,
    Self = self(),
    ReaderLoop = spawn_link(
        fun() -> reader_loop(Self, Source, MissingRevs_or_DocIds) end
    ),
    %% A pid means we follow a missing-revs server; a list means an explicit
    %% doc-id replication (no sequence tracking).
    MissingRevs = case MissingRevs_or_DocIds of
    Pid when is_pid(Pid) ->
        Pid;
    _ListDocIds ->
        nil
    end,
    State = #state{
        parent = Parent,
        source = Source,
        missing_revs = MissingRevs,
        reader_loop = ReaderLoop
    },
    {ok, State}.

handle_call({add_docs, Seq, Docs}, From, State) ->
    State#state.parent ! {update_stats, docs_read, length(Docs)},
    handle_add_docs(Seq, lists:flatten(Docs), From, State);

handle_call({add_request_seqs, Seqs}, _From, State) ->
    %% Record outstanding seqs so checkpoints never pass an unopened doc.
    SeqList = State#state.requested_seqs,
    {reply, ok, State#state{requested_seqs = lists:merge(Seqs, SeqList)}};

handle_call(next_docs, From, State) ->
    handle_next_docs(From, State);

handle_call({open_remote_doc, Id, Seq, Revs}, From, State) ->
    handle_open_remote_doc(Id, Seq, Revs, From, State).

handle_cast(_Msg, State) ->
    {noreply, State}.

handle_info({'DOWN', _, _, _, Reason}, State) ->
    %% A monitored document-request worker finished.
    handle_monitor_down(Reason, State);

handle_info({'EXIT', Loop, complete}, #state{reader_loop=Loop} = State) ->
    handle_reader_loop_complete(State).

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%internal funs

%% Producer handler for docs arriving from the reader loop / workers.
handle_add_docs(_Seq, [], _From, State) ->
    {reply, ok, State};
handle_add_docs(Seq, DocsToAdd, From, #state{reply_to=nil} = State) ->
    %% No consumer waiting: buffer the docs; defer the producer's reply
    %% (back-pressure) once the buffer reaches ?BUFFER_SIZE.
    State1 = update_sequence_lists(Seq, State),
    NewState = State1#state{
        docs = queue:join(State1#state.docs, queue:from_list(DocsToAdd)),
        count = State1#state.count + length(DocsToAdd)
    },
    if NewState#state.count < ?BUFFER_SIZE ->
        {reply, ok, NewState};
    true ->
        {noreply, NewState#state{reader_from=[From|State#state.reader_from]}}
    end;
handle_add_docs(Seq, DocsToAdd, _From, #state{count=0} = State) ->
    %% A consumer is blocked on an empty buffer: hand the docs straight over.
    NewState = update_sequence_lists(Seq, State),
    HighSeq = calculate_new_high_seq(NewState),
    gen_server:reply(State#state.reply_to, {HighSeq, DocsToAdd}),
    {reply, ok, NewState#state{reply_to=nil}}.

%% Consumer handler. Empty buffer: finish if complete, else park the caller.
handle_next_docs(From, #state{count=0} = State) ->
    if State#state.complete ->
        {stop, normal, {complete, calculate_new_high_seq(State)}, State};
    true ->
        {noreply, State#state{reply_to=From}}
    end;
handle_next_docs(_From, State) ->
    #state{
        reader_from = ReaderFrom,
        docs = Docs
    } = State,
    %% Unblock every producer that was waiting for buffer space.
    [gen_server:reply(F, ok) || F <- ReaderFrom],
    NewState = State#state{count=0, reader_from=[], docs=queue:new()},
    {reply, {calculate_new_high_seq(State), queue:to_list(Docs)}, NewState}.

%% Cap in-flight HTTP doc requests: beyond ?MAX_CONCURRENT_REQUESTS the
%% request is parked (and the reader loop left blocked) until a monitored
%% worker goes down.
handle_open_remote_doc(Id, Seq, Revs, From, #state{monitor_count=N} = State)
        when N > ?MAX_CONCURRENT_REQUESTS ->
    {noreply, State#state{pending_doc_request={From,Id,Seq,Revs}}};
handle_open_remote_doc(Id, Seq, Revs, _, #state{source=#http_db{}} = State) ->
    #state{
        monitor_count = Count,
        source = Source
    } = State,
    {_, _Ref} = spawn_document_request(Source, Id, Seq, Revs),
    {reply, ok, State#state{monitor_count = Count+1}}.

%% A monitored document-request worker exited. 'normal' means success;
%% anything else takes the reader down with the worker's reason.
handle_monitor_down(normal, #state{pending_doc_request=nil, reply_to=nil,
        monitor_count=1, complete=waiting_on_monitors} = State) ->
    %% Last outstanding worker and nobody waiting: we are fully complete.
    {noreply, State#state{complete=true, monitor_count=0}},
handle_monitor_down(normal, #state{pending_doc_request=nil, reply_to=From,
        monitor_count=1, complete=waiting_on_monitors} = State) ->
    %% Last worker finished while a consumer is parked: finish them and stop.
    gen_server:reply(From, {complete, calculate_new_high_seq(State)}),
    {stop, normal, State#state{complete=true, monitor_count=0}};
handle_monitor_down(normal, #state{pending_doc_request=nil} = State) ->
    #state{monitor_count = Count} = State,
    {noreply, State#state{monitor_count = Count-1}};
handle_monitor_down(normal, State) ->
    %% A slot opened up: launch the parked request and unblock its caller.
    #state{
        source = Source,
        pending_doc_request = {From, Id, Seq, Revs}
    } = State,
    gen_server:reply(From, ok),
    {_, _NewRef} = spawn_document_request(Source, Id, Seq, Revs),
    {noreply, State#state{pending_doc_request=nil}};
handle_monitor_down(Reason, State) ->
    {stop, Reason, State}.

%% The reader loop has pushed everything; completion is only final once all
%% monitored workers have reported back.
handle_reader_loop_complete(#state{reply_to=nil, monitor_count=0} = State) ->
    {noreply, State#state{complete = true}};
handle_reader_loop_complete(#state{monitor_count=0} = State) ->
    HighSeq = calculate_new_high_seq(State),
    gen_server:reply(State#state.reply_to, {complete, HighSeq}),
    {stop, normal, State};
handle_reader_loop_complete(State) ->
    {noreply, State#state{complete = waiting_on_monitors}}.

%% Highest source seq that is safe to checkpoint given the requested/opened
%% bookkeeping (nil for by-doc-id replications, which track no sequences).
calculate_new_high_seq(#state{missing_revs=nil}) ->
    nil;
calculate_new_high_seq(#state{requested_seqs=[], opened_seqs=[Open|_]}) ->
    Open;
calculate_new_high_seq(#state{requested_seqs=[Req|_], opened_seqs=[Open|_]})
        when Req < Open ->
    0;
calculate_new_high_seq(#state{opened_seqs=[]}) ->
    0;
calculate_new_high_seq(State) ->
    hd(State#state.opened_seqs).

%% Accumulator for chunking a revision list so each resulting query string
%% stays under 8192 bytes (MochiWeb's URL limit upstream).
split_revlist(Rev, {[CurrentAcc|Rest], BaseLength, Length}) ->
    case Length+size(Rev) > 8192 of
    false ->
        {[[Rev|CurrentAcc] | Rest], BaseLength, Length+size(Rev)};
    true ->
        {[[Rev],CurrentAcc|Rest], BaseLength, BaseLength}
    end.

% We store outstanding requested sequences and a subset of already opened
% sequences in 2 ordered lists. The subset of opened seqs is a) the largest
% opened seq smaller than the smallest outstanding request seq plus b) all the
% opened seqs greater than the smallest outstanding request. I believe its the
% minimal set of info needed to correctly calculate which seqs have been
% replicated (because remote docs can be opened out-of-order) -- APK
%% Move Seq from requested_seqs to opened_seqs, then prune opened_seqs to
%% the minimal subset described above. No-op for by-doc-id replications.
update_sequence_lists(_Seq, #state{missing_revs=nil} = State) ->
    State;
update_sequence_lists(Seq, State) ->
    Requested = lists:delete(Seq, State#state.requested_seqs),
    AllOpened = lists:merge([Seq], State#state.opened_seqs),
    Opened = case Requested of
    [] ->
        %% Nothing outstanding: only the largest opened seq matters.
        [lists:last(AllOpened)];
    [EarliestReq|_] ->
        case lists:splitwith(fun(X) -> X < EarliestReq end, AllOpened) of
        {[], Greater} ->
            Greater;
        {Less, Greater} ->
            %% Keep only the largest opened seq below the earliest request.
            [lists:last(Less) | Greater]
        end
    end,
    State#state{
        requested_seqs = Requested,
        opened_seqs = Opened
    }.

%% Fetch specific revisions of a remote doc with ?open_revs=[...], splitting
%% the revision list across multiple requests when the resulting URL would
%% exceed MochiWeb's length limit. Returns a list of #doc{} records and/or
%% {{not_found, missing}, Rev} tuples. Attachment stubs are converted to
%% lazily-fetched attachments.
open_doc_revs(#http_db{} = DbS, DocId, Revs) ->
    %% all this logic just splits up revision lists that are too long for
    %% MochiWeb into multiple requests
    BaseQS = [{revs,true}, {latest,true}, {att_encoding_info,true}],
    BaseReq = DbS#http_db{resource=url_encode(DocId), qs=BaseQS},
    BaseLength = length(couch_rep_httpc:full_url(BaseReq)) + 11, % &open_revs=

    {RevLists, _, _} = lists:foldl(fun split_revlist/2,
        {[[]], BaseLength, BaseLength}, couch_doc:revs_to_strs(Revs)),

    Requests = [BaseReq#http_db{
        qs = [{open_revs, ?JSON_ENCODE(RevList)} | BaseQS]
    } || RevList <- RevLists],
    JsonResults = lists:flatten([couch_rep_httpc:request(R) || R <- Requests]),

    Transform =
    fun({[{<<"missing">>, Rev}]}) ->
        {{not_found, missing}, couch_doc:parse_rev(Rev)};
    ({[{<<"ok">>, Json}]}) ->
        #doc{id=Id, revs=Rev, atts=Atts} = Doc = couch_doc:from_json_obj(Json),
        Doc#doc{atts=[couch_rep_att:convert_stub(A, {DbS,Id,Rev}) || A <- Atts]}
    end,
    [Transform(Result) || Result <- JsonResults].

%% Fetch the current revision of a remote doc. Returns [] when the doc is
%% missing, otherwise a single-element list with the converted #doc{}.
open_doc(#http_db{} = DbS, DocId) ->
    % get latest rev of the doc
    Req = DbS#http_db{
        resource=url_encode(DocId),
        qs=[{att_encoding_info, true}]
    },
    case couch_rep_httpc:request(Req) of
    {[{<<"error">>,<<"not_found">>}, {<<"reason">>,<<"missing">>}]} ->
        [];
    Json ->
        #doc{id=Id, revs=Rev, atts=Atts} = Doc = couch_doc:from_json_obj(Json),
        [Doc#doc{
            atts=[couch_rep_att:convert_stub(A, {DbS,Id,Rev}) || A <- Atts]
        }]
    end.

%% By-doc-id replication: open each named doc once (remote ids go through
%% the gen_server so requests are throttled; local ids are read directly).
%% Exits 'complete' when every id is processed.
reader_loop(ReaderServer, Source, DocIds) when is_list(DocIds) ->
    case Source of
    #http_db{} ->
        [gen_server:call(ReaderServer, {open_remote_doc, Id, nil, nil},
            infinity) || Id <- DocIds];
    _LocalDb ->
        %% Silently skip ids that cannot be opened locally.
        Docs = lists:foldr(fun(Id, Acc) ->
            case couch_db:open_doc(Source, Id) of
            {ok, Doc} ->
                [Doc | Acc];
            _ ->
                Acc
            end
        end, [], DocIds),
        gen_server:call(ReaderServer, {add_docs, nil, Docs}, infinity)
    end,
    exit(complete);

%% Normal replication: consume batches from the missing-revs server until it
%% reports 'complete'.
reader_loop(ReaderServer, Source, MissingRevsServer) ->
    case couch_rep_missing_revs:next(MissingRevsServer) of
    complete ->
        exit(complete);
    {HighSeq, IdsRevs} ->
        % to be safe, make sure Results are sorted by source_seq
        SortedIdsRevs = lists:keysort(2, IdsRevs),
        RequestSeqs = [S || {_,S,_} <- SortedIdsRevs],
        %% Register the seqs first so checkpoints cannot pass unopened docs.
        gen_server:call(ReaderServer, {add_request_seqs, RequestSeqs}, infinity),
        case Source of
        #http_db{} ->
            [gen_server:call(ReaderServer, {open_remote_doc, Id, Seq, Revs},
                infinity) || {Id,Seq,Revs} <- SortedIdsRevs],
            reader_loop(ReaderServer, Source, MissingRevsServer);
        _Local ->
            Source2 = maybe_reopen_db(Source, HighSeq),
            lists:foreach(fun({Id,Seq,Revs}) ->
                {ok, Docs} = couch_db:open_doc_revs(Source2, Id, Revs, [latest]),
                JustTheDocs = [Doc || {ok, Doc} <- Docs],
                gen_server:call(ReaderServer, {add_docs, Seq, JustTheDocs},
                    infinity)
            end, SortedIdsRevs),
            reader_loop(ReaderServer, Source2, MissingRevsServer)
        end
    end.

%% Refresh a local db handle when the requested seq is newer than the handle
%% we hold; otherwise keep the existing snapshot.
maybe_reopen_db(#db{update_seq=OldSeq} = Db, HighSeq) when HighSeq > OldSeq ->
    {ok, NewDb} = couch_db:open(Db#db.name, [{user_ctx, Db#db.user_ctx}]),
    couch_db:close(Db),
    NewDb;
maybe_reopen_db(Db, _HighSeq) ->
    Db.

%% Spawn a monitored worker that fetches one remote doc and reports the
%% result back via {add_docs, Seq, ...}. nil/nil means "latest rev only".
spawn_document_request(Source, Id, nil, nil) ->
    spawn_document_request(Source, Id);
spawn_document_request(Source, Id, Seq, Revs) ->
    Server = self(),
    SpawnFun = fun() ->
        Results = open_doc_revs(Source, Id, Revs),
        gen_server:call(Server, {add_docs, Seq, Results}, infinity)
    end,
    spawn_monitor(SpawnFun).
+ +spawn_document_request(Source, Id) -> + Server = self(), + SpawnFun = fun() -> + Results = open_doc(Source, Id), + gen_server:call(Server, {add_docs, nil, Results}, infinity) + end, + spawn_monitor(SpawnFun). diff --git a/apps/couch/src/couch_rep_sup.erl b/apps/couch/src/couch_rep_sup.erl new file mode 100644 index 00000000..1318c598 --- /dev/null +++ b/apps/couch/src/couch_rep_sup.erl @@ -0,0 +1,31 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_rep_sup). +-behaviour(supervisor). +-export([init/1, start_link/0]). + +-include("couch_db.hrl"). + +start_link() -> + supervisor:start_link({local,?MODULE}, ?MODULE, []). + +%%============================================================================= +%% supervisor callbacks +%%============================================================================= + +init([]) -> + {ok, {{one_for_one, 3, 10}, []}}. + +%%============================================================================= +%% internal functions +%%============================================================================= diff --git a/apps/couch/src/couch_rep_writer.erl b/apps/couch/src/couch_rep_writer.erl new file mode 100644 index 00000000..dd6396fd --- /dev/null +++ b/apps/couch/src/couch_rep_writer.erl @@ -0,0 +1,170 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

-module(couch_rep_writer).

-export([start_link/4]).

-include("couch_db.hrl").

%% Spawn the writer process for one replication; it pulls doc batches from
%% the reader and writes them to the target until the reader is complete.
start_link(Parent, Target, Reader, _PostProps) ->
    {ok, spawn_link(fun() -> writer_loop(Parent, Reader, Target) end)}.

%% Main loop: fetch, write, report stats and checkpoints to the parent,
%% recurse. Attachment fetch failures abort the whole writer.
writer_loop(Parent, Reader, Target) ->
    case couch_rep_reader:next(Reader) of
    {complete, nil} ->
        %% By-doc-id replication carries no sequence to checkpoint.
        ok;
    {complete, FinalSeq} ->
        Parent ! {writer_checkpoint, FinalSeq},
        ok;
    {HighSeq, Docs} ->
        DocCount = length(Docs),
        try write_docs(Target, Docs) of
        {ok, []} ->
            Parent ! {update_stats, docs_written, DocCount};
        {ok, Errors} ->
            ErrorCount = length(Errors),
            Parent ! {update_stats, doc_write_failures, ErrorCount},
            Parent ! {update_stats, docs_written, DocCount - ErrorCount}
        catch
        {attachment_request_failed, Err} ->
            ?LOG_DEBUG("writer failed to write an attachment ~p", [Err]),
            exit({attachment_request_failed, Err, Docs})
        end,
        case HighSeq of
        nil ->
            ok;
        _SeqNumber ->
            Parent ! {writer_checkpoint, HighSeq}
        end,
        couch_rep_att:cleanup(),
        couch_util:should_flush(),
        writer_loop(Parent, Reader, Target)
    end.

%% Remote target: bulk-post docs without attachments; docs with attachments
%% go one-by-one as multipart PUTs. Returns {ok, ErrorsJson}.
write_docs(#http_db{} = Db, Docs) ->
    {DocsAtts, DocsNoAtts} = lists:partition(
        fun(#doc{atts=[]}) -> false; (_) -> true end,
        Docs
    ),
    ErrorsJson0 = write_bulk_docs(Db, DocsNoAtts),
    ErrorsJson = lists:foldl(
        fun(Doc, Acc) -> write_multi_part_doc(Db, Doc) ++ Acc end,
        ErrorsJson0,
        DocsAtts
    ),
    {ok, ErrorsJson};
%% Local target: write directly, preserving existing revision trees.
write_docs(Db, Docs) ->
    couch_db:update_docs(Db, Docs, [delay_commit], replicated_changes).

%% POST attachment-free docs to _bulk_docs with new_edits=false so the
%% target accepts our revision identifiers verbatim. Returns a list of
%% per-doc error tuples (empty on full success); a non-list reply is a
%% database-level failure and aborts via exit.
write_bulk_docs(_Db, []) ->
    [];
write_bulk_docs(#http_db{headers = Headers} = Db, Docs) ->
    JsonDocs = [
        couch_doc:to_json_obj(Doc, [revs, att_gzip_length]) || Doc <- Docs
    ],
    Request = Db#http_db{
        resource = "_bulk_docs",
        method = post,
        body = {[{new_edits, false}, {docs, JsonDocs}]},
        %% Defer fsync on the target; checkpoints ensure durability later.
        headers = couch_util:proplist_apply_field({"Content-Type", "application/json"}, [{"X-Couch-Full-Commit", "false"} | Headers])
    },
    ErrorsJson = case couch_rep_httpc:request(Request) of
    {FailProps} ->
        exit({target_error, couch_util:get_value(<<"error">>, FailProps)});
    List when is_list(List) ->
        List
    end,
    [write_docs_1(V) || V <- ErrorsJson].

%% PUT one doc with attachments as a streamed multipart/related body. A
%% linked streamer process feeds chunks through a couch_work_queue so the
%% doc is never fully materialized in memory. Returns [] on success or a
%% one-element error list.
write_multi_part_doc(#http_db{headers=Headers} = Db, #doc{atts=Atts} = Doc) ->
    JsonBytes = ?JSON_ENCODE(
        couch_doc:to_json_obj(
            Doc,
            [follows, att_encoding_info, attachments]
        )
    ),
    Boundary = couch_uuids:random(),
    {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
        Boundary, JsonBytes, Atts, true
    ),
    StreamerPid = spawn_link(
        fun() -> streamer_fun(Boundary, JsonBytes, Atts) end
    ),
    %% ibrowse body callback: first call starts the streamer and obtains the
    %% queue; subsequent calls dequeue chunks until the queue closes (eof).
    BodyFun = fun(Acc) ->
        DataQueue = case Acc of
        nil ->
            StreamerPid ! {start, self()},
            receive
            {queue, Q} ->
                Q
            end;
        Queue ->
            Queue
        end,
        case couch_work_queue:dequeue(DataQueue) of
        closed ->
            eof;
        {ok, Data} ->
            {ok, iolist_to_binary(Data), DataQueue}
        end
    end,
    %% NOTE(review): header name casing differs from write_bulk_docs/2
    %% ("x-couch-full-commit" vs "X-Couch-Full-Commit"); HTTP header names
    %% are case-insensitive, so behavior is the same.
    Request = Db#http_db{
        resource = couch_util:url_encode(Doc#doc.id),
        method = put,
        qs = [{new_edits, false}],
        body = {BodyFun, nil},
        headers = [
            {"x-couch-full-commit", "false"},
            {"Content-Type", ?b2l(ContentType)},
            {"Content-Length", Len} | Headers
        ]
    },
    Result = case couch_rep_httpc:request(Request) of
    {[{<<"error">>, Error}, {<<"reason">>, Reason}]} ->
        {Pos, [RevId | _]} = Doc#doc.revs,
        ErrId = couch_util:to_existing_atom(Error),
        [{Doc#doc.id, couch_doc:rev_to_str({Pos, RevId})}, {ErrId, Reason}];
    _ ->
        []
    end,
    StreamerPid ! stop,
    Result.
+ +streamer_fun(Boundary, JsonBytes, Atts) -> + receive + stop -> + ok; + {start, From} -> + % better use a brand new queue, to ensure there's no garbage from + % a previous (failed) iteration + {ok, DataQueue} = couch_work_queue:new(1024 * 1024, 1000), + From ! {queue, DataQueue}, + couch_doc:doc_to_multi_part_stream( + Boundary, + JsonBytes, + Atts, + fun(Data) -> + couch_work_queue:queue(DataQueue, Data) + end, + true + ), + couch_work_queue:close(DataQueue), + streamer_fun(Boundary, JsonBytes, Atts) + end. + +write_docs_1({Props}) -> + Id = couch_util:get_value(<<"id">>, Props), + Rev = couch_doc:parse_rev(couch_util:get_value(<<"rev">>, Props)), + ErrId = couch_util:to_existing_atom(couch_util:get_value(<<"error">>, Props)), + Reason = couch_util:get_value(<<"reason">>, Props), + {{Id, Rev}, {ErrId, Reason}}. diff --git a/apps/couch/src/couch_server.erl b/apps/couch/src/couch_server.erl new file mode 100644 index 00000000..43fd9044 --- /dev/null +++ b/apps/couch/src/couch_server.erl @@ -0,0 +1,399 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_server). +-behaviour(gen_server). + +-export([open/2,create/2,delete/2,all_databases/0,get_version/0]). +-export([init/1, handle_call/3,sup_start_link/0]). +-export([handle_cast/2,code_change/3,handle_info/2,terminate/2]). +-export([dev_start/0,is_admin/2,has_admins/0,get_stats/0]). + +-include("couch_db.hrl"). 

%% Internal state: database root directory, compiled db-name regexp, and
%% LRU accounting for how many databases are currently open.
-record(server, {
    root_dir = [],
    dbname_regexp,
    max_dbs_open = 100,
    dbs_open = 0,
    start_time = ""
}).

%% Developer convenience: rebuild all modules and restart the application.
dev_start() ->
    couch:stop(),
    up_to_date = make:all([load, debug_info]),
    couch:start().

%% Version string of the loaded couch application; "0.0.0" if not loaded.
get_version() ->
    Apps = application:loaded_applications(),
    case lists:keysearch(couch, 1, Apps) of
    {value, {_, _, Vsn}} ->
        Vsn;
    false ->
        "0.0.0"
    end.

get_stats() ->
    {ok, #server{start_time = Time, dbs_open = Open}} =
        gen_server:call(couch_server, get_server),
    [{start_time, ?l2b(Time)}, {dbs_open, Open}].

sup_start_link() ->
    gen_server:start_link({local, couch_server}, couch_server, [], []).

%% Open an existing database, stamping the caller's user context on it.
open(DbName, Options) ->
    case gen_server:call(couch_server, {open, DbName, Options}, infinity) of
    {ok, Db} ->
        Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
        {ok, Db#db{user_ctx = Ctx}};
    Error ->
        Error
    end.

%% Create a new database, stamping the caller's user context on it.
create(DbName, Options) ->
    case gen_server:call(couch_server, {create, DbName, Options}, infinity) of
    {ok, Db} ->
        Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
        {ok, Db#db{user_ctx = Ctx}};
    Error ->
        Error
    end.

delete(DbName, Options) ->
    gen_server:call(couch_server, {delete, DbName, Options}, infinity).

%% Validate DbName against the configured regexp; "_users" is the one
%% reserved name allowed through.
check_dbname(#server{dbname_regexp = RegExp}, DbName) ->
    case re:run(DbName, RegExp, [{capture, none}]) of
    match ->
        ok;
    nomatch when DbName =:= "_users" ->
        ok;
    nomatch ->
        {error, illegal_database_name}
    end.

%% Check a plaintext password against the salted SHA-1 hash in the config.
is_admin(User, ClearPwd) ->
    case couch_config:get("admins", User) of
    "-hashed-" ++ HashedPwdAndSalt ->
        [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
        couch_util:to_hex(crypto:sha(ClearPwd ++ Salt)) == HashedPwd;
    _Else ->
        false
    end.

has_admins() ->
    couch_config:get("admins") /= [].

get_full_filename(Server, DbName) ->
    filename:join([Server#server.root_dir, "./" ++ DbName ++ ".couch"]).

hash_admin_passwords() ->
    hash_admin_passwords(true).

%% Replace any plaintext admin passwords in the config with salted SHA-1
%% hashes; already-hashed entries are left untouched.
hash_admin_passwords(Persist) ->
    lists:foreach(
        fun({_User, "-hashed-" ++ _}) ->
            ok; % already hashed
        ({User, ClearPassword}) ->
            Salt = ?b2l(couch_uuids:random()),
            Hashed = couch_util:to_hex(crypto:sha(ClearPassword ++ Salt)),
            couch_config:set("admins",
                User, "-hashed-" ++ Hashed ++ "," ++ Salt, Persist)
        end, couch_config:get("admins")).

init([]) ->
    % read config and register for configuration changes

    % just stop if one of the config settings change. couch_server_sup
    % will restart us and then we will pick up the new settings.

    RootDir = couch_config:get("couchdb", "database_dir", "."),
    MaxDbsOpen = list_to_integer(
        couch_config:get("couchdb", "max_dbs_open")),
    Self = self(),
    ok = couch_config:register(
        fun("couchdb", "database_dir") ->
            exit(Self, config_change)
        end),
    ok = couch_config:register(
        fun("couchdb", "max_dbs_open", Max) ->
            gen_server:call(couch_server,
                {set_max_dbs_open, list_to_integer(Max)})
        end),
    ok = couch_file:init_delete_dir(RootDir),
    hash_admin_passwords(),
    ok = couch_config:register(
        fun("admins", _Key, _Value, Persist) ->
            % spawn here so couch_config doesn't try to call itself
            spawn(fun() -> hash_admin_passwords(Persist) end)
        end, false),
    {ok, RegExp} = re:compile("^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$"),
    ets:new(couch_dbs_by_name, [set, private, named_table]),
    ets:new(couch_dbs_by_pid, [set, private, named_table]),
    ets:new(couch_dbs_by_lru, [ordered_set, private, named_table]),
    ets:new(couch_sys_dbs, [set, private, named_table]),
    process_flag(trap_exit, true),
    {ok, #server{root_dir = RootDir,
        dbname_regexp = RegExp,
        max_dbs_open = MaxDbsOpen,
        start_time = httpd_util:rfc1123_date()}}.

%% Synchronously shut down every open database before the server goes away.
%% BUG FIX: the table values are 3-tuples, {opened, Pid, LruTime} or
%% {opening, Pid, Froms}; the old 2-tuple pattern {Pid, _LruTime} silently
%% filtered every row out of the comprehension, so no db was ever shut down.
terminate(_Reason, _Srv) ->
    [couch_util:shutdown_sync(Pid) || {_DbName, {_State, Pid, _}} <-
        ets:tab2list(couch_dbs_by_name)],
    ok.

%% List every *.couch file under the root directory (recursively), returned
%% as binaries relative to the root without the ".couch" extension.
all_databases() ->
    {ok, #server{root_dir = Root}} = gen_server:call(couch_server, get_server),
    NormRoot = couch_util:normpath(Root),
    Filenames =
    filelib:fold_files(Root, "^[a-z0-9\\_\\$()\\+\\-]*[\\.]couch$", true,
        fun(Filename, AccIn) ->
            % strip the root prefix (and a leading slash, when present)
            RelativeFilename = case couch_util:normpath(Filename) -- NormRoot of
            [$/ | Rel] -> Rel;
            Rel -> Rel
            end,
            [list_to_binary(filename:rootname(RelativeFilename, ".couch")) | AccIn]
        end, []),
    {ok, Filenames}.


%% When at the open-databases limit, evict the least recently used idle
%% database to make room; below the limit the state is returned unchanged.
maybe_close_lru_db(#server{dbs_open = NumOpen, max_dbs_open = MaxOpen} = Server)
        when NumOpen < MaxOpen ->
    {ok, Server};
maybe_close_lru_db(#server{dbs_open = NumOpen} = Server) ->
    % must free up the lru db.
    case try_close_lru(now()) of
    ok ->
        {ok, Server#server{dbs_open = NumOpen - 1}};
    Error ->
        Error
    end.

%% Walk the LRU table looking for an idle database to shut down. Busy
%% databases get a refreshed LRU stamp; once we wrap past StartTime every
%% open database has proven active and we give up.
try_close_lru(StartTime) ->
    LruTime = get_lru(),
    if
    LruTime > StartTime ->
        % looped through all open dbs and found them all in use
        {error, all_dbs_active};
    true ->
        [{_, DbName}] = ets:lookup(couch_dbs_by_lru, LruTime),
        [{_, {opened, MainPid, LruTime}}] = ets:lookup(couch_dbs_by_name, DbName),
        case couch_db:is_idle(MainPid) of
        true ->
            couch_util:shutdown_sync(MainPid),
            true = ets:delete(couch_dbs_by_lru, LruTime),
            true = ets:delete(couch_dbs_by_name, DbName),
            true = ets:delete(couch_dbs_by_pid, MainPid),
            true = ets:delete(couch_sys_dbs, DbName),
            ok;
        false ->
            % still has referrers: give it a current lru time and try the
            % next candidate in the table
            NewLruTime = now(),
            true = ets:insert(couch_dbs_by_name,
                {DbName, {opened, MainPid, NewLruTime}}),
            true = ets:insert(couch_dbs_by_pid, {MainPid, DbName}),
            true = ets:delete(couch_dbs_by_lru, LruTime),
            true = ets:insert(couch_dbs_by_lru, {NewLruTime, DbName}),
            try_close_lru(StartTime)
        end
    end.

get_lru() ->
    get_lru(ets:first(couch_dbs_by_lru)).

%% Find the LRU stamp of the oldest eviction candidate. Ordinary databases
%% are always eligible; system databases only when they are idle.
get_lru(LruTime) ->
    [{LruTime, DbName}] = ets:lookup(couch_dbs_by_lru, LruTime),
    case ets:member(couch_sys_dbs, DbName) of
    false ->
        LruTime;
    true ->
        [{_, {opened, MainPid, _}}] = ets:lookup(couch_dbs_by_name, DbName),
        case couch_db:is_idle(MainPid) of
        true ->
            LruTime;
        false ->
            get_lru(ets:next(couch_dbs_by_lru, LruTime))
        end
    end.

%% Kick off an asynchronous database open. A helper process starts the db
%% and reports back via the {open_result, ...} call; meanwhile the db is
%% registered as 'opening' so concurrent open requests queue up on it.
%% System databases do not count against max_dbs_open.
open_async(Server, From, DbName, Filepath, Options) ->
    Parent = self(),
    Opener = spawn_link(fun() ->
        Res = couch_db:start_link(DbName, Filepath, Options),
        gen_server:call(
            Parent, {open_result, DbName, Res, Options}, infinity
        ),
        unlink(Parent),
        case Res of
        {ok, DbReader} ->
            unlink(DbReader);
        _ ->
            ok
        end
    end),
    true = ets:insert(couch_dbs_by_name, {DbName, {opening, Opener, [From]}}),
    true = ets:insert(couch_dbs_by_pid, {Opener, DbName}),
    DbsOpen = case lists:member(sys_db, Options) of
    true ->
        true = ets:insert(couch_sys_dbs, {DbName, true}),
        Server#server.dbs_open;
    false ->
        Server#server.dbs_open + 1
    end,
    Server#server{dbs_open = DbsOpen}.

handle_call({set_max_dbs_open, Max}, _From, Server) ->
    {reply, ok, Server#server{max_dbs_open = Max}};
handle_call(get_server, _From, Server) ->
    {reply, {ok, Server}, Server};
handle_call({open_result, DbName, {ok, OpenedDbPid}, Options}, _From, Server) ->
    % async open succeeded: promote 'opening' -> 'opened' and answer every
    % caller that queued up while the db was starting
    link(OpenedDbPid),
    [{DbName, {opening, Opener, Froms}}] = ets:lookup(couch_dbs_by_name, DbName),
    lists:foreach(fun({FromPid, _} = From) ->
        gen_server:reply(From,
            catch couch_db:open_ref_counted(OpenedDbPid, FromPid))
    end, Froms),
    LruTime = now(),
    true = ets:insert(couch_dbs_by_name,
        {DbName, {opened, OpenedDbPid, LruTime}}),
    true = ets:delete(couch_dbs_by_pid, Opener),
    true = ets:insert(couch_dbs_by_pid, {OpenedDbPid, DbName}),
    true = ets:insert(couch_dbs_by_lru, {LruTime, DbName}),
    case lists:member(create, Options) of
    true ->
        couch_db_update_notifier:notify({created, DbName});
    false ->
        ok
    end,
    {reply, ok, Server};
handle_call({open_result, DbName, Error, Options}, _From, Server) ->
    % async open failed: report the error to all waiters and roll back the
    % bookkeeping performed by open_async/5
    [{DbName, {opening, Opener, Froms}}] = ets:lookup(couch_dbs_by_name, DbName),
    lists:foreach(fun(From) ->
        gen_server:reply(From, Error)
    end, Froms),
    true = ets:delete(couch_dbs_by_name, DbName),
    true = ets:delete(couch_dbs_by_pid, Opener),
    DbsOpen = case lists:member(sys_db, Options) of
    true ->
        true = ets:delete(couch_sys_dbs, DbName),
        Server#server.dbs_open;
    false ->
        Server#server.dbs_open - 1
    end,
    {reply, ok, Server#server{dbs_open = DbsOpen}};
handle_call({open, DbName, Options}, {FromPid, _} = From, Server) ->
    LruTime = now(),
    case ets:lookup(couch_dbs_by_name, DbName) of
    [] ->
        open_db(DbName, Server, Options, From);
    [{_, {opening, Opener, Froms}}] ->
        % already being opened: queue this caller on the pending list
        true = ets:insert(couch_dbs_by_name,
            {DbName, {opening, Opener, [From | Froms]}}),
        {noreply, Server};
    [{_, {opened, MainPid, PrevLruTime}}] ->
        % already open: refresh its LRU stamp and hand out a counted ref
        true = ets:insert(couch_dbs_by_name, {DbName, {opened, MainPid, LruTime}}),
        true = ets:delete(couch_dbs_by_lru, PrevLruTime),
        true = ets:insert(couch_dbs_by_lru, {LruTime, DbName}),
        {reply, couch_db:open_ref_counted(MainPid, FromPid), Server}
    end;
handle_call({create, DbName, Options}, From, Server) ->
    case ets:lookup(couch_dbs_by_name, DbName) of
    [] ->
        open_db(DbName, Server, [create | Options], From);
    [_AlreadyRunningDb] ->
        {reply, file_exists, Server}
    end;
handle_call({delete, DbName, _Options}, _From, Server) ->
    DbNameList = binary_to_list(DbName),
    case check_dbname(Server, DbNameList) of
    ok ->
        FullFilepath = get_full_filename(Server, DbNameList),
        UpdateState =
        case ets:lookup(couch_dbs_by_name, DbName) of
        [] -> false;
        [{_, {opening, Pid, Froms}}] ->
            couch_util:shutdown_sync(Pid),
            true = ets:delete(couch_dbs_by_name, DbName),
            true = ets:delete(couch_dbs_by_pid, Pid),
            % BUG FIX: gen_server:send_result/2 does not exist and would
            % crash with undef; gen_server:reply/2 is the correct way to
            % answer the queued open requests.
            [gen_server:reply(F, not_found) || F <- Froms],
            true;
        [{_, {opened, Pid, LruTime}}] ->
            couch_util:shutdown_sync(Pid),
            true = ets:delete(couch_dbs_by_name, DbName),
            true = ets:delete(couch_dbs_by_pid, Pid),
            true = ets:delete(couch_dbs_by_lru, LruTime),
            true
        end,
        Server2 = case UpdateState of
        true ->
            DbsOpen = case ets:member(couch_sys_dbs, DbName) of
            true ->
                true = ets:delete(couch_sys_dbs, DbName),
                Server#server.dbs_open;
            false ->
                Server#server.dbs_open - 1
            end,
            Server#server{dbs_open = DbsOpen};
        false ->
            Server
        end,

        %% Delete any leftover .compact files. If we don't do this a subsequent
        %% request for this DB will try to open the .compact file and use it.
        couch_file:delete(Server#server.root_dir, FullFilepath ++ ".compact"),

        case couch_file:delete(Server#server.root_dir, FullFilepath) of
        ok ->
            couch_db_update_notifier:notify({deleted, DbName}),
            {reply, ok, Server2};
        {error, enoent} ->
            {reply, not_found, Server2};
        Else ->
            {reply, Else, Server2}
        end;
    Error ->
        {reply, Error, Server}
    end.

handle_cast(Msg, _Server) ->
    exit({unknown_cast_message, Msg}).

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% A config_change exit (planted by the couch_config registration in init/1,
%% visible here because we trap exits) means a core setting changed: stop
%% cleanly so couch_server_sup restarts us with the new settings.
%% BUG FIX: was {noreply, shutdown, Server}, which is not a valid gen_server
%% return — 'shutdown' would become the new state and Server the timeout.
handle_info({'EXIT', _Pid, config_change}, Server) ->
    {stop, config_change, Server};
handle_info(Error, _Server) ->
    ?LOG_ERROR("Unexpected message, restarting couch_server: ~p", [Error]),
    exit(kill).

%% Validate the name and start an async open. Non-system databases may first
%% need an idle db evicted to respect max_dbs_open.
open_db(DbName, Server, Options, From) ->
    DbNameList = binary_to_list(DbName),
    case check_dbname(Server, DbNameList) of
    ok ->
        Filepath = get_full_filename(Server, DbNameList),
        case lists:member(sys_db, Options) of
        true ->
            {noreply, open_async(Server, From, DbName, Filepath, Options)};
        false ->
            case maybe_close_lru_db(Server) of
            {ok, Server2} ->
                {noreply, open_async(Server2, From, DbName, Filepath, Options)};
            CloseError ->
                {reply, CloseError, Server}
            end
        end;
    Error ->
        {reply, Error, Server}
    end.
diff --git a/apps/couch/src/couch_server_sup.erl b/apps/couch/src/couch_server_sup.erl
new file mode 100644
index 00000000..4f0445da
--- /dev/null
+++ b/apps/couch/src/couch_server_sup.erl
@@ -0,0 +1,193 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

-module(couch_server_sup).
-behaviour(supervisor).


-export([start_link/1, stop/0, couch_config_start_link_wrapper/2,
    start_primary_services/0, start_secondary_services/0,
    restart_core_server/0]).

-include("couch_db.hrl").

%% supervisor callbacks
-export([init/1]).

%% Start the top-level supervisor, refusing to start a second instance.
start_link(IniFiles) ->
    case whereis(couch_server_sup) of
    undefined ->
        start_server(IniFiles);
    _Else ->
        {error, already_started}
    end.
+ +restart_core_server() -> + init:restart(). + +couch_config_start_link_wrapper(IniFiles, FirstConfigPid) -> + case is_process_alive(FirstConfigPid) of + true -> + link(FirstConfigPid), + {ok, FirstConfigPid}; + false -> couch_config:start_link(IniFiles) + end. + +start_server(IniFiles) -> + case init:get_argument(pidfile) of + {ok, [PidFile]} -> + case file:write_file(PidFile, os:getpid()) of + ok -> ok; + Error -> io:format("Failed to write PID file ~s, error: ~p", [PidFile, Error]) + end; + _ -> ok + end, + + {ok, ConfigPid} = couch_config:start_link(IniFiles), + + LogLevel = couch_config:get("log", "level", "info"), + % announce startup + io:format("Apache CouchDB ~s (LogLevel=~s) is starting.~n", [ + couch_server:get_version(), + LogLevel + ]), + case LogLevel of + "debug" -> + io:format("Configuration Settings ~p:~n", [IniFiles]), + [io:format(" [~s] ~s=~p~n", [Module, Variable, Value]) + || {{Module, Variable}, Value} <- couch_config:all()]; + _ -> ok + end, + + LibDir = + case couch_config:get("couchdb", "util_driver_dir", null) of + null -> + filename:join(couch_util:priv_dir(), "lib"); + LibDir0 -> LibDir0 + end, + + ok = couch_util:start_driver(LibDir), + + BaseChildSpecs = + {{one_for_all, 10, 3600}, + [{couch_config, + {couch_server_sup, couch_config_start_link_wrapper, [IniFiles, ConfigPid]}, + permanent, + brutal_kill, + worker, + [couch_config]}, + {couch_primary_services, + {couch_server_sup, start_primary_services, []}, + permanent, + infinity, + supervisor, + [couch_server_sup]}, + {couch_secondary_services, + {couch_server_sup, start_secondary_services, []}, + permanent, + infinity, + supervisor, + [couch_server_sup]} + ]}, + + % ensure these applications are running + application:start(ibrowse), + application:start(crypto), + + {ok, Pid} = supervisor:start_link( + {local, couch_server_sup}, couch_server_sup, BaseChildSpecs), + + % launch the icu bridge + % just restart if one of the config settings change. 
+ + couch_config:register( + fun("couchdb", "util_driver_dir") -> + ?MODULE:stop(); + ("daemons", _) -> + ?MODULE:stop() + end, Pid), + + unlink(ConfigPid), + + Ip = couch_config:get("httpd", "bind_address"), + Port = mochiweb_socket_server:get(couch_httpd, port), + io:format("Apache CouchDB has started. Time to relax.~n"), + ?LOG_INFO("Apache CouchDB has started on http://~s:~w/", [Ip, Port]), + + case couch_config:get("couchdb", "uri_file", null) of + null -> ok; + UriFile -> + Line = io_lib:format("http://~s:~w/~n", [Ip, Port]), + file:write_file(UriFile, Line) + end, + + {ok, Pid}. + +start_primary_services() -> + supervisor:start_link({local, couch_primary_services}, couch_server_sup, + {{one_for_one, 10, 3600}, + [{couch_log, + {couch_log, start_link, []}, + permanent, + brutal_kill, + worker, + [couch_log]}, + {couch_replication_supervisor, + {couch_rep_sup, start_link, []}, + permanent, + infinity, + supervisor, + [couch_rep_sup]}, + {couch_task_status, + {couch_task_status, start_link, []}, + permanent, + brutal_kill, + worker, + [couch_task_status]}, + {couch_server, + {couch_server, sup_start_link, []}, + permanent, + 1000, + worker, + [couch_server]}, + {couch_db_update_event, + {gen_event, start_link, [{local, couch_db_update}]}, + permanent, + brutal_kill, + worker, + dynamic} + ] + }). + +start_secondary_services() -> + DaemonChildSpecs = [ + begin + {ok, {Module, Fun, Args}} = couch_util:parse_term(SpecStr), + + {list_to_atom(Name), + {Module, Fun, Args}, + permanent, + 1000, + worker, + [Module]} + end + || {Name, SpecStr} + <- couch_config:get("daemons"), SpecStr /= ""], + + supervisor:start_link({local, couch_secondary_services}, couch_server_sup, + {{one_for_one, 10, 3600}, DaemonChildSpecs}). + +stop() -> + catch exit(whereis(couch_server_sup), normal). + +init(ChildSpecs) -> + {ok, ChildSpecs}. 
diff --git a/apps/couch/src/couch_stats_aggregator.erl b/apps/couch/src/couch_stats_aggregator.erl new file mode 100644 index 00000000..6090355d --- /dev/null +++ b/apps/couch/src/couch_stats_aggregator.erl @@ -0,0 +1,297 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_stats_aggregator). +-behaviour(gen_server). + +-export([start/0, start/1, stop/0]). +-export([all/0, all/1, get/1, get/2, get_json/1, get_json/2, collect_sample/0]). + +-export([init/1, terminate/2, code_change/3]). +-export([handle_call/3, handle_cast/2, handle_info/2]). + +-record(aggregate, { + description = <<"">>, + seconds = 0, + count = 0, + current = null, + sum = null, + mean = null, + variance = null, + stddev = null, + min = null, + max = null, + samples = [] +}). + + +start() -> + PrivDir = couch_util:priv_dir(), + start(filename:join(PrivDir, "stat_descriptions.cfg")). + +start(FileName) -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [FileName], []). + +stop() -> + gen_server:cast(?MODULE, stop). + +all() -> + ?MODULE:all(0). 
+all(Time) when is_binary(Time) -> + ?MODULE:all(list_to_integer(binary_to_list(Time))); +all(Time) when is_atom(Time) -> + ?MODULE:all(list_to_integer(atom_to_list(Time))); +all(Time) when is_integer(Time) -> + Aggs = ets:match(?MODULE, {{'$1', Time}, '$2'}), + Stats = lists:map(fun([Key, Agg]) -> {Key, Agg} end, Aggs), + case Stats of + [] -> + {[]}; + _ -> + Ret = lists:foldl(fun({{Mod, Key}, Agg}, Acc) -> + CurrKeys = case proplists:lookup(Mod, Acc) of + none -> []; + {Mod, {Keys}} -> Keys + end, + NewMod = {[{Key, to_json_term(Agg)} | CurrKeys]}, + [{Mod, NewMod} | proplists:delete(Mod, Acc)] + end, [], Stats), + {Ret} + end. + +get(Key) -> + ?MODULE:get(Key, 0). +get(Key, Time) when is_binary(Time) -> + ?MODULE:get(Key, list_to_integer(binary_to_list(Time))); +get(Key, Time) when is_atom(Time) -> + ?MODULE:get(Key, list_to_integer(atom_to_list(Time))); +get(Key, Time) when is_integer(Time) -> + case ets:lookup(?MODULE, {make_key(Key), Time}) of + [] -> #aggregate{seconds=Time}; + [{_, Agg}] -> Agg + end. + +get_json(Key) -> + get_json(Key, 0). +get_json(Key, Time) -> + to_json_term(?MODULE:get(Key, Time)). + +collect_sample() -> + gen_server:call(?MODULE, collect_sample, infinity). + + +init(StatDescsFileName) -> + % Create an aggregate entry for each {description, rate} pair. + ets:new(?MODULE, [named_table, set, protected]), + SampleStr = couch_config:get("stats", "samples", "[0]"), + {ok, Samples} = couch_util:parse_term(SampleStr), + {ok, Descs} = file:consult(StatDescsFileName), + lists:foreach(fun({Sect, Key, Value}) -> + lists:foreach(fun(Secs) -> + Agg = #aggregate{ + description=list_to_binary(Value), + seconds=Secs + }, + ets:insert(?MODULE, {{{Sect, Key}, Secs}, Agg}) + end, Samples) + end, Descs), + + Self = self(), + ok = couch_config:register( + fun("stats", _) -> exit(Self, config_change) end + ), + + Rate = list_to_integer(couch_config:get("stats", "rate", "1000")), + % TODO: Add timer_start to kernel start options. 
+ {ok, TRef} = timer:apply_after(Rate, ?MODULE, collect_sample, []), + {ok, {TRef, Rate}}. + +terminate(_Reason, {TRef, _Rate}) -> + timer:cancel(TRef), + ok. + +handle_call(collect_sample, _, {OldTRef, SampleInterval}) -> + timer:cancel(OldTRef), + {ok, TRef} = timer:apply_after(SampleInterval, ?MODULE, collect_sample, []), + % Gather new stats values to add. + Incs = lists:map(fun({Key, Value}) -> + {Key, {incremental, Value}} + end, couch_stats_collector:all(incremental)), + Abs = lists:map(fun({Key, Values}) -> + couch_stats_collector:clear(Key), + Values2 = case Values of + X when is_list(X) -> X; + Else -> [Else] + end, + {_, Mean} = lists:foldl(fun(Val, {Count, Curr}) -> + {Count+1, Curr + (Val - Curr) / (Count+1)} + end, {0, 0}, Values2), + {Key, {absolute, Mean}} + end, couch_stats_collector:all(absolute)), + + Values = Incs ++ Abs, + Now = erlang:now(), + lists:foreach(fun({{Key, Rate}, Agg}) -> + NewAgg = case proplists:lookup(Key, Values) of + none -> + rem_values(Now, Agg); + {Key, {Type, Value}} -> + NewValue = new_value(Type, Value, Agg#aggregate.current), + Agg2 = add_value(Now, NewValue, Agg), + rem_values(Now, Agg2) + end, + ets:insert(?MODULE, {{Key, Rate}, NewAgg}) + end, ets:tab2list(?MODULE)), + {reply, ok, {TRef, SampleInterval}}. + +handle_cast(stop, State) -> + {stop, normal, State}. + +handle_info(_Info, State) -> + {noreply, State}. + +code_change(_OldVersion, State, _Extra) -> + {ok, State}. + + +new_value(incremental, Value, null) -> + Value; +new_value(incremental, Value, Current) -> + Value - Current; +new_value(absolute, Value, _Current) -> + Value. 

%% First sample for an aggregate: every running statistic collapses to the
%% sample value itself. Windowless aggregates (seconds == 0) keep no samples.
add_value(Time, Value, #aggregate{count = Count, seconds = Secs} = Agg)
        when Count < 1 ->
    Samples = case Secs of
    0 -> [];
    _ -> [{Time, Value}]
    end,
    Agg#aggregate{
        count = 1,
        current = Value,
        sum = Value,
        mean = Value,
        variance = 0.0,
        stddev = null,
        min = Value,
        max = Value,
        samples = Samples
    };
%% Subsequent samples: online (Welford-style) update of mean and variance.
add_value(Time, Value, Agg) ->
    #aggregate{
        count = Count,
        current = Current,
        sum = Sum,
        mean = Mean,
        variance = Variance,
        samples = Samples
    } = Agg,

    NewCount = Count + 1,
    NewMean = Mean + (Value - Mean) / NewCount,
    NewVariance = Variance + (Value - Mean) * (Value - NewMean),
    StdDev = case NewCount > 1 of
    false -> null;
    _ -> math:sqrt(NewVariance / (NewCount - 1))
    end,
    Agg2 = Agg#aggregate{
        count = NewCount,
        current = Current + Value,
        sum = Sum + Value,
        mean = NewMean,
        variance = NewVariance,
        stddev = StdDev,
        min = lists:min([Agg#aggregate.min, Value]),
        max = lists:max([Agg#aggregate.max, Value])
    },
    case Agg2#aggregate.seconds of
    0 -> Agg2;
    _ -> Agg2#aggregate{samples = [{Time, Value} | Samples]}
    end.

%% Drop samples that fell outside the aggregate's time window, undoing their
%% contribution to the running statistics one by one.
rem_values(Time, Agg) ->
    #aggregate{seconds = Seconds, samples = Samples} = Agg,
    InWindow = fun({When, _Value}) ->
        timer:now_diff(Time, When) =< (Seconds * 1000000)
    end,
    {Keep, Remove} = lists:splitwith(InWindow, Samples),
    Agg2 = lists:foldl(
        fun({_, Value}, Acc) -> rem_value(Value, Acc) end,
        Agg, Remove),
    Agg2#aggregate{samples = Keep}.

%% Remove one value from the running statistics (inverse of add_value/3).
%% Removing the last value resets the aggregate, keeping only its window.
rem_value(_Value, #aggregate{count = Count, seconds = Secs}) when Count =< 1 ->
    #aggregate{seconds = Secs};
rem_value(Value, Agg) ->
    #aggregate{
        count = Count,
        sum = Sum,
        mean = Mean,
        variance = Variance
    } = Agg,

    OldMean = (Mean * Count - Value) / (Count - 1),
    OldVariance = Variance - (Value - OldMean) * (Value - Mean),
    OldCount = Count - 1,
    StdDev = case OldCount > 1 of
    false -> null;
    _ -> math:sqrt(clamp_value(OldVariance / (OldCount - 1)))
    end,
    Agg#aggregate{
        count = OldCount,
        sum = Sum - Value,
        mean = clamp_value(OldMean),
        variance = clamp_value(OldVariance),
        stddev = StdDev
    }.

%% Render an aggregate as an EJSON object. For windowed aggregates min/max
%% are recomputed from the live samples rather than the running extremes.
to_json_term(Agg) ->
    {Min, Max} = case Agg#aggregate.seconds > 0 of
    false ->
        {Agg#aggregate.min, Agg#aggregate.max};
    _ ->
        case Agg#aggregate.samples of
        [_ | _] = TimedSamples ->
            Values = [V || {_Time, V} <- TimedSamples],
            {lists:min(Values), lists:max(Values)};
        _ ->
            {null, null}
        end
    end,
    {[
        {description, Agg#aggregate.description},
        {current, round_value(Agg#aggregate.sum)},
        {sum, round_value(Agg#aggregate.sum)},
        {mean, round_value(Agg#aggregate.mean)},
        {stddev, round_value(Agg#aggregate.stddev)},
        {min, Min},
        {max, Max}
    ]}.

%% Integer sample-rate keys are stored as atoms so key tuples stay uniform.
make_key({Mod, Val}) when is_integer(Val) ->
    {Mod, list_to_atom(integer_to_list(Val))};
make_key(Key) ->
    Key.

%% Round numbers to three decimals; non-numbers (null) and 0 pass through.
round_value(Val) when not is_number(Val) ->
    Val;
round_value(Val) when Val == 0 ->
    Val;
round_value(Val) ->
    erlang:round(Val * 1000.0) / 1000.0.

%% Guard against tiny negative values introduced by float cancellation.
clamp_value(Val) when Val > 0.00000000000001 ->
    Val;
clamp_value(_) ->
    0.0.
diff --git a/apps/couch/src/couch_stats_collector.erl b/apps/couch/src/couch_stats_collector.erl
new file mode 100644
index 00000000..f7b9bb48
--- /dev/null
+++ b/apps/couch/src/couch_stats_collector.erl
@@ -0,0 +1,136 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License.
You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

% todo
% - remove existance check on increment(), decrement() and record(). have
% modules initialize counters on startup.

-module(couch_stats_collector).

-behaviour(gen_server).

-export([start/0, stop/0]).
-export([all/0, all/1, get/1, increment/1, decrement/1, record/2, clear/1]).
-export([track_process_count/1, track_process_count/2]).

-export([init/1, terminate/2, code_change/3]).
-export([handle_call/3, handle_cast/2, handle_info/2]).

-define(HIT_TABLE, stats_hit_table).
-define(ABS_TABLE, stats_abs_table).

start() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

stop() ->
    gen_server:call(?MODULE, stop).

%% All statistics: incremental counters plus grouped absolute samples.
all() ->
    ets:tab2list(?HIT_TABLE) ++ abs_to_list().

all(incremental) ->
    ets:tab2list(?HIT_TABLE);
all(absolute) ->
    abs_to_list().

%% Look up a single statistic: the counter value for incremental keys, the
%% list of recorded values for absolute keys, or nil when unknown.
get(Key) ->
    case ets:lookup(?HIT_TABLE, Key) of
    [{_, Counter}] ->
        Counter;
    [] ->
        case ets:lookup(?ABS_TABLE, Key) of
        [] ->
            nil;
        AbsVals ->
            [Value || {_, Value} <- AbsVals]
        end
    end.

%% Bump a counter, lazily creating the row on the first hit. The catches
%% make this safe to call before the collector's tables exist.
increment(Key) ->
    Key2 = make_key(Key),
    case catch ets:update_counter(?HIT_TABLE, Key2, 1) of
    {'EXIT', {badarg, _}} ->
        catch ets:insert(?HIT_TABLE, {Key2, 1}),
        ok;
    _ ->
        ok
    end.

decrement(Key) ->
    Key2 = make_key(Key),
    case catch ets:update_counter(?HIT_TABLE, Key2, -1) of
    {'EXIT', {badarg, _}} ->
        catch ets:insert(?HIT_TABLE, {Key2, -1}),
        ok;
    _ ->
        ok
    end.

%% Record one absolute sample (the table is a duplicate_bag, so every call
%% adds another value under the same key).
record(Key, Value) ->
    catch ets:insert(?ABS_TABLE, {make_key(Key), Value}).

%% Forget all recorded (absolute) values for Key.
clear(Key) ->
    catch ets:delete(?ABS_TABLE, make_key(Key)).

%% Count the calling process in Stat, decrementing again when it dies.
track_process_count(Stat) ->
    track_process_count(self(), Stat).

track_process_count(Pid, Stat) ->
    MonitorFun = fun() ->
        Ref = erlang:monitor(process, Pid),
        receive {'DOWN', Ref, _, _, _} -> ok end,
        couch_stats_collector:decrement(Stat)
    end,
    % only spawn the watcher if the increment actually took effect
    case (catch couch_stats_collector:increment(Stat)) of
    ok -> spawn(MonitorFun);
    _ -> ok
    end.


init(_) ->
    % public tables: client processes update counters directly, without a
    % round-trip through this server
    ets:new(?HIT_TABLE, [named_table, set, public]),
    ets:new(?ABS_TABLE, [named_table, duplicate_bag, public]),
    {ok, nil}.

terminate(_Reason, _State) ->
    ok.

handle_call(stop, _, State) ->
    {stop, normal, stopped, State}.

handle_cast(foo, State) ->
    {noreply, State}.

handle_info(_Info, State) ->
    {noreply, State}.

code_change(_OldVersion, State, _Extra) ->
    {ok, State}.


%% Integer keys are stored as atoms so key tuples stay uniform.
make_key({Module, Key}) when is_integer(Key) ->
    {Module, list_to_atom(integer_to_list(Key))};
make_key(Key) ->
    Key.

%% Group the duplicate_bag rows into {Key, [Values]} pairs. Sorting first
%% guarantees equal keys are adjacent, so a single fold pass suffices.
abs_to_list() ->
    SortedKVs = lists:sort(ets:tab2list(?ABS_TABLE)),
    lists:foldl(fun({Key, Val}, Acc) ->
        case Acc of
        [{Key, Prev} | Rest] ->
            [{Key, [Val | Prev]} | Rest];
        _ ->
            [{Key, [Val]} | Acc]
        end
    end, [], SortedKVs).
\ No newline at end of file diff --git a/apps/couch/src/couch_stream.erl b/apps/couch/src/couch_stream.erl new file mode 100644 index 00000000..04c17770 --- /dev/null +++ b/apps/couch/src/couch_stream.erl @@ -0,0 +1,319 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_stream). +-behaviour(gen_server). + + +-define(FILE_POINTER_BYTES, 8). +-define(FILE_POINTER_BITS, 8*(?FILE_POINTER_BYTES)). + +-define(STREAM_OFFSET_BYTES, 4). +-define(STREAM_OFFSET_BITS, 8*(?STREAM_OFFSET_BYTES)). + +-define(HUGE_CHUNK, 1000000000). % Huge chuck size when reading all in one go + +-define(DEFAULT_STREAM_CHUNK, 16#00100000). % 1 meg chunks when streaming data + +-export([open/1, open/3, close/1, write/2, foldl/4, foldl/5, foldl_decode/6, + old_foldl/5,old_copy_to_new_stream/4]). +-export([copy_to_new_stream/3,old_read_term/2]). +-export([init/1, terminate/2, handle_call/3]). +-export([handle_cast/2,code_change/3,handle_info/2]). + +-include("couch_db.hrl"). + +-record(stream, + {fd = 0, + written_pointers=[], + buffer_list = [], + buffer_len = 0, + max_buffer = 4096, + written_len = 0, + md5, + % md5 of the content without any transformation applied (e.g. compression) + % needed for the attachment upload integrity check (ticket 558) + identity_md5, + identity_len = 0, + encoding_fun, + end_encoding_fun + }). + + +%%% Interface functions %%% + +open(Fd) -> + open(Fd, identity, []). 

%% Open a stream writer on Fd with the given content encoding (identity or
%% gzip) and options (e.g. compression_level).
open(Fd, Encoding, Options) ->
    gen_server:start_link(couch_stream, {Fd, Encoding, Options}, []).

close(Pid) ->
    gen_server:call(Pid, close, infinity).

%% Copy a stream's chunks verbatim into a brand new stream on DestFd.
copy_to_new_stream(Fd, PosList, DestFd) ->
    {ok, Dest} = open(DestFd),
    foldl(Fd, PosList,
        fun(Bin, _) ->
            ok = write(Dest, Bin)
        end, ok),
    close(Dest).


% 09 UPGRADE CODE
old_copy_to_new_stream(Fd, Pos, Len, DestFd) ->
    {ok, Dest} = open(DestFd),
    old_foldl(Fd, Pos, Len,
        fun(Bin, _) ->
            ok = write(Dest, Bin)
        end, ok),
    close(Dest).

% 09 UPGRADE CODE
old_foldl(_Fd, null, 0, _Fun, Acc) ->
    Acc;
old_foldl(Fd, OldPointer, Len, Fun, Acc) when is_tuple(OldPointer) ->
    {ok, Acc2, _} =
        old_stream_data(Fd, OldPointer, Len, ?DEFAULT_STREAM_CHUNK, Fun, Acc),
    Acc2.

%% Fold Fun over each chunk of the stream identified by its position list.
foldl(_Fd, [], _Fun, Acc) ->
    Acc;
foldl(Fd, [Pos | Rest], Fun, Acc) ->
    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
    foldl(Fd, Rest, Fun, Fun(Bin, Acc)).

%% As foldl/4, but additionally verifies the stream's MD5 while folding
%% (a mismatch crashes with badmatch). An empty Md5 skips verification.
foldl(Fd, PosList, <<>>, Fun, Acc) ->
    foldl(Fd, PosList, Fun, Acc);
foldl(Fd, PosList, Md5, Fun, Acc) ->
    foldl(Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc).

%% Fold over a stream while decoding it (gzip or identity). The MD5 check
%% runs over the *encoded* bytes; Fun sees the decoded chunks.
foldl_decode(Fd, PosList, Md5, Enc, Fun, Acc) ->
    {DecDataFun, DecEndFun} = case Enc of
    gzip ->
        ungzip_init();
    identity ->
        identity_enc_dec_funs()
    end,
    Result = foldl_decode(
        DecDataFun, Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc
    ),
    DecEndFun(),
    Result.

%% MD5-checking fold: the single-element clause finalizes and asserts the
%% digest before delivering the last chunk.
foldl(_Fd, [], Md5, Md5Acc, _Fun, Acc) ->
    Md5 = couch_util:md5_final(Md5Acc),
    Acc;
foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
    Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, Bin)),
    Fun(Bin, Acc);
foldl(Fd, [Pos | Rest], Md5, Md5Acc, Fun, Acc) ->
    {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
    foldl(Fd, Rest, Md5, couch_util:md5_update(Md5Acc, Bin), Fun, Fun(Bin, Acc)).
+ +foldl_decode(_DecFun, _Fd, [], Md5, Md5Acc, _Fun, Acc) -> + Md5 = couch_util:md5_final(Md5Acc), + Acc; +foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc) -> + {ok, EncBin} = couch_file:pread_iolist(Fd, Pos), + Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, EncBin)), + Bin = DecFun(EncBin), + Fun(Bin, Acc); +foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) -> + {ok, EncBin} = couch_file:pread_iolist(Fd, Pos), + Bin = DecFun(EncBin), + Md5Acc2 = couch_util:md5_update(Md5Acc, EncBin), + foldl_decode(DecFun, Fd, Rest, Md5, Md5Acc2, Fun, Fun(Bin, Acc)). + +gzip_init(Options) -> + case couch_util:get_value(compression_level, Options, 0) of + Lvl when Lvl >= 1 andalso Lvl =< 9 -> + Z = zlib:open(), + % 15 = ?MAX_WBITS (defined in the zlib module) + % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1 + ok = zlib:deflateInit(Z, Lvl, deflated, 16 + 15, 8, default), + { + fun(Data) -> + zlib:deflate(Z, Data) + end, + fun() -> + Last = zlib:deflate(Z, [], finish), + ok = zlib:deflateEnd(Z), + ok = zlib:close(Z), + Last + end + }; + _ -> + identity_enc_dec_funs() + end. + +ungzip_init() -> + Z = zlib:open(), + zlib:inflateInit(Z, 16 + 15), + { + fun(Data) -> + zlib:inflate(Z, Data) + end, + fun() -> + ok = zlib:inflateEnd(Z), + ok = zlib:close(Z) + end + }. + +identity_enc_dec_funs() -> + { + fun(Data) -> Data end, + fun() -> [] end + }. + +write(_Pid, <<>>) -> + ok; +write(Pid, Bin) -> + gen_server:call(Pid, {write, Bin}, infinity). + + +init({Fd, Encoding, Options}) -> + {EncodingFun, EndEncodingFun} = case Encoding of + identity -> + identity_enc_dec_funs(); + gzip -> + gzip_init(Options) + end, + {ok, #stream{ + fd=Fd, + md5=couch_util:md5_init(), + identity_md5=couch_util:md5_init(), + encoding_fun=EncodingFun, + end_encoding_fun=EndEncodingFun + } + }. + +terminate(_Reason, _Stream) -> + ok. 
+ +handle_call({write, Bin}, _From, Stream) -> + BinSize = iolist_size(Bin), + #stream{ + fd = Fd, + written_len = WrittenLen, + written_pointers = Written, + buffer_len = BufferLen, + buffer_list = Buffer, + max_buffer = Max, + md5 = Md5, + identity_md5 = IdenMd5, + identity_len = IdenLen, + encoding_fun = EncodingFun} = Stream, + if BinSize + BufferLen > Max -> + WriteBin = lists:reverse(Buffer, [Bin]), + IdenMd5_2 = couch_util:md5_update(IdenMd5, WriteBin), + case EncodingFun(WriteBin) of + [] -> + % case where the encoder did some internal buffering + % (zlib does it for example) + WrittenLen2 = WrittenLen, + Md5_2 = Md5, + Written2 = Written; + WriteBin2 -> + {ok, Pos} = couch_file:append_binary(Fd, WriteBin2), + WrittenLen2 = WrittenLen + iolist_size(WriteBin2), + Md5_2 = couch_util:md5_update(Md5, WriteBin2), + Written2 = [Pos|Written] + end, + + {reply, ok, Stream#stream{ + written_len=WrittenLen2, + written_pointers=Written2, + buffer_list=[], + buffer_len=0, + md5=Md5_2, + identity_md5=IdenMd5_2, + identity_len=IdenLen + BinSize}}; + true -> + {reply, ok, Stream#stream{ + buffer_list=[Bin|Buffer], + buffer_len=BufferLen + BinSize, + identity_len=IdenLen + BinSize}} + end; +handle_call(close, _From, Stream) -> + #stream{ + fd = Fd, + written_len = WrittenLen, + written_pointers = Written, + buffer_list = Buffer, + md5 = Md5, + identity_md5 = IdenMd5, + identity_len = IdenLen, + encoding_fun = EncodingFun, + end_encoding_fun = EndEncodingFun} = Stream, + + WriteBin = lists:reverse(Buffer), + IdenMd5Final = couch_util:md5_final(couch_util:md5_update(IdenMd5, WriteBin)), + WriteBin2 = EncodingFun(WriteBin) ++ EndEncodingFun(), + Md5Final = couch_util:md5_final(couch_util:md5_update(Md5, WriteBin2)), + Result = case WriteBin2 of + [] -> + {lists:reverse(Written), WrittenLen, IdenLen, Md5Final, IdenMd5Final}; + _ -> + {ok, Pos} = couch_file:append_binary(Fd, WriteBin2), + StreamInfo = lists:reverse(Written, [Pos]), + StreamLen = WrittenLen + 
iolist_size(WriteBin2), + {StreamInfo, StreamLen, IdenLen, Md5Final, IdenMd5Final} + end, + {stop, normal, Result, Stream}. + +handle_cast(_Msg, State) -> + {noreply,State}. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +handle_info(_Info, State) -> + {noreply, State}. + + + +% 09 UPGRADE CODE +old_read_term(Fd, Sp) -> + {ok, <<TermLen:(?STREAM_OFFSET_BITS)>>, Sp2} + = old_read(Fd, Sp, ?STREAM_OFFSET_BYTES), + {ok, Bin, _Sp3} = old_read(Fd, Sp2, TermLen), + {ok, binary_to_term(Bin)}. + +old_read(Fd, Sp, Num) -> + {ok, RevBin, Sp2} = old_stream_data(Fd, Sp, Num, ?HUGE_CHUNK, fun(Bin, Acc) -> [Bin | Acc] end, []), + Bin = list_to_binary(lists:reverse(RevBin)), + {ok, Bin, Sp2}. + +% 09 UPGRADE CODE +old_stream_data(_Fd, Sp, 0, _MaxChunk, _Fun, Acc) -> + {ok, Acc, Sp}; +old_stream_data(Fd, {Pos, 0}, Num, MaxChunk, Fun, Acc) -> + {ok, <<NextPos:(?FILE_POINTER_BITS), NextOffset:(?STREAM_OFFSET_BITS)>>} + = couch_file:old_pread(Fd, Pos, ?FILE_POINTER_BYTES + ?STREAM_OFFSET_BYTES), + Sp = {NextPos, NextOffset}, + % Check NextPos is past current Pos (this is always true in a stream) + % Guards against potential infinite loops caused by corruption. + case NextPos > Pos of + true -> ok; + false -> throw({error, stream_corruption}) + end, + old_stream_data(Fd, Sp, Num, MaxChunk, Fun, Acc); +old_stream_data(Fd, {Pos, Offset}, Num, MaxChunk, Fun, Acc) -> + ReadAmount = lists:min([MaxChunk, Num, Offset]), + {ok, Bin} = couch_file:old_pread(Fd, Pos, ReadAmount), + Sp = {Pos + ReadAmount, Offset - ReadAmount}, + old_stream_data(Fd, Sp, Num - ReadAmount, MaxChunk, Fun, Fun(Bin, Acc)). 
+ + +% Tests moved to tests/etap/050-stream.t + diff --git a/apps/couch/src/couch_task_status.erl b/apps/couch/src/couch_task_status.erl new file mode 100644 index 00000000..c4487dc4 --- /dev/null +++ b/apps/couch/src/couch_task_status.erl @@ -0,0 +1,124 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_task_status). +-behaviour(gen_server). + +% This module allows is used to track the status of long running tasks. +% Long running tasks register (add_task/3) then update their status (update/1) +% and the task and status is added to tasks list. When the tracked task dies +% it will be automatically removed the tracking. To get the tasks list, use the +% all/0 function + +-export([start_link/0, stop/0]). +-export([all/0, add_task/3, update/1, update/2, set_update_frequency/1]). + +-export([init/1, terminate/2, code_change/3]). +-export([handle_call/3, handle_cast/2, handle_info/2]). + +-import(couch_util, [to_binary/1]). + +-include("couch_db.hrl"). + + +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + + +stop() -> + gen_server:cast(?MODULE, stop). + + +all() -> + gen_server:call(?MODULE, all). + + +add_task(Type, TaskName, StatusText) -> + put(task_status_update, {{0, 0, 0}, 0}), + Msg = { + add_task, + to_binary(Type), + to_binary(TaskName), + to_binary(StatusText) + }, + gen_server:call(?MODULE, Msg). + + +set_update_frequency(Msecs) -> + put(task_status_update, {{0, 0, 0}, Msecs * 1000}). 
% Convenience wrapper: set the calling task's status to StatusText verbatim.
update(StatusText) ->
    update("~s", [StatusText]).

% Format and record a new status message for the calling task. Updates are
% rate-limited by the frequency stored in the caller's process dictionary
% (seeded by add_task/3, adjustable via set_update_frequency/1).
update(Format, Data) ->
    {LastUpdateTime, Frequency} = get(task_status_update),
    Now = now(),
    case timer:now_diff(Now, LastUpdateTime) >= Frequency of
    true ->
        put(task_status_update, {Now, Frequency}),
        StatusMsg = ?l2b(io_lib:format(Format, Data)),
        gen_server:cast(?MODULE, {update_status, self(), StatusMsg});
    false ->
        ok
    end.


init([]) ->
    % one row per tracked task process: {Pid, {Type, TaskName, StatusText}}
    ets:new(?MODULE, [ordered_set, protected, named_table]),
    {ok, nil}.


terminate(_Reason, _State) ->
    ok.


handle_call({add_task, Type, TaskName, StatusText}, {FromPid, _Tag}, State) ->
    case ets:lookup(?MODULE, FromPid) of
    [] ->
        true = ets:insert(?MODULE, {FromPid, {Type, TaskName, StatusText}}),
        % monitor so the row is dropped automatically when the task dies
        erlang:monitor(process, FromPid),
        {reply, ok, State};
    [_AlreadyRegistered] ->
        {reply, {add_task_error, already_registered}, State}
    end;
handle_call(all, _From, State) ->
    Tasks = [
        [
            {type, Type},
            {task, Task},
            {status, Status},
            {pid, ?l2b(pid_to_list(Pid))}
        ]
        ||
        {Pid, {Type, Task, Status}} <- ets:tab2list(?MODULE)
    ],
    {reply, Tasks, State}.


handle_cast({update_status, Pid, StatusText}, State) ->
    [{Pid, {Type, TaskName, _OldStatus}}] = ets:lookup(?MODULE, Pid),
    ?LOG_DEBUG("New task status for ~s: ~s",[TaskName, StatusText]),
    true = ets:insert(?MODULE, {Pid, {Type, TaskName, StatusText}}),
    {noreply, State};
handle_cast(stop, State) ->
    {stop, normal, State}.

handle_info({'DOWN', _MonitorRef, _Type, Pid, _Info}, State) ->
    % the tracked task process died; remove it from the table
    % (the monitor is already consumed by this 'DOWN' message)
    ets:delete(?MODULE, Pid),
    {noreply, State}.


code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
+ diff --git a/apps/couch/src/couch_util.erl b/apps/couch/src/couch_util.erl new file mode 100644 index 00000000..8217a268 --- /dev/null +++ b/apps/couch/src/couch_util.erl @@ -0,0 +1,454 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_util). + +-export([priv_dir/0, start_driver/1, normpath/1]). +-export([should_flush/0, should_flush/1, to_existing_atom/1]). +-export([rand32/0, implode/2, collate/2, collate/3]). +-export([abs_pathname/1,abs_pathname/2, trim/1, ascii_lower/1]). +-export([encodeBase64Url/1, decodeBase64Url/1]). +-export([to_hex/1, parse_term/1, dict_find/3]). +-export([file_read_size/1, get_nested_json_value/2, json_user_ctx/1]). +-export([proplist_apply_field/2, json_apply_field/2]). +-export([to_binary/1, to_integer/1, to_list/1, url_encode/1]). +-export([json_encode/1, json_decode/1]). +-export([verify/2,simple_call/2,shutdown_sync/1]). +-export([compressible_att_type/1]). +-export([get_value/2, get_value/3]). +-export([md5/1, md5_init/0, md5_update/2, md5_final/1]). +-export([reorder_results/2]). + +-include("couch_db.hrl"). +-include_lib("kernel/include/file.hrl"). + +% arbitrarily chosen amount of memory to use before flushing to disk +-define(FLUSH_MAX_MEM, 10000000). + +priv_dir() -> + case code:priv_dir(couch) of + {error, bad_name} -> + % small hack, in dev mode "app" is couchdb. Fixing requires + % renaming src/couch to src/couch. Not really worth the hassle. + % -Damien + code:priv_dir(couchdb); + Dir -> Dir + end. 
% Load (or reload, if already present) the ICU collation driver from
% LibDir. Exits the calling process with a descriptive string on failure.
start_driver(LibDir) ->
    DriverName = "couch_icu_driver",
    case erl_ddll:load_driver(LibDir, DriverName) of
    ok ->
        ok;
    {error, already_loaded} ->
        ok = erl_ddll:reload_driver(LibDir, DriverName);
    {error, LoadError} ->
        exit(erl_ddll:format_error(LoadError))
    end.

% Normalize a pathname by resolving "." and ".." components.
normpath(Path) ->
    join_normalized(filename:split(Path), []).

join_normalized([], Acc) ->
    filename:join(lists:reverse(Acc));
join_normalized([".." | Rest], [_Parent | Acc]) ->
    join_normalized(Rest, Acc);
join_normalized(["." | Rest], Acc) ->
    join_normalized(Rest, Acc);
join_normalized([Segment | Rest], Acc) ->
    join_normalized(Rest, [Segment | Acc]).

% Like list_to_existing_atom/1 but accepts lists, binaries and atoms, and
% hands back the original value (instead of raising) when no such atom
% already exists.
to_existing_atom(V) when is_atom(V) ->
    V;
to_existing_atom(V) when is_binary(V) ->
    try list_to_existing_atom(binary_to_list(V)) catch _:_ -> V end;
to_existing_atom(V) when is_list(V) ->
    try list_to_existing_atom(V) catch _:_ -> V end.

% Synchronously shut a process down, waiting for it to exit. A non-pid
% argument is a no-op.
shutdown_sync(Pid) when is_pid(Pid) ->
    MRef = erlang:monitor(process, Pid),
    try
        catch unlink(Pid),
        catch exit(Pid, shutdown),
        receive
        {'DOWN', MRef, _, _, _} ->
            ok
        end
    after
        erlang:demonitor(MRef, [flush])
    end;
shutdown_sync(_NotAPid) ->
    ok.

% Send {self(), Message} to Pid and await a {Pid, Result} reply, exiting
% with the process's exit reason if it dies first.
simple_call(Pid, Message) ->
    MRef = erlang:monitor(process, Pid),
    try
        Pid ! {self(), Message},
        receive
        {Pid, Result} ->
            Result;
        {'DOWN', MRef, _, _, Reason} ->
            exit(Reason)
        end
    after
        erlang:demonitor(MRef, [flush])
    end.

% Render a binary or byte list as a lowercase hexadecimal string.
to_hex(Bin) when is_binary(Bin) ->
    to_hex(binary_to_list(Bin));
to_hex(Bytes) ->
    lists:flatmap(
        fun(Byte) -> [hex_digit(Byte div 16), hex_digit(Byte rem 16)] end,
        Bytes).

hex_digit(N) when N < 10 -> $0 + N;
hex_digit(N) -> $a + N - 10.


% Parse a string or binary containing one Erlang term (without the
% trailing dot); returns {ok, Term} or an erl_parse error tuple.
parse_term(Bin) when is_binary(Bin) ->
    parse_term(binary_to_list(Bin));
parse_term(Chars) ->
    {ok, Tokens, _EndLine} = erl_scan:string(Chars ++ "."),
    erl_parse:parse_term(Tokens).
% proplists-style lookup over {Key, Value} pairs; undefined when absent.
get_value(Key, List) ->
    get_value(Key, List, undefined).

% proplists-style lookup over {Key, Value} pairs with an explicit default.
get_value(Key, List, Default) ->
    case lists:keyfind(Key, 1, List) of
    {Key, Value} ->
        Value;
    false ->
        Default
    end.

% Descend into nested EJSON objects ({Props} tuples) following Keys.
% Throws {not_found, _} when a key is missing or when keys remain but the
% current value is not an object.
get_nested_json_value(Value, []) ->
    Value;
get_nested_json_value({Props}, [Key | RestKeys]) ->
    case couch_util:get_value(Key, Props, nil) of
    nil ->
        throw({not_found, <<"missing json key: ", Key/binary>>});
    SubValue ->
        get_nested_json_value(SubValue, RestKeys)
    end;
get_nested_json_value(_NotAnObject, _Keys) ->
    throw({not_found, json_mismatch}).

% Replace (or add) field H in proplist L, returning the new proplist.
proplist_apply_field(H, L) ->
    {Updated} = json_apply_field(H, {L}),
    Updated.

% Replace (or add) a field in an EJSON object; every existing occurrence
% of the key is removed first. Surviving fields come back in reverse order.
json_apply_field(Field, {Props}) ->
    json_apply_field(Field, Props, []).

json_apply_field({Key, NewValue}, [], Acc) ->
    {[{Key, NewValue} | Acc]};
json_apply_field({Key, NewValue}, [{Key, _Replaced} | RestProps], Acc) ->
    json_apply_field({Key, NewValue}, RestProps, Acc);
json_apply_field({Key, NewValue}, [{OtherKey, OtherValue} | RestProps], Acc) ->
    json_apply_field({Key, NewValue}, RestProps, [{OtherKey, OtherValue} | Acc]).

% Build the EJSON userCtx object describing a db's requesting user.
json_user_ctx(#db{name=DbName, user_ctx=Ctx}) ->
    #user_ctx{name=Name, roles=Roles} = Ctx,
    {[{<<"db">>, DbName},
      {<<"name">>, Name},
      {<<"roles">>, Roles}]}.


% A uniformly random 32-bit unsigned integer.
rand32() ->
    crypto:rand_uniform(0, 16#100000000).

% Fully qualify a (possibly relative) pathname, preserving any trailing
% command arguments, e.g. "../foo/bar -baz" resolved against the cwd.
abs_pathname(" " ++ Rest) ->
    % drop leading whitespace
    abs_pathname(Rest);
abs_pathname([$/ | _] = AlreadyAbsolute) ->
    AlreadyAbsolute;
abs_pathname(Relative) ->
    {ok, Cwd} = file:get_cwd(),
    {PathPart, ArgsPart} = separate_cmd_args(Relative, ""),
    abs_pathname(PathPart, Cwd) ++ ArgsPart.

abs_pathname(Filename, Dir) ->
    Absolute = filename:absname(Filename, Dir ++ "/"),
    Joined = filename:join(fix_path_list(filename:split(Absolute), [])),
    % keep a trailing slash when the input named a directory
    case string:right(Filename, 1) of
    "/" ->
        Joined ++ "/";
    "\\" ->
        Joined ++ "/";
    _Else ->
        Joined
    end.
% If Filename is an executable invocation with arguments, separate it into
% {Executable, Arguments}. A backslash-escaped space ("\ ") is kept as part
% of the executable path, e.g.
%   "./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
separate_cmd_args("", CmdAcc) ->
    {lists:reverse(CmdAcc), ""};
separate_cmd_args("\\ " ++ Rest, CmdAcc) -> % escaped space: part of the command
    separate_cmd_args(Rest, " \\" ++ CmdAcc);
separate_cmd_args(" " ++ Rest, CmdAcc) ->
    {lists:reverse(CmdAcc), " " ++ Rest};
separate_cmd_args([Char|Rest], CmdAcc) ->
    separate_cmd_args(Rest, [Char | CmdAcc]).

% Lowercases string bytes that are the ascii characters A-Z.
% All other characters/bytes are passed through unchanged.
ascii_lower(String) ->
    ascii_lower(String, []).

ascii_lower([], Acc) ->
    lists:reverse(Acc);
% BUG FIX: the guard previously read "Char =< $B", which lowercased only
% 'A' and 'B'. The whole A-Z range is intended, per the comment above.
ascii_lower([Char | RestString], Acc) when Char >= $A, Char =< $Z ->
    ascii_lower(RestString, [Char + ($a-$A) | Acc]);
ascii_lower([Char | RestString], Acc) ->
    ascii_lower(RestString, [Char | Acc]).

% Is Char an ASCII whitespace character?
is_whitespace($\s) -> true;
is_whitespace($\t) -> true;
is_whitespace($\n) -> true;
is_whitespace($\r) -> true;
is_whitespace(_Else) -> false.


% Removes leading and trailing whitespace from a string.
trim(String) ->
    String2 = lists:dropwhile(fun is_whitespace/1, String),
    lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).

% Takes a hierarchical list of path components and removes "." components
% and ".." components together with their corresponding parent dirs.
fix_path_list([], Acc) ->
    lists:reverse(Acc);
fix_path_list([".."|Rest], [_PrevAcc|RestAcc]) ->
    fix_path_list(Rest, RestAcc);
fix_path_list(["."|Rest], Acc) ->
    fix_path_list(Rest, Acc);
fix_path_list([Dir | Rest], Acc) ->
    fix_path_list(Rest, [Dir | Acc]).


% Join the elements of List with separator Sep, returning a flat list.
implode(List, Sep) ->
    implode(List, Sep, []).

implode([], _Sep, Acc) ->
    lists:flatten(lists:reverse(Acc));
implode([H], Sep, Acc) ->
    implode([], Sep, [H|Acc]);
implode([H|T], Sep, Acc) ->
    implode(T, Sep, [Sep,H|Acc]).
% Return this process's port to the ICU collation driver, opening it on
% first use and caching it in the process dictionary.
drv_port() ->
    case get(couch_drv_port) of
    undefined ->
        Port = open_port({spawn, "couch_icu_driver"}, []),
        put(couch_drv_port, Port),
        Port;
    CachedPort ->
        CachedPort
    end.

% ICU-collate two binaries, returning -1 (lt), 0 (eq) or 1 (gt). The
% 'nocase' option selects case-insensitive comparison.
collate(A, B) ->
    collate(A, B, []).

collate(A, B, Options) when is_binary(A), is_binary(B) ->
    OpCode = case lists:member(nocase, Options) of
    true -> 1;  % case insensitive
    false -> 0  % case sensitive
    end,
    SizeA = byte_size(A),
    SizeB = byte_size(B),
    Payload = <<SizeA:32/native, A/binary, SizeB:32/native, B/binary>>,
    % the driver replies 0 for lt, 1 for eq and 2 for gt; shift to -1/0/1
    [Reply] = erlang:port_control(drv_port(), OpCode, Payload),
    Reply - 1.

% True when this process's heap plus referenced binary bytes exceed the
% threshold even after a garbage collection.
should_flush() ->
    should_flush(?FLUSH_MAX_MEM).

should_flush(MemThreshHold) ->
    case process_memory() > 2 * MemThreshHold of
    true ->
        garbage_collect(),
        process_memory() > MemThreshHold;
    false ->
        false
    end.

% Heap memory plus off-heap binary bytes referenced by this process.
process_memory() ->
    {memory, ProcMem} = process_info(self(), memory),
    {binary, BinInfoList} = process_info(self(), binary),
    BinMem = lists:foldl(
        fun({_Id, Size, _NRefs}, Acc) -> Acc + Size end, 0, BinInfoList),
    ProcMem + BinMem.

% Base64-encode using the URL-safe alphabet ('-' and '_') with padding
% stripped.
encodeBase64Url(Url) ->
    NoPadding = iolist_to_binary(re:replace(base64:encode(Url), "=+$", "")),
    NoSlash = iolist_to_binary(re:replace(NoPadding, "/", "_", [global])),
    iolist_to_binary(re:replace(NoSlash, "\\+", "-", [global])).

% Decode a URL-safe base64 value produced by encodeBase64Url/1, restoring
% the standard alphabet and padding before decoding.
decodeBase64Url(Url64) ->
    WithPlus = re:replace(iolist_to_binary(Url64), "-", "+", [global]),
    WithSlash = iolist_to_binary(
        re:replace(iolist_to_binary(WithPlus), "_", "/", [global])
    ),
    PadLen = (4 - byte_size(WithSlash) rem 4) rem 4,
    Padding = list_to_binary(lists:duplicate(PadLen, $=)),
    base64:decode(<<WithSlash/binary, Padding/binary>>).

% dict:find/2 with a default for missing keys.
dict_find(Key, Dict, DefaultValue) ->
    case dict:find(Key, Dict) of
    {ok, Found} ->
        Found;
    error ->
        DefaultValue
    end.
% Size in bytes of the file at FileName, or the {error, Reason} tuple from
% file:read_file_info/1.
file_read_size(FileName) ->
    case file:read_file_info(FileName) of
        {ok, FileInfo} ->
            FileInfo#file_info.size;
        Error -> Error
    end.

% Coerce any term to a binary. Valid iolists convert directly; any other
% term is rendered with ~p.
to_binary(V) when is_binary(V) ->
    V;
to_binary(V) when is_list(V) ->
    try
        list_to_binary(V)
    catch
        % BUG FIX: this was "catch _ ->", which only catches the throw
        % class, so the error:badarg raised by list_to_binary/1 for lists
        % that are not iolists escaped instead of reaching the ~p fallback.
        _:_ ->
            list_to_binary(io_lib:format("~p", [V]))
    end;
to_binary(V) when is_atom(V) ->
    list_to_binary(atom_to_list(V));
to_binary(V) ->
    list_to_binary(io_lib:format("~p", [V])).

% Coerce an integer, string or binary to an integer; raises badarg on
% non-numeric text.
to_integer(V) when is_integer(V) ->
    V;
to_integer(V) when is_list(V) ->
    erlang:list_to_integer(V);
to_integer(V) when is_binary(V) ->
    erlang:list_to_integer(binary_to_list(V)).

% Coerce any term to a flat list (string); non-string terms are rendered
% with ~p.
to_list(V) when is_list(V) ->
    V;
to_list(V) when is_binary(V) ->
    binary_to_list(V);
to_list(V) when is_atom(V) ->
    atom_to_list(V);
to_list(V) ->
    lists:flatten(io_lib:format("~p", [V])).

% Percent-encode every byte outside [a-zA-Z0-9] and "_.-:".
url_encode(Bin) when is_binary(Bin) ->
    url_encode(binary_to_list(Bin));
url_encode([H|T]) ->
    if
        H >= $a, $z >= H ->
            [H|url_encode(T)];
        H >= $A, $Z >= H ->
            [H|url_encode(T)];
        H >= $0, $9 >= H ->
            [H|url_encode(T)];
        H == $_; H == $.; H == $-; H == $: ->
            [H|url_encode(T)];
        true ->
            case lists:flatten(io_lib:format("~.16.0B", [H])) of
                [X, Y] ->
                    [$%, X, Y | url_encode(T)];
                [X] ->
                    [$%, $0, X | url_encode(T)]
            end
    end;
url_encode([]) ->
    [].

% Encode an EJSON term (objects as {Props}) to a JSON iolist via
% mochijson2; exits on terms mochijson2 cannot represent.
json_encode(V) ->
    Handler =
    fun({L}) when is_list(L) ->
        {struct,L};
    (Bad) ->
        exit({json_encode, {bad_term, Bad}})
    end,
    (mochijson2:encoder([{handler, Handler}]))(V).

% Decode JSON to EJSON (objects as {Props}); throws {invalid_json, V} on
% malformed input.
json_decode(V) ->
    try (mochijson2:decoder([{object_hook, fun({struct,L}) -> {L} end}]))(V)
    catch
        _Type:_Error ->
            throw({invalid_json,V})
    end.

% Accumulator for the constant-time comparison in verify/2: OR together the
% XOR of each byte pair so timing does not depend on where strings differ.
verify([X|RestX], [Y|RestY], Result) ->
    verify(RestX, RestY, (X bxor Y) bor Result);
verify([], [], Result) ->
    Result == 0.
% Constant-time equality check for two binaries or two lists; used for
% credential comparison so timing does not leak the mismatch position.
% Mismatched types or lengths return false immediately.
verify(<<X/binary>>, <<Y/binary>>) ->
    verify(binary_to_list(X), binary_to_list(Y));
verify(X, Y) when is_list(X) and is_list(Y) ->
    case length(X) == length(Y) of
    true ->
        verify(X, Y, 0);
    false ->
        false
    end;
verify(_X, _Y) -> false.

% True when MimeType matches one of the glob patterns configured under
% [attachments] compressible_types.
compressible_att_type(MimeType) when is_binary(MimeType) ->
    compressible_att_type(binary_to_list(MimeType));
compressible_att_type(MimeType) ->
    Patterns = re:split(
        couch_config:get("attachments", "compressible_types", ""),
        ", ?",
        [{return, list}]
    ),
    MatchesType = fun(Pattern) ->
        % turn the "*" glob into a regex and anchor it (ignoring blanks)
        Regexp = "^\\s*" ++
            re:replace(Pattern, "\\*", ".*", [{return, list}]) ++ "\\s*$",
        case re:run(MimeType, Regexp, [caseless]) of
        {match, _} ->
            true;
        _ ->
            false
        end
    end,
    lists:any(MatchesType, [P || P <- Patterns, P /= []]).

% MD5 wrappers: prefer the crypto NIFs, falling back to the erlang BIFs
% when the crypto application is unavailable.

-spec md5(Data::(iolist() | binary())) -> Digest::binary().
md5(Data) ->
    try
        crypto:md5(Data)
    catch
        error:_ -> erlang:md5(Data)
    end.

-spec md5_init() -> Context::binary().
md5_init() ->
    try
        crypto:md5_init()
    catch
        error:_ -> erlang:md5_init()
    end.

-spec md5_update(Context::binary(), Data::(iolist() | binary())) ->
    NewContext::binary().
md5_update(Ctx, D) ->
    try
        crypto:md5_update(Ctx, D)
    catch
        error:_ -> erlang:md5_update(Ctx, D)
    end.

-spec md5_final(Context::binary()) -> Digest::binary().
md5_final(Ctx) ->
    try
        crypto:md5_final(Ctx)
    catch
        error:_ -> erlang:md5_final(Ctx)
    end.

% Reorder {Key, Value} results to match the order of Keys. A linear scan
% is faster for small key lists (length/1 alone costs ~0.5 ms on a 100k
% list), so fall back to a dict only for larger inputs.
reorder_results(Keys, SortedResults) when length(Keys) < 100 ->
    [couch_util:get_value(Key, SortedResults) || Key <- Keys];
reorder_results(Keys, SortedResults) ->
    KeyDict = dict:from_list(SortedResults),
    [dict:fetch(Key, KeyDict) || Key <- Keys].
diff --git a/apps/couch/src/couch_uuids.erl b/apps/couch/src/couch_uuids.erl new file mode 100644 index 00000000..e1851e1d --- /dev/null +++ b/apps/couch/src/couch_uuids.erl @@ -0,0 +1,95 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. +-module(couch_uuids). +-include("couch_db.hrl"). + +-behaviour(gen_server). + +-export([start/0, stop/0]). +-export([new/0, random/0, utc_random/0]). + +-export([init/1, terminate/2, code_change/3]). +-export([handle_call/3, handle_cast/2, handle_info/2]). + +start() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +stop() -> + gen_server:cast(?MODULE, stop). + +new() -> + gen_server:call(?MODULE, create). + +random() -> + list_to_binary(couch_util:to_hex(crypto:rand_bytes(16))). + +utc_random() -> + Now = {_, _, Micro} = now(), + Nowish = calendar:now_to_universal_time(Now), + Nowsecs = calendar:datetime_to_gregorian_seconds(Nowish), + Then = calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}), + Prefix = io_lib:format("~14.16.0b", [(Nowsecs - Then) * 1000000 + Micro]), + list_to_binary(Prefix ++ couch_util:to_hex(crypto:rand_bytes(9))). + +init([]) -> + ok = couch_config:register( + fun("uuids", _) -> gen_server:cast(?MODULE, change) end + ), + {ok, state()}. + +terminate(_Reason, _State) -> + ok. 
+ +handle_call(create, _From, random) -> + {reply, random(), random}; +handle_call(create, _From, utc_random) -> + {reply, utc_random(), utc_random}; +handle_call(create, _From, {sequential, Pref, Seq}) -> + Result = ?l2b(Pref ++ io_lib:format("~6.16.0b", [Seq])), + case Seq >= 16#fff000 of + true -> + {reply, Result, {sequential, new_prefix(), inc()}}; + _ -> + {reply, Result, {sequential, Pref, Seq + inc()}} + end. + +handle_cast(change, _State) -> + {noreply, state()}; +handle_cast(stop, State) -> + {stop, normal, State}; +handle_cast(_Msg, State) -> + {noreply, State}. + +handle_info(_Info, State) -> + {noreply, State}. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +new_prefix() -> + couch_util:to_hex((crypto:rand_bytes(13))). + +inc() -> + crypto:rand_uniform(1, 16#ffe). + +state() -> + AlgoStr = couch_config:get("uuids", "algorithm", "random"), + case couch_util:to_existing_atom(AlgoStr) of + random -> + random; + utc_random -> + utc_random; + sequential -> + {sequential, new_prefix(), inc()}; + Unknown -> + throw({unknown_uuid_algorithm, Unknown}) + end. diff --git a/apps/couch/src/couch_view.erl b/apps/couch/src/couch_view.erl new file mode 100644 index 00000000..38c0a783 --- /dev/null +++ b/apps/couch/src/couch_view.erl @@ -0,0 +1,438 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_view). +-behaviour(gen_server). 
+ +-export([start_link/0,fold/4,less_json/2,less_json_ids/2,expand_dups/2, + detuple_kvs/2,init/1,terminate/2,handle_call/3,handle_cast/2,handle_info/2, + code_change/3,get_reduce_view/4,get_temp_reduce_view/5,get_temp_map_view/4, + get_map_view/4,get_row_count/1,reduce_to_count/1,fold_reduce/4, + extract_map_view/1,get_group_server/2,get_group_info/2,cleanup_index_files/1]). + +-include("couch_db.hrl"). + + +-record(server,{ + root_dir = []}). + +start_link() -> + gen_server:start_link({local, couch_view}, couch_view, [], []). + +get_temp_updater(DbName, Language, DesignOptions, MapSrc, RedSrc) -> + % make temp group + % do we need to close this db? + {ok, _Db, Group} = + couch_view_group:open_temp_group(DbName, Language, DesignOptions, MapSrc, RedSrc), + case gen_server:call(couch_view, {get_group_server, DbName, Group}) of + {ok, Pid} -> + Pid; + Error -> + throw(Error) + end. + +get_group_server(DbName, GroupId) -> + % get signature for group + case couch_view_group:open_db_group(DbName, GroupId) of + % do we need to close this db? + {ok, _Db, Group} -> + case gen_server:call(couch_view, {get_group_server, DbName, Group}) of + {ok, Pid} -> + Pid; + Error -> + throw(Error) + end; + Error -> + throw(Error) + end. + +get_group(Db, GroupId, Stale) -> + MinUpdateSeq = case Stale of + ok -> 0; + _Else -> couch_db:get_update_seq(Db) + end, + couch_view_group:request_group( + get_group_server(couch_db:name(Db), GroupId), + MinUpdateSeq). + +get_temp_group(Db, Language, DesignOptions, MapSrc, RedSrc) -> + couch_view_group:request_group( + get_temp_updater(couch_db:name(Db), Language, DesignOptions, MapSrc, RedSrc), + couch_db:get_update_seq(Db)). + +get_group_info(Db, GroupId) -> + couch_view_group:request_group_info( + get_group_server(couch_db:name(Db), GroupId)). 
+ +cleanup_index_files(Db) -> + % load all ddocs + {ok, DesignDocs} = couch_db:get_design_docs(Db), + + % make unique list of group sigs + Sigs = lists:map(fun(#doc{id = GroupId}) -> + {ok, Info} = get_group_info(Db, GroupId), + ?b2l(couch_util:get_value(signature, Info)) + end, [DD||DD <- DesignDocs, DD#doc.deleted == false]), + + FileList = list_index_files(Db), + + % regex that matches all ddocs + RegExp = "("++ string:join(Sigs, "|") ++")", + + % filter out the ones in use + DeleteFiles = [FilePath + || FilePath <- FileList, + re:run(FilePath, RegExp, [{capture, none}]) =:= nomatch], + % delete unused files + ?LOG_DEBUG("deleting unused view index files: ~p",[DeleteFiles]), + RootDir = couch_config:get("couchdb", "view_index_dir"), + [couch_file:delete(RootDir,File,false)||File <- DeleteFiles], + ok. + +list_index_files(Db) -> + % call server to fetch the index files + RootDir = couch_config:get("couchdb", "view_index_dir"), + filelib:wildcard(RootDir ++ "/." ++ ?b2l(couch_db:name(Db)) ++ "_design"++"/*"). + + +get_row_count(#view{btree=Bt}) -> + {ok, {Count, _Reds}} = couch_btree:full_reduce(Bt), + {ok, Count}. + +get_temp_reduce_view(Db, Language, DesignOptions, MapSrc, RedSrc) -> + {ok, #group{views=[View]}=Group} = + get_temp_group(Db, Language, DesignOptions, MapSrc, RedSrc), + {ok, {temp_reduce, View}, Group}. + + +get_reduce_view(Db, GroupId, Name, Update) -> + case get_group(Db, GroupId, Update) of + {ok, #group{views=Views,def_lang=Lang}=Group} -> + case get_reduce_view0(Name, Lang, Views) of + {ok, View} -> + {ok, View, Group}; + Else -> + Else + end; + Error -> + Error + end. + +get_reduce_view0(_Name, _Lang, []) -> + {not_found, missing_named_view}; +get_reduce_view0(Name, Lang, [#view{reduce_funs=RedFuns}=View|Rest]) -> + case get_key_pos(Name, RedFuns, 0) of + 0 -> get_reduce_view0(Name, Lang, Rest); + N -> {ok, {reduce, N, Lang, View}} + end. + +extract_map_view({reduce, _N, _Lang, View}) -> + View. 
+ +detuple_kvs([], Acc) -> + lists:reverse(Acc); +detuple_kvs([KV | Rest], Acc) -> + {{Key,Id},Value} = KV, + NKV = [[Key, Id], Value], + detuple_kvs(Rest, [NKV | Acc]). + +expand_dups([], Acc) -> + lists:reverse(Acc); +expand_dups([{Key, {dups, Vals}} | Rest], Acc) -> + Expanded = [{Key, Val} || Val <- Vals], + expand_dups(Rest, Expanded ++ Acc); +expand_dups([KV | Rest], Acc) -> + expand_dups(Rest, [KV | Acc]). + +fold_reduce({temp_reduce, #view{btree=Bt}}, Fun, Acc, Options) -> + WrapperFun = fun({GroupedKey, _}, PartialReds, Acc0) -> + {_, [Red]} = couch_btree:final_reduce(Bt, PartialReds), + Fun(GroupedKey, Red, Acc0) + end, + couch_btree:fold_reduce(Bt, WrapperFun, Acc, Options); + +fold_reduce({reduce, NthRed, Lang, #view{btree=Bt, reduce_funs=RedFuns}}, Fun, Acc, Options) -> + PreResultPadding = lists:duplicate(NthRed - 1, []), + PostResultPadding = lists:duplicate(length(RedFuns) - NthRed, []), + {_Name, FunSrc} = lists:nth(NthRed,RedFuns), + ReduceFun = + fun(reduce, KVs) -> + {ok, Reduced} = couch_query_servers:reduce(Lang, [FunSrc], detuple_kvs(expand_dups(KVs, []),[])), + {0, PreResultPadding ++ Reduced ++ PostResultPadding}; + (rereduce, Reds) -> + UserReds = [[lists:nth(NthRed, UserRedsList)] || {_, UserRedsList} <- Reds], + {ok, Reduced} = couch_query_servers:rereduce(Lang, [FunSrc], UserReds), + {0, PreResultPadding ++ Reduced ++ PostResultPadding} + end, + WrapperFun = fun({GroupedKey, _}, PartialReds, Acc0) -> + {_, Reds} = couch_btree:final_reduce(ReduceFun, PartialReds), + Fun(GroupedKey, lists:nth(NthRed, Reds), Acc0) + end, + couch_btree:fold_reduce(Bt, WrapperFun, Acc, Options). + +get_key_pos(_Key, [], _N) -> + 0; +get_key_pos(Key, [{Key1,_Value}|_], N) when Key == Key1 -> + N + 1; +get_key_pos(Key, [_|Rest], N) -> + get_key_pos(Key, Rest, N+1). + + +get_temp_map_view(Db, Language, DesignOptions, Src) -> + {ok, #group{views=[View]}=Group} = get_temp_group(Db, Language, DesignOptions, Src, []), + {ok, View, Group}. 
% Look up the named map view in a design document's view group.
get_map_view(Db, GroupId, Name, Stale) ->
    case get_group(Db, GroupId, Stale) of
    {ok, #group{views=Views}=Group} ->
        case get_map_view0(Name, Views) of
        {ok, View} ->
            {ok, View, Group};
        NotFound ->
            NotFound
        end;
    Error ->
        Error
    end.

% Scan the group's views for one whose map_names contains Name.
get_map_view0(_Name, []) ->
    {not_found, missing_named_view};
get_map_view0(Name, [#view{map_names=MapNames}=View|Rest]) ->
    case lists:member(Name, MapNames) of
    true ->
        {ok, View};
    false ->
        get_map_view0(Name, Rest)
    end.

% Total number of rows summarized by a btree reduction; {dups, Vals}
% groups count as their number of values.
reduce_to_count(Reductions) ->
    CountFun =
        fun(reduce, KVs) ->
            {lists:sum([row_count(V) || {_, V} <- KVs]), []};
        (rereduce, Reds) ->
            {lists:sum([Count0 || {Count0, _} <- Reds]), []}
        end,
    {Count, _} = couch_btree:final_reduce(CountFun, Reductions),
    Count.

% A plain KV counts as one row; a duplicate group counts its values.
row_count({dups, Vals}) -> length(Vals);
row_count(_) -> 1.


% Drive the user's fold fun over the expanded KVs for one btree node,
% threading the growing KVReds list through each call.
fold_fun(_Fun, [], _, Acc) ->
    {ok, Acc};
fold_fun(Fun, [KV|Rest], {KVReds, Reds}, Acc) ->
    case Fun(KV, {KVReds, Reds}, Acc) of
    {ok, Acc2} ->
        fold_fun(Fun, Rest, {[KV|KVReds], Reds}, Acc2);
    {stop, Acc2} ->
        {stop, Acc2}
    end.


% Fold over a map view's btree, expanding duplicate keys before handing
% each KV to Fun.
fold(#view{btree=Btree}, Fun, Acc, Options) ->
    Wrapper =
        fun(KV, Reds, Acc2) ->
            fold_fun(Fun, expand_dups([KV], []), Reds, Acc2)
        end,
    {ok, _LastReduce, _AccResult} = couch_btree:fold(Btree, Wrapper, Acc, Options).
+
+
+% gen_server init: read the view index dir, arrange to exit (and be
+% restarted by the supervisor) when that config key changes, subscribe
+% to db create/delete notifications, and create the three bookkeeping
+% ets tables that map dbs, sigs and group-server pids to each other.
+init([]) ->
+    % read configuration settings and register for configuration changes
+    RootDir = couch_config:get("couchdb", "view_index_dir"),
+    Self = self(),
+    ok = couch_config:register(
+        fun("couchdb", "view_index_dir")->
+            exit(Self, config_change)
+        end),
+
+    couch_db_update_notifier:start_link(
+        fun({deleted, DbName}) ->
+            gen_server:cast(couch_view, {reset_indexes, DbName});
+        ({created, DbName}) ->
+            gen_server:cast(couch_view, {reset_indexes, DbName});
+        (_Else) ->
+            ok
+        end),
+    % bag: one db maps to many sigs; the other two are 1:1 lookups
+    ets:new(couch_groups_by_db, [bag, private, named_table]),
+    ets:new(group_servers_by_sig, [set, protected, named_table]),
+    ets:new(couch_groups_by_updater, [set, private, named_table]),
+    process_flag(trap_exit, true),
+    ok = couch_file:init_delete_dir(RootDir),
+    {ok, #server{root_dir=RootDir}}.
+
+
+% Synchronously shut down every known group server before exiting.
+terminate(_Reason, _Srv) ->
+    [couch_util:shutdown_sync(Pid) || {Pid, _} <-
+        ets:tab2list(couch_groups_by_updater)],
+    ok.
+
+
+% Return (spawning if necessary) the group server for {DbName, Sig}.
+% An {error, invalid_view_seq} from start_link means the on-disk index
+% is ahead of the database, so all indexes for the db are reset and the
+% spawn is retried exactly once.
+handle_call({get_group_server, DbName,
+    #group{name=GroupId,sig=Sig}=Group}, _From, #server{root_dir=Root}=Server) ->
+    case ets:lookup(group_servers_by_sig, {DbName, Sig}) of
+    [] ->
+        ?LOG_DEBUG("Spawning new group server for view group ~s in database ~s.",
+            [GroupId, DbName]),
+        case (catch couch_view_group:start_link({Root, DbName, Group})) of
+        {ok, NewPid} ->
+            add_to_ets(NewPid, DbName, Sig),
+            {reply, {ok, NewPid}, Server};
+        {error, invalid_view_seq} ->
+            do_reset_indexes(DbName, Root),
+            case (catch couch_view_group:start_link({Root, DbName, Group})) of
+            {ok, NewPid} ->
+                add_to_ets(NewPid, DbName, Sig),
+                {reply, {ok, NewPid}, Server};
+            Error ->
+                {reply, Error, Server}
+            end;
+        Error ->
+            {reply, Error, Server}
+        end;
+    [{_, ExistingPid}] ->
+        {reply, {ok, ExistingPid}, Server}
+    end.
+
+% Drop all index state for DbName (fired on db create/delete).
+handle_cast({reset_indexes, DbName}, #server{root_dir=Root}=Server) ->
+    do_reset_indexes(DbName, Root),
+    {noreply, Server}.
+
+% Kill every group server registered for DbName, clear the ets
+% bookkeeping, then delete the on-disk index dir and the temp-view file.
+do_reset_indexes(DbName, Root) ->
+    % shutdown all the updaters and clear the files, the db got changed
+    Names = ets:lookup(couch_groups_by_db, DbName),
+    lists:foreach(
+        fun({_DbName, Sig}) ->
+            ?LOG_DEBUG("Killing update process for view group ~s. in database ~s.", [Sig, DbName]),
+            [{_, Pid}] = ets:lookup(group_servers_by_sig, {DbName, Sig}),
+            couch_util:shutdown_sync(Pid),
+            delete_from_ets(Pid, DbName, Sig)
+        end, Names),
+    delete_index_dir(Root, DbName),
+    RootDelDir = couch_config:get("couchdb", "view_index_dir"),
+    couch_file:delete(RootDelDir, Root ++ "/." ++ ?b2l(DbName) ++ "_temp").
+
+% A linked process exited: clean up its ets entries if it was a known
+% group server; otherwise propagate any abnormal exit reason.
+handle_info({'EXIT', FromPid, Reason}, Server) ->
+    case ets:lookup(couch_groups_by_updater, FromPid) of
+    [] ->
+        if Reason /= normal ->
+            % non-updater linked process died, we propagate the error
+            ?LOG_ERROR("Exit on non-updater process: ~p", [Reason]),
+            exit(Reason);
+        true -> ok
+        end;
+    [{_, {DbName, GroupId}}] ->
+        delete_from_ets(FromPid, DbName, GroupId)
+    end,
+    {noreply, Server}.
+
+% Record a group server pid in all three lookup tables.
+add_to_ets(Pid, DbName, Sig) ->
+    true = ets:insert(couch_groups_by_updater, {Pid, {DbName, Sig}}),
+    true = ets:insert(group_servers_by_sig, {{DbName, Sig}, Pid}),
+    true = ets:insert(couch_groups_by_db, {DbName, Sig}).
+
+% Remove a group server pid from all three lookup tables.
+% delete_object/2 is needed for the bag table to drop only this Sig.
+delete_from_ets(Pid, DbName, Sig) ->
+    true = ets:delete(couch_groups_by_updater, Pid),
+    true = ets:delete(group_servers_by_sig, {DbName, Sig}),
+    true = ets:delete_object(couch_groups_by_db, {DbName, Sig}).
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+
+% Delete the whole "<root>/.<dbname>_design" index directory tree.
+delete_index_dir(RootDir, DbName) ->
+    nuke_dir(RootDir, RootDir ++ "/." ++ ?b2l(DbName) ++ "_design").
+
+% Recursively delete Dir. Files go through couch_file:delete/3 (so they
+% can be moved to the pending-delete dir); an eperm from delete is taken
+% to mean the entry is a directory and is recursed into -- NOTE(review):
+% platform-dependent assumption, confirm against couch_file:delete/3.
+nuke_dir(RootDelDir, Dir) ->
+    case file:list_dir(Dir) of
+    {error, enoent} -> ok; % doesn't exist
+    {ok, Files} ->
+        lists:foreach(
+            fun(File)->
+                Full = Dir ++ "/" ++ File,
+                case couch_file:delete(RootDelDir, Full, false) of
+                ok -> ok;
+                {error, eperm} ->
+                    ok = nuke_dir(RootDelDir, Full)
+                end
+            end,
+            Files),
+        ok = file:del_dir(Dir)
+    end.
+ + +% keys come back in the language of btree - tuples. +less_json_ids({JsonA, IdA}, {JsonB, IdB}) -> + case less_json0(JsonA, JsonB) of + 0 -> + IdA < IdB; + Result -> + Result < 0 + end. + +less_json(A,B) -> + less_json0(A,B) < 0. + +less_json0(A,A) -> 0; + +less_json0(A,B) when is_atom(A), is_atom(B) -> atom_sort(A) - atom_sort(B); +less_json0(A,_) when is_atom(A) -> -1; +less_json0(_,B) when is_atom(B) -> 1; + +less_json0(A,B) when is_number(A), is_number(B) -> A - B; +less_json0(A,_) when is_number(A) -> -1; +less_json0(_,B) when is_number(B) -> 1; + +less_json0(A,B) when is_binary(A), is_binary(B) -> couch_util:collate(A,B); +less_json0(A,_) when is_binary(A) -> -1; +less_json0(_,B) when is_binary(B) -> 1; + +less_json0(A,B) when is_list(A), is_list(B) -> less_list(A,B); +less_json0(A,_) when is_list(A) -> -1; +less_json0(_,B) when is_list(B) -> 1; + +less_json0({A},{B}) when is_list(A), is_list(B) -> less_props(A,B); +less_json0({A},_) when is_list(A) -> -1; +less_json0(_,{B}) when is_list(B) -> 1. + +atom_sort(null) -> 1; +atom_sort(false) -> 2; +atom_sort(true) -> 3. + +less_props([], [_|_]) -> + -1; +less_props(_, []) -> + 1; +less_props([{AKey, AValue}|RestA], [{BKey, BValue}|RestB]) -> + case couch_util:collate(AKey, BKey) of + 0 -> + case less_json0(AValue, BValue) of + 0 -> + less_props(RestA, RestB); + Result -> + Result + end; + Result -> + Result + end. + +less_list([], [_|_]) -> + -1; +less_list(_, []) -> + 1; +less_list([A|RestA], [B|RestB]) -> + case less_json0(A,B) of + 0 -> + less_list(RestA, RestB); + Result -> + Result + end. diff --git a/apps/couch/src/couch_view_compactor.erl b/apps/couch/src/couch_view_compactor.erl new file mode 100644 index 00000000..895556bf --- /dev/null +++ b/apps/couch/src/couch_view_compactor.erl @@ -0,0 +1,98 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_view_compactor). + +-include ("couch_db.hrl"). + +-export([start_compact/2]). + +%% @spec start_compact(DbName::binary(), GroupId:binary()) -> ok +%% @doc Compacts the views. GroupId must not include the _design/ prefix +start_compact(DbName, GroupId) -> + Pid = couch_view:get_group_server(DbName, <<"_design/",GroupId/binary>>), + gen_server:cast(Pid, {start_compact, fun compact_group/2}). + +%%============================================================================= +%% internal functions +%%============================================================================= + +%% @spec compact_group(Group, NewGroup) -> ok +compact_group(Group, EmptyGroup) -> + #group{ + current_seq = Seq, + id_btree = IdBtree, + name = GroupId, + views = Views + } = Group, + + #group{ + db = Db, + id_btree = EmptyIdBtree, + views = EmptyViews + } = EmptyGroup, + + {ok, {Count, _}} = couch_btree:full_reduce(Db#db.fulldocinfo_by_id_btree), + + <<"_design", ShortName/binary>> = GroupId, + DbName = couch_db:name(Db), + TaskName = <<DbName/binary, ShortName/binary>>, + couch_task_status:add_task(<<"View Group Compaction">>, TaskName, <<"">>), + + Fun = fun(KV, {Bt, Acc, TotalCopied}) -> + if TotalCopied rem 10000 =:= 0 -> + couch_task_status:update("Copied ~p of ~p Ids (~p%)", + [TotalCopied, Count, (TotalCopied*100) div Count]), + {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV|Acc])), + {ok, {Bt2, [], TotalCopied+1}}; + true -> + {ok, {Bt, [KV|Acc], TotalCopied+1}} + end + end, + {ok, _, {Bt3, Uncopied, _Total}} = couch_btree:foldl(IdBtree, Fun, + 
{EmptyIdBtree, [], 0}), + {ok, NewIdBtree} = couch_btree:add(Bt3, lists:reverse(Uncopied)), + + NewViews = lists:map(fun({View, EmptyView}) -> + compact_view(View, EmptyView) + end, lists:zip(Views, EmptyViews)), + + NewGroup = EmptyGroup#group{ + id_btree=NewIdBtree, + views=NewViews, + current_seq=Seq + }, + + Pid = couch_view:get_group_server(DbName, GroupId), + gen_server:cast(Pid, {compact_done, NewGroup}). + +%% @spec compact_view(View, EmptyView, Retry) -> CompactView +compact_view(View, EmptyView) -> + {ok, Count} = couch_view:get_row_count(View), + + %% Key is {Key,DocId} + Fun = fun(KV, {Bt, Acc, TotalCopied}) -> + if TotalCopied rem 10000 =:= 0 -> + couch_task_status:update("View #~p: copied ~p of ~p KVs (~p%)", + [View#view.id_num, TotalCopied, Count, (TotalCopied*100) div Count]), + {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV|Acc])), + {ok, {Bt2, [], TotalCopied + 1}}; + true -> + {ok, {Bt, [KV|Acc], TotalCopied + 1}} + end + end, + + {ok, _, {Bt3, Uncopied, _Total}} = couch_btree:foldl(View#view.btree, Fun, + {EmptyView#view.btree, [], 0}), + {ok, NewBt} = couch_btree:add(Bt3, lists:reverse(Uncopied)), + EmptyView#view{btree = NewBt}. + diff --git a/apps/couch/src/couch_view_group.erl b/apps/couch/src/couch_view_group.erl new file mode 100644 index 00000000..f01befdf --- /dev/null +++ b/apps/couch/src/couch_view_group.erl @@ -0,0 +1,592 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_view_group). +-behaviour(gen_server). 
+ +%% API +-export([start_link/1, request_group/2, request_group_info/1]). +-export([open_db_group/2, open_temp_group/5, design_doc_to_view_group/1,design_root/2]). + +%% gen_server callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +-include("couch_db.hrl"). + +-record(group_state, { + type, + db_name, + init_args, + group, + updater_pid=nil, + compactor_pid=nil, + waiting_commit=false, + waiting_list=[], + ref_counter=nil +}). + +% api methods +request_group(Pid, Seq) -> + ?LOG_DEBUG("request_group {Pid, Seq} ~p", [{Pid, Seq}]), + case gen_server:call(Pid, {request_group, Seq}, infinity) of + {ok, Group, RefCounter} -> + couch_ref_counter:add(RefCounter), + {ok, Group}; + Error -> + ?LOG_DEBUG("request_group Error ~p", [Error]), + throw(Error) + end. + +request_group_info(Pid) -> + case gen_server:call(Pid, request_group_info) of + {ok, GroupInfoList} -> + {ok, GroupInfoList}; + Error -> + throw(Error) + end. + +% from template +start_link(InitArgs) -> + case gen_server:start_link(couch_view_group, + {InitArgs, self(), Ref = make_ref()}, []) of + {ok, Pid} -> + {ok, Pid}; + ignore -> + receive + {Ref, Pid, Error} -> + case process_info(self(), trap_exit) of + {trap_exit, true} -> receive {'EXIT', Pid, _} -> ok end; + {trap_exit, false} -> ok + end, + Error + end; + Error -> + Error + end. + +% init creates a closure which spawns the appropriate view_updater. +init({InitArgs, ReturnPid, Ref}) -> + process_flag(trap_exit, true), + case prepare_group(InitArgs, false) of + {ok, #group{db=Db, fd=Fd, current_seq=Seq}=Group} -> + case Seq > couch_db:get_update_seq(Db) of + true -> + ReturnPid ! 
{Ref, self(), {error, invalid_view_seq}}, + ignore; + _ -> + couch_db:monitor(Db), + Owner = self(), + Pid = spawn_link( + fun()-> couch_view_updater:update(Owner, Group) end + ), + {ok, RefCounter} = couch_ref_counter:start([Fd]), + {ok, #group_state{ + db_name=couch_db:name(Db), + init_args=InitArgs, + updater_pid = Pid, + group=Group, + ref_counter=RefCounter}} + end; + Error -> + ReturnPid ! {Ref, self(), Error}, + ignore + end. + + + + +% There are two sources of messages: couch_view, which requests an up to date +% view group, and the couch_view_updater, which when spawned, updates the +% group and sends it back here. We employ a caching mechanism, so that between +% database writes, we don't have to spawn a couch_view_updater with every view +% request. + +% The caching mechanism: each request is submitted with a seq_id for the +% database at the time it was read. We guarantee to return a view from that +% sequence or newer. + +% If the request sequence is higher than our current high_target seq, we set +% that as the highest seqence. If the updater is not running, we launch it. + +handle_call({request_group, RequestSeq}, From, + #group_state{ + db_name=DbName, + group=#group{current_seq=Seq}=Group, + updater_pid=nil, + waiting_list=WaitList + }=State) when RequestSeq > Seq -> + {ok, Db} = couch_db:open_int(DbName, []), + Group2 = Group#group{db=Db}, + Owner = self(), + Pid = spawn_link(fun()-> couch_view_updater:update(Owner, Group2) end), + + {noreply, State#group_state{ + updater_pid=Pid, + group=Group2, + waiting_list=[{From,RequestSeq}|WaitList] + }, infinity}; + + +% If the request seqence is less than or equal to the seq_id of a known Group, +% we respond with that Group. 
+handle_call({request_group, RequestSeq}, _From, #group_state{ + group = #group{current_seq=GroupSeq} = Group, + ref_counter = RefCounter + } = State) when RequestSeq =< GroupSeq -> + {reply, {ok, Group, RefCounter}, State}; + +% Otherwise: TargetSeq => RequestSeq > GroupSeq +% We've already initiated the appropriate action, so just hold the response until the group is up to the RequestSeq +handle_call({request_group, RequestSeq}, From, + #group_state{waiting_list=WaitList}=State) -> + {noreply, State#group_state{ + waiting_list=[{From, RequestSeq}|WaitList] + }, infinity}; + +handle_call(request_group_info, _From, State) -> + GroupInfo = get_group_info(State), + {reply, {ok, GroupInfo}, State}. + +handle_cast({start_compact, CompactFun}, #group_state{compactor_pid=nil} + = State) -> + #group_state{ + group = #group{name = GroupId, sig = GroupSig} = Group, + init_args = {RootDir, DbName, _} + } = State, + ?LOG_INFO("View index compaction starting for ~s ~s", [DbName, GroupId]), + {ok, Db} = couch_db:open_int(DbName, []), + {ok, Fd} = open_index_file(compact, RootDir, DbName, GroupSig), + NewGroup = reset_file(Db, Fd, DbName, Group), + Pid = spawn_link(fun() -> CompactFun(Group, NewGroup) end), + {noreply, State#group_state{compactor_pid = Pid}}; +handle_cast({start_compact, _}, State) -> + %% compact already running, this is a no-op + {noreply, State}; + +handle_cast({compact_done, #group{current_seq=NewSeq} = NewGroup}, + #group_state{group = #group{current_seq=OldSeq}} = State) + when NewSeq >= OldSeq -> + #group_state{ + group = #group{name=GroupId, fd=OldFd, sig=GroupSig} = Group, + init_args = {RootDir, DbName, _}, + updater_pid = UpdaterPid, + ref_counter = RefCounter + } = State, + + ?LOG_INFO("View index compaction complete for ~s ~s", [DbName, GroupId]), + FileName = index_file_name(RootDir, DbName, GroupSig), + CompactName = index_file_name(compact, RootDir, DbName, GroupSig), + ok = couch_file:delete(RootDir, FileName), + ok = file:rename(CompactName, 
FileName), + + %% if an updater is running, kill it and start a new one + NewUpdaterPid = + if is_pid(UpdaterPid) -> + unlink(UpdaterPid), + exit(UpdaterPid, view_compaction_complete), + Owner = self(), + spawn_link(fun()-> couch_view_updater:update(Owner, NewGroup) end); + true -> + nil + end, + + %% cleanup old group + unlink(OldFd), + couch_ref_counter:drop(RefCounter), + {ok, NewRefCounter} = couch_ref_counter:start([NewGroup#group.fd]), + case Group#group.db of + nil -> ok; + Else -> couch_db:close(Else) + end, + + self() ! delayed_commit, + {noreply, State#group_state{ + group=NewGroup, + ref_counter=NewRefCounter, + compactor_pid=nil, + updater_pid=NewUpdaterPid + }}; +handle_cast({compact_done, NewGroup}, State) -> + #group_state{ + group = #group{name = GroupId, current_seq = CurrentSeq}, + init_args={_RootDir, DbName, _} + } = State, + ?LOG_INFO("View index compaction still behind for ~s ~s -- current: ~p " ++ + "compact: ~p", [DbName, GroupId, CurrentSeq, NewGroup#group.current_seq]), + couch_db:close(NewGroup#group.db), + {ok, Db} = couch_db:open_int(DbName, []), + Pid = spawn_link(fun() -> + {_,Ref} = erlang:spawn_monitor(fun() -> + couch_view_updater:update(nil, NewGroup#group{db = Db}) + end), + receive + {'DOWN', Ref, _, _, {new_group, NewGroup2}} -> + #group{name=GroupId} = NewGroup2, + Pid2 = couch_view:get_group_server(DbName, GroupId), + gen_server:cast(Pid2, {compact_done, NewGroup2}) + end + end), + {noreply, State#group_state{compactor_pid = Pid}}; + +handle_cast({partial_update, Pid, NewGroup}, #group_state{updater_pid=Pid} + = State) -> + #group_state{ + db_name = DbName, + waiting_commit = WaitingCommit + } = State, + NewSeq = NewGroup#group.current_seq, + ?LOG_INFO("checkpointing view update at seq ~p for ~s ~s", [NewSeq, + DbName, NewGroup#group.name]), + if not WaitingCommit -> + erlang:send_after(1000, self(), delayed_commit); + true -> ok + end, + {noreply, State#group_state{group=NewGroup, waiting_commit=true}}; 
+handle_cast({partial_update, _, _}, State) -> + %% message from an old (probably pre-compaction) updater; ignore + {noreply, State}. + +handle_info(delayed_commit, #group_state{db_name=DbName,group=Group}=State) -> + {ok, Db} = couch_db:open_int(DbName, []), + CommittedSeq = couch_db:get_committed_update_seq(Db), + couch_db:close(Db), + if CommittedSeq >= Group#group.current_seq -> + % save the header + Header = {Group#group.sig, get_index_header_data(Group)}, + ok = couch_file:write_header(Group#group.fd, Header), + {noreply, State#group_state{waiting_commit=false}}; + true -> + % We can't commit the header because the database seq that's fully + % committed to disk is still behind us. If we committed now and the + % database lost those changes our view could be forever out of sync + % with the database. But a crash before we commit these changes, no big + % deal, we only lose incremental changes since last committal. + erlang:send_after(1000, self(), delayed_commit), + {noreply, State#group_state{waiting_commit=true}} + end; + +handle_info({'EXIT', FromPid, {new_group, #group{db=Db}=Group}}, + #group_state{db_name=DbName, + updater_pid=UpPid, + ref_counter=RefCounter, + waiting_list=WaitList, + waiting_commit=WaitingCommit}=State) when UpPid == FromPid -> + ok = couch_db:close(Db), + if not WaitingCommit -> + erlang:send_after(1000, self(), delayed_commit); + true -> ok + end, + case reply_with_group(Group, WaitList, [], RefCounter) of + [] -> + {noreply, State#group_state{waiting_commit=true, waiting_list=[], + group=Group#group{db=nil}, updater_pid=nil}}; + StillWaiting -> + % we still have some waiters, reopen the database and reupdate the index + {ok, Db2} = couch_db:open_int(DbName, []), + Group2 = Group#group{db=Db2}, + Owner = self(), + Pid = spawn_link(fun() -> couch_view_updater:update(Owner, Group2) end), + {noreply, State#group_state{waiting_commit=true, + waiting_list=StillWaiting, group=Group2, updater_pid=Pid}} + end; +handle_info({'EXIT', _, 
{new_group, _}}, State) -> + %% message from an old (probably pre-compaction) updater; ignore + {noreply, State}; + +handle_info({'EXIT', FromPid, reset}, + #group_state{ + init_args=InitArgs, + updater_pid=UpPid, + group=Group}=State) when UpPid == FromPid -> + ok = couch_db:close(Group#group.db), + case prepare_group(InitArgs, true) of + {ok, ResetGroup} -> + Owner = self(), + Pid = spawn_link(fun()-> couch_view_updater:update(Owner, ResetGroup) end), + {noreply, State#group_state{ + updater_pid=Pid, + group=ResetGroup}}; + Error -> + {stop, normal, reply_all(State, Error)} + end; +handle_info({'EXIT', _, reset}, State) -> + %% message from an old (probably pre-compaction) updater; ignore + {noreply, State}; + +handle_info({'EXIT', _FromPid, normal}, State) -> + {noreply, State}; + +handle_info({'EXIT', FromPid, {{nocatch, Reason}, _Trace}}, State) -> + ?LOG_DEBUG("Uncaught throw() in linked pid: ~p", [{FromPid, Reason}]), + {stop, Reason, State}; + +handle_info({'EXIT', FromPid, Reason}, State) -> + ?LOG_DEBUG("Exit from linked pid: ~p", [{FromPid, Reason}]), + {stop, Reason, State}; + +handle_info({'DOWN',_,_,_,_}, State) -> + ?LOG_INFO("Shutting down view group server, monitored db is closing.", []), + {stop, normal, reply_all(State, shutdown)}. + + +terminate(Reason, #group_state{updater_pid=Update, compactor_pid=Compact}=S) -> + reply_all(S, Reason), + couch_util:shutdown_sync(Update), + couch_util:shutdown_sync(Compact), + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. 
+ +%% Local Functions + +% reply_with_group/3 +% for each item in the WaitingList {Pid, Seq} +% if the Seq is =< GroupSeq, reply +reply_with_group(Group=#group{current_seq=GroupSeq}, [{Pid, Seq}|WaitList], + StillWaiting, RefCounter) when Seq =< GroupSeq -> + gen_server:reply(Pid, {ok, Group, RefCounter}), + reply_with_group(Group, WaitList, StillWaiting, RefCounter); + +% else +% put it in the continuing waiting list +reply_with_group(Group, [{Pid, Seq}|WaitList], StillWaiting, RefCounter) -> + reply_with_group(Group, WaitList, [{Pid, Seq}|StillWaiting], RefCounter); + +% return the still waiting list +reply_with_group(_Group, [], StillWaiting, _RefCounter) -> + StillWaiting. + +reply_all(#group_state{waiting_list=WaitList}=State, Reply) -> + [catch gen_server:reply(Pid, Reply) || {Pid, _} <- WaitList], + State#group_state{waiting_list=[]}. + +prepare_group({RootDir, DbName, #group{sig=Sig}=Group}, ForceReset)-> + case couch_db:open_int(DbName, []) of + {ok, Db} -> + case open_index_file(RootDir, DbName, Sig) of + {ok, Fd} -> + if ForceReset -> + % this can happen if we missed a purge + {ok, reset_file(Db, Fd, DbName, Group)}; + true -> + % 09 UPGRADE CODE + ok = couch_file:upgrade_old_header(Fd, <<$r, $c, $k, 0>>), + case (catch couch_file:read_header(Fd)) of + {ok, {Sig, HeaderInfo}} -> + % sigs match! + {ok, init_group(Db, Fd, Group, HeaderInfo)}; + _ -> + % this happens on a new file + {ok, reset_file(Db, Fd, DbName, Group)} + end + end; + Error -> + catch delete_index_file(RootDir, DbName, Sig), + Error + end; + Else -> + Else + end. + +get_index_header_data(#group{current_seq=Seq, purge_seq=PurgeSeq, + id_btree=IdBtree,views=Views}) -> + ViewStates = [couch_btree:get_state(Btree) || #view{btree=Btree} <- Views], + #index_header{seq=Seq, + purge_seq=PurgeSeq, + id_btree_state=couch_btree:get_state(IdBtree), + view_states=ViewStates}. + +hex_sig(GroupSig) -> + couch_util:to_hex(?b2l(GroupSig)). + +design_root(RootDir, DbName) -> + RootDir ++ "/." 
++ ?b2l(DbName) ++ "_design/". + +index_file_name(RootDir, DbName, GroupSig) -> + design_root(RootDir, DbName) ++ hex_sig(GroupSig) ++".view". + +index_file_name(compact, RootDir, DbName, GroupSig) -> + design_root(RootDir, DbName) ++ hex_sig(GroupSig) ++".compact.view". + + +open_index_file(RootDir, DbName, GroupSig) -> + FileName = index_file_name(RootDir, DbName, GroupSig), + case couch_file:open(FileName) of + {ok, Fd} -> {ok, Fd}; + {error, enoent} -> couch_file:open(FileName, [create]); + Error -> Error + end. + +open_index_file(compact, RootDir, DbName, GroupSig) -> + FileName = index_file_name(compact, RootDir, DbName, GroupSig), + case couch_file:open(FileName) of + {ok, Fd} -> {ok, Fd}; + {error, enoent} -> couch_file:open(FileName, [create]); + Error -> Error + end. + +open_temp_group(DbName, Language, DesignOptions, MapSrc, RedSrc) -> + case couch_db:open_int(DbName, []) of + {ok, Db} -> + View = #view{map_names=[<<"_temp">>], + id_num=0, + btree=nil, + def=MapSrc, + reduce_funs= if RedSrc==[] -> []; true -> [{<<"_temp">>, RedSrc}] end, + options=DesignOptions}, + + {ok, Db, set_view_sig(#group{name = <<"_temp">>, db=Db, views=[View], + def_lang=Language, design_options=DesignOptions})}; + Error -> + Error + end. + +set_view_sig(#group{ + views=Views, + def_lang=Language, + design_options=DesignOptions}=G) -> + G#group{sig=couch_util:md5(term_to_binary({Views, Language, DesignOptions}))}. + +open_db_group(DbName, GroupId) -> + case couch_db:open_int(DbName, []) of + {ok, Db} -> + case couch_db:open_doc(Db, GroupId) of + {ok, Doc} -> + {ok, Db, design_doc_to_view_group(Doc)}; + Else -> + couch_db:close(Db), + Else + end; + Else -> + Else + end. 
+
+% Assemble the info proplist reported for a view group (signature,
+% language, file size, updater/compactor activity, pending commit and
+% waiting clients, update and purge seqs).
+get_group_info(State) ->
+    #group_state{
+        group=Group,
+        updater_pid=UpdaterPid,
+        compactor_pid=CompactorPid,
+        waiting_commit=WaitingCommit,
+        waiting_list=WaitersList
+    } = State,
+    #group{
+        fd = Fd,
+        sig = GroupSig,
+        def_lang = Lang,
+        current_seq=CurrentSeq,
+        purge_seq=PurgeSeq
+    } = Group,
+    {ok, Size} = couch_file:bytes(Fd),
+    [
+        {signature, ?l2b(hex_sig(GroupSig))},
+        {language, Lang},
+        {disk_size, Size},
+        {updater_running, UpdaterPid /= nil},
+        {compact_running, CompactorPid /= nil},
+        {waiting_commit, WaitingCommit},
+        {waiting_clients, length(WaitersList)},
+        {update_seq, CurrentSeq},
+        {purge_seq, PurgeSeq}
+    ].
+
+% maybe move to another module
+% Turn a design doc body into a #group{}: view definitions sharing the
+% same {map source, view options} are merged into one #view{} (map-only
+% names go into map_names, names with a reduce into reduce_funs), views
+% are numbered in sorted order, and the group signature is derived from
+% views + language + design options via set_view_sig/1.
+design_doc_to_view_group(#doc{id=Id,body={Fields}}) ->
+    Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
+    {DesignOptions} = couch_util:get_value(<<"options">>, Fields, {[]}),
+    {RawViews} = couch_util:get_value(<<"views">>, Fields, {[]}),
+    % add the views to a dictionary object, with the map source as the key
+    DictBySrc =
+    lists:foldl(
+        fun({Name, {MRFuns}}, DictBySrcAcc) ->
+            MapSrc = couch_util:get_value(<<"map">>, MRFuns),
+            RedSrc = couch_util:get_value(<<"reduce">>, MRFuns, null),
+            {ViewOptions} = couch_util:get_value(<<"options">>, MRFuns, {[]}),
+            View =
+            case dict:find({MapSrc, ViewOptions}, DictBySrcAcc) of
+                {ok, View0} -> View0;
+                error -> #view{def=MapSrc, options=ViewOptions} % create new view object
+            end,
+            View2 =
+            if RedSrc == null ->
+                View#view{map_names=[Name|View#view.map_names]};
+            true ->
+                View#view{reduce_funs=[{Name,RedSrc}|View#view.reduce_funs]}
+            end,
+            dict:store({MapSrc, ViewOptions}, View2, DictBySrcAcc)
+        end, dict:new(), RawViews),
+    % number the views
+    {Views, _N} = lists:mapfoldl(
+        fun({_Src, View}, N) ->
+            {View#view{id_num=N},N+1}
+        end, 0, lists:sort(dict:to_list(DictBySrc))),
+
+    set_view_sig(#group{name=Id, views=Views, def_lang=Language, design_options=DesignOptions}).
+ +reset_group(#group{views=Views}=Group) -> + Views2 = [View#view{btree=nil} || View <- Views], + Group#group{db=nil,fd=nil,query_server=nil,current_seq=0, + id_btree=nil,views=Views2}. + +reset_file(Db, Fd, DbName, #group{sig=Sig,name=Name} = Group) -> + ?LOG_DEBUG("Resetting group index \"~s\" in db ~s", [Name, DbName]), + ok = couch_file:truncate(Fd, 0), + ok = couch_file:write_header(Fd, {Sig, nil}), + init_group(Db, Fd, reset_group(Group), nil). + +delete_index_file(RootDir, DbName, GroupSig) -> + couch_file:delete(RootDir, index_file_name(RootDir, DbName, GroupSig)). + +init_group(Db, Fd, #group{views=Views}=Group, nil) -> + init_group(Db, Fd, Group, + #index_header{seq=0, purge_seq=couch_db:get_purge_seq(Db), + id_btree_state=nil, view_states=[nil || _ <- Views]}); +init_group(Db, Fd, #group{def_lang=Lang,views=Views}= + Group, IndexHeader) -> + #index_header{seq=Seq, purge_seq=PurgeSeq, + id_btree_state=IdBtreeState, view_states=ViewStates} = IndexHeader, + {ok, IdBtree} = couch_btree:open(IdBtreeState, Fd), + Views2 = lists:zipwith( + fun(BtreeState, #view{reduce_funs=RedFuns,options=Options}=View) -> + FunSrcs = [FunSrc || {_Name, FunSrc} <- RedFuns], + ReduceFun = + fun(reduce, KVs) -> + KVs2 = couch_view:expand_dups(KVs,[]), + KVs3 = couch_view:detuple_kvs(KVs2,[]), + {ok, Reduced} = couch_query_servers:reduce(Lang, FunSrcs, + KVs3), + {length(KVs3), Reduced}; + (rereduce, Reds) -> + Count = lists:sum([Count0 || {Count0, _} <- Reds]), + UserReds = [UserRedsList || {_, UserRedsList} <- Reds], + {ok, Reduced} = couch_query_servers:rereduce(Lang, FunSrcs, + UserReds), + {Count, Reduced} + end, + + case couch_util:get_value(<<"collation">>, Options, <<"default">>) of + <<"default">> -> + Less = fun couch_view:less_json_ids/2; + <<"raw">> -> + Less = fun(A,B) -> A < B end + end, + {ok, Btree} = couch_btree:open(BtreeState, Fd, + [{less, Less}, + {reduce, ReduceFun}]), + View#view{btree=Btree} + end, + ViewStates, Views), + Group#group{db=Db, fd=Fd, 
current_seq=Seq, purge_seq=PurgeSeq, + id_btree=IdBtree, views=Views2}. + + diff --git a/apps/couch/src/couch_view_updater.erl b/apps/couch/src/couch_view_updater.erl new file mode 100644 index 00000000..2a9c960f --- /dev/null +++ b/apps/couch/src/couch_view_updater.erl @@ -0,0 +1,252 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_view_updater). + +-export([update/2]). + +-include("couch_db.hrl"). + +-spec update(_, #group{}) -> no_return(). + +update(Owner, Group) -> + #group{ + db = #db{name=DbName} = Db, + name = GroupName, + current_seq = Seq, + purge_seq = PurgeSeq + } = Group, + couch_task_status:add_task(<<"View Group Indexer">>, <<DbName/binary," ",GroupName/binary>>, <<"Starting index update">>), + + DbPurgeSeq = couch_db:get_purge_seq(Db), + Group2 = + if DbPurgeSeq == PurgeSeq -> + Group; + DbPurgeSeq == PurgeSeq + 1 -> + couch_task_status:update(<<"Removing purged entries from view index.">>), + purge_index(Group); + true -> + couch_task_status:update(<<"Resetting view index due to lost purge entries.">>), + exit(reset) + end, + {ok, MapQueue} = couch_work_queue:new(100000, 500), + {ok, WriteQueue} = couch_work_queue:new(100000, 500), + Self = self(), + ViewEmptyKVs = [{View, []} || View <- Group2#group.views], + spawn_link(fun() -> do_maps(Group, MapQueue, WriteQueue, ViewEmptyKVs) end), + spawn_link(fun() -> do_writes(Self, Owner, Group2, WriteQueue, Seq == 0) end), + % compute on all docs modified since we last computed. 
+ TotalChanges = couch_db:count_changes_since(Db, Seq), + % update status every half second + couch_task_status:set_update_frequency(500), + #group{ design_options = DesignOptions } = Group, + IncludeDesign = couch_util:get_value(<<"include_design">>, + DesignOptions, false), + LocalSeq = couch_util:get_value(<<"local_seq">>, DesignOptions, false), + DocOpts = + case LocalSeq of + true -> [conflicts, deleted_conflicts, local_seq]; + _ -> [conflicts, deleted_conflicts] + end, + {ok, _, _} + = couch_db:enum_docs_since( + Db, + Seq, + fun(DocInfo, _, ChangesProcessed) -> + couch_task_status:update("Processed ~p of ~p changes (~p%)", + [ChangesProcessed, TotalChanges, (ChangesProcessed*100) div TotalChanges]), + load_doc(Db, DocInfo, MapQueue, DocOpts, IncludeDesign), + {ok, ChangesProcessed+1} + end, + 0, []), + couch_task_status:set_update_frequency(0), + couch_task_status:update("Finishing."), + couch_work_queue:close(MapQueue), + receive {new_group, NewGroup} -> + exit({new_group, + NewGroup#group{current_seq=couch_db:get_update_seq(Db)}}) + end. 
+
+
+% Remove every row contributed by purged docs from the id btree and each
+% view btree, then advance the group's purge_seq to the db's current one.
+purge_index(#group{db=Db, views=Views, id_btree=IdBtree}=Group) ->
+    {ok, PurgedIdsRevs} = couch_db:get_last_purged(Db),
+    Ids = [Id || {Id, _Revs} <- PurgedIdsRevs],
+    % one pass: look up each purged doc's view keys AND delete its id row
+    {ok, Lookups, IdBtree2} = couch_btree:query_modify(IdBtree, Ids, [], Ids),
+
+    % now populate the dictionary with all the keys to delete
+    ViewKeysToRemoveDict = lists:foldl(
+        fun({ok,{DocId,ViewNumRowKeys}}, ViewDictAcc) ->
+            lists:foldl(
+                fun({ViewNum, RowKey}, ViewDictAcc2) ->
+                    dict:append(ViewNum, {RowKey, DocId}, ViewDictAcc2)
+                end, ViewDictAcc, ViewNumRowKeys);
+        ({not_found, _}, ViewDictAcc) ->
+            ViewDictAcc
+        end, dict:new(), Lookups),
+
+    % Now remove the values from the btrees
+    Views2 = lists:map(
+        fun(#view{id_num=Num,btree=Btree}=View) ->
+            case dict:find(Num, ViewKeysToRemoveDict) of
+            {ok, RemoveKeys} ->
+                {ok, Btree2} = couch_btree:add_remove(Btree, [], RemoveKeys),
+                View#view{btree=Btree2};
+            error -> % no keys to remove in this view
+                View
+            end
+        end, Views),
+    Group#group{id_btree=IdBtree2,
+        views=Views2,
+        purge_seq=couch_db:get_purge_seq(Db)}.
+
+
+% Queue one document (or a deleted-doc stub) onto the map queue; design
+% docs are skipped unless the group was defined with include_design.
+load_doc(Db, DocInfo, MapQueue, DocOpts, IncludeDesign) ->
+    #doc_info{id=DocId, high_seq=Seq, revs=[#rev_info{deleted=Deleted}|_]} = DocInfo,
+    case {IncludeDesign, DocId} of
+    {false, <<?DESIGN_DOC_PREFIX, _/binary>>} -> % we skip design docs
+        ok;
+    _ ->
+        if Deleted ->
+            % deletions only need the id so the row removal can be indexed
+            couch_work_queue:queue(MapQueue, {Seq, #doc{id=DocId, deleted=true}});
+        true ->
+            {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts),
+            couch_work_queue:queue(MapQueue, {Seq, Doc})
+        end
+    end.
%% Map-stage worker loop: repeatedly pulls a batch of {Seq, #doc{}} pairs
%% off MapQueue, runs the live docs through the view server, and pushes
%% the resulting per-view KVs onto WriteQueue. Terminates when MapQueue
%% is closed by the doc-loading stage.
do_maps(Group, MapQueue, WriteQueue, ViewEmptyKVs) ->
    case couch_work_queue:dequeue(MapQueue) of
    closed ->
        % no more docs are coming: propagate the close downstream and
        % shut down the external doc-map (query server) process
        couch_work_queue:close(WriteQueue),
        couch_query_servers:stop_doc_map(Group#group.query_server);
    {ok, Queue} ->
        % deleted docs contribute no new rows; they are carried as
        % {Id, []} so the write stage removes their existing rows
        Docs = [Doc || {_,#doc{deleted=false}=Doc} <- Queue],
        DelKVs = [{Id, []} || {_, #doc{deleted=true,id=Id}} <- Queue],
        LastSeq = lists:max([Seq || {Seq, _Doc} <- Queue]),
        {Group1, Results} = view_compute(Group, Docs),
        {ViewKVs, DocIdViewIdKeys} = view_insert_query_results(Docs,
            Results, ViewEmptyKVs, DelKVs),
        couch_work_queue:queue(WriteQueue, {LastSeq, ViewKVs, DocIdViewIdKeys}),
        % Group1 may now hold a started query server; loop with it
        do_maps(Group1, MapQueue, WriteQueue, ViewEmptyKVs)
    end.

%% Write-stage worker loop: drains WriteQueue, merges every queued batch
%% into a single btree write via write_changes/5, and optionally reports
%% each partial update to Owner (presumably the view group server —
%% confirm against the caller). Sends {new_group, Group} to Parent when
%% the queue is closed.
do_writes(Parent, Owner, Group, WriteQueue, InitialBuild) ->
    case couch_work_queue:dequeue(WriteQueue) of
    closed ->
        Parent ! {new_group, Group};
    {ok, Queue} ->
        % Fold all queued batches together: take the max sequence,
        % concatenate KV lists per view (zipwith keeps views aligned),
        % and concatenate the doc-id -> view-keys lists.
        {NewSeq, ViewKeyValues, DocIdViewIdKeys} = lists:foldl(
            fun({Seq, ViewKVs, DocIdViewIdKeys}, nil) ->
                % first batch seeds the accumulator
                {Seq, ViewKVs, DocIdViewIdKeys};
            ({Seq, ViewKVs, DocIdViewIdKeys}, Acc) ->
                {Seq2, AccViewKVs, AccDocIdViewIdKeys} = Acc,
                AccViewKVs2 = lists:zipwith(
                    fun({View, KVsIn}, {_View, KVsAcc}) ->
                        {View, KVsIn ++ KVsAcc}
                    end, ViewKVs, AccViewKVs),
                {lists:max([Seq, Seq2]),
                    AccViewKVs2, DocIdViewIdKeys ++ AccDocIdViewIdKeys}
            end, nil, Queue),
        Group2 = write_changes(Group, ViewKeyValues, DocIdViewIdKeys, NewSeq,
            InitialBuild),
        case Owner of
        nil -> ok;
        _ -> ok = gen_server:cast(Owner, {partial_update, Parent, Group2})
        end,
        do_writes(Parent, Owner, Group2, WriteQueue, InitialBuild)
    end.
%% Walk Docs and their map Results in lockstep, folding each doc's
%% emitted rows into the per-view KV accumulators. Also builds the
%% reverse index [{DocId, [{ViewId, Key}]}] used to clean up old rows
%% on the next update of each doc.
view_insert_query_results([], [], ViewKVs, DocIdViewIdKeysAcc) ->
    {ViewKVs, DocIdViewIdKeysAcc};
view_insert_query_results([Doc|RestDocs], [QueryResults | RestResults], ViewKVs, DocIdViewIdKeysAcc) ->
    {NewViewKVs, NewViewIdKeys} = view_insert_doc_query_results(Doc, QueryResults, ViewKVs, [], []),
    NewDocIdViewIdKeys = [{Doc#doc.id, NewViewIdKeys} | DocIdViewIdKeysAcc],
    view_insert_query_results(RestDocs, RestResults, NewViewKVs, NewDocIdViewIdKeys).


%% Fold one document's map output (one result list per view) into the
%% per-view accumulators. Rows are stored keyed by {Key, DocId}; when a
%% doc emits the same key multiple times in one view, the values are
%% collapsed into a single {dups, [V1, V2, ...]} entry.
view_insert_doc_query_results(_Doc, [], [], ViewKVsAcc, ViewIdKeysAcc) ->
    {lists:reverse(ViewKVsAcc), lists:reverse(ViewIdKeysAcc)};
view_insert_doc_query_results(#doc{id=DocId}=Doc, [ResultKVs|RestResults], [{View, KVs}|RestViewKVs], ViewKVsAcc, ViewIdKeysAcc) ->
    % Take any identical keys and combine the values. Sorting first makes
    % equal keys adjacent, so the fold only needs to compare each KV with
    % the head of its accumulator.
    ResultKVs2 = lists:foldl(
        fun({Key,Value}, [{PrevKey,PrevVal}|AccRest]) ->
            case Key == PrevKey of
            true ->
                case PrevVal of
                {dups, Dups} ->
                    % already collapsed: prepend this value to the list
                    [{PrevKey, {dups, [Value|Dups]}} | AccRest];
                _ ->
                    % second occurrence: start a dups wrapper
                    [{PrevKey, {dups, [Value,PrevVal]}} | AccRest]
                end;
            false ->
                [{Key,Value},{PrevKey,PrevVal}|AccRest]
            end;
        (KV, []) ->
            [KV]
        end, [], lists:sort(ResultKVs)),
    NewKVs = [{{Key, DocId}, Value} || {Key, Value} <- ResultKVs2],
    NewViewKVsAcc = [{View, NewKVs ++ KVs} | ViewKVsAcc],
    NewViewIdKeys = [{View#view.id_num, Key} || {Key, _Value} <- ResultKVs2],
    NewViewIdKeysAcc = NewViewIdKeys ++ ViewIdKeysAcc,
    view_insert_doc_query_results(Doc, RestResults, RestViewKVs, NewViewKVsAcc, NewViewIdKeysAcc).

%% Run Docs through the group's map functions. Lazily starts the external
%% doc-map (query server) process on first use and caches it in the group.
view_compute(Group, []) ->
    {Group, []};
view_compute(#group{def_lang=DefLang, query_server=QueryServerIn}=Group, Docs) ->
    {ok, QueryServer} =
    case QueryServerIn of
    nil -> % doc map not started
        Definitions = [View#view.def || View <- Group#group.views],
        couch_query_servers:start_doc_map(DefLang, Definitions);
    _ ->
        {ok, QueryServerIn}
    end,
    {ok, Results} = couch_query_servers:map_docs(QueryServer, Docs),
    {Group#group{query_server=QueryServer}, Results}.
%% Apply one merged batch of map output to the group's btrees: update the
%% id btree (DocId -> [{ViewId, Key}]), use the previous id-btree entries
%% to find stale rows, then add/remove rows in each view btree. Returns
%% the group with updated btrees and current_seq advanced to NewSeq.
write_changes(Group, ViewKeyValuesToAdd, DocIdViewIdKeys, NewSeq, InitialBuild) ->
    #group{id_btree=IdBtree} = Group,

    AddDocIdViewIdKeys = [{DocId, ViewIdKeys} || {DocId, ViewIdKeys} <- DocIdViewIdKeys, ViewIdKeys /= []],
    if InitialBuild ->
        % first index build: no prior rows exist, so skip lookups/removals
        RemoveDocIds = [],
        LookupDocIds = [];
    true ->
        % docs whose map output is now empty lose their id-btree entry
        RemoveDocIds = [DocId || {DocId, ViewIdKeys} <- DocIdViewIdKeys, ViewIdKeys == []],
        LookupDocIds = [DocId || {DocId, _ViewIdKeys} <- DocIdViewIdKeys]
    end,
    % single btree pass: read old entries, write new ones, drop empties
    {ok, LookupResults, IdBtree2}
        = couch_btree:query_modify(IdBtree, LookupDocIds, AddDocIdViewIdKeys, RemoveDocIds),
    % the old entries tell us exactly which {Key, DocId} rows are stale
    % in which view; collect them as ViewId -> [{Key, DocId}]
    KeysToRemoveByView = lists:foldl(
        fun(LookupResult, KeysToRemoveByViewAcc) ->
            case LookupResult of
            {ok, {DocId, ViewIdKeys}} ->
                lists:foldl(
                    fun({ViewId, Key}, KeysToRemoveByViewAcc2) ->
                        dict:append(ViewId, {Key, DocId}, KeysToRemoveByViewAcc2)
                    end,
                    KeysToRemoveByViewAcc, ViewIdKeys);
            {not_found, _} ->
                KeysToRemoveByViewAcc
            end
        end,
        dict:new(), LookupResults),
    % remove stale rows and insert the new ones, one pass per view btree
    Views2 = lists:zipwith(fun(View, {_View, AddKeyValues}) ->
            KeysToRemove = couch_util:dict_find(View#view.id_num, KeysToRemoveByView, []),
            {ok, ViewBtree2} = couch_btree:add_remove(View#view.btree, AddKeyValues, KeysToRemove),
            View#view{btree = ViewBtree2}
        end, Group#group.views, ViewKeyValuesToAdd),
    Group#group{views=Views2, current_seq=NewSeq, id_btree=IdBtree2}.


diff --git a/apps/couch/src/couch_work_queue.erl b/apps/couch/src/couch_work_queue.erl
new file mode 100644
index 00000000..decfcad8
--- /dev/null
+++ b/apps/couch/src/couch_work_queue.erl
@@ -0,0 +1,115 @@
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License.
You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

%% A bounded work queue (gen_server) used to pipeline producer/consumer
%% stages. Producers call queue/2 and are blocked (their call is left
%% unreplied) once either the item-count or byte-size limit is reached;
%% a single consumer drains items with dequeue/1,2.
-module(couch_work_queue).
-behaviour(gen_server).

-export([new/2,queue/2,dequeue/1,dequeue/2,close/1]).
-export([init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]).

%% gen_server state.
-record(q, {
    queue=queue:new(),      % buffered items, FIFO
    blocked=[],             % From refs of producers awaiting reply (queue full)
    max_size,               % byte-size threshold at which producers block
    max_items,              % item-count threshold at which producers block
    items=0,                % number of items currently queued
    size=0,                 % total term_to_binary byte size of queued items
    work_waiter=nil,        % {From, Max} of a consumer waiting for work, or nil
    close_on_dequeue=false  % close/1 arrived while items were still queued
}).

%% Start a linked work-queue process with the given limits.
new(MaxSize, MaxItems) ->
    gen_server:start_link(couch_work_queue, {MaxSize, MaxItems}, []).

%% Enqueue Item. Does not return until the queue has room (producers are
%% flow-controlled by delaying the reply).
queue(Wq, Item) ->
    gen_server:call(Wq, {queue, Item}, infinity).

%% Dequeue every queued item, waiting for at least one. Returns
%% {ok, Items} or closed.
dequeue(Wq) ->
    dequeue(Wq, all).

%% Dequeue up to MaxItems items (or the atom all). The broad catch maps
%% any call failure — e.g. the queue process already exited after
%% close/1 — onto the closed result; callers treat both identically.
dequeue(Wq, MaxItems) ->
    try gen_server:call(Wq, {dequeue, MaxItems}, infinity)
    catch
        _:_ -> closed
    end.

%% Ask the queue to shut down once drained (immediately if already empty).
close(Wq) ->
    gen_server:cast(Wq, close).


init({MaxSize,MaxItems}) ->
    {ok, #q{max_size=MaxSize, max_items=MaxItems}}.

%% If a consumer is still blocked in dequeue when the queue terminates,
%% release it with closed so it doesn't hang forever.
terminate(_Reason, #q{work_waiter=nil}) ->
    ok;
terminate(_Reason, #q{work_waiter={WWFrom, _}}) ->
    gen_server:reply(WWFrom, closed).
%% queue: add Item to the buffer. If a consumer is already waiting, hand
%% the item straight over. Otherwise buffer it; once either limit is
%% reached the producer's reply is withheld (flow control) until a
%% consumer drains the queue.
handle_call({queue, Item}, From, #q{work_waiter=nil}=Q0) ->
    Q = Q0#q{size=Q0#q.size + byte_size(term_to_binary(Item)),
            items=Q0#q.items + 1,
            queue=queue:in(Item, Q0#q.queue)},
    case (Q#q.size >= Q#q.max_size) orelse
            (Q#q.items >= Q#q.max_items) of
    true ->
        % queue full: block this producer until a dequeue makes room
        {noreply, Q#q{blocked=[From | Q#q.blocked]}};
    false ->
        {reply, ok, Q}
    end;
handle_call({queue, Item}, _From, #q{work_waiter={WWFrom, _Max}}=Q) ->
    % a consumer is parked waiting for work; bypass the buffer entirely
    gen_server:reply(WWFrom, {ok, [Item]}),
    {reply, ok, Q#q{work_waiter=nil}};
handle_call({dequeue, _Max}, _From, #q{work_waiter=WW}) when WW /= nil ->
    exit("Only one caller allowed to wait for work at a time");
handle_call({dequeue, Max}, From, #q{items=0}=Q) ->
    % nothing buffered yet; park the consumer until an item arrives
    {noreply, Q#q{work_waiter={From, Max}}};
handle_call({dequeue, Max}, _From, #q{queue=Queue, max_size=MaxSize,
        max_items=MaxItems, items=Items, size=Size,
        close_on_dequeue=Close}=Q) ->
    if Max >= Items orelse Max == all ->
        % full drain: release every blocked producer and reset counters
        [gen_server:reply(From, ok) || From <- Q#q.blocked],
        Q2 = #q{max_size=MaxSize, max_items=MaxItems},
        if Close ->
            {stop, normal, {ok, queue:to_list(Queue)}, Q2};
        true ->
            {reply, {ok, queue:to_list(Queue)}, Q2}
        end;
    true ->
        {DequeuedItems, Queue2, Blocked2} =
            dequeue_items(Max, Queue, Q#q.blocked, []),
        % BUGFIX: also shrink the byte-size accounting. Previously only
        % `items` was decremented here and `size` was reset solely on a
        % full drain, so with partial dequeues `size` grew without bound
        % and producers blocked forever once it crossed max_size.
        % term_to_binary is deterministic, so this matches the amount
        % added when each item was queued.
        DequeuedSize = lists:sum(
            [byte_size(term_to_binary(Item)) || Item <- DequeuedItems]),
        {reply, {ok, DequeuedItems},
            Q#q{items=Items-Max, size=Size-DequeuedSize,
                blocked=Blocked2, queue=Queue2}}
    end.

%% Pop NumItems items off Queue (which must hold at least that many),
%% replying ok to one blocked producer per item removed so producers are
%% released at the same rate the queue is drained. Returns the items in
%% FIFO order plus the remaining queue and still-blocked producers.
dequeue_items(0, Queue, Blocked, DequeuedAcc) ->
    {lists:reverse(DequeuedAcc), Queue, Blocked};
dequeue_items(NumItems, Queue, Blocked, DequeuedAcc) ->
    {{value, Item}, Queue2} = queue:out(Queue),
    case Blocked of
    [] ->
        Blocked2 = Blocked;
    [From|Blocked2] ->
        gen_server:reply(From, ok)
    end,
    dequeue_items(NumItems-1, Queue2, Blocked2, [Item | DequeuedAcc]).


%% close: stop now if empty, otherwise defer until the next full drain
%% (the dequeue clause above checks close_on_dequeue).
handle_cast(close, #q{items=0}=Q) ->
    {stop, normal, Q};
handle_cast(close, Q) ->
    {noreply, Q#q{close_on_dequeue=true}}.


code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% No out-of-band messages are expected; crash on anything unknown so
%% the fault is visible rather than silently ignored.
handle_info(X, Q) ->
    {stop, X, Q}.