| author | Noah Slater <nslater@apache.org> | 2009-07-06 00:33:50 +0000 |
|---|---|---|
| committer | Noah Slater <nslater@apache.org> | 2009-07-06 00:33:50 +0000 |
| commit | 282b96ddd9a84b740788c2358ec0f5fedafb7cc6 (patch) | |
| tree | fb48e605ceb8079d0195d3b1ec0eca7110fa7ef2 /src/couchdb | |
| parent | b5cc085d3bc6316063f14adedf20632ee904875d (diff) | |
trimmed trailing whitespace
git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@791350 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'src/couchdb')
38 files changed, 604 insertions, 604 deletions
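The cleanup itself is mechanical. As a hypothetical sketch only (the module and function names below are invented for illustration and are not part of this commit), an Erlang helper along these lines would apply the same trailing-whitespace trim to a single file:

```erlang
%% Hypothetical sketch, not part of this commit: strip trailing spaces and
%% tabs from every line of a file, i.e. the edit this diff applies by hand.
-module(trim_trailing).
-export([file/1]).

file(Path) ->
    {ok, Bin} = file:read_file(Path),
    %% split on newlines, trim each line, then re-join as an iolist
    Lines = re:split(Bin, "\n", [{return, binary}]),
    Trimmed = [re:replace(Line, "[ \t]+$", "", [{return, binary}])
               || Line <- Lines],
    ok = file:write_file(Path, join_lines(Trimmed)).

%% rebuild the file contents, reinserting the newlines removed by re:split/3
join_lines([]) -> [];
join_lines([First | Rest]) ->
    [First | [[<<"\n">>, Line] || Line <- Rest]].
```

Applied file by file (for example, `trim_trailing:file("src/couchdb/couch_btree.erl")`), a helper like this would reproduce the kind of whitespace-only hunks summarized below; it is illustrative, not the tooling actually used.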
All of the hunks in this commit are whitespace-only: trailing spaces and tabs are removed and the code is otherwise unchanged (hence the matching insertion and deletion counts). This excerpt covers 19 of the 38 changed files, ending part-way through couch_httpd_show.erl:

diff --git a/src/couchdb/couch_batch_save.erl b/src/couchdb/couch_batch_save.erl
index 43a6f2dd..c1e5b866 100644
diff --git a/src/couchdb/couch_batch_save_sup.erl b/src/couchdb/couch_batch_save_sup.erl
index 42cf1aba..678e0a89 100644
diff --git a/src/couchdb/couch_btree.erl b/src/couchdb/couch_btree.erl
index c9079781..8df3cd66 100644
diff --git a/src/couchdb/couch_config.erl b/src/couchdb/couch_config.erl
index 5b93c4ec..a182b80c 100644
diff --git a/src/couchdb/couch_config_writer.erl b/src/couchdb/couch_config_writer.erl
index e47b9052..9861f842 100644
diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl
index 2f0fa847..33b4d542 100644
diff --git a/src/couchdb/couch_db.hrl b/src/couchdb/couch_db.hrl
index 905b489b..abb301eb 100644
diff --git a/src/couchdb/couch_db_update_notifier_sup.erl b/src/couchdb/couch_db_update_notifier_sup.erl
index 69d6b1b0..76400637 100644
diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl
index 6fef29eb..b715a3bf 100644
diff --git a/src/couchdb/couch_doc.erl b/src/couchdb/couch_doc.erl
index b9747a01..6c9a119c 100644
diff --git a/src/couchdb/couch_erl_driver.c b/src/couchdb/couch_erl_driver.c
index 18fabc2b..0569f0c3 100644
diff --git a/src/couchdb/couch_external_manager.erl b/src/couchdb/couch_external_manager.erl
index 034e0c50..1becaa9c 100644
diff --git a/src/couchdb/couch_external_server.erl b/src/couchdb/couch_external_server.erl
index d81c4f85..107e27d6 100644
diff --git a/src/couchdb/couch_file.erl b/src/couchdb/couch_file.erl
index 2021bf5b..65863ee2 100644
diff --git a/src/couchdb/couch_httpd.erl b/src/couchdb/couch_httpd.erl
index 91877cca..5c0869c1 100644
diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl
index edb2f310..c00fd873 100644
diff --git a/src/couchdb/couch_httpd_external.erl b/src/couchdb/couch_httpd_external.erl
index 949bd83a..709d8337 100644
diff --git a/src/couchdb/couch_httpd_misc_handlers.erl b/src/couchdb/couch_httpd_misc_handlers.erl
index a49bbef6..e7c04997 100644
diff --git a/src/couchdb/couch_httpd_show.erl b/src/couchdb/couch_httpd_show.erl
index 9b65c076..854b3d80 100644
We need to control the Etag and - % Vary headers. If the external function controls the Etag, we'd have to + % Here we embark on the delicate task of replacing or creating the + % headers on the JsonResponse object. We need to control the Etag and + % Vary headers. If the external function controls the Etag, we'd have to % run it to check for a match, which sort of defeats the purpose. case proplists:get_value(<<"headers">>, ExternalResponse, nil) of nil -> @@ -397,4 +397,4 @@ apply_etag({ExternalResponse}, CurrentEtag) -> Field end || Field <- ExternalResponse]} end. - + diff --git a/src/couchdb/couch_httpd_view.erl b/src/couchdb/couch_httpd_view.erl index 0feb2fac..c0d7be7f 100644 --- a/src/couchdb/couch_httpd_view.erl +++ b/src/couchdb/couch_httpd_view.erl @@ -83,12 +83,12 @@ handle_temp_view_req(#httpd{method='POST'}=Req, Db) -> case proplists:get_value(<<"reduce">>, Props, null) of null -> QueryArgs = parse_view_params(Req, Keys, map), - {ok, View, Group} = couch_view:get_temp_map_view(Db, Language, + {ok, View, Group} = couch_view:get_temp_map_view(Db, Language, DesignOptions, MapSrc), output_map_view(Req, View, Group, Db, QueryArgs, Keys); RedSrc -> QueryArgs = parse_view_params(Req, Keys, reduce), - {ok, View, Group} = couch_view:get_temp_reduce_view(Db, Language, + {ok, View, Group} = couch_view:get_temp_reduce_view(Db, Language, DesignOptions, MapSrc, RedSrc), output_reduce_view(Req, View, Group, QueryArgs, Keys) end; @@ -105,7 +105,7 @@ output_map_view(Req, View, Group, Db, QueryArgs, nil) -> start_docid = StartDocId } = QueryArgs, CurrentEtag = view_group_etag(Group), - couch_httpd:etag_respond(Req, CurrentEtag, fun() -> + couch_httpd:etag_respond(Req, CurrentEtag, fun() -> {ok, RowCount} = couch_view:get_row_count(View), Start = {StartKey, StartDocId}, FoldlFun = make_view_fold_fun(Req, QueryArgs, CurrentEtag, Db, RowCount, #view_fold_helper_funs{reduce_count=fun couch_view:reduce_to_count/1}), @@ -113,7 +113,7 @@ output_map_view(Req, View, Group, Db, QueryArgs, nil) -> FoldResult = couch_view:fold(View, Start, Dir, FoldlFun, FoldAccInit), finish_view_fold(Req, RowCount, FoldResult) end); - + output_map_view(Req, View, Group, Db, QueryArgs, Keys) -> #view_query_args{ limit = Limit, @@ -122,7 +122,7 @@ output_map_view(Req, View, Group, Db, QueryArgs, Keys) -> start_docid = StartDocId } = QueryArgs, CurrentEtag = view_group_etag(Group, Keys), - couch_httpd:etag_respond(Req, CurrentEtag, fun() -> + couch_httpd:etag_respond(Req, CurrentEtag, fun() -> {ok, RowCount} = couch_view:get_row_count(View), FoldAccInit = {Limit, SkipCount, undefined, []}, FoldResult = lists:foldl( @@ -132,7 +132,7 @@ output_map_view(Req, View, Group, Db, QueryArgs, Keys) -> QueryArgs#view_query_args{ start_key = Key, end_key = Key - }, CurrentEtag, Db, RowCount, + }, CurrentEtag, Db, RowCount, #view_fold_helper_funs{ reduce_count = fun couch_view:reduce_to_count/1 }), @@ -156,11 +156,11 @@ output_reduce_view(Req, View, Group, QueryArgs, nil) -> couch_httpd:etag_respond(Req, CurrentEtag, fun() -> {ok, GroupRowsFun, RespFun} = make_reduce_fold_funs(Req, GroupLevel, QueryArgs, CurrentEtag, #reduce_fold_helper_funs{}), FoldAccInit = {Limit, Skip, undefined, []}, - {ok, {_, _, Resp, _}} = couch_view:fold_reduce(View, Dir, {StartKey, StartDocId}, + {ok, {_, _, Resp, _}} = couch_view:fold_reduce(View, Dir, {StartKey, StartDocId}, {EndKey, EndDocId}, GroupRowsFun, RespFun, FoldAccInit), finish_reduce_fold(Req, Resp) end); - + output_reduce_view(Req, View, Group, QueryArgs, Keys) -> #view_query_args{ limit = Limit, 
@@ -177,7 +177,7 @@ output_reduce_view(Req, View, Group, QueryArgs, Keys) -> fun(Key, {Resp, RedAcc}) -> % run the reduce once for each key in keys, with limit etc reapplied for each key FoldAccInit = {Limit, Skip, Resp, RedAcc}, - {_, {_, _, Resp2, RedAcc2}} = couch_view:fold_reduce(View, Dir, {Key, StartDocId}, + {_, {_, _, Resp2, RedAcc2}} = couch_view:fold_reduce(View, Dir, {Key, StartDocId}, {Key, EndDocId}, GroupRowsFun, RespFun, FoldAccInit), % Switch to comma {Resp2, RedAcc2} @@ -198,7 +198,7 @@ get_reduce_type(Req) -> parse_view_params(Req, Keys, ViewType) -> QueryList = couch_httpd:qs(Req), - QueryParams = + QueryParams = lists:foldl(fun({K, V}, Acc) -> parse_view_param(K, V) ++ Acc end, [], QueryList), @@ -360,13 +360,13 @@ make_view_fold_fun(Req, QueryArgs, Etag, Db, TotalViewCount, HelperFuns) -> inclusive_end = InclusiveEnd, direction = Dir } = QueryArgs, - + #view_fold_helper_funs{ passed_end = PassedEndFun, start_response = StartRespFun, send_row = SendRowFun, reduce_count = ReduceCountFun - } = apply_default_helper_funs(HelperFuns, + } = apply_default_helper_funs(HelperFuns, {Dir, EndKey, EndDocId, InclusiveEnd}), #view_query_args{ @@ -390,12 +390,12 @@ make_view_fold_fun(Req, QueryArgs, Etag, Db, TotalViewCount, HelperFuns) -> Offset = ReduceCountFun(OffsetReds), {ok, Resp2, RowFunAcc0} = StartRespFun(Req, Etag, TotalViewCount, Offset, RowFunAcc), - {Go, RowFunAcc2} = SendRowFun(Resp2, Db, {{Key, DocId}, Value}, + {Go, RowFunAcc2} = SendRowFun(Resp2, Db, {{Key, DocId}, Value}, IncludeDocs, RowFunAcc0), {Go, {AccLimit - 1, 0, Resp2, RowFunAcc2}}; {_, AccLimit, _, Resp} when (AccLimit > 0) -> % rendering all other rows - {Go, RowFunAcc2} = SendRowFun(Resp, Db, {{Key, DocId}, Value}, + {Go, RowFunAcc2} = SendRowFun(Resp, Db, {{Key, DocId}, Value}, IncludeDocs, RowFunAcc), {Go, {AccLimit - 1, 0, Resp, RowFunAcc2}} end @@ -439,7 +439,7 @@ make_reduce_fold_funs(Req, GroupLevel, _QueryArgs, Etag, HelperFuns) -> when is_integer(GroupLevel), is_list(Key) -> % group_level and we haven't responded yet {ok, Resp2, RowAcc} = StartRespFun(Req, Etag, RowAcc0), - {Go, RowAcc2} = SendRowFun(Resp2, {lists:sublist(Key, GroupLevel), Red}, RowAcc), + {Go, RowAcc2} = SendRowFun(Resp2, {lists:sublist(Key, GroupLevel), Red}, RowAcc), {Go, {AccLimit - 1, 0, Resp2, RowAcc2}}; (Key, Red, {AccLimit, 0, Resp, RowAcc}) when is_integer(GroupLevel), is_list(Key) -> @@ -551,15 +551,15 @@ json_reduce_start_resp(Req, Etag, _Acc0) -> send_json_reduce_row(Resp, {Key, Value}, RowFront) -> send_chunk(Resp, RowFront ++ ?JSON_ENCODE({[{key, Key}, {value, Value}]})), - {ok, ",\r\n"}. + {ok, ",\r\n"}. view_group_etag(Group) -> view_group_etag(Group, nil). - + view_group_etag(#group{sig=Sig,current_seq=CurrentSeq}, Extra) -> % This is not as granular as it could be. % If there are updates to the db that do not effect the view index, - % they will change the Etag. For more granular Etags we'd need to keep + % they will change the Etag. For more granular Etags we'd need to keep % track of the last Db seq that caused an index change. couch_httpd:make_etag({Sig, CurrentSeq, Extra}). @@ -591,10 +591,10 @@ view_row_with_doc(Db, {{Key, DocId}, Value}, Rev) -> {not_found, deleted} -> {[{id, DocId}, {key, Key}, {value, Value}]}; Doc -> - JsonDoc = couch_doc:to_json_obj(Doc, []), + JsonDoc = couch_doc:to_json_obj(Doc, []), {[{id, DocId}, {key, Key}, {value, Value}, {doc, JsonDoc}]} end. 
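A note on the row terms built in view_row_with_doc above: throughout these modules a JSON object is written as a one-element tuple wrapping a property list, {[{Key, Value}, ...]}, while a JSON array is a plain list. A small standalone sketch of building and reading such a row (illustrative only; the field set mirrors the rows shown above):

    -module(ejson_row_sketch).
    -export([row/3, field/2]).

    %% Build a view row in the {[{K, V}]} object convention.
    row(Id, Key, Value) ->
        {[{id, Id}, {key, Key}, {value, Value}]}.

    %% Pull one field back out of such an object.
    field(Name, {Props}) ->
        case lists:keyfind(Name, 1, Props) of
            {Name, V} -> V;
            false     -> undefined
        end.

    %% Example: field(key, row(<<"doc1">>, 42, null)) returns 42.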
- + finish_view_fold(Req, TotalRows, FoldResult) -> case FoldResult of {ok, {_, _, undefined, _}} -> diff --git a/src/couchdb/couch_js.c b/src/couchdb/couch_js.c index d95b9db0..43f2da12 100644 --- a/src/couchdb/couch_js.c +++ b/src/couchdb/couch_js.c @@ -489,12 +489,12 @@ char* JSValToChar(JSContext* context, jsval* arg) { jsmsg = JS_ValueToString(context,*arg); len = JS_GetStringLength(jsmsg); tmp = JS_GetStringBytes(jsmsg); - + c = (char*)malloc(len+1); c[len] = '\0'; int i; - + for(i = 0;i < len;i++) { c[i] = tmp[i]; } @@ -541,11 +541,11 @@ struct curl_slist* generateCurlHeaders(JSContext* context,jsval* arg) { } JSObject* iterator = JS_NewPropertyIterator(context,header_obj); - + jsval *jsProperty = JS_malloc(context,sizeof(jsval)); jsval *jsValue = JS_malloc(context,sizeof(jsval)); jsid *jsId = JS_malloc(context,sizeof(jsid)); - + while(JS_NextProperty(context,iterator,jsId) == JS_TRUE) { if(*jsId == JSVAL_VOID) { @@ -569,7 +569,7 @@ struct curl_slist* generateCurlHeaders(JSContext* context,jsval* arg) { append_Buffer(bTmp,"",1); slist = curl_slist_append(slist,bTmp->data); - + free_Buffer(bTmp); free(jsPropertyValue); free(jsPropertyName); @@ -595,7 +595,7 @@ GetHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) // Run GC JS_MaybeGC(context); - + // Init Curl if((handle = curl_easy_init()) == NULL) { return JS_FALSE; @@ -616,7 +616,7 @@ GetHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) curl_easy_setopt(handle,CURLOPT_WRITEHEADER,b); curl_easy_setopt(handle,CURLOPT_URL,url); curl_easy_setopt(handle,CURLOPT_HTTPGET,1); - curl_easy_setopt(handle,CURLOPT_FOLLOWLOCATION,1); + curl_easy_setopt(handle,CURLOPT_FOLLOWLOCATION,1); curl_easy_setopt(handle,CURLOPT_NOPROGRESS,1); curl_easy_setopt(handle,CURLOPT_IPRESOLVE,CURL_IPRESOLVE_V4); @@ -654,7 +654,7 @@ GetHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) /* Shrink the buffer to the real size and store its value in rval */ shrink_Buffer(b); BufferToJSVal(context,b,rval); - + // Free Buffer free_Buffer(b); @@ -679,7 +679,7 @@ HeadHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval // Run GC JS_MaybeGC(context); - + // Init Curl if((handle = curl_easy_init()) == NULL) { return JS_FALSE; @@ -741,7 +741,7 @@ HeadHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval /* Shrink the buffer to the real size and store its value in rval */ shrink_Buffer(b); BufferToJSVal(context,b,rval); - + // Free Buffer free_Buffer(b); @@ -803,7 +803,7 @@ PostHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval struct curl_slist *slist = generateCurlHeaders(context,argv+2); // Initialize Headers if(slist != NULL) { - curl_easy_setopt(handle,CURLOPT_HTTPHEADER,slist); + curl_easy_setopt(handle,CURLOPT_HTTPHEADER,slist); } int exitcode; @@ -858,17 +858,17 @@ PutHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) // Allocate buffer that will store the get resultant b = init_Buffer(); - + // Allocate data buffer and move data into them b_data = (BufferCount)malloc(sizeof(Buffer) + sizeof(int)); b_data->buffer = init_Buffer(); b_data->pos = 0; - + data = JSValToChar(context,(argv+1)); readlen = strlen(data); - - + + // TODO: remove strlen append_Buffer(b_data->buffer,data,readlen); @@ -893,7 +893,7 @@ PutHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) curl_easy_setopt(handle,CURLOPT_URL,url); curl_easy_setopt(handle,CURLOPT_UPLOAD,1); 
curl_easy_setopt(handle,CURLOPT_INFILESIZE,readlen); - + // Curl structure @@ -908,11 +908,11 @@ PutHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) // Use only ipv4 curl_easy_setopt(handle,CURLOPT_IPRESOLVE,CURL_IPRESOLVE_V4); - + // Perform int exitcode; - + if((exitcode = curl_easy_perform(handle)) != 0) { if(slist != NULL) curl_slist_free_all(slist); @@ -939,7 +939,7 @@ PutHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) shrink_Buffer(b); BufferToJSVal(context,b,rval); - + free_Buffer(b); if(rval == NULL) { @@ -1023,7 +1023,7 @@ DelHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) shrink_Buffer(b); BufferToJSVal(context,b,rval); - + if(rval == NULL) { curl_easy_cleanup(handle); return JS_FALSE; @@ -1105,7 +1105,7 @@ CopyHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval shrink_Buffer(b); BufferToJSVal(context,b,rval); - + if(rval == NULL) { curl_easy_cleanup(handle); return JS_FALSE; @@ -1187,7 +1187,7 @@ MoveHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval shrink_Buffer(b); BufferToJSVal(context,b,rval); - + if(rval == NULL) { curl_easy_cleanup(handle); return JS_FALSE; diff --git a/src/couchdb/couch_key_tree.erl b/src/couchdb/couch_key_tree.erl index d08f5ede..3177087d 100644 --- a/src/couchdb/couch_key_tree.erl +++ b/src/couchdb/couch_key_tree.erl @@ -26,14 +26,14 @@ % partial trees arranged by how much they are cut off. merge(A, B) -> - {Merged, HasConflicts} = + {Merged, HasConflicts} = lists:foldl( fun(InsertTree, {AccTrees, AccConflicts}) -> {ok, Merged, Conflicts} = merge_one(AccTrees, InsertTree, [], false), {Merged, Conflicts or AccConflicts} end, {A, false}, B), - if HasConflicts or + if HasConflicts or ((length(Merged) /= length(A)) and (length(Merged) /= length(B))) -> Conflicts = conflicts; true -> @@ -61,7 +61,7 @@ merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, OutAcc, ConflictsAcc) no -> merge_one(Rest, {StartB, TreeB}, [{StartA, TreeA} | OutAcc], ConflictsAcc) end. - + merge_at([], _Place, _Insert) -> no; merge_at([{Key, Value, SubTree}|Sibs], 0, {InsertKey, InsertValue, InsertSubTree}) -> @@ -120,7 +120,7 @@ find_missing([{Start, {Key, Value, SubTree}} | RestTree], SeachKeys) -> ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos < Start], Missing = find_missing_simple(Start, [{Key, Value, SubTree}], PossibleKeys), find_missing(RestTree, ImpossibleKeys ++ Missing). - + find_missing_simple(_Pos, _Tree, []) -> []; find_missing_simple(_Pos, [], SeachKeys) -> @@ -128,7 +128,7 @@ find_missing_simple(_Pos, [], SeachKeys) -> find_missing_simple(Pos, [{Key, _, SubTree} | RestTree], SeachKeys) -> PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos >= Pos], ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos < Pos], - + SrcKeys2 = PossibleKeys -- [{Pos, Key}], SrcKeys3 = find_missing_simple(Pos + 1, SubTree, SrcKeys2), ImpossibleKeys ++ find_missing_simple(Pos, RestTree, SrcKeys3). @@ -145,15 +145,15 @@ filter_leafs([{Pos, [{LeafKey, _}|_]} = Path |Rest], Keys, FilteredAcc, RemovedK % this did match a key, remove both the node and the input key filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc]) end. 
- + % Removes any branches from the tree whose leaf node(s) are in the Keys remove_leafs(Trees, Keys) -> % flatten each branch in a tree into a tree path Paths = get_all_leafs_full(Trees), - + % filter out any that are in the keys list. {FilteredPaths, RemovedKeys} = filter_leafs(Paths, Keys, [], []), - + % convert paths back to trees NewTree = lists:foldl( fun({PathPos, Path},TreeAcc) -> @@ -170,7 +170,7 @@ remove_leafs(Trees, Keys) -> % are returned. get_key_leafs(Tree, Keys) -> get_key_leafs(Tree, Keys, []). - + get_key_leafs(_, [], Acc) -> {Acc, []}; get_key_leafs([], Keys, Acc) -> @@ -178,14 +178,14 @@ get_key_leafs([], Keys, Acc) -> get_key_leafs([{Pos, Tree}|Rest], Keys, Acc) -> {Gotten, RemainingKeys} = get_key_leafs_simple(Pos, [Tree], Keys, []), get_key_leafs(Rest, RemainingKeys, Gotten ++ Acc). - + get_key_leafs_simple(_Pos, _Tree, [], _KeyPathAcc) -> {[], []}; get_key_leafs_simple(_Pos, [], KeysToGet, _KeyPathAcc) -> {[], KeysToGet}; get_key_leafs_simple(Pos, [{Key, _Value, SubTree}=Tree | RestTree], KeysToGet, KeyPathAcc) -> case lists:delete({Pos, Key}, KeysToGet) of - KeysToGet -> % same list, key not found + KeysToGet -> % same list, key not found {LeafsFound, KeysToGet2} = get_key_leafs_simple(Pos + 1, SubTree, KeysToGet, [Key | KeyPathAcc]), {RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet2, KeyPathAcc), {LeafsFound ++ RestLeafsFound, KeysRemaining}; @@ -201,10 +201,10 @@ get(Tree, KeysToGet) -> {KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet), FixedResults = [ {Value, {Pos, [Key0 || {Key0, _} <- Path]}} || {Pos, [{_Key, Value}|_]=Path} <- KeyPaths], {FixedResults, KeysNotFound}. - + get_full_key_paths(Tree, Keys) -> get_full_key_paths(Tree, Keys, []). - + get_full_key_paths(_, [], Acc) -> {Acc, []}; get_full_key_paths([], Keys, Acc) -> @@ -212,8 +212,8 @@ get_full_key_paths([], Keys, Acc) -> get_full_key_paths([{Pos, Tree}|Rest], Keys, Acc) -> {Gotten, RemainingKeys} = get_full_key_paths(Pos, [Tree], Keys, []), get_full_key_paths(Rest, RemainingKeys, Gotten ++ Acc). - - + + get_full_key_paths(_Pos, _Tree, [], _KeyPathAcc) -> {[], []}; get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) -> @@ -233,12 +233,12 @@ get_full_key_paths(Pos, [{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPath get_all_leafs_full(Tree) -> get_all_leafs_full(Tree, []). - + get_all_leafs_full([], Acc) -> Acc; get_all_leafs_full([{Pos, Tree} | Rest], Acc) -> get_all_leafs_full(Rest, get_all_leafs_full_simple(Pos, [Tree], []) ++ Acc). - + get_all_leafs_full_simple(_Pos, [], _KeyPathAcc) -> []; get_all_leafs_full_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) -> @@ -253,7 +253,7 @@ get_all_leafs([], Acc) -> Acc; get_all_leafs([{Pos, Tree}|Rest], Acc) -> get_all_leafs(Rest, get_all_leafs_simple(Pos, [Tree], []) ++ Acc). - + get_all_leafs_simple(_Pos, [], _KeyPathAcc) -> []; get_all_leafs_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) -> @@ -266,7 +266,7 @@ count_leafs([]) -> 0; count_leafs([{_Pos,Tree}|Rest]) -> count_leafs_simple([Tree]) + count_leafs(Rest). - + count_leafs_simple([]) -> 0; count_leafs_simple([{_Key, _Value, []} | RestTree]) -> @@ -274,7 +274,7 @@ count_leafs_simple([{_Key, _Value, []} | RestTree]) -> count_leafs_simple([{_Key, _Value, SubTree} | RestTree]) -> count_leafs_simple(SubTree) + count_leafs_simple(RestTree). 
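For orientation, the revision trees handled by couch_key_tree are lists of {StartPos, Node} pairs, where each Node is {Key, Value, ChildNodes}. A tiny standalone sketch of that shape, with a leaf count that mirrors count_leafs/1 above (keys and values here are illustrative atoms):

    -module(key_tree_sketch).
    -export([demo/0]).

    %% One root at position 1 whose key 'a' has two leaf children 'b' and 'c'.
    demo_tree() ->
        [{1, {a, value_a, [{b, value_b, []},
                           {c, value_c, []}]}}].

    count_leafs([]) -> 0;
    count_leafs([{_Pos, Node} | Rest]) ->
        count_node(Node) + count_leafs(Rest).

    %% A node with no children is a leaf; otherwise only its children count.
    count_node({_Key, _Value, []}) -> 1;
    count_node({_Key, _Value, Children}) ->
        lists:sum([count_node(C) || C <- Children]).

    demo() ->
        2 = count_leafs(demo_tree()),
        ok.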
- + map(_Fun, []) -> []; map(Fun, [{Pos, Tree}|Rest]) -> @@ -287,7 +287,7 @@ map_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) -> Value2 = Fun({Pos, Key}, Value), [{Key, Value2, map_simple(Fun, Pos + 1, SubTree)} | map_simple(Fun, Pos, RestTree)]. - + map_leafs(_Fun, []) -> []; map_leafs(Fun, [{Pos, Tree}|Rest]) -> @@ -306,9 +306,9 @@ map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) -> stem(Trees, Limit) -> % flatten each branch in a tree into a tree path Paths = get_all_leafs_full(Trees), - + Paths2 = [{Pos, lists:sublist(Path, Limit)} || {Pos, Path} <- Paths], - + % convert paths back to trees lists:foldl( fun({PathPos, Path},TreeAcc) -> diff --git a/src/couchdb/couch_log.erl b/src/couchdb/couch_log.erl index 14c262d0..b5507bb6 100644 --- a/src/couchdb/couch_log.erl +++ b/src/couchdb/couch_log.erl @@ -43,7 +43,7 @@ stop() -> init([]) -> % read config and register for configuration changes - + % just stop if one of the config settings change. couch_server_sup % will restart us and then we will pick up the new settings. ok = couch_config:register( @@ -52,7 +52,7 @@ init([]) -> ("log", "level") -> ?MODULE:stop() end), - + Filename = couch_config:get("log", "file", "couchdb.log"), Level = couch_config:get("log", "level", "info"), diff --git a/src/couchdb/couch_query_servers.erl b/src/couchdb/couch_query_servers.erl index 5a1dc90a..077a7518 100644 --- a/src/couchdb/couch_query_servers.erl +++ b/src/couchdb/couch_query_servers.erl @@ -18,7 +18,7 @@ -export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3,stop/0]). -export([start_doc_map/2, map_docs/2, stop_doc_map/1]). -export([reduce/3, rereduce/3,validate_doc_update/5]). --export([render_doc_show/6, start_view_list/2, +-export([render_doc_show/6, start_view_list/2, render_list_head/4, render_list_row/3, render_list_tail/1]). % -export([test/0]). @@ -42,7 +42,7 @@ map_docs({_Lang, Pid}, Docs) -> Results = lists:map( fun(Doc) -> Json = couch_doc:to_json_obj(Doc, []), - + FunsResults = couch_os_process:prompt(Pid, [<<"map_doc">>, Json]), % the results are a json array of function map yields like this: % [FunResults1, FunResults2 ...] @@ -90,7 +90,7 @@ rereduce(Lang, RedSrcs, ReducedValues) -> {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []), Result; (FunSrc, Values) -> - [true, [Result]] = + [true, [Result]] = couch_os_process:prompt(Pid, [<<"rereduce">>, [FunSrc], Values]), Result end, RedSrcs, Grouped) @@ -121,7 +121,7 @@ os_reduce(_Lang, [], _KVs) -> {ok, []}; os_reduce(Lang, OsRedSrcs, KVs) -> Pid = get_os_process(Lang), - OsResults = try couch_os_process:prompt(Pid, + OsResults = try couch_os_process:prompt(Pid, [<<"reduce">>, OsRedSrcs, KVs]) of [true, Reductions] -> Reductions after @@ -143,22 +143,22 @@ builtin_reduce(rereduce, [<<"_count">>|BuiltinReds], KVs, Acc) -> builtin_sum_rows(KVs) -> lists:foldl(fun - ([_Key, Value], Acc) when is_number(Value) -> + ([_Key, Value], Acc) when is_number(Value) -> Acc + Value; - (_Else, _Acc) -> + (_Else, _Acc) -> throw({invalid_value, <<"builtin _sum function requires map values to be numbers">>}) end, 0, KVs). 
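The builtin _sum reduce shown just above is a plain fold over [Key, Value] pairs that insists every value is a number. A standalone equivalent with a sample call, matching the behaviour of the code above as far as it is shown here:

    -module(sum_rows_sketch).
    -export([sum_rows/1]).

    sum_rows(KVs) ->
        lists:foldl(
            fun([_Key, Value], Acc) when is_number(Value) ->
                    Acc + Value;
               (_Else, _Acc) ->
                    throw({invalid_value,
                           <<"builtin _sum function requires map values to be numbers">>})
            end, 0, KVs).

    %% Example: sum_rows([[<<"a">>, 1], [<<"b">>, 2.5]]) returns 3.5;
    %% sum_rows([[<<"a">>, <<"oops">>]]) throws {invalid_value, _}.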
- + validate_doc_update(Lang, FunSrc, EditDoc, DiskDoc, Ctx) -> Pid = get_os_process(Lang), JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]), JsonDiskDoc = if DiskDoc == nil -> null; - true -> + true -> couch_doc:to_json_obj(DiskDoc, [revs]) end, - try couch_os_process:prompt(Pid, + try couch_os_process:prompt(Pid, [<<"validate">>, FunSrc, JsonEditDoc, JsonDiskDoc, Ctx]) of 1 -> ok; @@ -181,7 +181,7 @@ render_doc_show(Lang, ShowSrc, DocId, Doc, Req, Db) -> {DocId, nil} -> {{append_docid(DocId, JsonReqIn)}, null}; _ -> {{append_docid(DocId, JsonReqIn)}, couch_doc:to_json_obj(Doc, [revs])} end, - try couch_os_process:prompt(Pid, + try couch_os_process:prompt(Pid, [<<"show">>, ShowSrc, JsonDoc, JsonReq]) of FormResp -> FormResp @@ -209,18 +209,18 @@ render_list_row({_Lang, Pid}, _, {Key, Value}) -> render_list_tail({Lang, Pid}) -> JsonResp = couch_os_process:prompt(Pid, [<<"list_end">>]), ok = ret_os_process(Lang, Pid), - JsonResp. - + JsonResp. + init([]) -> - + % read config and register for configuration changes - + % just stop if one of the config settings change. couch_server_sup % will restart us and then we will pick up the new settings. - + ok = couch_config:register( fun("query_servers" ++ _, _) -> ?MODULE:stop() diff --git a/src/couchdb/couch_ref_counter.erl b/src/couchdb/couch_ref_counter.erl index 0fbec729..59ede9c9 100644 --- a/src/couchdb/couch_ref_counter.erl +++ b/src/couchdb/couch_ref_counter.erl @@ -18,11 +18,11 @@ start(ChildProcs) -> gen_server:start(couch_ref_counter, {self(), ChildProcs}, []). - - + + drop(RefCounterPid) -> drop(RefCounterPid, self()). - + drop(RefCounterPid, Pid) -> gen_server:cast(RefCounterPid, {drop, Pid}). @@ -42,7 +42,7 @@ count(RefCounterPid) -> { referrers=dict:new() % a dict of each ref counting proc. }). - + init({Pid, ChildProcs}) -> [link(ChildProc) || ChildProc <- ChildProcs], Referrers = dict:from_list([{Pid, {erlang:monitor(process, Pid), 1}}]), diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl index f5b560e9..a503684b 100644 --- a/src/couchdb/couch_rep.erl +++ b/src/couchdb/couch_rep.erl @@ -12,7 +12,7 @@ -module(couch_rep). -behaviour(gen_server). --export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). -export([replicate/2]). @@ -24,24 +24,24 @@ -include("couch_db.hrl"). -include("../ibrowse/ibrowse.hrl"). -%% @spec replicate(Source::binary(), Target::binary()) -> +%% @spec replicate(Source::binary(), Target::binary()) -> %% {ok, Stats} | {error, Reason} -%% @doc Triggers a replication. Stats is a JSON Object with the following +%% @doc Triggers a replication. Stats is a JSON Object with the following %% keys: session_id (UUID), source_last_seq (integer), and history (array). %% Each element of the history is an Object with keys start_time, end_time, %% start_last_seq, end_last_seq, missing_checked, missing_found, docs_read, %% and docs_written. %% %% The supervisor will try to restart the replication in case of any error -%% other than shutdown. Just call this function again to listen for the +%% other than shutdown. Just call this function again to listen for the %% result of the retry. 
replicate(Source, Target) -> - + {ok, HostName} = inet:gethostname(), RepId = couch_util:to_hex( erlang:md5(term_to_binary([HostName, Source, Target]))), Args = [?MODULE, [RepId, Source,Target], []], - + Replicator = {RepId, {gen_server, start_link, Args}, transient, @@ -49,31 +49,31 @@ replicate(Source, Target) -> worker, [?MODULE] }, - + Server = case supervisor:start_child(couch_rep_sup, Replicator) of - {ok, Pid} -> + {ok, Pid} -> ?LOG_INFO("starting new replication ~p at ~p", [RepId, Pid]), Pid; {error, already_present} -> case supervisor:restart_child(couch_rep_sup, RepId) of - {ok, Pid} -> + {ok, Pid} -> ?LOG_INFO("starting replication ~p at ~p", [RepId, Pid]), Pid; - {error, running} -> + {error, running} -> %% this error occurs if multiple replicators are racing %% each other to start and somebody else won. Just grab %% the Pid by calling start_child again. - {error, {already_started, Pid}} = + {error, {already_started, Pid}} = supervisor:start_child(couch_rep_sup, Replicator), ?LOG_INFO("replication ~p already running at ~p", [RepId, Pid]), Pid end; - {error, {already_started, Pid}} -> + {error, {already_started, Pid}} -> ?LOG_INFO("replication ~p already running at ~p", [RepId, Pid]), Pid end, - - case gen_server:call(Server, get_result, infinity) of + + case gen_server:call(Server, get_result, infinity) of retry -> replicate(Source, Target); Else -> Else end. @@ -87,7 +87,7 @@ replicate(Source, Target) -> headers }). - + -record(state, { context, current_seq, @@ -103,19 +103,19 @@ replicate(Source, Target) -> init([RepId, Source, Target]) -> process_flag(trap_exit, true), - + {ok, DbSrc, SrcName} = open_db(Source), {ok, DbTgt, TgtName} = open_db(Target), - + DocKey = ?l2b(?LOCAL_DOC_PREFIX ++ RepId), - + {ok, InfoSrc} = get_db_info(DbSrc), {ok, InfoTgt} = get_db_info(DbTgt), - + ReplicationStartTime = httpd_util:rfc1123_date(), SrcInstanceStartTime = proplists:get_value(instance_start_time, InfoSrc), TgtInstanceStartTime = proplists:get_value(instance_start_time, InfoTgt), - + RepRecDocSrc = case open_doc(DbSrc, DocKey, []) of {ok, SrcDoc} -> @@ -123,7 +123,7 @@ init([RepId, Source, Target]) -> SrcDoc; _ -> #doc{id=DocKey} end, - + RepRecDocTgt = case open_doc(DbTgt, DocKey, []) of {ok, TgtDoc} -> @@ -131,11 +131,11 @@ init([RepId, Source, Target]) -> TgtDoc; _ -> #doc{id=DocKey} end, - + #doc{body={RepRecProps}} = RepRecDocSrc, #doc{body={RepRecPropsTgt}} = RepRecDocTgt, - - case proplists:get_value(<<"session_id">>, RepRecProps) == + + case proplists:get_value(<<"session_id">>, RepRecProps) == proplists:get_value(<<"session_id">>, RepRecPropsTgt) of true -> % if the records have the same session id, @@ -150,7 +150,7 @@ init([RepId, Source, Target]) -> OldSeqNum = 0, OldHistory = [] end, - + Context = [ {start_seq, OldSeqNum}, {history, OldHistory}, @@ -160,20 +160,20 @@ init([RepId, Source, Target]) -> {src_record, RepRecDocSrc}, {tgt_record, RepRecDocTgt} ], - + Stats = ets:new(replication_stats, [set, private]), ets:insert(Stats, {total_revs,0}), ets:insert(Stats, {missing_revs, 0}), ets:insert(Stats, {docs_read, 0}), ets:insert(Stats, {docs_written, 0}), ets:insert(Stats, {doc_write_failures, 0}), - + couch_task_status:add_task("Replication", <<SrcName/binary, " -> ", TgtName/binary>>, "Starting"), - + Parent = self(), Pid = spawn_link(fun() -> enum_docs_since(Parent,DbSrc,DbTgt,{OldSeqNum,0}) end), - + State = #state{ context = Context, current_seq = OldSeqNum, @@ -182,7 +182,7 @@ init([RepId, Source, Target]) -> target = DbTgt, stats = Stats }, - + {ok, State}. 
handle_call(get_result, From, #state{listeners=L,done=true} = State) -> {stop, normal, State#state{listeners=[From|L]}}; @@ -200,14 +200,14 @@ handle_call({replicate_doc, {Id, Revs}}, {Pid,_}, #state{enum_pid=Pid} = State) } = State, ets:update_counter(Stats, missing_revs, length(Revs)), - + %% get document(s) {ok, DocResults} = open_doc_revs(Source, Id, Revs, [latest]), Docs = [RevDoc || {ok, RevDoc} <- DocResults], ets:update_counter(Stats, docs_read, length(Docs)), - + %% save them (maybe in a buffer) - {NewBuffer, NewContext} = + {NewBuffer, NewContext} = case should_flush(lists:flatlength([Docs|Buffer])) of true -> Docs2 = lists:flatten([Docs|Buffer]), @@ -227,7 +227,7 @@ handle_call({replicate_doc, {Id, Revs}}, {Pid,_}, #state{enum_pid=Pid} = State) false -> {[Docs | Buffer], Context} end, - + {reply, ok, State#state{context=NewContext, docs_buffer=NewBuffer}}; handle_call({fin, {LastSeq, RevsCount}}, {Pid,_}, #state{enum_pid=Pid} = State) -> @@ -255,7 +255,7 @@ handle_info({'EXIT', Pid, Reason}, #state{enum_pid=Pid} = State) -> Parent = self(), NewPid = spawn_link(fun() -> enum_docs_since(Parent,Src,Tgt,{Seq,0}) end), {noreply, State#state{enum_pid=NewPid}}; - + %% if any linked process dies, respawn the enumerator to get things going again handle_info({'EXIT', _From, normal}, State) -> {noreply, State}; @@ -277,7 +277,7 @@ terminate(normal, State) -> target = Target, stats = Stats } = State, - + try update_docs(Target, lists:flatten(Buffer), [], replicated_changes) of {ok, Errors} -> dump_update_errors(Errors), @@ -289,18 +289,18 @@ terminate(normal, State) -> ?LOG_ERROR("attachment request failed during final write", []), exit({internal_server_error, replication_link_failure}) end, - + couch_task_status:update("Finishing"), - + {ok, NewRepHistory, _} = do_checkpoint(Source, Target, Context, Seq, Stats), ets:delete(Stats), close_db(Target), - + [Original|Rest] = Listeners, gen_server:reply(Original, {ok, NewRepHistory}), - - %% maybe trigger another replication. If this replicator uses a local - %% source Db, changes to that Db since we started will not be included in + + %% maybe trigger another replication. If this replicator uses a local + %% source Db, changes to that Db since we started will not be included in %% this pass. case up_to_date(Source, Seq) of true -> @@ -319,9 +319,9 @@ terminate(Reason, State) -> target = Target, stats = Stats } = State, - + [gen_server:reply(L, {error, Reason}) || L <- Listeners], - + ets:delete(Stats), close_db(Target), close_db(Source). @@ -345,19 +345,19 @@ dump_update_errors([{{Id, Rev}, Error}|Rest]) -> attachment_loop(ReqId, Conn) -> couch_util:should_flush(), - receive + receive {From, {set_req_id, NewId}} -> %% we learn the ReqId to listen for From ! {self(), {ok, NewId}}, attachment_loop(NewId, Conn); {ibrowse_async_headers, ReqId, Status, Headers} -> %% we got header, give the controlling process a chance to react - receive - {From, gimme_status} -> + receive + {From, gimme_status} -> %% send status/headers to controller From ! {self(), {status, Status, Headers}}, receive - {From, continue} -> + {From, continue} -> %% normal case attachment_loop(ReqId, Conn); {From, fail} -> @@ -382,7 +382,7 @@ attachment_loop(ReqId, Conn) -> ?LOG_ERROR("streaming attachment failed with ~p", [Err]), catch ibrowse:stop_worker_process(Conn), exit(attachment_request_failed); - {ibrowse_async_response, ReqId, Data} -> + {ibrowse_async_response, ReqId, Data} -> receive {From, gimme_data} -> From ! 
{self(), Data} end, attachment_loop(ReqId, Conn); {ibrowse_async_response_end, ReqId} -> @@ -396,7 +396,7 @@ attachment_stub_converter(DbS, Id, Rev, {Name, {stub, Type, Length}}) -> Url = lists:flatten([DbUrl, url_encode(Id), "/", url_encode(?b2l(Name)), "?rev=", ?b2l(couch_doc:rev_to_str({Pos,RevId}))]), ?LOG_DEBUG("Attachment URL ~s", [Url]), - {ok, RcvFun} = make_attachment_stub_receiver(Url, Headers, Name, + {ok, RcvFun} = make_attachment_stub_receiver(Url, Headers, Name, Type, Length), {Name, {Type, {RcvFun, Length}}}. @@ -404,21 +404,21 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length) -> make_attachment_stub_receiver(Url, Headers, Name, Type, Length, 10, 1000). make_attachment_stub_receiver(Url, _Headers, _Name, _Type, _Length, 0, _Pause) -> - ?LOG_ERROR("streaming attachment request failed after 10 retries: ~s", + ?LOG_ERROR("streaming attachment request failed after 10 retries: ~s", [Url]), exit({attachment_request_failed, ?l2b(["failed to replicate ", Url])}); - + make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause) -> %% start the process that receives attachment data from ibrowse #url{host=Host, port=Port} = ibrowse_lib:parse_url(Url), {ok, Conn} = ibrowse:spawn_link_worker_process(Host, Port), Pid = spawn_link(fun() -> attachment_loop(nil, Conn) end), - + %% make the async request Opts = [{stream_to, Pid}, {response_format, binary}], - ReqId = + ReqId = case ibrowse:send_req_direct(Conn, Url, Headers, get, [], Opts, infinity) of - {ibrowse_req_id, X} -> + {ibrowse_req_id, X} -> X; {error, Reason} -> ?LOG_INFO("retrying couch_rep attachment request in ~p " ++ @@ -428,11 +428,11 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause) make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries-1, 2*Pause) end, - + %% tell our receiver about the ReqId it needs to look for Pid ! {self(), {set_req_id, ReqId}}, - receive - {Pid, {ok, ReqId}} -> + receive + {Pid, {ok, ReqId}} -> ok; {'EXIT', Pid, _Reason} -> catch ibrowse:stop_worker_process(Conn), @@ -440,19 +440,19 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause) make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries-1, 2*Pause) end, - + %% wait for headers to ensure that we have a 200 status code %% this is where we follow redirects etc - Pid ! {self(), gimme_status}, + Pid ! {self(), gimme_status}, receive {'EXIT', Pid, attachment_request_failed} -> catch ibrowse:stop_worker_process(Conn), make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries-1, Pause); - {Pid, {status, StreamStatus, StreamHeaders}} -> + {Pid, {status, StreamStatus, StreamHeaders}} -> ?LOG_DEBUG("streaming attachment Status ~p Headers ~p", [StreamStatus, StreamHeaders]), - + ResponseCode = list_to_integer(StreamStatus), if ResponseCode >= 200, ResponseCode < 300 -> @@ -461,10 +461,10 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause) %% this function goes into the streaming attachment code. %% It gets executed by the replication gen_server, so it can't %% be the one to actually receive the ibrowse data. - {ok, fun() -> - Pid ! {self(), gimme_data}, - receive - {Pid, Data} -> + {ok, fun() -> + Pid ! 
{self(), gimme_data}, + receive + {Pid, Data} -> Data; {'EXIT', Pid, attachment_request_failed} -> throw(attachment_write_failed) @@ -473,25 +473,25 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause) ResponseCode >= 300, ResponseCode < 400 -> % follow the redirect Pid ! {self(), stop_ok}, - RedirectUrl = mochiweb_headers:get_value("Location", + RedirectUrl = mochiweb_headers:get_value("Location", mochiweb_headers:make(StreamHeaders)), catch ibrowse:stop_worker_process(Conn), make_attachment_stub_receiver(RedirectUrl, Headers, Name, Type, Length, Retries - 1, Pause); - ResponseCode >= 400, ResponseCode < 500 -> + ResponseCode >= 400, ResponseCode < 500 -> % an error... log and fail - ?LOG_ERROR("streaming attachment failed with code ~p: ~s", + ?LOG_ERROR("streaming attachment failed with code ~p: ~s", [ResponseCode, Url]), Pid ! {self(), fail}, exit(attachment_request_failed); ResponseCode == 500 -> % an error... log and retry - ?LOG_INFO("retrying couch_rep attachment request in ~p " ++ + ?LOG_INFO("retrying couch_rep attachment request in ~p " ++ "seconds due to 500 response: ~s", [Pause/1000, Url]), Pid ! {self(), fail}, catch ibrowse:stop_worker_process(Conn), timer:sleep(Pause), - make_attachment_stub_receiver(Url, Headers, Name, Type, Length, + make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries - 1, 2*Pause) end end. @@ -522,28 +522,28 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) -> {src_record, #doc{body={LastRepRecord}}=RepRecDocSrc}, {tgt_record, RepRecDocTgt} ] = Context, - + case NewSeqNum == StartSeqNum andalso OldHistory /= [] of true -> % nothing changed, don't record results {ok, {[{<<"no_changes">>, true} | LastRepRecord]}, Context}; false -> % something changed, record results for incremental replication, - + % commit changes to both src and tgt. The src because if changes - % we replicated are lost, we'll record the a seq number ahead + % we replicated are lost, we'll record the a seq number ahead % of what was committed. If those changes are lost and the seq number % reverts to a previous committed value, we will skip future changes % when new doc updates are given our already replicated seq nums. - + % commit the src async ParentPid = self(), - SrcCommitPid = spawn_link(fun() -> + SrcCommitPid = spawn_link(fun() -> ParentPid ! 
{self(), ensure_full_commit(Source)} end), - + % commit tgt sync {ok, TgtInstanceStartTime2} = ensure_full_commit(Target), - + SrcInstanceStartTime2 = receive {SrcCommitPid, {ok, Timestamp}} -> @@ -551,7 +551,7 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) -> {'EXIT', SrcCommitPid, {http_request_failed, _}} -> exit(replication_link_failure) end, - + RecordSeqNum = if SrcInstanceStartTime2 == SrcInstanceStartTime andalso TgtInstanceStartTime2 == TgtInstanceStartTime -> @@ -562,7 +562,7 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) -> "replication is redone and documents reexamined.", []), StartSeqNum end, - + NewHistoryEntry = { [{<<"start_time">>, list_to_binary(ReplicationStartTime)}, {<<"end_time">>, list_to_binary(httpd_util:rfc1123_date())}, @@ -582,11 +582,11 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) -> {<<"source_last_seq">>, RecordSeqNum}, {<<"history">>, HistEntries}]}, - {ok, {SrcRevPos,SrcRevId}} = update_doc(Source, + {ok, {SrcRevPos,SrcRevId}} = update_doc(Source, RepRecDocSrc#doc{body=NewRepHistory}, []), {ok, {TgtRevPos,TgtRevId}} = update_doc(Target, RepRecDocTgt#doc{body=NewRepHistory}, []), - + NewContext = [ {start_seq, StartSeqNum}, {history, OldHistory}, @@ -596,9 +596,9 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) -> {src_record, RepRecDocSrc#doc{revs={SrcRevPos,[SrcRevId]}}}, {tgt_record, RepRecDocTgt#doc{revs={TgtRevPos,[TgtRevId]}}} ], - + {ok, NewRepHistory, NewContext} - + end. do_http_request(Url, Action, Headers) -> @@ -610,7 +610,7 @@ do_http_request(Url, Action, Headers, JsonBody) -> do_http_request(Url, Action, Headers, Body, Retries, Pause) when is_binary(Url) -> do_http_request(?b2l(Url), Action, Headers, Body, Retries, Pause); do_http_request(Url, Action, _Headers, _JsonBody, 0, _Pause) -> - ?LOG_ERROR("couch_rep HTTP ~p request failed after 10 retries: ~s", + ?LOG_ERROR("couch_rep HTTP ~p request failed after 10 retries: ~s", [Action, Url]), exit({http_request_failed, ?l2b(["failed to replicate ", Url])}); do_http_request(Url, Action, Headers, JsonBody, Retries, Pause) -> @@ -637,27 +637,27 @@ do_http_request(Url, Action, Headers, JsonBody, Retries, Pause) -> ResponseCode >= 200, ResponseCode < 300 -> ?JSON_DECODE(ResponseBody); ResponseCode >= 300, ResponseCode < 400 -> - RedirectUrl = mochiweb_headers:get_value("Location", + RedirectUrl = mochiweb_headers:get_value("Location", mochiweb_headers:make(ResponseHeaders)), do_http_request(RedirectUrl, Action, Headers, JsonBody, Retries-1, Pause); - ResponseCode >= 400, ResponseCode < 500 -> - ?JSON_DECODE(ResponseBody); + ResponseCode >= 400, ResponseCode < 500 -> + ?JSON_DECODE(ResponseBody); ResponseCode == 500 -> - ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds " ++ + ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds " ++ "due to 500 error: ~s", [Action, Pause/1000, Url]), timer:sleep(Pause), do_http_request(Url, Action, Headers, JsonBody, Retries - 1, 2*Pause) end; {error, Reason} -> - ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds due to " ++ + ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds due to " ++ "{error, ~p}: ~s", [Action, Pause/1000, Reason, Url]), timer:sleep(Pause), do_http_request(Url, Action, Headers, JsonBody, Retries - 1, 2*Pause) end. 
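The do_http_request clauses above implement a simple retry policy: a 500 response or a transport error causes the request to be retried after a pause that doubles each time, until the retry budget is exhausted. A generic, self-contained sketch of that backoff loop; the request fun and its return shape are placeholders, not couch_rep's API:

    -module(backoff_sketch).
    -export([with_retries/3]).

    %% Run Fun/0 up to Retries times, doubling Pause (milliseconds) between attempts.
    with_retries(_Fun, 0, _Pause) ->
        exit(request_failed);
    with_retries(Fun, Retries, Pause) ->
        case Fun() of
            {ok, Result} ->
                {ok, Result};
            {error, _Reason} ->
                timer:sleep(Pause),
                with_retries(Fun, Retries - 1, 2 * Pause)
        end.

    %% Example: with_retries(fun() -> {error, timeout} end, 3, 100)
    %% sleeps 100, 200 and 400 ms and then exits with request_failed.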
ensure_full_commit(#http_db{uri=DbUrl, headers=Headers}) -> - {ResultProps} = do_http_request(DbUrl ++ "_ensure_full_commit", post, + {ResultProps} = do_http_request(DbUrl ++ "_ensure_full_commit", post, Headers, true), true = proplists:get_value(<<"ok">>, ResultProps), {ok, proplists:get_value(<<"instance_start_time">>, ResultProps)}; @@ -672,22 +672,22 @@ enum_docs_since(Pid, DbSource, DbTarget, {StartSeq, RevsCount}) -> SrcRevsList = lists:map(fun(#doc_info{id=Id,revs=RevInfos}) -> SrcRevs = [Rev || #rev_info{rev=Rev} <- RevInfos], {Id, SrcRevs} - end, DocInfoList), + end, DocInfoList), {ok, MissingRevs} = get_missing_revs(DbTarget, SrcRevsList), - + %% do we need to check for success here? - [gen_server:call(Pid, {replicate_doc, Info}, infinity) + [gen_server:call(Pid, {replicate_doc, Info}, infinity) || Info <- MissingRevs ], - + #doc_info{high_seq=LastSeq} = lists:last(DocInfoList), RevsCount2 = RevsCount + length(SrcRevsList), gen_server:cast(Pid, {increment_update_seq, LastSeq}), - + enum_docs_since(Pid, DbSource, DbTarget, {LastSeq, RevsCount2}) end. - + get_db_info(#http_db{uri=DbUrl, headers=Headers}) -> {DbProps} = do_http_request(DbUrl, get, Headers), {ok, [{list_to_atom(?b2l(K)), V} || {K,V} <- DbProps]}; @@ -695,14 +695,14 @@ get_db_info(Db) -> couch_db:get_db_info(Db). get_doc_info_list(#http_db{uri=DbUrl, headers=Headers}, StartSeq) -> - Url = DbUrl ++ "_all_docs_by_seq?limit=100&startkey=" + Url = DbUrl ++ "_all_docs_by_seq?limit=100&startkey=" ++ integer_to_list(StartSeq), {Results} = do_http_request(Url, get, Headers), lists:map(fun({RowInfoList}) -> {RowValueProps} = proplists:get_value(<<"value">>, RowInfoList), Seq = proplists:get_value(<<"key">>, RowInfoList), - Revs = - [#rev_info{rev=couch_doc:parse_rev(proplists:get_value(<<"rev">>, RowValueProps)), deleted = proplists:get_value(<<"deleted">>, RowValueProps, false)} | + Revs = + [#rev_info{rev=couch_doc:parse_rev(proplists:get_value(<<"rev">>, RowValueProps)), deleted = proplists:get_value(<<"deleted">>, RowValueProps, false)} | [#rev_info{rev=Rev,deleted=false} || Rev <- couch_doc:parse_revs(proplists:get_value(<<"conflicts">>, RowValueProps, []))] ++ [#rev_info{rev=Rev,deleted=true} || Rev <- couch_doc:parse_revs(proplists:get_value(<<"deleted_conflicts">>, RowValueProps, []))]], #doc_info{ @@ -712,11 +712,11 @@ get_doc_info_list(#http_db{uri=DbUrl, headers=Headers}, StartSeq) -> } end, proplists:get_value(<<"rows">>, Results)); get_doc_info_list(DbSource, StartSeq) -> - {ok, {_Count, DocInfoList}} = couch_db:enum_docs_since(DbSource, StartSeq, + {ok, {_Count, DocInfoList}} = couch_db:enum_docs_since(DbSource, StartSeq, fun (_, _, {100, DocInfoList}) -> {stop, {100, DocInfoList}}; - (DocInfo, _, {Count, DocInfoList}) -> - {ok, {Count+1, [DocInfo|DocInfoList]}} + (DocInfo, _, {Count, DocInfoList}) -> + {ok, {Count+1, [DocInfo|DocInfoList]}} end, {0, []}), lists:reverse(DocInfoList). @@ -742,14 +742,14 @@ open_doc(#http_db{uri=DbUrl, headers=Headers}, DocId, Options) -> open_doc(Db, DocId, Options) -> couch_db:open_doc(Db, DocId, Options). 
-open_doc_revs(#http_db{uri=DbUrl, headers=Headers} = DbS, DocId, Revs0, +open_doc_revs(#http_db{uri=DbUrl, headers=Headers} = DbS, DocId, Revs0, [latest]) -> Revs = couch_doc:rev_to_strs(Revs0), BaseUrl = DbUrl ++ url_encode(DocId) ++ "?revs=true&latest=true", - + %% MochiWeb expects URLs < 8KB long, so maybe split into multiple requests MaxN = trunc((8192 - length(BaseUrl))/14), - + JsonResults = case length(Revs) > MaxN of false -> Url = ?l2b(BaseUrl ++ "&open_revs=" ++ ?JSON_ENCODE(Revs)), @@ -766,7 +766,7 @@ open_doc_revs(#http_db{uri=DbUrl, headers=Headers} = DbS, DocId, Revs0, Acc ++ do_http_request(?l2b(BaseUrl ++ "&open_revs=" ++ ?JSON_ENCODE(lists:reverse(Rest))), get, Headers) end, - + Results = lists:map( fun({[{<<"missing">>, Rev}]}) -> @@ -791,7 +791,7 @@ should_flush(DocCount) when DocCount > ?BUFFER_NDOCS -> should_flush(_DocCount) -> MeAndMyLinks = [self()| [P || P <- element(2,process_info(self(),links)), is_pid(P)]], - + case length(MeAndMyLinks)/2 > ?BUFFER_NATTACHMENTS of true -> true; false -> diff --git a/src/couchdb/couch_server.erl b/src/couchdb/couch_server.erl index 3bf59724..7c79683e 100644 --- a/src/couchdb/couch_server.erl +++ b/src/couchdb/couch_server.erl @@ -42,7 +42,7 @@ start(_Type, _Args) -> restart() -> stop(), start(). - + stop() -> couch_server_sup:stop(). @@ -127,7 +127,7 @@ hash_admin_passwords() -> init([]) -> % read config and register for configuration changes - + % just stop if one of the config settings change. couch_server_sup % will restart us and then we will pick up the new settings. @@ -292,7 +292,7 @@ handle_call({delete, DbName, _Options}, _From, Server) -> case check_dbname(Server, DbNameList) of ok -> FullFilepath = get_full_filename(Server, DbNameList), - Server2 = + Server2 = case ets:lookup(couch_dbs_by_name, DbName) of [] -> Server; [{_, {Pid, LruTime}}] -> @@ -303,11 +303,11 @@ handle_call({delete, DbName, _Options}, _From, Server) -> true = ets:delete(couch_dbs_by_lru, LruTime), Server#server{dbs_open=Server#server.dbs_open - 1} end, - + %% Delete any leftover .compact files. If we don't do this a subsequent %% request for this DB will try to open the .compact file and use it. file:delete(FullFilepath ++ ".compact"), - + case file:delete(FullFilepath) of ok -> couch_db_update_notifier:notify({deleted, DbName}), @@ -326,7 +326,7 @@ handle_cast(Msg, _Server) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. 
- + handle_info({'EXIT', _Pid, config_change}, _Server) -> exit(kill); handle_info({'EXIT', Pid, _Reason}, #server{dbs_open=DbsOpen}=Server) -> diff --git a/src/couchdb/couch_server_sup.erl b/src/couchdb/couch_server_sup.erl index 4c77dbe1..663c8ee0 100644 --- a/src/couchdb/couch_server_sup.erl +++ b/src/couchdb/couch_server_sup.erl @@ -83,7 +83,7 @@ start_server(IniFiles) -> ok = couch_util:start_driver(LibDir), BaseChildSpecs = - {{one_for_all, 10, 3600}, + {{one_for_all, 10, 3600}, [{couch_config, {couch_server_sup, couch_config_start_link_wrapper, [IniFiles, ConfigPid]}, permanent, @@ -130,7 +130,7 @@ start_server(IniFiles) -> start_primary_services() -> supervisor:start_link({local, couch_primary_services}, couch_server_sup, - {{one_for_one, 10, 3600}, + {{one_for_one, 10, 3600}, [{couch_log, {couch_log, start_link, []}, permanent, @@ -168,7 +168,7 @@ start_secondary_services() -> DaemonChildSpecs = [ begin {ok, {Module, Fun, Args}} = couch_util:parse_term(SpecStr), - + {list_to_atom(Name), {Module, Fun, Args}, permanent, diff --git a/src/couchdb/couch_stats_aggregator.erl b/src/couchdb/couch_stats_aggregator.erl index 821bf60f..2e8ea380 100644 --- a/src/couchdb/couch_stats_aggregator.erl +++ b/src/couchdb/couch_stats_aggregator.erl @@ -18,7 +18,7 @@ -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). --export([start/0, stop/0, +-export([start/0, stop/0, get/1, get/2, get_json/1, get_json/2, all/0, time_passed/0, clear_aggregates/1]). @@ -34,7 +34,7 @@ start() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). - + stop() -> gen_server:call(?MODULE, stop). @@ -47,18 +47,18 @@ get_json(Key) -> gen_server:call(?MODULE, {get_json, Key}). get_json(Key, Time) -> gen_server:call(?MODULE, {get_json, Key, Time}). - + time_passed() -> gen_server:call(?MODULE, time_passed). -clear_aggregates(Time) -> +clear_aggregates(Time) -> gen_server:call(?MODULE, {clear_aggregates, Time}). all() -> gen_server:call(?MODULE, all). 
% GEN_SERVER - + init(_) -> ets:new(?MODULE, [named_table, set, protected]), init_timers(), @@ -86,13 +86,13 @@ handle_call(time_passed, _, OldState) -> % the foldls below could probably be refactored into a less code-duping form % update aggregates on incremental counters - NextState = lists:foldl(fun(Counter, State) -> + NextState = lists:foldl(fun(Counter, State) -> {Key, Value} = Counter, update_aggregates_loop(Key, Value, State, incremental) end, OldState, ?COLLECTOR:all(incremental)), % update aggregates on absolute value counters - NewState = lists:foldl(fun(Counter, State) -> + NewState = lists:foldl(fun(Counter, State) -> {Key, Value} = Counter, % clear the counter, we've got the important bits in State ?COLLECTOR:clear(Key), @@ -117,7 +117,7 @@ handle_call(stop, _, State) -> % Stats = [{Key, TimesProplist}] % TimesProplist = [{Time, Aggrgates}] % Aggregates = #aggregates{} -% +% % [ % {Key, [ % {TimeA, #aggregates{}}, @@ -126,7 +126,7 @@ handle_call(stop, _, State) -> % {TimeD, #aggregates{}} % ] % }, -% +% % ] %% clear the aggregats record for a specific Time = 60 | 300 | 900 @@ -134,7 +134,7 @@ do_clear_aggregates(Time, #state{aggregates=Stats}) -> NewStats = lists:map(fun({Key, TimesProplist}) -> {Key, case proplists:lookup(Time, TimesProplist) of % do have stats for this key, if we don't, return Stat unmodified - none -> + none -> TimesProplist; % there are stats, let's unset the Time one {_Time, _Stat} -> @@ -177,12 +177,12 @@ update_aggregates_loop(Key, Values, State, CounterType) -> % {'900',{aggregates,1,1,1,0,0,1,1}}]}] [{_Key, StatsList}] = case proplists:lookup(Key, AllStats) of none -> [{Key, [ - {'0', empty}, + {'0', empty}, {'60', empty}, {'300', empty}, {'900', empty} ]}]; - AllStatsMatch -> + AllStatsMatch -> [AllStatsMatch] end, @@ -236,7 +236,7 @@ update_aggregates(Value, Stat, CounterType) -> incremental -> Value - Current; absolute -> Value end, - % Knuth, The Art of Computer Programming, vol. 2, p. 232. + % Knuth, The Art of Computer Programming, vol. 2, p. 232. NewCount = Count + 1, NewMean = Mean + (NewValue - Mean) / NewCount, % NewCount is never 0. NewVariance = Variance + (NewValue - Mean) * (NewValue - NewMean), @@ -288,29 +288,29 @@ do_get_all(#state{aggregates=Stats}=State) -> init_descriptions() -> - % ets is probably overkill here, but I didn't manage to keep the + % ets is probably overkill here, but I didn't manage to keep the % descriptions in the gen_server state. Which means there is probably % a bug in one of the handle_call() functions most likely the one that % handles the time_passed message. But don't tell anyone, the math is % correct :) -- Jan - % Style guide for descriptions: Start with a lowercase letter & do not add + % Style guide for descriptions: Start with a lowercase letter & do not add % a trailing full-stop / period. 
- + % please keep this in alphabetical order ets:insert(?MODULE, {{couchdb, database_writes}, <<"number of times a database was changed">>}), ets:insert(?MODULE, {{couchdb, database_reads}, <<"number of times a document was read from a database">>}), ets:insert(?MODULE, {{couchdb, open_databases}, <<"number of open databases">>}), ets:insert(?MODULE, {{couchdb, open_os_files}, <<"number of file descriptors CouchDB has open">>}), ets:insert(?MODULE, {{couchdb, request_time}, <<"length of a request inside CouchDB without MochiWeb">>}), - + ets:insert(?MODULE, {{httpd, bulk_requests}, <<"number of bulk requests">>}), ets:insert(?MODULE, {{httpd, requests}, <<"number of HTTP requests">>}), ets:insert(?MODULE, {{httpd, temporary_view_reads}, <<"number of temporary view reads">>}), ets:insert(?MODULE, {{httpd, view_reads}, <<"number of view reads">>}), ets:insert(?MODULE, {{httpd, clients_requesting_changes}, <<"Number of clients currently requesting continuous _changes">>}), - + ets:insert(?MODULE, {{httpd_request_methods, 'COPY'}, <<"number of HTTP COPY requests">>}), ets:insert(?MODULE, {{httpd_request_methods, 'DELETE'}, <<"number of HTTP DELETE requests">>}), ets:insert(?MODULE, {{httpd_request_methods, 'GET'}, <<"number of HTTP GET requests">>}), @@ -318,7 +318,7 @@ init_descriptions() -> ets:insert(?MODULE, {{httpd_request_methods, 'MOVE'}, <<"number of HTTP MOVE requests">>}), ets:insert(?MODULE, {{httpd_request_methods, 'POST'}, <<"number of HTTP POST requests">>}), ets:insert(?MODULE, {{httpd_request_methods, 'PUT'}, <<"number of HTTP PUT requests">>}), - + ets:insert(?MODULE, {{httpd_status_codes, '200'}, <<"number of HTTP 200 OK responses">>}), ets:insert(?MODULE, {{httpd_status_codes, '201'}, <<"number of HTTP 201 Created responses">>}), ets:insert(?MODULE, {{httpd_status_codes, '202'}, <<"number of HTTP 202 Accepted responses">>}), @@ -338,12 +338,12 @@ init_descriptions() -> % Timer init_timers() -> - + % OTP docs on timer: http://erlang.org/doc/man/timer.html % start() -> ok - % Starts the timer server. Normally, the server does not need to be - % started explicitly. It is started dynamically if it is needed. This is - % useful during development, but in a target system the server should be + % Starts the timer server. Normally, the server does not need to be + % started explicitly. It is started dynamically if it is needed. This is + % useful during development, but in a target system the server should be % started explicitly. Use configuration parameters for kernel for this. % % TODO: Add timer_start to kernel start options. @@ -361,7 +361,7 @@ init_timers() -> % Unused gen_server behaviour API functions that we need to declare. - + %% @doc Unused handle_cast(foo, State) -> {noreply, State}. diff --git a/src/couchdb/couch_stats_collector.erl b/src/couchdb/couch_stats_collector.erl index 9139f6cb..cec8138e 100644 --- a/src/couchdb/couch_stats_collector.erl +++ b/src/couchdb/couch_stats_collector.erl @@ -22,7 +22,7 @@ terminate/2, code_change/3]). --export([start/0, stop/0, get/1, +-export([start/0, stop/0, get/1, increment/1, decrement/1, track_process_count/1, track_process_count/2, record/2, clear/1, @@ -38,15 +38,15 @@ start() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). - + stop() -> gen_server:call(?MODULE, stop). 
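The update_aggregates code above maintains a running mean and a running sum of squared differences with the single-pass method cited from Knuth (often called Welford's algorithm). A self-contained sketch of the same update rule applied to a list of samples; this is illustrative only, and the real #aggregates record carries more fields than are tracked here:

    -module(running_stats_sketch).
    -export([stats/1]).

    %% Fold samples into {Count, Mean, M2}; sample variance is M2/(Count-1) for Count > 1.
    stats(Samples) ->
        {N, Mean, M2} =
            lists:foldl(
                fun(X, {Count, Mean0, M20}) ->
                    Count1 = Count + 1,
                    Mean1  = Mean0 + (X - Mean0) / Count1,   % Count1 is never 0
                    M21    = M20 + (X - Mean0) * (X - Mean1),
                    {Count1, Mean1, M21}
                end, {0, 0.0, 0.0}, Samples),
        Variance = if N > 1 -> M2 / (N - 1); true -> 0.0 end,
        {N, Mean, Variance}.

    %% Example: stats([2, 4, 4, 4, 5, 5, 7, 9]) returns {8, 5.0, 4.571428571428571}.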
get(Key) -> case ets:lookup(?HIT_COUNTER_TABLE, Key) of - [] -> + [] -> case ets:lookup(?ABSOLUTE_VALUE_COUNTER_TABLE, Key) of - [] -> + [] -> 0; Result2 -> extract_value_from_ets_result(Key, Result2) end; @@ -62,7 +62,7 @@ increment(Key) -> ok; _ -> ok end. - + decrement(Key) -> case catch ets:update_counter(?HIT_COUNTER_TABLE, Key, -1) of {'EXIT', {badarg, _}} -> @@ -70,7 +70,7 @@ decrement(Key) -> ok; _ -> ok end. - + record(Key, Value) -> ets:insert(?ABSOLUTE_VALUE_COUNTER_TABLE, {Key, Value}). @@ -78,7 +78,7 @@ clear(Key) -> true = ets:delete(?ABSOLUTE_VALUE_COUNTER_TABLE, Key). all() -> - lists:append(ets:tab2list(?HIT_COUNTER_TABLE), + lists:append(ets:tab2list(?HIT_COUNTER_TABLE), ets:tab2list(?ABSOLUTE_VALUE_COUNTER_TABLE)). all(Type) -> @@ -123,7 +123,7 @@ extract_value_from_ets_result(_Key, Result) -> % Unused gen_server behaviour API functions that we need to declare. - + %% @doc Unused handle_cast(foo, State) -> {noreply, State}. diff --git a/src/couchdb/couch_stream.erl b/src/couchdb/couch_stream.erl index e61d6605..54234ee5 100644 --- a/src/couchdb/couch_stream.erl +++ b/src/couchdb/couch_stream.erl @@ -67,7 +67,7 @@ old_copy_to_new_stream(Fd, Pos, Len, DestFd) -> end, ok), close(Dest). -% 09 UPGRADE CODE +% 09 UPGRADE CODE old_foldl(_Fd, null, 0, _Fun, Acc) -> Acc; old_foldl(Fd, OldPointer, Len, Fun, Acc) when is_tuple(OldPointer)-> @@ -119,7 +119,7 @@ handle_call(close, _From, Stream) -> written_pointers = Written, buffer_len = BufferLen, buffer_list = Buffer} = Stream, - + case Buffer of [] -> Result = {lists:reverse(Written), WrittenLen}; @@ -137,7 +137,7 @@ code_change(_OldVsn, State, _Extra) -> handle_info(_Info, State) -> {noreply, State}. - + % 09 UPGRADE CODE diff --git a/src/couchdb/couch_task_status.erl b/src/couchdb/couch_task_status.erl index ee7bdff5..28758a00 100644 --- a/src/couchdb/couch_task_status.erl +++ b/src/couchdb/couch_task_status.erl @@ -59,7 +59,7 @@ set_update_frequency(Msecs) -> update(StatusText) -> update("~s", [StatusText]). - + update(Format, Data) -> {LastUpdateTime, Frequency} = get(task_status_update), case timer:now_diff(Now = now(), LastUpdateTime) >= Frequency of diff --git a/src/couchdb/couch_util.erl b/src/couchdb/couch_util.erl index 1a2929e4..7cf19354 100644 --- a/src/couchdb/couch_util.erl +++ b/src/couchdb/couch_util.erl @@ -56,7 +56,7 @@ terminate_linked(Reason) -> new_uuid() -> list_to_binary(to_hex(crypto:rand_bytes(16))). - + to_hex([]) -> []; to_hex(Bin) when is_binary(Bin) -> @@ -73,7 +73,7 @@ parse_term(Bin) when is_binary(Bin)-> parse_term(List) -> {ok, Tokens, _} = erl_scan:string(List ++ "."), erl_parse:parse_term(Tokens). - + % returns a random integer rand32() -> @@ -193,15 +193,15 @@ collate(A, B, Options) when is_binary(A), is_binary(B) -> should_flush() -> should_flush(?FLUSH_MAX_MEM). 
- + should_flush(MemThreshHold) -> {memory, ProcMem} = process_info(self(), memory), - BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end, + BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end, 0, element(2,process_info(self(), binary))), if ProcMem+BinMem > 2*MemThreshHold -> garbage_collect(), {memory, ProcMem2} = process_info(self(), memory), - BinMem2 = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end, + BinMem2 = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end, 0, element(2,process_info(self(), binary))), if ProcMem2+BinMem2 > MemThreshHold -> true; @@ -230,7 +230,7 @@ encodeBase64(Bs) when list(Bs) -> encodeBase64(list_to_binary(Bs), <<>>); encodeBase64(Bs) -> encodeBase64(Bs, <<>>). - + encodeBase64(<<B:3/binary, Bs/binary>>, Acc) -> <<C1:6, C2:6, C3:6, C4:6>> = B, encodeBase64(Bs, <<Acc/binary, (enc(C1)), (enc(C2)), (enc(C3)), (enc(C4))>>); diff --git a/src/couchdb/couch_view.erl b/src/couchdb/couch_view.erl index 87feea12..93c3a493 100644 --- a/src/couchdb/couch_view.erl +++ b/src/couchdb/couch_view.erl @@ -24,14 +24,14 @@ -record(server,{ root_dir = []}). - + start_link() -> gen_server:start_link({local, couch_view}, couch_view, [], []). get_temp_updater(DbName, Language, DesignOptions, MapSrc, RedSrc) -> % make temp group % do we need to close this db? - {ok, _Db, Group} = + {ok, _Db, Group} = couch_view_group:open_temp_group(DbName, Language, DesignOptions, MapSrc, RedSrc), case gen_server:call(couch_view, {get_group_server, DbName, Group}) of {ok, Pid} -> @@ -44,7 +44,7 @@ get_group_server(DbName, GroupId) -> % get signature for group case couch_view_group:open_db_group(DbName, GroupId) of % do we need to close this db? - {ok, _Db, Group} -> + {ok, _Db, Group} -> case gen_server:call(couch_view, {get_group_server, DbName, Group}) of {ok, Pid} -> Pid; @@ -54,7 +54,7 @@ get_group_server(DbName, GroupId) -> Error -> throw(Error) end. - + get_group(Db, GroupId, Stale) -> MinUpdateSeq = case Stale of ok -> 0; @@ -73,23 +73,23 @@ get_group_info(Db, GroupId) -> couch_view_group:request_group_info( get_group_server(couch_db:name(Db), GroupId)). -cleanup_index_files(Db) -> +cleanup_index_files(Db) -> % load all ddocs {ok, DesignDocs} = couch_db:get_design_docs(Db), - + % make unique list of group sigs Sigs = lists:map(fun(#doc{id = GroupId} = DDoc) -> {ok, Info} = get_group_info(Db, GroupId), ?b2l(proplists:get_value(signature, Info)) end, [DD||DD <- DesignDocs, DD#doc.deleted == false]), - + FileList = list_index_files(Db), - + % regex that matches all ddocs RegExp = "("++ string:join(Sigs, "|") ++")", % filter out the ones in use - DeleteFiles = lists:filter(fun(FilePath) -> + DeleteFiles = lists:filter(fun(FilePath) -> regexp:first_match(FilePath, RegExp)==nomatch end, FileList), % delete unused files @@ -108,7 +108,7 @@ get_row_count(#view{btree=Bt}) -> {ok, Count}. get_temp_reduce_view(Db, Language, DesignOptions, MapSrc, RedSrc) -> - {ok, #group{views=[View]}=Group} = + {ok, #group{views=[View]}=Group} = get_temp_group(Db, Language, DesignOptions, MapSrc, RedSrc), {ok, {temp_reduce, View}, Group}. 
@@ -161,7 +161,7 @@ fold_reduce({temp_reduce, #view{btree=Bt}}, Dir, StartKey, EndKey, GroupFun, Fun couch_btree:fold_reduce(Bt, Dir, StartKey, EndKey, GroupFun, WrapperFun, Acc); -fold_reduce({reduce, NthRed, Lang, #view{btree=Bt, reduce_funs=RedFuns}}, Dir, StartKey, EndKey, GroupFun, Fun, Acc) -> +fold_reduce({reduce, NthRed, Lang, #view{btree=Bt, reduce_funs=RedFuns}}, Dir, StartKey, EndKey, GroupFun, Fun, Acc) -> PreResultPadding = lists:duplicate(NthRed - 1, []), PostResultPadding = lists:duplicate(length(RedFuns) - NthRed, []), {_Name, FunSrc} = lists:nth(NthRed,RedFuns), @@ -180,7 +180,7 @@ fold_reduce({reduce, NthRed, Lang, #view{btree=Bt, reduce_funs=RedFuns}}, Dir, S end, couch_btree:fold_reduce(Bt, Dir, StartKey, EndKey, GroupFun, WrapperFun, Acc). - + get_key_pos(_Key, [], _N) -> 0; get_key_pos(Key, [{Key1,_Value}|_], N) when Key == Key1 -> @@ -215,7 +215,7 @@ get_map_view0(Name, [#view{map_names=MapNames}=View|Rest]) -> end. reduce_to_count(Reductions) -> - {Count, _} = + {Count, _} = couch_btree:final_reduce( fun(reduce, KVs) -> Count = lists:sum( @@ -226,9 +226,9 @@ reduce_to_count(Reductions) -> {lists:sum([Count0 || {Count0, _} <- Reds]), []} end, Reductions), Count. - - + + fold_fun(_Fun, [], _, Acc) -> {ok, Acc}; fold_fun(Fun, [KV|Rest], {KVReds, Reds}, Acc) -> @@ -258,7 +258,7 @@ init([]) -> fun("couchdb", "view_index_dir")-> exit(Self, config_change) end), - + couch_db_update_notifier:start_link( fun({deleted, DbName}) -> gen_server:cast(couch_view, {reset_indexes, DbName}); @@ -279,11 +279,11 @@ terminate(Reason, _Srv) -> ok. -handle_call({get_group_server, DbName, +handle_call({get_group_server, DbName, #group{name=GroupId,sig=Sig}=Group}, _From, #server{root_dir=Root}=Server) -> case ets:lookup(group_servers_by_sig, {DbName, Sig}) of [] -> - ?LOG_DEBUG("Spawning new group server for view group ~s in database ~s.", + ?LOG_DEBUG("Spawning new group server for view group ~s in database ~s.", [GroupId, DbName]), case (catch couch_view_group:start_link({Root, DbName, Group})) of {ok, NewPid} -> @@ -325,12 +325,12 @@ handle_info({'EXIT', FromPid, Reason}, Server) -> delete_from_ets(FromPid, DbName, GroupId) end, {noreply, Server}. - + add_to_ets(Pid, DbName, Sig) -> true = ets:insert(couch_groups_by_updater, {Pid, {DbName, Sig}}), true = ets:insert(group_servers_by_sig, {{DbName, Sig}, Pid}), true = ets:insert(couch_groups_by_db, {DbName, Sig}). - + delete_from_ets(Pid, DbName, Sig) -> true = ets:delete(couch_groups_by_updater, Pid), true = ets:delete(group_servers_by_sig, {DbName, Sig}), @@ -356,7 +356,7 @@ nuke_dir(Dir) -> ok = nuke_dir(Full) end end, - Files), + Files), ok = file:del_dir(Dir) end. @@ -400,7 +400,7 @@ less_same_type(A, B) when is_list(A) -> less_list(A, B); less_same_type(A, B) -> A < B. 
- + less_props([], [_|_]) -> true; less_props(_, []) -> diff --git a/src/couchdb/couch_view_compactor.erl b/src/couchdb/couch_view_compactor.erl index 63c0ff75..22e58223 100644 --- a/src/couchdb/couch_view_compactor.erl +++ b/src/couchdb/couch_view_compactor.erl @@ -34,20 +34,20 @@ compact_group(Group, EmptyGroup) -> name = GroupId, views = Views } = Group, - + #group{ db = Db, id_btree = EmptyIdBtree, views = EmptyViews } = EmptyGroup, - + {ok, {Count, _}} = couch_btree:full_reduce(Db#db.fulldocinfo_by_id_btree), - + <<"_design", ShortName/binary>> = GroupId, DbName = couch_db:name(Db), TaskName = <<DbName/binary, ShortName/binary>>, couch_task_status:add_task(<<"View Group Compaction">>, TaskName, <<"">>), - + Fun = fun(KV, {Bt, Acc, TotalCopied}) -> if TotalCopied rem 10000 == 0 -> couch_task_status:update("Copied ~p of ~p Ids (~p%)", @@ -58,27 +58,27 @@ compact_group(Group, EmptyGroup) -> {ok, {Bt, [KV|Acc], TotalCopied+1}} end end, - {ok, {Bt3, Uncopied, _Total}} = couch_btree:foldl(IdBtree, Fun, + {ok, {Bt3, Uncopied, _Total}} = couch_btree:foldl(IdBtree, Fun, {EmptyIdBtree, [], 0}), {ok, NewIdBtree} = couch_btree:add(Bt3, lists:reverse(Uncopied)), - + NewViews = lists:map(fun({View, EmptyView}) -> compact_view(View, EmptyView) end, lists:zip(Views, EmptyViews)), - + NewGroup = EmptyGroup#group{ - id_btree=NewIdBtree, - views=NewViews, + id_btree=NewIdBtree, + views=NewViews, current_seq=Seq }, - + Pid = couch_view:get_group_server(DbName, GroupId), gen_server:cast(Pid, {compact_done, NewGroup}). %% @spec compact_view(View, EmptyView, Retry) -> CompactView compact_view(View, EmptyView) -> {ok, Count} = couch_view:get_row_count(View), - + %% Key is {Key,DocId} Fun = fun(KV, {Bt, Acc, TotalCopied}) -> if TotalCopied rem 10000 == 0 -> @@ -86,12 +86,12 @@ compact_view(View, EmptyView) -> [View#view.id_num, TotalCopied, Count, (TotalCopied*100) div Count]), {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV|Acc])), {ok, {Bt2, [], TotalCopied + 1}}; - true -> + true -> {ok, {Bt, [KV|Acc], TotalCopied + 1}} end end, - - {ok, {Bt3, Uncopied, _Total}} = couch_btree:foldl(View#view.btree, Fun, + + {ok, {Bt3, Uncopied, _Total}} = couch_btree:foldl(View#view.btree, Fun, {EmptyView#view.btree, [], 0}), {ok, NewBt} = couch_btree:add(Bt3, lists:reverse(Uncopied)), EmptyView#view{btree = NewBt}. diff --git a/src/couchdb/couch_view_group.erl b/src/couchdb/couch_view_group.erl index 0b390b22..cc2f37a6 100644 --- a/src/couchdb/couch_view_group.erl +++ b/src/couchdb/couch_view_group.erl @@ -22,7 +22,7 @@ terminate/2, code_change/3]). -include("couch_db.hrl"). - + -record(group_state, { type, db_name, @@ -70,7 +70,7 @@ start_link(InitArgs) -> {InitArgs, self(), Ref = make_ref()}, []) of {ok, Pid} -> {ok, Pid}; - ignore -> + ignore -> receive {Ref, Pid, Error} -> case process_info(self(), trap_exit) of @@ -83,7 +83,7 @@ start_link(InitArgs) -> Error end. -% init creates a closure which spawns the appropriate view_updater. +% init creates a closure which spawns the appropriate view_updater. init({InitArgs, ReturnPid, Ref}) -> process_flag(trap_exit, true), case prepare_group(InitArgs, false) of @@ -118,7 +118,7 @@ init({InitArgs, ReturnPid, Ref}) -> % If the request sequence is higher than our current high_target seq, we set % that as the highest seqence. If the updater is not running, we launch it. 
-handle_call({request_group, RequestSeq}, From, +handle_call({request_group, RequestSeq}, From, #group_state{ db_name=DbName, group=#group{current_seq=Seq}=Group, @@ -128,13 +128,13 @@ handle_call({request_group, RequestSeq}, From, {ok, Db} = couch_db:open(DbName, []), Group2 = Group#group{db=Db}, Pid = spawn_link(fun()-> couch_view_updater:update(Group2) end), - + {noreply, State#group_state{ updater_pid=Pid, group=Group2, waiting_list=[{From,RequestSeq}|WaitList] }, infinity}; - + % If the request seqence is less than or equal to the seq_id of a known Group, % we respond with that Group. @@ -159,7 +159,7 @@ handle_call(request_group_info, _From, #group_state{ GroupInfo = get_group_info(Group, CompactorPid), {reply, {ok, GroupInfo}, State}. -handle_cast({start_compact, CompactFun}, #group_state{ compactor_pid=nil, +handle_cast({start_compact, CompactFun}, #group_state{ compactor_pid=nil, group=Group, init_args={view, RootDir, DbName, GroupId} } = State) -> ?LOG_INFO("Starting view group compaction", []), {ok, Db} = couch_db:open(DbName, []), @@ -171,10 +171,10 @@ handle_cast({start_compact, _}, State) -> %% compact already running, this is a no-op {noreply, State}; -handle_cast({compact_done, #group{fd=NewFd, current_seq=NewSeq} = NewGroup}, - #group_state{ +handle_cast({compact_done, #group{fd=NewFd, current_seq=NewSeq} = NewGroup}, + #group_state{ group = #group{current_seq=OldSeq, sig=GroupSig} = Group, - init_args = {view, RootDir, DbName, _GroupId}, + init_args = {view, RootDir, DbName, _GroupId}, updater_pid = nil, ref_counter = RefCounter } = State) when NewSeq >= OldSeq -> @@ -183,7 +183,7 @@ handle_cast({compact_done, #group{fd=NewFd, current_seq=NewSeq} = NewGroup}, CompactName = index_file_name(compact, RootDir, DbName, GroupSig), file:delete(FileName), ok = file:rename(CompactName, FileName), - + %% cleanup old group couch_ref_counter:drop(RefCounter), {ok, NewRefCounter} = couch_ref_counter:start([NewFd]), @@ -191,20 +191,20 @@ handle_cast({compact_done, #group{fd=NewFd, current_seq=NewSeq} = NewGroup}, nil -> ok; Else -> couch_db:close(Else) end, - + erlang:send_after(1000, self(), delayed_commit), {noreply, State#group_state{ - group=NewGroup, + group=NewGroup, ref_counter=NewRefCounter, compactor_pid=nil }}; -handle_cast({compact_done, NewGroup}, #group_state{ +handle_cast({compact_done, NewGroup}, #group_state{ init_args={view, _RootDir, DbName, GroupId} } = State) -> ?LOG_INFO("View index compaction still behind main file", []), couch_db:close(NewGroup#group.db), {ok, Db} = couch_db:open(DbName, []), - Pid = spawn_link(fun() -> - {_,Ref} = erlang:spawn_monitor(fun() -> + Pid = spawn_link(fun() -> + {_,Ref} = erlang:spawn_monitor(fun() -> couch_view_updater:update(NewGroup#group{db = Db}) end), receive @@ -258,8 +258,8 @@ handle_info({'EXIT', FromPid, {new_group, #group{db=Db}=Group}}, {noreply, State#group_state{waiting_commit=true, waiting_list=StillWaiting, group=Group2, updater_pid=Pid}} end; - -handle_info({'EXIT', FromPid, reset}, + +handle_info({'EXIT', FromPid, reset}, #group_state{ init_args=InitArgs, updater_pid=UpPid, @@ -274,10 +274,10 @@ handle_info({'EXIT', FromPid, reset}, Error -> {stop, normal, reply_all(State, Error)} end; - + handle_info({'EXIT', _FromPid, normal}, State) -> {noreply, State}; - + handle_info({'EXIT', FromPid, {{nocatch, Reason}, _Trace}}, State) -> ?LOG_DEBUG("Uncaught throw() in linked pid: ~p", [{FromPid, Reason}]), {stop, Reason, State}; @@ -285,7 +285,7 @@ handle_info({'EXIT', FromPid, {{nocatch, Reason}, _Trace}}, State) -> 
handle_info({'EXIT', FromPid, Reason}, State) -> ?LOG_DEBUG("Exit from linked pid: ~p", [{FromPid, Reason}]), {stop, Reason, State}; - + handle_info({'DOWN',_,_,_,_}, State) -> ?LOG_INFO("Shutting down view group server, monitored db is closing.", []), {stop, normal, reply_all(State, shutdown)}. @@ -305,13 +305,13 @@ code_change(_OldVsn, State, _Extra) -> % reply_with_group/3 % for each item in the WaitingList {Pid, Seq} % if the Seq is =< GroupSeq, reply -reply_with_group(Group=#group{current_seq=GroupSeq}, [{Pid, Seq}|WaitList], +reply_with_group(Group=#group{current_seq=GroupSeq}, [{Pid, Seq}|WaitList], StillWaiting, RefCounter) when Seq =< GroupSeq -> gen_server:reply(Pid, {ok, Group, RefCounter}), reply_with_group(Group, WaitList, StillWaiting, RefCounter); % else -% put it in the continuing waiting list +% put it in the continuing waiting list reply_with_group(Group, [{Pid, Seq}|WaitList], StillWaiting, RefCounter) -> reply_with_group(Group, WaitList, [{Pid, Seq}|StillWaiting], RefCounter); @@ -351,7 +351,7 @@ prepare_group({RootDir, DbName, #group{sig=Sig}=Group}, ForceReset)-> Else end. -get_index_header_data(#group{current_seq=Seq, purge_seq=PurgeSeq, +get_index_header_data(#group{current_seq=Seq, purge_seq=PurgeSeq, id_btree=IdBtree,views=Views}) -> ViewStates = [couch_btree:get_state(Btree) || #view{btree=Btree} <- Views], #index_header{seq=Seq, @@ -364,7 +364,7 @@ hex_sig(GroupSig) -> design_root(RootDir, DbName) -> RootDir ++ "/." ++ ?b2l(DbName) ++ "_design/". - + index_file_name(RootDir, DbName, GroupSig) -> design_root(RootDir, DbName) ++ hex_sig(GroupSig) ++".view". @@ -390,17 +390,17 @@ open_temp_group(DbName, Language, DesignOptions, MapSrc, RedSrc) -> reduce_funs= if RedSrc==[] -> []; true -> [{<<"_temp">>, RedSrc}] end}, {ok, Db, #group{ - name = <<"_temp">>, + name = <<"_temp">>, db=Db, - views=[View], - def_lang=Language, + views=[View], + def_lang=Language, design_options=DesignOptions, sig = erlang:md5(term_to_binary({[View], Language, DesignOptions})) }}; Error -> Error end. - + open_db_group(DbName, GroupId) -> case couch_db:open(DbName, []) of {ok, Db} -> @@ -425,7 +425,7 @@ get_group_info(#group{ {signature, ?l2b(hex_sig(GroupSig))}, {language, Lang}, {disk_size, Size}, - {compact_running, CompactorPid /= nil} + {compact_running, CompactorPid /= nil} ]. 
% maybe move to another module @@ -490,11 +490,11 @@ init_group(Db, Fd, #group{def_lang=Lang,views=Views}=Group, IndexHeader) -> Views2 = lists:zipwith( fun(BtreeState, #view{reduce_funs=RedFuns}=View) -> FunSrcs = [FunSrc || {_Name, FunSrc} <- RedFuns], - ReduceFun = + ReduceFun = fun(reduce, KVs) -> KVs2 = couch_view:expand_dups(KVs,[]), KVs3 = couch_view:detuple_kvs(KVs2,[]), - {ok, Reduced} = couch_query_servers:reduce(Lang, FunSrcs, + {ok, Reduced} = couch_query_servers:reduce(Lang, FunSrcs, KVs3), {length(KVs3), Reduced}; (rereduce, Reds) -> diff --git a/src/couchdb/couch_view_updater.erl b/src/couchdb/couch_view_updater.erl index 11dfb544..97ce3c31 100644 --- a/src/couchdb/couch_view_updater.erl +++ b/src/couchdb/couch_view_updater.erl @@ -18,7 +18,7 @@ update(#group{db=#db{name=DbName}=Db,name=GroupName,current_seq=Seq,purge_seq=PurgeSeq}=Group) -> couch_task_status:add_task(<<"View Group Indexer">>, <<DbName/binary," ",GroupName/binary>>, <<"Starting index update">>), - + DbPurgeSeq = couch_db:get_purge_seq(Db), Group2 = if DbPurgeSeq == PurgeSeq -> @@ -30,7 +30,7 @@ update(#group{db=#db{name=DbName}=Db,name=GroupName,current_seq=Seq,purge_seq=Pu couch_task_status:update(<<"Resetting view index due to lost purge entries.">>), exit(reset) end, - + ViewEmptyKVs = [{View, []} || View <- Group2#group.views], % compute on all docs modified since we last computed. TotalChanges = couch_db:count_changes_since(Db, Seq), @@ -95,9 +95,9 @@ process_doc(Db, DocInfo, {Docs, #group{sig=Sig,name=GroupId,design_options=Desig % This fun computes once for each document #doc_info{id=DocId, revs=[#rev_info{deleted=Deleted}|_]} = DocInfo, - IncludeDesign = proplists:get_value(<<"include_design">>, + IncludeDesign = proplists:get_value(<<"include_design">>, DesignOptions, false), - LocalSeq = proplists:get_value(<<"local_seq">>, + LocalSeq = proplists:get_value(<<"local_seq">>, DesignOptions, false), DocOpts = case LocalSeq of true -> @@ -113,15 +113,15 @@ process_doc(Db, DocInfo, {Docs, #group{sig=Sig,name=GroupId,design_options=Desig if Deleted -> {Docs, [{DocId, []} | DocIdViewIdKeys]}; true -> - {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, + {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts), {[Doc | Docs], DocIdViewIdKeys} end, - + case couch_util:should_flush() of true -> {Group1, Results} = view_compute(Group, Docs2), - {ViewKVs3, DocIdViewIdKeys3} = view_insert_query_results(Docs2, + {ViewKVs3, DocIdViewIdKeys3} = view_insert_query_results(Docs2, Results, ViewKVs, DocIdViewIdKeys2), {ok, Group2} = write_changes(Group1, ViewKVs3, DocIdViewIdKeys3, DocInfo#doc_info.high_seq), @@ -159,7 +159,7 @@ view_insert_doc_query_results(#doc{id=DocId}=Doc, [ResultKVs|RestResults], [{Vie [{Key,Value},{PrevKey,PrevVal}|AccRest] end; (KV, []) -> - [KV] + [KV] end, [], lists:sort(ResultKVs)), NewKVs = [{{Key, DocId}, Value} || {Key, Value} <- ResultKVs2], NewViewKVsAcc = [{View, NewKVs ++ KVs} | ViewKVsAcc], diff --git a/src/couchdb/curlhelper.c b/src/couchdb/curlhelper.c index 99b2e6ab..116612cd 100644 --- a/src/couchdb/curlhelper.c +++ b/src/couchdb/curlhelper.c @@ -38,7 +38,7 @@ Buffer init_Buffer() { } void free_Buffer(Buffer b) { - if(b == NULL) + if(b == NULL) return; if(b->data != NULL) free(b->data); @@ -186,7 +186,7 @@ int set_List(List l, int pos, void* ptr) { } *(l->elements + pos) = ptr; - + return TRUE; } |
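For reference, the incremental mean/variance update that the couch_stats_aggregator hunks earlier in this diff attribute to Knuth (The Art of Computer Programming, vol. 2, p. 232) can be sketched as a standalone fold. This is a minimal illustration of the same update rule only; the module and function names below are hypothetical and are not part of CouchDB or of this commit, and it assumes plain numeric samples rather than the aggregator's counter records.

%% Hypothetical sketch (not CouchDB code): Knuth/Welford running mean and
%% variance, the same update rule used in update_aggregates/3 above.
-module(running_stats_sketch).
-export([from_list/1]).

%% Folds a list of numbers into {Count, Mean, VarSum}. The sample standard
%% deviation is math:sqrt(VarSum / (Count - 1)) once Count > 1.
from_list(Values) ->
    lists:foldl(fun update/2, {0, 0.0, 0.0}, Values).

update(Value, {Count, Mean, VarSum}) ->
    NewCount = Count + 1,
    % NewCount is never 0, so the division is safe.
    NewMean = Mean + (Value - Mean) / NewCount,
    NewVarSum = VarSum + (Value - Mean) * (Value - NewMean),
    {NewCount, NewMean, NewVarSum}.

As a quick check, running_stats_sketch:from_list([2, 4, 4, 4, 5, 5, 7, 9]) gives a count of 8, a mean of 5.0, and a variance sum of 32.0 (population variance 4.0), the standard textbook example for this recurrence.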