| author    | Noah Slater <nslater@apache.org>                                     | 2009-07-06 00:33:50 +0000 |
|-----------|----------------------------------------------------------------------|---------------------------|
| committer | Noah Slater <nslater@apache.org>                                     | 2009-07-06 00:33:50 +0000 |
| commit    | 282b96ddd9a84b740788c2358ec0f5fedafb7cc6                             |                           |
| tree      | fb48e605ceb8079d0195d3b1ec0eca7110fa7ef2 /src/couchdb/couch_rep.erl  |                           |
| parent    | b5cc085d3bc6316063f14adedf20632ee904875d                             |                           |
trimmed trailing whitespace
git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@791350 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'src/couchdb/couch_rep.erl')
-rw-r--r-- | src/couchdb/couch_rep.erl | 216 |
1 file changed, 108 insertions, 108 deletions
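The change itself is purely mechanical: every modified line loses its trailing whitespace and is otherwise identical. For reference, a minimal Erlang sketch of that kind of transformation (not part of this commit; the module name is hypothetical and string:trim/3 assumes a modern OTP release) might look like:

%% Hypothetical helper, not part of CouchDB: strip trailing spaces and tabs
%% from every line of a file, the same mechanical edit this commit applies
%% to couch_rep.erl.
-module(trim_trailing_ws).
-export([trim_file/1]).

trim_file(Path) ->
    {ok, Bin} = file:read_file(Path),
    %% split into lines, trim each line's trailing whitespace, rejoin
    Lines = binary:split(Bin, <<"\n">>, [global]),
    Trimmed = [string:trim(Line, trailing, " \t") || Line <- Lines],
    file:write_file(Path, lists:join(<<"\n">>, Trimmed)).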
diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl
index f5b560e9..a503684b 100644
--- a/src/couchdb/couch_rep.erl
+++ b/src/couchdb/couch_rep.erl
@@ -12,7 +12,7 @@
-module(couch_rep).
-behaviour(gen_server).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
code_change/3]).
-export([replicate/2]).
@@ -24,24 +24,24 @@
-include("couch_db.hrl").
-include("../ibrowse/ibrowse.hrl").
-%% @spec replicate(Source::binary(), Target::binary()) ->
+%% @spec replicate(Source::binary(), Target::binary()) ->
%% {ok, Stats} | {error, Reason}
-%% @doc Triggers a replication. Stats is a JSON Object with the following
+%% @doc Triggers a replication. Stats is a JSON Object with the following
%% keys: session_id (UUID), source_last_seq (integer), and history (array).
%% Each element of the history is an Object with keys start_time, end_time,
%% start_last_seq, end_last_seq, missing_checked, missing_found, docs_read,
%% and docs_written.
%%
%% The supervisor will try to restart the replication in case of any error
-%% other than shutdown. Just call this function again to listen for the
+%% other than shutdown. Just call this function again to listen for the
%% result of the retry.
replicate(Source, Target) ->
-
+
{ok, HostName} = inet:gethostname(),
RepId = couch_util:to_hex( erlang:md5(term_to_binary([HostName, Source, Target]))),
Args = [?MODULE, [RepId, Source,Target], []],
-
+
Replicator = {RepId,
{gen_server, start_link, Args},
transient,
@@ -49,31 +49,31 @@ replicate(Source, Target) ->
worker,
[?MODULE]
},
-
+
Server = case supervisor:start_child(couch_rep_sup, Replicator) of
- {ok, Pid} ->
+ {ok, Pid} ->
?LOG_INFO("starting new replication ~p at ~p", [RepId, Pid]),
Pid;
{error, already_present} ->
case supervisor:restart_child(couch_rep_sup, RepId) of
- {ok, Pid} ->
+ {ok, Pid} ->
?LOG_INFO("starting replication ~p at ~p", [RepId, Pid]),
Pid;
- {error, running} ->
+ {error, running} ->
%% this error occurs if multiple replicators are racing
%% each other to start and somebody else won. Just grab
%% the Pid by calling start_child again.
- {error, {already_started, Pid}} =
+ {error, {already_started, Pid}} =
supervisor:start_child(couch_rep_sup, Replicator),
?LOG_INFO("replication ~p already running at ~p", [RepId, Pid]),
Pid
end;
- {error, {already_started, Pid}} ->
+ {error, {already_started, Pid}} ->
?LOG_INFO("replication ~p already running at ~p", [RepId, Pid]),
Pid
end,
-
- case gen_server:call(Server, get_result, infinity) of
+
+ case gen_server:call(Server, get_result, infinity) of
retry -> replicate(Source, Target);
Else -> Else
end.
@@ -87,7 +87,7 @@ replicate(Source, Target) ->
headers
}).
-
+
-record(state, {
context,
current_seq,
@@ -103,19 +103,19 @@ replicate(Source, Target) ->
init([RepId, Source, Target]) ->
process_flag(trap_exit, true),
-
+
{ok, DbSrc, SrcName} = open_db(Source),
{ok, DbTgt, TgtName} = open_db(Target),
-
+
DocKey = ?l2b(?LOCAL_DOC_PREFIX ++ RepId),
-
+
{ok, InfoSrc} = get_db_info(DbSrc),
{ok, InfoTgt} = get_db_info(DbTgt),
-
+
ReplicationStartTime = httpd_util:rfc1123_date(),
SrcInstanceStartTime = proplists:get_value(instance_start_time, InfoSrc),
TgtInstanceStartTime = proplists:get_value(instance_start_time, InfoTgt),
-
+
RepRecDocSrc =
case open_doc(DbSrc, DocKey, []) of
{ok, SrcDoc} ->
@@ -123,7 +123,7 @@ init([RepId, Source, Target]) ->
SrcDoc;
_ -> #doc{id=DocKey}
end,
-
+
RepRecDocTgt =
case open_doc(DbTgt, DocKey, []) of
{ok, TgtDoc} ->
@@ -131,11 +131,11 @@ init([RepId, Source, Target]) ->
TgtDoc;
_ -> #doc{id=DocKey}
end,
-
+
#doc{body={RepRecProps}} = RepRecDocSrc,
#doc{body={RepRecPropsTgt}} = RepRecDocTgt,
-
- case proplists:get_value(<<"session_id">>, RepRecProps) ==
+
+ case proplists:get_value(<<"session_id">>, RepRecProps) ==
proplists:get_value(<<"session_id">>, RepRecPropsTgt) of
true ->
% if the records have the same session id,
@@ -150,7 +150,7 @@ init([RepId, Source, Target]) ->
OldSeqNum = 0,
OldHistory = []
end,
-
+
Context = [
{start_seq, OldSeqNum},
{history, OldHistory},
@@ -160,20 +160,20 @@ init([RepId, Source, Target]) ->
{src_record, RepRecDocSrc},
{tgt_record, RepRecDocTgt}
],
-
+
Stats = ets:new(replication_stats, [set, private]),
ets:insert(Stats, {total_revs,0}),
ets:insert(Stats, {missing_revs, 0}),
ets:insert(Stats, {docs_read, 0}),
ets:insert(Stats, {docs_written, 0}),
ets:insert(Stats, {doc_write_failures, 0}),
-
+
couch_task_status:add_task("Replication", <<SrcName/binary, " -> ", TgtName/binary>>, "Starting"),
-
+
Parent = self(),
Pid = spawn_link(fun() -> enum_docs_since(Parent,DbSrc,DbTgt,{OldSeqNum,0}) end),
-
+
State = #state{
context = Context,
current_seq = OldSeqNum,
@@ -182,7 +182,7 @@ init([RepId, Source, Target]) ->
target = DbTgt,
stats = Stats
},
-
+
{ok, State}.
handle_call(get_result, From, #state{listeners=L,done=true} = State) ->
{stop, normal, State#state{listeners=[From|L]}};
@@ -200,14 +200,14 @@ handle_call({replicate_doc, {Id, Revs}}, {Pid,_}, #state{enum_pid=Pid} = State)
} = State,
ets:update_counter(Stats, missing_revs, length(Revs)),
-
+
%% get document(s)
{ok, DocResults} = open_doc_revs(Source, Id, Revs, [latest]),
Docs = [RevDoc || {ok, RevDoc} <- DocResults],
ets:update_counter(Stats, docs_read, length(Docs)),
-
+
%% save them (maybe in a buffer)
- {NewBuffer, NewContext} =
+ {NewBuffer, NewContext} =
case should_flush(lists:flatlength([Docs|Buffer])) of
true ->
Docs2 = lists:flatten([Docs|Buffer]),
@@ -227,7 +227,7 @@ handle_call({replicate_doc, {Id, Revs}}, {Pid,_}, #state{enum_pid=Pid} = State)
false ->
{[Docs | Buffer], Context}
end,
-
+
{reply, ok, State#state{context=NewContext, docs_buffer=NewBuffer}};
handle_call({fin, {LastSeq, RevsCount}}, {Pid,_}, #state{enum_pid=Pid} = State) ->
@@ -255,7 +255,7 @@ handle_info({'EXIT', Pid, Reason}, #state{enum_pid=Pid} = State) ->
Parent = self(),
NewPid = spawn_link(fun() -> enum_docs_since(Parent,Src,Tgt,{Seq,0}) end),
{noreply, State#state{enum_pid=NewPid}};
-
+
%% if any linked process dies, respawn the enumerator to get things going again
handle_info({'EXIT', _From, normal}, State) ->
{noreply, State};
@@ -277,7 +277,7 @@ terminate(normal, State) ->
target = Target,
stats = Stats
} = State,
-
+
try update_docs(Target, lists:flatten(Buffer), [], replicated_changes) of
{ok, Errors} ->
dump_update_errors(Errors),
@@ -289,18 +289,18 @@ terminate(normal, State) ->
?LOG_ERROR("attachment request failed during final write", []),
exit({internal_server_error, replication_link_failure})
end,
-
+
couch_task_status:update("Finishing"),
-
+
{ok, NewRepHistory, _} = do_checkpoint(Source, Target, Context, Seq, Stats),
ets:delete(Stats),
close_db(Target),
-
+
[Original|Rest] = Listeners,
gen_server:reply(Original, {ok, NewRepHistory}),
-
- %% maybe trigger another replication. If this replicator uses a local
- %% source Db, changes to that Db since we started will not be included in
+
+ %% maybe trigger another replication. If this replicator uses a local
+ %% source Db, changes to that Db since we started will not be included in
%% this pass.
case up_to_date(Source, Seq) of
true ->
@@ -319,9 +319,9 @@ terminate(Reason, State) ->
target = Target,
stats = Stats
} = State,
-
+
[gen_server:reply(L, {error, Reason}) || L <- Listeners],
-
+
ets:delete(Stats),
close_db(Target),
close_db(Source).
@@ -345,19 +345,19 @@ dump_update_errors([{{Id, Rev}, Error}|Rest]) ->
attachment_loop(ReqId, Conn) ->
couch_util:should_flush(),
- receive
+ receive
{From, {set_req_id, NewId}} ->
%% we learn the ReqId to listen for
From ! {self(), {ok, NewId}},
attachment_loop(NewId, Conn);
{ibrowse_async_headers, ReqId, Status, Headers} ->
%% we got header, give the controlling process a chance to react
- receive
- {From, gimme_status} ->
+ receive
+ {From, gimme_status} ->
%% send status/headers to controller
From ! {self(), {status, Status, Headers}},
receive
- {From, continue} ->
+ {From, continue} ->
%% normal case
attachment_loop(ReqId, Conn);
{From, fail} ->
@@ -382,7 +382,7 @@ attachment_loop(ReqId, Conn) ->
?LOG_ERROR("streaming attachment failed with ~p", [Err]),
catch ibrowse:stop_worker_process(Conn),
exit(attachment_request_failed);
- {ibrowse_async_response, ReqId, Data} ->
+ {ibrowse_async_response, ReqId, Data} ->
receive {From, gimme_data} -> From ! {self(), Data} end,
attachment_loop(ReqId, Conn);
{ibrowse_async_response_end, ReqId} ->
@@ -396,7 +396,7 @@ attachment_stub_converter(DbS, Id, Rev, {Name, {stub, Type, Length}}) ->
Url = lists:flatten([DbUrl, url_encode(Id), "/", url_encode(?b2l(Name)), "?rev=", ?b2l(couch_doc:rev_to_str({Pos,RevId}))]),
?LOG_DEBUG("Attachment URL ~s", [Url]),
- {ok, RcvFun} = make_attachment_stub_receiver(Url, Headers, Name,
+ {ok, RcvFun} = make_attachment_stub_receiver(Url, Headers, Name,
Type, Length),
{Name, {Type, {RcvFun, Length}}}.
@@ -404,21 +404,21 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length) ->
make_attachment_stub_receiver(Url, Headers, Name, Type, Length, 10, 1000).
make_attachment_stub_receiver(Url, _Headers, _Name, _Type, _Length, 0, _Pause) ->
- ?LOG_ERROR("streaming attachment request failed after 10 retries: ~s",
+ ?LOG_ERROR("streaming attachment request failed after 10 retries: ~s",
[Url]),
exit({attachment_request_failed, ?l2b(["failed to replicate ", Url])});
-
+
make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause) ->
%% start the process that receives attachment data from ibrowse
#url{host=Host, port=Port} = ibrowse_lib:parse_url(Url),
{ok, Conn} = ibrowse:spawn_link_worker_process(Host, Port),
Pid = spawn_link(fun() -> attachment_loop(nil, Conn) end),
-
+
%% make the async request
Opts = [{stream_to, Pid}, {response_format, binary}],
- ReqId =
+ ReqId =
case ibrowse:send_req_direct(Conn, Url, Headers, get, [], Opts, infinity) of
- {ibrowse_req_id, X} ->
+ {ibrowse_req_id, X} ->
X;
{error, Reason} ->
?LOG_INFO("retrying couch_rep attachment request in ~p " ++
@@ -428,11 +428,11 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause)
make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries-1, 2*Pause)
end,
-
+
%% tell our receiver about the ReqId it needs to look for
Pid ! {self(), {set_req_id, ReqId}},
- receive
- {Pid, {ok, ReqId}} ->
+ receive
+ {Pid, {ok, ReqId}} ->
ok;
{'EXIT', Pid, _Reason} ->
catch ibrowse:stop_worker_process(Conn),
@@ -440,19 +440,19 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause)
make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries-1, 2*Pause)
end,
-
+
%% wait for headers to ensure that we have a 200 status code
%% this is where we follow redirects etc
- Pid ! {self(), gimme_status},
+ Pid ! {self(), gimme_status},
receive
{'EXIT', Pid, attachment_request_failed} ->
catch ibrowse:stop_worker_process(Conn),
make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries-1, Pause);
- {Pid, {status, StreamStatus, StreamHeaders}} ->
+ {Pid, {status, StreamStatus, StreamHeaders}} ->
?LOG_DEBUG("streaming attachment Status ~p Headers ~p", [StreamStatus, StreamHeaders]),
-
+
ResponseCode = list_to_integer(StreamStatus),
if ResponseCode >= 200, ResponseCode < 300 ->
@@ -461,10 +461,10 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause)
%% this function goes into the streaming attachment code.
%% It gets executed by the replication gen_server, so it can't
%% be the one to actually receive the ibrowse data.
- {ok, fun() ->
- Pid ! {self(), gimme_data},
- receive
- {Pid, Data} ->
+ {ok, fun() ->
+ Pid ! {self(), gimme_data},
+ receive
+ {Pid, Data} ->
Data;
{'EXIT', Pid, attachment_request_failed} ->
throw(attachment_write_failed)
@@ -473,25 +473,25 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause)
ResponseCode >= 300, ResponseCode < 400 ->
% follow the redirect
Pid ! {self(), stop_ok},
- RedirectUrl = mochiweb_headers:get_value("Location",
+ RedirectUrl = mochiweb_headers:get_value("Location",
mochiweb_headers:make(StreamHeaders)),
catch ibrowse:stop_worker_process(Conn),
make_attachment_stub_receiver(RedirectUrl, Headers, Name, Type, Length, Retries - 1, Pause);
- ResponseCode >= 400, ResponseCode < 500 ->
+ ResponseCode >= 400, ResponseCode < 500 ->
% an error... log and fail
- ?LOG_ERROR("streaming attachment failed with code ~p: ~s",
+ ?LOG_ERROR("streaming attachment failed with code ~p: ~s",
[ResponseCode, Url]),
Pid ! {self(), fail},
exit(attachment_request_failed);
ResponseCode == 500 ->
% an error... log and retry
- ?LOG_INFO("retrying couch_rep attachment request in ~p " ++
+ ?LOG_INFO("retrying couch_rep attachment request in ~p " ++
"seconds due to 500 response: ~s", [Pause/1000, Url]),
Pid ! {self(), fail},
catch ibrowse:stop_worker_process(Conn),
timer:sleep(Pause),
- make_attachment_stub_receiver(Url, Headers, Name, Type, Length,
+ make_attachment_stub_receiver(Url, Headers, Name, Type, Length,
Retries - 1, 2*Pause)
end
end.
@@ -522,28 +522,28 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) ->
{src_record, #doc{body={LastRepRecord}}=RepRecDocSrc},
{tgt_record, RepRecDocTgt}
] = Context,
-
+
case NewSeqNum == StartSeqNum andalso OldHistory /= [] of
true ->
% nothing changed, don't record results
{ok, {[{<<"no_changes">>, true} | LastRepRecord]}, Context};
false ->
% something changed, record results for incremental replication,
-
+
% commit changes to both src and tgt. The src because if changes
- % we replicated are lost, we'll record the a seq number ahead
+ % we replicated are lost, we'll record the a seq number ahead
% of what was committed. If those changes are lost and the seq number
% reverts to a previous committed value, we will skip future changes
% when new doc updates are given our already replicated seq nums.
-
+
% commit the src async
ParentPid = self(),
- SrcCommitPid = spawn_link(fun() ->
+ SrcCommitPid = spawn_link(fun() ->
ParentPid ! {self(), ensure_full_commit(Source)} end),
-
+
% commit tgt sync
{ok, TgtInstanceStartTime2} = ensure_full_commit(Target),
-
+
SrcInstanceStartTime2 =
receive
{SrcCommitPid, {ok, Timestamp}} ->
@@ -551,7 +551,7 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) ->
{'EXIT', SrcCommitPid, {http_request_failed, _}} ->
exit(replication_link_failure)
end,
-
+
RecordSeqNum =
if SrcInstanceStartTime2 == SrcInstanceStartTime andalso
TgtInstanceStartTime2 == TgtInstanceStartTime ->
@@ -562,7 +562,7 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) ->
"replication is redone and documents reexamined.", []),
StartSeqNum
end,
-
+
NewHistoryEntry = {
[{<<"start_time">>, list_to_binary(ReplicationStartTime)},
{<<"end_time">>, list_to_binary(httpd_util:rfc1123_date())},
@@ -582,11 +582,11 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) ->
{<<"source_last_seq">>, RecordSeqNum},
{<<"history">>, HistEntries}]},
- {ok, {SrcRevPos,SrcRevId}} = update_doc(Source,
+ {ok, {SrcRevPos,SrcRevId}} = update_doc(Source,
RepRecDocSrc#doc{body=NewRepHistory}, []),
{ok, {TgtRevPos,TgtRevId}} = update_doc(Target, RepRecDocTgt#doc{body=NewRepHistory}, []),
-
+
NewContext = [
{start_seq, StartSeqNum},
{history, OldHistory},
@@ -596,9 +596,9 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) ->
{src_record, RepRecDocSrc#doc{revs={SrcRevPos,[SrcRevId]}}},
{tgt_record, RepRecDocTgt#doc{revs={TgtRevPos,[TgtRevId]}}}
],
-
+
{ok, NewRepHistory, NewContext}
-
+
end.
do_http_request(Url, Action, Headers) ->
@@ -610,7 +610,7 @@ do_http_request(Url, Action, Headers, JsonBody) ->
do_http_request(Url, Action, Headers, Body, Retries, Pause) when is_binary(Url) ->
do_http_request(?b2l(Url), Action, Headers, Body, Retries, Pause);
do_http_request(Url, Action, _Headers, _JsonBody, 0, _Pause) ->
- ?LOG_ERROR("couch_rep HTTP ~p request failed after 10 retries: ~s",
+ ?LOG_ERROR("couch_rep HTTP ~p request failed after 10 retries: ~s",
[Action, Url]),
exit({http_request_failed, ?l2b(["failed to replicate ", Url])});
do_http_request(Url, Action, Headers, JsonBody, Retries, Pause) ->
@@ -637,27 +637,27 @@ do_http_request(Url, Action, Headers, JsonBody, Retries, Pause) ->
ResponseCode >= 200, ResponseCode < 300 ->
?JSON_DECODE(ResponseBody);
ResponseCode >= 300, ResponseCode < 400 ->
- RedirectUrl = mochiweb_headers:get_value("Location",
+ RedirectUrl = mochiweb_headers:get_value("Location",
mochiweb_headers:make(ResponseHeaders)),
do_http_request(RedirectUrl, Action, Headers, JsonBody, Retries-1, Pause);
- ResponseCode >= 400, ResponseCode < 500 ->
- ?JSON_DECODE(ResponseBody);
+ ResponseCode >= 400, ResponseCode < 500 ->
+ ?JSON_DECODE(ResponseBody);
ResponseCode == 500 ->
- ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds " ++
+ ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds " ++
"due to 500 error: ~s", [Action, Pause/1000, Url]),
timer:sleep(Pause),
do_http_request(Url, Action, Headers, JsonBody, Retries - 1, 2*Pause)
end;
{error, Reason} ->
- ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds due to " ++
+ ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds due to " ++
"{error, ~p}: ~s", [Action, Pause/1000, Reason, Url]),
timer:sleep(Pause),
do_http_request(Url, Action, Headers, JsonBody, Retries - 1, 2*Pause)
end.
ensure_full_commit(#http_db{uri=DbUrl, headers=Headers}) ->
- {ResultProps} = do_http_request(DbUrl ++ "_ensure_full_commit", post,
+ {ResultProps} = do_http_request(DbUrl ++ "_ensure_full_commit", post,
Headers, true),
true = proplists:get_value(<<"ok">>, ResultProps),
{ok, proplists:get_value(<<"instance_start_time">>, ResultProps)};
@@ -672,22 +672,22 @@ enum_docs_since(Pid, DbSource, DbTarget, {StartSeq, RevsCount}) ->
SrcRevsList = lists:map(fun(#doc_info{id=Id,revs=RevInfos}) ->
SrcRevs = [Rev || #rev_info{rev=Rev} <- RevInfos],
{Id, SrcRevs}
- end, DocInfoList),
+ end, DocInfoList),
{ok, MissingRevs} = get_missing_revs(DbTarget, SrcRevsList),
-
+
%% do we need to check for success here?
- [gen_server:call(Pid, {replicate_doc, Info}, infinity)
+ [gen_server:call(Pid, {replicate_doc, Info}, infinity)
|| Info <- MissingRevs ],
-
+
#doc_info{high_seq=LastSeq} = lists:last(DocInfoList),
RevsCount2 = RevsCount + length(SrcRevsList),
gen_server:cast(Pid, {increment_update_seq, LastSeq}),
-
+
enum_docs_since(Pid, DbSource, DbTarget, {LastSeq, RevsCount2})
end.
-
+
get_db_info(#http_db{uri=DbUrl, headers=Headers}) ->
{DbProps} = do_http_request(DbUrl, get, Headers),
{ok, [{list_to_atom(?b2l(K)), V} || {K,V} <- DbProps]};
@@ -695,14 +695,14 @@ get_db_info(Db) ->
couch_db:get_db_info(Db).
get_doc_info_list(#http_db{uri=DbUrl, headers=Headers}, StartSeq) ->
- Url = DbUrl ++ "_all_docs_by_seq?limit=100&startkey="
+ Url = DbUrl ++ "_all_docs_by_seq?limit=100&startkey="
++ integer_to_list(StartSeq),
{Results} = do_http_request(Url, get, Headers),
lists:map(fun({RowInfoList}) ->
{RowValueProps} = proplists:get_value(<<"value">>, RowInfoList),
Seq = proplists:get_value(<<"key">>, RowInfoList),
- Revs =
- [#rev_info{rev=couch_doc:parse_rev(proplists:get_value(<<"rev">>, RowValueProps)), deleted = proplists:get_value(<<"deleted">>, RowValueProps, false)} |
+ Revs =
+ [#rev_info{rev=couch_doc:parse_rev(proplists:get_value(<<"rev">>, RowValueProps)), deleted = proplists:get_value(<<"deleted">>, RowValueProps, false)} |
[#rev_info{rev=Rev,deleted=false} || Rev <- couch_doc:parse_revs(proplists:get_value(<<"conflicts">>, RowValueProps, []))] ++
[#rev_info{rev=Rev,deleted=true} || Rev <- couch_doc:parse_revs(proplists:get_value(<<"deleted_conflicts">>, RowValueProps, []))]],
#doc_info{
@@ -712,11 +712,11 @@ get_doc_info_list(#http_db{uri=DbUrl, headers=Headers}, StartSeq) ->
}
end, proplists:get_value(<<"rows">>, Results));
get_doc_info_list(DbSource, StartSeq) ->
- {ok, {_Count, DocInfoList}} = couch_db:enum_docs_since(DbSource, StartSeq,
+ {ok, {_Count, DocInfoList}} = couch_db:enum_docs_since(DbSource, StartSeq,
fun (_, _, {100, DocInfoList}) ->
{stop, {100, DocInfoList}};
- (DocInfo, _, {Count, DocInfoList}) ->
- {ok, {Count+1, [DocInfo|DocInfoList]}}
+ (DocInfo, _, {Count, DocInfoList}) ->
+ {ok, {Count+1, [DocInfo|DocInfoList]}}
end, {0, []}),
lists:reverse(DocInfoList).
@@ -742,14 +742,14 @@ open_doc(#http_db{uri=DbUrl, headers=Headers}, DocId, Options) ->
open_doc(Db, DocId, Options) ->
couch_db:open_doc(Db, DocId, Options).
-open_doc_revs(#http_db{uri=DbUrl, headers=Headers} = DbS, DocId, Revs0,
+open_doc_revs(#http_db{uri=DbUrl, headers=Headers} = DbS, DocId, Revs0,
[latest]) ->
Revs = couch_doc:rev_to_strs(Revs0),
BaseUrl = DbUrl ++ url_encode(DocId) ++ "?revs=true&latest=true",
-
+
%% MochiWeb expects URLs < 8KB long, so maybe split into multiple requests
MaxN = trunc((8192 - length(BaseUrl))/14),
-
+
JsonResults = case length(Revs) > MaxN of
false ->
Url = ?l2b(BaseUrl ++ "&open_revs=" ++ ?JSON_ENCODE(Revs)),
@@ -766,7 +766,7 @@ open_doc_revs(#http_db{uri=DbUrl, headers=Headers} = DbS, DocId, Revs0,
Acc ++ do_http_request(?l2b(BaseUrl ++ "&open_revs=" ++ ?JSON_ENCODE(lists:reverse(Rest))), get, Headers)
end,
-
+
Results = lists:map(
fun({[{<<"missing">>, Rev}]}) ->
@@ -791,7 +791,7 @@ should_flush(DocCount) when DocCount > ?BUFFER_NDOCS ->
should_flush(_DocCount) ->
MeAndMyLinks = [self()| [P || P <- element(2,process_info(self(),links)), is_pid(P)]],
-
+
case length(MeAndMyLinks)/2 > ?BUFFER_NATTACHMENTS of
true -> true;
false ->