From 91e2121c913a54a77482ed3883f7a8d2d00801a4 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 23 Jun 2011 09:45:39 +0000 Subject: Merged revision 1138796 from trunk Simpler and safer db open/closing in view group servers This makes the opening and closing of databases in the view group server to be more friendly with the db reference counting system, avoiding more potential db file leaking after compaction, as we currently open a database in one process and use it on another process (view compactor, view updater). This significantly reduces the chances of failure when compacting very large views as discussed in COUCHDB-994. This relates to COUCHDB-926 and COUCHDB-994. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1138798 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db.hrl | 1 - src/couchdb/couch_view_compactor.erl | 8 ++-- src/couchdb/couch_view_group.erl | 72 ++++++++++++++---------------------- src/couchdb/couch_view_updater.erl | 14 ++++--- 4 files changed, 39 insertions(+), 56 deletions(-) diff --git a/src/couchdb/couch_db.hrl b/src/couchdb/couch_db.hrl index 003cb688..31318782 100644 --- a/src/couchdb/couch_db.hrl +++ b/src/couchdb/couch_db.hrl @@ -206,7 +206,6 @@ -record(group, { sig=nil, - db=nil, fd=nil, name, def_lang, diff --git a/src/couchdb/couch_view_compactor.erl b/src/couchdb/couch_view_compactor.erl index 8eda43e9..734605f0 100644 --- a/src/couchdb/couch_view_compactor.erl +++ b/src/couchdb/couch_view_compactor.erl @@ -20,14 +20,14 @@ %% @doc Compacts the views. GroupId must not include the _design/ prefix start_compact(DbName, GroupId) -> Pid = couch_view:get_group_server(DbName, <<"_design/",GroupId/binary>>), - gen_server:cast(Pid, {start_compact, fun compact_group/2}). + gen_server:cast(Pid, {start_compact, fun compact_group/3}). 
%%============================================================================= %% internal functions %%============================================================================= %% @spec compact_group(Group, NewGroup) -> ok -compact_group(Group, EmptyGroup) -> +compact_group(Group, EmptyGroup, DbName) -> #group{ current_seq = Seq, id_btree = IdBtree, @@ -36,15 +36,15 @@ compact_group(Group, EmptyGroup) -> } = Group, #group{ - db = Db, id_btree = EmptyIdBtree, views = EmptyViews } = EmptyGroup, + {ok, Db} = couch_db:open_int(DbName, []), {ok, {Count, _}} = couch_btree:full_reduce(Db#db.fulldocinfo_by_id_btree), + couch_db:close(Db), <<"_design", ShortName/binary>> = GroupId, - DbName = couch_db:name(Db), TaskName = <>, couch_task_status:add_task(<<"View Group Compaction">>, TaskName, <<"">>), diff --git a/src/couchdb/couch_view_group.erl b/src/couchdb/couch_view_group.erl index 6ef1dcb4..448a7dcf 100644 --- a/src/couchdb/couch_view_group.erl +++ b/src/couchdb/couch_view_group.erl @@ -78,7 +78,7 @@ start_link(InitArgs) -> init({{_, DbName, _} = InitArgs, ReturnPid, Ref}) -> process_flag(trap_exit, true), try prepare_group(InitArgs, false) of - {ok, #group{db=Db, fd=Fd, current_seq=Seq}=Group} -> + {ok, Db, #group{fd=Fd, current_seq=Seq}=Group} -> case Seq > couch_db:get_update_seq(Db) of true -> ReturnPid ! 
{Ref, self(), {error, invalid_view_seq}}, @@ -90,7 +90,7 @@ init({{_, DbName, _} = InitArgs, ReturnPid, Ref}) -> {ok, #group_state{ db_name=DbName, init_args=InitArgs, - group=Group#group{db=nil}, + group=Group, ref_counter=RefCounter}} end; Error -> @@ -124,14 +124,11 @@ handle_call({request_group, RequestSeq}, From, updater_pid=nil, waiting_list=WaitList }=State) when RequestSeq > Seq -> - {ok, Db} = couch_db:open_int(DbName, []), - Group2 = Group#group{db=Db}, Owner = self(), - Pid = spawn_link(fun()-> couch_view_updater:update(Owner, Group2) end), + Pid = spawn_link(fun()-> couch_view_updater:update(Owner, Group, DbName) end), {noreply, State#group_state{ updater_pid=Pid, - group=Group2, waiting_list=[{From,RequestSeq}|WaitList] }, infinity}; @@ -166,7 +163,8 @@ handle_cast({start_compact, CompactFun}, #group_state{compactor_pid=nil} {ok, Db} = couch_db:open_int(DbName, []), {ok, Fd} = open_index_file(compact, RootDir, DbName, GroupSig), NewGroup = reset_file(Db, Fd, DbName, Group), - Pid = spawn_link(fun() -> CompactFun(Group, NewGroup) end), + couch_db:close(Db), + Pid = spawn_link(fun() -> CompactFun(Group, NewGroup, DbName) end), {noreply, State#group_state{compactor_pid = Pid}}; handle_cast({start_compact, _}, State) -> %% compact already running, this is a no-op @@ -176,7 +174,7 @@ handle_cast({compact_done, #group{current_seq=NewSeq} = NewGroup}, #group_state{group = #group{current_seq=OldSeq}} = State) when NewSeq >= OldSeq -> #group_state{ - group = #group{name=GroupId, fd=OldFd, sig=GroupSig} = Group, + group = #group{name=GroupId, fd=OldFd, sig=GroupSig}, init_args = {RootDir, DbName, _}, updater_pid = UpdaterPid, compactor_pid = CompactorPid, @@ -195,7 +193,7 @@ handle_cast({compact_done, #group{current_seq=NewSeq} = NewGroup}, unlink(UpdaterPid), exit(UpdaterPid, view_compaction_complete), Owner = self(), - spawn_link(fun()-> couch_view_updater:update(Owner, NewGroup) end); + spawn_link(fun()-> couch_view_updater:update(Owner, NewGroup, DbName) 
end); true -> nil end, @@ -206,19 +204,10 @@ handle_cast({compact_done, #group{current_seq=NewSeq} = NewGroup}, unlink(OldFd), couch_ref_counter:drop(RefCounter), {ok, NewRefCounter} = couch_ref_counter:start([NewGroup#group.fd]), - case Group#group.db of - nil -> ok; - Else -> couch_db:close(Else) - end, - - case NewGroup#group.db of - nil -> ok; - _ -> couch_db:close(NewGroup#group.db) - end, self() ! delayed_commit, {noreply, State#group_state{ - group=NewGroup#group{db = nil}, + group=NewGroup, ref_counter=NewRefCounter, compactor_pid=nil, updater_pid=NewUpdaterPid @@ -230,18 +219,15 @@ handle_cast({compact_done, NewGroup}, State) -> } = State, ?LOG_INFO("View index compaction still behind for ~s ~s -- current: ~p " ++ "compact: ~p", [DbName, GroupId, CurrentSeq, NewGroup#group.current_seq]), - couch_db:close(NewGroup#group.db), Pid = spawn_link(fun() -> - {ok, Db} = couch_db:open_int(DbName, []), {_,Ref} = erlang:spawn_monitor(fun() -> - couch_view_updater:update(nil, NewGroup#group{db = Db}) + couch_view_updater:update(nil, NewGroup, DbName) end), receive {'DOWN', Ref, _, _, {new_group, NewGroup2}} -> - couch_db:close(Db), #group{name=GroupId} = NewGroup2, Pid2 = couch_view:get_group_server(DbName, GroupId), - gen_server:cast(Pid2, {compact_done, NewGroup2#group{db = nil}}) + gen_server:cast(Pid2, {compact_done, NewGroup2}) end end), {noreply, State#group_state{compactor_pid = Pid}}; @@ -283,13 +269,12 @@ handle_info(delayed_commit, #group_state{db_name=DbName,group=Group}=State) -> {noreply, State#group_state{waiting_commit=true}} end; -handle_info({'EXIT', FromPid, {new_group, #group{db=Db}=Group}}, +handle_info({'EXIT', FromPid, {new_group, Group}}, #group_state{db_name=DbName, updater_pid=UpPid, ref_counter=RefCounter, waiting_list=WaitList, waiting_commit=WaitingCommit}=State) when UpPid == FromPid -> - ok = couch_db:close(Db), if not WaitingCommit -> erlang:send_after(1000, self(), delayed_commit); true -> ok @@ -297,30 +282,27 @@ handle_info({'EXIT', 
FromPid, {new_group, #group{db=Db}=Group}}, case reply_with_group(Group, WaitList, [], RefCounter) of [] -> {noreply, State#group_state{waiting_commit=true, waiting_list=[], - group=Group#group{db=nil}, updater_pid=nil}}; + group=Group, updater_pid=nil}}; StillWaiting -> % we still have some waiters, reopen the database and reupdate the index - {ok, Db2} = couch_db:open_int(DbName, []), - Group2 = Group#group{db=Db2}, Owner = self(), - Pid = spawn_link(fun() -> couch_view_updater:update(Owner, Group2) end), + Pid = spawn_link(fun() -> couch_view_updater:update(Owner, Group, DbName) end), {noreply, State#group_state{waiting_commit=true, - waiting_list=StillWaiting, group=Group2, updater_pid=Pid}} + waiting_list=StillWaiting, updater_pid=Pid}} end; handle_info({'EXIT', _, {new_group, _}}, State) -> %% message from an old (probably pre-compaction) updater; ignore {noreply, State}; -handle_info({'EXIT', FromPid, reset}, - #group_state{ - init_args=InitArgs, - updater_pid=UpPid, - group=Group}=State) when UpPid == FromPid -> - ok = couch_db:close(Group#group.db), +handle_info({'EXIT', UpPid, reset}, + #group_state{init_args=InitArgs, updater_pid=UpPid} = State) -> case prepare_group(InitArgs, true) of - {ok, ResetGroup} -> + {ok, Db, ResetGroup} -> Owner = self(), - Pid = spawn_link(fun()-> couch_view_updater:update(Owner, ResetGroup) end), + couch_db:close(Db), + Pid = spawn_link(fun() -> + couch_view_updater:update(Owner, ResetGroup, Db#db.name) + end), {noreply, State#group_state{ updater_pid=Pid, group=ResetGroup}}; @@ -386,17 +368,17 @@ prepare_group({RootDir, DbName, #group{sig=Sig}=Group}, ForceReset)-> {ok, Fd} -> if ForceReset -> % this can happen if we missed a purge - {ok, reset_file(Db, Fd, DbName, Group)}; + {ok, Db, reset_file(Db, Fd, DbName, Group)}; true -> % 09 UPGRADE CODE ok = couch_file:upgrade_old_header(Fd, <<$r, $c, $k, 0>>), case (catch couch_file:read_header(Fd)) of {ok, {Sig, HeaderInfo}} -> % sigs match! 
- {ok, init_group(Db, Fd, Group, HeaderInfo)}; + {ok, Db, init_group(Db, Fd, Group, HeaderInfo)}; _ -> % this happens on a new file - {ok, reset_file(Db, Fd, DbName, Group)} + {ok, Db, reset_file(Db, Fd, DbName, Group)} end end; Error -> @@ -582,7 +564,7 @@ design_doc_to_view_group(#doc{id=Id,body={Fields}}) -> reset_group(#group{views=Views}=Group) -> Views2 = [View#view{btree=nil} || View <- Views], - Group#group{db=nil,fd=nil,query_server=nil,current_seq=0, + Group#group{fd=nil,query_server=nil,current_seq=0, id_btree=nil,views=Views2}. reset_file(Db, Fd, DbName, #group{sig=Sig,name=Name} = Group) -> @@ -598,7 +580,7 @@ init_group(Db, Fd, #group{views=Views}=Group, nil) -> init_group(Db, Fd, Group, #index_header{seq=0, purge_seq=couch_db:get_purge_seq(Db), id_btree_state=nil, view_states=[{nil, 0, 0} || _ <- Views]}); -init_group(Db, Fd, #group{def_lang=Lang,views=Views}= +init_group(_Db, Fd, #group{def_lang=Lang,views=Views}= Group, IndexHeader) -> #index_header{seq=Seq, purge_seq=PurgeSeq, id_btree_state=IdBtreeState, view_states=ViewStates} = IndexHeader, @@ -638,5 +620,5 @@ init_group(Db, Fd, #group{def_lang=Lang,views=Views}= View#view{btree=Btree, update_seq=USeq, purge_seq=PSeq} end, ViewStates2, Views), - Group#group{db=Db, fd=Fd, current_seq=Seq, purge_seq=PurgeSeq, + Group#group{fd=Fd, current_seq=Seq, purge_seq=PurgeSeq, id_btree=IdBtree, views=Views2}. diff --git a/src/couchdb/couch_view_updater.erl b/src/couchdb/couch_view_updater.erl index 8e089fa9..2cc390df 100644 --- a/src/couchdb/couch_view_updater.erl +++ b/src/couchdb/couch_view_updater.erl @@ -12,30 +12,31 @@ -module(couch_view_updater). --export([update/2]). +-export([update/3]). -include("couch_db.hrl"). --spec update(_, #group{}) -> no_return(). +-spec update(_, #group{}, Dbname::binary()) -> no_return(). 
-update(Owner, Group) -> +update(Owner, Group, DbName) -> #group{ - db = #db{name=DbName} = Db, name = GroupName, current_seq = Seq, purge_seq = PurgeSeq } = Group, couch_task_status:add_task(<<"View Group Indexer">>, <>, <<"Starting index update">>), + {ok, Db} = couch_db:open_int(DbName, []), DbPurgeSeq = couch_db:get_purge_seq(Db), Group2 = if DbPurgeSeq == PurgeSeq -> Group; DbPurgeSeq == PurgeSeq + 1 -> couch_task_status:update(<<"Removing purged entries from view index.">>), - purge_index(Group); + purge_index(Group, Db); true -> couch_task_status:update(<<"Resetting view index due to lost purge entries.">>), + couch_db:close(Db), exit(reset) end, {ok, MapQueue} = couch_work_queue:new( @@ -73,13 +74,14 @@ update(Owner, Group) -> couch_task_status:set_update_frequency(0), couch_task_status:update("Finishing."), couch_work_queue:close(MapQueue), + couch_db:close(Db), receive {new_group, NewGroup} -> exit({new_group, NewGroup#group{current_seq=couch_db:get_update_seq(Db)}}) end. -purge_index(#group{db=Db, views=Views, id_btree=IdBtree}=Group) -> +purge_index(#group{views=Views, id_btree=IdBtree}=Group, Db) -> {ok, PurgedIdsRevs} = couch_db:get_last_purged(Db), Ids = [Id || {Id, _Revs} <- PurgedIdsRevs], {ok, Lookups, IdBtree2} = couch_btree:query_modify(IdBtree, Ids, [], Ids), -- cgit v1.2.3 From 71eb2040ff0da968868a3d97a5a1dee4eb4ea651 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 23 Jun 2011 09:46:46 +0000 Subject: Merged revision 1104168 from trunk Add infinity timeout to couch_ref_counter calls After compacting a very large database, the updater calls the couch_db gen_server with a db record that contains a new ref counter. The couch_db gen_server calls drop on the old ref counter and calls add on the new ref counter. However since the system is busy deleting the old db file or garbage collecting, one of the ref counter calls times out, causing couch_db's terminate to invoked and terminate calls shutdown on the updater. 
However the updater is waiting for the call it made to couch_db to complete, which can't complete since it's waiting for the updater. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1138799 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_ref_counter.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/couchdb/couch_ref_counter.erl b/src/couchdb/couch_ref_counter.erl index 5a111ab6..a774f469 100644 --- a/src/couchdb/couch_ref_counter.erl +++ b/src/couchdb/couch_ref_counter.erl @@ -24,14 +24,14 @@ drop(RefCounterPid) -> drop(RefCounterPid, self()). drop(RefCounterPid, Pid) -> - gen_server:call(RefCounterPid, {drop, Pid}). + gen_server:call(RefCounterPid, {drop, Pid}, infinity). add(RefCounterPid) -> add(RefCounterPid, self()). add(RefCounterPid, Pid) -> - gen_server:call(RefCounterPid, {add, Pid}). + gen_server:call(RefCounterPid, {add, Pid}, infinity). count(RefCounterPid) -> gen_server:call(RefCounterPid, count). -- cgit v1.2.3 From 7e906e64a56dbb1dceb1ce95023a2a1af5af8ec9 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sun, 26 Jun 2011 18:19:03 +0000 Subject: Merged revision 1139894 from trunk Don't steal data from subsequent requests in the pipeline git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1139897 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_httpd.erl | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/couchdb/couch_httpd.erl b/src/couchdb/couch_httpd.erl index 7c7781f6..e472d094 100644 --- a/src/couchdb/couch_httpd.erl +++ b/src/couchdb/couch_httpd.erl @@ -469,16 +469,24 @@ body_length(Req) -> Unknown -> {unknown_transfer_encoding, Unknown} end. 
-body(#httpd{mochi_req=MochiReq, req_body=ReqBody}) -> - case ReqBody of +body(#httpd{mochi_req=MochiReq, req_body=undefined} = Req) -> + case body_length(Req) of undefined -> - % Maximum size of document PUT request body (4GB) MaxSize = list_to_integer( couch_config:get("couchdb", "max_document_size", "4294967296")), MochiReq:recv_body(MaxSize); - _Else -> - ReqBody - end. + chunked -> + ChunkFun = fun({0, _Footers}, Acc) -> + lists:reverse(Acc); + ({_Len, Chunk}, Acc) -> + [Chunk | Acc] + end, + recv_chunked(Req, 8192, ChunkFun, []); + Len -> + MochiReq:recv_body(Len) + end; +body(#httpd{req_body=ReqBody}) -> + ReqBody. json_body(Httpd) -> ?JSON_DECODE(body(Httpd)). -- cgit v1.2.3 From d698504b2d2648d87855c53c1ff3df36598db6f6 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Tue, 28 Jun 2011 23:01:30 +0000 Subject: Backport revision 1140886 from trunk Improved error message in the replicator git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1140887 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_rep.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl index 9d86e7a5..4fabdd99 100644 --- a/src/couchdb/couch_rep.erl +++ b/src/couchdb/couch_rep.erl @@ -322,7 +322,7 @@ start_replication_server(Replicator) -> throw({db_not_found, <<"could not open ", DbUrl/binary>>}); {error, {unauthorized, DbUrl}} -> throw({unauthorized, - <<"unauthorized to access database ", DbUrl/binary>>}); + <<"unauthorized to access or create database ", DbUrl/binary>>}); {error, {'EXIT', {badarg, [{erlang, apply, [gen_server, start_link, undefined]} | _]}}} -> % Clause to deal with a change in the supervisor module introduced @@ -338,7 +338,7 @@ start_replication_server(Replicator) -> throw({db_not_found, <<"could not open ", DbUrl/binary>>}); {error, {{unauthorized, DbUrl}, _}} -> throw({unauthorized, - <<"unauthorized to access database ", DbUrl/binary>>}) + <<"unauthorized to 
access or create database ", DbUrl/binary>>}) end. compare_replication_logs(SrcDoc, TgtDoc) -> -- cgit v1.2.3 From b727a764be6d7ff9fe367cc6b317c4fc7f1baba3 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Thu, 30 Jun 2011 13:04:39 +0000 Subject: COUCHDB-1206 - include current_seq in view ETag for include_docs=true git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1141522 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/etags_views.js | 8 ++++++++ src/couchdb/couch_httpd_show.erl | 6 +++--- src/couchdb/couch_httpd_view.erl | 26 ++++++++++++++------------ 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/share/www/script/test/etags_views.js b/share/www/script/test/etags_views.js index f556d6ac..6d8e97b8 100644 --- a/share/www/script/test/etags_views.js +++ b/share/www/script/test/etags_views.js @@ -70,6 +70,14 @@ couchTests.etags_views = function(debug) { xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView"); var etag1 = xhr.getResponseHeader("etag"); T(etag1 == etag); + + // verify ETag always changes for include_docs=true on update + xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView?include_docs=true"); + var etag1 = xhr.getResponseHeader("etag"); + T(db.save({"_id":"doc2", "foo":"bar"}).ok); + xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView?include_docs=true"); + var etag2 = xhr.getResponseHeader("etag"); + T(etag1 != etag2); // Verify that purges affect etags xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/fooView"); diff --git a/src/couchdb/couch_httpd_show.erl b/src/couchdb/couch_httpd_show.erl index 59f74e1c..a215b1da 100644 --- a/src/couchdb/couch_httpd_show.erl +++ b/src/couchdb/couch_httpd_show.erl @@ -190,14 +190,14 @@ handle_view_list_req(Req, _Db, _DDoc) -> handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) -> ViewDesignId = <<"_design/", ViewDesignName/binary>>, {ViewType, View, Group, 
QueryArgs} = couch_httpd_view:load_view(Req, Db, {ViewDesignId, ViewName}, Keys), - Etag = list_etag(Req, Db, Group, View, {couch_httpd:doc_etag(DDoc), Keys}), + Etag = list_etag(Req, Db, Group, View, QueryArgs, {couch_httpd:doc_etag(DDoc), Keys}), couch_httpd:etag_respond(Req, Etag, fun() -> output_list(ViewType, Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group) end). -list_etag(#httpd{user_ctx=UserCtx}=Req, Db, Group, View, More) -> +list_etag(#httpd{user_ctx=UserCtx}=Req, Db, Group, View, QueryArgs, More) -> Accept = couch_httpd:header_value(Req, "Accept"), - couch_httpd_view:view_etag(Db, Group, View, {More, Accept, UserCtx#user_ctx.roles}). + couch_httpd_view:view_etag(Db, Group, View, QueryArgs, {More, Accept, UserCtx#user_ctx.roles}). output_list(map, Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group) -> output_map_list(Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group); diff --git a/src/couchdb/couch_httpd_view.erl b/src/couchdb/couch_httpd_view.erl index b71fc2c6..1f279417 100644 --- a/src/couchdb/couch_httpd_view.erl +++ b/src/couchdb/couch_httpd_view.erl @@ -17,7 +17,7 @@ -export([parse_view_params/3]). -export([make_view_fold_fun/7, finish_view_fold/4, finish_view_fold/5, view_row_obj/4]). --export([view_etag/3, view_etag/4, make_reduce_fold_funs/6]). +-export([view_etag/5, make_reduce_fold_funs/6]). -export([design_doc_view/5, parse_bool_param/1, doc_member/3]). -export([make_key_options/1, load_view/4]). 
@@ -114,7 +114,7 @@ output_map_view(Req, View, Group, Db, QueryArgs, nil) -> limit = Limit, skip = SkipCount } = QueryArgs, - CurrentEtag = view_etag(Db, Group, View), + CurrentEtag = view_etag(Db, Group, View, QueryArgs), couch_httpd:etag_respond(Req, CurrentEtag, fun() -> {ok, RowCount} = couch_view:get_row_count(View), FoldlFun = make_view_fold_fun(Req, QueryArgs, CurrentEtag, Db, Group#group.current_seq, RowCount, #view_fold_helper_funs{reduce_count=fun couch_view:reduce_to_count/1}), @@ -130,7 +130,7 @@ output_map_view(Req, View, Group, Db, QueryArgs, Keys) -> limit = Limit, skip = SkipCount } = QueryArgs, - CurrentEtag = view_etag(Db, Group, View, Keys), + CurrentEtag = view_etag(Db, Group, View, QueryArgs, Keys), couch_httpd:etag_respond(Req, CurrentEtag, fun() -> {ok, RowCount} = couch_view:get_row_count(View), FoldAccInit = {Limit, SkipCount, undefined, []}, @@ -155,7 +155,7 @@ output_reduce_view(Req, Db, View, Group, QueryArgs, nil) -> skip = Skip, group_level = GroupLevel } = QueryArgs, - CurrentEtag = view_etag(Db, Group, View), + CurrentEtag = view_etag(Db, Group, View, QueryArgs), couch_httpd:etag_respond(Req, CurrentEtag, fun() -> {ok, GroupRowsFun, RespFun} = make_reduce_fold_funs(Req, GroupLevel, QueryArgs, CurrentEtag, Group#group.current_seq, @@ -173,7 +173,7 @@ output_reduce_view(Req, Db, View, Group, QueryArgs, Keys) -> skip = Skip, group_level = GroupLevel } = QueryArgs, - CurrentEtag = view_etag(Db, Group, View, Keys), + CurrentEtag = view_etag(Db, Group, View, QueryArgs, Keys), couch_httpd:etag_respond(Req, CurrentEtag, fun() -> {ok, GroupRowsFun, RespFun} = make_reduce_fold_funs(Req, GroupLevel, QueryArgs, CurrentEtag, Group#group.current_seq, @@ -640,14 +640,16 @@ send_json_reduce_row(Resp, {Key, Value}, RowFront) -> send_chunk(Resp, RowFront ++ ?JSON_ENCODE({[{key, Key}, {value, Value}]})), {ok, ",\r\n"}. -view_etag(Db, Group, View) -> - view_etag(Db, Group, View, nil). 
+view_etag(Db, Group, View, QueryArgs) -> + view_etag(Db, Group, View, QueryArgs, nil). -view_etag(Db, Group, {reduce, _, _, View}, Extra) -> - view_etag(Db, Group, View, Extra); -view_etag(Db, Group, {temp_reduce, View}, Extra) -> - view_etag(Db, Group, View, Extra); -view_etag(_Db, #group{sig=Sig}, #view{update_seq=UpdateSeq, purge_seq=PurgeSeq}, Extra) -> +view_etag(Db, Group, {reduce, _, _, View}, QueryArgs, Extra) -> + view_etag(Db, Group, View, QueryArgs, Extra); +view_etag(Db, Group, {temp_reduce, View}, QueryArgs, Extra) -> + view_etag(Db, Group, View, QueryArgs, Extra); +view_etag(_Db, #group{sig=Sig, current_seq=CurrentSeq}, _View, #view_query_args{include_docs=true}, Extra) -> + couch_httpd:make_etag({Sig, CurrentSeq, Extra}); +view_etag(_Db, #group{sig=Sig}, #view{update_seq=UpdateSeq, purge_seq=PurgeSeq}, _QueryArgs, Extra) -> couch_httpd:make_etag({Sig, UpdateSeq, PurgeSeq, Extra}). % the view row has an error -- cgit v1.2.3 From 364d54f64dc75466cc0aa9a07458fdaa9fa486ec Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Thu, 30 Jun 2011 13:16:13 +0000 Subject: COUCHDB-1206 - document in CHANGES/NEWS. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1141524 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 2 ++ NEWS | 2 ++ 2 files changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index 54a2e03c..12efc0b3 100644 --- a/CHANGES +++ b/CHANGES @@ -6,6 +6,8 @@ Version 1.1.1 This version has not been released yet. +* ETags for views include current sequence if include_docs=true. + Version 1.1.0 ------------- diff --git a/NEWS b/NEWS index 97eb58e7..4803ba24 100644 --- a/NEWS +++ b/NEWS @@ -12,6 +12,8 @@ Version 1.1.1 This version has not been released yet. +* ETags for views include current sequence if include_docs=true. 
+ Version 1.1.0 ------------- -- cgit v1.2.3 From 276406da94a03d46afec074b141732c7487e2f11 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sat, 2 Jul 2011 18:44:00 +0000 Subject: Restart replications on error If a replication transitions to the "error" state, attempt to restart it up to "max_replication_retry_count" times (.ini configuration parameter). This number of retry attempts can now be set to "infinity" as well. This was already current behaviour in trunk (upcoming 1.2). Closes COUCHDB-1194. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1142258 13f79535-47bb-0310-9956-ffa450edef68 --- etc/couchdb/default.ini.tpl.in | 1 + src/couchdb/couch_rep.erl | 144 ++-------- src/couchdb/couch_replication_manager.erl | 424 ++++++++++++++++++++++-------- 3 files changed, 328 insertions(+), 241 deletions(-) diff --git a/etc/couchdb/default.ini.tpl.in b/etc/couchdb/default.ini.tpl.in index f5dc24af..1592e330 100644 --- a/etc/couchdb/default.ini.tpl.in +++ b/etc/couchdb/default.ini.tpl.in @@ -137,6 +137,7 @@ compressible_types = text/*, application/javascript, application/json, applicati [replicator] db = _replicator +; Maximum replicaton retry count can be a non-negative integer or "infinity". max_replication_retry_count = 10 max_http_sessions = 20 max_http_pipeline_size = 50 diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl index 4fabdd99..a9c156e9 100644 --- a/src/couchdb/couch_rep.erl +++ b/src/couchdb/couch_rep.erl @@ -16,12 +16,10 @@ code_change/3]). -export([replicate/2, checkpoint/1]). --export([ensure_rep_db_exists/0, make_replication_id/2]). +-export([make_replication_id/2]). -export([start_replication/3, end_replication/1, get_result/4]). --export([update_rep_doc/2]). -include("couch_db.hrl"). --include("couch_js_functions.hrl"). -include("../ibrowse/ibrowse.hrl"). -define(REP_ID_VERSION, 2). 
@@ -54,7 +52,6 @@ committed_seq = 0, stats = nil, - rep_doc = nil, source_db_update_notifier = nil, target_db_update_notifier = nil }). @@ -94,11 +91,11 @@ end_replication({BaseId, Extension}) -> end end. -start_replication(RepDoc, {BaseId, Extension}, UserCtx) -> +start_replication(RepDoc, {BaseId, Extension} = RepId, UserCtx) -> Replicator = { BaseId ++ Extension, {gen_server, start_link, - [?MODULE, [BaseId, RepDoc, UserCtx], []]}, + [?MODULE, [RepId, RepDoc, UserCtx], []]}, temporary, 1, worker, @@ -135,7 +132,7 @@ init(InitArgs) -> {stop, Error} end. -do_init([RepId, {PostProps} = RepDoc, UserCtx] = InitArgs) -> +do_init([{BaseId, _Ext} = RepId, {PostProps}, UserCtx] = InitArgs) -> process_flag(trap_exit, true), SourceProps = couch_util:get_value(<<"source">>, PostProps), @@ -152,10 +149,8 @@ do_init([RepId, {PostProps} = RepDoc, UserCtx] = InitArgs) -> SourceInfo = dbinfo(Source), TargetInfo = dbinfo(Target), - maybe_set_triggered(RepDoc, RepId), - [SourceLog, TargetLog] = find_replication_logs( - [Source, Target], RepId, {PostProps}, UserCtx), + [Source, Target], BaseId, {PostProps}, UserCtx), {StartSeq, History} = compare_replication_logs(SourceLog, TargetLog), {ok, ChangesFeed} = @@ -174,10 +169,12 @@ do_init([RepId, {PostProps} = RepDoc, UserCtx] = InitArgs) -> ets:insert(Stats, {docs_written, 0}), ets:insert(Stats, {doc_write_failures, 0}), - {ShortId, _} = lists:split(6, RepId), + {ShortId, _} = lists:split(6, BaseId), couch_task_status:add_task("Replication", io_lib:format("~s: ~s -> ~s", [ShortId, dbname(Source), dbname(Target)]), "Starting"), + couch_replication_manager:replication_started(RepId), + State = #state{ changes_feed = ChangesFeed, missing_revs = MissingRevs, @@ -200,7 +197,6 @@ do_init([RepId, {PostProps} = RepDoc, UserCtx] = InitArgs) -> rep_starttime = httpd_util:rfc1123_date(), src_starttime = couch_util:get_value(instance_start_time, SourceInfo), tgt_starttime = couch_util:get_value(instance_start_time, TargetInfo), - rep_doc = 
RepDoc, source_db_update_notifier = source_db_update_notifier(Source), target_db_update_notifier = target_db_update_notifier(Target) }, @@ -272,27 +268,24 @@ handle_info({'EXIT', _Pid, {Err, Reason}}, State) when Err == source_error; handle_info({'EXIT', _Pid, Reason}, State) -> {stop, Reason, State}. -terminate(normal, #state{checkpoint_scheduled=nil} = State) -> +terminate(normal, #state{checkpoint_scheduled=nil, init_args=[RepId | _]} = State) -> do_terminate(State), - update_rep_doc( - State#state.rep_doc, [{<<"_replication_state">>, <<"completed">>}]); + couch_replication_manager:replication_completed(RepId); -terminate(normal, State) -> +terminate(normal, #state{init_args=[RepId | _]} = State) -> timer:cancel(State#state.checkpoint_scheduled), do_terminate(do_checkpoint(State)), - update_rep_doc( - State#state.rep_doc, [{<<"_replication_state">>, <<"completed">>}]); + couch_replication_manager:replication_completed(RepId); terminate(shutdown, #state{listeners = Listeners} = State) -> % continuous replication stopped [gen_server:reply(L, {ok, stopped}) || L <- Listeners], terminate_cleanup(State); -terminate(Reason, #state{listeners = Listeners} = State) -> +terminate(Reason, #state{listeners = Listeners, init_args=[RepId | _]} = State) -> [gen_server:reply(L, {error, Reason}) || L <- Listeners], terminate_cleanup(State), - update_rep_doc( - State#state.rep_doc, [{<<"_replication_state">>, <<"error">>}]). + couch_replication_manager:replication_error(RepId, Reason). code_change(_OldVsn, State, _Extra) -> {ok, State}. @@ -698,7 +691,7 @@ do_checkpoint(State) -> src_starttime = SrcInstanceStartTime, tgt_starttime = TgtInstanceStartTime, stats = Stats, - rep_doc = {RepDoc} + init_args = [_RepId, {RepDoc} | _] } = State, case commit_to_both(Source, Target, NewSeqNum) of {SrcInstanceStartTime, TgtInstanceStartTime} -> @@ -901,113 +894,6 @@ parse_proxy_params(ProxyUrl) -> [{proxy_user, User}, {proxy_password, Passwd}] end. 
-update_rep_doc({Props} = _RepDoc, KVs) -> - case couch_util:get_value(<<"_id">>, Props) of - undefined -> - % replication triggered by POSTing to _replicate/ - ok; - RepDocId -> - % replication triggered by adding a Rep Doc to the replicator DB - {ok, RepDb} = ensure_rep_db_exists(), - case couch_db:open_doc(RepDb, RepDocId, []) of - {ok, LatestRepDoc} -> - update_rep_doc(RepDb, LatestRepDoc, KVs); - _ -> - ok - end, - couch_db:close(RepDb) - end. - -update_rep_doc(RepDb, #doc{body = {RepDocBody}} = RepDoc, KVs) -> - NewRepDocBody = lists:foldl( - fun({<<"_replication_state">> = K, State} = KV, Body) -> - case couch_util:get_value(K, Body) of - State -> - Body; - _ -> - Body1 = lists:keystore(K, 1, Body, KV), - lists:keystore( - <<"_replication_state_time">>, 1, - Body1, {<<"_replication_state_time">>, timestamp()}) - end; - ({K, _V} = KV, Body) -> - lists:keystore(K, 1, Body, KV) - end, - RepDocBody, - KVs - ), - case NewRepDocBody of - RepDocBody -> - ok; - _ -> - % might not succeed - when the replication doc is deleted right - % before this update (not an error) - couch_db:update_doc(RepDb, RepDoc#doc{body = {NewRepDocBody}}, []) - end. - -% RFC3339 timestamps. -% Note: doesn't include the time seconds fraction (RFC3339 says it's optional). -timestamp() -> - {{Year, Month, Day}, {Hour, Min, Sec}} = calendar:now_to_local_time(now()), - UTime = erlang:universaltime(), - LocalTime = calendar:universal_time_to_local_time(UTime), - DiffSecs = calendar:datetime_to_gregorian_seconds(LocalTime) - - calendar:datetime_to_gregorian_seconds(UTime), - zone(DiffSecs div 3600, (DiffSecs rem 3600) div 60), - iolist_to_binary( - io_lib:format("~4..0w-~2..0w-~2..0wT~2..0w:~2..0w:~2..0w~s", - [Year, Month, Day, Hour, Min, Sec, - zone(DiffSecs div 3600, (DiffSecs rem 3600) div 60)])). - -zone(Hr, Min) when Hr >= 0, Min >= 0 -> - io_lib:format("+~2..0w:~2..0w", [Hr, Min]); -zone(Hr, Min) -> - io_lib:format("-~2..0w:~2..0w", [abs(Hr), abs(Min)]). 
- - -maybe_set_triggered({RepProps} = RepDoc, RepId) -> - case couch_util:get_value(<<"_replication_state">>, RepProps) of - <<"triggered">> -> - ok; - _ -> - update_rep_doc( - RepDoc, - [ - {<<"_replication_state">>, <<"triggered">>}, - {<<"_replication_id">>, ?l2b(RepId)} - ] - ) - end. - -ensure_rep_db_exists() -> - DbName = ?l2b(couch_config:get("replicator", "db", "_replicator")), - Opts = [ - {user_ctx, #user_ctx{roles=[<<"_admin">>, <<"_replicator">>]}}, - sys_db - ], - case couch_db:open(DbName, Opts) of - {ok, Db} -> - Db; - _Error -> - {ok, Db} = couch_db:create(DbName, Opts) - end, - ok = ensure_rep_ddoc_exists(Db, <<"_design/_replicator">>), - {ok, Db}. - -ensure_rep_ddoc_exists(RepDb, DDocID) -> - case couch_db:open_doc(RepDb, DDocID, []) of - {ok, _Doc} -> - ok; - _ -> - DDoc = couch_doc:from_json_obj({[ - {<<"_id">>, DDocID}, - {<<"language">>, <<"javascript">>}, - {<<"validate_doc_update">>, ?REP_DB_DOC_VALIDATE_FUN} - ]}), - {ok, _Rev} = couch_db:update_doc(RepDb, DDoc, []) - end, - ok. - source_db_update_notifier(#db{name = DbName}) -> Server = self(), {ok, Notifier} = couch_db_update_notifier:start_link( diff --git a/src/couchdb/couch_replication_manager.erl b/src/couchdb/couch_replication_manager.erl index e3d97c37..7e2c8118 100644 --- a/src/couchdb/couch_replication_manager.erl +++ b/src/couchdb/couch_replication_manager.erl @@ -13,14 +13,20 @@ -module(couch_replication_manager). -behaviour(gen_server). +% public API +-export([replication_started/1, replication_completed/1, replication_error/2]). + +% gen_server callbacks -export([start_link/0, init/1, handle_call/3, handle_info/2, handle_cast/2]). -export([code_change/3, terminate/2]). -include("couch_db.hrl"). +-include("couch_js_functions.hrl"). --define(DOC_ID_TO_REP_ID, rep_doc_id_to_rep_id). --define(REP_ID_TO_DOC_ID, rep_id_to_rep_doc_id). --define(INITIAL_WAIT, 5). +-define(DOC_TO_REP, couch_rep_doc_id_to_rep_id). +-define(REP_TO_STATE, couch_rep_id_to_rep_state). 
+-define(INITIAL_WAIT, 2.5). % seconds +-define(MAX_WAIT, 600). % seconds -record(state, { changes_feed_loop = nil, @@ -30,6 +36,16 @@ max_retries }). +-record(rep_state, { + doc_id, + user_ctx, + doc, + starting, + retries_left, + max_retries, + wait = ?INITIAL_WAIT +}). + -import(couch_util, [ get_value/2, get_value/3, @@ -40,17 +56,56 @@ start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +replication_started({BaseId, _} = RepId) -> + case rep_state(RepId) of + nil -> + ok; + #rep_state{doc_id = DocId} -> + update_rep_doc(DocId, [ + {<<"_replication_state">>, <<"triggered">>}, + {<<"_replication_id">>, ?l2b(BaseId)}]), + ok = gen_server:call(?MODULE, {rep_started, RepId}, infinity), + ?LOG_INFO("Document `~s` triggered replication `~s`", + [DocId, pp_rep_id(RepId)]) + end. + + +replication_completed(RepId) -> + case rep_state(RepId) of + nil -> + ok; + #rep_state{doc_id = DocId} = St -> + update_rep_doc(DocId, [{<<"_replication_state">>, <<"completed">>}]), + ok = gen_server:call(?MODULE, {rep_complete, RepId}, infinity), + ?LOG_INFO("Replication `~s` finished (triggered by document `~s`)", + [pp_rep_id(RepId), DocId]) + end. + + +replication_error({BaseId, _} = RepId, Error) -> + case rep_state(RepId) of + nil -> + ok; + #rep_state{doc_id = DocId} -> + % TODO: maybe add error reason to replication document + update_rep_doc(DocId, [ + {<<"_replication_state">>, <<"error">>}, + {<<"_replication_id">>, ?l2b(BaseId)}]), + ok = gen_server:call(?MODULE, {rep_error, RepId, Error}, infinity) + end. 
+ + init(_) -> process_flag(trap_exit, true), - _ = ets:new(?DOC_ID_TO_REP_ID, [named_table, set, protected]), - _ = ets:new(?REP_ID_TO_DOC_ID, [named_table, set, private]), + ?DOC_TO_REP = ets:new(?DOC_TO_REP, [named_table, set, protected]), + ?REP_TO_STATE = ets:new(?REP_TO_STATE, [named_table, set, protected]), Server = self(), ok = couch_config:register( fun("replicator", "db", NewName) -> ok = gen_server:cast(Server, {rep_db_changed, ?l2b(NewName)}); - ("replicator", "max_replication_retry_count", NewMaxRetries1) -> - NewMaxRetries = list_to_integer(NewMaxRetries1), - ok = gen_server:cast(Server, {set_max_retries, NewMaxRetries}) + ("replicator", "max_replication_retry_count", V) -> + ok = gen_server:cast(Server, {set_max_retries, retries_value(V)}) end ), {Loop, RepDbName} = changes_feed_loop(), @@ -58,7 +113,7 @@ init(_) -> changes_feed_loop = Loop, rep_db_name = RepDbName, db_notifier = db_update_notifier(), - max_retries = list_to_integer( + max_retries = retries_value( couch_config:get("replicator", "max_replication_retry_count", "10")) }}. 
@@ -68,32 +123,35 @@ handle_call({rep_db_update, {ChangeProps} = Change}, _From, State) -> process_update(State, Change) catch _Tag:Error -> - JsonRepDoc = get_value(doc, ChangeProps), - rep_db_update_error(Error, JsonRepDoc), + {RepProps} = get_value(doc, ChangeProps), + DocId = get_value(<<"_id">>, RepProps), + rep_db_update_error(Error, DocId), State end, {reply, ok, NewState}; -handle_call({triggered, {BaseId, _}}, _From, State) -> - [{BaseId, {DocId, true}}] = ets:lookup(?REP_ID_TO_DOC_ID, BaseId), - true = ets:insert(?REP_ID_TO_DOC_ID, {BaseId, {DocId, false}}), +handle_call({rep_started, RepId}, _From, State) -> + case rep_state(RepId) of + nil -> + ok; + RepState -> + NewRepState = RepState#rep_state{ + starting = false, + retries_left = State#state.max_retries, + max_retries = State#state.max_retries, + wait = ?INITIAL_WAIT + }, + true = ets:insert(?REP_TO_STATE, {RepId, NewRepState}) + end, {reply, ok, State}; -handle_call({restart_failure, {Props} = RepDoc, Error}, _From, State) -> - DocId = get_value(<<"_id">>, Props), - [{DocId, {{BaseId, _} = RepId, MaxRetries}}] = ets:lookup( - ?DOC_ID_TO_REP_ID, DocId), - ?LOG_ERROR("Failed to start replication `~s` after ~p attempts using " - "the document `~s`. 
Last error reason was: ~p", - [pp_rep_id(RepId), MaxRetries, DocId, Error]), - couch_rep:update_rep_doc( - RepDoc, - [{<<"_replication_state">>, <<"error">>}, - {<<"_replication_id">>, ?l2b(BaseId)}]), - true = ets:delete(?REP_ID_TO_DOC_ID, BaseId), - true = ets:delete(?DOC_ID_TO_REP_ID, DocId), +handle_call({rep_complete, RepId}, _From, State) -> + true = ets:delete(?REP_TO_STATE, RepId), {reply, ok, State}; +handle_call({rep_error, RepId, Error}, _From, State) -> + {reply, ok, replication_error(State, RepId, Error)}; + handle_call(Msg, From, State) -> ?LOG_ERROR("Replication manager received unexpected call ~p from ~p", [Msg, From]), @@ -150,8 +208,8 @@ terminate(_Reason, State) -> catch exit(Pid, stop) end, [Loop | StartPids]), - true = ets:delete(?REP_ID_TO_DOC_ID), - true = ets:delete(?DOC_ID_TO_REP_ID), + true = ets:delete(?REP_TO_STATE), + true = ets:delete(?DOC_TO_REP), couch_db_update_notifier:stop(Notifier). @@ -160,7 +218,7 @@ code_change(_OldVsn, State, _Extra) -> changes_feed_loop() -> - {ok, RepDb} = couch_rep:ensure_rep_db_exists(), + {ok, RepDb} = ensure_rep_db_exists(), Server = self(), Pid = spawn_link( fun() -> @@ -245,21 +303,20 @@ process_update(State, {Change}) -> State; false -> case get_value(<<"_replication_state">>, RepProps) of + undefined -> + maybe_start_replication(State, DocId, JsonRepDoc); + <<"triggered">> -> + maybe_start_replication(State, DocId, JsonRepDoc); <<"completed">> -> replication_complete(DocId), State; - <<"error">> -> - stop_replication(DocId), - State; - <<"triggered">> -> - maybe_start_replication(State, DocId, JsonRepDoc); - undefined -> - maybe_start_replication(State, DocId, JsonRepDoc) + _ -> + State end end. 
-rep_db_update_error(Error, {Props} = JsonRepDoc) -> +rep_db_update_error(Error, DocId) -> case Error of {bad_rep_doc, Reason} -> ok; @@ -267,9 +324,8 @@ rep_db_update_error(Error, {Props} = JsonRepDoc) -> Reason = to_binary(Error) end, ?LOG_ERROR("Replication manager, error processing document `~s`: ~s", - [get_value(<<"_id">>, Props), Reason]), - couch_rep:update_rep_doc( - JsonRepDoc, [{<<"_replication_state">>, <<"error">>}]). + [DocId, Reason]), + update_rep_doc(DocId, [{<<"_replication_state">>, <<"error">>}]). rep_user_ctx({RepDoc}) -> @@ -284,30 +340,39 @@ rep_user_ctx({RepDoc}) -> end. -maybe_start_replication(#state{max_retries = MaxRetries} = State, - DocId, JsonRepDoc) -> - UserCtx = rep_user_ctx(JsonRepDoc), - {BaseId, _} = RepId = make_rep_id(JsonRepDoc, UserCtx), - case ets:lookup(?REP_ID_TO_DOC_ID, BaseId) of - [] -> - true = ets:insert(?REP_ID_TO_DOC_ID, {BaseId, {DocId, true}}), - true = ets:insert(?DOC_ID_TO_REP_ID, {DocId, {RepId, MaxRetries}}), +maybe_start_replication(State, DocId, RepDoc) -> + UserCtx = rep_user_ctx(RepDoc), + {BaseId, _} = RepId = make_rep_id(RepDoc, UserCtx), + case rep_state(RepId) of + nil -> + RepState = #rep_state{ + doc_id = DocId, + user_ctx = UserCtx, + doc = RepDoc, + starting = true, + retries_left = State#state.max_retries, + max_retries = State#state.max_retries + }, + true = ets:insert(?REP_TO_STATE, {RepId, RepState}), + true = ets:insert(?DOC_TO_REP, {DocId, RepId}), + ?LOG_INFO("Attempting to start replication `~s` (document `~s`).", + [pp_rep_id(RepId), DocId]), Server = self(), Pid = spawn_link(fun() -> - start_replication(Server, JsonRepDoc, RepId, UserCtx, MaxRetries) + start_replication(Server, RepDoc, RepId, UserCtx, 0) end), State#state{rep_start_pids = [Pid | State#state.rep_start_pids]}; - [{BaseId, {DocId, _}}] -> + #rep_state{doc_id = DocId} -> State; - [{BaseId, {OtherDocId, false}}] -> + #rep_state{starting = false, doc_id = OtherDocId} -> ?LOG_INFO("The replication specified by the document `~s` 
was already" " triggered by the document `~s`", [DocId, OtherDocId]), - maybe_tag_rep_doc(JsonRepDoc, ?l2b(BaseId)), + maybe_tag_rep_doc(DocId, RepDoc, ?l2b(BaseId)), State; - [{BaseId, {OtherDocId, true}}] -> + #rep_state{starting = true, doc_id = OtherDocId} -> ?LOG_INFO("The replication specified by the document `~s` is already" " being triggered by the document `~s`", [DocId, OtherDocId]), - maybe_tag_rep_doc(JsonRepDoc, ?l2b(BaseId)), + maybe_tag_rep_doc(DocId, RepDoc, ?l2b(BaseId)), State end. @@ -323,98 +388,233 @@ make_rep_id(RepDoc, UserCtx) -> end. -maybe_tag_rep_doc({Props} = JsonRepDoc, RepId) -> - case get_value(<<"_replication_id">>, Props) of +maybe_tag_rep_doc(DocId, {RepProps}, RepId) -> + case get_value(<<"_replication_id">>, RepProps) of RepId -> ok; _ -> - couch_rep:update_rep_doc(JsonRepDoc, [{<<"_replication_id">>, RepId}]) + update_rep_doc(DocId, [{<<"_replication_id">>, RepId}]) end. -start_replication(Server, {RepProps} = RepDoc, RepId, UserCtx, MaxRetries) -> +start_replication(Server, RepDoc, RepId, UserCtx, Wait) -> + ok = timer:sleep(Wait * 1000), case (catch couch_rep:start_replication(RepDoc, RepId, UserCtx)) of Pid when is_pid(Pid) -> - ?LOG_INFO("Document `~s` triggered replication `~s`", - [get_value(<<"_id">>, RepProps), pp_rep_id(RepId)]), - ok = gen_server:call(Server, {triggered, RepId}, infinity), + ok = gen_server:call(Server, {rep_started, RepId}, infinity), couch_rep:get_result(Pid, RepId, RepDoc, UserCtx); Error -> - couch_rep:update_rep_doc( - RepDoc, - [{<<"_replication_state">>, <<"error">>}, - {<<"_replication_id">>, ?l2b(element(1, RepId))}]), - keep_retrying( - Server, RepId, RepDoc, UserCtx, Error, ?INITIAL_WAIT, MaxRetries) + replication_error(RepId, Error) end. 
-keep_retrying(Server, _RepId, RepDoc, _UserCtx, Error, _Wait, 0) -> - ok = gen_server:call(Server, {restart_failure, RepDoc, Error}, infinity); - -keep_retrying(Server, RepId, RepDoc, UserCtx, Error, Wait, RetriesLeft) -> - {RepProps} = RepDoc, - DocId = get_value(<<"_id">>, RepProps), - ?LOG_ERROR("Error starting replication `~s` (document `~s`): ~p. " - "Retrying in ~p seconds", [pp_rep_id(RepId), DocId, Error, Wait]), - ok = timer:sleep(Wait * 1000), - case (catch couch_rep:start_replication(RepDoc, RepId, UserCtx)) of - Pid when is_pid(Pid) -> - ok = gen_server:call(Server, {triggered, RepId}, infinity), - [{DocId, {RepId, MaxRetries}}] = ets:lookup(?DOC_ID_TO_REP_ID, DocId), - ?LOG_INFO("Document `~s` triggered replication `~s` after ~p attempts", - [DocId, pp_rep_id(RepId), MaxRetries - RetriesLeft + 1]), - couch_rep:get_result(Pid, RepId, RepDoc, UserCtx); - NewError -> - keep_retrying( - Server, RepId, RepDoc, UserCtx, NewError, Wait * 2, RetriesLeft - 1) +replication_complete(DocId) -> + case ets:lookup(?DOC_TO_REP, DocId) of + [{DocId, RepId}] -> + case rep_state(RepId) of + nil -> + couch_rep:end_replication(RepId); + #rep_state{} -> + ok + end, + true = ets:delete(?DOC_TO_REP, DocId); + _ -> + ok end. rep_doc_deleted(DocId) -> - case stop_replication(DocId) of - {ok, RepId} -> + case ets:lookup(?DOC_TO_REP, DocId) of + [{DocId, RepId}] -> + couch_rep:end_replication(RepId), + true = ets:delete(?REP_TO_STATE, RepId), + true = ets:delete(?DOC_TO_REP, DocId), ?LOG_INFO("Stopped replication `~s` because replication document `~s`" " was deleted", [pp_rep_id(RepId), DocId]); - none -> + [] -> ok end. -replication_complete(DocId) -> - case stop_replication(DocId) of - {ok, RepId} -> - ?LOG_INFO("Replication `~s` finished (triggered by document `~s`)", - [pp_rep_id(RepId), DocId]); - none -> - ok +replication_error(State, RepId, Error) -> + case rep_state(RepId) of + nil -> + State; + RepState -> + maybe_retry_replication(RepId, RepState, Error, State) end. 
- -stop_replication(DocId) -> - case ets:lookup(?DOC_ID_TO_REP_ID, DocId) of - [{DocId, {{BaseId, _} = RepId, _MaxRetries}}] -> - couch_rep:end_replication(RepId), - true = ets:delete(?REP_ID_TO_DOC_ID, BaseId), - true = ets:delete(?DOC_ID_TO_REP_ID, DocId), - {ok, RepId}; - [] -> - none - end. +maybe_retry_replication(RepId, #rep_state{retries_left = 0} = RepState, Error, State) -> + #rep_state{ + doc_id = DocId, + max_retries = MaxRetries + } = RepState, + couch_rep:end_replication(RepId), + true = ets:delete(?REP_TO_STATE, RepId), + true = ets:delete(?DOC_TO_REP, DocId), + ?LOG_ERROR("Error in replication `~s` (triggered by document `~s`): ~s" + "~nReached maximum retry attempts (~p).", + [pp_rep_id(RepId), DocId, to_binary(error_reason(Error)), MaxRetries]), + State; + +maybe_retry_replication(RepId, RepState, Error, State) -> + #rep_state{ + doc_id = DocId, + user_ctx = UserCtx, + doc = RepDoc + } = RepState, + #rep_state{wait = Wait} = NewRepState = state_after_error(RepState), + true = ets:insert(?REP_TO_STATE, {RepId, NewRepState}), + ?LOG_ERROR("Error in replication `~s` (triggered by document `~s`): ~s" + "~nRestarting replication in ~p seconds.", + [pp_rep_id(RepId), DocId, to_binary(error_reason(Error)), Wait]), + Server = self(), + Pid = spawn_link(fun() -> + start_replication(Server, RepDoc, RepId, UserCtx, Wait) + end), + State#state{rep_start_pids = [Pid | State#state.rep_start_pids]}. stop_all_replications() -> ?LOG_INFO("Stopping all ongoing replications because the replicator" " database was deleted or changed", []), ets:foldl( - fun({_, {RepId, _}}, _) -> + fun({_, RepId}, _) -> couch_rep:end_replication(RepId) end, - ok, ?DOC_ID_TO_REP_ID), - true = ets:delete_all_objects(?REP_ID_TO_DOC_ID), - true = ets:delete_all_objects(?DOC_ID_TO_REP_ID). + ok, ?DOC_TO_REP), + true = ets:delete_all_objects(?REP_TO_STATE), + true = ets:delete_all_objects(?DOC_TO_REP). 
+ + +update_rep_doc(RepDocId, KVs) -> + {ok, RepDb} = ensure_rep_db_exists(), + try + case couch_db:open_doc(RepDb, RepDocId, []) of + {ok, LatestRepDoc} -> + update_rep_doc(RepDb, LatestRepDoc, KVs); + _ -> + ok + end + catch throw:conflict -> + % Shouldn't happen, as by default only the role _replicator can + % update replication documents. + ?LOG_ERROR("Conflict error when updating replication document `~s`." + " Retrying.", [RepDocId]), + ok = timer:sleep(5), + update_rep_doc(RepDocId, KVs) + after + couch_db:close(RepDb) + end. + +update_rep_doc(RepDb, #doc{body = {RepDocBody}} = RepDoc, KVs) -> + NewRepDocBody = lists:foldl( + fun({<<"_replication_state">> = K, State} = KV, Body) -> + case get_value(K, Body) of + State -> + Body; + _ -> + Body1 = lists:keystore(K, 1, Body, KV), + lists:keystore( + <<"_replication_state_time">>, 1, Body1, + {<<"_replication_state_time">>, timestamp()}) + end; + ({K, _V} = KV, Body) -> + lists:keystore(K, 1, Body, KV) + end, + RepDocBody, KVs), + case NewRepDocBody of + RepDocBody -> + ok; + _ -> + % Might not succeed - when the replication doc is deleted right + % before this update (not an error, ignore). + couch_db:update_doc(RepDb, RepDoc#doc{body = {NewRepDocBody}}, []) + end. + + +% RFC3339 timestamps. +% Note: doesn't include the time seconds fraction (RFC3339 says it's optional). +timestamp() -> + {{Year, Month, Day}, {Hour, Min, Sec}} = calendar:now_to_local_time(now()), + UTime = erlang:universaltime(), + LocalTime = calendar:universal_time_to_local_time(UTime), + DiffSecs = calendar:datetime_to_gregorian_seconds(LocalTime) - + calendar:datetime_to_gregorian_seconds(UTime), + zone(DiffSecs div 3600, (DiffSecs rem 3600) div 60), + iolist_to_binary( + io_lib:format("~4..0w-~2..0w-~2..0wT~2..0w:~2..0w:~2..0w~s", + [Year, Month, Day, Hour, Min, Sec, + zone(DiffSecs div 3600, (DiffSecs rem 3600) div 60)])). 
+ +zone(Hr, Min) when Hr >= 0, Min >= 0 -> + io_lib:format("+~2..0w:~2..0w", [Hr, Min]); +zone(Hr, Min) -> + io_lib:format("-~2..0w:~2..0w", [abs(Hr), abs(Min)]). + + +ensure_rep_db_exists() -> + DbName = ?l2b(couch_config:get("replicator", "db", "_replicator")), + Opts = [ + {user_ctx, #user_ctx{roles=[<<"_admin">>, <<"_replicator">>]}}, + sys_db + ], + case couch_db:open(DbName, Opts) of + {ok, Db} -> + Db; + _Error -> + {ok, Db} = couch_db:create(DbName, Opts) + end, + ok = ensure_rep_ddoc_exists(Db, <<"_design/_replicator">>), + {ok, Db}. + + +ensure_rep_ddoc_exists(RepDb, DDocID) -> + case couch_db:open_doc(RepDb, DDocID, []) of + {ok, _Doc} -> + ok; + _ -> + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DDocID}, + {<<"language">>, <<"javascript">>}, + {<<"validate_doc_update">>, ?REP_DB_DOC_VALIDATE_FUN} + ]}), + {ok, _Rev} = couch_db:update_doc(RepDb, DDoc, []) + end, + ok. % pretty-print replication id pp_rep_id({Base, Extension}) -> Base ++ Extension. + + +rep_state(RepId) -> + case ets:lookup(?REP_TO_STATE, RepId) of + [{RepId, RepState}] -> + RepState; + [] -> + nil + end. + + +error_reason({error, Reason}) -> + Reason; +error_reason(Reason) -> + Reason. + + +retries_value("infinity") -> + infinity; +retries_value(Value) -> + list_to_integer(Value). + + +state_after_error(#rep_state{retries_left = Left, wait = Wait} = State) -> + Wait2 = erlang:min(trunc(Wait * 2), ?MAX_WAIT), + case Left of + infinity -> + State#rep_state{wait = Wait2}; + _ -> + State#rep_state{retries_left = Left - 1, wait = Wait2} + end. -- cgit v1.2.3 From 7369394c77ac91b0edc0f885a6f96f5d9a27743e Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sat, 2 Jul 2011 18:47:54 +0000 Subject: Merge revision 1142259 from trunk Replication manager, ignore db monitor messages Ignore local database monitor messages. 
These messages are received in some circunstances because replication processes spawn local database monitors but never do the corresponding erlang:demonitor/1,2 calls. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1142260 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_replication_manager.erl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/couchdb/couch_replication_manager.erl b/src/couchdb/couch_replication_manager.erl index 7e2c8118..943cafa8 100644 --- a/src/couchdb/couch_replication_manager.erl +++ b/src/couchdb/couch_replication_manager.erl @@ -190,6 +190,10 @@ handle_info({'EXIT', From, normal}, #state{rep_start_pids = Pids} = State) -> % one of the replication start processes terminated successfully {noreply, State#state{rep_start_pids = Pids -- [From]}}; +handle_info({'DOWN', _Ref, _, _, _}, State) -> + % From a db monitor created by a replication process. Ignore. + {noreply, State}; + handle_info(Msg, State) -> ?LOG_ERROR("Replication manager received unexpected message ~p", [Msg]), {stop, {unexpected_msg, Msg}, State}. -- cgit v1.2.3 From 5caca0fdf68e8b345544ff74591df51da691de5e Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sat, 2 Jul 2011 18:52:14 +0000 Subject: Merged revision 1142262 from trunk On server startup, restart replications in error If we setup a continuous replication which goes into an error state and restart Couch just before the replication is retried (before it transitions to the triggered state), the user has to manually restart the replication (recreating the document or deleting its _replication_state field). 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1142263 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_replication_manager.erl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/couchdb/couch_replication_manager.erl b/src/couchdb/couch_replication_manager.erl index 943cafa8..b3fc3e3c 100644 --- a/src/couchdb/couch_replication_manager.erl +++ b/src/couchdb/couch_replication_manager.erl @@ -314,8 +314,13 @@ process_update(State, {Change}) -> <<"completed">> -> replication_complete(DocId), State; - _ -> - State + <<"error">> -> + case ets:lookup(?DOC_TO_REP, DocId) of + [] -> + maybe_start_replication(State, DocId, JsonRepDoc); + _ -> + State + end end end. -- cgit v1.2.3 From 6ef446ab98848d1b2b42785a1826567627af8e7a Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sun, 3 Jul 2011 09:58:54 +0000 Subject: Remove warning about unused variable git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1142410 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_replication_manager.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/couchdb/couch_replication_manager.erl b/src/couchdb/couch_replication_manager.erl index b3fc3e3c..b3c6db11 100644 --- a/src/couchdb/couch_replication_manager.erl +++ b/src/couchdb/couch_replication_manager.erl @@ -75,7 +75,7 @@ replication_completed(RepId) -> case rep_state(RepId) of nil -> ok; - #rep_state{doc_id = DocId} = St -> + #rep_state{doc_id = DocId} -> update_rep_doc(DocId, [{<<"_replication_state">>, <<"completed">>}]), ok = gen_server:call(?MODULE, {rep_complete, RepId}, infinity), ?LOG_INFO("Replication `~s` finished (triggered by document `~s`)", -- cgit v1.2.3 From 5fa79a85bcb2eea059fd948e89433965f8160b1f Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Mon, 4 Jul 2011 14:35:13 +0000 Subject: Backport r1142685 from trunk: Allow "/" as vhost target. Includes a one-line whitespace fix. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1142689 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_httpd_vhost.erl | 8 ++++++-- test/etap/160-vhosts.t | 14 +++++++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/couchdb/couch_httpd_vhost.erl b/src/couchdb/couch_httpd_vhost.erl index 9bfb5951..a9517600 100644 --- a/src/couchdb/couch_httpd_vhost.erl +++ b/src/couchdb/couch_httpd_vhost.erl @@ -216,15 +216,19 @@ code_change(_OldVsn, State, _Extra) -> {ok, State}. +append_path("/"=_Target, "/"=_Path) -> + "/"; +append_path(Target, Path) -> + Target ++ Path. % default redirect vhost handler redirect_to_vhost(MochiReq, VhostTarget) -> Path = MochiReq:get(raw_path), - Target = VhostTarget ++ Path, + Target = append_path(VhostTarget, Path), ?LOG_DEBUG("Vhost Target: '~p'~n", [Target]), - + Headers = mochiweb_headers:enter("x-couchdb-vhost-path", Path, MochiReq:get(headers)), diff --git a/test/etap/160-vhosts.t b/test/etap/160-vhosts.t index 8dac53e5..4bb8e926 100755 --- a/test/etap/160-vhosts.t +++ b/test/etap/160-vhosts.t @@ -52,7 +52,7 @@ admin_user_ctx() -> {user_ctx, #user_ctx{roles=[<<"_admin">>]}}. main(_) -> test_util:init_code_path(), - etap:plan(14), + etap:plan(15), case (catch test()) of ok -> etap:end_tests(); @@ -115,6 +115,7 @@ test() -> ok = couch_config:set("vhosts", "*.example2.com/test", "/*", false), ok = couch_config:set("vhosts", "*/test1", "/etap-test-db/_design/doc1/_show/test", false), + ok = couch_config:set("vhosts", "example3.com", "/", false), % let couch_httpd restart timer:sleep(100), @@ -133,6 +134,7 @@ test() -> test_vhost_request_path1(), test_vhost_request_path2(), test_vhost_request_path3(), + test_vhost_request_to_root(), %% restart boilerplate couch_db:close(Db), @@ -289,3 +291,13 @@ test_vhost_request_path3() -> end, true, <<"path in req ok">>); _Else -> etap:is(false, true, <<"ibrowse fail">>) end. 
+ +test_vhost_request_to_root() -> + Uri = server(), + case ibrowse:send_req(Uri, [], get, [], []) of + {ok, _, _, Body} -> + {JsonBody} = couch_util:json_decode(Body), + HasCouchDBWelcome = proplists:is_defined(<<"couchdb">>, JsonBody), + etap:is(HasCouchDBWelcome, true, "should allow redirect to /"); + _Else -> etap:is(false, true, <<"ibrowse fail">>) + end. -- cgit v1.2.3 From 8d53b7d926c9169c914f6421e681842529d9ac3f Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Wed, 6 Jul 2011 12:27:15 +0000 Subject: Merge revision 1143375 from trunk Redefine logging macros With these macro definitions we don't evaluate the arguments if the corresponding log level is not enabled. This behaviour was accidently removed by the patch from COUCHDB-1054. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1143376 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db.hrl | 16 ++++++++++++++-- src/couchdb/couch_log.erl | 18 ++++-------------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/src/couchdb/couch_db.hrl b/src/couchdb/couch_db.hrl index 31318782..fefbb8ef 100644 --- a/src/couchdb/couch_db.hrl +++ b/src/couchdb/couch_db.hrl @@ -25,8 +25,20 @@ -define(DEFAULT_ATTACHMENT_CONTENT_TYPE, <<"application/octet-stream">>). --define(LOG_DEBUG(Format, Args), couch_log:debug(Format, Args)). --define(LOG_INFO(Format, Args), couch_log:info(Format, Args)). +-define(LOG_DEBUG(Format, Args), + case couch_log:debug_on() of + true -> + couch_log:debug(Format, Args); + false -> ok + end). + +-define(LOG_INFO(Format, Args), + case couch_log:info_on() of + true -> + couch_log:info(Format, Args); + false -> ok + end). + -define(LOG_ERROR(Format, Args), couch_log:error(Format, Args)). -record(rev_info, diff --git a/src/couchdb/couch_log.erl b/src/couchdb/couch_log.erl index b3d3297c..67549bcc 100644 --- a/src/couchdb/couch_log.erl +++ b/src/couchdb/couch_log.erl @@ -25,22 +25,12 @@ -define(LEVEL_TMI, 0). 
debug(Format, Args) -> - case debug_on() of - false -> - ok; - true -> - {ConsoleMsg, FileMsg} = get_log_messages(self(), debug, Format, Args), - gen_event:sync_notify(error_logger, {couch_debug, ConsoleMsg, FileMsg}) - end. + {ConsoleMsg, FileMsg} = get_log_messages(self(), debug, Format, Args), + gen_event:sync_notify(error_logger, {couch_debug, ConsoleMsg, FileMsg}). info(Format, Args) -> - case info_on() of - false -> - ok; - true -> - {ConsoleMsg, FileMsg} = get_log_messages(self(), info, Format, Args), - gen_event:sync_notify(error_logger, {couch_info, ConsoleMsg, FileMsg}) - end. + {ConsoleMsg, FileMsg} = get_log_messages(self(), info, Format, Args), + gen_event:sync_notify(error_logger, {couch_info, ConsoleMsg, FileMsg}). error(Format, Args) -> {ConsoleMsg, FileMsg} = get_log_messages(self(), error, Format, Args), -- cgit v1.2.3 From 9e4abecfc2d26dc9ec109c24d97ee03c111e1d30 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Mon, 25 Jul 2011 21:25:40 +0000 Subject: assert that calls to file functions actually succeed. 1) couch_file:sync could leave open fd's if close failed. Now we'll get a trace. 2) couch_file:append_term failing would be bad, so let's test that too. backported from trunk r1150915 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1150918 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db_updater.erl | 6 +++--- src/couchdb/couch_file.erl | 2 +- src/couchdb/couch_server_sup.erl | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl index 2b317d95..714a337f 100644 --- a/src/couchdb/couch_db_updater.erl +++ b/src/couchdb/couch_db_updater.erl @@ -49,7 +49,7 @@ init({MainPid, DbName, Filepath, Fd, Options}) -> terminate(_Reason, Db) -> - couch_file:close(Db#db.fd), + ok = couch_file:close(Db#db.fd), couch_util:shutdown_sync(Db#db.compactor_pid), couch_util:shutdown_sync(Db#db.fd_ref_counter), ok. 
@@ -484,9 +484,9 @@ flush_trees(#db{fd=Fd,header=Header}=Db, {ok, NewSummaryPointer} = case Header#db_header.disk_version < 4 of true -> - couch_file:append_term(Fd, {Doc#doc.body, DiskAtts}); + {ok, _} = couch_file:append_term(Fd, {Doc#doc.body, DiskAtts}); false -> - couch_file:append_term_md5(Fd, {Doc#doc.body, DiskAtts}) + {ok, _} = couch_file:append_term_md5(Fd, {Doc#doc.body, DiskAtts}) end, {IsDeleted, NewSummaryPointer, UpdateSeq}; _ -> diff --git a/src/couchdb/couch_file.erl b/src/couchdb/couch_file.erl index 7b677034..07ec18dc 100644 --- a/src/couchdb/couch_file.erl +++ b/src/couchdb/couch_file.erl @@ -164,7 +164,7 @@ truncate(Fd, Pos) -> sync(Filepath) when is_list(Filepath) -> {ok, Fd} = file:open(Filepath, [append, raw]), - try file:sync(Fd) after file:close(Fd) end; + try ok = file:sync(Fd) after ok = file:close(Fd) end; sync(Fd) -> gen_server:call(Fd, sync, infinity). diff --git a/src/couchdb/couch_server_sup.erl b/src/couchdb/couch_server_sup.erl index fafd83ed..dd210381 100644 --- a/src/couchdb/couch_server_sup.erl +++ b/src/couchdb/couch_server_sup.erl @@ -135,7 +135,7 @@ start_server(IniFiles) -> undefined -> []; Uri -> io_lib:format("~s~n", [Uri]) end end || Uri <- Uris], - file:write_file(UriFile, Lines) + ok = file:write_file(UriFile, Lines) end, {ok, Pid}. -- cgit v1.2.3 From 6f43183770b858906e6f41e59f65a0513615479d Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Thu, 28 Jul 2011 19:14:08 +0000 Subject: revert redundant pattern match. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1151976 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db_updater.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl index 714a337f..6493a3b3 100644 --- a/src/couchdb/couch_db_updater.erl +++ b/src/couchdb/couch_db_updater.erl @@ -484,9 +484,9 @@ flush_trees(#db{fd=Fd,header=Header}=Db, {ok, NewSummaryPointer} = case Header#db_header.disk_version < 4 of true -> - {ok, _} = couch_file:append_term(Fd, {Doc#doc.body, DiskAtts}); + couch_file:append_term(Fd, {Doc#doc.body, DiskAtts}); false -> - {ok, _} = couch_file:append_term_md5(Fd, {Doc#doc.body, DiskAtts}) + couch_file:append_term_md5(Fd, {Doc#doc.body, DiskAtts}) end, {IsDeleted, NewSummaryPointer, UpdateSeq}; _ -> -- cgit v1.2.3 From daa640b113907bb28fc9b26e38849f239d353363 Mon Sep 17 00:00:00 2001 From: Randall Leeds Date: Sat, 30 Jul 2011 00:37:28 +0000 Subject: Backport of r1152397 from trunk Call traversal handlers in btree folds Previously the fold function was only invoked for leafs. With this change it's possible to define a fold function which is called for inner nodes and can return a tuple {skip, Acc} in order to skip exploration of the branch. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1152405 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_btree.erl | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/src/couchdb/couch_btree.erl b/src/couchdb/couch_btree.erl index f8c126f3..91bc8f1b 100644 --- a/src/couchdb/couch_btree.erl +++ b/src/couchdb/couch_btree.erl @@ -105,9 +105,17 @@ full_reduce(#btree{root={_P, Red}}) -> % wraps a 2 arity function with the proper 3 arity function convert_fun_arity(Fun) when is_function(Fun, 2) -> - fun(KV, _Reds, AccIn) -> Fun(KV, AccIn) end; + fun + (visit, KV, _Reds, AccIn) -> Fun(KV, AccIn); + (traverse, _K, _Red, AccIn) -> {ok, AccIn} + end; convert_fun_arity(Fun) when is_function(Fun, 3) -> - Fun. % Already arity 3 + fun + (visit, KV, Reds, AccIn) -> Fun(KV, Reds, AccIn); + (traverse, _K, _Red, AccIn) -> {ok, AccIn} + end; +convert_fun_arity(Fun) when is_function(Fun, 4) -> + Fun. % Already arity 4 make_key_in_end_range_function(#btree{less=Less}, fwd, Options) -> case couch_util:get_value(end_key_gt, Options) of @@ -604,12 +612,17 @@ stream_node(Bt, Reds, {Pointer, _Reds}, InRange, Dir, Fun, Acc) -> stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) -> {ok, Acc}; -stream_kp_node(Bt, Reds, [{_Key, {Pointer, Red}} | Rest], InRange, Dir, Fun, Acc) -> - case stream_node(Bt, Reds, {Pointer, Red}, InRange, Dir, Fun, Acc) of +stream_kp_node(Bt, Reds, [{Key, {Pointer, Red}} | Rest], InRange, Dir, Fun, Acc) -> + case Fun(traverse, Key, Red, Acc) of {ok, Acc2} -> - stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2); - {stop, LastReds, Acc2} -> - {stop, LastReds, Acc2} + case stream_node(Bt, Reds, {Pointer, Red}, InRange, Dir, Fun, Acc2) of + {ok, Acc3} -> + stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3); + {stop, LastReds, Acc3} -> + {stop, LastReds, Acc3} + end; + {skip, Acc2} -> + stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2) end. 
drop_nodes(_Bt, Reds, _StartKey, []) -> @@ -670,7 +683,7 @@ stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) -> {stop, {PrevKVs, Reds}, Acc}; true -> AssembledKV = assemble(Bt, K, V), - case Fun(AssembledKV, {PrevKVs, Reds}, Acc) of + case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of {ok, Acc2} -> stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2); {stop, Acc2} -> -- cgit v1.2.3 From 2a92b4b41f261a8ddca496d729a39bbd20c41320 Mon Sep 17 00:00:00 2001 From: Randall Leeds Date: Sat, 30 Jul 2011 00:37:32 +0000 Subject: Backport of r1152398 from trunk Fix COUCHDB-1076 - views skip empty btree branches git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1152406 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db.erl | 6 ++++-- src/couchdb/couch_view.erl | 12 ++++++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index 85f7e291..3a12e738 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -271,7 +271,8 @@ get_db_info(Db) -> {ok, InfoList}. get_design_docs(#db{fulldocinfo_by_id_btree=Btree}=Db) -> - {ok,_, Docs} = couch_btree:fold(Btree, + {ok, _, Docs} = couch_view:fold( + #view{btree=Btree}, fun(#full_doc_info{id= <<"_design/",_/binary>>}=FullDocInfo, _Reds, AccDocs) -> {ok, Doc} = couch_db:open_doc_int(Db, FullDocInfo, []), {ok, [Doc | AccDocs]}; @@ -976,7 +977,8 @@ enum_docs_since(Db, SinceSeq, InFun, Acc, Options) -> {ok, enum_docs_since_reduce_to_count(LastReduction), AccOut}. enum_docs(Db, InFun, InAcc, Options) -> - {ok, LastReduce, OutAcc} = couch_btree:fold(Db#db.fulldocinfo_by_id_btree, InFun, InAcc, Options), + {ok, LastReduce, OutAcc} = couch_view:fold( + #view{btree=Db#db.fulldocinfo_by_id_btree}, InFun, InAcc, Options), {ok, enum_docs_reduce_to_count(LastReduce), OutAcc}. 
% server functions diff --git a/src/couchdb/couch_view.erl b/src/couchdb/couch_view.erl index c9a9b2cc..88cadec7 100644 --- a/src/couchdb/couch_view.erl +++ b/src/couchdb/couch_view.erl @@ -252,8 +252,16 @@ fold_fun(Fun, [KV|Rest], {KVReds, Reds}, Acc) -> fold(#view{btree=Btree}, Fun, Acc, Options) -> WrapperFun = - fun(KV, Reds, Acc2) -> - fold_fun(Fun, expand_dups([KV],[]), Reds, Acc2) + fun(visit, KV, Reds, Acc2) -> + fold_fun(Fun, expand_dups([KV],[]), Reds, Acc2); + (traverse, LK, Red, Acc2) + when is_function(Fun, 4) -> + Fun(traverse, LK, Red, Acc2); + (traverse, _LK, Red, {_, Skip, _, _} = Acc2) + when Skip >= element(1, Red) -> + {skip, setelement(2, Acc2, Skip - element(1, Red))}; + (traverse, _, _, Acc2) -> + {ok, Acc2} end, {ok, _LastReduce, _AccResult} = couch_btree:fold(Btree, WrapperFun, Acc, Options). -- cgit v1.2.3 From 916f33c309b53f981400e39a0d65281fd03c6754 Mon Sep 17 00:00:00 2001 From: Randall Leeds Date: Sat, 30 Jul 2011 00:37:37 +0000 Subject: Backport of r1152399 from trunk add an etap test for btree traversal callbacks git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1152407 13f79535-47bb-0310-9956-ffa450edef68 --- test/etap/020-btree-basics.t | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/test/etap/020-btree-basics.t b/test/etap/020-btree-basics.t index 18c4a836..8ab453ee 100755 --- a/test/etap/020-btree-basics.t +++ b/test/etap/020-btree-basics.t @@ -127,6 +127,7 @@ test_btree(Btree, KeyValues) -> ok = test_key_access(Btree, KeyValues), ok = test_lookup_access(Btree, KeyValues), ok = test_final_reductions(Btree, KeyValues), + ok = test_traversal_callbacks(Btree, KeyValues), true. test_add_remove(Btree, OutKeyValues, RemainingKeyValues) -> @@ -187,6 +188,18 @@ test_final_reductions(Btree, KeyValues) -> KVLen = FoldLRed + FoldRRed, ok. 
+test_traversal_callbacks(Btree, KeyValues) -> + FoldFun = + fun + (visit, GroupedKey, Unreduced, Acc) -> + {ok, Acc andalso false}; + (traverse, _LK, _Red, Acc) -> + {skip, Acc andalso true} + end, + % With 250 items the root is a kp. Always skipping should reduce to true. + {ok, _, true} = couch_btree:fold(Btree, FoldFun, true, [{dir, fwd}]), + ok. + shuffle(List) -> randomize(round(math:log(length(List)) + 0.5), List). -- cgit v1.2.3 From 5385dee622f39df1110bdca76e2a1b1c3bd5540c Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Wed, 10 Aug 2011 20:23:52 +0000 Subject: COUCHDB-1244 - close file descriptor in couch_log:read/2 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1156351 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_log.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/couchdb/couch_log.erl b/src/couchdb/couch_log.erl index 67549bcc..bb75f161 100644 --- a/src/couchdb/couch_log.erl +++ b/src/couchdb/couch_log.erl @@ -180,4 +180,5 @@ read(Bytes, Offset) -> % TODO: make streaming {ok, Chunk} = file:pread(Fd, Start, LogFileSize), + ok = file:close(Fd), Chunk. 
-- cgit v1.2.3 From b2db4f11376472fa801c58f98dcc2e356783e276 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Wed, 10 Aug 2011 20:47:27 +0000 Subject: Merged revision 1156360 from trunk Prevent data loss on db creation request 1) Create and populate a database 2) Restart the server 3) Send a PUT request to create the database - the server will override the existing file, making all previous documents no longer accessible nor recoverable git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1156361 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/basics.js | 19 +++++++++++++++++++ src/couchdb/couch_server.erl | 12 +++++++----- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/share/www/script/test/basics.js b/share/www/script/test/basics.js index 30c27c11..5dcf9fa9 100644 --- a/share/www/script/test/basics.js +++ b/share/www/script/test/basics.js @@ -246,4 +246,23 @@ couchTests.basics = function(debug) { result = JSON.parse(xhr.responseText); TEquals("bad_request", result.error); TEquals("You tried to DELETE a database with a ?=rev parameter. 
Did you mean to DELETE a document instead?", result.reason); + + // On restart, a request for creating a database that already exists can + // not override the existing database file + db = new CouchDB("test_suite_foobar"); + db.deleteDb(); + xhr = CouchDB.request("PUT", "/" + db.name); + TEquals(201, xhr.status); + + TEquals(true, db.save({"_id": "doc1"}).ok); + TEquals(true, db.ensureFullCommit().ok); + + TEquals(1, db.info().doc_count); + + restartServer(); + + xhr = CouchDB.request("PUT", "/" + db.name); + TEquals(412, xhr.status); + + TEquals(1, db.info().doc_count); }; diff --git a/src/couchdb/couch_server.erl b/src/couchdb/couch_server.erl index 7870d69e..b9503d2f 100644 --- a/src/couchdb/couch_server.erl +++ b/src/couchdb/couch_server.erl @@ -314,11 +314,13 @@ handle_call({open, DbName, Options}, {FromPid,_}=From, Server) -> {reply, couch_db:open_ref_counted(MainPid, FromPid), Server} end; handle_call({create, DbName, Options}, From, Server) -> - case ets:lookup(couch_dbs_by_name, DbName) of - [] -> - open_db(DbName, Server, [create | Options], From); - [_AlreadyRunningDb] -> - {reply, file_exists, Server} + FileName = get_full_filename(Server, ?b2l(DbName)), + case file:open(FileName, [read]) of + {ok, Fd} -> + ok = file:close(Fd), + {reply, file_exists, Server}; + Error -> + open_db(DbName, Server, [create | Options], From) end; handle_call({delete, DbName, _Options}, _From, Server) -> DbNameList = binary_to_list(DbName), -- cgit v1.2.3 From 0ac7f70c85f3be8fc65d814473b9a5a5a0aed0f7 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Wed, 10 Aug 2011 21:06:50 +0000 Subject: COUCHDB-1245 - enforce maximum chunk size for _log call to better manage memory. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1156369 13f79535-47bb-0310-9956-ffa450edef68 --- etc/couchdb/default.ini.tpl.in | 1 + src/couchdb/couch_log.erl | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/etc/couchdb/default.ini.tpl.in b/etc/couchdb/default.ini.tpl.in index 1592e330..18a5c1c8 100644 --- a/etc/couchdb/default.ini.tpl.in +++ b/etc/couchdb/default.ini.tpl.in @@ -24,6 +24,7 @@ allow_jsonp = false ;server_options = [{backlog, 128}, {acceptor_pool_size, 16}] ; For more socket options, consult Erlang's module 'inet' man page. ;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}] +log_max_chunk_size = 1000000 [ssl] port = 6984 diff --git a/src/couchdb/couch_log.erl b/src/couchdb/couch_log.erl index bb75f161..ab649cac 100644 --- a/src/couchdb/couch_log.erl +++ b/src/couchdb/couch_log.erl @@ -172,6 +172,15 @@ get_log_messages(Pid, Level, Format, Args) -> read(Bytes, Offset) -> LogFileName = couch_config:get("log", "file"), LogFileSize = filelib:file_size(LogFileName), + MaxChunkSize = list_to_integer( + couch_config:get("httpd", "log_max_chunk_size", "1000000")), + case Bytes > MaxChunkSize of + true -> + throw({bad_request, "'bytes' cannot exceed " ++ + integer_to_list(MaxChunkSize)}); + false -> + ok + end, {ok, Fd} = file:open(LogFileName, [read]), Start = lists:max([LogFileSize - Bytes, 0]) + Offset, -- cgit v1.2.3 From f269c49f1d1172fc390f6d54bb79d4841f15adee Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Wed, 10 Aug 2011 22:01:33 +0000 Subject: Revert couch_server changes added in revision 1156361 Leaving the tests however since it doesn't hurt. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1156385 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_server.erl | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/couchdb/couch_server.erl b/src/couchdb/couch_server.erl index b9503d2f..7870d69e 100644 --- a/src/couchdb/couch_server.erl +++ b/src/couchdb/couch_server.erl @@ -314,13 +314,11 @@ handle_call({open, DbName, Options}, {FromPid,_}=From, Server) -> {reply, couch_db:open_ref_counted(MainPid, FromPid), Server} end; handle_call({create, DbName, Options}, From, Server) -> - FileName = get_full_filename(Server, ?b2l(DbName)), - case file:open(FileName, [read]) of - {ok, Fd} -> - ok = file:close(Fd), - {reply, file_exists, Server}; - Error -> - open_db(DbName, Server, [create | Options], From) + case ets:lookup(couch_dbs_by_name, DbName) of + [] -> + open_db(DbName, Server, [create | Options], From); + [_AlreadyRunningDb] -> + {reply, file_exists, Server} end; handle_call({delete, DbName, _Options}, _From, Server) -> DbNameList = binary_to_list(DbName), -- cgit v1.2.3 From 2eb62337efc1171d1ea1e4392f8cacf0dabc1ab0 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sat, 13 Aug 2011 22:10:00 +0000 Subject: Merge revision 1157428 from trunk Doc validation functions from deleted ddocs must be ignored If a design document is deleted by updating it with a "_deleted" field set to the boolean value true, its validate_doc_update function should be ignored for subsequent document insertions/updates. This closes COUCHDB-1227. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1157429 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/design_docs.js | 39 ++++++++++++++++++++++++++++++++++++ src/couchdb/couch_db.erl | 4 +++- 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/share/www/script/test/design_docs.js b/share/www/script/test/design_docs.js index 702f0441..dd38858a 100644 --- a/share/www/script/test/design_docs.js +++ b/share/www/script/test/design_docs.js @@ -421,6 +421,45 @@ couchTests.design_docs = function(debug) { run_on_modified_server(server_config, testFun); + // COUCHDB-1227 - if a design document is deleted, by adding a "_deleted" + // field with the boolean value true, its validate_doc_update functions + // should no longer have effect. + db.deleteDb(); + db.createDb(); + var ddoc = { + _id: "_design/test", + language: "javascript", + validate_doc_update: (function(newDoc, oldDoc, userCtx, secObj) { + if (newDoc.value % 2 == 0) { + throw({forbidden: "dont like even numbers"}); + } + return true; + }).toString() + }; + + TEquals(true, db.save(ddoc).ok); + try { + db.save({_id: "doc1", value: 4}); + T(false, "doc insertion should have failed"); + } catch (x) { + TEquals("forbidden", x.error); + } + + var doc = db.open("doc1"); + TEquals(null, doc); + ddoc._deleted = true; + TEquals(true, db.save(ddoc).ok); + + try { + TEquals(true, db.save({_id: "doc1", value: 4}).ok); + } catch (x) { + T(false, "doc insertion should have succeeded"); + } + + doc = db.open("doc1"); + TEquals(true, doc !== null, "doc was not persisted"); + TEquals(4, doc.value); + // cleanup db.deleteDb(); db2.deleteDb(); diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index 3a12e738..f919c72c 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -273,7 +273,9 @@ get_db_info(Db) -> get_design_docs(#db{fulldocinfo_by_id_btree=Btree}=Db) -> {ok, _, Docs} = couch_view:fold( #view{btree=Btree}, - fun(#full_doc_info{id= 
<<"_design/",_/binary>>}=FullDocInfo, _Reds, AccDocs) -> + fun(#full_doc_info{deleted = true}, _Reds, AccDocs) -> + {ok, AccDocs}; + (#full_doc_info{id= <<"_design/",_/binary>>}=FullDocInfo, _Reds, AccDocs) -> {ok, Doc} = couch_db:open_doc_int(Db, FullDocInfo, []), {ok, [Doc | AccDocs]}; (_, _Reds, AccDocs) -> -- cgit v1.2.3 From 0dc0f8b41f627876f65183ea1d99c68b16abeae0 Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Mon, 15 Aug 2011 03:57:48 +0000 Subject: Fix empty range check for raw collation. The check for empty ranges was not taking into account the view option for raw collation. This fixes that by passing the couch_btree:less/2 function into the check. Patch by: Jason Smith Back port of: 1156506, 1156507, 1156509, 1156509, 1156610 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1157706 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/all_docs.js | 7 ++++ share/www/script/test/view_collation_raw.js | 9 ++++- src/couchdb/couch_btree.erl | 1 + src/couchdb/couch_httpd_db.erl | 4 ++- src/couchdb/couch_httpd_view.erl | 54 ++++++++++++++++++++--------- 5 files changed, 56 insertions(+), 19 deletions(-) diff --git a/share/www/script/test/all_docs.js b/share/www/script/test/all_docs.js index 1d83aa95..1afe701d 100644 --- a/share/www/script/test/all_docs.js +++ b/share/www/script/test/all_docs.js @@ -41,6 +41,13 @@ couchTests.all_docs = function(debug) { var all = db.allDocs({startkey:"2"}); T(all.offset == 2); + // Confirm that queries may assume raw collation. 
+ var raw = db.allDocs({ + startkey: "org.couchdb.user:", + endkey: "org.couchdb.user;" + }); + TEquals(0, raw.rows.length); + // check that the docs show up in the seq view in the order they were created var changes = db.changes(); var ids = ["0","3","1","2"]; diff --git a/share/www/script/test/view_collation_raw.js b/share/www/script/test/view_collation_raw.js index 31624cdb..779f7eb8 100644 --- a/share/www/script/test/view_collation_raw.js +++ b/share/www/script/test/view_collation_raw.js @@ -76,12 +76,19 @@ couchTests.view_collation_raw = function(debug) { } } T(db.save(designDoc).ok); + + // Confirm that everything collates correctly. var rows = db.view("test/test").rows; for (i=0; i db_attachment_req(Req, Db, DocId, FileNameParts). all_docs_view(Req, Db, Keys) -> + RawCollator = fun(A, B) -> A < B end, #view_query_args{ start_key = StartKey, start_docid = StartDocId, @@ -486,7 +487,8 @@ all_docs_view(Req, Db, Keys) -> skip = SkipCount, direction = Dir, inclusive_end = Inclusive - } = QueryArgs = couch_httpd_view:parse_view_params(Req, Keys, map), + } = QueryArgs + = couch_httpd_view:parse_view_params(Req, Keys, map, RawCollator), {ok, Info} = couch_db:get_db_info(Db), CurrentEtag = couch_httpd:make_etag(Info), couch_httpd:etag_respond(Req, CurrentEtag, fun() -> diff --git a/src/couchdb/couch_httpd_view.erl b/src/couchdb/couch_httpd_view.erl index 1f279417..082a5039 100644 --- a/src/couchdb/couch_httpd_view.erl +++ b/src/couchdb/couch_httpd_view.erl @@ -15,7 +15,7 @@ -export([handle_view_req/3,handle_temp_view_req/2]). --export([parse_view_params/3]). +-export([parse_view_params/4]). -export([make_view_fold_fun/7, finish_view_fold/4, finish_view_fold/5, view_row_obj/4]). -export([view_etag/5, make_reduce_fold_funs/6]). -export([design_doc_view/5, parse_bool_param/1, doc_member/3]). 
@@ -34,18 +34,19 @@ design_doc_view(Req, Db, DName, ViewName, Keys) -> Reduce = get_reduce_type(Req), Result = case couch_view:get_map_view(Db, DesignId, ViewName, Stale) of {ok, View, Group} -> - QueryArgs = parse_view_params(Req, Keys, map), + QueryArgs = parse_view_params(Req, Keys, map, view_collator(View)), output_map_view(Req, View, Group, Db, QueryArgs, Keys); {not_found, Reason} -> case couch_view:get_reduce_view(Db, DesignId, ViewName, Stale) of {ok, ReduceView, Group} -> + Collator = view_collator(ReduceView), case Reduce of false -> - QueryArgs = parse_view_params(Req, Keys, red_map), + QueryArgs = parse_view_params(Req, Keys, red_map, Collator), MapView = couch_view:extract_map_view(ReduceView), output_map_view(Req, MapView, Group, Db, QueryArgs, Keys); _ -> - QueryArgs = parse_view_params(Req, Keys, reduce), + QueryArgs = parse_view_params(Req, Keys, reduce, Collator), output_reduce_view(Req, Db, ReduceView, Group, QueryArgs, Keys) end; _ -> @@ -90,19 +91,19 @@ handle_temp_view_req(#httpd{method='POST'}=Req, Db) -> Reduce = get_reduce_type(Req), case couch_util:get_value(<<"reduce">>, Props, null) of null -> - QueryArgs = parse_view_params(Req, Keys, map), {ok, View, Group} = couch_view:get_temp_map_view(Db, Language, DesignOptions, MapSrc), + QueryArgs = parse_view_params(Req, Keys, map, view_collator(View)), output_map_view(Req, View, Group, Db, QueryArgs, Keys); _ when Reduce =:= false -> - QueryArgs = parse_view_params(Req, Keys, red_map), {ok, View, Group} = couch_view:get_temp_map_view(Db, Language, DesignOptions, MapSrc), + QueryArgs = parse_view_params(Req, Keys, red_map, view_collator(View)), output_map_view(Req, View, Group, Db, QueryArgs, Keys); RedSrc -> - QueryArgs = parse_view_params(Req, Keys, reduce), {ok, View, Group} = couch_view:get_temp_reduce_view(Db, Language, DesignOptions, MapSrc, RedSrc), + QueryArgs = parse_view_params(Req, Keys, reduce, view_collator(View)), output_reduce_view(Req, Db, View, Group, QueryArgs, Keys) end; @@ 
-209,18 +210,19 @@ load_view(Req, Db, {ViewDesignId, ViewName}, Keys) -> Reduce = get_reduce_type(Req), case couch_view:get_map_view(Db, ViewDesignId, ViewName, Stale) of {ok, View, Group} -> - QueryArgs = parse_view_params(Req, Keys, map), + QueryArgs = parse_view_params(Req, Keys, map, view_collator(View)), {map, View, Group, QueryArgs}; {not_found, _Reason} -> case couch_view:get_reduce_view(Db, ViewDesignId, ViewName, Stale) of {ok, ReduceView, Group} -> + Collator = view_collator(ReduceView), case Reduce of false -> - QueryArgs = parse_view_params(Req, Keys, map_red), + QueryArgs = parse_view_params(Req, Keys, map_red, Collator), MapView = couch_view:extract_map_view(ReduceView), {map, MapView, Group, QueryArgs}; _ -> - QueryArgs = parse_view_params(Req, Keys, reduce), + QueryArgs = parse_view_params(Req, Keys, reduce, Collator), {reduce, ReduceView, Group, QueryArgs} end; {not_found, Reason} -> @@ -228,12 +230,30 @@ load_view(Req, Db, {ViewDesignId, ViewName}, Keys) -> end end. +view_collator({reduce, _N, _Lang, View}) -> + view_collator(View); + +view_collator({temp_reduce, View}) -> + view_collator(View); + +view_collator(#view{btree=Btree}) -> + % Return an "is-less-than" predicate by calling into the btree's + % collator. For raw collation, couch_btree compares arbitrary + % Erlang terms, but for normal (ICU) collation, it expects + % {Json, Id} tuples. + fun + ({_JsonA, _IdA}=A, {_JsonB, _IdB}=B) -> + couch_btree:less(Btree, A, B); + (JsonA, JsonB) -> + couch_btree:less(Btree, {JsonA, null}, {JsonB, null}) + end. + % query_parse_error could be removed % we wouldn't need to pass the view type, it'd just parse params. % I'm not sure what to do about the error handling, but % it might simplify things to have a parse_view_params function % that doesn't throw(). 
-parse_view_params(Req, Keys, ViewType) -> +parse_view_params(Req, Keys, ViewType, LessThan) -> QueryList = couch_httpd:qs(Req), QueryParams = lists:foldl(fun({K, V}, Acc) -> @@ -247,7 +267,7 @@ parse_view_params(Req, Keys, ViewType) -> QueryArgs = lists:foldl(fun({K, V}, Args2) -> validate_view_query(K, V, Args2) end, Args, lists:reverse(QueryParams)), % Reverse to match QS order. - warn_on_empty_key_range(QueryArgs), + warn_on_empty_key_range(QueryArgs, LessThan), GroupLevel = QueryArgs#view_query_args.group_level, case {ViewType, GroupLevel, IsMultiGet} of {reduce, exact, true} -> @@ -328,15 +348,15 @@ parse_view_param("callback", _) -> parse_view_param(Key, Value) -> [{extra, {Key, Value}}]. -warn_on_empty_key_range(#view_query_args{start_key=undefined}) -> +warn_on_empty_key_range(#view_query_args{start_key=undefined}, _Lt) -> ok; -warn_on_empty_key_range(#view_query_args{end_key=undefined}) -> +warn_on_empty_key_range(#view_query_args{end_key=undefined}, _Lt) -> ok; -warn_on_empty_key_range(#view_query_args{start_key=A, end_key=A}) -> +warn_on_empty_key_range(#view_query_args{start_key=A, end_key=A}, _Lt) -> ok; warn_on_empty_key_range(#view_query_args{ - start_key=StartKey, end_key=EndKey, direction=Dir}) -> - case {Dir, couch_view:less_json(StartKey, EndKey)} of + start_key=StartKey, end_key=EndKey, direction=Dir}, LessThan) -> + case {Dir, LessThan(StartKey, EndKey)} of {fwd, false} -> throw({query_parse_error, <<"No rows can match your key range, reverse your ", -- cgit v1.2.3 From 82a10d77d0c206c89e640ca7097d8e12e95eb8be Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 18 Aug 2011 05:50:42 +0000 Subject: Merge revision 1159045 from trunk Fix dead lock case in the os process pool Part of this patch was done by Paul Davis. The patch also introduces a test case to validate that the os process pool (couch_query_servers) operates as it should. Closes COUCHDB-1246. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1159049 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_os_process.erl | 6 +- src/couchdb/couch_query_servers.erl | 22 +++-- test/etap/210-os-proc-pool.t | 161 ++++++++++++++++++++++++++++++++++++ test/etap/Makefile.am | 4 +- 4 files changed, 180 insertions(+), 13 deletions(-) create mode 100644 test/etap/210-os-proc-pool.t diff --git a/src/couchdb/couch_os_process.erl b/src/couchdb/couch_os_process.erl index 5776776b..3ee52865 100644 --- a/src/couchdb/couch_os_process.erl +++ b/src/couchdb/couch_os_process.erl @@ -104,7 +104,6 @@ readjson(OsProc) when is_record(OsProc, os_proc) -> % gen_server API init([Command, Options, PortOptions]) -> - process_flag(trap_exit, true), PrivDir = couch_util:priv_dir(), Spawnkiller = filename:join(PrivDir, "couchspawnkillable"), BaseProc = #os_proc{ @@ -175,10 +174,7 @@ handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) -> {stop, normal, OsProc}; handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) -> ?LOG_ERROR("OS Process died with status: ~p", [Status]), - {stop, {exit_status, Status}, OsProc}; -handle_info(Msg, OsProc) -> - ?LOG_DEBUG("OS Proc: Unknown info: ~p", [Msg]), - {noreply, OsProc}. + {stop, {exit_status, Status}, OsProc}. code_change(_OldVsn, State, _Extra) -> {ok, State}. diff --git a/src/couchdb/couch_query_servers.erl b/src/couchdb/couch_query_servers.erl index b0e46937..f8bbcaed 100644 --- a/src/couchdb/couch_query_servers.erl +++ b/src/couchdb/couch_query_servers.erl @@ -22,7 +22,8 @@ -export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]). -% -export([test/0]). +% For 210-os-proc-pool.t +-export([get_os_process/1, ret_os_process/1]). -include("couch_db.hrl"). 
@@ -343,8 +344,7 @@ handle_call({get_proc, Lang}, From, Server) -> Error -> {reply, Error, Server} end; -handle_call({unlink_proc, Pid}, _From, #qserver{pid_procs=PidProcs}=Server) -> - rem_value(PidProcs, Pid), +handle_call({unlink_proc, Pid}, _From, Server) -> unlink(Pid), {reply, ok, Server}; handle_call({ret_proc, Proc}, _From, #qserver{ @@ -352,15 +352,22 @@ handle_call({ret_proc, Proc}, _From, #qserver{ lang_procs=LangProcs}=Server) -> % Along with max process limit, here we should check % if we're over the limit and discard when we are. - add_value(PidProcs, Proc#proc.pid, Proc), - add_to_list(LangProcs, Proc#proc.lang, Proc), - link(Proc#proc.pid), + case is_process_alive(Proc#proc.pid) of + true -> + add_value(PidProcs, Proc#proc.pid, Proc), + add_to_list(LangProcs, Proc#proc.lang, Proc), + link(Proc#proc.pid); + false -> + ok + end, {reply, true, service_waitlist(Server)}. handle_cast(_Whatever, Server) -> {noreply, Server}. -handle_info({'EXIT', Pid, Status}, #qserver{ +handle_info({'EXIT', _, _}, Server) -> + {noreply, Server}; +handle_info({'DOWN', _, process, Pid, Status}, #qserver{ pid_procs=PidProcs, lang_procs=LangProcs, lang_limits=LangLimits}=Server) -> @@ -461,6 +468,7 @@ new_process(Langs, LangLimits, Lang) -> case ets:lookup(Langs, Lang) of [{Lang, Mod, Func, Arg}] -> {ok, Pid} = apply(Mod, Func, Arg), + erlang:monitor(process, Pid), true = ets:insert(LangLimits, {Lang, Lim, Current+1}), {ok, #proc{lang=Lang, pid=Pid, diff --git a/test/etap/210-os-proc-pool.t b/test/etap/210-os-proc-pool.t new file mode 100644 index 00000000..b68d66be --- /dev/null +++ b/test/etap/210-os-proc-pool.t @@ -0,0 +1,161 @@ +#!/usr/bin/env escript +%% -*- erlang -*- +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +main(_) -> + test_util:init_code_path(), + + etap:plan(19), + case (catch test()) of + ok -> + etap:end_tests(); + Other -> + etap:diag(io_lib:format("Test died abnormally: ~p", [Other])), + etap:bail(Other) + end, + ok. + + +test() -> + couch_server_sup:start_link(test_util:config_files()), + couch_config:set("query_server_config", "os_process_limit", "3", false), + + test_pool_full(), + test_client_unexpected_exit(), + + couch_server_sup:stop(), + ok. + + +test_pool_full() -> + Client1 = spawn_client(), + Client2 = spawn_client(), + Client3 = spawn_client(), + + etap:diag("Check that we can spawn the max number of processes."), + etap:is(ping_client(Client1), ok, "Client 1 started ok."), + etap:is(ping_client(Client2), ok, "Client 2 started ok."), + etap:is(ping_client(Client3), ok, "Client 3 started ok."), + + Proc1 = get_client_proc(Client1, "1"), + Proc2 = get_client_proc(Client2, "2"), + Proc3 = get_client_proc(Client3, "3"), + etap:isnt(Proc1, Proc2, "Clients 1 and 2 got different procs."), + etap:isnt(Proc2, Proc3, "Clients 2 and 3 got different procs."), + + etap:diag("Check that client 4 blocks waiting for a process."), + Client4 = spawn_client(), + etap:is(ping_client(Client4), timeout, "Client 4 blocked while waiting."), + + etap:diag("Check that stopping a client gives up its process."), + etap:is(stop_client(Client1), ok, "First client stopped."), + + etap:diag("And check that our blocked process has been unblocked."), + etap:is(ping_client(Client4), ok, "Client was unblocked."), + + Proc4 = get_client_proc(Client4, "4"), + etap:is(Proc4, 
Proc1, "Client 4 got proc that client 1 got before."), + + lists:map(fun(C) -> ok = stop_client(C) end, [Client2, Client3, Client4]). + + +test_client_unexpected_exit() -> + Client1 = spawn_client(), + Client2 = spawn_client(), + Client3 = spawn_client(), + + etap:diag("Check that up to os_process_limit clients started."), + etap:is(ping_client(Client1), ok, "Client 1 started ok."), + etap:is(ping_client(Client2), ok, "Client 2 started ok."), + etap:is(ping_client(Client3), ok, "Client 3 started ok."), + + Proc1 = get_client_proc(Client1, "1"), + Proc2 = get_client_proc(Client2, "2"), + Proc3 = get_client_proc(Client3, "3"), + etap:isnt(Proc1, Proc2, "Clients 1 and 2 got different procs."), + etap:isnt(Proc2, Proc3, "Clients 2 and 3 got different procs."), + + etap:diag("Check that killing a client frees an os_process."), + etap:is(kill_client(Client1), ok, "Client 1 died all right."), + + etap:diag("Check that a new client is not blocked on boot."), + Client4 = spawn_client(), + etap:is(ping_client(Client4), ok, "New client booted without blocking."), + + Proc4 = get_client_proc(Client4, "4"), + etap:isnt(Proc4, Proc1, + "Client 4 got a proc different from the one client 1 got before."), + etap:isnt(Proc4, Proc2, "Client 4's proc different from client 2's proc."), + etap:isnt(Proc4, Proc3, "Client 4's proc different from client 3's proc."), + + lists:map(fun(C) -> ok = stop_client(C) end, [Client2, Client3, Client4]). + + +spawn_client() -> + Parent = self(), + Ref = make_ref(), + Pid = spawn(fun() -> + Proc = couch_query_servers:get_os_process(<<"javascript">>), + loop(Parent, Ref, Proc) + end), + {Pid, Ref}. + + +ping_client({Pid, Ref}) -> + Pid ! ping, + receive + {pong, Ref} -> ok + after 3000 -> timeout + end. + + +get_client_proc({Pid, Ref}, ClientName) -> + Pid ! get_proc, + receive + {proc, Ref, Proc} -> Proc + after 3000 -> + etap:bail("Timeout getting client " ++ ClientName ++ " proc.") + end. + + +stop_client({Pid, Ref}) -> + Pid ! 
stop, + receive + {stop, Ref} -> ok + after 3000 -> timeout + end. + + +kill_client({Pid, Ref}) -> + Pid ! die, + receive + {die, Ref} -> ok + after 3000 -> timeout + end. + + +loop(Parent, Ref, Proc) -> + receive + ping -> + Parent ! {pong, Ref}, + loop(Parent, Ref, Proc); + get_proc -> + Parent ! {proc, Ref, Proc}, + loop(Parent, Ref, Proc); + stop -> + couch_query_servers:ret_os_process(Proc), + Parent ! {stop, Ref}; + die -> + Parent ! {die, Ref}, + exit(some_error) + end. diff --git a/test/etap/Makefile.am b/test/etap/Makefile.am index ecbc3a93..c3a4ddab 100644 --- a/test/etap/Makefile.am +++ b/test/etap/Makefile.am @@ -87,4 +87,6 @@ EXTRA_DIST = \ 180-http-proxy.ini \ 180-http-proxy.t \ 190-oauth.t \ - 200-view-group-no-db-leaks.t + 200-view-group-no-db-leaks.t \ + 210-os-proc-pool.t + -- cgit v1.2.3 From 33c0215fb3b06d814ea8c48e6f88c68833e8d1b3 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 18 Aug 2011 05:51:31 +0000 Subject: Added executable bit to test/etap/210-os-proc-pool.t git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1159050 13f79535-47bb-0310-9956-ffa450edef68 --- test/etap/210-os-proc-pool.t | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 test/etap/210-os-proc-pool.t diff --git a/test/etap/210-os-proc-pool.t b/test/etap/210-os-proc-pool.t old mode 100644 new mode 100755 -- cgit v1.2.3 From 542f2732a4240a737cac02a29e6c58d90571e742 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 18 Aug 2011 06:40:39 +0000 Subject: Add missing assertions to test/etap/210-os-proc-pool.t git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1159059 13f79535-47bb-0310-9956-ffa450edef68 --- test/etap/210-os-proc-pool.t | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/etap/210-os-proc-pool.t b/test/etap/210-os-proc-pool.t index b68d66be..d80707e8 100755 --- a/test/etap/210-os-proc-pool.t +++ b/test/etap/210-os-proc-pool.t @@ -15,7 +15,7 @@ 
main(_) -> test_util:init_code_path(), - etap:plan(19), + etap:plan(21), case (catch test()) of ok -> etap:end_tests(); @@ -52,6 +52,7 @@ test_pool_full() -> Proc3 = get_client_proc(Client3, "3"), etap:isnt(Proc1, Proc2, "Clients 1 and 2 got different procs."), etap:isnt(Proc2, Proc3, "Clients 2 and 3 got different procs."), + etap:isnt(Proc1, Proc3, "Clients 1 and 3 got different procs."), etap:diag("Check that client 4 blocks waiting for a process."), Client4 = spawn_client(), @@ -84,6 +85,7 @@ test_client_unexpected_exit() -> Proc3 = get_client_proc(Client3, "3"), etap:isnt(Proc1, Proc2, "Clients 1 and 2 got different procs."), etap:isnt(Proc2, Proc3, "Clients 2 and 3 got different procs."), + etap:isnt(Proc1, Proc3, "Clients 1 and 3 got different procs."), etap:diag("Check that killing a client frees an os_process."), etap:is(kill_client(Client1), ok, "Client 1 died all right."), -- cgit v1.2.3 From 045fa896188ab3ca8dc55c8e6ac59a0b2dfd92db Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Fri, 19 Aug 2011 04:51:20 +0000 Subject: Updated changes feed connect timeout to 30 seconds This is the timeout used everywhere else. Some users are constantly unable to replicate since under some scenarios (namely via SSL) a timeout of 10 seconds is too short. 
This is related to a thread in the users mailing list: http://mail-archives.apache.org/mod_mbox/couchdb-user/201106.mbox/%3CBANLkTik3y-nN1uWMyNgzA_0ZSjxXdcMqQw@mail.gmail.com%3E git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1159501 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_rep_changes_feed.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/couchdb/couch_rep_changes_feed.erl b/src/couchdb/couch_rep_changes_feed.erl index 1c298937..a0696bf2 100644 --- a/src/couchdb/couch_rep_changes_feed.erl +++ b/src/couchdb/couch_rep_changes_feed.erl @@ -154,7 +154,7 @@ init([Parent, #http_db{headers = Headers0} = Source, Since, PostProps]) -> end; {ibrowse_async_headers, ReqId, Code, _} -> {stop, {changes_error_code, list_to_integer(Code)}} - after 10000 -> + after 30000 -> {stop, changes_timeout} end; -- cgit v1.2.3 From 476b01cd4ae27d81b4831091f40ca278eb872160 Mon Sep 17 00:00:00 2001 From: Randall Leeds Date: Tue, 23 Aug 2011 19:19:22 +0000 Subject: bring windows in line with r1001283 (COUCHDB-1197) Thanks to Dave Cottlehuber for the patch. 
This is a backport of r1160857 from trunk git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1160859 13f79535-47bb-0310-9956-ffa450edef68 --- etc/windows/couchdb.iss.tpl | 1 + 1 file changed, 1 insertion(+) diff --git a/etc/windows/couchdb.iss.tpl b/etc/windows/couchdb.iss.tpl index 8a32561d..868ac498 100644 --- a/etc/windows/couchdb.iss.tpl +++ b/etc/windows/couchdb.iss.tpl @@ -55,6 +55,7 @@ Source: "%openssl_bin_dir%\libeay32.dll"; DestDir: "{app}\bin"; Flags: ignorever [Dirs] Name: "{app}\var\lib\couchdb"; Permissions: authusers-modify Name: "{app}\var\log\couchdb"; Permissions: authusers-modify +Name: "{app}\var\run\couchdb"; Permissions: authusers-modify Name: "{app}\etc\couchdb"; Permissions: authusers-modify [Icons] -- cgit v1.2.3 From 9472aff5d8caca9fb973a55bc5eccae2cfb63c1e Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Thu, 25 Aug 2011 19:07:26 +0000 Subject: Do not suppress old revisions from _changes Requests to '_changes?style=all_docs' with a 'since' parameter would suppress revisions of a document generated before the start of the feed. The assumption may have been that those revisions were seen by the client in a previous request to _changes, but this is not necessarily true if the client is resuming the feed after a partial download. The safe thing to do is to include all leaf revisions for each document in the feed. Thanks Bob Dionne for the test code. 
Closes COUCHDB-1256 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1161702 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/changes.js | 26 ++++++++++++++++++++++++++ src/couchdb/couch_db.erl | 16 ++-------------- 2 files changed, 28 insertions(+), 14 deletions(-) diff --git a/share/www/script/test/changes.js b/share/www/script/test/changes.js index ea22bfb3..284f1985 100644 --- a/share/www/script/test/changes.js +++ b/share/www/script/test/changes.js @@ -507,6 +507,32 @@ couchTests.changes = function(debug) { CouchDB.request("GET", "/" + db.name + "/_changes"); TEquals(0, CouchDB.requestStats('httpd', 'clients_requesting_changes').current); + // COUCHDB-1256 + T(db.deleteDb()); + T(db.createDb()); + + T(db.save({"_id":"foo", "a" : 123}).ok); + T(db.save({"_id":"bar", "a" : 456}).ok); + + options = { + headers: {"Content-Type": "application/json"}, + body: JSON.stringify({"_rev":"1-cc609831f0ca66e8cd3d4c1e0d98108a", "a":456}) + }; + req = CouchDB.request("PUT", "/" + db.name + "/foo?new_edits=false", options); + + req = CouchDB.request("GET", "/" + db.name + "/_changes?style=all_docs"); + resp = JSON.parse(req.responseText); + + TEquals(3, resp.last_seq); + TEquals(2, resp.results.length); + + req = CouchDB.request("GET", "/" + db.name + "/_changes?style=all_docs&since=2"); + resp = JSON.parse(req.responseText); + + TEquals(3, resp.last_seq); + TEquals(1, resp.results.length); + TEquals(2, resp.results[0].changes.length); + // cleanup db.deleteDb(); }; diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index f919c72c..3d8395f4 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -947,20 +947,8 @@ enum_docs_reduce_to_count(Reds) -> changes_since(Db, Style, StartSeq, Fun, Acc) -> changes_since(Db, Style, StartSeq, Fun, [], Acc). 
-changes_since(Db, Style, StartSeq, Fun, Options, Acc) -> - Wrapper = fun(DocInfo, _Offset, Acc2) -> - #doc_info{revs=Revs} = DocInfo, - DocInfo2 = - case Style of - main_only -> - DocInfo; - all_docs -> - % remove revs before the seq - DocInfo#doc_info{revs=[RevInfo || - #rev_info{seq=RevSeq}=RevInfo <- Revs, StartSeq < RevSeq]} - end, - Fun(DocInfo2, Acc2) - end, +changes_since(Db, _Style, StartSeq, Fun, Options, Acc) -> + Wrapper = fun(DocInfo, _Offset, Acc2) -> Fun(DocInfo, Acc2) end, {ok, _LastReduction, AccOut} = couch_btree:fold(Db#db.docinfo_by_seq_btree, Wrapper, Acc, [{start_key, StartSeq + 1}] ++ Options), {ok, AccOut}. -- cgit v1.2.3 From 595ceeb8be4b5fea4bf40264cf294cf10f63eb3e Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Thu, 25 Aug 2011 19:07:33 +0000 Subject: Remove Style where we have no need for it git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1161703 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_changes.erl | 2 -- src/couchdb/couch_db.erl | 8 ++++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/couchdb/couch_changes.erl b/src/couchdb/couch_changes.erl index 6eb6f7e1..2be37797 100644 --- a/src/couchdb/couch_changes.erl +++ b/src/couchdb/couch_changes.erl @@ -197,7 +197,6 @@ start_sending_changes(Callback, UserAcc, ResponseType) -> send_changes(Args, Callback, UserAcc, Db, StartSeq, Prepend) -> #changes_args{ - style = Style, include_docs = IncludeDocs, conflicts = Conflicts, limit = Limit, @@ -207,7 +206,6 @@ send_changes(Args, Callback, UserAcc, Db, StartSeq, Prepend) -> } = Args, couch_db:changes_since( Db, - Style, StartSeq, fun changes_enumerator/2, [{dir, Dir}], diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index 3d8395f4..b9b48d7d 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -25,7 +25,7 @@ -export([start_link/3,open_doc_int/3,ensure_full_commit/1]). -export([set_security/2,get_security/1]). 
-export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]). --export([changes_since/5,changes_since/6,read_doc/2,new_revid/1]). +-export([changes_since/4,changes_since/5,read_doc/2,new_revid/1]). -export([check_is_admin/1, check_is_reader/1]). -export([reopen/1]). @@ -944,10 +944,10 @@ enum_docs_reduce_to_count(Reds) -> fun couch_db_updater:btree_by_id_reduce/2, Reds), Count. -changes_since(Db, Style, StartSeq, Fun, Acc) -> - changes_since(Db, Style, StartSeq, Fun, [], Acc). +changes_since(Db, StartSeq, Fun, Acc) -> + changes_since(Db, StartSeq, Fun, [], Acc). -changes_since(Db, _Style, StartSeq, Fun, Options, Acc) -> +changes_since(Db, StartSeq, Fun, Options, Acc) -> Wrapper = fun(DocInfo, _Offset, Acc2) -> Fun(DocInfo, Acc2) end, {ok, _LastReduction, AccOut} = couch_btree:fold(Db#db.docinfo_by_seq_btree, Wrapper, Acc, [{start_key, StartSeq + 1}] ++ Options), -- cgit v1.2.3 From 745dc431376f115e645fadf82526f4e9ac24d4c5 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sat, 27 Aug 2011 07:30:35 +0000 Subject: Fix replication crash on database compaction There were race conditions that made replications crash when a local endpoint database is compacted. Patch tested by Joan Touzet. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1162306 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_rep_reader.erl | 11 +++-------- src/couchdb/couch_rep_writer.erl | 16 +++++++++++++++- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/src/couchdb/couch_rep_reader.erl b/src/couchdb/couch_rep_reader.erl index 0d344e5c..856699e1 100644 --- a/src/couchdb/couch_rep_reader.erl +++ b/src/couchdb/couch_rep_reader.erl @@ -244,7 +244,7 @@ reader_loop(ReaderServer, Parent, Source, MissingRevsServer) -> case couch_rep_missing_revs:next(MissingRevsServer) of complete -> exit(complete); - {HighSeq, IdsRevs} -> + {_HighSeq, IdsRevs} -> % to be safe, make sure Results are sorted by source_seq SortedIdsRevs = lists:keysort(2, IdsRevs), RequestSeqs = [S || {_,S,_} <- SortedIdsRevs], @@ -256,7 +256,8 @@ reader_loop(ReaderServer, Parent, Source, MissingRevsServer) -> reader_loop(ReaderServer, Parent, Source, MissingRevsServer); _Local -> {ok, Source1} = gen_server:call(Parent, get_source_db, infinity), - Source2 = maybe_reopen_db(Source1, HighSeq), + {ok, Source2} = couch_db:open( + Source1#db.name, [{user_ctx, Source1#db.user_ctx}]), lists:foreach(fun({Id,Seq,Revs}) -> {ok, Docs} = couch_db:open_doc_revs(Source2, Id, Revs, [latest]), JustTheDocs = [Doc || {ok, Doc} <- Docs], @@ -268,12 +269,6 @@ reader_loop(ReaderServer, Parent, Source, MissingRevsServer) -> end end. -maybe_reopen_db(#db{update_seq=OldSeq} = Db, HighSeq) when HighSeq > OldSeq -> - {ok, NewDb} = couch_db:open(Db#db.name, [{user_ctx, Db#db.user_ctx}]), - NewDb; -maybe_reopen_db(Db, _HighSeq) -> - Db. 
- spawn_document_request(Source, Id, Seq, Revs) -> Server = self(), SpawnFun = fun() -> diff --git a/src/couchdb/couch_rep_writer.erl b/src/couchdb/couch_rep_writer.erl index 2b722e8e..40323925 100644 --- a/src/couchdb/couch_rep_writer.erl +++ b/src/couchdb/couch_rep_writer.erl @@ -26,7 +26,8 @@ writer_loop(Parent, Reader) -> ok; {HighSeq, Docs} -> DocCount = length(Docs), - {ok, Target} = gen_server:call(Parent, get_target_db, infinity), + {ok, Target0} = gen_server:call(Parent, get_target_db, infinity), + Target = open_db(Target0), try write_docs(Target, Docs) of {ok, []} -> Parent ! {update_stats, docs_written, DocCount}; @@ -38,6 +39,8 @@ writer_loop(Parent, Reader) -> {attachment_request_failed, Err} -> ?LOG_DEBUG("writer failed to write an attachment ~p", [Err]), exit({attachment_request_failed, Err, Docs}) + after + close_db(Target) end, Parent ! {writer_checkpoint, HighSeq}, couch_rep_att:cleanup(), @@ -163,3 +166,14 @@ write_docs_1({Props}) -> ErrId = couch_util:to_existing_atom(couch_util:get_value(<<"error">>, Props)), Reason = couch_util:get_value(<<"reason">>, Props), {{Id, Rev}, {ErrId, Reason}}. + +open_db(#db{name = Name, user_ctx = UserCtx}) -> + {ok, Db} = couch_db:open(Name, [{user_ctx, UserCtx}]), + Db; +open_db(HttpDb) -> + HttpDb. + +close_db(#db{} = Db) -> + couch_db:close(Db); +close_db(_HttpDb) -> + ok. 
-- cgit v1.2.3 From d87bcb83439ddd58ebb96abb4cabe911ae868eda Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sat, 27 Aug 2011 07:40:54 +0000 Subject: Remove unnecessary call after revision 1162306 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1162307 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_rep_reader.erl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/couchdb/couch_rep_reader.erl b/src/couchdb/couch_rep_reader.erl index 856699e1..1e8ca074 100644 --- a/src/couchdb/couch_rep_reader.erl +++ b/src/couchdb/couch_rep_reader.erl @@ -255,9 +255,8 @@ reader_loop(ReaderServer, Parent, Source, MissingRevsServer) -> infinity) || {Id,Seq,Revs} <- SortedIdsRevs], reader_loop(ReaderServer, Parent, Source, MissingRevsServer); _Local -> - {ok, Source1} = gen_server:call(Parent, get_source_db, infinity), {ok, Source2} = couch_db:open( - Source1#db.name, [{user_ctx, Source1#db.user_ctx}]), + Source#db.name, [{user_ctx, Source#db.user_ctx}]), lists:foreach(fun({Id,Seq,Revs}) -> {ok, Docs} = couch_db:open_doc_revs(Source2, Id, Revs, [latest]), JustTheDocs = [Doc || {ok, Doc} <- Docs], -- cgit v1.2.3 From 25f208d526e3838e1721c3903a12af1051a29c94 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Wed, 31 Aug 2011 19:09:05 +0000 Subject: Fix order of parameters in log message git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1163747 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_rep.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl index a9c156e9..9d90fee3 100644 --- a/src/couchdb/couch_rep.erl +++ b/src/couchdb/couch_rep.erl @@ -542,7 +542,7 @@ filter_code(Filter, Props, UserCtx) -> DocErrorMsg = io_lib:format( "Couldn't open document `_design/~s` from source " "database `~s`: ~s", - [dbname(Source), DDocName, couch_util:to_binary(DocError)]), + [DDocName, dbname(Source), 
couch_util:to_binary(DocError)]), throw({error, iolist_to_binary(DocErrorMsg)}) end, Code = couch_util:get_nested_json_value( -- cgit v1.2.3 From 840cb720094f660904789210a4b07a10cf90430f Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Fri, 2 Sep 2011 04:34:04 +0000 Subject: Fixes COUCHDB-1265 Backport of 1164350 from trunk. Slightly modified for an export declaration conflict and removing a clause that only applies to trunk. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1164351 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/recreate_doc.js | 41 +++++++++++++++++++++++++++++++++++ src/couchdb/couch_doc.erl | 16 +++++++++----- src/couchdb/couch_key_tree.erl | 17 ++++++++++++++- 3 files changed, 68 insertions(+), 6 deletions(-) diff --git a/share/www/script/test/recreate_doc.js b/share/www/script/test/recreate_doc.js index 05843558..a1cfb8f8 100644 --- a/share/www/script/test/recreate_doc.js +++ b/share/www/script/test/recreate_doc.js @@ -77,4 +77,45 @@ couchTests.recreate_doc = function(debug) { } catch (e) { T(e.error == "conflict"); } + + db.deleteDb(); + db.createDb(); + + // COUCHDB-1265 + // Resuscitate an unavailable old revision and make sure that it + // doesn't introduce duplicates into the _changes feed. + + var doc = {_id: "bar", count: 0}; + T(db.save(doc).ok); + var ghost = {_id: "bar", _rev: doc._rev, count: doc.count}; + for(var i = 0; i < 2; i++) { + doc.count += 1; + T(db.save(doc).ok); + } + + // Compact so that the old revision to be resuscitated will be + // in the rev_tree as ?REV_MISSING + db.compact(); + while(db.info().compact_running) {} + + // Saving the ghost here puts it back in the rev_tree in such + // a way as to create a new update_seq but without changing a + // leaf revision. 
This would cause the #full_doc_info{} and + // #doc_info{} records to diverge in their idea of what the + // doc's update_seq is and end up introducing a duplicate in + // the _changes feed the next time this doc is updated. + T(db.save(ghost, {new_edits: false}).ok); + + // The duplicate would have been introduce here becuase the #doc_info{} + // would not have been removed correctly. + T(db.save(doc).ok); + + // And finally assert that there are no duplicates in _changes. + var req = CouchDB.request("GET", "/test_suite_db/_changes"); + var resp = JSON.parse(req.responseText); + var docids = {}; + for(var i = 0; i < resp.results.length; i++) { + T(docids[resp.results[i].id] === undefined, "Duplicates in _changes feed."); + docids[resp.results[i].id] = true; + } }; diff --git a/src/couchdb/couch_doc.erl b/src/couchdb/couch_doc.erl index 531eb6bb..a6700d59 100644 --- a/src/couchdb/couch_doc.erl +++ b/src/couchdb/couch_doc.erl @@ -302,10 +302,16 @@ to_doc_info(FullDocInfo) -> {DocInfo, _Path} = to_doc_info_path(FullDocInfo), DocInfo. -max_seq([], Max) -> - Max; -max_seq([#rev_info{seq=Seq}|Rest], Max) -> - max_seq(Rest, if Max > Seq -> Max; true -> Seq end). +max_seq(Tree) -> + FoldFun = fun({_Pos, _Key}, Value, _Type, MaxOldSeq) -> + case Value of + {_Deleted, _DiskPos, OldTreeSeq} -> + erlang:max(MaxOldSeq, OldTreeSeq); + _ -> + MaxOldSeq + end + end, + couch_key_tree:fold(FoldFun, 0, Tree). to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree}) -> RevInfosAndPath = @@ -320,7 +326,7 @@ to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree}) -> end, RevInfosAndPath), [{_RevInfo, WinPath}|_] = SortedRevInfosAndPath, RevInfos = [RevInfo || {RevInfo, _Path} <- SortedRevInfosAndPath], - {#doc_info{id=Id, high_seq=max_seq(RevInfos, 0), revs=RevInfos}, WinPath}. + {#doc_info{id=Id, high_seq=max_seq(Tree), revs=RevInfos}, WinPath}. 
diff --git a/src/couchdb/couch_key_tree.erl b/src/couchdb/couch_key_tree.erl index 48a76b1d..367c9e33 100644 --- a/src/couchdb/couch_key_tree.erl +++ b/src/couchdb/couch_key_tree.erl @@ -49,7 +49,7 @@ -export([merge/3, find_missing/2, get_key_leafs/2, get_full_key_paths/2, get/2]). -export([map/2, get_all_leafs/1, count_leafs/1, remove_leafs/2, - get_all_leafs_full/1,stem/2,map_leafs/2]). + get_all_leafs_full/1,stem/2,map_leafs/2, fold/3]). -include("couch_db.hrl"). @@ -325,6 +325,21 @@ count_leafs_simple([{_Key, _Value, SubTree} | RestTree]) -> count_leafs_simple(SubTree) + count_leafs_simple(RestTree). +fold(_Fun, Acc, []) -> + Acc; +fold(Fun, Acc0, [{Pos, Tree}|Rest]) -> + Acc1 = fold_simple(Fun, Acc0, Pos, [Tree]), + fold(Fun, Acc1, Rest). + +fold_simple(_Fun, Acc, _Pos, []) -> + Acc; +fold_simple(Fun, Acc0, Pos, [{Key, Value, SubTree} | RestTree]) -> + Type = if SubTree == [] -> leaf; true -> branch end, + Acc1 = Fun({Pos, Key}, Value, Type, Acc0), + Acc2 = fold_simple(Fun, Acc1, Pos+1, SubTree), + fold_simple(Fun, Acc2, Pos, RestTree). + + map(_Fun, []) -> []; map(Fun, [{Pos, Tree}|Rest]) -> -- cgit v1.2.3 From 61ca44e384ca10b0b7bf68123c79ed43715ea9c9 Mon Sep 17 00:00:00 2001 From: Randall Leeds Date: Wed, 7 Sep 2011 22:08:21 +0000 Subject: additional libtool lubrication for windows build This is related to COUCHDB-1197 Thanks Dave Cottlehuber for the patch Backport of r1166438 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1166439 13f79535-47bb-0310-9956-ffa450edef68 --- configure.ac | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/configure.ac b/configure.ac index 0cc277a4..2bdf5aed 100644 --- a/configure.ac +++ b/configure.ac @@ -427,10 +427,12 @@ AC_OUTPUT # PLEASE, someone help put this out of its misery!! # This hackery is being tracked via COUCHDB-440. 
if test x${IS_WINDOWS} = xTRUE; then - sed -e 's,libname_spec="lib\\$name",libname_spec="\\\$name",' \ + mv libtool libtool.dist + sed -E -e 's,libname_spec="lib\\$name",libname_spec="\\\$name",' \ -e 's,-link,,' \ - < libtool > libtool.tmp - mv libtool.tmp libtool + -e 's/-Xlinker --out-implib -Xlinker \\\$lib//' \ + -e 's/(-shared -nostdlib)/-dll \1/' \ + < libtool.dist > libtool # probably would chmod +x if we weren't on windows... fi -- cgit v1.2.3 From 71081311b2b11ed923869ce5bd11ce7b0fd1da2c Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Thu, 8 Sep 2011 05:00:10 +0000 Subject: Fix CouchJS compatibility with older SpiderMonkey. This is a backport of the work to make CouchJS build against all of the major versions of SpiderMonkey. Thanks to Randall Leeds and Chris Coulson for the original patches. Backport based on r1137464 and r1164346 Fixes COUCHDB-1078 Fixes COUCHDB-1260 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1166525 13f79535-47bb-0310-9956-ffa450edef68 --- configure.ac | 41 +++-- share/server/mimeparse.js | 2 +- share/www/script/couch_test_runner.js | 15 +- src/couchdb/priv/Makefile.am | 11 +- src/couchdb/priv/couch_js/http.c | 318 ++++++++++++--------------------- src/couchdb/priv/couch_js/http.h | 10 +- src/couchdb/priv/couch_js/main.c | 327 +--------------------------------- src/couchdb/priv/couch_js/utf8.c | 9 +- test/javascript/run.tpl | 2 +- 9 files changed, 179 insertions(+), 556 deletions(-) diff --git a/configure.ac b/configure.ac index 2bdf5aed..5cf37635 100644 --- a/configure.ac +++ b/configure.ac @@ -108,13 +108,15 @@ esac AM_CONDITIONAL([WINDOWS], [test x$IS_WINDOWS = xTRUE]) -AC_CHECK_LIB([mozjs], [JS_NewContext], [JS_LIB_BASE=mozjs], [ - AC_CHECK_LIB([js], [JS_NewContext], [JS_LIB_BASE=js], [ - AC_CHECK_LIB([js3250], [JS_NewContext], [JS_LIB_BASE=js3250], [ - AC_CHECK_LIB([js32], [JS_NewContext], [JS_LIB_BASE=js32], [ - AC_MSG_ERROR([Could not find the js library. 
+AC_CHECK_LIB([mozjs185], [JS_NewContext], [JS_LIB_BASE=mozjs185], [ + AC_CHECK_LIB([mozjs185-1.0], [JS_NewContext], [JS_LIB_BASE=mozjs185-1.0], [ + AC_CHECK_LIB([mozjs], [JS_NewContext], [JS_LIB_BASE=mozjs], [ + AC_CHECK_LIB([js], [JS_NewContext], [JS_LIB_BASE=js], [ + AC_CHECK_LIB([js3250], [JS_NewContext], [JS_LIB_BASE=js3250], [ + AC_CHECK_LIB([js32], [JS_NewContext], [JS_LIB_BASE=js32], [ + AC_MSG_ERROR([Could not find the js library. -Is the Mozilla SpiderMonkey library installed?])])])])]) +Is the Mozilla SpiderMonkey library installed?])])])])])])]) AC_SUBST(JS_LIB_BASE) @@ -177,16 +179,19 @@ Are the Mozilla SpiderMonkey headers installed?]) AC_SUBST(JSLIB) AC_LANG_PUSH(C) -OLD_CFLAGS="$CFLAGS" -CFLAGS="-Werror-implicit-function-declaration" -AC_COMPILE_IFELSE( - [AC_LANG_PROGRAM( - [[#include ]], - [[JS_SetOperationCallback(0, 0);]] - )], - AC_DEFINE([USE_JS_SETOPCB], [], [Use new JS_SetOperationCallback]) -) -CFLAGS="$OLD_CFLAGS" + +AC_CHECK_LIB([$JS_LIB_BASE], [JS_NewCompartmentAndGlobalObject], + AC_DEFINE([SM185], [1], + [Use SpiderMonkey 1.8.5])) + +AC_CHECK_LIB([$JS_LIB_BASE], [JS_ThrowStopIteration], + AC_DEFINE([SM180], [1], + [Use SpiderMonkey 1.8.0])) + +AC_CHECK_LIB([$JS_LIB_BASE], [JS_GetStringCharsAndLength], + AC_DEFINE([HAVE_JS_GET_STRING_CHARS_AND_LENGTH], [1], + [Use newer JS_GetCharsAndLength function.])) + AC_LANG_POP(C) AC_ARG_WITH([win32-icu-binaries], [AC_HELP_STRING([--with-win32-icu-binaries=PATH], @@ -228,10 +233,10 @@ case "$(uname -s)" in CPPFLAGS="-D_XOPEN_SOURCE $CPPFLAGS" ;; FreeBSD) - LIBS="$LIBS -lcrypt" + LIBS="$LIBS -lm -lcrypt" ;; OpenBSD) - LIBS="$LIBS -lcrypto" + LIBS="$LIBS -lm -lcrypto" ;; esac diff --git a/share/server/mimeparse.js b/share/server/mimeparse.js index 3642a194..42b600fa 100644 --- a/share/server/mimeparse.js +++ b/share/server/mimeparse.js @@ -97,7 +97,7 @@ var Mimeparse = (function() { if ((type == targetType || type == "*" || targetType == "*") && (subtype == targetSubtype || subtype == "*" || 
targetSubtype == "*")) { var matchCount = 0; - for (param in targetParams) { + for (var param in targetParams) { if (param != 'q' && params[param] && params[param] == targetParams[param]) { matchCount += 1; } diff --git a/share/www/script/couch_test_runner.js b/share/www/script/couch_test_runner.js index 55a6533f..e14640b6 100644 --- a/share/www/script/couch_test_runner.js +++ b/share/www/script/couch_test_runner.js @@ -414,9 +414,22 @@ function waitForSuccess(fun, tag) { function waitForRestart() { var waiting = true; - while (waiting) { + // Wait for the server to go down but don't + // wait too long because we might miss the + // unavailable period. + var count = 25; + while (waiting && count > 0) { + count--; try { CouchDB.request("GET", "/"); + } catch(e) { + waiting = false; + } + } + // Wait for it to come back up + waiting = true; + while (waiting) { + try { CouchDB.request("GET", "/"); waiting = false; } catch(e) { diff --git a/src/couchdb/priv/Makefile.am b/src/couchdb/priv/Makefile.am index b36d828d..3d11e663 100644 --- a/src/couchdb/priv/Makefile.am +++ b/src/couchdb/priv/Makefile.am @@ -16,7 +16,10 @@ couchprivlibdir = $(couchlibdir)/priv/lib EXTRA_DIST = \ spawnkillable/couchspawnkillable.sh \ - stat_descriptions.cfg.in + stat_descriptions.cfg.in \ + couch_js/sm170.c \ + couch_js/sm180.c \ + couch_js/sm185.c CLEANFILES = stat_descriptions.cfg @@ -42,12 +45,14 @@ COUCHJS_SRCS = \ couch_js/http.h \ couch_js/main.c \ couch_js/utf8.c \ - couch_js/utf8.h + couch_js/utf8.h \ + couch_js/util.h \ + couch_js/util.c locallibbin_PROGRAMS = couchjs couchjs_SOURCES = $(COUCHJS_SRCS) couchjs_LDFLAGS = $(CURL_LDFLAGS) -couchjs_CFLAGS = -D_BSD_SOURCE $(CURL_CFLAGS) +couchjs_CFLAGS = -g -Wall -Werror -D_BSD_SOURCE $(CURL_CFLAGS) couchjs_LDADD = $(CURL_LDFLAGS) @JSLIB@ couchpriv_DATA = stat_descriptions.cfg diff --git a/src/couchdb/priv/couch_js/http.c b/src/couchdb/priv/couch_js/http.c index 6c2a8a82..77078e35 100644 --- a/src/couchdb/priv/couch_js/http.c +++ 
b/src/couchdb/priv/couch_js/http.c @@ -14,19 +14,31 @@ #include #include #include +#include "config.h" +#include "utf8.h" + + #include -#include "utf8.h" -#ifdef XP_WIN +void +http_check_enabled() +{ + return; +} + + // Map some of the string function names to things which exist on Windows +#ifdef XP_WIN #define strcasecmp _strcmpi #define strncasecmp _strnicmp #define snprintf _snprintf #endif + typedef struct curl_slist CurlHeaders; + typedef struct { int method; char* url; @@ -34,8 +46,10 @@ typedef struct { jsint last_status; } HTTPData; + char* METHODS[] = {"GET", "HEAD", "POST", "PUT", "DELETE", "COPY", NULL}; + #define GET 0 #define HEAD 1 #define POST 2 @@ -43,14 +57,17 @@ char* METHODS[] = {"GET", "HEAD", "POST", "PUT", "DELETE", "COPY", NULL}; #define DELETE 4 #define COPY 5 + static JSBool go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t blen); + static JSString* str_from_binary(JSContext* cx, char* data, size_t length); -static JSBool -constructor(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) + +JSBool +http_ctor(JSContext* cx, JSObject* req) { HTTPData* http = NULL; JSBool ret = JS_FALSE; @@ -67,12 +84,12 @@ constructor(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) http->req_headers = NULL; http->last_status = -1; - if(!JS_SetPrivate(cx, obj, http)) + if(!JS_SetPrivate(cx, req, http)) { JS_ReportError(cx, "Failed to set private CouchHTTP data."); goto error; } - + ret = JS_TRUE; goto success; @@ -83,90 +100,76 @@ success: return ret; } -static void -destructor(JSContext* cx, JSObject* obj) + +void +http_dtor(JSContext* cx, JSObject* obj) { HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj); - if(!http) - { - fprintf(stderr, "Unable to destroy invalid CouchHTTP instance.\n"); - } - else - { + if(http) { if(http->url) free(http->url); if(http->req_headers) curl_slist_free_all(http->req_headers); free(http); } } -static JSBool -open(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, 
jsval* rval) -{ - HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj); + +JSBool +http_open(JSContext* cx, JSObject* req, jsval mth, jsval url, jsval snc) +{ + HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req); char* method = NULL; - char* url = NULL; - JSBool ret = JS_FALSE; int methid; + JSBool ret = JS_FALSE; - if(!http) - { + if(!http) { JS_ReportError(cx, "Invalid CouchHTTP instance."); goto done; } - if(argv[0] == JSVAL_VOID) - { + if(mth == JSVAL_VOID) { JS_ReportError(cx, "You must specify a method."); goto done; } - method = enc_string(cx, argv[0], NULL); - if(!method) - { + method = enc_string(cx, mth, NULL); + if(!method) { JS_ReportError(cx, "Failed to encode method."); goto done; } - for(methid = 0; METHODS[methid] != NULL; methid++) - { + for(methid = 0; METHODS[methid] != NULL; methid++) { if(strcasecmp(METHODS[methid], method) == 0) break; } - if(methid > COPY) - { + if(methid > COPY) { JS_ReportError(cx, "Invalid method specified."); goto done; } http->method = methid; - if(argv[1] == JSVAL_VOID) - { + if(url == JSVAL_VOID) { JS_ReportError(cx, "You must specify a URL."); goto done; } - if(http->url) - { + if(http->url != NULL) { free(http->url); http->url = NULL; } - http->url = enc_string(cx, argv[1], NULL); - if(!http->url) - { + http->url = enc_string(cx, url, NULL); + if(http->url == NULL) { JS_ReportError(cx, "Failed to encode URL."); goto done; } - if(argv[2] != JSVAL_VOID && argv[2] != JSVAL_FALSE) - { - JS_ReportError(cx, "Synchronous flag must be false if specified."); + if(snc != JSVAL_FALSE) { + JS_ReportError(cx, "Synchronous flag must be false."); goto done; } - if(http->req_headers) - { + if(http->req_headers) { curl_slist_free_all(http->req_headers); http->req_headers = NULL; } @@ -181,42 +184,42 @@ done: return ret; } -static JSBool -setheader(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) -{ - HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj); + +JSBool +http_set_hdr(JSContext* cx, JSObject* req, jsval 
name, jsval val) +{ + HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req); char* keystr = NULL; char* valstr = NULL; char* hdrbuf = NULL; size_t hdrlen = -1; JSBool ret = JS_FALSE; - if(!http) - { + if(!http) { JS_ReportError(cx, "Invalid CouchHTTP instance."); goto done; } - if(argv[0] == JSVAL_VOID) + if(name == JSVAL_VOID) { JS_ReportError(cx, "You must speciy a header name."); goto done; } - keystr = enc_string(cx, argv[0], NULL); + keystr = enc_string(cx, name, NULL); if(!keystr) { JS_ReportError(cx, "Failed to encode header name."); goto done; } - if(argv[1] == JSVAL_VOID) + if(val == JSVAL_VOID) { JS_ReportError(cx, "You must specify a header value."); goto done; } - valstr = enc_string(cx, argv[1], NULL); + valstr = enc_string(cx, val, NULL); if(!valstr) { JS_ReportError(cx, "Failed to encode header value."); @@ -225,8 +228,7 @@ setheader(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) hdrlen = strlen(keystr) + strlen(valstr) + 3; hdrbuf = (char*) malloc(hdrlen * sizeof(char)); - if(!hdrbuf) - { + if(!hdrbuf) { JS_ReportError(cx, "Failed to allocate header buffer."); goto done; } @@ -240,121 +242,50 @@ done: if(keystr) free(keystr); if(valstr) free(valstr); if(hdrbuf) free(hdrbuf); - return ret; } -static JSBool -sendreq(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) +JSBool +http_send(JSContext* cx, JSObject* req, jsval body) { - HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj); - char* body = NULL; + HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req); + char* bodystr = NULL; size_t bodylen = 0; JSBool ret = JS_FALSE; - if(!http) - { + if(!http) { JS_ReportError(cx, "Invalid CouchHTTP instance."); goto done; } - if(argv[0] != JSVAL_VOID && argv[0] != JS_GetEmptyStringValue(cx)) - { - body = enc_string(cx, argv[0], &bodylen); - if(!body) - { + if(body != JSVAL_VOID && body != JS_GetEmptyStringValue(cx)) { + bodystr = enc_string(cx, body, &bodylen); + if(!bodystr) { JS_ReportError(cx, "Failed to encode body."); 
goto done; } } - ret = go(cx, obj, http, body, bodylen); + ret = go(cx, req, http, bodystr, bodylen); done: - if(body) free(body); + if(bodystr) free(bodystr); return ret; } -static JSBool -status(JSContext* cx, JSObject* obj, jsval idval, jsval* vp) +int +http_status(JSContext* cx, JSObject* req) { - HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj); + HTTPData* http = (HTTPData*) JS_GetPrivate(cx, req); - if(!http) - { + if(!http) { JS_ReportError(cx, "Invalid CouchHTTP instance."); return JS_FALSE; } - - if(INT_FITS_IN_JSVAL(http->last_status)) - { - *vp = INT_TO_JSVAL(http->last_status); - return JS_TRUE; - } - else - { - JS_ReportError(cx, "INTERNAL: Invalid last_status"); - return JS_FALSE; - } -} - -JSClass CouchHTTPClass = { - "CouchHTTP", - JSCLASS_HAS_PRIVATE - | JSCLASS_CONSTRUCT_PROTOTYPE - | JSCLASS_HAS_RESERVED_SLOTS(2), - JS_PropertyStub, - JS_PropertyStub, - JS_PropertyStub, - JS_PropertyStub, - JS_EnumerateStub, - JS_ResolveStub, - JS_ConvertStub, - destructor, - JSCLASS_NO_OPTIONAL_MEMBERS -}; - -JSPropertySpec CouchHTTPProperties[] = { - {"status", 0, JSPROP_READONLY, status, NULL}, - {0, 0, 0, 0, 0} -}; - -JSFunctionSpec CouchHTTPFunctions[] = { - {"_open", open, 3, 0, 0}, - {"_setRequestHeader", setheader, 2, 0, 0}, - {"_send", sendreq, 1, 0, 0}, - {0, 0, 0, 0, 0} -}; - -JSObject* -install_http(JSContext* cx, JSObject* glbl) -{ - JSObject* klass = NULL; - HTTPData* http = NULL; - klass = JS_InitClass( - cx, - glbl, - NULL, - &CouchHTTPClass, - constructor, - 0, - CouchHTTPProperties, - CouchHTTPFunctions, - NULL, - NULL - ); - - if(!klass) - { - fprintf(stderr, "Failed to initialize CouchHTTP class.\n"); - return NULL; - } - - return klass; + return http->last_status; } - // Curl Helpers typedef struct { @@ -364,6 +295,7 @@ typedef struct { char* sendbuf; size_t sendlen; size_t sent; + int sent_once; char* recvbuf; size_t recvlen; size_t read; @@ -395,13 +327,13 @@ go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t bodylen) 
state.sendbuf = body; state.sendlen = bodylen; state.sent = 0; + state.sent_once = 0; state.recvbuf = NULL; state.recvlen = 0; state.read = 0; - if(HTTP_HANDLE == NULL) - { + if(HTTP_HANDLE == NULL) { HTTP_HANDLE = curl_easy_init(); curl_easy_setopt(HTTP_HANDLE, CURLOPT_READFUNCTION, send_body); curl_easy_setopt(HTTP_HANDLE, CURLOPT_SEEKFUNCTION, @@ -416,14 +348,12 @@ go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t bodylen) "CouchHTTP Client - Relax"); } - if(!HTTP_HANDLE) - { + if(!HTTP_HANDLE) { JS_ReportError(cx, "Failed to initialize cURL handle."); goto done; } - if(http->method < 0 || http->method > COPY) - { + if(http->method < 0 || http->method > COPY) { JS_ReportError(cx, "INTERNAL: Unknown method."); goto done; } @@ -433,27 +363,21 @@ go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t bodylen) curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(HTTP_HANDLE, CURLOPT_UPLOAD, 0); - if(http->method == HEAD) - { + if(http->method == HEAD) { curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOBODY, 1); curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 0); - } - else if(http->method == POST || http->method == PUT) - { + } else if(http->method == POST || http->method == PUT) { curl_easy_setopt(HTTP_HANDLE, CURLOPT_UPLOAD, 1); curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 0); } - if(body && bodylen) - { + if(body && bodylen) { curl_easy_setopt(HTTP_HANDLE, CURLOPT_INFILESIZE, bodylen); - } - else - { + } else { curl_easy_setopt(HTTP_HANDLE, CURLOPT_INFILESIZE, 0); } - //curl_easy_setopt(HTTP_HANDLE, CURLOPT_VERBOSE, 1); + // curl_easy_setopt(HTTP_HANDLE, CURLOPT_VERBOSE, 1); curl_easy_setopt(HTTP_HANDLE, CURLOPT_URL, http->url); curl_easy_setopt(HTTP_HANDLE, CURLOPT_HTTPHEADER, http->req_headers); @@ -462,39 +386,32 @@ go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t bodylen) curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEHEADER, &state); curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEDATA, 
&state); - if(curl_easy_perform(HTTP_HANDLE) != 0) - { + if(curl_easy_perform(HTTP_HANDLE) != 0) { JS_ReportError(cx, "Failed to execute HTTP request: %s", ERRBUF); goto done; } - if(!state.resp_headers) - { + if(!state.resp_headers) { JS_ReportError(cx, "Failed to recieve HTTP headers."); goto done; } tmp = OBJECT_TO_JSVAL(state.resp_headers); if(!JS_DefineProperty( - cx, - obj, + cx, obj, "_headers", tmp, - NULL, - NULL, + NULL, NULL, JSPROP_READONLY - )) - { + )) { JS_ReportError(cx, "INTERNAL: Failed to set response headers."); goto done; } - if(state.recvbuf) // Is good enough? - { + if(state.recvbuf) { state.recvbuf[state.read] = '\0'; jsbody = dec_string(cx, state.recvbuf, state.read+1); - if(!jsbody) - { + if(!jsbody) { // If we can't decode the body as UTF-8 we forcefully // convert it to a string by just forcing each byte // to a jschar. @@ -507,22 +424,17 @@ go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t bodylen) } } tmp = STRING_TO_JSVAL(jsbody); - } - else - { + } else { tmp = JS_GetEmptyStringValue(cx); } if(!JS_DefineProperty( - cx, - obj, + cx, obj, "responseText", tmp, - NULL, - NULL, + NULL, NULL, JSPROP_READONLY - )) - { + )) { JS_ReportError(cx, "INTERNAL: Failed to set responseText."); goto done; } @@ -540,15 +452,20 @@ send_body(void *ptr, size_t size, size_t nmem, void *data) CurlState* state = (CurlState*) data; size_t length = size * nmem; size_t towrite = state->sendlen - state->sent; - if(towrite == 0) - { + + // Assume this is cURL trying to resend a request that + // failed. 
+ if(towrite == 0 && state->sent_once == 0) { + state->sent_once = 1; return 0; + } else if(towrite == 0) { + state->sent = 0; + state->sent_once = 0; + towrite = state->sendlen; } if(length < towrite) towrite = length; - //fprintf(stderr, "%lu %lu %lu %lu\n", state->bodyused, state->bodyread, length, towrite); - memcpy(ptr, state->sendbuf + state->sent, towrite); state->sent += towrite; @@ -572,15 +489,12 @@ recv_header(void *ptr, size_t size, size_t nmem, void *data) char code[4]; char* header = (char*) ptr; size_t length = size * nmem; - size_t index = 0; JSString* hdr = NULL; jsuint hdrlen; jsval hdrval; - if(length > 7 && strncasecmp(header, "HTTP/1.", 7) == 0) - { - if(length < 12) - { + if(length > 7 && strncasecmp(header, "HTTP/1.", 7) == 0) { + if(length < 12) { return CURLE_WRITE_ERROR; } @@ -589,8 +503,7 @@ recv_header(void *ptr, size_t size, size_t nmem, void *data) state->http->last_status = atoi(code); state->resp_headers = JS_NewArrayObject(state->cx, 0, NULL); - if(!state->resp_headers) - { + if(!state->resp_headers) { return CURLE_WRITE_ERROR; } @@ -598,26 +511,22 @@ recv_header(void *ptr, size_t size, size_t nmem, void *data) } // We get a notice at the \r\n\r\n after headers. - if(length <= 2) - { + if(length <= 2) { return length; } // Append the new header to our array. 
hdr = dec_string(state->cx, header, length); - if(!hdr) - { + if(!hdr) { return CURLE_WRITE_ERROR; } - if(!JS_GetArrayLength(state->cx, state->resp_headers, &hdrlen)) - { + if(!JS_GetArrayLength(state->cx, state->resp_headers, &hdrlen)) { return CURLE_WRITE_ERROR; } hdrval = STRING_TO_JSVAL(hdr); - if(!JS_SetElement(state->cx, state->resp_headers, hdrlen, &hdrval)) - { + if(!JS_SetElement(state->cx, state->resp_headers, hdrlen, &hdrval)) { return CURLE_WRITE_ERROR; } @@ -631,15 +540,13 @@ recv_body(void *ptr, size_t size, size_t nmem, void *data) size_t length = size * nmem; char* tmp = NULL; - if(!state->recvbuf) - { + if(!state->recvbuf) { state->recvlen = 4096; state->read = 0; state->recvbuf = JS_malloc(state->cx, state->recvlen); } - if(!state->recvbuf) - { + if(!state->recvbuf) { return CURLE_WRITE_ERROR; } @@ -663,8 +570,7 @@ str_from_binary(JSContext* cx, char* data, size_t length) if(!conv) return NULL; - for(i = 0; i < length; i++) - { + for(i = 0; i < length; i++) { conv[i] = (jschar) data[i]; } diff --git a/src/couchdb/priv/couch_js/http.h b/src/couchdb/priv/couch_js/http.h index b5f8c70f..373d1e48 100644 --- a/src/couchdb/priv/couch_js/http.h +++ b/src/couchdb/priv/couch_js/http.h @@ -13,6 +13,12 @@ #ifndef COUCH_JS_HTTP_H #define COUCH_JS_HTTP_H -JSObject* install_http(JSContext* cx, JSObject* global); +void http_check_enabled(); +JSBool http_ctor(JSContext* cx, JSObject* req); +void http_dtor(JSContext* cx, JSObject* req); +JSBool http_open(JSContext* cx, JSObject* req, jsval mth, jsval url, jsval snc); +JSBool http_set_hdr(JSContext* cx, JSObject* req, jsval name, jsval val); +JSBool http_send(JSContext* cx, JSObject* req, jsval body); +int http_status(JSContext* cx, JSObject* req); -#endif \ No newline at end of file +#endif diff --git a/src/couchdb/priv/couch_js/main.c b/src/couchdb/priv/couch_js/main.c index 376aa15b..209bb023 100644 --- a/src/couchdb/priv/couch_js/main.c +++ b/src/couchdb/priv/couch_js/main.c @@ -10,329 +10,12 @@ // License for 
the specific language governing permissions and limitations under // the License. -#include -#include -#include -#include #include "config.h" -#include "utf8.h" -#include "http.h" - -int gExitCode = 0; - -#ifdef JS_THREADSAFE -#define SETUP_REQUEST(cx) \ - JS_SetContextThread(cx); \ - JS_BeginRequest(cx); -#define FINISH_REQUEST(cx) \ - JS_EndRequest(cx); \ - JS_ClearContextThread(cx); +#if defined(SM185) +#include "sm185.c" +#elif defined(SM180) +#include "sm180.c" #else -#define SETUP_REQUEST(cx) -#define FINISH_REQUEST(cx) +#include "sm170.c" #endif - -static JSBool -evalcx(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) -{ - JSString *str; - JSObject *sandbox; - JSContext *subcx; - const jschar *src; - size_t srclen; - JSBool ret = JS_FALSE; - jsval v; - - sandbox = NULL; - if(!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sandbox)) - { - return JS_FALSE; - } - - subcx = JS_NewContext(JS_GetRuntime(cx), 8L * 1024L); - if(!subcx) - { - JS_ReportOutOfMemory(cx); - return JS_FALSE; - } - - SETUP_REQUEST(subcx); - - src = JS_GetStringChars(str); - srclen = JS_GetStringLength(str); - - if(!sandbox) - { - sandbox = JS_NewObject(subcx, NULL, NULL, NULL); - if(!sandbox || !JS_InitStandardClasses(subcx, sandbox)) goto done; - } - - if(srclen == 0) - { - *rval = OBJECT_TO_JSVAL(sandbox); - } - else - { - JS_EvaluateUCScript(subcx, sandbox, src, srclen, NULL, 0, rval); - } - - ret = JS_TRUE; - -done: - FINISH_REQUEST(subcx); - JS_DestroyContext(subcx); - return ret; -} - -static JSBool -gc(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) -{ - JS_GC(cx); - return JS_TRUE; -} - -static JSBool -print(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) -{ - uintN i; - char *bytes; - - for(i = 0; i < argc; i++) - { - bytes = enc_string(cx, argv[i], NULL); - if(!bytes) return JS_FALSE; - - fprintf(stdout, "%s%s", i ? 
" " : "", bytes); - JS_free(cx, bytes); - } - - fputc('\n', stdout); - fflush(stdout); - return JS_TRUE; -} - -static JSBool -quit(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) -{ - JS_ConvertArguments(cx, argc, argv, "/ i", &gExitCode); - return JS_FALSE; -} - -static char* -readfp(JSContext* cx, FILE* fp, size_t* buflen) -{ - char* bytes = NULL; - char* tmp = NULL; - size_t used = 0; - size_t byteslen = 256; - size_t readlen = 0; - - bytes = JS_malloc(cx, byteslen); - if(bytes == NULL) return NULL; - - while((readlen = js_fgets(bytes+used, byteslen-used, stdin)) > 0) - { - used += readlen; - - if(bytes[used-1] == '\n') - { - bytes[used-1] = '\0'; - break; - } - - // Double our buffer and read more. - byteslen *= 2; - tmp = JS_realloc(cx, bytes, byteslen); - if(!tmp) - { - JS_free(cx, bytes); - return NULL; - } - bytes = tmp; - } - - *buflen = used; - return bytes; -} - -static JSBool -readline(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) { - jschar *chars; - JSString *str; - char* bytes; - char* tmp; - size_t byteslen; - - /* GC Occasionally */ - JS_MaybeGC(cx); - - bytes = readfp(cx, stdin, &byteslen); - if(!bytes) return JS_FALSE; - - /* Treat the empty string specially */ - if(byteslen == 0) - { - *rval = JS_GetEmptyStringValue(cx); - JS_free(cx, bytes); - return JS_TRUE; - } - - /* Shrink the buffer to the real size */ - tmp = JS_realloc(cx, bytes, byteslen); - if(!tmp) - { - JS_free(cx, bytes); - return JS_FALSE; - } - bytes = tmp; - - str = dec_string(cx, bytes, byteslen); - JS_free(cx, bytes); - - if(!str) return JS_FALSE; - - *rval = STRING_TO_JSVAL(str); - - return JS_TRUE; -} - -static JSBool -seal(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) { - JSObject *target; - JSBool deep = JS_FALSE; - - if (!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep)) - return JS_FALSE; - if (!target) - return JS_TRUE; - return JS_SealObject(cx, target, deep); -} - -static void 
-execute_script(JSContext *cx, JSObject *obj, const char *filename) { - FILE *file; - JSScript *script; - jsval result; - - if(!filename || strcmp(filename, "-") == 0) - { - file = stdin; - } - else - { - file = fopen(filename, "r"); - if (!file) - { - fprintf(stderr, "could not open script file %s\n", filename); - gExitCode = 1; - return; - } - } - - script = JS_CompileFileHandle(cx, obj, filename, file); - if(script) - { - JS_ExecuteScript(cx, obj, script, &result); - JS_DestroyScript(cx, script); - } -} - -static void -printerror(JSContext *cx, const char *mesg, JSErrorReport *report) -{ - if(!report || !JSREPORT_IS_WARNING(report->flags)) - { - fprintf(stderr, "%s\n", mesg); - } -} - -static JSFunctionSpec global_functions[] = { - {"evalcx", evalcx, 0, 0, 0}, - {"gc", gc, 0, 0, 0}, - {"print", print, 0, 0, 0}, - {"quit", quit, 0, 0, 0}, - {"readline", readline, 0, 0, 0}, - {"seal", seal, 0, 0, 0}, - {0, 0, 0, 0, 0} -}; - -static JSClass global_class = { - "GlobalClass", - JSCLASS_GLOBAL_FLAGS, - JS_PropertyStub, - JS_PropertyStub, - JS_PropertyStub, - JS_PropertyStub, - JS_EnumerateStub, - JS_ResolveStub, - JS_ConvertStub, - JS_FinalizeStub, - JSCLASS_NO_OPTIONAL_MEMBERS -}; - -int -main(int argc, const char * argv[]) -{ - JSRuntime* rt = NULL; - JSContext* cx = NULL; - JSObject* global = NULL; - JSFunctionSpec* sp = NULL; - int i = 0; - - rt = JS_NewRuntime(64L * 1024L * 1024L); - if (!rt) return 1; - - cx = JS_NewContext(rt, 8L * 1024L); - if (!cx) return 1; - - JS_SetErrorReporter(cx, printerror); - JS_ToggleOptions(cx, JSOPTION_XML); - - SETUP_REQUEST(cx); - - global = JS_NewObject(cx, &global_class, NULL, NULL); - if (!global) return 1; - if (!JS_InitStandardClasses(cx, global)) return 1; - - for(sp = global_functions; sp->name != NULL; sp++) - { - if(!JS_DefineFunction(cx, global, - sp->name, sp->call, sp->nargs, sp->flags)) - { - fprintf(stderr, "Failed to create function: %s\n", sp->name); - return 1; - } - } - - if(!install_http(cx, global)) - { - 
return 1; - } - - JS_SetGlobalObject(cx, global); - - if(argc > 2) - { - fprintf(stderr, "incorrect number of arguments\n\n"); - fprintf(stderr, "usage: %s \n", argv[0]); - return 2; - } - - if(argc == 0) - { - execute_script(cx, global, NULL); - } - else - { - execute_script(cx, global, argv[1]); - } - - FINISH_REQUEST(cx); - - JS_DestroyContext(cx); - JS_DestroyRuntime(rt); - JS_ShutDown(); - - return gExitCode; -} diff --git a/src/couchdb/priv/couch_js/utf8.c b/src/couchdb/priv/couch_js/utf8.c index 699a6fee..d6064267 100644 --- a/src/couchdb/priv/couch_js/utf8.c +++ b/src/couchdb/priv/couch_js/utf8.c @@ -11,6 +11,7 @@ // the License. #include +#include "config.h" static int enc_char(uint8 *utf8Buffer, uint32 ucs4Char) @@ -121,7 +122,7 @@ char* enc_string(JSContext* cx, jsval arg, size_t* buflen) { JSString* str = NULL; - jschar* src = NULL; + const jschar* src = NULL; char* bytes = NULL; size_t srclen = 0; size_t byteslen = 0; @@ -129,8 +130,12 @@ enc_string(JSContext* cx, jsval arg, size_t* buflen) str = JS_ValueToString(cx, arg); if(!str) goto error; +#ifdef HAVE_JS_GET_STRING_CHARS_AND_LENGTH + src = JS_GetStringCharsAndLength(cx, str, &srclen); +#else src = JS_GetStringChars(str); srclen = JS_GetStringLength(str); +#endif if(!enc_charbuf(src, srclen, NULL, &byteslen)) goto error; @@ -283,4 +288,4 @@ error: success: return str; -} \ No newline at end of file +} diff --git a/test/javascript/run.tpl b/test/javascript/run.tpl index c5abe6e7..1389a4f9 100644 --- a/test/javascript/run.tpl +++ b/test/javascript/run.tpl @@ -27,4 +27,4 @@ cat $SCRIPT_DIR/json2.js \ $SCRIPT_DIR/test/*.js \ $JS_TEST_DIR/couch_http.js \ $JS_TEST_DIR/cli_runner.js \ - | $COUCHJS - + | $COUCHJS --http - -- cgit v1.2.3 From 6cf2f036ece28c95988b8312c8532fac55a3c7a0 Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Thu, 8 Sep 2011 05:05:19 +0000 Subject: And the rest of the commit/ SVN != Git. Forgot to check svn status before committing. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1166526 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/priv/couch_js/sm170.c | 378 +++++++++++++++++++++++++++++++++++ src/couchdb/priv/couch_js/sm180.c | 387 ++++++++++++++++++++++++++++++++++++ src/couchdb/priv/couch_js/sm185.c | 401 ++++++++++++++++++++++++++++++++++++++ src/couchdb/priv/couch_js/util.c | 237 ++++++++++++++++++++++ src/couchdb/priv/couch_js/util.h | 34 ++++ 5 files changed, 1437 insertions(+) create mode 100644 src/couchdb/priv/couch_js/sm170.c create mode 100644 src/couchdb/priv/couch_js/sm180.c create mode 100644 src/couchdb/priv/couch_js/sm185.c create mode 100644 src/couchdb/priv/couch_js/util.c create mode 100644 src/couchdb/priv/couch_js/util.h diff --git a/src/couchdb/priv/couch_js/sm170.c b/src/couchdb/priv/couch_js/sm170.c new file mode 100644 index 00000000..ebb6673f --- /dev/null +++ b/src/couchdb/priv/couch_js/sm170.c @@ -0,0 +1,378 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. 
+ +#include +#include +#include + +#include +#include "http.h" +#include "utf8.h" +#include "util.h" + + +#ifdef JS_THREADSAFE +#define SETUP_REQUEST(cx) \ + JS_SetContextThread(cx); \ + JS_BeginRequest(cx); +#define FINISH_REQUEST(cx) \ + JS_EndRequest(cx); \ + JS_ClearContextThread(cx); +#else +#define SETUP_REQUEST(cx) +#define FINISH_REQUEST(cx) +#endif + + +static JSBool +req_ctor(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) +{ + return http_ctor(cx, obj); +} + + +static void +req_dtor(JSContext* cx, JSObject* obj) +{ + http_dtor(cx, obj); +} + + +static JSBool +req_open(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) +{ + JSBool ret = JS_FALSE; + + if(argc == 2) { + ret = http_open(cx, obj, argv[0], argv[1], JSVAL_FALSE); + } else if(argc == 3) { + ret = http_open(cx, obj, argv[0], argv[1], argv[2]); + } else { + JS_ReportError(cx, "Invalid call to CouchHTTP.open"); + } + + *rval = JSVAL_VOID; + return ret; +} + + +static JSBool +req_set_hdr(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) +{ + JSBool ret = JS_FALSE; + if(argc == 2) { + ret = http_set_hdr(cx, obj, argv[0], argv[1]); + } else { + JS_ReportError(cx, "Invalid call to CouchHTTP.set_header"); + } + + *rval = JSVAL_VOID; + return ret; +} + + +static JSBool +req_send(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) +{ + JSBool ret = JS_FALSE; + if(argc == 1) { + ret = http_send(cx, obj, argv[0]); + } else { + JS_ReportError(cx, "Invalid call to CouchHTTP.send"); + } + + *rval = JSVAL_VOID; + return ret; +} + + +static JSBool +req_status(JSContext* cx, JSObject* obj, jsval idval, jsval* rval) +{ + int status = http_status(cx, obj); + if(status < 0) + return JS_FALSE; + + if(INT_FITS_IN_JSVAL(status)) { + *rval = INT_TO_JSVAL(status); + return JS_TRUE; + } else { + JS_ReportError(cx, "Invalid HTTP status."); + return JS_FALSE; + } +} + + +static JSBool +evalcx(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval 
*rval) +{ + JSString *str; + JSObject *sandbox; + JSContext *subcx; + const jschar *src; + size_t srclen; + JSBool ret = JS_FALSE; + + sandbox = NULL; + if(!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sandbox)) { + return JS_FALSE; + } + + subcx = JS_NewContext(JS_GetRuntime(cx), 8L * 1024L); + if(!subcx) { + JS_ReportOutOfMemory(cx); + return JS_FALSE; + } + + SETUP_REQUEST(subcx); + + src = JS_GetStringChars(str); + srclen = JS_GetStringLength(str); + + if(!sandbox) { + sandbox = JS_NewObject(subcx, NULL, NULL, NULL); + if(!sandbox || !JS_InitStandardClasses(subcx, sandbox)) { + goto done; + } + } + + if(srclen == 0) { + *rval = OBJECT_TO_JSVAL(sandbox); + } else { + JS_EvaluateUCScript(subcx, sandbox, src, srclen, NULL, 0, rval); + } + + ret = JS_TRUE; + +done: + FINISH_REQUEST(subcx); + JS_DestroyContext(subcx); + return ret; +} + + +static JSBool +gc(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) +{ + JS_GC(cx); + *rval = JSVAL_VOID; + return JS_TRUE; +} + + +static JSBool +print(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) +{ + couch_print(cx, argc, argv); + *rval = JSVAL_VOID; + return JS_TRUE; +} + + +static JSBool +quit(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) +{ + int exit_code = 0; + JS_ConvertArguments(cx, argc, argv, "/i", &exit_code); + exit(exit_code); +} + + +static JSBool +readline(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) +{ + JSString* line; + + /* GC Occasionally */ + JS_MaybeGC(cx); + + line = couch_readline(cx, stdin); + if(line == NULL) return JS_FALSE; + + *rval = STRING_TO_JSVAL(line); + return JS_TRUE; +} + + +static JSBool +seal(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) +{ + JSObject *target; + JSBool deep = JS_FALSE; + + if(!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep)) + return JS_FALSE; + + if(!target) { + *rval = JSVAL_VOID; + return JS_TRUE; + } + + if(JS_SealObject(cx, obj, deep) != 
JS_TRUE) + return JS_FALSE; + + *rval = JSVAL_VOID; + return JS_TRUE; +} + + +JSClass CouchHTTPClass = { + "CouchHTTP", + JSCLASS_HAS_PRIVATE + | JSCLASS_CONSTRUCT_PROTOTYPE + | JSCLASS_HAS_RESERVED_SLOTS(2), + JS_PropertyStub, + JS_PropertyStub, + JS_PropertyStub, + JS_PropertyStub, + JS_EnumerateStub, + JS_ResolveStub, + JS_ConvertStub, + req_dtor, + JSCLASS_NO_OPTIONAL_MEMBERS +}; + + +JSPropertySpec CouchHTTPProperties[] = { + {"status", 0, JSPROP_READONLY, req_status, NULL}, + {0, 0, 0, 0, 0} +}; + + +JSFunctionSpec CouchHTTPFunctions[] = { + {"_open", req_open, 3, 0, 0}, + {"_setRequestHeader", req_set_hdr, 2, 0, 0}, + {"_send", req_send, 1, 0, 0}, + {0, 0, 0, 0, 0} +}; + + +static JSClass global_class = { + "GlobalClass", + JSCLASS_GLOBAL_FLAGS, + JS_PropertyStub, + JS_PropertyStub, + JS_PropertyStub, + JS_PropertyStub, + JS_EnumerateStub, + JS_ResolveStub, + JS_ConvertStub, + JS_FinalizeStub, + JSCLASS_NO_OPTIONAL_MEMBERS +}; + + +static JSFunctionSpec global_functions[] = { + {"evalcx", evalcx, 0, 0, 0}, + {"gc", gc, 0, 0, 0}, + {"print", print, 0, 0, 0}, + {"quit", quit, 0, 0, 0}, + {"readline", readline, 0, 0, 0}, + {"seal", seal, 0, 0, 0}, + {0, 0, 0, 0, 0} +}; + + +int +main(int argc, const char* argv[]) +{ + JSRuntime* rt = NULL; + JSContext* cx = NULL; + JSObject* global = NULL; + JSObject* klass = NULL; + JSScript* script; + JSString* scriptsrc; + jschar* schars; + size_t slen; + jsval sroot; + jsval result; + + couch_args* args = couch_parse_args(argc, argv); + + rt = JS_NewRuntime(64L * 1024L * 1024L); + if(rt == NULL) + return 1; + + cx = JS_NewContext(rt, args->stack_size); + if(cx == NULL) + return 1; + + JS_SetErrorReporter(cx, couch_error); + JS_ToggleOptions(cx, JSOPTION_XML); + + SETUP_REQUEST(cx); + + global = JS_NewObject(cx, &global_class, NULL, NULL); + if(global == NULL) + return 1; + + JS_SetGlobalObject(cx, global); + + if(!JS_InitStandardClasses(cx, global)) + return 1; + + if(couch_load_funcs(cx, global, global_functions) != 
JS_TRUE) + return 1; + + if(args->use_http) { + http_check_enabled(); + + klass = JS_InitClass( + cx, global, + NULL, + &CouchHTTPClass, req_ctor, + 0, + CouchHTTPProperties, CouchHTTPFunctions, + NULL, NULL + ); + + if(!klass) + { + fprintf(stderr, "Failed to initialize CouchHTTP class.\n"); + exit(2); + } + } + + // Convert script source to jschars. + scriptsrc = dec_string(cx, args->script, strlen(args->script)); + if(!scriptsrc) + return 1; + + schars = JS_GetStringChars(scriptsrc); + slen = JS_GetStringLength(scriptsrc); + + // Root it so GC doesn't collect it. + sroot = STRING_TO_JSVAL(scriptsrc); + if(JS_AddRoot(cx, &sroot) != JS_TRUE) { + fprintf(stderr, "Internal root error.\n"); + return 1; + } + + // Compile and run + script = JS_CompileUCScript(cx, global, schars, slen, args->script_name, 1); + if(!script) { + fprintf(stderr, "Failed to compile script.\n"); + return 1; + } + + JS_ExecuteScript(cx, global, script, &result); + + // Warning message if we don't remove it. + JS_RemoveRoot(cx, &sroot); + + FINISH_REQUEST(cx); + JS_DestroyContext(cx); + JS_DestroyRuntime(rt); + JS_ShutDown(); + + return 0; +} diff --git a/src/couchdb/priv/couch_js/sm180.c b/src/couchdb/priv/couch_js/sm180.c new file mode 100644 index 00000000..dee16a78 --- /dev/null +++ b/src/couchdb/priv/couch_js/sm180.c @@ -0,0 +1,387 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. 
+ +#include +#include +#include + +#include +#include "http.h" +#include "utf8.h" +#include "util.h" + + +#define SETUP_REQUEST(cx) \ + JS_SetContextThread(cx); \ + JS_BeginRequest(cx); +#define FINISH_REQUEST(cx) \ + JS_EndRequest(cx); \ + JS_ClearContextThread(cx); + + +static JSBool +req_ctor(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) +{ + return http_ctor(cx, obj); +} + + +static void +req_dtor(JSContext* cx, JSObject* obj) +{ + http_dtor(cx, obj); +} + + +static JSBool +req_open(JSContext* cx, uintN argc, jsval* vp) +{ + JSObject* obj = JS_THIS_OBJECT(cx, vp); + jsval* argv = JS_ARGV(cx, vp); + JSBool ret = JS_FALSE; + + if(argc == 2) { + ret = http_open(cx, obj, argv[0], argv[1], JSVAL_FALSE); + } else if(argc == 3) { + ret = http_open(cx, obj, argv[0], argv[1], argv[2]); + } else { + JS_ReportError(cx, "Invalid call to CouchHTTP.open"); + } + + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return ret; +} + + +static JSBool +req_set_hdr(JSContext* cx, uintN argc, jsval* vp) +{ + JSObject* obj = JS_THIS_OBJECT(cx, vp); + jsval* argv = JS_ARGV(cx, vp); + JSBool ret = JS_FALSE; + + if(argc == 2) { + ret = http_set_hdr(cx, obj, argv[0], argv[1]); + } else { + JS_ReportError(cx, "Invalid call to CouchHTTP.set_header"); + } + + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return ret; +} + + +static JSBool +req_send(JSContext* cx, uintN argc, jsval* vp) +{ + JSObject* obj = JS_THIS_OBJECT(cx, vp); + jsval* argv = JS_ARGV(cx, vp); + JSBool ret = JS_FALSE; + + if(argc == 1) { + ret = http_send(cx, obj, argv[0]); + } else { + JS_ReportError(cx, "Invalid call to CouchHTTP.send"); + } + + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return ret; +} + + +static JSBool +req_status(JSContext* cx, JSObject* obj, jsval idval, jsval* vp) +{ + int status = http_status(cx, obj); + if(status < 0) + return JS_FALSE; + + if(INT_FITS_IN_JSVAL(status)) { + JS_SET_RVAL(cx, vp, INT_TO_JSVAL(status)); + return JS_TRUE; + } else { + JS_ReportError(cx, "Invalid HTTP status."); + return JS_FALSE; 
+ } +} + + +static JSBool +evalcx(JSContext *cx, uintN argc, jsval* vp) +{ + jsval* argv = JS_ARGV(cx, vp); + JSString *str; + JSObject *sandbox; + JSContext *subcx; + const jschar *src; + size_t srclen; + jsval rval; + JSBool ret = JS_FALSE; + + sandbox = NULL; + if(!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sandbox)) { + return JS_FALSE; + } + + subcx = JS_NewContext(JS_GetRuntime(cx), 8L * 1024L); + if(!subcx) { + JS_ReportOutOfMemory(cx); + return JS_FALSE; + } + + SETUP_REQUEST(subcx); + + src = JS_GetStringChars(str); + srclen = JS_GetStringLength(str); + + if(!sandbox) { + sandbox = JS_NewObject(subcx, NULL, NULL, NULL); + if(!sandbox || !JS_InitStandardClasses(subcx, sandbox)) { + goto done; + } + } + + if(srclen == 0) { + JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(sandbox)); + } else { + JS_EvaluateUCScript(subcx, sandbox, src, srclen, NULL, 0, &rval); + JS_SET_RVAL(cx, vp, rval); + } + + ret = JS_TRUE; + +done: + FINISH_REQUEST(subcx); + JS_DestroyContext(subcx); + return ret; +} + + +static JSBool +gc(JSContext* cx, uintN argc, jsval* vp) +{ + JS_GC(cx); + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return JS_TRUE; +} + + +static JSBool +print(JSContext* cx, uintN argc, jsval* vp) +{ + jsval* argv = JS_ARGV(cx, vp); + couch_print(cx, argc, argv); + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return JS_TRUE; +} + + +static JSBool +quit(JSContext* cx, uintN argc, jsval* vp) +{ + jsval* argv = JS_ARGV(cx, vp); + int exit_code = 0; + JS_ConvertArguments(cx, argc, argv, "/i", &exit_code); + exit(exit_code); +} + + +static JSBool +readline(JSContext* cx, uintN argc, jsval* vp) +{ + JSString* line; + + /* GC Occasionally */ + JS_MaybeGC(cx); + + line = couch_readline(cx, stdin); + if(line == NULL) return JS_FALSE; + + JS_SET_RVAL(cx, vp, STRING_TO_JSVAL(line)); + return JS_TRUE; +} + + +static JSBool +seal(JSContext* cx, uintN argc, jsval* vp) +{ + jsval* argv = JS_ARGV(cx, vp); + JSObject *target; + JSBool deep = JS_FALSE; + + if(!JS_ConvertArguments(cx, argc, argv, "o/b", 
&target, &deep)) + return JS_FALSE; + + if(!target) { + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return JS_TRUE; + } + + if(JS_SealObject(cx, target, deep) != JS_TRUE) + return JS_FALSE; + + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return JS_TRUE; +} + + +JSClass CouchHTTPClass = { + "CouchHTTP", + JSCLASS_HAS_PRIVATE + | JSCLASS_CONSTRUCT_PROTOTYPE + | JSCLASS_HAS_RESERVED_SLOTS(2), + JS_PropertyStub, + JS_PropertyStub, + JS_PropertyStub, + JS_PropertyStub, + JS_EnumerateStub, + JS_ResolveStub, + JS_ConvertStub, + req_dtor, + JSCLASS_NO_OPTIONAL_MEMBERS +}; + + +JSPropertySpec CouchHTTPProperties[] = { + {"status", 0, JSPROP_READONLY, req_status, NULL}, + {0, 0, 0, 0, 0} +}; + + +JSFunctionSpec CouchHTTPFunctions[] = { + JS_FS("_open", (JSNative) req_open, 3, JSFUN_FAST_NATIVE, 0), + JS_FS("_setRequestHeader", (JSNative) req_set_hdr, 2, JSFUN_FAST_NATIVE, 0), + JS_FS("_send", (JSNative) req_send, 1, JSFUN_FAST_NATIVE, 0), + JS_FS_END +}; + + +static JSClass global_class = { + "GlobalClass", + JSCLASS_GLOBAL_FLAGS, + JS_PropertyStub, + JS_PropertyStub, + JS_PropertyStub, + JS_PropertyStub, + JS_EnumerateStub, + JS_ResolveStub, + JS_ConvertStub, + JS_FinalizeStub, + JSCLASS_NO_OPTIONAL_MEMBERS +}; + + +static JSFunctionSpec global_functions[] = { + JS_FS("evalcx", (JSNative) evalcx, 0, JSFUN_FAST_NATIVE, 0), + JS_FS("gc", (JSNative) gc, 0, JSFUN_FAST_NATIVE, 0), + JS_FS("print", (JSNative) print, 0, JSFUN_FAST_NATIVE, 0), + JS_FS("quit", (JSNative) quit, 0, JSFUN_FAST_NATIVE, 0), + JS_FS("readline", (JSNative) readline, 0, JSFUN_FAST_NATIVE, 0), + JS_FS("seal", (JSNative) seal, 0, JSFUN_FAST_NATIVE, 0), + JS_FS_END +}; + + +int +main(int argc, const char* argv[]) +{ + JSRuntime* rt = NULL; + JSContext* cx = NULL; + JSObject* global = NULL; + JSObject* klass = NULL; + JSScript* script; + JSString* scriptsrc; + jschar* schars; + size_t slen; + jsval sroot; + jsval result; + + couch_args* args = couch_parse_args(argc, argv); + + rt = JS_NewRuntime(64L * 1024L * 1024L); + if(rt == 
NULL) + return 1; + + cx = JS_NewContext(rt, args->stack_size); + if(cx == NULL) + return 1; + + JS_SetErrorReporter(cx, couch_error); + JS_ToggleOptions(cx, JSOPTION_XML); + + SETUP_REQUEST(cx); + + global = JS_NewObject(cx, &global_class, NULL, NULL); + if(global == NULL) + return 1; + + JS_SetGlobalObject(cx, global); + + if(!JS_InitStandardClasses(cx, global)) + return 1; + + if(couch_load_funcs(cx, global, global_functions) != JS_TRUE) + return 1; + + if(args->use_http) { + http_check_enabled(); + + klass = JS_InitClass( + cx, global, + NULL, + &CouchHTTPClass, req_ctor, + 0, + CouchHTTPProperties, CouchHTTPFunctions, + NULL, NULL + ); + + if(!klass) + { + fprintf(stderr, "Failed to initialize CouchHTTP class.\n"); + exit(2); + } + } + + // Convert script source to jschars. + scriptsrc = dec_string(cx, args->script, strlen(args->script)); + if(!scriptsrc) + return 1; + + schars = JS_GetStringChars(scriptsrc); + slen = JS_GetStringLength(scriptsrc); + + // Root it so GC doesn't collect it. + sroot = STRING_TO_JSVAL(scriptsrc); + if(JS_AddRoot(cx, &sroot) != JS_TRUE) { + fprintf(stderr, "Internal root error.\n"); + return 1; + } + + // Compile and run + script = JS_CompileUCScript(cx, global, schars, slen, args->script_name, 1); + if(!script) { + fprintf(stderr, "Failed to compile script.\n"); + return 1; + } + + JS_ExecuteScript(cx, global, script, &result); + + // Warning message if we don't remove it. + JS_RemoveRoot(cx, &sroot); + + FINISH_REQUEST(cx); + JS_DestroyContext(cx); + JS_DestroyRuntime(rt); + JS_ShutDown(); + + return 0; +} diff --git a/src/couchdb/priv/couch_js/sm185.c b/src/couchdb/priv/couch_js/sm185.c new file mode 100644 index 00000000..701d5677 --- /dev/null +++ b/src/couchdb/priv/couch_js/sm185.c @@ -0,0 +1,401 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. 
You may obtain a copy of +// the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +#include +#include +#include + +#include +#include "http.h" +#include "utf8.h" +#include "util.h" + + +#define SETUP_REQUEST(cx) \ + JS_SetContextThread(cx); \ + JS_BeginRequest(cx); +#define FINISH_REQUEST(cx) \ + JS_EndRequest(cx); \ + JS_ClearContextThread(cx); + + +static JSClass global_class = { + "GlobalClass", + JSCLASS_GLOBAL_FLAGS, + JS_PropertyStub, + JS_PropertyStub, + JS_PropertyStub, + JS_StrictPropertyStub, + JS_EnumerateStub, + JS_ResolveStub, + JS_ConvertStub, + JS_FinalizeStub, + JSCLASS_NO_OPTIONAL_MEMBERS +}; + + +static JSBool +req_ctor(JSContext* cx, uintN argc, jsval* vp) +{ + JSBool ret; + JSObject* obj = JS_NewObjectForConstructor(cx, vp); + if(!obj) { + JS_ReportError(cx, "Failed to create CouchHTTP instance.\n"); + return JS_FALSE; + } + ret = http_ctor(cx, obj); + JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(obj)); + return ret; +} + + +static void +req_dtor(JSContext* cx, JSObject* obj) +{ + http_dtor(cx, obj); +} + + +static JSBool +req_open(JSContext* cx, uintN argc, jsval* vp) +{ + JSObject* obj = JS_THIS_OBJECT(cx, vp); + jsval* argv = JS_ARGV(cx, vp); + JSBool ret = JS_FALSE; + + if(argc == 2) { + ret = http_open(cx, obj, argv[0], argv[1], JSVAL_FALSE); + } else if(argc == 3) { + ret = http_open(cx, obj, argv[0], argv[1], argv[2]); + } else { + JS_ReportError(cx, "Invalid call to CouchHTTP.open"); + } + + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return ret; +} + + +static JSBool +req_set_hdr(JSContext* cx, uintN argc, jsval* vp) +{ + JSObject* obj = JS_THIS_OBJECT(cx, vp); + jsval* argv = JS_ARGV(cx, vp); + JSBool ret = 
JS_FALSE; + + if(argc == 2) { + ret = http_set_hdr(cx, obj, argv[0], argv[1]); + } else { + JS_ReportError(cx, "Invalid call to CouchHTTP.set_header"); + } + + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return ret; +} + + +static JSBool +req_send(JSContext* cx, uintN argc, jsval* vp) +{ + JSObject* obj = JS_THIS_OBJECT(cx, vp); + jsval* argv = JS_ARGV(cx, vp); + JSBool ret = JS_FALSE; + + if(argc == 1) { + ret = http_send(cx, obj, argv[0]); + } else { + JS_ReportError(cx, "Invalid call to CouchHTTP.send"); + } + + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return ret; +} + + +static JSBool +req_status(JSContext* cx, JSObject* obj, jsid pid, jsval* vp) +{ + int status = http_status(cx, obj); + if(status < 0) + return JS_FALSE; + + JS_SET_RVAL(cx, vp, INT_TO_JSVAL(status)); + return JS_TRUE; +} + + +static JSBool +evalcx(JSContext *cx, uintN argc, jsval* vp) +{ + jsval* argv = JS_ARGV(cx, vp); + JSString* str; + JSObject* sandbox; + JSObject* global; + JSContext* subcx; + JSCrossCompartmentCall* call = NULL; + const jschar* src; + size_t srclen; + jsval rval; + JSBool ret = JS_FALSE; + + sandbox = NULL; + if(!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sandbox)) { + return JS_FALSE; + } + + subcx = JS_NewContext(JS_GetRuntime(cx), 8L * 1024L); + if(!subcx) { + JS_ReportOutOfMemory(cx); + return JS_FALSE; + } + + SETUP_REQUEST(subcx); + + src = JS_GetStringCharsAndLength(cx, str, &srclen); + + // Re-use the compartment associated with the main context, + // rather than creating a new compartment */ + global = JS_GetGlobalObject(cx); + if(global == NULL) goto done; + call = JS_EnterCrossCompartmentCall(subcx, global); + + if(!sandbox) { + sandbox = JS_NewGlobalObject(subcx, &global_class); + if(!sandbox || !JS_InitStandardClasses(subcx, sandbox)) { + goto done; + } + } + + if(srclen == 0) { + JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(sandbox)); + } else { + JS_EvaluateUCScript(subcx, sandbox, src, srclen, NULL, 0, &rval); + JS_SET_RVAL(cx, vp, rval); + } + + ret = JS_TRUE; + +done: 
+ JS_LeaveCrossCompartmentCall(call); + FINISH_REQUEST(subcx); + JS_DestroyContext(subcx); + return ret; +} + + +static JSBool +gc(JSContext* cx, uintN argc, jsval* vp) +{ + JS_GC(cx); + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return JS_TRUE; +} + + +static JSBool +print(JSContext* cx, uintN argc, jsval* vp) +{ + jsval* argv = JS_ARGV(cx, vp); + couch_print(cx, argc, argv); + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return JS_TRUE; +} + + +static JSBool +quit(JSContext* cx, uintN argc, jsval* vp) +{ + jsval* argv = JS_ARGV(cx, vp); + int exit_code = 0; + JS_ConvertArguments(cx, argc, argv, "/i", &exit_code); + exit(exit_code); +} + + +static JSBool +readline(JSContext* cx, uintN argc, jsval* vp) +{ + JSString* line; + + /* GC Occasionally */ + JS_MaybeGC(cx); + + line = couch_readline(cx, stdin); + if(line == NULL) return JS_FALSE; + + JS_SET_RVAL(cx, vp, STRING_TO_JSVAL(line)); + return JS_TRUE; +} + + +static JSBool +seal(JSContext* cx, uintN argc, jsval* vp) +{ + jsval* argv = JS_ARGV(cx, vp); + JSObject *target; + JSBool deep = JS_FALSE; + JSBool ret; + + if(!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep)) + return JS_FALSE; + + if(!target) { + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return JS_TRUE; + } + + + ret = deep ? 
JS_DeepFreezeObject(cx, target) : JS_FreezeObject(cx, target); + JS_SET_RVAL(cx, vp, JSVAL_VOID); + return ret; +} + + +JSClass CouchHTTPClass = { + "CouchHTTP", + JSCLASS_HAS_PRIVATE + | JSCLASS_CONSTRUCT_PROTOTYPE + | JSCLASS_HAS_RESERVED_SLOTS(2), + JS_PropertyStub, + JS_PropertyStub, + JS_PropertyStub, + JS_StrictPropertyStub, + JS_EnumerateStub, + JS_ResolveStub, + JS_ConvertStub, + req_dtor, + JSCLASS_NO_OPTIONAL_MEMBERS +}; + + +JSPropertySpec CouchHTTPProperties[] = { + {"status", 0, JSPROP_READONLY, req_status, NULL}, + {0, 0, 0, 0, 0} +}; + + +JSFunctionSpec CouchHTTPFunctions[] = { + JS_FS("_open", req_open, 3, 0), + JS_FS("_setRequestHeader", req_set_hdr, 2, 0), + JS_FS("_send", req_send, 1, 0), + JS_FS_END +}; + + +static JSFunctionSpec global_functions[] = { + JS_FS("evalcx", evalcx, 0, 0), + JS_FS("gc", gc, 0, 0), + JS_FS("print", print, 0, 0), + JS_FS("quit", quit, 0, 0), + JS_FS("readline", readline, 0, 0), + JS_FS("seal", seal, 0, 0), + JS_FS_END +}; + + +int +main(int argc, const char* argv[]) +{ + JSRuntime* rt = NULL; + JSContext* cx = NULL; + JSObject* global = NULL; + JSCrossCompartmentCall *call = NULL; + JSObject* klass = NULL; + JSObject* script; + JSString* scriptsrc; + const jschar* schars; + size_t slen; + jsval sroot; + jsval result; + + couch_args* args = couch_parse_args(argc, argv); + + rt = JS_NewRuntime(64L * 1024L * 1024L); + if(rt == NULL) + return 1; + + cx = JS_NewContext(rt, args->stack_size); + if(cx == NULL) + return 1; + + JS_SetErrorReporter(cx, couch_error); + JS_ToggleOptions(cx, JSOPTION_XML); + + SETUP_REQUEST(cx); + + global = JS_NewCompartmentAndGlobalObject(cx, &global_class, NULL); + if(global == NULL) + return 1; + + call = JS_EnterCrossCompartmentCall(cx, global); + + JS_SetGlobalObject(cx, global); + + if(!JS_InitStandardClasses(cx, global)) + return 1; + + if(couch_load_funcs(cx, global, global_functions) != JS_TRUE) + return 1; + + if(args->use_http) { + http_check_enabled(); + + klass = JS_InitClass( + cx, 
global, + NULL, + &CouchHTTPClass, req_ctor, + 0, + CouchHTTPProperties, CouchHTTPFunctions, + NULL, NULL + ); + + if(!klass) + { + fprintf(stderr, "Failed to initialize CouchHTTP class.\n"); + exit(2); + } + } + + // Convert script source to jschars. + scriptsrc = dec_string(cx, args->script, strlen(args->script)); + if(!scriptsrc) + return 1; + + schars = JS_GetStringCharsAndLength(cx, scriptsrc, &slen); + + // Root it so GC doesn't collect it. + sroot = STRING_TO_JSVAL(scriptsrc); + if(JS_AddValueRoot(cx, &sroot) != JS_TRUE) { + fprintf(stderr, "Internal root error.\n"); + return 1; + } + + // Compile and run + script = JS_CompileUCScript(cx, global, schars, slen, args->script_name, 1); + if(!script) { + fprintf(stderr, "Failed to compile script.\n"); + return 1; + } + + JS_ExecuteScript(cx, global, script, &result); + + // Warning message if we don't remove it. + JS_RemoveValueRoot(cx, &sroot); + + JS_LeaveCrossCompartmentCall(call); + FINISH_REQUEST(cx); + JS_DestroyContext(cx); + JS_DestroyRuntime(rt); + JS_ShutDown(); + + return 0; +} diff --git a/src/couchdb/priv/couch_js/util.c b/src/couchdb/priv/couch_js/util.c new file mode 100644 index 00000000..070d7172 --- /dev/null +++ b/src/couchdb/priv/couch_js/util.c @@ -0,0 +1,237 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. 
+ +#include +#include + +#include + +#include "util.h" +#include "utf8.h" + + +char* +slurp_file(char* buf, const char* file) +{ + FILE* fp; + char fbuf[16384]; + char* tmp; + size_t nread = 0; + size_t buflen = 0; + + if(strcmp(file, "-") == 0) { + fp = stdin; + } else { + fp = fopen(file, "r"); + if(fp == NULL) { + fprintf(stderr, "Failed to read file: %s\n", file); + exit(3); + } + } + + while((nread = fread(fbuf, 1, 16384, fp)) > 0) { + if(buf == NULL) { + buflen = nread; + buf = (char*) malloc(nread + 1); + if(buf == NULL) { + fprintf(stderr, "Out of memory.\n"); + exit(3); + } + memcpy(buf, fbuf, buflen); + buf[buflen] = '\0'; + } else { + buflen = strlen(buf); + tmp = (char*) malloc(buflen + nread + 1); + if(tmp == NULL) { + fprintf(stderr, "Out of memory.\n"); + exit(3); + } + memcpy(tmp, buf, buflen); + memcpy(tmp+buflen, fbuf, nread); + tmp[buflen+nread] = '\0'; + free(buf); + buf = tmp; + } + } + return buf; +} + +couch_args* +couch_parse_args(int argc, const char* argv[]) +{ + couch_args* args; + int i = 1; + + args = (couch_args*) malloc(sizeof(couch_args)); + if(args == NULL) + return NULL; + + memset(args, '\0', sizeof(couch_args)); + args->stack_size = 8L * 1024L; + + while(i < argc) { + if(strcmp("--http", argv[i]) == 0) { + args->use_http = 1; + } else if(strcmp("--stack-size", argv[i]) == 0) { + args->stack_size = atoi(argv[i+1]); + if(args->stack_size <= 0) { + fprintf(stderr, "Invalid stack size.\n"); + exit(2); + } + } else { + args->script = slurp_file(args->script, argv[i]); + if(args->script_name == NULL) { + if(strcmp(argv[i], "-") == 0) { + args->script_name = ""; + } else { + args->script_name = argv[i]; + } + } else { + args->script_name = ""; + } + } + i++; + } + + if(args->script_name == NULL || args->script == NULL) { + fprintf(stderr, "No script provided.\n"); + exit(3); + } + + return args; +} + + +int +couch_fgets(char* buf, int size, FILE* fp) +{ + int n, i, c; + + if(size <= 0) return -1; + n = size - 1; + + for(i = 0; i < n && 
(c = getc(fp)) != EOF; i++) { + buf[i] = c; + if(c == '\n') { + i++; + break; + } + } + + buf[i] = '\0'; + return i; +} + + +JSString* +couch_readline(JSContext* cx, FILE* fp) +{ + JSString* str; + char* bytes = NULL; + char* tmp = NULL; + size_t used = 0; + size_t byteslen = 256; + size_t readlen = 0; + + bytes = JS_malloc(cx, byteslen); + if(bytes == NULL) return NULL; + + while((readlen = couch_fgets(bytes+used, byteslen-used, fp)) > 0) { + used += readlen; + + if(bytes[used-1] == '\n') { + bytes[used-1] = '\0'; + break; + } + + // Double our buffer and read more. + byteslen *= 2; + tmp = JS_realloc(cx, bytes, byteslen); + if(!tmp) { + JS_free(cx, bytes); + return NULL; + } + + bytes = tmp; + } + + // Treat empty strings specially + if(used == 0) { + JS_free(cx, bytes); + return JSVAL_TO_STRING(JS_GetEmptyStringValue(cx)); + } + + // Shring the buffer to the actual data size + tmp = JS_realloc(cx, bytes, used); + if(!tmp) { + JS_free(cx, bytes); + return NULL; + } + bytes = tmp; + byteslen = used; + + str = dec_string(cx, bytes, byteslen); + JS_free(cx, bytes); + return str; +} + + +JSObject* +couch_readfile(JSContext* cx, FILE* fp) +{ + return NULL; +} + + +void +couch_print(JSContext* cx, uintN argc, jsval* argv) +{ + char *bytes; + uintN i; + + for(i = 0; i < argc; i++) + { + bytes = enc_string(cx, argv[i], NULL); + if(!bytes) return; + + fprintf(stdout, "%s%s", i ? 
" " : "", bytes); + JS_free(cx, bytes); + } + + fputc('\n', stdout); + fflush(stdout); +} + + +void +couch_error(JSContext* cx, const char* mesg, JSErrorReport* report) +{ + if(!report || !JSREPORT_IS_WARNING(report->flags)) + { + fprintf(stderr, "[couchjs] %s\n", mesg); + } +} + + +JSBool +couch_load_funcs(JSContext* cx, JSObject* obj, JSFunctionSpec* funcs) +{ + JSFunctionSpec* f; + for(f = funcs; f->name != NULL; f++) { + if(!JS_DefineFunction(cx, obj, f->name, f->call, f->nargs, f->flags)) { + fprintf(stderr, "Failed to create function: %s\n", f->name); + return JS_FALSE; + } + } + return JS_TRUE; +} + diff --git a/src/couchdb/priv/couch_js/util.h b/src/couchdb/priv/couch_js/util.h new file mode 100644 index 00000000..6caebfa1 --- /dev/null +++ b/src/couchdb/priv/couch_js/util.h @@ -0,0 +1,34 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. 
+ +#ifndef COUCHJS_UTIL_H +#define COUCHJS_UTIL_H + +#include + +typedef struct { + int use_http; + int stack_size; + const char* script_name; + char* script; +} couch_args; + +void couch_usage(); +couch_args* couch_parse_args(int argc, const char* argv[]); +int couch_fgets(char* buf, int size, FILE* fp); +JSString* couch_readline(JSContext* cx, FILE* fp); +void couch_print(JSContext* cx, uintN argc, jsval* argv); +void couch_error(JSContext* cx, const char* mesg, JSErrorReport* report); +JSBool couch_load_funcs(JSContext* cx, JSObject* obj, JSFunctionSpec* funcs); + + +#endif // Included util.h -- cgit v1.2.3 From 860be768c957b0f6954921cd25a9b8465da8617b Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Thu, 8 Sep 2011 11:03:20 +0000 Subject: Fix list-style send() API in show functions when using provides() Also fix ignoring the return value when the send() API isnused. Patch by Alexander Shorin. Closes COUCHDB-1272 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1166625 13f79535-47bb-0310-9956-ffa450edef68 --- THANKS | 2 +- share/server/render.js | 10 +++++++--- share/www/script/test/show_documents.js | 26 ++++++++++++++++++++++++++ 3 files changed, 34 insertions(+), 4 deletions(-) diff --git a/THANKS b/THANKS index aae7991c..3aee0fb9 100644 --- a/THANKS +++ b/THANKS @@ -80,6 +80,6 @@ suggesting improvements or submitting changes. Some of these people are: * Sam Bisbee * Nathan Vander Wilt * Caolan McMahon - + * Alexander Shorin For a list of authors see the `AUTHORS` file. 
diff --git a/share/server/render.js b/share/server/render.js index d207db41..93ff6332 100644 --- a/share/server/render.js +++ b/share/server/render.js @@ -220,10 +220,10 @@ var Render = (function() { resetList(); Mime.resetProvides(); var resp = fun.apply(ddoc, args) || {}; + resp = maybeWrapResponse(resp); // handle list() style API if (chunks.length && chunks.length > 0) { - resp = maybeWrapResponse(resp); resp.headers = resp.headers || {}; for(var header in startResp) { resp.headers[header] = startResp[header] @@ -233,8 +233,12 @@ var Render = (function() { } if (Mime.providesUsed) { - resp = Mime.runProvides(args[1], ddoc); - resp = applyContentType(maybeWrapResponse(resp), Mime.responseContentType); + var provided_resp = Mime.runProvides(args[1], ddoc) || {}; + provided_resp = maybeWrapResponse(provided_resp); + resp.body = (resp.body || "") + chunks.join(""); + resp.body += provided_resp.body || ""; + resp = applyContentType(resp, Mime.responseContentType); + resetList(); } var type = typeOf(resp); diff --git a/share/www/script/test/show_documents.js b/share/www/script/test/show_documents.js index 55ed9698..cf73ed57 100644 --- a/share/www/script/test/show_documents.js +++ b/share/www/script/test/show_documents.js @@ -90,6 +90,24 @@ couchTests.show_documents = function(debug) { start({"X-Couch-Test-Header": "Yeah"}); send("Hey"); }), + "list-api-provides" : stringFun(function(doc, req) { + provides("text", function(){ + send("foo, "); + send("bar, "); + send("baz!"); + }) + }), + "list-api-provides-and-return" : stringFun(function(doc, req) { + provides("text", function(){ + send("4, "); + send("5, "); + send("6, "); + return "7!"; + }) + send("1, "); + send("2, "); + return "3, "; + }), "list-api-mix" : stringFun(function(doc, req) { start({"X-Couch-Test-Header": "Yeah"}); send("Hey "); @@ -395,6 +413,14 @@ couchTests.show_documents = function(debug) { T(xhr.responseText == "Hey"); TEquals("Yeah", xhr.getResponseHeader("X-Couch-Test-Header"), "header should 
be cool"); + // test list() compatible API with provides function + xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/list-api-provides/foo?format=text"); + TEquals(xhr.responseText, "foo, bar, baz!", "should join chunks to response body"); + + // should keep next result order: chunks + return value + provided chunks + provided return value + xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/list-api-provides-and-return/foo?format=text"); + TEquals(xhr.responseText, "1, 2, 3, 4, 5, 6, 7!", "should not break 1..7 range"); + xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/list-api-mix/foo"); T(xhr.responseText == "Hey Dude"); TEquals("Yeah", xhr.getResponseHeader("X-Couch-Test-Header"), "header should be cool"); -- cgit v1.2.3 From 9d0a0a52379f019deb2e479d9d5d87fe72cc98f5 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Thu, 8 Sep 2011 11:13:21 +0000 Subject: COUCHDB-1274 - Use text/javascript content-type for jsonp responses. Backported from trunk @1166618 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1166627 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 1 + NEWS | 1 + share/www/script/test/jsonp.js | 2 ++ src/couchdb/couch_httpd.erl | 35 ++++++++++++++++++++++------------- 4 files changed, 26 insertions(+), 13 deletions(-) diff --git a/CHANGES b/CHANGES index 12efc0b3..64b4f3c1 100644 --- a/CHANGES +++ b/CHANGES @@ -7,6 +7,7 @@ Version 1.1.1 This version has not been released yet. * ETags for views include current sequence if include_docs=true. +* JSONP responses now send "text/javascript" for Content-Type. Version 1.1.0 ------------- diff --git a/NEWS b/NEWS index 4803ba24..d7dc7cf1 100644 --- a/NEWS +++ b/NEWS @@ -13,6 +13,7 @@ Version 1.1.1 This version has not been released yet. * ETags for views include current sequence if include_docs=true. +* JSONP responses now send "text/javascript" for Content-Type. 
Version 1.1.0 ------------- diff --git a/share/www/script/test/jsonp.js b/share/www/script/test/jsonp.js index 9aba7189..d1bca94a 100644 --- a/share/www/script/test/jsonp.js +++ b/share/www/script/test/jsonp.js @@ -48,6 +48,7 @@ couchTests.jsonp = function(debug) { // Test unchunked callbacks. var xhr = CouchDB.request("GET", "/test_suite_db/0?callback=jsonp_no_chunk"); + TEquals("text/javascript", xhr.getResponseHeader("Content-Type")); T(xhr.status == 200); jsonp_flag = 0; eval(xhr.responseText); @@ -70,6 +71,7 @@ couchTests.jsonp = function(debug) { var url = "/test_suite_db/_design/test/_view/all_docs?callback=jsonp_chunk"; xhr = CouchDB.request("GET", url); + TEquals("text/javascript", xhr.getResponseHeader("Content-Type")); T(xhr.status == 200); jsonp_flag = 0; eval(xhr.responseText); diff --git a/src/couchdb/couch_httpd.erl b/src/couchdb/couch_httpd.erl index e472d094..15b85df8 100644 --- a/src/couchdb/couch_httpd.erl +++ b/src/couchdb/couch_httpd.erl @@ -627,25 +627,25 @@ send_json(Req, Code, Value) -> send_json(Req, Code, [], Value). send_json(Req, Code, Headers, Value) -> + initialize_jsonp(Req), DefaultHeaders = [ {"Content-Type", negotiate_content_type(Req)}, {"Cache-Control", "must-revalidate"} ], - Body = [start_jsonp(Req), ?JSON_ENCODE(Value), end_jsonp(), $\n], + Body = [start_jsonp(), ?JSON_ENCODE(Value), end_jsonp(), $\n], send_response(Req, Code, DefaultHeaders ++ Headers, Body). start_json_response(Req, Code) -> start_json_response(Req, Code, []). start_json_response(Req, Code, Headers) -> + initialize_jsonp(Req), DefaultHeaders = [ {"Content-Type", negotiate_content_type(Req)}, {"Cache-Control", "must-revalidate"} ], - start_jsonp(Req), % Validate before starting chunked. - %start_chunked_response(Req, Code, DefaultHeaders ++ Headers). 
{ok, Resp} = start_chunked_response(Req, Code, DefaultHeaders ++ Headers), - case start_jsonp(Req) of + case start_jsonp() of [] -> ok; Start -> send_chunk(Resp, Start) end, @@ -655,7 +655,7 @@ end_json_response(Resp) -> send_chunk(Resp, end_jsonp() ++ [$\n]), last_chunk(Resp). -start_jsonp(Req) -> +initialize_jsonp(Req) -> case get(jsonp) of undefined -> put(jsonp, qs_value(Req, "callback", no_jsonp)); _ -> ok @@ -668,14 +668,9 @@ start_jsonp(Req) -> % make sure jsonp is configured on (default off) case couch_config:get("httpd", "allow_jsonp", "false") of "true" -> - validate_callback(CallBack), - CallBack ++ "("; + validate_callback(CallBack); _Else -> - % this could throw an error message, but instead we just ignore the - % jsonp parameter - % throw({bad_request, <<"JSONP must be configured before using.">>}) - put(jsonp, no_jsonp), - [] + put(jsonp, no_jsonp) end catch Error -> @@ -684,6 +679,13 @@ start_jsonp(Req) -> end end. +start_jsonp() -> + case get(jsonp) of + no_jsonp -> []; + [] -> []; + CallBack -> CallBack ++ "(" + end. + end_jsonp() -> Resp = case get(jsonp) of no_jsonp -> []; @@ -844,7 +846,14 @@ send_redirect(Req, Path) -> Headers = [{"Location", couch_httpd:absolute_uri(Req, Path)}], send_response(Req, 301, Headers, <<>>). -negotiate_content_type(#httpd{mochi_req=MochiReq}) -> +negotiate_content_type(Req) -> + case get(jsonp) of + no_jsonp -> negotiate_content_type1(Req); + [] -> negotiate_content_type1(Req); + _Callback -> "text/javascript" + end. + +negotiate_content_type1(#httpd{mochi_req=MochiReq}) -> %% Determine the appropriate Content-Type header for a JSON response %% depending on the Accept header in the request. 
A request that explicitly %% lists the correct JSON MIME type will get that type, otherwise the -- cgit v1.2.3 From fb94d04c2dd4a9b39f1e961d4509bf07b07834ef Mon Sep 17 00:00:00 2001 From: Randall Leeds Date: Mon, 12 Sep 2011 20:54:32 +0000 Subject: Insane libtool hackery for windows (COUCHDB-1197) Patch by Dave Cottlehuber Backport of r1169920 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1169921 13f79535-47bb-0310-9956-ffa450edef68 --- configure.ac | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/configure.ac b/configure.ac index 5cf37635..b5f8697a 100644 --- a/configure.ac +++ b/configure.ac @@ -424,19 +424,29 @@ AC_CONFIG_FILES([var/Makefile]) AC_OUTPUT -# *sob* - on Windows libtool fails as 'libname_spec' isn't correct (it -# expects GNU style lib names). I can't work out how to configure this -# option sanely, so we pass the script through sed to modify it. -# Also, the erlang cc.sh script doesn't cope well with the '-link' command -# line option libtool provides. -# PLEASE, someone help put this out of its misery!! -# This hackery is being tracked via COUCHDB-440. +# Windows Erlang build tools wrap Microsoft's linker and compiler just enough +# to be able to build Erlang/OTP successfully, but not enough for full +# compatibility with GNU AutoTools. The MS VC compiler and linker are +# hidden from autotools in Erlang's cc.sh and ld.sh wrappers. GNU autoconf +# identifies this dastardly mix as a unix variant, and libtool kindly +# passes incorrect flags and names through to the MS linker. The simplest fix +# is to modify libtool via sed to remove those options. +# As this is only done once at first configure, and subsequent config or source +# changes may trigger a silent reversion to the non-functioning original. +# Changes are; +# 1. replace LIB$name with $name in libname_spec (e.g. libicu -> icu) to ensure +# correct windows versions of .lib and .dlls are found or generated. +# 2. 
remove incompatible \w-link\w from archive_cmds +# 3. remove GNU-style directives to be passed through to the linker +# 4. swap GNU-style shared library flags with MS -dll variant +# This obscene hackery is tracked under COUCHDB-440 and COUCHDB-1197. + if test x${IS_WINDOWS} = xTRUE; then mv libtool libtool.dist - sed -E -e 's,libname_spec="lib\\$name",libname_spec="\\\$name",' \ - -e 's,-link,,' \ - -e 's/-Xlinker --out-implib -Xlinker \\\$lib//' \ - -e 's/(-shared -nostdlib)/-dll \1/' \ + /bin/sed -E -e 's,^libname_spec="lib,libname_spec=",' \ + -e 's,( -link ), ,' \ + -e 's,-Xlinker --out-implib -Xlinker \\\$lib,,' \ + -e 's,(-shared -nostdlib), -dll ,' \ < libtool.dist > libtool # probably would chmod +x if we weren't on windows... fi -- cgit v1.2.3 From 9d93a3ee880bc1a64e9c3e6141c517ed4295661c Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Tue, 13 Sep 2011 18:54:31 +0000 Subject: Allow slashes in doc ids in URLs to _update handlers. This mirrors the behaviour of the _show API. Patch by Christopher Bonhage. Closes COUCHDB-1229 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1170299 13f79535-47bb-0310-9956-ffa450edef68 --- THANKS | 1 + share/www/script/test/update_documents.js | 14 ++++++++++++++ src/couchdb/couch_httpd_show.erl | 8 +++++--- 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/THANKS b/THANKS index 3aee0fb9..76a0c19b 100644 --- a/THANKS +++ b/THANKS @@ -81,5 +81,6 @@ suggesting improvements or submitting changes. Some of these people are: * Nathan Vander Wilt * Caolan McMahon * Alexander Shorin + * Christopher Bonhage For a list of authors see the `AUTHORS` file. 
diff --git a/share/www/script/test/update_documents.js b/share/www/script/test/update_documents.js index 49d3b68a..4d2b29fc 100644 --- a/share/www/script/test/update_documents.js +++ b/share/www/script/test/update_documents.js @@ -165,4 +165,18 @@ couchTests.update_documents = function(debug) { T(xhr.status == 200); T(xhr.responseText.length == 32); + // COUCHDB-1229 - allow slashes in doc ids for update handlers + // /db/_design/doc/_update/handler/doc/id + + var doc = { + _id:"with/slash", + counter:1 + }; + db.save(doc); + xhr = CouchDB.request("PUT", "/test_suite_db/_design/update/_update/bump-counter/with/slash"); + TEquals(201, xhr.status, "should return a 200 status"); + TEquals("
<h1>bumped it!</h1>
", xhr.responseText, "should report bumping"); + + var doc = db.open("with/slash"); + TEquals(2, doc.counter, "counter should be 2"); }; diff --git a/src/couchdb/couch_httpd_show.erl b/src/couchdb/couch_httpd_show.erl index a215b1da..742b0f20 100644 --- a/src/couchdb/couch_httpd_show.erl +++ b/src/couchdb/couch_httpd_show.erl @@ -106,13 +106,15 @@ get_fun_key(DDoc, Type, Name) -> % send_method_not_allowed(Req, "POST,PUT,DELETE,ETC"); handle_doc_update_req(#httpd{ - path_parts=[_, _, _, _, UpdateName, DocId] + path_parts=[_, _, _, _, UpdateName, DocId|Rest] }=Req, Db, DDoc) -> - Doc = try couch_httpd_db:couch_doc_open(Db, DocId, nil, [conflicts]) + DocParts = [DocId|Rest], + DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")), + Doc = try couch_httpd_db:couch_doc_open(Db, DocId1, nil, [conflicts]) catch _ -> nil end, - send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId); + send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId1); handle_doc_update_req(#httpd{ path_parts=[_, _, _, _, UpdateName] -- cgit v1.2.3 From 22e1994d007fb417f198bb36d05b8d69bc6ac905 Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Thu, 15 Sep 2011 16:40:47 +0000 Subject: remove trailing comma git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1171170 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/futon.browse.js | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/share/www/script/futon.browse.js b/share/www/script/futon.browse.js index 56435ae4..651fad69 100644 --- a/share/www/script/futon.browse.js +++ b/share/www/script/futon.browse.js @@ -1275,8 +1275,7 @@ return false; }).prependTo($("a", li)); } - }, - + } }); function encodeAttachment(name) { -- cgit v1.2.3 From dc5c3520db2a1491ffeb9fec1b9c5a5a5694148e Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 15 Sep 2011 23:48:23 +0000 Subject: Make sure view compaction terminates If a view group is compacting and the corresponding database is shutdown by 
the LRU system, then the view compaction is aborted because its couch view group process shutdowns. This could lead to situations where the number of active databases is much higher than max_dbs_open and making it impossible to compact view groups. Issue reported and patch tested by Mike Leddy. Thanks. COUCHDB-1283 This is a backport of revision 1171328 from branch 1.2.x git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1171329 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_httpd_db.erl | 2 +- src/couchdb/couch_view_compactor.erl | 19 ++- src/couchdb/couch_view_group.erl | 29 +--- src/couchdb/couch_view_updater.erl | 13 +- test/etap/200-view-group-no-db-leaks.t | 2 +- test/etap/201-view-group-shutdown.t | 300 +++++++++++++++++++++++++++++++++ test/etap/Makefile.am | 1 + 7 files changed, 337 insertions(+), 29 deletions(-) create mode 100755 test/etap/201-view-group-shutdown.t diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl index db430bbd..0bf97e26 100644 --- a/src/couchdb/couch_httpd_db.erl +++ b/src/couchdb/couch_httpd_db.erl @@ -128,7 +128,7 @@ handle_changes_req1(Req, Db) -> handle_compact_req(#httpd{method='POST',path_parts=[DbName,_,Id|_]}=Req, Db) -> ok = couch_db:check_is_admin(Db), couch_httpd:validate_ctype(Req, "application/json"), - ok = couch_view_compactor:start_compact(DbName, Id), + {ok, _} = couch_view_compactor:start_compact(DbName, Id), send_json(Req, 202, {[{ok, true}]}); handle_compact_req(#httpd{method='POST'}=Req, Db) -> diff --git a/src/couchdb/couch_view_compactor.erl b/src/couchdb/couch_view_compactor.erl index 734605f0..43fdbc98 100644 --- a/src/couchdb/couch_view_compactor.erl +++ b/src/couchdb/couch_view_compactor.erl @@ -20,7 +20,7 @@ %% @doc Compacts the views. GroupId must not include the _design/ prefix start_compact(DbName, GroupId) -> Pid = couch_view:get_group_server(DbName, <<"_design/",GroupId/binary>>), - gen_server:cast(Pid, {start_compact, fun compact_group/3}). 
+ gen_server:call(Pid, {start_compact, fun compact_group/3}). %%============================================================================= %% internal functions @@ -42,7 +42,6 @@ compact_group(Group, EmptyGroup, DbName) -> {ok, Db} = couch_db:open_int(DbName, []), {ok, {Count, _}} = couch_btree:full_reduce(Db#db.fulldocinfo_by_id_btree), - couch_db:close(Db), <<"_design", ShortName/binary>> = GroupId, TaskName = <>, @@ -77,9 +76,23 @@ compact_group(Group, EmptyGroup, DbName) -> views=NewViews, current_seq=Seq }, + maybe_retry_compact(Db, GroupId, NewGroup). +maybe_retry_compact(#db{name = DbName} = Db, GroupId, NewGroup) -> Pid = couch_view:get_group_server(DbName, GroupId), - gen_server:cast(Pid, {compact_done, NewGroup}). + case gen_server:call(Pid, {compact_done, NewGroup}) of + ok -> + couch_db:close(Db); + update -> + {ok, Db2} = couch_db:reopen(Db), + {_, Ref} = erlang:spawn_monitor(fun() -> + couch_view_updater:update(nil, NewGroup, Db2) + end), + receive + {'DOWN', Ref, _, _, {new_group, NewGroup2}} -> + maybe_retry_compact(Db2, GroupId, NewGroup2) + end + end. %% @spec compact_view(View, EmptyView, Retry) -> CompactView compact_view(View, EmptyView) -> diff --git a/src/couchdb/couch_view_group.erl b/src/couchdb/couch_view_group.erl index 448a7dcf..ef9b02ad 100644 --- a/src/couchdb/couch_view_group.erl +++ b/src/couchdb/couch_view_group.erl @@ -151,9 +151,9 @@ handle_call({request_group, RequestSeq}, From, handle_call(request_group_info, _From, State) -> GroupInfo = get_group_info(State), - {reply, {ok, GroupInfo}, State}. 
+ {reply, {ok, GroupInfo}, State}; -handle_cast({start_compact, CompactFun}, #group_state{compactor_pid=nil} +handle_call({start_compact, CompactFun}, _From, #group_state{compactor_pid=nil} = State) -> #group_state{ group = #group{name = GroupId, sig = GroupSig} = Group, @@ -165,12 +165,12 @@ handle_cast({start_compact, CompactFun}, #group_state{compactor_pid=nil} NewGroup = reset_file(Db, Fd, DbName, Group), couch_db:close(Db), Pid = spawn_link(fun() -> CompactFun(Group, NewGroup, DbName) end), - {noreply, State#group_state{compactor_pid = Pid}}; -handle_cast({start_compact, _}, State) -> + {reply, {ok, Pid}, State#group_state{compactor_pid = Pid}}; +handle_call({start_compact, _}, _From, #group_state{compactor_pid=Pid} = State) -> %% compact already running, this is a no-op - {noreply, State}; + {reply, {ok, Pid}, State}; -handle_cast({compact_done, #group{current_seq=NewSeq} = NewGroup}, +handle_call({compact_done, #group{current_seq=NewSeq} = NewGroup}, _From, #group_state{group = #group{current_seq=OldSeq}} = State) when NewSeq >= OldSeq -> #group_state{ @@ -206,31 +206,20 @@ handle_cast({compact_done, #group{current_seq=NewSeq} = NewGroup}, {ok, NewRefCounter} = couch_ref_counter:start([NewGroup#group.fd]), self() ! 
delayed_commit, - {noreply, State#group_state{ + {reply, ok, State#group_state{ group=NewGroup, ref_counter=NewRefCounter, compactor_pid=nil, updater_pid=NewUpdaterPid }}; -handle_cast({compact_done, NewGroup}, State) -> +handle_call({compact_done, NewGroup}, _From, State) -> #group_state{ group = #group{name = GroupId, current_seq = CurrentSeq}, init_args={_RootDir, DbName, _} } = State, ?LOG_INFO("View index compaction still behind for ~s ~s -- current: ~p " ++ "compact: ~p", [DbName, GroupId, CurrentSeq, NewGroup#group.current_seq]), - Pid = spawn_link(fun() -> - {_,Ref} = erlang:spawn_monitor(fun() -> - couch_view_updater:update(nil, NewGroup, DbName) - end), - receive - {'DOWN', Ref, _, _, {new_group, NewGroup2}} -> - #group{name=GroupId} = NewGroup2, - Pid2 = couch_view:get_group_server(DbName, GroupId), - gen_server:cast(Pid2, {compact_done, NewGroup2}) - end - end), - {noreply, State#group_state{compactor_pid = Pid}}; + {reply, update, State}. handle_cast({partial_update, Pid, NewGroup}, #group_state{updater_pid=Pid} = State) -> diff --git a/src/couchdb/couch_view_updater.erl b/src/couchdb/couch_view_updater.erl index 2cc390df..9ecd95c8 100644 --- a/src/couchdb/couch_view_updater.erl +++ b/src/couchdb/couch_view_updater.erl @@ -18,7 +18,15 @@ -spec update(_, #group{}, Dbname::binary()) -> no_return(). 
-update(Owner, Group, DbName) -> +update(Owner, Group, DbName) when is_binary(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + try + update(Owner, Group, Db) + after + couch_db:close(Db) + end; + +update(Owner, Group, #db{name = DbName} = Db) -> #group{ name = GroupName, current_seq = Seq, @@ -26,7 +34,6 @@ update(Owner, Group, DbName) -> } = Group, couch_task_status:add_task(<<"View Group Indexer">>, <>, <<"Starting index update">>), - {ok, Db} = couch_db:open_int(DbName, []), DbPurgeSeq = couch_db:get_purge_seq(Db), Group2 = if DbPurgeSeq == PurgeSeq -> @@ -36,7 +43,6 @@ update(Owner, Group, DbName) -> purge_index(Group, Db); true -> couch_task_status:update(<<"Resetting view index due to lost purge entries.">>), - couch_db:close(Db), exit(reset) end, {ok, MapQueue} = couch_work_queue:new( @@ -74,7 +80,6 @@ update(Owner, Group, DbName) -> couch_task_status:set_update_frequency(0), couch_task_status:update("Finishing."), couch_work_queue:close(MapQueue), - couch_db:close(Db), receive {new_group, NewGroup} -> exit({new_group, NewGroup#group{current_seq=couch_db:get_update_seq(Db)}}) diff --git a/test/etap/200-view-group-no-db-leaks.t b/test/etap/200-view-group-no-db-leaks.t index 9c77f1a8..f506b7dc 100755 --- a/test/etap/200-view-group-no-db-leaks.t +++ b/test/etap/200-view-group-no-db-leaks.t @@ -165,7 +165,7 @@ wait_db_compact_done(N) -> end. compact_view_group() -> - ok = couch_view_compactor:start_compact(test_db_name(), ddoc_name()), + {ok, _} = couch_view_compactor:start_compact(test_db_name(), ddoc_name()), wait_view_compact_done(10). wait_view_compact_done(0) -> diff --git a/test/etap/201-view-group-shutdown.t b/test/etap/201-view-group-shutdown.t new file mode 100755 index 00000000..03feac2b --- /dev/null +++ b/test/etap/201-view-group-shutdown.t @@ -0,0 +1,300 @@ +#!/usr/bin/env escript +%% -*- erlang -*- + +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-record(user_ctx, { + name = null, + roles = [], + handler +}). + +-record(db, { + main_pid = nil, + update_pid = nil, + compactor_pid = nil, + instance_start_time, % number of microsecs since jan 1 1970 as a binary string + fd, + fd_ref_counter, + header = nil, + committed_update_seq, + fulldocinfo_by_id_btree, + docinfo_by_seq_btree, + local_docs_btree, + update_seq, + name, + filepath, + validate_doc_funs = [], + security = [], + security_ptr = nil, + user_ctx = #user_ctx{}, + waiting_delayed_commit = nil, + revs_limit = 1000, + fsync_options = [], + is_sys_db = false +}). + +main_db_name() -> <<"couch_test_view_group_shutdown">>. + + +main(_) -> + test_util:init_code_path(), + + etap:plan(17), + case (catch test()) of + ok -> + etap:end_tests(); + Other -> + etap:diag(io_lib:format("Test died abnormally: ~p", [Other])), + etap:bail(Other) + end, + ok. + + +test() -> + couch_server_sup:start_link(test_util:config_files()), + ok = couch_config:set("couchdb", "max_dbs_open", "3", false), + ok = couch_config:set("couchdb", "delayed_commits", "false", false), + crypto:start(), + + % Test that while a view group is being compacted its database can not + % be closed by the database LRU system. + test_view_group_compaction(), + + couch_server_sup:stop(), + ok. 
+ + +test_view_group_compaction() -> + {ok, DbWriter3} = create_db(<<"couch_test_view_group_shutdown_w3">>), + ok = couch_db:close(DbWriter3), + + {ok, MainDb} = create_main_db(), + ok = couch_db:close(MainDb), + + {ok, DbWriter1} = create_db(<<"couch_test_view_group_shutdown_w1">>), + ok = couch_db:close(DbWriter1), + + {ok, DbWriter2} = create_db(<<"couch_test_view_group_shutdown_w2">>), + ok = couch_db:close(DbWriter2), + + Writer1 = spawn_writer(DbWriter1#db.name), + Writer2 = spawn_writer(DbWriter2#db.name), + etap:is(is_process_alive(Writer1), true, "Spawned writer 1"), + etap:is(is_process_alive(Writer2), true, "Spawned writer 2"), + + etap:is(get_writer_status(Writer1), ok, "Writer 1 opened his database"), + etap:is(get_writer_status(Writer2), ok, "Writer 2 opened his database"), + + {ok, CompactPid} = couch_view_compactor:start_compact( + MainDb#db.name, <<"foo">>), + MonRef = erlang:monitor(process, CompactPid), + + % Add some more docs to database and trigger view update + {ok, MainDb2} = couch_db:open_int(MainDb#db.name, []), + ok = populate_main_db(MainDb2, 3, 3), + update_view(MainDb2#db.name, <<"_design/foo">>, <<"foo">>), + ok = couch_db:close(MainDb2), + + % Assuming the view compaction takes more than 50ms to complete + ok = timer:sleep(50), + Writer3 = spawn_writer(DbWriter3#db.name), + etap:is(is_process_alive(Writer3), true, "Spawned writer 3"), + + etap:is(get_writer_status(Writer3), {error, all_dbs_active}, + "Writer 3 got {error, all_dbs_active} when opening his database"), + + etap:is(is_process_alive(Writer1), true, "Writer 1 still alive"), + etap:is(is_process_alive(Writer2), true, "Writer 2 still alive"), + etap:is(is_process_alive(Writer3), true, "Writer 3 still alive"), + + receive + {'DOWN', MonRef, process, CompactPid, normal} -> + etap:diag("View group compaction successful"), + ok; + {'DOWN', MonRef, process, CompactPid, _Reason} -> + etap:bail("Failure compacting view group") + end, + + ok = timer:sleep(2000), + + 
etap:is(writer_try_again(Writer3), ok, + "Told writer 3 to try open his database again"), + etap:is(get_writer_status(Writer3), ok, + "Writer 3 was able to open his database"), + + etap:is(is_process_alive(Writer1), true, "Writer 1 still alive"), + etap:is(is_process_alive(Writer2), true, "Writer 2 still alive"), + etap:is(is_process_alive(Writer3), true, "Writer 3 still alive"), + + etap:is(stop_writer(Writer1), ok, "Stopped writer 1"), + etap:is(stop_writer(Writer2), ok, "Stopped writer 2"), + etap:is(stop_writer(Writer3), ok, "Stopped writer 3"), + + delete_db(MainDb), + delete_db(DbWriter1), + delete_db(DbWriter2), + delete_db(DbWriter3). + + +create_main_db() -> + {ok, Db} = create_db(main_db_name()), + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/foo">>}, + {<<"language">>, <<"javascript">>}, + {<<"views">>, {[ + {<<"foo">>, {[ + {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>} + ]}}, + {<<"foo2">>, {[ + {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>} + ]}}, + {<<"foo3">>, {[ + {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>} + ]}}, + {<<"foo4">>, {[ + {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>} + ]}}, + {<<"foo5">>, {[ + {<<"map">>, <<"function(doc) { emit(doc._id, doc); }">>} + ]}} + ]}} + ]}), + {ok, _} = couch_db:update_doc(Db, DDoc, []), + ok = populate_main_db(Db, 1000, 20000), + update_view(Db#db.name, <<"_design/foo">>, <<"foo">>), + {ok, Db}. + + +populate_main_db(Db, BatchSize, N) when N > 0 -> + Docs = lists:map( + fun(_) -> + couch_doc:from_json_obj({[ + {<<"_id">>, couch_uuids:new()}, + {<<"value">>, base64:encode(crypto:rand_bytes(1000))} + ]}) + end, + lists:seq(1, BatchSize)), + {ok, _} = couch_db:update_docs(Db, Docs, []), + populate_main_db(Db, BatchSize, N - length(Docs)); +populate_main_db(_Db, _, _) -> + ok. 
+ + +update_view(DbName, DDocName, ViewName) -> + % Use a dedicated process - we can't explicitly drop the #group ref counter + Pid = spawn(fun() -> + {ok, Db} = couch_db:open_int(DbName, []), + couch_view:get_map_view(Db, DDocName, ViewName, false), + ok = couch_db:close(Db) + end), + MonRef = erlang:monitor(process, Pid), + receive + {'DOWN', MonRef, process, Pid, normal} -> + etap:diag("View group updated"), + ok; + {'DOWN', MonRef, process, Pid, _Reason} -> + etap:bail("Failure updating view group") + end. + + +create_db(DbName) -> + {ok, Db} = couch_db:create( + DbName, + [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]), + {ok, Db}. + + +delete_db(#db{name = DbName, main_pid = Pid}) -> + ok = couch_server:delete( + DbName, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]), + MonRef = erlang:monitor(process, Pid), + receive + {'DOWN', MonRef, process, Pid, _Reason} -> + ok + after 30000 -> + etap:bail("Timeout deleting database") + end. + + +spawn_writer(DbName) -> + Parent = self(), + spawn(fun() -> + process_flag(priority, high), + writer_loop(DbName, Parent) + end). + + +get_writer_status(Writer) -> + Ref = make_ref(), + Writer ! {get_status, Ref}, + receive + {db_open, Ref} -> + ok; + {db_open_error, Error, Ref} -> + Error + after 5000 -> + timeout + end. + + +writer_try_again(Writer) -> + Ref = make_ref(), + Writer ! {try_again, Ref}, + receive + {ok, Ref} -> + ok + after 5000 -> + timeout + end. + + +stop_writer(Writer) -> + Ref = make_ref(), + Writer ! {stop, Ref}, + receive + {ok, Ref} -> + ok + after 5000 -> + etap:bail("Timeout stopping writer process") + end. + + +% Just keep the database open, no need to actually do something on it. +writer_loop(DbName, Parent) -> + case couch_db:open_int(DbName, []) of + {ok, Db} -> + writer_loop_1(Db, Parent); + Error -> + writer_loop_2(DbName, Parent, Error) + end. + +writer_loop_1(Db, Parent) -> + receive + {get_status, Ref} -> + Parent ! 
{db_open, Ref}, + writer_loop_1(Db, Parent); + {stop, Ref} -> + ok = couch_db:close(Db), + Parent ! {ok, Ref} + end. + +writer_loop_2(DbName, Parent, Error) -> + receive + {get_status, Ref} -> + Parent ! {db_open_error, Error, Ref}, + writer_loop_2(DbName, Parent, Error); + {try_again, Ref} -> + Parent ! {ok, Ref}, + writer_loop(DbName, Parent) + end. diff --git a/test/etap/Makefile.am b/test/etap/Makefile.am index c3a4ddab..313945d7 100644 --- a/test/etap/Makefile.am +++ b/test/etap/Makefile.am @@ -88,5 +88,6 @@ EXTRA_DIST = \ 180-http-proxy.t \ 190-oauth.t \ 200-view-group-no-db-leaks.t \ + 201-view-group-shutdown.t \ 210-os-proc-pool.t -- cgit v1.2.3 From 8a7696145241c4752379271c8253fa0c15093322 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Thu, 22 Sep 2011 20:05:28 +0000 Subject: fix port conversion in vhost fix port conversion, don't forget to use substr function. spotted by @rnewson on irc, again. thanks. patch by benoitc. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1174358 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_httpd_vhost.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/couchdb/couch_httpd_vhost.erl b/src/couchdb/couch_httpd_vhost.erl index a9517600..03dd02ae 100644 --- a/src/couchdb/couch_httpd_vhost.erl +++ b/src/couchdb/couch_httpd_vhost.erl @@ -360,8 +360,8 @@ split_host_port(HostAsString) -> {split_host(HostAsString), '*'}; N -> HostPart = string:substr(HostAsString, 1, N-1), - case (catch erlang:list_to_integer(HostAsString, N+1, - length(HostAsString))) of + case (catch erlang:list_to_integer(string:substr(HostAsString, + N+1, length(HostAsString)))) of {'EXIT', _} -> {split_host(HostAsString), '*'}; Port -> -- cgit v1.2.3 From befbdfb11f45bd2a5ccffb6b0d5ac04435ac9e55 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Fri, 23 Sep 2011 19:50:51 +0000 Subject: Write header before handing over compacted group Writing the header first ensures that the index will not be reset 
if the server crashes in between the handoff and the next delayed_commit. See COUCHDB-994 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1174979 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_view_compactor.erl | 3 +++ src/couchdb/couch_view_group.erl | 3 +++ 2 files changed, 6 insertions(+) diff --git a/src/couchdb/couch_view_compactor.erl b/src/couchdb/couch_view_compactor.erl index 43fdbc98..0063ef1d 100644 --- a/src/couchdb/couch_view_compactor.erl +++ b/src/couchdb/couch_view_compactor.erl @@ -79,6 +79,9 @@ compact_group(Group, EmptyGroup, DbName) -> maybe_retry_compact(Db, GroupId, NewGroup). maybe_retry_compact(#db{name = DbName} = Db, GroupId, NewGroup) -> + #group{sig = Sig, fd = NewFd} = NewGroup, + Header = {Sig, couch_view_group:get_index_header_data(NewGroup)}, + ok = couch_file:write_header(NewFd, Header), Pid = couch_view:get_group_server(DbName, GroupId), case gen_server:call(Pid, {compact_done, NewGroup}) of ok -> diff --git a/src/couchdb/couch_view_group.erl b/src/couchdb/couch_view_group.erl index ef9b02ad..9c5372a8 100644 --- a/src/couchdb/couch_view_group.erl +++ b/src/couchdb/couch_view_group.erl @@ -17,6 +17,9 @@ -export([start_link/1, request_group/2, request_group_info/1]). -export([open_db_group/2, open_temp_group/5, design_doc_to_view_group/1,design_root/2]). +%% Exports for the compactor +-export([get_index_header_data/1]). + %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). -- cgit v1.2.3