From f6fd760c17cb5d5d49211be4b4ff92e797e83157 Mon Sep 17 00:00:00 2001
From: John Christopher Anderson
Date: Sat, 4 Jul 2009 15:47:47 +0000
Subject: Name view index files by their function hashes for no downtime
 deploys. Closes COUCHDB-218

Adds ability to switch view indexes on the fly by building the index from a
"staging" design doc, and then COPYing the staging doc to the production
doc's id. Since indexes are referenced by view definition, the new version of
the production design doc will point immediately to the index files already
built in staging. Please use and give feedback.

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@791140 13f79535-47bb-0310-9956-ffa450edef68
---
 src/couchdb/couch_view_group.erl | 142 ++++++++++++++++++++++++++-------------
 1 file changed, 97 insertions(+), 45 deletions(-)

(limited to 'src/couchdb/couch_view_group.erl')

diff --git a/src/couchdb/couch_view_group.erl b/src/couchdb/couch_view_group.erl
index 0ab1077e..0b390b22 100644
--- a/src/couchdb/couch_view_group.erl
+++ b/src/couchdb/couch_view_group.erl
@@ -14,8 +14,8 @@
 -behaviour(gen_server).
 
 %% API
--export([start_link/1, request_group/2]).
--export([design_doc_to_view_group/1]).
+-export([start_link/1, request_group/2, request_group_info/1]).
+-export([open_db_group/2, open_temp_group/5, design_doc_to_view_group/1,design_root/2]).
 
 %% gen_server callbacks
 -export([init/1, handle_call/3, handle_cast/2, handle_info/2,
@@ -47,6 +47,22 @@ request_group(Pid, Seq) ->
         throw(Error)
     end.
 
+request_group_info(Pid) ->
+    case gen_server:call(Pid, request_group_info) of
+    {ok, GroupInfoList} ->
+        {ok, GroupInfoList};
+    Error ->
+        throw(Error)
+    end.
+
+request_index_files(Pid) ->
+    case gen_server:call(Pid, request_index_files) of
+    {ok, Filelist} ->
+        {ok, Filelist};
+    Error ->
+        throw(Error)
+    end.
+
 % from template
 
 start_link(InitArgs) ->
@@ -67,9 +83,7 @@ start_link(InitArgs) ->
         Error
     end.
 
-% init differentiates between temp and design_doc views. It creates a closure
-% which spawns the appropriate view_updater. (It might also spawn the first
-% view_updater run.)
+% init creates a closure which spawns the appropriate view_updater.
 init({InitArgs, ReturnPid, Ref}) ->
     process_flag(trap_exit, true),
     case prepare_group(InitArgs, false) of
@@ -95,8 +109,7 @@ init({InitArgs, ReturnPid, Ref}) ->
 % view group, and the couch_view_updater, which when spawned, updates the
 % group and sends it back here. We employ a caching mechanism, so that between
 % database writes, we don't have to spawn a couch_view_updater with every view
-% request. This should give us more control, and the ability to request view
-% statuses eventually.
+% request.
 
 % The caching mechanism: each request is submitted with a seq_id for the
 % database at the time it was read. We guarantee to return a view from that
@@ -137,8 +150,14 @@ handle_call({request_group, RequestSeq}, From,
         #group_state{waiting_list=WaitList}=State) ->
     {noreply, State#group_state{
         waiting_list=[{From, RequestSeq}|WaitList]
-        }, infinity}.
+        }, infinity};
+handle_call(request_group_info, _From, #group_state{
+        group = Group,
+        compactor_pid = CompactorPid
+    } = State) ->
+    GroupInfo = get_group_info(Group, CompactorPid),
+    {reply, {ok, GroupInfo}, State}.
 
 handle_cast({start_compact, CompactFun}, #group_state{
        compactor_pid=nil, group=Group, init_args={view, RootDir, DbName, GroupId}
        } = State) ->
@@ -154,15 +173,14 @@ handle_cast({start_compact, _}, State) ->
 
 handle_cast({compact_done, #group{fd=NewFd, current_seq=NewSeq} = NewGroup},
        #group_state{
-            group = #group{current_seq=OldSeq} = Group,
-            init_args = {view, RootDir, DbName, GroupId},
+            group = #group{current_seq=OldSeq, sig=GroupSig} = Group,
+            init_args = {view, RootDir, DbName, _GroupId},
             updater_pid = nil,
             ref_counter = RefCounter
        } = State) when NewSeq >= OldSeq ->
     ?LOG_INFO("View Group compaction complete", []),
-    BaseName = RootDir ++ "/." ++ ?b2l(DbName) ++ ?b2l(GroupId),
-    FileName = BaseName ++ ".view",
-    CompactName = BaseName ++".compact.view",
+    FileName = index_file_name(RootDir, DbName, GroupSig),
+    CompactName = index_file_name(compact, RootDir, DbName, GroupSig),
     file:delete(FileName),
     ok = file:rename(CompactName, FileName),
@@ -224,8 +242,7 @@ handle_info({'EXIT', FromPid, {new_group, #group{db=Db}=Group}},
        waiting_list=WaitList, waiting_commit=WaitingCommit}=State) when UpPid == FromPid ->
     ok = couch_db:close(Db),
-
-    if Group#group.type == view andalso not WaitingCommit ->
+    if not WaitingCommit ->
         erlang:send_after(1000, self(), delayed_commit);
     true -> ok
     end,
@@ -306,12 +323,13 @@ reply_all(#group_state{waiting_list=WaitList}=State, Reply) ->
     [catch gen_server:reply(Pid, Reply) || {Pid, _} <- WaitList],
     State#group_state{waiting_list=[]}.
 
-prepare_group({view, RootDir, DbName, GroupId}, ForceReset)->
-    case open_db_group(DbName, GroupId) of
-    {ok, Db, #group{sig=Sig}=Group} ->
-        case open_index_file(RootDir, DbName, GroupId) of
+prepare_group({RootDir, DbName, #group{sig=Sig}=Group}, ForceReset)->
+    case couch_db:open(DbName, []) of
+    {ok, Db} ->
+        case open_index_file(RootDir, DbName, Sig) of
         {ok, Fd} ->
             if ForceReset ->
+                % this can happen if we missed a purge
                 {ok, reset_file(Db, Fd, DbName, Group)};
             true ->
                 % 09 UPGRADE CODE
@@ -321,32 +339,18 @@ prepare_group({view, RootDir, DbName, GroupId}, ForceReset)->
                 % sigs match!
                 {ok, init_group(Db, Fd, Group, HeaderInfo)};
             _ ->
+                % this happens on a new file
                 {ok, reset_file(Db, Fd, DbName, Group)}
             end
         end;
     Error ->
-        catch delete_index_file(RootDir, DbName, GroupId),
+        catch delete_index_file(RootDir, DbName, Sig),
         Error
     end;
-    Error ->
-        catch delete_index_file(RootDir, DbName, GroupId),
-        Error
-    end;
-prepare_group({slow_view, DbName, Fd, Lang, DesignOptions, MapSrc, RedSrc}, _ForceReset) ->
-    case couch_db:open(DbName, []) of
-    {ok, Db} ->
-        View = #view{map_names=[<<"_temp">>],
-            id_num=0,
-            btree=nil,
-            def=MapSrc,
-            reduce_funs= if RedSrc==[] -> []; true -> [{<<"_temp">>, RedSrc}] end},
-        {ok, init_group(Db, Fd, #group{type=slow_view, name= <<"_temp">>, db=Db,
-            views=[View], def_lang=Lang, design_options=DesignOptions}, nil)};
-    Error ->
-        Error
+    Else ->
+        Else
     end.
-
 
 get_index_header_data(#group{current_seq=Seq, purge_seq=PurgeSeq,
            id_btree=IdBtree,views=Views}) ->
     ViewStates = [couch_btree:get_state(Btree) || #view{btree=Btree} <- Views],
@@ -355,14 +359,47 @@ get_index_header_data(#group{current_seq=Seq, purge_seq=PurgeSeq,
            id_btree_state=couch_btree:get_state(IdBtree),
            view_states=ViewStates}.
 
+hex_sig(GroupSig) ->
+    couch_util:to_hex(?b2l(GroupSig)).
+
+design_root(RootDir, DbName) ->
+    RootDir ++ "/." ++ ?b2l(DbName) ++ "_design/".
+
+index_file_name(RootDir, DbName, GroupSig) ->
+    design_root(RootDir, DbName) ++ hex_sig(GroupSig) ++".view".
 
-open_index_file(RootDir, DbName, GroupId) ->
-    FileName = RootDir ++ "/." ++ ?b2l(DbName) ++ ?b2l(GroupId) ++".view",
+index_file_name(compact, RootDir, DbName, GroupSig) ->
+    design_root(RootDir, DbName) ++ hex_sig(GroupSig) ++".compact.view".
+
+
+open_index_file(RootDir, DbName, GroupSig) ->
+    FileName = index_file_name(RootDir, DbName, GroupSig),
     case couch_file:open(FileName) of
     {ok, Fd} -> {ok, Fd};
     {error, enoent} -> couch_file:open(FileName, [create]);
     Error -> Error
     end.
+
+open_temp_group(DbName, Language, DesignOptions, MapSrc, RedSrc) ->
+    case couch_db:open(DbName, []) of
+    {ok, Db} ->
+        View = #view{map_names=[<<"_temp">>],
+            id_num=0,
+            btree=nil,
+            def=MapSrc,
+            reduce_funs= if RedSrc==[] -> []; true -> [{<<"_temp">>, RedSrc}] end},
+
+        {ok, Db, #group{
+            name = <<"_temp">>,
+            db=Db,
+            views=[View],
+            def_lang=Language,
+            design_options=DesignOptions,
+            sig = erlang:md5(term_to_binary({[View], Language, DesignOptions}))
+        }};
+    Error ->
+        Error
+    end.
 
 open_db_group(DbName, GroupId) ->
     case couch_db:open(DbName, []) of
@@ -378,12 +415,28 @@ open_db_group(DbName, GroupId) ->
         Else
     end.
 
+get_group_info(#group{
+        fd = Fd,
+        sig = GroupSig,
+        def_lang = Lang
+    }, CompactorPid) ->
+    {ok, Size} = couch_file:bytes(Fd),
+    [
+        {signature, ?l2b(hex_sig(GroupSig))},
+        {language, Lang},
+        {disk_size, Size},
+        {compact_running, CompactorPid /= nil}
+    ].
+
 % maybe move to another module
 design_doc_to_view_group(#doc{id=Id,body={Fields}}) ->
     Language = proplists:get_value(<<"language">>, Fields, <<"javascript">>),
     {DesignOptions} = proplists:get_value(<<"options">>, Fields, {[]}),
     {RawViews} = proplists:get_value(<<"views">>, Fields, {[]}),
-
+    % sort the views by name to avoid spurious signature changes
+    SortedRawViews = lists:sort(fun({Name1, _}, {Name2, _}) ->
+            Name1 >= Name2
+        end, RawViews),
     % add the views to a dictionary object, with the map source as the key
     DictBySrc =
     lists:foldl(
@@ -402,7 +455,7 @@ design_doc_to_view_group(#doc{id=Id,body={Fields}}) ->
                 View#view{reduce_funs=[{Name,RedSrc}|View#view.reduce_funs]}
             end,
             dict:store(MapSrc, View2, DictBySrcAcc)
-        end, dict:new(), RawViews),
+        end, dict:new(), SortedRawViews),
     % number the views
     {Views, _N} = lists:mapfoldl(
         fun({_Src, View}, N) ->
@@ -410,7 +463,7 @@ design_doc_to_view_group(#doc{id=Id,body={Fields}}) ->
         end, 0, dict:to_list(DictBySrc)),
     Group = #group{name=Id, views=Views, def_lang=Language,
        design_options=DesignOptions},
-    Group#group{sig=erlang:md5(term_to_binary(Group))}.
+    Group#group{sig=erlang:md5(term_to_binary({Views, Language, DesignOptions}))}.
 
 reset_group(#group{views=Views}=Group) ->
     Views2 = [View#view{btree=nil} || View <- Views],
@@ -423,9 +476,8 @@ reset_file(Db, Fd, DbName, #group{sig=Sig,name=Name} = Group) ->
     ok = couch_file:write_header(Fd, {Sig, nil}),
     init_group(Db, Fd, reset_group(Group), nil).
 
-delete_index_file(RootDir, DbName, GroupId) ->
-    file:delete(RootDir ++ "/." ++ binary_to_list(DbName)
-        ++ binary_to_list(GroupId) ++ ".view").
+delete_index_file(RootDir, DbName, GroupSig) ->
+    file:delete(index_file_name(RootDir, DbName, GroupSig)).
 
 init_group(Db, Fd, #group{views=Views}=Group, nil) ->
     init_group(Db, Fd, Group,
--
cgit v1.2.3
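
The key property introduced by this patch is that an index file's name is derived from the view definitions themselves (md5 of {Views, Language, DesignOptions}, hex-encoded) rather than from the design doc id. The sketch below is a hypothetical standalone module, not part of the patch: it inlines the hex encoding that the patch delegates to couch_util:to_hex/1 and mirrors the naming scheme of design_root/2 and index_file_name/3, so you can see that two design docs with byte-identical views, language, and options resolve to the same .view file.

-module(view_sig_sketch).
-export([index_file_name/5]).

%% Hex-encode an md5 digest; stands in for couch_util:to_hex/1 in the patch.
hex_sig(GroupSig) ->
    lists:flatten([io_lib:format("~2.16.0b", [Byte]) || <<Byte>> <= GroupSig]).

%% Mirrors the patch's layout:
%% <RootDir>/.<DbName>_design/<hex md5 of {Views, Language, DesignOptions}>.view
index_file_name(RootDir, DbName, Views, Language, DesignOptions) ->
    Sig = erlang:md5(term_to_binary({Views, Language, DesignOptions})),
    RootDir ++ "/." ++ binary_to_list(DbName) ++ "_design/"
        ++ hex_sig(Sig) ++ ".view".

Because the name depends only on the view functions, the deploy described in the commit message works without downtime: push the new views to a separate staging design doc, let its index build, then COPY the staging doc over the production doc's id; the production doc's signature then matches the already-built file. The choice of staging doc name and the exact HTTP steps are left to the operator and are not prescribed by this patch.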