Diffstat (limited to 'src/couchdb/couch_db_updater.erl')
-rw-r--r--  src/couchdb/couch_db_updater.erl | 251
1 file changed, 155 insertions(+), 96 deletions(-)
diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl
index 7292221a..633ae230 100644
--- a/src/couchdb/couch_db_updater.erl
+++ b/src/couchdb/couch_db_updater.erl
@@ -20,25 +20,38 @@
init({MainPid, DbName, Filepath, Fd, Options}) ->
+ process_flag(trap_exit, true),
case lists:member(create, Options) of
true ->
% create a new header and write it to the file
Header = #db_header{},
ok = couch_file:write_header(Fd, Header),
% delete any old compaction files that might be hanging around
- file:delete(Filepath ++ ".compact");
+ RootDir = couch_config:get("couchdb", "database_dir", "."),
+ couch_file:delete(RootDir, Filepath ++ ".compact");
false ->
ok = couch_file:upgrade_old_header(Fd, <<$g, $m, $k, 0>>), % 09 UPGRADE CODE
- {ok, Header} = couch_file:read_header(Fd)
+ case couch_file:read_header(Fd) of
+ {ok, Header} ->
+ ok;
+ no_valid_header ->
+ % create a new header and write it to the file
+ Header = #db_header{},
+ ok = couch_file:write_header(Fd, Header),
+ % delete any old compaction files that might be hanging around
+ file:delete(Filepath ++ ".compact")
+ end
end,
Db = init_db(DbName, Filepath, Fd, Header),
Db2 = refresh_validate_doc_funs(Db),
- {ok, Db2#db{main_pid=MainPid}}.
+ {ok, Db2#db{main_pid = MainPid, is_sys_db = lists:member(sys_db, Options)}}.
-terminate(Reason, _Srv) ->
- couch_util:terminate_linked(Reason),
+terminate(_Reason, Db) ->
+ couch_file:close(Db#db.fd),
+ couch_util:shutdown_sync(Db#db.compactor_pid),
+ couch_util:shutdown_sync(Db#db.fd_ref_counter),
ok.
handle_call(get_db, _From, Db) ->
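
Note: with trap_exit set, the updater now owns the shutdown of its helpers
instead of relying on link propagation: terminate/2 closes the fd, then
synchronously stops the compactor and the fd ref counter.
couch_util:shutdown_sync/1 is not part of this diff; a minimal sketch,
assuming it follows the usual monitor/exit/receive pattern (see
couch_util.erl for the real version):

    shutdown_sync(Pid) when not is_pid(Pid) ->
        ok;  % compactor_pid may be nil
    shutdown_sync(Pid) ->
        MRef = erlang:monitor(process, Pid),
        unlink(Pid),
        exit(Pid, shutdown),
        receive
            {'DOWN', MRef, process, Pid, _Reason} -> ok
        end.

The effect is that terminate/2 cannot return while either helper still
holds the file descriptor.
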
@@ -53,9 +66,9 @@ handle_call(increment_update_seq, _From, Db) ->
couch_db_update_notifier:notify({updated, Db#db.name}),
{reply, {ok, Db2#db.update_seq}, Db2};
-handle_call({set_admins, NewAdmins}, _From, Db) ->
- {ok, Ptr} = couch_file:append_term(Db#db.fd, NewAdmins),
- Db2 = commit_data(Db#db{admins=NewAdmins, admins_ptr=Ptr,
+handle_call({set_security, NewSec}, _From, Db) ->
+ {ok, Ptr} = couch_file:append_term(Db#db.fd, NewSec),
+ Db2 = commit_data(Db#db{security=NewSec, security_ptr=Ptr,
update_seq=Db#db.update_seq+1}),
ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
{reply, ok, Db2};
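
Note: the per-db admins list is generalized to a security object, stored
through the same append-term-then-commit path. A hypothetical caller (the
exact EJSON shape is whatever couch_db:set_security/2 hands through; this
one is illustrative only):

    SecObj = {[{<<"admins">>, {[{<<"names">>, [<<"bob">>]}]}}]},
    ok = gen_server:call(Db#db.update_pid, {set_security, SecObj}, infinity).
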
@@ -130,21 +143,22 @@ handle_call({purge_docs, IdRevs}, _From, Db) ->
ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
couch_db_update_notifier:notify({updated, Db#db.name}),
- {reply, {ok, Db2#db.update_seq, IdRevsPurged}, Db2}.
-
-
-handle_cast(start_compact, Db) ->
+ {reply, {ok, (Db2#db.header)#db_header.purge_seq, IdRevsPurged}, Db2};
+handle_call(start_compact, _From, Db) ->
case Db#db.compactor_pid of
nil ->
?LOG_INFO("Starting compaction for db \"~s\"", [Db#db.name]),
Pid = spawn_link(fun() -> start_copy_compact(Db) end),
Db2 = Db#db{compactor_pid=Pid},
ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}),
- {noreply, Db2};
+ {reply, ok, Db2};
_ ->
% compaction currently running, this is a no-op
- {noreply, Db}
- end;
+ {reply, ok, Db}
+ end.
+
+
+
handle_cast({compact_done, CompactFilepath}, #db{filepath=Filepath}=Db) ->
{ok, NewFd} = couch_file:open(CompactFilepath),
{ok, NewHeader} = couch_file:read_header(NewFd),
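
Note: two behavioral changes in this hunk. purge_docs now replies with the
header's purge_seq rather than the db update_seq, and start_compact moves
from handle_cast to handle_call, so callers block until the compactor is
actually spawned (or learn it was a no-op because one is already running).
From the caller's side, roughly:

    %% before: gen_server:cast(UpdatePid, start_compact) -- no feedback
    %% after: returns once compactor_pid is set and broadcast
    ok = gen_server:call(Db#db.update_pid, start_compact).
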
@@ -158,17 +172,25 @@ handle_cast({compact_done, CompactFilepath}, #db{filepath=Filepath}=Db) ->
fun(Value, _Offset, Acc) -> {ok, [Value | Acc]} end, []),
{ok, NewLocalBtree} = couch_btree:add(NewDb#db.local_docs_btree, LocalDocs),
- NewDb2 = commit_data( NewDb#db{local_docs_btree=NewLocalBtree,
- main_pid = Db#db.main_pid,filepath = Filepath}),
+ NewDb2 = commit_data(NewDb#db{
+ local_docs_btree = NewLocalBtree,
+ main_pid = Db#db.main_pid,
+ filepath = Filepath,
+ instance_start_time = Db#db.instance_start_time,
+ revs_limit = Db#db.revs_limit
+ }),
?LOG_DEBUG("CouchDB swapping files ~s and ~s.",
[Filepath, CompactFilepath]),
- file:delete(Filepath),
+ RootDir = couch_config:get("couchdb", "database_dir", "."),
+ couch_file:delete(RootDir, Filepath),
ok = file:rename(CompactFilepath, Filepath),
close_db(Db),
- ok = gen_server:call(Db#db.main_pid, {db_updated, NewDb2}),
+ NewDb3 = refresh_validate_doc_funs(NewDb2),
+ ok = gen_server:call(Db#db.main_pid, {db_updated, NewDb3}, infinity),
+ couch_db_update_notifier:notify({compacted, NewDb3#db.name}),
?LOG_INFO("Compaction for db \"~s\" completed.", [Db#db.name]),
- {noreply, NewDb2#db{compactor_pid=nil}};
+ {noreply, NewDb3#db{compactor_pid=nil}};
false ->
?LOG_INFO("Compaction file still behind main file "
"(update seq=~p. compact update seq=~p). Retrying.",
@@ -180,11 +202,11 @@ handle_cast({compact_done, CompactFilepath}, #db{filepath=Filepath}=Db) ->
end.
-handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
+handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
FullCommit}, Db) ->
GroupedDocs2 = [[{Client, D} || D <- DocGroup] || DocGroup <- GroupedDocs],
if NonRepDocs == [] ->
- {GroupedDocs3, Clients, FullCommit2} = collect_updates(GroupedDocs2,
+ {GroupedDocs3, Clients, FullCommit2} = collect_updates(GroupedDocs2,
[Client], MergeConflicts, FullCommit);
true ->
GroupedDocs3 = GroupedDocs2,
@@ -192,7 +214,7 @@ handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
Clients = [Client]
end,
NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs],
- try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts,
+ try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts,
FullCommit2) of
{ok, Db2} ->
ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}),
@@ -207,6 +229,9 @@ handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
[catch(ClientPid ! {retry, self()}) || ClientPid <- Clients],
{noreply, Db}
end;
+handle_info(delayed_commit, #db{waiting_delayed_commit=nil}=Db) ->
+ % no outstanding delayed commits, ignore
+ {noreply, Db};
handle_info(delayed_commit, Db) ->
case commit_data(Db) of
Db ->
@@ -214,7 +239,11 @@ handle_info(delayed_commit, Db) ->
Db2 ->
ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
{noreply, Db2}
- end.
+ end;
+handle_info({'EXIT', _Pid, normal}, Db) ->
+ {noreply, Db};
+handle_info({'EXIT', _Pid, Reason}, Db) ->
+ {stop, Reason, Db}.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
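
Note: these clauses pair with the process_flag(trap_exit, true) added in
init/1. Once exits are trapped, even a normal exit of a linked process
arrives as a message and must be consumed, while abnormal exits stop the
updater with the original reason so terminate/2 can run its cleanup. A
shell illustration (pid value will differ):

    1> process_flag(trap_exit, true).
    false
    2> spawn_link(fun() -> ok end), timer:sleep(10), flush().
    Shell got {'EXIT',<0.88.0>,normal}
    ok
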
@@ -243,7 +272,7 @@ collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts, FullCommit) ->
{update_docs, Client, GroupedDocs, [], MergeConflicts, FullCommit2} ->
GroupedDocs2 = [[{Client, Doc} || Doc <- DocGroup]
|| DocGroup <- GroupedDocs],
- GroupedDocsAcc2 =
+ GroupedDocsAcc2 =
merge_updates(GroupedDocsAcc, GroupedDocs2, []),
collect_updates(GroupedDocsAcc2, [Client | ClientsAcc],
MergeConflicts, (FullCommit or FullCommit2))
@@ -326,7 +355,7 @@ btree_by_seq_reduce(rereduce, Reds) ->
simple_upgrade_record(Old, New) when tuple_size(Old) =:= tuple_size(New) ->
Old;
-simple_upgrade_record(Old, New) ->
+simple_upgrade_record(Old, New) when tuple_size(Old) < tuple_size(New) ->
OldSz = tuple_size(Old),
NewValuesTail =
lists:sublist(tuple_to_list(New), OldSz + 1, tuple_size(New) - OldSz),
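
Note: the added guard turns an attempted downgrade (an on-disk record
wider than the current definition) into a function_clause error instead of
silently reusing the old tuple. The rest of the body (partly outside this
hunk) appends the missing tail of defaults from the new record, so
upgrades are purely additive. A worked example with a hypothetical record:

    %% old 3-field header:      {db_header, 3, Seq}
    %% current 5-field default: {db_header, 5, 0, nil, 1000}
    %% simple_upgrade_record(Old, New) keeps the old values and appends
    %% the two missing defaults:
    %%   {db_header, 3, Seq, nil, 1000}
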
@@ -337,9 +366,10 @@ init_db(DbName, Filepath, Fd, Header0) ->
Header1 = simple_upgrade_record(Header0, #db_header{}),
Header =
case element(2, Header1) of
- 1 -> Header1#db_header{unused = 0}; % 0.9
- 2 -> Header1#db_header{unused = 0}; % post 0.9 and pre 0.10
- 3 -> Header1; % post 0.9 and pre 0.10
+ 1 -> Header1#db_header{unused = 0, security_ptr = nil}; % 0.9
+ 2 -> Header1#db_header{unused = 0, security_ptr = nil}; % post 0.9 and pre 0.10
+ 3 -> Header1#db_header{security_ptr = nil}; % post 0.9 and pre 0.10
+ 4 -> Header1#db_header{security_ptr = nil}; % 0.10 and pre 0.11
?LATEST_DISK_VERSION -> Header1;
_ -> throw({database_disk_version_error, "Incorrect disk header version"})
end,
@@ -362,12 +392,12 @@ init_db(DbName, Filepath, Fd, Header0) ->
{join, fun(X,Y) -> btree_by_seq_join(X,Y) end},
{reduce, fun(X,Y) -> btree_by_seq_reduce(X,Y) end}]),
{ok, LocalDocsBtree} = couch_btree:open(Header#db_header.local_docs_btree_state, Fd),
- case Header#db_header.admins_ptr of
+ case Header#db_header.security_ptr of
nil ->
- Admins = [],
- AdminsPtr = nil;
- AdminsPtr ->
- {ok, Admins} = couch_file:pread_term(Fd, AdminsPtr)
+ Security = [],
+ SecurityPtr = nil;
+ SecurityPtr ->
+ {ok, Security} = couch_file:pread_term(Fd, SecurityPtr)
end,
% convert start time tuple to microsecs and store as a binary string
{MegaSecs, Secs, MicroSecs} = now(),
@@ -386,8 +416,8 @@ init_db(DbName, Filepath, Fd, Header0) ->
update_seq = Header#db_header.update_seq,
name = DbName,
filepath = Filepath,
- admins = Admins,
- admins_ptr = AdminsPtr,
+ security = Security,
+ security_ptr = SecurityPtr,
instance_start_time = StartTime,
revs_limit = Header#db_header.revs_limit,
fsync_options = FsyncOptions
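
Note: admins/admins_ptr become security/security_ptr end to end: written
by the set_security call above, defaulted to nil for every pre-0.11 disk
version in the header upgrade, loaded here, persisted via db_to_header/2,
and copied across compaction below. The load itself is a plain pointer
chase; condensed:

    {Security, SecurityPtr} =
        case Header#db_header.security_ptr of
            nil ->
                {[], nil};
            Ptr ->
                {ok, Sec} = couch_file:pread_term(Fd, Ptr),
                {Sec, Ptr}
        end
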
@@ -429,8 +459,9 @@ flush_trees(#db{fd=Fd,header=Header}=Db,
case Atts of
[] -> [];
[#att{data={BinFd, _Sp}} | _ ] when BinFd == Fd ->
- [{N,T,P,L,R,M}
- || #att{name=N,type=T,data={_,P},md5=M,revpos=R,len=L}
+ [{N,T,P,AL,DL,R,M,E}
+ || #att{name=N,type=T,data={_,P},md5=M,revpos=R,
+ att_len=AL,disk_len=DL,encoding=E}
<- Atts];
_ ->
% BinFd must not equal our Fd. This can happen when a database
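
Note: attachment tuples written into the by-id tree widen from six to
eight fields. Judging from the comprehension above and the #att fields it
reads, the layout is:

    %% {Name, Type, Pos, AttLen, DiskLen, RevPos, Md5, Encoding}
    %% where, assuming the usual #att semantics, AttLen is the stored
    %% (possibly encoded) length, DiskLen the identity length, and
    %% Encoding is identity | gzip -- see the upgrade clauses in
    %% copy_doc_attachments further down.
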
@@ -458,16 +489,16 @@ send_result(Client, Id, OriginalRevs, NewResult) ->
% used to send a result to the client
catch(Client ! {result, self(), {{Id, OriginalRevs}, NewResult}}).
-merge_rev_trees(_MergeConflicts, [], [], AccNewInfos, AccRemoveSeqs, AccSeq) ->
+merge_rev_trees(_Limit, _Merge, [], [], AccNewInfos, AccRemoveSeqs, AccSeq) ->
{ok, lists:reverse(AccNewInfos), AccRemoveSeqs, AccSeq};
-merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList],
+merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList],
[OldDocInfo|RestOldInfo], AccNewInfos, AccRemoveSeqs, AccSeq) ->
#full_doc_info{id=Id,rev_tree=OldTree,deleted=OldDeleted,update_seq=OldSeq}
= OldDocInfo,
- NewRevTree = lists:foldl(
+ NewRevTree0 = lists:foldl(
fun({Client, #doc{revs={Pos,[_Rev|PrevRevs]}}=NewDoc}, AccTree) ->
if not MergeConflicts ->
- case couch_key_tree:merge(AccTree, [couch_db:doc_to_tree(NewDoc)]) of
+ case couch_key_tree:merge(AccTree, couch_db:doc_to_tree(NewDoc)) of
{_NewTree, conflicts} when (not OldDeleted) ->
send_result(Client, Id, {Pos-1,PrevRevs}, conflict),
AccTree;
@@ -492,15 +523,15 @@ merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList],
% this means we are recreating a brand new document
% into a state that already existed before.
% put the rev into a subsequent edit of the deletion
- #doc_info{revs=[#rev_info{rev={OldPos,OldRev}}|_]} =
+ #doc_info{revs=[#rev_info{rev={OldPos,OldRev}}|_]} =
couch_doc:to_doc_info(OldDocInfo),
NewRevId = couch_db:new_revid(
NewDoc#doc{revs={OldPos, [OldRev]}}),
NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}},
{NewTree2, _} = couch_key_tree:merge(AccTree,
- [couch_db:doc_to_tree(NewDoc2)]),
+ couch_db:doc_to_tree(NewDoc2)),
% we changed the rev id, this tells the caller we did
- send_result(Client, Id, {Pos-1,PrevRevs},
+ send_result(Client, Id, {Pos-1,PrevRevs},
{ok, {OldPos + 1, NewRevId}}),
NewTree2;
true ->
@@ -512,15 +543,16 @@ merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList],
end;
true ->
{NewTree, _} = couch_key_tree:merge(AccTree,
- [couch_db:doc_to_tree(NewDoc)]),
+ couch_db:doc_to_tree(NewDoc)),
NewTree
- end
+ end
end,
OldTree, NewDocs),
+ NewRevTree = couch_key_tree:stem(NewRevTree0, Limit),
if NewRevTree == OldTree ->
% nothing changed
- merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo, AccNewInfos,
- AccRemoveSeqs, AccSeq);
+ merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
+ AccNewInfos, AccRemoveSeqs, AccSeq);
true ->
% we have updated the document, give it a new seq #
NewInfo = #full_doc_info{id=Id,update_seq=AccSeq+1,rev_tree=NewRevTree},
@@ -528,8 +560,8 @@ merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList],
0 -> AccRemoveSeqs;
_ -> [OldSeq | AccRemoveSeqs]
end,
- merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo,
- [NewInfo|AccNewInfos], RemoveSeqs, AccSeq+1)
+ merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
+ [NewInfo|AccNewInfos], RemoveSeqs, AccSeq+1)
end.
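
Note: two changes travel together in merge_rev_trees. The second argument
of couch_key_tree:merge/2 is now the bare tree from couch_db:doc_to_tree/1
rather than a one-element list, and revision stemming happens inline on
every merge via the new Limit parameter instead of in a separate pass
afterwards:

    %% truncate each revision path to its last Limit entries; the
    %% #db default for revs_limit is 1000
    NewRevTree = couch_key_tree:stem(NewRevTree0, Limit)
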
@@ -552,7 +584,8 @@ update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
#db{
fulldocinfo_by_id_btree = DocInfoByIdBTree,
docinfo_by_seq_btree = DocInfoBySeqBTree,
- update_seq = LastSeq
+ update_seq = LastSeq,
+ revs_limit = RevsLimit
} = Db,
Ids = [Id || [{_Client, #doc{id=Id}}|_] <- DocsList],
% look up the old documents, if they exist.
@@ -565,11 +598,9 @@ update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
end,
Ids, OldDocLookups),
% Merge the new docs into the revision trees.
- {ok, NewDocInfos0, RemoveSeqs, NewSeq} = merge_rev_trees(
+ {ok, NewFullDocInfos, RemoveSeqs, NewSeq} = merge_rev_trees(RevsLimit,
MergeConflicts, DocsList, OldDocInfos, [], [], LastSeq),
- NewFullDocInfos = stem_full_doc_infos(Db, NewDocInfos0),
-
% All documents are now ready to write.
{ok, Db2} = update_local_docs(Db, NonRepDocs),
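
Note: this is the other half of the inline stemming. update_docs_int pulls
revs_limit out of the #db record, threads it into merge_rev_trees, and
drops the old post-processing step:

    %% before: merge first, stem in a second pass
    %%   {ok, Infos0, ...} = merge_rev_trees(MergeConflicts, ...),
    %%   NewFullDocInfos = stem_full_doc_infos(Db, Infos0)
    %% after: one pass, stemming as each tree is merged
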
@@ -654,35 +685,35 @@ db_to_header(Db, Header) ->
docinfo_by_seq_btree_state = couch_btree:get_state(Db#db.docinfo_by_seq_btree),
fulldocinfo_by_id_btree_state = couch_btree:get_state(Db#db.fulldocinfo_by_id_btree),
local_docs_btree_state = couch_btree:get_state(Db#db.local_docs_btree),
- admins_ptr = Db#db.admins_ptr,
+ security_ptr = Db#db.security_ptr,
revs_limit = Db#db.revs_limit}.
-commit_data(#db{fd=Fd,header=OldHeader,fsync_options=FsyncOptions}=Db, Delay) ->
- Header = db_to_header(Db, OldHeader),
- if OldHeader == Header ->
- Db;
- Delay and (Db#db.waiting_delayed_commit == nil) ->
- Db#db{waiting_delayed_commit=
- erlang:send_after(1000, self(), delayed_commit)};
- Delay ->
- Db;
- true ->
- if Db#db.waiting_delayed_commit /= nil ->
- case erlang:cancel_timer(Db#db.waiting_delayed_commit) of
- false -> receive delayed_commit -> ok after 0 -> ok end;
- _ -> ok
- end;
- true -> ok
- end,
+commit_data(#db{waiting_delayed_commit=nil} = Db, true) ->
+ Db#db{waiting_delayed_commit=erlang:send_after(1000,self(),delayed_commit)};
+commit_data(Db, true) ->
+ Db;
+commit_data(Db, _) ->
+ #db{
+ fd = Fd,
+ filepath = Filepath,
+ header = OldHeader,
+ fsync_options = FsyncOptions,
+ waiting_delayed_commit = Timer
+ } = Db,
+ if is_reference(Timer) -> erlang:cancel_timer(Timer); true -> ok end,
+ case db_to_header(Db, OldHeader) of
+ OldHeader ->
+ Db#db{waiting_delayed_commit=nil};
+ Header ->
case lists:member(before_header, FsyncOptions) of
- true -> ok = couch_file:sync(Fd);
+ true -> ok = couch_file:sync(Filepath);
_ -> ok
end,
ok = couch_file:write_header(Fd, Header),
case lists:member(after_header, FsyncOptions) of
- true -> ok = couch_file:sync(Fd);
+ true -> ok = couch_file:sync(Filepath);
_ -> ok
end,
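
Note: commit_data/2 collapses the old nested if into three clauses: arm a
one-second timer on the first delayed request, no-op while one is pending,
and otherwise cancel any timer, diff the header, and write. One change
hides in plain sight: the before_header/after_header fsyncs now pass
Filepath where they used to pass Fd, which presumes couch_file:sync/1 here
accepts a path. The control flow, condensed to comments:

    %% commit_data(#db{waiting_delayed_commit=nil} = Db, true) ->
    %%     arm erlang:send_after(1000, self(), delayed_commit);
    %% commit_data(Db, true) ->
    %%     timer already armed, no-op;
    %% commit_data(Db, _) ->
    %%     cancel timer; if db_to_header/2 differs from the old header:
    %%     optional sync, write_header, optional sync.
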
@@ -696,21 +727,41 @@ copy_doc_attachments(#db{fd=SrcFd}=SrcDb, {Pos,_RevId}, SrcSp, DestFd) ->
{ok, {BodyData, BinInfos}} = couch_db:read_doc(SrcDb, SrcSp),
% copy the bin values
NewBinInfos = lists:map(
- fun({Name, {Type, BinSp, Len}}) when is_tuple(BinSp) orelse BinSp == null ->
+ fun({Name, {Type, BinSp, AttLen}}) when is_tuple(BinSp) orelse BinSp == null ->
% 09 UPGRADE CODE
- {NewBinSp, Len, Md5} = couch_stream:old_copy_to_new_stream(SrcFd, BinSp, Len, DestFd),
- {Name, Type, NewBinSp, Len, Pos, Md5};
- ({Name, {Type, BinSp, Len}}) ->
+ {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} =
+ couch_stream:old_copy_to_new_stream(SrcFd, BinSp, AttLen, DestFd),
+ {Name, Type, NewBinSp, AttLen, AttLen, Pos, Md5, identity};
+ ({Name, {Type, BinSp, AttLen}}) ->
% 09 UPGRADE CODE
- {NewBinSp, Len, Md5} = couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
- {Name, Type, NewBinSp, Len, Pos, Md5};
- ({Name, Type, BinSp, Len, RevPos, <<>>}) when is_tuple(BinSp) orelse BinSp == null ->
+ {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} =
+ couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
+ {Name, Type, NewBinSp, AttLen, AttLen, Pos, Md5, identity};
+ ({Name, Type, BinSp, AttLen, _RevPos, <<>>}) when
+ is_tuple(BinSp) orelse BinSp == null ->
% 09 UPGRADE CODE
- {NewBinSp, Len, Md5} = couch_stream:old_copy_to_new_stream(SrcFd, BinSp, Len, DestFd),
- {Name, Type, NewBinSp, Len, Len, Md5};
- ({Name, Type, BinSp, Len, RevPos, Md5}) ->
- {NewBinSp, Len, Md5} = couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
- {Name, Type, NewBinSp, Len, RevPos, Md5}
+ {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} =
+ couch_stream:old_copy_to_new_stream(SrcFd, BinSp, AttLen, DestFd),
+ {Name, Type, NewBinSp, AttLen, AttLen, AttLen, Md5, identity};
+ ({Name, Type, BinSp, AttLen, RevPos, Md5}) ->
+ % 010 UPGRADE CODE
+ {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} =
+ couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
+ {Name, Type, NewBinSp, AttLen, AttLen, RevPos, Md5, identity};
+ ({Name, Type, BinSp, AttLen, DiskLen, RevPos, Md5, Enc1}) ->
+ {NewBinSp, AttLen, _, Md5, _IdentityMd5} =
+ couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
+ Enc = case Enc1 of
+ true ->
+ % 0110 UPGRADE CODE
+ gzip;
+ false ->
+ % 0110 UPGRADE CODE
+ identity;
+ _ ->
+ Enc1
+ end,
+ {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, Md5, Enc}
end, BinInfos),
{BodyData, NewBinInfos}.
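
Note: copy_doc_attachments now normalizes every historical attachment
shape to the current eight-field form while copying streams into the
compacted file:

    %% 0.9:   {Name, {Type, Sp, Len}}                  (two clauses)
    %% 0.9:   {Name, Type, Sp, Len, RevPos, <<>>}
    %% 0.10:  {Name, Type, Sp, Len, RevPos, Md5}
    %% 0.11+: {Name, Type, Sp, AttLen, DiskLen, RevPos, Md5, Enc}

The final clause also repairs interim trunk files that stored the encoding
as a boolean, mapping true to gzip and false to identity.
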
@@ -724,7 +775,10 @@ copy_rev_tree_attachments(SrcDb, DestFd, Tree) ->
end, Tree).
-copy_docs(Db, #db{fd=DestFd}=NewDb, InfoBySeq, Retry) ->
+copy_docs(Db, #db{fd=DestFd}=NewDb, InfoBySeq0, Retry) ->
+ % COUCHDB-968, make sure we prune duplicates during compaction
+ InfoBySeq = lists:usort(fun(#doc_info{id=A}, #doc_info{id=B}) -> A =< B end,
+ InfoBySeq0),
Ids = [Id || #doc_info{id=Id} <- InfoBySeq],
LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids),
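
Note: per the COUCHDB-968 comment, the by-seq index can transiently hold
more than one entry per doc id, and without deduplication compaction would
copy such a doc twice. lists:usort/2 with an id-based ordering keeps
exactly one #doc_info{} per id, retaining the first of any equal elements:

    1> F = fun({A, _}, {B, _}) -> A =< B end,
    1> lists:usort(F, [{a, 1}, {b, 1}, {a, 2}]).
    [{a,1},{b,1}]
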
@@ -790,7 +844,7 @@ copy_compact(Db, NewDb0, Retry) ->
couch_task_status:set_update_frequency(500),
{ok, _, {NewDb2, Uncopied, TotalChanges}} =
- couch_btree:foldl(Db#db.docinfo_by_seq_btree, EnumBySeqFun,
+ couch_btree:foldl(Db#db.docinfo_by_seq_btree, EnumBySeqFun,
{NewDb, [], 0},
[{start_key, NewDb#db.update_seq + 1}]),
@@ -799,9 +853,9 @@ copy_compact(Db, NewDb0, Retry) ->
NewDb3 = copy_docs(Db, NewDb2, lists:reverse(Uncopied), Retry),
% copy misc header values
- if NewDb3#db.admins /= Db#db.admins ->
- {ok, Ptr} = couch_file:append_term(NewDb3#db.fd, Db#db.admins),
- NewDb4 = NewDb3#db{admins=Db#db.admins, admins_ptr=Ptr};
+ if NewDb3#db.security /= Db#db.security ->
+ {ok, Ptr} = couch_file:append_term(NewDb3#db.fd, Db#db.security),
+ NewDb4 = NewDb3#db{security=Db#db.security, security_ptr=Ptr};
true ->
NewDb4 = NewDb3
end,
@@ -815,7 +869,12 @@ start_copy_compact(#db{name=Name,filepath=Filepath}=Db) ->
{ok, Fd} ->
couch_task_status:add_task(<<"Database Compaction">>, <<Name/binary, " retry">>, <<"Starting">>),
Retry = true,
- {ok, Header} = couch_file:read_header(Fd);
+ case couch_file:read_header(Fd) of
+ {ok, Header} ->
+ ok;
+ no_valid_header ->
+ ok = couch_file:write_header(Fd, Header=#db_header{})
+ end;
{error, enoent} ->
couch_task_status:add_task(<<"Database Compaction">>, Name, <<"Starting">>),
{ok, Fd} = couch_file:open(CompactFile, [create]),
@@ -823,8 +882,8 @@ start_copy_compact(#db{name=Name,filepath=Filepath}=Db) ->
ok = couch_file:write_header(Fd, Header=#db_header{})
end,
NewDb = init_db(Name, CompactFile, Fd, Header),
+ unlink(Fd),
NewDb2 = copy_compact(Db, NewDb, Retry),
-
- gen_server:cast(Db#db.update_pid, {compact_done, CompactFile}),
- close_db(NewDb2).
+ close_db(NewDb2),
+ gen_server:cast(Db#db.update_pid, {compact_done, CompactFile}).
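
Note: two ordering changes close out the compactor. unlink(Fd) detaches
the compaction fd from the compactor process, presumably so that (with the
updater now trapping exits) a shutdown of one does not propagate to the
other; and close_db/1 now runs before the compact_done cast, making the
cast the compactor's final act so the updater never races a still-open
writer. The retry branch above also tolerates a torn header in a leftover
.compact file:

    %% if the previous compaction left no valid header, start over with
    %% a fresh #db_header{} but keep Retry = true so copy_docs
    %% reconciles anything already written.
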