author     Damien F. Katz <damien@apache.org>        2008-04-20 18:17:15 +0000
committer  Damien F. Katz <damien@apache.org>        2008-04-20 18:17:15 +0000
commit     fb5b6bbc5aa941478d700e8fb3011c2a24c4d2d4 (patch)
tree       bcc23ed4869f395e894f76ec3fb5e76f75a5ba98
parent     ad230e67fb09883e2171291d5a42635f5e2addb9 (diff)
Added proper UUID generation and changed the details of how debug logging is done to now use a more efficient macro instead of a function call.
git-svn-id: https://svn.apache.org/repos/asf/incubator/couchdb/trunk@649948 13f79535-47bb-0310-9956-ffa450edef68
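The logging change replaces `couch_log:debug/2`, `couch_log:info/2` and `couch_log:error/2` calls with `?LOG_DEBUG`, `?LOG_INFO` and `?LOG_ERROR` macros defined in `couch_db.hrl`. Because the level check is expanded inline at each call site, the report tuple is only built (and `error_logger` only called) when that level is actually enabled. A minimal, self-contained sketch of the guard pattern follows; the module name, the `log_level/0` helper and the hard-coded level constants are illustrative stand-ins, while the real macros consult `couch_log:debug_on/0` and `couch_log:info_on/0`.

```erlang
%% Sketch of the guard-at-the-call-site pattern this commit moves to.
%% Names here are illustrative; the real macros live in couch_db.hrl.
-module(log_macro_sketch).
-export([demo/0]).

-define(LEVEL_DEBUG, 1).
-define(LEVEL_INFO, 2).

log_level() -> ?LEVEL_INFO.   % pretend debug logging is switched off

%% The case expression is expanded inline wherever the macro is used,
%% so Args is only evaluated when the true branch is actually taken.
-define(LOG_DEBUG(Format, Args),
    case log_level() =< ?LEVEL_DEBUG of
        true -> error_logger:info_report(couch_debug, {Format, Args});
        false -> ok
    end).

demo() ->
    %% With debug off, the (potentially expensive) argument list below
    %% is never constructed and no report is sent.
    ?LOG_DEBUG("big term: ~p", [lists:seq(1, 1000000)]),
    ok.
```

With the old `couch_log:debug(Format, Args)` function call, the argument list was always built before the level check; the macro avoids that work entirely when the level is off.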
-rw-r--r--  src/couchdb/couch_db.erl             |  23
-rw-r--r--  src/couchdb/couch_db.hrl             |  16
-rw-r--r--  src/couchdb/couch_erl_driver.c       |  99
-rw-r--r--  src/couchdb/couch_file.erl           |   8
-rw-r--r--  src/couchdb/couch_httpd.erl          |  28
-rw-r--r--  src/couchdb/couch_log.erl            |  34
-rw-r--r--  src/couchdb/couch_query_servers.erl  |   8
-rw-r--r--  src/couchdb/couch_rep.erl            |   6
-rw-r--r--  src/couchdb/couch_server_sup.erl     |   4
-rw-r--r--  src/couchdb/couch_util.erl           |  28
-rw-r--r--  src/couchdb/couch_view.erl           |  12
11 files changed, 144 insertions, 122 deletions
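On the UUID side, `couch_util:new_uuid/0` stops faking IDs with four `random:uniform/1` draws and instead hex-encodes the 16 bytes produced by the driver (`uuid_generate` behind `port_control` command 2), using the new `to_hex/1` and `to_digit/1` helpers shown in the diff below. Here is a standalone sketch of that encoding step; `crypto:strong_rand_bytes/1` stands in for the driver call so the example runs without the port, which is an assumption of this sketch, not what the patch itself does.

```erlang
%% Sketch of the byte-to-hex step added to couch_util.erl. The driver is
%% replaced by crypto:strong_rand_bytes/1 (an assumption, for a
%% self-contained example); the real code hex-encodes the byte list
%% returned by erlang:port_control(drv_port(), 2, <<>>).
-module(uuid_hex_sketch).
-export([new_uuid/0]).

new_uuid() ->
    to_hex(binary_to_list(crypto:strong_rand_bytes(16))).

%% Two hex digits per byte, lower case: 32 characters for 16 bytes.
to_hex([]) -> [];
to_hex([H | T]) ->
    [to_digit(H div 16), to_digit(H rem 16) | to_hex(T)].

to_digit(N) when N < 10 -> $0 + N;
to_digit(N) -> $a + N - 10.
```

Calling `uuid_hex_sketch:new_uuid()` returns a 32-character lower-case hex string, two digits per byte.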
diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl
index 60d951c9..289cc4f9 100644
--- a/src/couchdb/couch_db.erl
+++ b/src/couchdb/couch_db.erl
@@ -68,7 +68,7 @@ start_link0(DbName, Filepath, Options) ->
         % crashed during the file switch.
         case couch_file:open(Filepath ++ ".compact") of
         {ok, Fd0} ->
-            couch_log:info("Found ~s~s compaction file, using as primary storage.", [Filepath, ".compact"]),
+            ?LOG_INFO("Found ~s~s compaction file, using as primary storage.", [Filepath, ".compact"]),
             ok = file:rename(Filepath ++ ".compact", Filepath),
             Fd0;
         {error, enoent} ->
@@ -85,7 +85,7 @@ start_link0(DbName, Filepath, Options) ->
         % We successfully opened the db, delete old storage files if around
         case file:delete(Filepath ++ ".old") of
         ok ->
-            couch_log:info("Deleted old storage file ~s~s", [Filepath, ".old"]);
+            ?LOG_INFO("Deleted old storage file ~s~s", [Filepath, ".old"]);
         {error, enoent} ->
             ok % normal result
         end;
@@ -461,7 +461,7 @@ code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
 
 handle_info(Msg, Db) ->
-    couch_log:error("Bad message received for db ~s: ~p", [Db#db.name, Msg]),
+    ?LOG_ERROR("Bad message received for db ~s: ~p", [Db#db.name, Msg]),
     exit({error, Msg}).
 
 
@@ -508,7 +508,7 @@ update_loop(#db{fd=Fd,name=Name,filepath=Filepath, main_pid=MainPid}=Db) ->
     compact ->
         case Db#db.compactor_pid of
         nil ->
-            couch_log:info("Starting compaction for db \"~s\"", [Name]),
+            ?LOG_INFO("Starting compaction for db \"~s\"", [Name]),
             Pid = spawn_link(couch_db, start_copy_compact_int, [Db, true]),
             Db2 = Db#db{compactor_pid=Pid},
             ok = gen_server:call(MainPid, {db_updated, Db2}),
@@ -523,7 +523,7 @@ update_loop(#db{fd=Fd,name=Name,filepath=Filepath, main_pid=MainPid}=Db) ->
             init_db(Name, CompactFilepath, NewFd, NewHeader),
             case Db#db.update_seq == NewSeq of
             true ->
-                couch_log:debug("CouchDB swapping files ~s and ~s.", [Filepath, CompactFilepath]),
+                ?LOG_DEBUG("CouchDB swapping files ~s and ~s.", [Filepath, CompactFilepath]),
                 ok = file:rename(Filepath, Filepath ++ ".old"),
                 ok = file:rename(CompactFilepath, Filepath),
 
@@ -544,10 +544,10 @@ update_loop(#db{fd=Fd,name=Name,filepath=Filepath, main_pid=MainPid}=Db) ->
                     end),
                 ok = gen_server:call(MainPid, {db_updated, NewDb2}),
-                couch_log:info("Compaction for db ~p completed.", [Name]),
+                ?LOG_INFO("Compaction for db ~p completed.", [Name]),
                 update_loop(NewDb2#db{compactor_pid=nil});
             false ->
-                couch_log:info("Compaction file still behind main file "
+                ?LOG_INFO("Compaction file still behind main file "
                         "(update seq=~p. compact update seq=~p). Retrying.",
                         [Db#db.update_seq, NewSeq]),
                 Pid = spawn_link(couch_db, start_copy_compact_int, [Db, false]),
@@ -555,7 +555,7 @@ update_loop(#db{fd=Fd,name=Name,filepath=Filepath, main_pid=MainPid}=Db) ->
                 update_loop(Db2)
             end;
     Else ->
-        couch_log:error("Unknown message received in db ~s:~p", [Db#db.name, Else]),
+        ?LOG_ERROR("Unknown message received in db ~s:~p", [Db#db.name, Else]),
         exit({error, Else})
     end.
 
@@ -701,7 +701,7 @@ flush_trees(#db{fd=Fd}=Db, [Unflushed | RestUnflushed], AccFlushed) ->
             _ ->
                 % BinFd must not equal our Fd. This can happen when a database
                 % is being updated during a compaction
-                couch_log:debug("File where the attachments are written has changed. Possibly retrying."),
+                ?LOG_DEBUG("File where the attachments are written has changed. Possibly retrying.", []),
                 throw(retry)
             end,
     {ok, NewSummaryPointer} = couch_stream:write_term(Db#db.summary_stream, {Doc#doc.body, Bins}),
@@ -876,6 +876,7 @@ commit_data(#db{fd=Fd, header=Header} = Db) ->
     if Header == Header2 ->
         Db; % unchanged. nothing to do
     true ->
+        %ok = couch_file:sync(Fd),
         ok = couch_file:write_header(Fd, <<$g, $m, $k, 0>>, Header2),
         Db#db{header = Header2}
     end.
@@ -941,10 +942,10 @@ copy_compact_docs(Db, NewDb) ->
 
 start_copy_compact_int(#db{name=Name,filepath=Filepath}=Db, CopyLocal) ->
     CompactFile = Filepath ++ ".compact",
-    couch_log:debug("Compaction process spawned for db \"~s\"", [Name]),
+    ?LOG_DEBUG("Compaction process spawned for db \"~s\"", [Name]),
     case couch_file:open(CompactFile) of
     {ok, Fd} ->
-        couch_log:debug("Found existing compaction file for db \"~s\"", [Name]),
+        ?LOG_DEBUG("Found existing compaction file for db \"~s\"", [Name]),
         {ok, Header} = couch_file:read_header(Fd, <<$g, $m, $k, 0>>);
     {error, enoent} ->
         % {ok, Fd} = couch_file:open(CompactFile, [create]),
diff --git a/src/couchdb/couch_db.hrl b/src/couchdb/couch_db.hrl
index 51ee7af2..e4cf00ea 100644
--- a/src/couchdb/couch_db.hrl
+++ b/src/couchdb/couch_db.hrl
@@ -15,6 +15,21 @@
 -define(DESIGN_DOC_PREFIX, "_design/").
 -define(DEFAULT_ATTACHMENT_CONTENT_TYPE, "application/octet-stream").
+
+-define(LOG_DEBUG(Format, Args),
+    case couch_log:debug_on() of
+    true -> error_logger:info_report(couch_debug, {Format, Args});
+    false -> ok
+    end).
+
+-define(LOG_INFO(Format, Args),
+    case couch_log:info_on() of
+    true -> error_logger:info_report(couch_info, {Format, Args});
+    false -> ok
+    end).
+
+-define(LOG_ERROR(Format, Args),
+    error_logger:info_report(couch_error, {Format, Args})).
 
 
 -record(doc_info,
     {
@@ -53,4 +68,3 @@
     % couch_db:open_doc(Db, Id, Options).
     meta = []
     }).
-
diff --git a/src/couchdb/couch_erl_driver.c b/src/couchdb/couch_erl_driver.c
index b5703f09..550cff9e 100644
--- a/src/couchdb/couch_erl_driver.c
+++ b/src/couchdb/couch_erl_driver.c
@@ -23,6 +23,7 @@ specific language governing permissions and limitations under the License.
 #ifndef WIN32
 #include <string.h> // for memcpy
 #endif
+#include <uuid/uuid.h>
 
 typedef struct {
     ErlDrvPort port;
@@ -92,51 +93,65 @@ static int couch_drv_control(ErlDrvData drv_data, unsigned int command, const ch
 {
     #define COLLATE 0
     #define COLLATE_NO_CASE 1
+    #define UUID 2
 
     couch_drv_data* pData = (couch_drv_data*)drv_data;
-
-    UErrorCode status = U_ZERO_ERROR;
-    int collResult;
-    char response;
-    UCharIterator iterA;
-    UCharIterator iterB;
-    int32_t length;
-
-    // 2 strings are in the buffer, consecutively
-    // The strings begin first with a 32 bit integer byte length, then the actual
-    // string bytes follow.
-
-    // first 32bits are the length
-    memcpy(&length, pBuf, sizeof(length));
-    pBuf += sizeof(length);
-
-    // point the iterator at it.
-    uiter_setUTF8(&iterA, pBuf, length);
-
-    pBuf += length; // now on to string b
-
-    // first 32bits are the length
-    memcpy(&length, pBuf, sizeof(length));
-    pBuf += sizeof(length);
-
-    // point the iterator at it.
-    uiter_setUTF8(&iterB, pBuf, length);
-
-    if (command == COLLATE)
-        collResult = ucol_strcollIter(pData->coll, &iterA, &iterB, &status);
-    else if (command == COLLATE_NO_CASE)
-        collResult = ucol_strcollIter(pData->collNoCase, &iterA, &iterB, &status);
-    else
+    switch(command) {
+    case UUID:
+        {
+        uuid_t uuid;
+        uuid_generate(uuid);
+        return return_control_result(&uuid, sizeof(uuid), rbuf, rlen);
+        }
+
+    case COLLATE:
+    case COLLATE_NO_CASE:
+        {
+        UErrorCode status = U_ZERO_ERROR;
+        int collResult;
+        char response;
+        UCharIterator iterA;
+        UCharIterator iterB;
+        int32_t length;
+
+        // 2 strings are in the buffer, consecutively
+        // The strings begin first with a 32 bit integer byte length, then the actual
+        // string bytes follow.
+
+        // first 32bits are the length
+        memcpy(&length, pBuf, sizeof(length));
+        pBuf += sizeof(length);
+
+        // point the iterator at it.
+        uiter_setUTF8(&iterA, pBuf, length);
+
+        pBuf += length; // now on to string b
+
+        // first 32bits are the length
+        memcpy(&length, pBuf, sizeof(length));
+        pBuf += sizeof(length);
+
+        // point the iterator at it.
+        uiter_setUTF8(&iterB, pBuf, length);
+
+        if (command == COLLATE)
+            collResult = ucol_strcollIter(pData->coll, &iterA, &iterB, &status);
+        else if (command == COLLATE_NO_CASE)
+            collResult = ucol_strcollIter(pData->collNoCase, &iterA, &iterB, &status);
+
+        if (collResult < 0)
+            response = 0; //lt
+        else if (collResult > 0)
+            response = 1; //gt
+        else
+            response = 2; //eq
+
+        return return_control_result(&response, sizeof(response), rbuf, rlen);
+        }
+
+    default:
         return -1;
-
-    if (collResult < 0)
-        response = 0; //lt
-    else if (collResult > 0)
-        response = 1; //gt
-    else
-        response = 2; //eq
-
-    return return_control_result(&response, sizeof(response), rbuf, rlen);
+    }
 }
 
 ErlDrvEntry couch_driver_entry = {
diff --git a/src/couchdb/couch_file.erl b/src/couchdb/couch_file.erl
index 3a1a7af1..d42d0eb6 100644
--- a/src/couchdb/couch_file.erl
+++ b/src/couchdb/couch_file.erl
@@ -13,6 +13,8 @@
 -module(couch_file).
 -behaviour(gen_server).
 
+-include("couch_db.hrl").
+
 -define(HEADER_SIZE, 2048). % size of each segment of the doubly written header
 
 -export([open/1, open/2, close/1, pread/3, pwrite/3, expand/2, bytes/1, sync/1]).
@@ -178,12 +180,12 @@ read_header(Fd, Prefix) ->
             false ->
                 % To get here we must have two different header versions with signatures intact.
                 % It's weird but possible (a commit failure right at the 2k boundary). Log it and take the first.
-                couch_log:info("Header version differences.~nPrimary Header: ~p~nSecondary Header: ~p", [Header1, Header2]),
+                ?LOG_INFO("Header version differences.~nPrimary Header: ~p~nSecondary Header: ~p", [Header1, Header2]),
                 {ok, Header1}
             end;
         {error, Error} ->
            % error reading second header. It's ok, but log it.
-            couch_log:info("Secondary header corruption (error: ~p). Using primary header.", [Error]),
+            ?LOG_INFO("Secondary header corruption (error: ~p). Using primary header.", [Error]),
            {ok, Header1}
        end;
    {error, Error} ->
@@ -191,7 +193,7 @@ read_header(Fd, Prefix) ->
        case extract_header(Prefix, Bin2) of
        {ok, Header2} ->
            % log corrupt primary header. It's ok since the secondary is still good.
-            couch_log:info("Primary header corruption (error: ~p). Using secondary header.", [Error]),
+            ?LOG_INFO("Primary header corruption (error: ~p). Using secondary header.", [Error]),
            {ok, Header2};
        _ ->
            % error reading secondary header too
diff --git a/src/couchdb/couch_httpd.erl b/src/couchdb/couch_httpd.erl
index ad60d7ca..31bdd143 100644
--- a/src/couchdb/couch_httpd.erl
+++ b/src/couchdb/couch_httpd.erl
@@ -57,11 +57,11 @@ handle_request(Req, DocumentRoot) ->
     % removed, but URL quoting left intact
     {Path, _, _} = mochiweb_util:urlsplit_path(Req:get(raw_path)),
 
-    couch_log:debug("Version: ~p", [Req:get(version)]),
-    couch_log:debug("Method: ~p", [Method]),
-    couch_log:debug("Request URI: ~p", [Path]),
-    couch_log:debug("Headers: ~p", [mochiweb_headers:to_list(Req:get(headers))]),
-
+    ?LOG_DEBUG("Version: ~p", [Req:get(version)]),
+    ?LOG_DEBUG("Method: ~p", [Method]),
+    ?LOG_DEBUG("Request URI: ~p", [Path]),
+    ?LOG_DEBUG("Headers: ~p", [mochiweb_headers:to_list(Req:get(headers))]),
+
     {ok, Resp} = case catch(handle_request(Req, DocumentRoot, Method, Path)) of
         {ok, Resp0} ->
             {ok, Resp0};
@@ -69,13 +69,19 @@ handle_request(Req, DocumentRoot) ->
             send_error(Req, Error)
     end,
 
-    couch_log:info("~s - - ~p ~B", [
+    ?LOG_INFO("~s - - ~p ~B", [
         Req:get(peer),
         atom_to_list(Req:get(method)) ++ " " ++ Path,
         Resp:get(code)
     ]).
 
 handle_request(Req, DocumentRoot, Method, Path) ->
+    Start = erlang:now(),
+    X = handle_request0(Req, DocumentRoot, Method, Path),
+    io:format("now_diff:~p~n", [timer:now_diff(erlang:now(), Start)]),
+    X.
+
+handle_request0(Req, DocumentRoot, Method, Path) ->
     case Path of
     "/" ->
         handle_welcome_request(Req, Method);
@@ -431,7 +437,7 @@ handle_doc_request(Req, 'GET', _DbName, Db, DocId) ->
         JsonDoc = couch_doc:to_json_obj(Doc, Options),
         AdditionalHeaders =
             case Doc#doc.meta of
-            [] -> [{"Etag", Etag}]; % output etag when we have no meta
+            [] -> [{"XEtag", Etag}]; % output etag when we have no meta
             _ -> []
             end,
         send_json(Req, 200, AdditionalHeaders, JsonDoc);
@@ -498,7 +504,7 @@ handle_doc_request(Req, 'PUT', _DbName, Db, DocId) ->
 
     Doc = couch_doc:from_json_obj(Json),
     {ok, NewRev} = couch_db:update_doc(Db, Doc#doc{id=DocId, revs=Revs}, []),
-    send_json(Req, 201, [{"Etag", "\"" ++ NewRev ++ "\""}], {obj, [
+    send_json(Req, 201, [{"XEtag", "\"" ++ NewRev ++ "\""}], {obj, [
         {ok, true},
         {id, DocId},
         {rev, NewRev}
@@ -791,12 +797,12 @@ error_to_json0(Error) ->
 send_error(Req, {method_not_allowed, Methods}) ->
     {ok, Req:respond({405, [{"Allow", Methods}], <<>>})};
 send_error(Req, {modified, Etag}) ->
-    {ok, Req:respond({412, [{"Etag", Etag}], <<>>})};
+    {ok, Req:respond({412, [{"XEtag", Etag}], <<>>})};
 send_error(Req, {not_modified, Etag}) ->
-    {ok, Req:respond({304, [{"Etag", Etag}], <<>>})};
+    {ok, Req:respond({304, [{"XEtag", Etag}], <<>>})};
 send_error(Req, Error) ->
     {Code, Json} = error_to_json(Error),
-    couch_log:info("HTTP Error (code ~w): ~p", [Code, Error]),
+    ?LOG_INFO("HTTP Error (code ~w): ~p", [Code, Error]),
     send_error(Req, Code, Json).
 
 send_error(Req, Code, Json) ->
diff --git a/src/couchdb/couch_log.erl b/src/couchdb/couch_log.erl
index 47e0114d..95ddc47f 100644
--- a/src/couchdb/couch_log.erl
+++ b/src/couchdb/couch_log.erl
@@ -14,7 +14,7 @@
 -behaviour(gen_event).
 
 -export([start_link/2,stop/0]).
--export([error/1,error/2,info/1,info/2,debug/1,debug/2,get_level/0,get_level_integer/0, set_level/1]).
+-export([debug_on/0,info_on/0,get_level/0,get_level_integer/0, set_level/1]).
 -export([init/1, handle_event/2, terminate/2, code_change/3, handle_info/2, handle_call/2]).
 
 -define(LEVEL_ERROR, 3).
@@ -44,33 +44,11 @@ init({Filename, Level}) ->
     {ok, Fd} = file:open(Filename, [append]),
     {ok, {Fd, level_integer(Level)}}.
 
-error(Msg) ->
-    error("~s", [Msg]).
-
-error(Format, Args) ->
-    error_logger:error_report(couch_error, {Format, Args}).
-
-info(Msg) ->
-    info("~s", [Msg]).
-
-info(Format, Args) ->
-    case get_level_integer() =< ?LEVEL_INFO of
-    true ->
-        error_logger:info_report(couch_info, {Format, Args});
-    false ->
-        ok
-    end.
-
-debug(Msg) ->
-    debug("~s", [Msg]).
-
-debug(Format, Args) ->
-    case get_level_integer() =< ?LEVEL_DEBUG of
-    true ->
-        error_logger:info_report(couch_debug, {Format, Args});
-    false ->
-        ok
-    end.
+debug_on() ->
+    get_level_integer() =< ?LEVEL_DEBUG.
+
+info_on() ->
+    get_level_integer() =< ?LEVEL_INFO.
 
 set_level(LevelAtom) ->
     set_level_integer(level_integer(LevelAtom)).
diff --git a/src/couchdb/couch_query_servers.erl b/src/couchdb/couch_query_servers.erl
index 33962705..a6e1750a 100644
--- a/src/couchdb/couch_query_servers.erl
+++ b/src/couchdb/couch_query_servers.erl
@@ -53,7 +53,7 @@ read_json(Port) ->
     case cjson:decode(readline(Port)) of
     {obj, [{"log", Msg}]} when is_list(Msg) ->
         % we got a message to log. Log it and continue
-        couch_log:info("Query Server Log Message: ~s", [Msg]),
+        ?LOG_INFO("Query Server Log Message: ~s", [Msg]),
         read_json(Port);
     Else ->
         Else
@@ -75,7 +75,7 @@ start_doc_map(Lang, Functions) ->
             link(Port0),
             Port0;
         {empty, Cmd} ->
-            couch_log:info("Spawning new ~s instance.", [Lang]),
+            ?LOG_INFO("Spawning new ~s instance.", [Lang]),
             open_port({spawn, Cmd}, [stream,
                                         {line, 1000},
                                         exit_status,
@@ -170,11 +170,11 @@ handle_info({Port, {exit_status, Status}}, {QueryServerList, LangPorts}) ->
     {value, {Lang, _}} ->
         case Status of
         0 -> ok;
-        _ -> couch_log:error("Abnormal shutdown of ~s query server process (exit_status: ~w).", [Lang, Status])
+        _ -> ?LOG_ERROR("Abnormal shutdown of ~s query server process (exit_status: ~w).", [Lang, Status])
         end,
         {noreply, {QueryServerList, lists:keydelete(Port, 2, LangPorts)}};
     _ ->
-        couch_log:error("Unknown linked port/process crash: ~p", [Port])
+        ?LOG_ERROR("Unknown linked port/process crash: ~p", [Port])
     end;
 handle_info(_Whatever, {Cmd, Ports}) ->
     {noreply, {Cmd, Ports}}.
diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl
index 3b338e6e..df0a4da1 100644
--- a/src/couchdb/couch_rep.erl
+++ b/src/couchdb/couch_rep.erl
@@ -151,9 +151,9 @@ do_http_request(Url, Action) ->
     do_http_request(Url, Action, []).
 
 do_http_request(Url, Action, JsonBody) ->
-    couch_log:debug("couch_rep HTTP client request:"),
-    couch_log:debug("\tAction: ~p", [Action]),
-    couch_log:debug("\tUrl: ~p", [Url]),
+    ?LOG_DEBUG("couch_rep HTTP client request:", []),
+    ?LOG_DEBUG("\tAction: ~p", [Action]),
+    ?LOG_DEBUG("\tUrl: ~p", [Url]),
 
     % ensure that the inets application is running
     case application:start(inets) of
diff --git a/src/couchdb/couch_server_sup.erl b/src/couchdb/couch_server_sup.erl
index 9c751f9c..9117f5f8 100644
--- a/src/couchdb/couch_server_sup.erl
+++ b/src/couchdb/couch_server_sup.erl
@@ -17,6 +17,8 @@
 
 -export([start_link/1,stop/0]).
 
+-include("couch_db.hrl").
+
 %% supervisor callbacks
 -export([init/1]).
 
@@ -169,7 +171,7 @@ start_server(InputIniFilename) ->
         UpdateNotifierExes,
         FtSearchQueryServer,
         [lists:flatten(io_lib:format("\t~s=~s~n", [Lang, QueryExe])) || {Lang, QueryExe} <- QueryServers]]),
-    couch_log:debug("~s", [ConfigInfo]),
+    ?LOG_DEBUG("~s", [ConfigInfo]),
 
     case StartResult of
     {ok,_} ->
diff --git a/src/couchdb/couch_util.erl b/src/couchdb/couch_util.erl
index f85cc834..504a675a 100644
--- a/src/couchdb/couch_util.erl
+++ b/src/couchdb/couch_util.erl
@@ -40,11 +40,25 @@ start_link(LibDir) ->
 
 
 new_uuid() ->
-    gen_server:call(couch_util, new_uuid).
+    to_hex(erlang:port_control(drv_port(), 2, <<>>)).
+
+to_hex([]) ->
+    [];
+to_hex([H|T]) ->
+    Digit1 = H div 16,
+    Digit2 = H rem 16,
+    [to_digit(Digit1), to_digit(Digit2) | to_hex(T)].
+
+to_digit(N) when N < 10 ->
+    $0 + N;
+to_digit(N) ->
+    $a + N-10.
+
 
 % returns a random integer
 rand32() ->
-    gen_server:call(couch_util, rand32).
+    [A,B,C,D|_] = erlang:port_control(drv_port(), 2, <<>>),
+    (A bsl 24) + (B bsl 16) + (C bsl 8) + D.
 
 % given a pathname "../foo/bar/" it gives back the fully qualified
 % absolute pathname.
@@ -190,8 +204,6 @@ init([]) ->
 
 terminate(_Reason, _Server) ->
     ok.
 
-handle_call(new_uuid, _From, Server) ->
-    {reply, new_uuid_int(), Server};
 handle_call(rand32, _From, Server) ->
     {reply, rand32_int(), Server}.
 
@@ -205,14 +217,6 @@ handle_info(_Info, State) ->
     {noreply, State}.
 
 
-new_uuid_int() ->
-    % eventually make this a C callout for a real guid (collisions are far less likely
-    % when using a proper generation function). For now we just fake it.
-    Num1 = random:uniform(16#FFFFFFFF + 1) - 1,
-    Num2 = random:uniform(16#FFFFFFFF + 1) - 1,
-    Num3 = random:uniform(16#FFFFFFFF + 1) - 1,
-    Num4 = random:uniform(16#FFFFFFFF + 1) - 1,
-    lists:flatten(io_lib:format("~8.16.0B~8.16.0B~8.16.0B~8.16.0B", [Num1, Num2, Num3, Num4])).
 
 
diff --git a/src/couchdb/couch_view.erl b/src/couchdb/couch_view.erl
index b9f6507f..d8006eba 100644
--- a/src/couchdb/couch_view.erl
+++ b/src/couchdb/couch_view.erl
@@ -141,7 +141,7 @@ handle_call({start_temp_updater, DbName, Lang, Query}, _From, #server{root_dir=R
         [{_, Fd, Count}] ->
             ok
         end,
-    couch_log:debug("Spawning new temp update process for db ~s.", [DbName]),
+    ?LOG_DEBUG("Spawning new temp update process for db ~s.", [DbName]),
     NewPid = spawn_link(couch_view, start_temp_update_loop, [DbName, Fd, Lang, Query]),
     true = ets:insert(couch_views_temp_fd_by_db, {DbName, Fd, Count + 1}),
     add_to_ets(NewPid, DbName, Name),
@@ -154,7 +154,7 @@ handle_call({start_updater, DbName, GroupId}, _From, #server{root_dir=Root}=Serv
     Pid =
     case ets:lookup(couch_views_by_name, {DbName, GroupId}) of
     [] ->
-        couch_log:debug("Spawning new update process for view group ~s in database ~s.", [GroupId, DbName]),
+        ?LOG_DEBUG("Spawning new update process for view group ~s in database ~s.", [GroupId, DbName]),
         NewPid = spawn_link(couch_view, start_update_loop, [Root, DbName, GroupId]),
         add_to_ets(NewPid, DbName, GroupId),
         NewPid;
@@ -168,7 +168,7 @@ handle_cast({reset_indexes, DbName}, #server{root_dir=Root}=Server) ->
     Names = ets:lookup(couch_views_by_db, DbName),
     lists:foreach(
         fun({_DbName, GroupId}) ->
-            couch_log:debug("Killing update process for view group ~s. in database ~s.", [GroupId, DbName]),
+            ?LOG_DEBUG("Killing update process for view group ~s. in database ~s.", [GroupId, DbName]),
             [{_, Pid}] = ets:lookup(couch_views_by_name, {DbName, GroupId}),
             exit(Pid, kill),
             receive {'EXIT', Pid, _} ->
@@ -184,7 +184,7 @@ handle_info({'EXIT', _FromPid, normal}, Server) ->
 handle_info({'EXIT', FromPid, Reason}, #server{root_dir=RootDir}=Server) ->
     case ets:lookup(couch_views_by_updater, FromPid) of
     [] -> % non-updater linked process must have died, we propagate the error
-        couch_log:error("Exit on non-updater process: ~p", [Reason]),
+        ?LOG_ERROR("Exit on non-updater process: ~p", [Reason]),
         exit(Reason);
     [{_, {DbName, "_temp_" ++ _ = GroupId}}] ->
         delete_from_ets(FromPid, DbName, GroupId),
@@ -202,7 +202,7 @@ handle_info({'EXIT', FromPid, Reason}, #server{root_dir=RootDir}=Server) ->
     end,
     {noreply, Server};
 handle_info(Msg, _Server) ->
-    couch_log:error("Bad message received for view module: ~p", [Msg]),
+    ?LOG_ERROR("Bad message received for view module: ~p", [Msg]),
     exit({error, Msg}).
 
 add_to_ets(Pid, DbName, GroupId) ->
@@ -294,7 +294,7 @@ get_notify_pids(Wait) ->
     {Pid, get_updated} ->
         [Pid | get_notify_pids()];
     Else ->
-        couch_log:error("Unexpected message in view updater: ~p", [Else]),
+        ?LOG_ERROR("Unexpected message in view updater: ~p", [Else]),
         exit({error, Else})
     after Wait ->
         exit(wait_timeout)