-rw-r--r--  apps/chttpd/README.md  23
-rw-r--r--  apps/chttpd/ebin/chttpd.appup  4
-rw-r--r--  apps/chttpd/src/chttpd.app.src  7
-rw-r--r--  apps/chttpd/src/chttpd.erl  585
-rw-r--r--  apps/chttpd/src/chttpd_app.erl  21
-rw-r--r--  apps/chttpd/src/chttpd_auth.erl  473
-rw-r--r--  apps/chttpd/src/chttpd_db.erl  1082
-rw-r--r--  apps/chttpd/src/chttpd_external.erl  173
-rw-r--r--  apps/chttpd/src/chttpd_misc.erl  222
-rw-r--r--  apps/chttpd/src/chttpd_oauth.erl  168
-rw-r--r--  apps/chttpd/src/chttpd_rewrite.erl  418
-rw-r--r--  apps/chttpd/src/chttpd_show.erl  311
-rw-r--r--  apps/chttpd/src/chttpd_sup.erl  25
-rw-r--r--  apps/chttpd/src/chttpd_view.erl  306
-rw-r--r--  apps/fabric/README.md  27
-rw-r--r--  apps/fabric/ebin/fabric.appup  3
-rw-r--r--  apps/fabric/include/fabric.hrl  36
-rw-r--r--  apps/fabric/src/fabric.app.src  6
-rw-r--r--  apps/fabric/src/fabric.erl  264
-rw-r--r--  apps/fabric/src/fabric_db_create.erl  79
-rw-r--r--  apps/fabric/src/fabric_db_delete.erl  55
-rw-r--r--  apps/fabric/src/fabric_db_doc_count.erl  46
-rw-r--r--  apps/fabric/src/fabric_db_info.erl  69
-rw-r--r--  apps/fabric/src/fabric_db_meta.erl  49
-rw-r--r--  apps/fabric/src/fabric_dict.erl  51
-rw-r--r--  apps/fabric/src/fabric_doc_attachments.erl  116
-rw-r--r--  apps/fabric/src/fabric_doc_missing_revs.erl  78
-rw-r--r--  apps/fabric/src/fabric_doc_open.erl  120
-rw-r--r--  apps/fabric/src/fabric_doc_open_revs.erl  284
-rw-r--r--  apps/fabric/src/fabric_doc_update.erl  147
-rw-r--r--  apps/fabric/src/fabric_group_info.erl  66
-rw-r--r--  apps/fabric/src/fabric_rpc.erl  402
-rw-r--r--  apps/fabric/src/fabric_util.erl  97
-rw-r--r--  apps/fabric/src/fabric_view.erl  235
-rw-r--r--  apps/fabric/src/fabric_view_all_docs.erl  181
-rw-r--r--  apps/fabric/src/fabric_view_changes.erl  271
-rw-r--r--  apps/fabric/src/fabric_view_map.erl  151
-rw-r--r--  apps/fabric/src/fabric_view_reduce.erl  99
-rw-r--r--  apps/mem3/README.md  25
-rw-r--r--  apps/mem3/ebin/mem3.appup  3
-rw-r--r--  apps/mem3/include/mem3.hrl  44
-rw-r--r--  apps/mem3/src/mem3.app.src  13
-rw-r--r--  apps/mem3/src/mem3.erl  121
-rw-r--r--  apps/mem3/src/mem3_app.erl  23
-rw-r--r--  apps/mem3/src/mem3_cache.erl  106
-rw-r--r--  apps/mem3/src/mem3_httpd.erl  53
-rw-r--r--  apps/mem3/src/mem3_nodes.erl  134
-rw-r--r--  apps/mem3/src/mem3_rep.erl  106
-rw-r--r--  apps/mem3/src/mem3_sup.erl  35
-rw-r--r--  apps/mem3/src/mem3_sync.erl  229
-rw-r--r--  apps/mem3/src/mem3_sync_event.erl  58
-rw-r--r--  apps/mem3/src/mem3_util.erl  153
-rw-r--r--  apps/mem3/test/01-config-default.ini  2
-rw-r--r--  apps/mem3/test/mem3_util_test.erl  154
-rw-r--r--  apps/rexi/README.md  27
-rw-r--r--  apps/rexi/ebin/rexi.appup  5
-rw-r--r--  apps/rexi/src/rexi.app.src  7
-rw-r--r--  apps/rexi/src/rexi.erl  105
-rw-r--r--  apps/rexi/src/rexi_app.erl  25
-rw-r--r--  apps/rexi/src/rexi_monitor.erl  54
-rw-r--r--  apps/rexi/src/rexi_server.erl  100
-rw-r--r--  apps/rexi/src/rexi_sup.erl  29
-rw-r--r--  apps/rexi/src/rexi_utils.erl  53
-rwxr-xr-x  configure  2
-rwxr-xr-x  rebar  bin 83311 -> 85356 bytes
-rw-r--r--  rebar.config  17
-rw-r--r--  rel/reltool.config  2
67 files changed, 15 insertions, 8420 deletions
diff --git a/apps/chttpd/README.md b/apps/chttpd/README.md
deleted file mode 100644
index 2c2a2cf9..00000000
--- a/apps/chttpd/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-## chttpd
-
-chttpd is a cluster-aware http layer for [CouchDB][1]. It is used in [BigCouch][2] as the http front-end.
-
-### Getting Started
-Dependencies:
- * Erlang R13B-03 (or higher)
-
-Build with rebar:
- make
-
-### License
-[Apache 2.0][3]
-
-### Contact
- * [http://cloudant.com][4]
- * [info@cloudant.com][5]
-
-[1]: http://couchdb.apache.org
-[2]: http://github.com/cloudant/bigcouch
-[3]: http://www.apache.org/licenses/LICENSE-2.0.html
-[4]: http://cloudant.com
-[5]: mailto:info@cloudant.com
diff --git a/apps/chttpd/ebin/chttpd.appup b/apps/chttpd/ebin/chttpd.appup
deleted file mode 100644
index e0b5cdfe..00000000
--- a/apps/chttpd/ebin/chttpd.appup
+++ /dev/null
@@ -1,4 +0,0 @@
-{"1.0.3",[{"1.0.2",[
- {load_module, chttpd_external},
- {load_module, chttpd}
-]}],[{"1.0.2",[]}]}.
diff --git a/apps/chttpd/src/chttpd.app.src b/apps/chttpd/src/chttpd.app.src
deleted file mode 100644
index 95ef107a..00000000
--- a/apps/chttpd/src/chttpd.app.src
+++ /dev/null
@@ -1,7 +0,0 @@
-{application, chttpd, [
- {description, "HTTP interface for CouchDB cluster"},
- {vsn, "1.0.3"},
- {registered, [chttpd_sup, chttpd]},
- {applications, [kernel, stdlib, couch, fabric]},
- {mod, {chttpd_app,[]}}
-]}.
\ No newline at end of file
diff --git a/apps/chttpd/src/chttpd.erl b/apps/chttpd/src/chttpd.erl
deleted file mode 100644
index 4d3425b2..00000000
--- a/apps/chttpd/src/chttpd.erl
+++ /dev/null
@@ -1,585 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd).
--include_lib("couch/include/couch_db.hrl").
-
--export([start_link/0, stop/0, handle_request/1, config_change/2,
- primary_header_value/2, header_value/2, header_value/3, qs_value/2,
- qs_value/3, qs/1, path/1, absolute_uri/2, body_length/1,
- verify_is_server_admin/1, unquote/1, quote/1, recv/2,recv_chunked/4,
- error_info/1, parse_form/1, json_body/1, json_body_obj/1, body/1,
- doc_etag/1, make_etag/1, etag_respond/3, partition/1, serve_file/3,
- server_header/0, start_chunked_response/3,send_chunk/2,
- start_response_length/4, send/2, start_json_response/2,
- start_json_response/3, end_json_response/1, send_response/4,
- send_method_not_allowed/2, send_error/2, send_error/4, send_redirect/2,
- send_chunked_error/2, send_json/2,send_json/3,send_json/4]).
-
-start_link() ->
- Options = [
- {loop, fun ?MODULE:handle_request/1},
- {name, ?MODULE},
- {ip, couch_config:get("chttpd", "bind_address", any)},
- {port, couch_config:get("chttpd", "port", "5984")},
- {backlog, list_to_integer(couch_config:get("chttpd", "backlog", "128"))}
- ],
- case mochiweb_http:start(Options) of
- {ok, Pid} ->
- ok = couch_config:register(fun ?MODULE:config_change/2, Pid),
- {ok, Pid};
- {error, Reason} ->
- io:format("Failure to start Mochiweb: ~s~n", [Reason]),
- {error, Reason}
- end.
-
-config_change("chttpd", "bind_address") ->
- ?MODULE:stop();
-config_change("chttpd", "port") ->
- ?MODULE:stop();
-config_change("chttpd", "backlog") ->
- ?MODULE:stop().
-
-stop() ->
- mochiweb_http:stop(?MODULE).
-
-handle_request(MochiReq) ->
- Begin = now(),
-
- AuthenticationFuns = [
- fun chttpd_auth:cookie_authentication_handler/1,
- fun chttpd_auth:default_authentication_handler/1
- ],
-
- % for the path, use the raw path with the query string and fragment
- % removed, but URL quoting left intact
- RawUri = MochiReq:get(raw_path),
- {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
- {HandlerKey, _, _} = mochiweb_util:partition(Path, "/"),
-
- LogForClosedSocket = io_lib:format("mochiweb_recv_error for ~s - ~p ~s", [
- MochiReq:get(peer),
- MochiReq:get(method),
- RawUri
- ]),
-
- Method1 =
- case MochiReq:get(method) of
- % already an atom
- Meth when is_atom(Meth) -> Meth;
-
- % Non standard HTTP verbs aren't atoms (COPY, MOVE etc) so convert when
- % possible (if any module references the atom, then it's existing).
- Meth -> couch_util:to_existing_atom(Meth)
- end,
- increment_method_stats(Method1),
- % alias HEAD to GET as mochiweb takes care of stripping the body
- Method = case Method1 of
- 'HEAD' -> 'GET';
- Other -> Other
- end,
-
- HttpReq = #httpd{
- mochi_req = MochiReq,
- method = Method,
- path_parts = [list_to_binary(chttpd:unquote(Part))
- || Part <- string:tokens(Path, "/")],
- db_url_handlers = db_url_handlers(),
- design_url_handlers = design_url_handlers()
- },
-
- {ok, Resp} =
- try
- case authenticate_request(HttpReq, AuthenticationFuns) of
- #httpd{} = Req ->
- HandlerFun = url_handler(HandlerKey),
- HandlerFun(Req);
- Response ->
- Response
- end
- catch
- throw:{http_head_abort, Resp0} ->
- {ok, Resp0};
- throw:{invalid_json, S} ->
- ?LOG_ERROR("attempted upload of invalid JSON ~s", [S]),
- send_error(HttpReq, {bad_request, "invalid UTF-8 JSON"});
- exit:{mochiweb_recv_error, E} ->
- ?LOG_INFO(LogForClosedSocket ++ " - ~p", [E]),
- exit(normal);
- throw:Error ->
- send_error(HttpReq, Error);
- error:database_does_not_exist ->
- send_error(HttpReq, database_does_not_exist);
- error:badarg ->
- ?LOG_ERROR("Badarg error in HTTP request",[]),
- ?LOG_INFO("Stacktrace: ~p",[erlang:get_stacktrace()]),
- send_error(HttpReq, badarg);
- error:function_clause ->
- ?LOG_ERROR("function_clause error in HTTP request",[]),
- ?LOG_INFO("Stacktrace: ~p",[erlang:get_stacktrace()]),
- send_error(HttpReq, function_clause);
- Tag:Error ->
- ?LOG_ERROR("Uncaught error in HTTP request: ~p",[{Tag, Error}]),
- ?LOG_INFO("Stacktrace: ~p",[erlang:get_stacktrace()]),
- send_error(HttpReq, Error)
- end,
-
- RequestTime = timer:now_diff(now(), Begin)/1000,
- Peer = MochiReq:get(peer),
- Code = Resp:get(code),
- Host = MochiReq:get_header_value("Host"),
- ?LOG_INFO("~s ~s ~s ~s ~B ~B", [Peer, Host,
- atom_to_list(Method1), RawUri, Code, round(RequestTime)]),
- couch_stats_collector:record({couchdb, request_time}, RequestTime),
- couch_stats_collector:increment({httpd, requests}),
- {ok, Resp}.
-
-% Try authentication handlers in order until one returns a result
-authenticate_request(#httpd{user_ctx=#user_ctx{}} = Req, _AuthFuns) ->
- Req;
-authenticate_request(#httpd{} = Req, [AuthFun|Rest]) ->
- authenticate_request(AuthFun(Req), Rest);
-authenticate_request(#httpd{} = Req, []) ->
- case couch_config:get("chttpd", "require_valid_user", "false") of
- "true" ->
- throw({unauthorized, <<"Authentication required.">>});
- "false" ->
- case couch_config:get("admins") of
- [] ->
- Ctx = #user_ctx{roles=[<<"_reader">>, <<"_writer">>, <<"_admin">>]},
- Req#httpd{user_ctx = Ctx};
- _ ->
- Req#httpd{user_ctx=#user_ctx{}}
- end
- end;
-authenticate_request(Response, _AuthFuns) ->
- Response.
-
-increment_method_stats(Method) ->
- couch_stats_collector:increment({httpd_request_methods, Method}).
-
-url_handler("") -> fun chttpd_misc:handle_welcome_req/1;
-url_handler("favicon.ico") -> fun chttpd_misc:handle_favicon_req/1;
-url_handler("_utils") -> fun chttpd_misc:handle_utils_dir_req/1;
-url_handler("_all_dbs") -> fun chttpd_misc:handle_all_dbs_req/1;
-url_handler("_active_tasks") -> fun chttpd_misc:handle_task_status_req/1;
-url_handler("_config") -> fun chttpd_misc:handle_config_req/1;
-url_handler("_replicate") -> fun chttpd_misc:handle_replicate_req/1;
-url_handler("_uuids") -> fun chttpd_misc:handle_uuids_req/1;
-url_handler("_log") -> fun chttpd_misc:handle_log_req/1;
-url_handler("_sleep") -> fun chttpd_misc:handle_sleep_req/1;
-url_handler("_session") -> fun chttpd_auth:handle_session_req/1;
-url_handler("_user") -> fun chttpd_auth:handle_user_req/1;
-url_handler("_oauth") -> fun chttpd_oauth:handle_oauth_req/1;
-url_handler("_restart") -> fun showroom_http:handle_restart_req/1;
-url_handler("_membership") -> fun mem3_httpd:handle_membership_req/1;
-url_handler(_) -> fun chttpd_db:handle_request/1.
-
-db_url_handlers() ->
- [
- {<<"_view_cleanup">>, fun chttpd_db:handle_view_cleanup_req/2},
- {<<"_compact">>, fun chttpd_db:handle_compact_req/2},
- {<<"_design">>, fun chttpd_db:handle_design_req/2},
- {<<"_temp_view">>, fun chttpd_view:handle_temp_view_req/2},
- {<<"_changes">>, fun chttpd_db:handle_changes_req/2},
- {<<"_search">>, fun chttpd_external:handle_search_req/2}
- ].
-
-design_url_handlers() ->
- [
- {<<"_view">>, fun chttpd_view:handle_view_req/3},
- {<<"_show">>, fun chttpd_show:handle_doc_show_req/3},
- {<<"_list">>, fun chttpd_show:handle_view_list_req/3},
- {<<"_update">>, fun chttpd_show:handle_doc_update_req/3},
- {<<"_info">>, fun chttpd_db:handle_design_info_req/3},
- {<<"_rewrite">>, fun chttpd_rewrite:handle_rewrite_req/3}
- ].
-
-% Utilities
-
-partition(Path) ->
- mochiweb_util:partition(Path, "/").
-
-header_value(#httpd{mochi_req=MochiReq}, Key) ->
- MochiReq:get_header_value(Key).
-
-header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
- case MochiReq:get_header_value(Key) of
- undefined -> Default;
- Value -> Value
- end.
-
-primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
- MochiReq:get_primary_header_value(Key).
-
-serve_file(#httpd{mochi_req=MochiReq}=Req, RelativePath, DocumentRoot) ->
- {ok, MochiReq:serve_file(RelativePath, DocumentRoot,
- server_header() ++ chttpd_auth:cookie_auth_header(Req, []))}.
-
-qs_value(Req, Key) ->
- qs_value(Req, Key, undefined).
-
-qs_value(Req, Key, Default) ->
- couch_util:get_value(Key, qs(Req), Default).
-
-qs(#httpd{mochi_req=MochiReq}) ->
- MochiReq:parse_qs().
-
-path(#httpd{mochi_req=MochiReq}) ->
- MochiReq:get(path).
-
-absolute_uri(#httpd{mochi_req=MochiReq}, Path) ->
- XHost = couch_config:get("httpd", "x_forwarded_host", "X-Forwarded-Host"),
- Host = case MochiReq:get_header_value(XHost) of
- undefined ->
- case MochiReq:get_header_value("Host") of
- undefined ->
- {ok, {Address, Port}} = inet:sockname(MochiReq:get(socket)),
- inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
- Value1 ->
- Value1
- end;
- Value -> Value
- end,
- XSsl = couch_config:get("httpd", "x_forwarded_ssl", "X-Forwarded-Ssl"),
- Scheme = case MochiReq:get_header_value(XSsl) of
- "on" -> "https";
- _ ->
- XProto = couch_config:get("httpd", "x_forwarded_proto",
- "X-Forwarded-Proto"),
- case MochiReq:get_header_value(XProto) of
- % Restrict to "https" and "http" schemes only
- "https" -> "https";
- _ -> "http"
- end
- end,
- Scheme ++ "://" ++ Host ++ Path.
-
-unquote(UrlEncodedString) ->
- mochiweb_util:unquote(UrlEncodedString).
-
-quote(UrlDecodedString) ->
- mochiweb_util:quote_plus(UrlDecodedString).
-
-parse_form(#httpd{mochi_req=MochiReq}) ->
- mochiweb_multipart:parse_form(MochiReq).
-
-recv(#httpd{mochi_req=MochiReq}, Len) ->
- MochiReq:recv(Len).
-
-recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
- % Fun is called once with each chunk
- % Fun({Length, Binary}, State)
- % called with Length == 0 on the last time.
- MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
-
-body_length(Req) ->
- case header_value(Req, "Transfer-Encoding") of
- undefined ->
- case header_value(Req, "Content-Length") of
- undefined -> undefined;
- Length -> list_to_integer(Length)
- end;
- "chunked" -> chunked;
- Unknown -> {unknown_transfer_encoding, Unknown}
- end.
-
-body(#httpd{mochi_req=MochiReq, req_body=ReqBody}) ->
- case ReqBody of
- undefined ->
- % Maximum size of document PUT request body (4GB)
- MaxSize = list_to_integer(
- couch_config:get("couchdb", "max_document_size", "4294967296")),
- MochiReq:recv_body(MaxSize);
- _Else ->
- ReqBody
- end.
-
-json_body(Httpd) ->
- ?JSON_DECODE(body(Httpd)).
-
-json_body_obj(Httpd) ->
- case json_body(Httpd) of
- {Props} -> {Props};
- _Else ->
- throw({bad_request, "Request body must be a JSON object"})
- end.
-
-
-doc_etag(#doc{revs={Start, [DiskRev|_]}}) ->
- "\"" ++ ?b2l(couch_doc:rev_to_str({Start, DiskRev})) ++ "\"".
-
-make_etag(Term) ->
- <<SigInt:128/integer>> = erlang:md5(term_to_binary(Term)),
- list_to_binary(io_lib:format("\"~.36B\"",[SigInt])).
-
-etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
- etag_match(Req, binary_to_list(CurrentEtag));
-
-etag_match(Req, CurrentEtag) ->
- EtagsToMatch = string:tokens(
- chttpd:header_value(Req, "If-None-Match", ""), ", "),
- lists:member(CurrentEtag, EtagsToMatch).
-
-etag_respond(Req, CurrentEtag, RespFun) ->
- case etag_match(Req, CurrentEtag) of
- true ->
- % the client has this in their cache.
- chttpd:send_response(Req, 304, [{"Etag", CurrentEtag}], <<>>);
- false ->
- % Run the function.
- RespFun()
- end.
-
-verify_is_server_admin(#httpd{user_ctx=#user_ctx{roles=Roles}}) ->
- case lists:member(<<"_admin">>, Roles) of
- true -> ok;
- false -> throw({unauthorized, <<"You are not a server admin.">>})
- end.
-
-start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Length) ->
- couch_stats_collector:increment({httpd_status_codes, Code}),
- Resp = MochiReq:start_response_length({Code, Headers ++ server_header() ++
- chttpd_auth:cookie_auth_header(Req, Headers), Length}),
- case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
- end,
- {ok, Resp}.
-
-send(Resp, Data) ->
- Resp:send(Data),
- {ok, Resp}.
-
-start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
- couch_stats_collector:increment({httpd_status_codes, Code}),
- Resp = MochiReq:respond({Code, Headers ++ server_header() ++
- chttpd_auth:cookie_auth_header(Req, Headers), chunked}),
- case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
- end,
- {ok, Resp}.
-
-send_chunk(Resp, Data) ->
- Resp:write_chunk(Data),
- {ok, Resp}.
-
-send_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) ->
- couch_stats_collector:increment({httpd_status_codes, Code}),
- if Code >= 400 ->
- ?LOG_DEBUG("httpd ~p error response:~n ~s", [Code, Body]);
- true -> ok
- end,
- {ok, MochiReq:respond({Code, Headers ++ server_header() ++
- chttpd_auth:cookie_auth_header(Req, Headers), Body})}.
-
-send_method_not_allowed(Req, Methods) ->
- send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>,
- ?l2b("Only " ++ Methods ++ " allowed")).
-
-send_json(Req, Value) ->
- send_json(Req, 200, Value).
-
-send_json(Req, Code, Value) ->
- send_json(Req, Code, [], Value).
-
-send_json(Req, Code, Headers, Value) ->
- DefaultHeaders = [
- {"Content-Type", negotiate_content_type(Req)},
- {"Cache-Control", "must-revalidate"}
- ],
- Body = list_to_binary(
- [start_jsonp(Req), ?JSON_ENCODE(Value), end_jsonp(), $\n]
- ),
- send_response(Req, Code, DefaultHeaders ++ Headers, Body).
-
-start_json_response(Req, Code) ->
- start_json_response(Req, Code, []).
-
-start_json_response(Req, Code, Headers) ->
- DefaultHeaders = [
- {"Content-Type", negotiate_content_type(Req)},
- {"Cache-Control", "must-revalidate"}
- ],
- start_jsonp(Req), % Validate before starting chunked.
- %start_chunked_response(Req, Code, DefaultHeaders ++ Headers).
- {ok, Resp} = start_chunked_response(Req, Code, DefaultHeaders ++ Headers),
- case start_jsonp(Req) of
- [] -> ok;
- Start -> send_chunk(Resp, Start)
- end,
- {ok, Resp}.
-
-end_json_response(Resp) ->
- send_chunk(Resp, end_jsonp() ++ [$\r,$\n]),
- %send_chunk(Resp, [$\n]),
- send_chunk(Resp, []).
-
-start_jsonp(Req) ->
- case get(jsonp) of
- undefined -> put(jsonp, qs_value(Req, "callback", no_jsonp));
- _ -> ok
- end,
- case get(jsonp) of
- no_jsonp -> [];
- [] -> [];
- CallBack ->
- try
- validate_callback(CallBack),
- CallBack ++ "("
- catch
- Error ->
- put(jsonp, no_jsonp),
- throw(Error)
- end
- end.
-
-end_jsonp() ->
- Resp = case get(jsonp) of
- no_jsonp -> [];
- [] -> [];
- _ -> ");"
- end,
- put(jsonp, undefined),
- Resp.
-
-validate_callback(CallBack) when is_binary(CallBack) ->
- validate_callback(binary_to_list(CallBack));
-validate_callback([]) ->
- ok;
-validate_callback([Char | Rest]) ->
- case Char of
- _ when Char >= $a andalso Char =< $z -> ok;
- _ when Char >= $A andalso Char =< $Z -> ok;
- _ when Char >= $0 andalso Char =< $9 -> ok;
- _ when Char == $. -> ok;
- _ when Char == $_ -> ok;
- _ when Char == $[ -> ok;
- _ when Char == $] -> ok;
- _ ->
- throw({bad_request, invalid_callback})
- end,
- validate_callback(Rest).
-
-
-error_info({Error, Reason}) when is_list(Reason) ->
- error_info({Error, couch_util:to_binary(Reason)});
-error_info(bad_request) ->
- {400, <<"bad_request">>, <<>>};
-error_info({bad_request, Reason}) ->
- {400, <<"bad_request">>, Reason};
-error_info({query_parse_error, Reason}) ->
- {400, <<"query_parse_error">>, Reason};
-error_info(database_does_not_exist) ->
- {404, <<"not_found">>, <<"Database does not exist.">>};
-error_info(not_found) ->
- {404, <<"not_found">>, <<"missing">>};
-error_info({not_found, Reason}) ->
- {404, <<"not_found">>, Reason};
-error_info({not_acceptable, Reason}) ->
- {406, <<"not_acceptable">>, Reason};
-error_info(conflict) ->
- {409, <<"conflict">>, <<"Document update conflict.">>};
-error_info({conflict, _}) ->
- {409, <<"conflict">>, <<"Document update conflict.">>};
-error_info({forbidden, Msg}) ->
- {403, <<"forbidden">>, Msg};
-error_info({forbidden, Error, Msg}) ->
- {403, Error, Msg};
-error_info({unauthorized, Msg}) ->
- {401, <<"unauthorized">>, Msg};
-error_info(file_exists) ->
- {412, <<"file_exists">>, <<"The database could not be "
- "created, the file already exists.">>};
-error_info({r_quorum_not_met, Reason}) ->
- {412, <<"read_quorum_not_met">>, Reason};
-error_info({w_quorum_not_met, Reason}) ->
- {500, <<"write_quorum_not_met">>, Reason};
-error_info({bad_ctype, Reason}) ->
- {415, <<"bad_content_type">>, Reason};
-error_info({error, illegal_database_name}) ->
- {400, <<"illegal_database_name">>, <<"Only lowercase characters (a-z), "
- "digits (0-9), and any of the characters _, $, (, ), +, -, and / "
- "are allowed">>};
-error_info({missing_stub, Reason}) ->
- {412, <<"missing_stub">>, Reason};
-error_info(not_implemented) ->
- {501, <<"not_implemented">>, <<"this feature is not yet implemented">>};
-error_info({Error, Reason}) ->
- {500, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
-error_info(Error) ->
- {500, <<"unknown_error">>, couch_util:to_binary(Error)}.
-
-send_error(_Req, {already_sent, Resp, _Error}) ->
- {ok, Resp};
-
-send_error(#httpd{mochi_req=MochiReq}=Req, Error) ->
- {Code, ErrorStr, ReasonStr} = error_info(Error),
- Headers = if Code == 401 ->
- case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
- undefined ->
- case couch_config:get("httpd", "WWW-Authenticate", nil) of
- nil ->
- [];
- Type ->
- [{"WWW-Authenticate", Type}]
- end;
- Type ->
- [{"WWW-Authenticate", Type}]
- end;
- true ->
- []
- end,
- send_error(Req, Code, Headers, ErrorStr, ReasonStr).
-
-send_error(Req, Code, ErrorStr, ReasonStr) ->
- send_error(Req, Code, [], ErrorStr, ReasonStr).
-
-send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
- send_json(Req, Code, Headers,
- {[{<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}]}).
-
-% give the option for list functions to output html or other raw errors
-send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
- send_chunk(Resp, Reason),
- send_chunk(Resp, []);
-
-send_chunked_error(Resp, Error) ->
- {Code, ErrorStr, ReasonStr} = error_info(Error),
- JsonError = {[{<<"code">>, Code},
- {<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}]},
- send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
- send_chunk(Resp, []).
-
-send_redirect(Req, Path) ->
- Headers = [{"Location", chttpd:absolute_uri(Req, Path)}],
- send_response(Req, 301, Headers, <<>>).
-
-negotiate_content_type(#httpd{mochi_req=MochiReq}) ->
- %% Determine the appropriate Content-Type header for a JSON response
- %% depending on the Accept header in the request. A request that explicitly
- %% lists the correct JSON MIME type will get that type, otherwise the
- %% response will have the generic MIME type "text/plain"
- AcceptedTypes = case MochiReq:get_header_value("Accept") of
- undefined -> [];
- AcceptHeader -> string:tokens(AcceptHeader, ", ")
- end,
- case lists:member("application/json", AcceptedTypes) of
- true -> "application/json";
- false -> "text/plain;charset=utf-8"
- end.
-
-server_header() ->
- couch_httpd:server_header().
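
A note on the module above: authenticate_request/2 is a small handler chain. Each fun either annotates the request with a user_ctx (the first clause then stops the chain), passes the request along unchanged, or returns a non-request response; the empty-list clause applies the require_valid_user policy. A standalone sketch of that chain pattern with hypothetical names ({claimed, _} stands in for the user_ctx pattern match; this is an illustration, not the module's API):

    -module(chain_sketch).
    -export([first_match/2]).

    %% Try each handler in order; the first "claimed" result
    %% short-circuits the rest, mirroring how authenticate_request/2
    %% stops once a user_ctx has been set.
    first_match(Req, []) ->
        Req;
    first_match(Req, [Handler | Rest]) ->
        case Handler(Req) of
            {claimed, _} = Result -> Result;
            Req2 -> first_match(Req2, Rest)
        end.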
diff --git a/apps/chttpd/src/chttpd_app.erl b/apps/chttpd/src/chttpd_app.erl
deleted file mode 100644
index d7a5aef8..00000000
--- a/apps/chttpd/src/chttpd_app.erl
+++ /dev/null
@@ -1,21 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, StartArgs) ->
- chttpd_sup:start_link(StartArgs).
-
-stop(_State) ->
- ok.
diff --git a/apps/chttpd/src/chttpd_auth.erl b/apps/chttpd/src/chttpd_auth.erl
deleted file mode 100644
index 24fe8c05..00000000
--- a/apps/chttpd/src/chttpd_auth.erl
+++ /dev/null
@@ -1,473 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_auth).
--include_lib("couch/include/couch_db.hrl").
-
--export([special_test_authentication_handler/1, null_authentication_handler/1,
- cookie_authentication_handler/1, default_authentication_handler/1,
- handle_session_req/1, handle_user_req/1, cookie_auth_header/2]).
-
-% used by OAuth handler
--export([get_user/1, ensure_users_db_exists/1]).
-
--import(chttpd, [send_json/2, send_json/4, send_method_not_allowed/2]).
-
-special_test_authentication_handler(Req) ->
- case chttpd:header_value(Req, "WWW-Authenticate") of
- "X-Couch-Test-Auth " ++ NamePass ->
- % NamePass is a colon separated string: "joe schmoe:a password".
- [Name, Pass] = re:split(NamePass, ":", [{return, list}]),
- case {Name, Pass} of
- {"Jan Lehnardt", "apple"} -> ok;
- {"Christopher Lenz", "dog food"} -> ok;
- {"Noah Slater", "biggiesmalls endian"} -> ok;
- {"Chris Anderson", "mp3"} -> ok;
- {"Damien Katz", "pecan pie"} -> ok;
- {_, _} ->
- throw({unauthorized, <<"Name or password is incorrect.">>})
- end,
- Req#httpd{user_ctx=#user_ctx{name=?l2b(Name)}};
- _ ->
- % No X-Couch-Test-Auth credentials sent, give admin access so the
- % previous authentication can be restored after the test
- Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
- end.
-
-null_authentication_handler(Req) ->
- Ctx = #user_ctx{roles=[<<"_reader">>, <<"writer">>, <<"_admin">>]},
- Req#httpd{user_ctx=Ctx}.
-
-default_authentication_handler(Req) ->
- case basic_username_pw(Req) of
- {Username, Password} ->
- case get_user(Username) of
- nil ->
- throw({unauthorized, <<"unknown username">>});
- Props ->
- ExpectedHash = couch_util:get_value(<<"password_sha">>, Props),
- Salt = couch_util:get_value(<<"salt">>, Props),
- PasswordHash = hash_password(?l2b(Password), Salt),
- case couch_util:verify(ExpectedHash, PasswordHash) of
- true ->
- Ctx = #user_ctx{
- name = couch_util:get_value(<<"username">>, Props),
- roles = couch_util:get_value(<<"roles">>, Props)
- },
- Req#httpd{user_ctx=Ctx};
- _ ->
- throw({unauthorized, <<"password is incorrect">>})
- end
- end;
- nil ->
- Req
- end.
-
-cookie_authentication_handler(#httpd{path_parts=[<<"_session">>],
- method='POST'} = Req) ->
- % ignore any cookies sent with login request
- Req;
-cookie_authentication_handler(Req) ->
- try cookie_auth_user(Req) of
- nil ->
- Req;
- {cookie_auth_failed, _} = X ->
- Req#httpd{auth=X};
- Req2 ->
- Req2
- catch error:_ ->
- Req#httpd{auth={cookie_auth_failed, {invalid_cookie, null}}}
- end.
-
-cookie_auth_header(#httpd{auth={cookie_auth_failed, _}}, Headers) ->
- % check for an AuthSession cookie from login handler
- CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""),
- Cookies = mochiweb_cookies:parse_cookie(CookieHeader),
- AuthSession = couch_util:get_value("AuthSession", Cookies),
- if AuthSession == undefined ->
- [generate_cookie_buster()];
- true ->
- []
- end;
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=null}}, _Headers) ->
- [];
-cookie_auth_header(#httpd{user_ctx=Ctx, auth={Secret,true}}, Headers) ->
- % Note: we only set the AuthSession cookie if:
- % * a valid AuthSession cookie has been received
- % * we are outside a 10% timeout window
- % * and if an AuthSession cookie hasn't already been set e.g. by a login
- % or logout handler.
- % The login and logout handlers set the AuthSession cookie themselves.
- CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""),
- Cookies = mochiweb_cookies:parse_cookie(CookieHeader),
- AuthSession = couch_util:get_value("AuthSession", Cookies),
- if AuthSession == undefined ->
- [generate_cookie(Ctx#user_ctx.name, Secret, timestamp())];
- true ->
- []
- end;
-cookie_auth_header(_Req, _Headers) ->
- [].
-
-handle_session_req(#httpd{method='POST', mochi_req=MochiReq, user_ctx=Ctx}=Req) ->
- % login
- Form = parse_form(MochiReq),
- UserName = extract_username(Form),
- case get_user(UserName) of
- nil ->
- throw({forbidden, <<"unknown username">>});
- User ->
- UserSalt = couch_util:get_value(<<"salt">>, User),
- case lists:member(<<"_admin">>, Ctx#user_ctx.roles) of
- true ->
- ok;
- false ->
- Password = extract_password(Form),
- ExpectedHash = couch_util:get_value(<<"password_sha">>, User),
- PasswordHash = hash_password(Password, UserSalt),
- case couch_util:verify(ExpectedHash, PasswordHash) of
- true ->
- ok;
- _Else ->
- throw({forbidden, <<"Name or password is incorrect.">>})
- end
- end,
- Secret = ?l2b(couch_config:get("couch_httpd_auth", "secret")),
- SecretAndSalt = <<Secret/binary, UserSalt/binary>>,
- Cookie = generate_cookie(UserName, SecretAndSalt, timestamp()),
- send_response(Req, [Cookie])
- end;
-handle_session_req(#httpd{method='GET', user_ctx=UserCtx}=Req) ->
- % whoami
- #user_ctx{name = Name, roles = Roles} = UserCtx,
- ForceLogin = chttpd:qs_value(Req, "basic", "false"),
- case {Name, ForceLogin} of
- {null, "true"} ->
- throw({unauthorized, <<"Please login.">>});
- _False ->
- Props = [{name,Name}, {roles,Roles}],
- send_json(Req, {[{ok,true}, {userCtx, {Props}} | Props]})
- end;
-handle_session_req(#httpd{method='DELETE'}=Req) ->
- % logout
- send_response(Req, [generate_cookie_buster()]);
-handle_session_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD,POST,DELETE").
-
-handle_user_req(#httpd{method='POST'}=Req) ->
- DbName = couch_config:get("couch_httpd_auth", "authentication_db", "users"),
- ensure_users_db_exists(DbName),
- create_user(Req, DbName);
-handle_user_req(#httpd{method=Method, path_parts=[_]}=_Req) when
- Method == 'PUT' orelse Method == 'DELETE' ->
- throw({bad_request, <<"Username is missing">>});
-handle_user_req(#httpd{method='PUT', path_parts=[_, UserName]}=Req) ->
- DbName = couch_config:get("couch_httpd_auth", "authentication_db", "users"),
- ensure_users_db_exists(DbName),
- update_user(Req, DbName, UserName);
-handle_user_req(#httpd{method='DELETE', path_parts=[_, UserName]}=Req) ->
- DbName = couch_config:get("couch_httpd_auth", "authentication_db", "users"),
- ensure_users_db_exists(DbName),
- delete_user(Req, DbName, UserName);
-handle_user_req(Req) ->
- send_method_not_allowed(Req, "DELETE,POST,PUT").
-
-get_user(UserName) when is_list(UserName) ->
- get_user(?l2b(UserName));
-get_user(UserName) ->
- case couch_config:get("admins", ?b2l(UserName)) of
- "-hashed-" ++ HashedPwdAndSalt ->
- [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
- [
- {<<"username">>, UserName},
- {<<"roles">>, [<<"_reader">>, <<"_writer">>, <<"_admin">>]},
- {<<"salt">>, ?l2b(Salt)},
- {<<"password_sha">>, ?l2b(HashedPwd)}
- ];
- _ ->
- try ets:lookup(users_cache, UserName) of
- [{UserName, Props}] ->
- Props;
- [] ->
- load_user_from_db(UserName)
- catch error:badarg ->
- load_user_from_db(UserName)
- end
- end.
-
-load_user_from_db(UserName) ->
- DbName = couch_config:get("couch_httpd_auth", "authentication_db", "users"),
- try fabric:open_doc(DbName, UserName, []) of
- {ok, Doc} ->
- ?LOG_INFO("cache miss on username ~s", [UserName]),
- {Props} = couch_doc:to_json_obj(Doc, []),
- Props;
- _Else ->
- ?LOG_INFO("no record of user ~s", [UserName]),
- nil
- catch error:database_does_not_exist ->
- nil
- end.
-
-ensure_users_db_exists(DbName) ->
- try fabric:get_doc_count(DbName) of
- {ok, N} when is_integer(N) ->
- ok;
- {error, _} ->
- fabric:create_db(DbName, [])
- catch error:database_does_not_exist ->
- fabric:create_db(DbName, [])
- end.
-
-% internal functions
-
-basic_username_pw(Req) ->
- case chttpd:header_value(Req, "Authorization") of
- "Basic " ++ Base64Value ->
- case string:tokens(?b2l(base64:decode(Base64Value)),":") of
- [User, Pass] ->
- {User, Pass};
- [User] ->
- {User, ""};
- _ ->
- nil
- end;
- _ ->
- nil
- end.
-
-cookie_auth_user(#httpd{mochi_req=MochiReq}=Req) ->
- case MochiReq:get_cookie_value("AuthSession") of
- undefined ->
- nil;
- Cookie ->
- AuthSession = couch_util:decodeBase64Url(Cookie),
- [User, TimeStr | HashParts] = string:tokens(?b2l(AuthSession), ":"),
- % Verify expiry and hash
- case couch_config:get("couch_httpd_auth", "secret") of
- undefined ->
- ?LOG_DEBUG("AuthSession cookie, but no secret in config!", []),
- {cookie_auth_failed, {internal_server_error, null}};
- SecretStr ->
- case get_user(User) of
- nil ->
- Msg = io_lib:format("no record of user ~s", [User]),
- {cookie_auth_failed, {bad_user, ?l2b(Msg)}};
- Result ->
- Secret = ?l2b(SecretStr),
- UserSalt = couch_util:get_value(<<"salt">>, Result),
- FullSecret = <<Secret/binary, UserSalt/binary>>,
- ExpectedHash = crypto:sha_mac(FullSecret, [User, ":", TimeStr]),
- PasswordHash = ?l2b(string:join(HashParts, ":")),
- case couch_util:verify(ExpectedHash, PasswordHash) of
- true ->
- TimeStamp = erlang:list_to_integer(TimeStr, 16),
- Timeout = erlang:list_to_integer(couch_config:get(
- "couch_httpd_auth", "timeout", "600")),
- CurrentTime = timestamp(),
- if CurrentTime < TimeStamp + Timeout ->
- TimeLeft = TimeStamp + Timeout - CurrentTime,
- Req#httpd{user_ctx=#user_ctx{
- name=?l2b(User),
- roles=couch_util:get_value(<<"roles">>, Result, [])
- }, auth={FullSecret, TimeLeft < Timeout*0.9}};
- true ->
- ?LOG_DEBUG("cookie for ~s was expired", [User]),
- Msg = lists:concat(["Your session has expired after ",
- Timeout div 60, " minutes of inactivity"]),
- {cookie_auth_failed, {credentials_expired, ?l2b(Msg)}}
- end;
- _Else ->
- Msg = <<"cookie password hash was incorrect">>,
- {cookie_auth_failed, {bad_password, Msg}}
- end
- end
- end
- end.
-
-create_user(#httpd{method='POST', mochi_req=MochiReq}=Req, Db) ->
- Form = parse_form(MochiReq),
- {UserName, Password} = extract_username_password(Form),
- case get_user(UserName) of
- nil ->
- Roles = [?l2b(R) || R <- proplists:get_all_values("roles", Form)],
- if Roles /= [] ->
- chttpd:verify_is_server_admin(Req);
- true -> ok end,
- Active = chttpd_view:parse_bool_param(couch_util:get_value("active",
- Form, "true")),
- UserSalt = couch_uuids:random(),
- UserDoc = #doc{
- id = UserName,
- body = {[
- {<<"active">>, Active},
- {<<"email">>, ?l2b(couch_util:get_value("email", Form, ""))},
- {<<"password_sha">>, hash_password(Password, UserSalt)},
- {<<"roles">>, Roles},
- {<<"salt">>, UserSalt},
- {<<"type">>, <<"user">>},
- {<<"username">>, UserName}
- ]}
- },
- {ok, _Rev} = fabric:update_doc(Db, UserDoc, []),
- ?LOG_DEBUG("User ~s (~s) with password, ~s created.", [UserName,
- UserName, Password]),
- send_response(Req);
- _Result ->
- ?LOG_DEBUG("Can't create ~s: already exists", [UserName]),
- throw({forbidden, <<"User already exists.">>})
- end.
-
-delete_user(#httpd{user_ctx=UserCtx}=Req, Db, UserName) ->
- case get_user(UserName) of
- nil ->
- throw({not_found, <<"User doesn't exist">>});
- User ->
- case lists:member(<<"_admin">>,UserCtx#user_ctx.roles) of
- true ->
- ok;
- false when UserCtx#user_ctx.name == UserName ->
- ok;
- false ->
- throw({forbidden, <<"You aren't allowed to delete the user">>})
- end,
- {Pos,Rev} = couch_doc:parse_rev(couch_util:get_value(<<"_rev">>,User)),
- UserDoc = #doc{
- id = UserName,
- revs = {Pos, [Rev]},
- deleted = true
- },
- {ok, _Rev} = fabric:update_doc(Db, UserDoc, []),
- send_response(Req)
- end.
-
-extract_username(Form) ->
- CouchFormat = couch_util:get_value("name", Form),
- try ?l2b(couch_util:get_value("username", Form, CouchFormat))
- catch error:badarg ->
- throw({bad_request, <<"user accounts must have a username">>})
- end.
-
-extract_password(Form) ->
- try ?l2b(couch_util:get_value("password", Form))
- catch error:badarg ->
- throw({bad_request, <<"user accounts must have a password">>})
- end.
-
-extract_username_password(Form) ->
- CouchFormat = couch_util:get_value("name", Form),
- try
- {?l2b(couch_util:get_value("username", Form, CouchFormat)),
- ?l2b(couch_util:get_value("password", Form))}
- catch error:badarg ->
- Msg = <<"user accounts must have a username and password">>,
- throw({bad_request, Msg})
- end.
-
-generate_cookie_buster() ->
- T0 = calendar:now_to_datetime({0,86400,0}),
- Opts = [{max_age,0}, {path,"/"}, {local_time,T0}],
- mochiweb_cookies:cookie("AuthSession", "", Opts).
-
-generate_cookie(User, Secret, TimeStamp) ->
- SessionData = ?b2l(User) ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
- Hash = crypto:sha_mac(Secret, SessionData),
- Cookie = couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)),
- % TODO add {secure, true} to options when SSL is detected
- mochiweb_cookies:cookie("AuthSession", Cookie, [{path, "/"}]).
-
-hash_password(Password, Salt) ->
- ?l2b(couch_util:to_hex(crypto:sha(<<Password/binary, Salt/binary>>))).
-
-parse_form(MochiReq) ->
- case MochiReq:get_primary_header_value("content-type") of
- "application/x-www-form-urlencoded" ++ _ ->
- ReqBody = MochiReq:recv_body(),
- mochiweb_util:parse_qs(ReqBody);
- _ ->
- throw({bad_request, <<"you must specify "
- "application/x-www-form-urlencoded as the primary content-type">>})
- end.
-
-send_response(Req) ->
- send_response(Req, []).
-
-send_response(Req, ExtraHeaders) ->
- {Code, Headers} = case chttpd:qs_value(Req, "next", nil) of
- nil -> {200, []};
- Redirect ->
- {302, [{"Location", chttpd:absolute_uri(Req, Redirect)}]}
- end,
- send_json(Req, Code, Headers ++ ExtraHeaders, {[{ok, true}]}).
-
-timestamp() ->
- {MegaSeconds, Seconds, _} = erlang:now(),
- MegaSeconds * 1000000 + Seconds.
-
-update_user(#httpd{mochi_req=MochiReq, user_ctx=UserCtx}=Req, Db, UserName) ->
- case get_user(UserName) of
- nil ->
- throw({not_found, <<"User doesn't exist">>});
- User ->
- Form = parse_form(MochiReq),
- NewPassword = ?l2b(couch_util:get_value("password", Form, "")),
- OldPassword = ?l2b(couch_util:get_value("old_password", Form, "")),
-
- UserSalt = couch_util:get_value(<<"salt">>, User),
- CurrentPasswordHash = couch_util:get_value(<<"password_sha">>, User),
-
- Roles = [?l2b(R) || R <- proplists:get_all_values("roles", Form)],
- if Roles /= [] ->
- chttpd:verify_is_server_admin(Req);
- true -> ok end,
-
- PasswordHash = case NewPassword of
- <<>> ->
- CurrentPasswordHash;
- _Else ->
- case lists:member(<<"_admin">>,UserCtx#user_ctx.roles) of
- true ->
- hash_password(NewPassword, UserSalt);
- false when UserCtx#user_ctx.name == UserName ->
- %% for user we test old password before allowing change
- case hash_password(OldPassword, UserSalt) of
- CurrentPasswordHash ->
- hash_password(NewPassword, UserSalt);
- _ ->
- throw({forbidden, <<"Old password is incorrect.">>})
- end;
- _ ->
- Msg = <<"You aren't allowed to change this password.">>,
- throw({forbidden, Msg})
- end
- end,
-
- Active = chttpd_view:parse_bool_param(couch_util:get_value("active",
- Form, "true")),
- {Pos,Rev} = couch_doc:parse_rev(couch_util:get_value(<<"_rev">>,User)),
- UserDoc = #doc{
- id = UserName,
- revs = {Pos,[Rev]},
- body = {[
- {<<"active">>, Active},
- {<<"email">>, ?l2b(couch_util:get_value("email", Form, ""))},
- {<<"password_sha">>, PasswordHash},
- {<<"roles">>, Roles},
- {<<"salt">>, UserSalt},
- {<<"type">>, <<"user">>},
- {<<"username">>, UserName}
- ]}
- },
- {ok, _Rev} = fabric:update_doc(Db, UserDoc, []),
- ?LOG_DEBUG("User ~s updated.", [UserName]),
- send_response(Req)
- end.
diff --git a/apps/chttpd/src/chttpd_db.erl b/apps/chttpd/src/chttpd_db.erl
deleted file mode 100644
index fc219621..00000000
--- a/apps/chttpd/src/chttpd_db.erl
+++ /dev/null
@@ -1,1082 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_db).
--include_lib("couch/include/couch_db.hrl").
-
--export([handle_request/1, handle_compact_req/2, handle_design_req/2,
- db_req/2, couch_doc_open/4,handle_changes_req/2,
- update_doc_result_to_json/1, update_doc_result_to_json/2,
- handle_design_info_req/3, handle_view_cleanup_req/2]).
-
--import(chttpd,
- [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
- start_json_response/2,send_chunk/2,end_json_response/1,
- start_chunked_response/3, absolute_uri/2, send/2,
- start_response_length/4]).
-
--record(doc_query_args, {
- options = [],
- rev = nil,
- open_revs = [],
- update_type = interactive_edit,
- atts_since = nil
-}).
-
-% Database request handlers
-handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
- db_url_handlers=DbUrlHandlers}=Req)->
- case {Method, RestParts} of
- {'PUT', []} ->
- create_db_req(Req, DbName);
- {'DELETE', []} ->
- delete_db_req(Req, DbName);
- {_, []} ->
- do_db_req(Req, fun db_req/2);
- {_, [SecondPart|_]} ->
- Handler = couch_util:get_value(SecondPart, DbUrlHandlers, fun db_req/2),
- do_db_req(Req, Handler)
- end.
-
-handle_changes_req(#httpd{method='GET'}=Req, Db) ->
- #changes_args{filter=Raw, style=Style} = Args0 = parse_changes_query(Req),
- ChangesArgs = Args0#changes_args{
- filter = couch_changes:configure_filter(Raw, Style, Req, Db)
- },
- case ChangesArgs#changes_args.feed of
- "normal" ->
- T0 = now(),
- {ok, Info} = fabric:get_db_info(Db),
- Etag = chttpd:make_etag(Info),
- DeltaT = timer:now_diff(now(), T0) / 1000,
- couch_stats_collector:record({couchdb, dbinfo}, DeltaT),
- chttpd:etag_respond(Req, Etag, fun() ->
- {ok, Resp} = chttpd:start_json_response(Req, 200, [{"Etag",Etag}]),
- fabric:changes(Db, fun changes_callback/2, {"normal", Resp},
- ChangesArgs)
- end);
- Feed ->
- % "longpoll" or "continuous"
- {ok, Resp} = chttpd:start_json_response(Req, 200),
- fabric:changes(Db, fun changes_callback/2, {Feed, Resp}, ChangesArgs)
- end;
-handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-% callbacks for continuous feed (newline-delimited JSON Objects)
-changes_callback(start, {"continuous", _} = Acc) ->
- {ok, Acc};
-changes_callback({change, Change}, {"continuous", Resp} = Acc) ->
- send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]),
- {ok, Acc};
-changes_callback({stop, EndSeq0}, {"continuous", Resp}) ->
- EndSeq = case is_old_couch(Resp) of true -> 0; false -> EndSeq0 end,
- send_chunk(Resp, [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]),
- end_json_response(Resp);
-
-% callbacks for longpoll and normal (single JSON Object)
-changes_callback(start, {_, Resp}) ->
- send_chunk(Resp, "{\"results\":[\n"),
- {ok, {"", Resp}};
-changes_callback({change, Change}, {Prepend, Resp}) ->
- send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]),
- {ok, {",\r\n", Resp}};
-changes_callback({stop, EndSeq}, {_, Resp}) ->
- case is_old_couch(Resp) of
- true ->
- send_chunk(Resp, "\n],\n\"last_seq\":0}\n");
- false ->
- send_chunk(Resp, io_lib:format("\n],\n\"last_seq\":\"~s\"}\n",[EndSeq]))
- end,
- end_json_response(Resp);
-
-changes_callback(timeout, {Prepend, Resp}) ->
- send_chunk(Resp, "\n"),
- {ok, {Prepend, Resp}};
-changes_callback({error, Reason}, Resp) ->
- chttpd:send_chunked_error(Resp, {error, Reason}).
-
-is_old_couch(Resp) ->
- MochiReq = Resp:get(request),
- case MochiReq:get_header_value("user-agent") of
- undefined ->
- false;
- UserAgent ->
- string:str(UserAgent, "CouchDB/0") > 0
- end.
-
-handle_compact_req(Req, _) ->
- Msg = <<"Compaction must be triggered on a per-shard basis in BigCouch">>,
- couch_httpd:send_error(Req, 403, forbidden, Msg).
-
-handle_view_cleanup_req(Req, Db) ->
- ok = fabric:cleanup_index_files(Db),
- send_json(Req, 202, {[{ok, true}]}).
-
-handle_design_req(#httpd{
- path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest],
- design_url_handlers = DesignUrlHandlers
- }=Req, Db) ->
- case fabric:open_doc(Db, <<"_design/", Name/binary>>, []) of
- {ok, DDoc} ->
- % TODO we'll trigger a badarity here if ddoc attachment starts with "_",
- % or if user tries an unknown Action
- Handler = couch_util:get_value(Action, DesignUrlHandlers, fun db_req/2),
- Handler(Req, Db, DDoc);
- Error ->
- throw(Error)
- end;
-
-handle_design_req(Req, Db) ->
- db_req(Req, Db).
-
-handle_design_info_req(#httpd{method='GET'}=Req, Db, #doc{id=Id} = DDoc) ->
- {ok, GroupInfoList} = fabric:get_view_group_info(Db, DDoc),
- send_json(Req, 200, {[
- {name, Id},
- {view_index, {GroupInfoList}}
- ]});
-
-handle_design_info_req(Req, _Db, _DDoc) ->
- send_method_not_allowed(Req, "GET").
-
-create_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
- N = couch_httpd:qs_value(Req, "n", couch_config:get("cluster", "n", "3")),
- Q = couch_httpd:qs_value(Req, "q", couch_config:get("cluster", "q", "8")),
- case fabric:create_db(DbName, [{user_ctx,UserCtx}, {n,N}, {q,Q}]) of
- ok ->
- DocUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
- send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
- {error, file_exists} ->
- chttpd:send_error(Req, file_exists);
- Error ->
- throw(Error)
- end.
-
-delete_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
- case fabric:delete_db(DbName, [{user_ctx, UserCtx}]) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- Error ->
- throw(Error)
- end.
-
-do_db_req(#httpd{path_parts=[DbName|_]}=Req, Fun) ->
- Fun(Req, #db{name=DbName}).
-
-db_req(#httpd{method='GET',path_parts=[DbName]}=Req, _Db) ->
- % measure the time required to generate the etag, see if it's worth it
- T0 = now(),
- {ok, DbInfo} = fabric:get_db_info(DbName),
- DeltaT = timer:now_diff(now(), T0) / 1000,
- couch_stats_collector:record({couchdb, dbinfo}, DeltaT),
- send_json(Req, {DbInfo});
-
-db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- Doc = couch_doc:from_json_obj(couch_httpd:json_body(Req)),
- Doc2 = case Doc#doc.id of
- <<"">> ->
- Doc#doc{id=couch_uuids:new(), revs={0, []}};
- _ ->
- Doc
- end,
- DocId = Doc2#doc.id,
- case couch_httpd:qs_value(Req, "batch") of
- "ok" ->
- % async_batching
- spawn(fun() ->
- case catch(fabric:update_doc(Db, Doc2, [{user_ctx, Ctx}])) of
- {ok, _} -> ok;
- Error ->
- ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
- end
- end),
-
- send_json(Req, 202, [], {[
- {ok, true},
- {id, DocId}
- ]});
- _Normal ->
- % normal
- {ok, NewRev} = fabric:update_doc(Db, Doc2, [{user_ctx, Ctx}]),
- DocUrl = absolute_uri(Req, [$/, DbName, $/, DocId]),
- send_json(Req, 201, [{"Location", DocUrl}], {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(NewRev)}
- ]})
- end;
-
-db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
- send_json(Req, 201, {[
- {ok, true},
- {instance_start_time, <<"0">>}
- ]});
-
-db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req, Db) ->
- couch_stats_collector:increment({httpd, bulk_requests}),
- couch_httpd:validate_ctype(Req, "application/json"),
- {JsonProps} = chttpd:json_body_obj(Req),
- DocsArray = couch_util:get_value(<<"docs">>, JsonProps),
- case chttpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- Options = [full_commit, {user_ctx,Ctx}];
- "false" ->
- Options = [delay_commit, {user_ctx,Ctx}];
- _ ->
- Options = [{user_ctx,Ctx}]
- end,
- case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
- true ->
- Docs = lists:map(
- fun({ObjProps} = JsonObj) ->
- Doc = couch_doc:from_json_obj(JsonObj),
- validate_attachment_names(Doc),
- Id = case Doc#doc.id of
- <<>> -> couch_uuids:new();
- Id0 -> Id0
- end,
- case couch_util:get_value(<<"_rev">>, ObjProps) of
- undefined ->
- Revs = {0, []};
- Rev ->
- {Pos, RevId} = couch_doc:parse_rev(Rev),
- Revs = {Pos, [RevId]}
- end,
- Doc#doc{id=Id,revs=Revs}
- end,
- DocsArray),
- Options2 =
- case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
- true -> [all_or_nothing|Options];
- _ -> Options
- end,
- case fabric:update_docs(Db, Docs, Options2) of
- {ok, Results} ->
- % output the results
- DocResults = lists:zipwith(fun update_doc_result_to_json/2,
- Docs, Results),
- send_json(Req, 201, DocResults);
- {aborted, Errors} ->
- ErrorsJson =
- lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 417, ErrorsJson)
- end;
- false ->
- Docs = [couch_doc:from_json_obj(JsonObj) || JsonObj <- DocsArray],
- [validate_attachment_names(D) || D <- Docs],
- {ok, Errors} = fabric:update_docs(Db, Docs, [replicated_changes|Options]),
- ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 201, ErrorsJson)
- end;
-
-db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- {IdsRevs} = chttpd:json_body_obj(Req),
- IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
- case fabric:purge_docs(Db, IdsRevs2) of
- {ok, PurgeSeq, PurgedIdsRevs} ->
- PurgedIdsRevs2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs}
- <- PurgedIdsRevs],
- send_json(Req, 200, {[
- {<<"purge_seq">>, PurgeSeq},
- {<<"purged">>, {PurgedIdsRevs2}}
- ]});
- Error ->
- throw(Error)
- end;
-
-db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
- all_docs_view(Req, Db, nil);
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
- {Fields} = chttpd:json_body_obj(Req),
- case couch_util:get_value(<<"keys">>, Fields, nil) of
- Keys when is_list(Keys) ->
- all_docs_view(Req, Db, Keys);
- nil ->
- all_docs_view(Req, Db, nil);
- _ ->
- throw({bad_request, "`keys` body member must be an array."})
- end;
-
-db_req(#httpd{path_parts=[_,<<"_all_docs">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "GET,HEAD,POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
- {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
- {ok, Results} = fabric:get_missing_revs(Db, JsonDocIdRevs),
- Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- Results],
- send_json(Req, {[
- {missing_revs, {Results2}}
- ]});
-
-db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
- {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
- {ok, Results} = fabric:get_missing_revs(Db, JsonDocIdRevs),
- Results2 =
- lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
- {Id,
- {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
- if PossibleAncestors == [] ->
- [];
- true ->
- [{possible_ancestors,
- couch_doc:revs_to_strs(PossibleAncestors)}]
- end}}
- end, Results),
- send_json(Req, {Results2});
-
-db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>],user_ctx=Ctx}=Req,
- Db) ->
- SecObj = couch_httpd:json_body(Req),
- ok = fabric:set_security(Db, SecObj, [{user_ctx,Ctx}]),
- send_json(Req, {[{<<"ok">>, true}]});
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
- send_json(Req, fabric:get_security(Db));
-
-db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "PUT,GET");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>],user_ctx=Ctx}=Req,
- Db) ->
- Limit = chttpd:json_body(Req),
- ok = fabric:set_revs_limit(Db, Limit, [{user_ctx,Ctx}]),
- send_json(Req, {[{<<"ok">>, true}]});
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
- send_json(Req, fabric:get_revs_limit(Db));
-
-db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
- send_method_not_allowed(Req, "PUT,GET");
-
-% vanilla CouchDB sends a 301 here, but we just handle the request
-db_req(#httpd{path_parts=[DbName,<<"_design/",Name/binary>>|Rest]}=Req, Db) ->
- db_req(Req#httpd{path_parts=[DbName, <<"_design">>, Name | Rest]}, Db);
-
-% Special case to enable using an unencoded slash in the URL of design docs,
-% as slashes in document IDs must otherwise be URL encoded.
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
- db_doc_req(Req, Db, <<"_design/",Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
- db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
-
-
-% Special case to allow for accessing local documents without %2F
-% encoding the docid. Throws out requests that don't have the second
-% path part or that specify an attachment name.
-db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
- throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
- throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
- db_doc_req(Req, Db, <<"_local/", Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
- throw({bad_request, <<"_local documents do not accept attachments.">>});
-
-db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
- db_doc_req(Req, Db, DocId);
-
-db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
- db_attachment_req(Req, Db, DocId, FileNameParts).
-
-all_docs_view(Req, Db, Keys) ->
- % measure the time required to generate the etag, see if it's worth it
- T0 = now(),
- {ok, Info} = fabric:get_db_info(Db),
- Etag = couch_httpd:make_etag(Info),
- DeltaT = timer:now_diff(now(), T0) / 1000,
- couch_stats_collector:record({couchdb, dbinfo}, DeltaT),
- QueryArgs = chttpd_view:parse_view_params(Req, Keys, map),
- chttpd:etag_respond(Req, Etag, fun() ->
- {ok, Resp} = chttpd:start_json_response(Req, 200, [{"Etag",Etag}]),
- fabric:all_docs(Db, fun all_docs_callback/2, {nil, Resp}, QueryArgs)
- end).
-
-all_docs_callback({total_and_offset, Total, Offset}, {_, Resp}) ->
- Chunk = "{\"total_rows\":~p,\"offset\":~p,\"rows\":[\r\n",
- send_chunk(Resp, io_lib:format(Chunk, [Total, Offset])),
- {ok, {"", Resp}};
-all_docs_callback({row, Row}, {Prepend, Resp}) ->
- send_chunk(Resp, [Prepend, ?JSON_ENCODE(Row)]),
- {ok, {",\r\n", Resp}};
-all_docs_callback(complete, {_, Resp}) ->
- send_chunk(Resp, "\r\n]}"),
- end_json_response(Resp);
-all_docs_callback({error, Reason}, {_, Resp}) ->
- chttpd:send_chunked_error(Resp, {error, Reason}).
-
-db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
- % check for the existence of the doc to handle the 404 case.
- couch_doc_open(Db, DocId, nil, []),
- case chttpd:qs_value(Req, "rev") of
- undefined ->
- Body = {[{<<"_deleted">>,true}]};
- Rev ->
- Body = {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}
- end,
- update_doc(Req, Db, DocId, couch_doc_from_req(Req, DocId, Body));
-
-db_doc_req(#httpd{method='GET'}=Req, Db, DocId) ->
- #doc_query_args{
- rev = Rev,
- open_revs = Revs,
- options = Options,
- atts_since = AttsSince
- } = parse_doc_query(Req),
- case Revs of
- [] ->
- Options2 =
- if AttsSince /= nil ->
- [{atts_since, AttsSince}, attachments | Options];
- true -> Options
- end,
- Doc = couch_doc_open(Db, DocId, Rev, Options2),
- send_doc(Req, Doc, Options2);
- _ ->
- {ok, Results} = fabric:open_revs(Db, DocId, Revs, Options),
- AcceptedTypes = case couch_httpd:header_value(Req, "Accept") of
- undefined -> [];
- AcceptHeader -> string:tokens(AcceptHeader, "; ")
- end,
- case lists:member("multipart/mixed", AcceptedTypes) of
- false ->
- {ok, Resp} = start_json_response(Req, 200),
- send_chunk(Resp, "["),
-            % We loop through the docs. The first time through the separator
-            % is the empty string, then a comma on subsequent iterations.
- lists:foldl(
- fun(Result, AccSeparator) ->
- case Result of
- {ok, Doc} ->
- JsonDoc = couch_doc:to_json_obj(Doc, Options),
- Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
- send_chunk(Resp, AccSeparator ++ Json);
- {{not_found, missing}, RevId} ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
- send_chunk(Resp, AccSeparator ++ Json)
- end,
- "," % AccSeparator now has a comma
- end,
- "", Results),
- send_chunk(Resp, "]"),
- end_json_response(Resp);
- true ->
- send_docs_multipart(Req, Results, Options)
- end
- end;
-
-db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
- couch_httpd:validate_referer(Req),
- couch_doc:validate_docid(DocId),
- couch_httpd:validate_ctype(Req, "multipart/form-data"),
- Form = couch_httpd:parse_form(Req),
- case proplists:is_defined("_doc", Form) of
- true ->
- Json = ?JSON_DECODE(couch_util:get_value("_doc", Form)),
- Doc = couch_doc_from_req(Req, DocId, Json);
- false ->
- Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
- {ok, [{ok, Doc}]} = fabric:open_revs(Db, DocId, [Rev], [])
- end,
- UpdatedAtts = [
- #att{name=validate_attachment_name(Name),
- type=list_to_binary(ContentType),
- data=Content} ||
- {Name, {ContentType, _}, Content} <-
- proplists:get_all_values("_attachments", Form)
- ],
- #doc{atts=OldAtts} = Doc,
- OldAtts2 = lists:flatmap(
- fun(#att{name=OldName}=Att) ->
- case [1 || A <- UpdatedAtts, A#att.name == OldName] of
- [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
- _ -> [] % the attachment was in the UpdatedAtts, drop it
- end
- end, OldAtts),
- NewDoc = Doc#doc{
- atts = UpdatedAtts ++ OldAtts2
- },
- {ok, NewRev} = fabric:update_doc(Db, NewDoc, [{user_ctx,Ctx}]),
-
- send_json(Req, 201, [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewRev)) ++ "\""}], {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(NewRev)}
- ]});
-
-db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
- #doc_query_args{
- update_type = UpdateType
- } = parse_doc_query(Req),
- couch_doc:validate_docid(DocId),
-
- W = couch_httpd:qs_value(Req, "w", couch_config:get("cluster", "w", "2")),
- Options = [{user_ctx,Ctx}, {w,W}],
-
- Loc = absolute_uri(Req, [$/, Db#db.name, $/, DocId]),
- RespHeaders = [{"Location", Loc}],
- case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
- ("multipart/related;" ++ _) = ContentType ->
- {ok, Doc0} = couch_doc:doc_from_multi_part_stream(ContentType,
- fun() -> receive_request_data(Req) end),
- Doc = couch_doc_from_req(Req, DocId, Doc0),
- update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType);
- _Else ->
- case couch_httpd:qs_value(Req, "batch") of
- "ok" ->
- % batch
- Doc = couch_doc_from_req(Req, DocId, couch_httpd:json_body(Req)),
-
- spawn(fun() ->
- case catch(fabric:update_doc(Db, Doc, Options)) of
- {ok, _} -> ok;
- Error ->
- ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
- end
- end),
- send_json(Req, 202, [], {[
- {ok, true},
- {id, DocId}
- ]});
- _Normal ->
- % normal
- Body = couch_httpd:json_body(Req),
- Doc = couch_doc_from_req(Req, DocId, Body),
- update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType)
- end
- end;
-
-db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
- SourceRev =
- case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
- missing_rev -> nil;
- Rev -> Rev
- end,
- {TargetDocId, TargetRevs} = parse_copy_destination_header(Req),
- % open old doc
- Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
- % save new doc
- {ok, NewTargetRev} = fabric:update_doc(Db,
- Doc#doc{id=TargetDocId, revs=TargetRevs}, [{user_ctx,Ctx}]),
- % respond
- send_json(Req, 201,
- [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}],
- update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}));
-
-db_doc_req(Req, _Db, _DocId) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
-
-send_doc(Req, Doc, Options) ->
- case Doc#doc.meta of
- [] ->
- DiskEtag = couch_httpd:doc_etag(Doc),
- % output etag only when we have no meta
- couch_httpd:etag_respond(Req, DiskEtag, fun() ->
- send_doc_efficiently(Req, Doc, [{"Etag", DiskEtag}], Options)
- end);
- _ ->
- send_doc_efficiently(Req, Doc, [], Options)
- end.
-
-send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-send_doc_efficiently(Req, #doc{atts=Atts}=Doc, Headers, Options) ->
- case lists:member(attachments, Options) of
- true ->
- AcceptedTypes = case couch_httpd:header_value(Req, "Accept") of
- undefined -> [];
- AcceptHeader -> string:tokens(AcceptHeader, ", ")
- end,
- case lists:member("multipart/related", AcceptedTypes) of
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
- true ->
- Boundary = couch_uuids:random(),
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
- [attachments, follows|Options])),
- {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
- Boundary,JsonBytes, Atts,false),
- CType = {<<"Content-Type">>, ContentType},
- {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
- couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
- fun(Data) -> couch_httpd:send(Resp, Data) end, false)
- end;
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
- end.
-
-send_docs_multipart(Req, Results, Options) ->
- OuterBoundary = couch_uuids:random(),
- InnerBoundary = couch_uuids:random(),
- CType = {"Content-Type",
- "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
- {ok, Resp} = start_chunked_response(Req, 200, [CType]),
- couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
- lists:foreach(
- fun({ok, #doc{atts=Atts}=Doc}) ->
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
- [attachments,follows|Options])),
- {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
- InnerBoundary, JsonBytes, Atts, false),
- couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
- ContentType/binary, "\r\n\r\n">>),
- couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
- fun(Data) -> couch_httpd:send_chunk(Resp, Data)
- end, false),
- couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
- ({{not_found, missing}, RevId}) ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
- couch_httpd:send_chunk(Resp,
- [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json,
- <<"\r\n--", OuterBoundary/binary>>])
- end, Results),
- couch_httpd:send_chunk(Resp, <<"--">>),
- couch_httpd:last_chunk(Resp).
-
-receive_request_data(Req) ->
- {couch_httpd:recv(Req, 0), fun() -> receive_request_data(Req) end}.
-
-update_doc_result_to_json({{Id, Rev}, Error}) ->
- {_Code, Err, Msg} = chttpd:error_info(Error),
- {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
- {error, Err}, {reason, Msg}]}.
-
-update_doc_result_to_json(#doc{id=DocId}, Result) ->
- update_doc_result_to_json(DocId, Result);
-update_doc_result_to_json(DocId, {ok, NewRev}) ->
- {[{id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
-update_doc_result_to_json(DocId, Error) ->
- {_Code, ErrorStr, Reason} = chttpd:error_info(Error),
- {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
-
-
-update_doc(Req, Db, DocId, Json) ->
- update_doc(Req, Db, DocId, Json, []).
-
-update_doc(Req, Db, DocId, Doc, Headers) ->
- update_doc(Req, Db, DocId, Doc, Headers, interactive_edit).
-
-update_doc(#httpd{user_ctx=Ctx} = Req, Db, DocId, #doc{deleted=Deleted}=Doc,
- Headers, UpdateType) ->
- W = couch_httpd:qs_value(Req, "w", couch_config:get("cluster", "w", "2")),
- case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- Options = [full_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
- "false" ->
- Options = [delay_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
- _ ->
- Options = [UpdateType, {user_ctx,Ctx}, {w,W}]
- end,
- {ok, NewRev} = fabric:update_doc(Db, Doc, Options),
- NewRevStr = couch_doc:rev_to_str(NewRev),
- ResponseHeaders = [{"Etag", <<"\"", NewRevStr/binary, "\"">>} | Headers],
- send_json(Req, if Deleted -> 200; true -> 201 end, ResponseHeaders, {[
- {ok, true},
- {id, DocId},
- {rev, NewRevStr}
- ]}).
-
-couch_doc_from_req(Req, DocId, #doc{revs=Revs} = Doc) ->
- validate_attachment_names(Doc),
- ExplicitDocRev =
- case Revs of
- {Start,[RevId|_]} -> {Start, RevId};
- _ -> undefined
- end,
- case extract_header_rev(Req, ExplicitDocRev) of
- missing_rev ->
- Revs2 = {0, []};
- ExplicitDocRev ->
- Revs2 = Revs;
- {Pos, Rev} ->
- Revs2 = {Pos, [Rev]}
- end,
- Doc#doc{id=DocId, revs=Revs2};
-couch_doc_from_req(Req, DocId, Json) ->
- couch_doc_from_req(Req, DocId, couch_doc:from_json_obj(Json)).
-
-
-% Useful for debugging
-% couch_doc_open(Db, DocId) ->
-% couch_doc_open(Db, DocId, nil, []).
-
-couch_doc_open(Db, DocId, Rev, Options) ->
- case Rev of
- nil -> % open most recent rev
- case fabric:open_doc(Db, DocId, Options) of
- {ok, Doc} ->
- Doc;
- Error ->
- throw(Error)
- end;
- _ -> % open a specific rev (deletions come back as stubs)
- case fabric:open_revs(Db, DocId, [Rev], Options) of
- {ok, [{ok, Doc}]} ->
- Doc;
- {ok, [{{not_found, missing}, Rev}]} ->
- throw(not_found);
- {ok, [Else]} ->
- throw(Else)
- end
- end.
-
-% Attachment request handlers
-
-db_attachment_req(#httpd{method='GET'}=Req, Db, DocId, FileNameParts) ->
- FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1,
- FileNameParts),"/")),
- #doc_query_args{
- rev=Rev,
- options=Options
- } = parse_doc_query(Req),
- #doc{
- atts=Atts
- } = Doc = couch_doc_open(Db, DocId, Rev, Options),
- case [A || A <- Atts, A#att.name == FileName] of
- [] ->
- throw({not_found, "Document is missing attachment"});
- [#att{type=Type, encoding=Enc, disk_len=DiskLen, att_len=AttLen}=Att] ->
- Etag = chttpd:doc_etag(Doc),
- ReqAcceptsAttEnc = lists:member(
- atom_to_list(Enc),
- couch_httpd:accepted_encodings(Req)
- ),
- Headers = [
- {"ETag", Etag},
- {"Cache-Control", "must-revalidate"},
- {"Content-Type", binary_to_list(Type)}
- ] ++ case ReqAcceptsAttEnc of
- true ->
- [{"Content-Encoding", atom_to_list(Enc)}];
- _ ->
- []
- end,
- Len = case {Enc, ReqAcceptsAttEnc} of
- {identity, _} ->
- % stored and served in identity form
- DiskLen;
- {_, false} when DiskLen =/= AttLen ->
- % Stored encoded, but client doesn't accept the encoding we used,
- % so we need to decode on the fly. DiskLen is the identity length
- % of the attachment.
- DiskLen;
- {_, true} ->
- % Stored and served encoded. AttLen is the encoded length.
- AttLen;
- _ ->
- % We received an encoded attachment and stored it as such, so we
- % don't know the identity length. The client doesn't accept the
- % encoding, and since we cannot serve a correct Content-Length
- % header we'll fall back to a chunked response.
- undefined
- end,
- AttFun = case ReqAcceptsAttEnc of
- false ->
- fun couch_doc:att_foldl_decode/3;
- true ->
- fun couch_doc:att_foldl/3
- end,
- couch_httpd:etag_respond(
- Req,
- Etag,
- fun() ->
- case Len of
- undefined ->
- {ok, Resp} = start_chunked_response(Req, 200, Headers),
- AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, ok),
- couch_httpd:last_chunk(Resp);
- _ ->
- {ok, Resp} = start_response_length(Req, 200, Headers, Len),
- AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, ok)
- end
- end
- )
- end;
-
-
-db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNameParts)
- when (Method == 'PUT') or (Method == 'DELETE') ->
- FileName = validate_attachment_name(
- mochiweb_util:join(
- lists:map(fun binary_to_list/1,
- FileNameParts),"/")),
-
- NewAtt = case Method of
- 'DELETE' ->
- [];
- _ ->
- [#att{
- name=FileName,
- type = case couch_httpd:header_value(Req,"Content-Type") of
- undefined ->
- % We could throw an error here or guess by the FileName.
- % Currently, just giving it a default.
- <<"application/octet-stream">>;
- CType ->
- list_to_binary(CType)
- end,
- data = fabric:att_receiver(Req, chttpd:body_length(Req)),
- att_len = case couch_httpd:header_value(Req,"Content-Length") of
- undefined ->
- undefined;
- Length ->
- list_to_integer(Length)
- end,
- md5 = get_md5_header(Req),
- encoding = case string:to_lower(string:strip(
- couch_httpd:header_value(Req,"Content-Encoding","identity")
- )) of
- "identity" ->
- identity;
- "gzip" ->
- gzip;
- _ ->
- throw({
- bad_ctype,
- "Only gzip and identity content-encodings are supported"
- })
- end
- }]
- end,
-
- Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
- missing_rev -> % make the new doc
- couch_doc:validate_docid(DocId),
- #doc{id=DocId};
- Rev ->
- case fabric:open_revs(Db, DocId, [Rev], []) of
- {ok, [{ok, Doc0}]} -> Doc0;
- {ok, [Error]} -> throw(Error)
- end
- end,
-
- #doc{atts=Atts} = Doc,
- DocEdited = Doc#doc{
- atts = NewAtt ++ [A || A <- Atts, A#att.name /= FileName]
- },
- {ok, UpdatedRev} = fabric:update_doc(Db, DocEdited, [{user_ctx,Ctx}]),
- #db{name=DbName} = Db,
-
- {Status, Headers} = case Method of
- 'DELETE' ->
- {200, []};
- _ ->
- {201, [{"Location", absolute_uri(Req, [$/, DbName, $/, DocId, $/,
- FileName])}]}
- end,
- send_json(Req,Status, Headers, {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(UpdatedRev)}
- ]});
-
-db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
- send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
-
-get_md5_header(Req) ->
- ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
- Length = couch_httpd:body_length(Req),
- Trailer = couch_httpd:header_value(Req, "Trailer"),
- case {ContentMD5, Length, Trailer} of
- _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
- base64:decode(ContentMD5);
- {_, chunked, undefined} ->
- <<>>;
- {_, chunked, _} ->
- case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
- {match, _} ->
- md5_in_footer;
- _ ->
- <<>>
- end;
- _ ->
- <<>>
- end.
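-
-% Illustrative outcomes of the clauses above (header values made up):
-%   Content-MD5: aGVsbG8=                -> <<"hello">> (base64-decoded)
-%   chunked body + Trailer: Content-MD5  -> md5_in_footer
-%   anything else                        -> <<>>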
-
-parse_doc_query(Req) ->
- lists:foldl(fun({Key,Value}, Args) ->
- case {Key, Value} of
- {"attachments", "true"} ->
- Options = [attachments | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"meta", "true"} ->
- Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"revs", "true"} ->
- Options = [revs | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"local_seq", "true"} ->
- Options = [local_seq | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"revs_info", "true"} ->
- Options = [revs_info | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"conflicts", "true"} ->
- Options = [conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"deleted_conflicts", "true"} ->
- Options = [deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"rev", Rev} ->
- Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
- {"open_revs", "all"} ->
- Args#doc_query_args{open_revs=all};
- {"open_revs", RevsJsonStr} ->
- JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{open_revs=[couch_doc:parse_rev(Rev) || Rev <- JsonArray]};
- {"atts_since", RevsJsonStr} ->
- JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
- {"new_edits", "false"} ->
- Args#doc_query_args{update_type=replicated_changes};
- {"new_edits", "true"} ->
- Args#doc_query_args{update_type=interactive_edit};
- {"att_encoding_info", "true"} ->
- Options = [att_encoding_info | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"r", R} ->
- Options = [{r,R} | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"w", W} ->
- Options = [{w,W} | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- _Else -> % unknown key value pair, ignore.
- Args
- end
- end, #doc_query_args{}, chttpd:qs(Req)).
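-
-% For example, a query string of ?rev=1-abc&attachments=true folds to roughly
-% (assuming couch_doc:parse_rev/1 returns {Pos, IdBin} as used above):
-%   #doc_query_args{rev = {1, <<"abc">>}, options = [attachments]}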
-
-parse_changes_query(Req) ->
- lists:foldl(fun({Key, Value}, Args) ->
- case {Key, Value} of
- {"feed", _} ->
- Args#changes_args{feed=Value};
- {"descending", "true"} ->
- Args#changes_args{dir=rev};
- {"since", _} ->
- Args#changes_args{since=Value};
- {"limit", _} ->
- Args#changes_args{limit=list_to_integer(Value)};
- {"style", _} ->
- Args#changes_args{style=list_to_existing_atom(Value)};
- {"heartbeat", "true"} ->
- Args#changes_args{heartbeat=true};
- {"heartbeat", _} ->
- Args#changes_args{heartbeat=list_to_integer(Value)};
- {"timeout", _} ->
- Args#changes_args{timeout=list_to_integer(Value)};
- {"include_docs", "true"} ->
- Args#changes_args{include_docs=true};
- {"filter", _} ->
- Args#changes_args{filter=Value};
- _Else -> % unknown key value pair, ignore.
- Args
- end
- end, #changes_args{}, couch_httpd:qs(Req)).
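-
-% For example, ?feed=continuous&heartbeat=10000&include_docs=true folds to
-% roughly (illustrative values):
-%   #changes_args{feed = "continuous", heartbeat = 10000, include_docs = true}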
-
-extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
- extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
-extract_header_rev(Req, ExplicitRev) ->
- Etag = case chttpd:header_value(Req, "If-Match") of
- undefined -> undefined;
- Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
- end,
- case {ExplicitRev, Etag} of
- {undefined, undefined} -> missing_rev;
- {_, undefined} -> ExplicitRev;
- {undefined, _} -> Etag;
- _ when ExplicitRev == Etag -> Etag;
- _ ->
- throw({bad_request, "Document rev and etag have different values"})
- end.
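-
-% Illustrative behaviour of the clauses above (revs made up):
-%   no explicit rev, no If-Match          -> missing_rev
-%   no explicit rev, If-Match: "1-abc"    -> {1, <<"abc">>}
-%   explicit rev and a different If-Match -> throws bad_request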
-
-
-parse_copy_destination_header(Req) ->
- Destination = chttpd:header_value(Req, "Destination"),
- case re:run(Destination, "\\?", [{capture, none}]) of
- nomatch ->
- {list_to_binary(Destination), {0, []}};
- match ->
- [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
- [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
- {Pos, RevId} = couch_doc:parse_rev(Rev),
- {list_to_binary(DocId), {Pos, [RevId]}}
- end.
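-
-% For example (illustrative values):
-%   Destination: target           -> {<<"target">>, {0, []}}
-%   Destination: target?rev=1-abc -> {<<"target">>, {1, [<<"abc">>]}}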
-
-validate_attachment_names(Doc) ->
- lists:foreach(fun(#att{name=Name}) ->
- validate_attachment_name(Name)
- end, Doc#doc.atts).
-
-validate_attachment_name(Name) when is_list(Name) ->
- validate_attachment_name(list_to_binary(Name));
-validate_attachment_name(<<"_",_/binary>>) ->
- throw({bad_request, <<"Attachment name can't start with '_'">>});
-validate_attachment_name(Name) ->
- case is_valid_utf8(Name) of
- true -> Name;
- false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
- end.
-
-%% borrowed from mochijson2:json_bin_is_safe()
-is_valid_utf8(<<>>) ->
- true;
-is_valid_utf8(<<C, Rest/binary>>) ->
- case C of
- $\" ->
- false;
- $\\ ->
- false;
- $\b ->
- false;
- $\f ->
- false;
- $\n ->
- false;
- $\r ->
- false;
- $\t ->
- false;
- C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
- false;
- C when C < 16#7f ->
- is_valid_utf8(Rest);
- _ ->
- false
- end.
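-
-% For example, is_valid_utf8(<<"logo.png">>) -> true, while a name containing
-% a quote or control character, e.g. <<"a\"b">>, -> false.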
diff --git a/apps/chttpd/src/chttpd_external.erl b/apps/chttpd/src/chttpd_external.erl
deleted file mode 100644
index 51f32e10..00000000
--- a/apps/chttpd/src/chttpd_external.erl
+++ /dev/null
@@ -1,173 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_external).
-
--export([handle_external_req/2, handle_external_req/3]).
--export([send_external_response/2, json_req_obj/2, json_req_obj/3]).
--export([default_or_content_type/2, parse_external_response/1]).
--export([handle_search_req/2]).
-
--import(chttpd,[send_error/4]).
-
--include_lib("couch/include/couch_db.hrl").
-
-handle_search_req(Req, Db) ->
- process_external_req(Req, Db, <<"search">>).
-
-% handle_external_req/2
-% for the old type of config usage:
-% _external = {chttpd_external, handle_external_req}
-% with urls like
-% /db/_external/action/design/name
-handle_external_req(#httpd{
- path_parts=[_DbName, _External, UrlName | _Path]
- }=HttpReq, Db) ->
- process_external_req(HttpReq, Db, UrlName);
-handle_external_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
- send_error(Req, 404, <<"external_server_error">>, <<"No server name specified.">>);
-handle_external_req(Req, _) ->
- send_error(Req, 404, <<"external_server_error">>, <<"Broken assumption">>).
-
-% handle_external_req/3
-% for this type of config usage:
-% _action = {chttpd_external, handle_external_req, <<"action">>}
-% with urls like
-% /db/_action/design/name
-handle_external_req(HttpReq, Db, Name) ->
- process_external_req(HttpReq, Db, Name).
-
-process_external_req(HttpReq, Db, Name) ->
-
- Response = couch_external_manager:execute(binary_to_list(Name),
- json_req_obj(HttpReq, Db)),
-
- case Response of
- {unknown_external_server, Msg} ->
- send_error(HttpReq, 404, <<"external_server_error">>, Msg);
- _ ->
- send_external_response(HttpReq, Response)
- end.
-
-json_req_obj(Req, Db) -> json_req_obj(Req, Db, null).
-json_req_obj(#httpd{mochi_req=Req,
- method=Method,
- path_parts=Path,
- req_body=ReqBody
- }, Db, DocId) ->
- Body = case ReqBody of
- undefined -> Req:recv_body();
- Else -> Else
- end,
- ParsedForm = case Req:get_primary_header_value("content-type") of
- "application/x-www-form-urlencoded" ++ _ ->
- mochiweb_util:parse_qs(Body);
- _ ->
- []
- end,
- Headers = Req:get(headers),
- Hlist = mochiweb_headers:to_list(Headers),
- {ok, Info} = fabric:get_db_info(Db),
-
- % add headers...
- {[{<<"info">>, {Info}},
- {<<"id">>, DocId},
- {<<"method">>, Method},
- {<<"path">>, Path},
- {<<"query">>, json_query_keys(to_json_terms(Req:parse_qs()))},
- {<<"headers">>, to_json_terms(Hlist)},
- {<<"body">>, Body},
- {<<"peer">>, ?l2b(Req:get(peer))},
- {<<"form">>, to_json_terms(ParsedForm)},
- {<<"cookie">>, to_json_terms(Req:parse_cookie())},
- {<<"userCtx">>, couch_util:json_user_ctx(Db)}]}.
-
-to_json_terms(Data) ->
- to_json_terms(Data, []).
-to_json_terms([], Acc) ->
- {lists:reverse(Acc)};
-to_json_terms([{Key, Value} | Rest], Acc) when is_atom(Key) ->
- to_json_terms(Rest, [{list_to_binary(atom_to_list(Key)), list_to_binary(Value)} | Acc]);
-to_json_terms([{Key, Value} | Rest], Acc) ->
- to_json_terms(Rest, [{list_to_binary(Key), list_to_binary(Value)} | Acc]).
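-
-% For example (illustrative header pair):
-%   to_json_terms([{"Host", "localhost"}]) -> {[{<<"Host">>, <<"localhost">>}]}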
-
-json_query_keys({Json}) ->
- json_query_keys(Json, []).
-json_query_keys([], Acc) ->
- {lists:reverse(Acc)};
-json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)}|Acc]);
-json_query_keys([Term | Rest], Acc) ->
- json_query_keys(Rest, [Term|Acc]).
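-
-% For example, json_query_keys({[{<<"startkey">>, <<"[\"a\"]">>}]}) ->
-%   {[{<<"startkey">>, [<<"a">>]}]} -- only the key-like members are decoded.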
-
-send_external_response(#httpd{mochi_req=MochiReq}, Response) ->
- #extern_resp_args{
- code = Code,
- data = Data,
- ctype = CType,
- headers = Headers
- } = parse_external_response(Response),
- Resp = MochiReq:respond({Code,
- default_or_content_type(CType, Headers ++ chttpd:server_header()), Data}),
- {ok, Resp}.
-
-parse_external_response({Response}) ->
- lists:foldl(fun({Key,Value}, Args) ->
- case {Key, Value} of
- {"", _} ->
- Args;
- {<<"code">>, Value} ->
- Args#extern_resp_args{code=Value};
- {<<"stop">>, true} ->
- Args#extern_resp_args{stop=true};
- {<<"json">>, Value} ->
- Args#extern_resp_args{
- data=?JSON_ENCODE(Value),
- ctype="application/json"};
- {<<"body">>, Value} ->
- Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
- {<<"base64">>, Value} ->
- Args#extern_resp_args{
- data=base64:decode(Value),
- ctype="application/binary"
- };
- {<<"headers">>, {Headers}} ->
- NewHeaders = lists:map(fun({Header, HVal}) ->
- {binary_to_list(Header), binary_to_list(HVal)}
- end, Headers),
- Args#extern_resp_args{headers=NewHeaders};
- _ -> % unknown key
- Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
- throw({external_response_error, Msg})
- end
- end, #extern_resp_args{}, Response).
-
-default_or_content_type(DefaultContentType, Headers) ->
- {ContentType, OtherHeaders} = lists:partition(
- fun({HeaderName, _}) ->
- HeaderName == "Content-Type"
- end, Headers),
-
- % XXX: What happens if we were passed multiple content types? We add another?
- case ContentType of
- [{"Content-Type", SetContentType}] ->
- TrueContentType = SetContentType;
- _Else ->
- TrueContentType = DefaultContentType
- end,
-
- HeadersWithContentType = lists:append(OtherHeaders, [{"Content-Type", TrueContentType}]),
- HeadersWithContentType.
diff --git a/apps/chttpd/src/chttpd_misc.erl b/apps/chttpd/src/chttpd_misc.erl
deleted file mode 100644
index 640e7d0d..00000000
--- a/apps/chttpd/src/chttpd_misc.erl
+++ /dev/null
@@ -1,222 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_misc).
-
--export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2,
- handle_all_dbs_req/1,handle_replicate_req/1,handle_restart_req/1,
- handle_uuids_req/1,handle_config_req/1,handle_log_req/1,
- handle_task_status_req/1,handle_sleep_req/1,handle_welcome_req/1,
- handle_utils_dir_req/1, handle_favicon_req/1, handle_system_req/1]).
-
-
--include_lib("couch/include/couch_db.hrl").
-
--import(chttpd,
- [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
- start_json_response/2,send_chunk/2,end_json_response/1,
- start_chunked_response/3, send_error/4]).
-
-% httpd global handlers
-
-handle_welcome_req(Req) ->
- handle_welcome_req(Req, <<"Welcome">>).
-
-handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
- send_json(Req, {[
- {couchdb, WelcomeMessage},
- {version, list_to_binary(couch:version())},
- {bigcouch, get_version()}
- ]});
-handle_welcome_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-get_version() ->
- Releases = release_handler:which_releases(),
- Version = case [V || {"bigcouch", V, _, current} <- Releases] of
- [] ->
- case [V || {"bigcouch", V, _, permanent} <- Releases] of
- [] ->
- "dev";
- [Permanent] ->
- Permanent
- end;
- [Current] ->
- Current
- end,
- list_to_binary(Version).
-
-handle_favicon_req(Req) ->
- handle_favicon_req(Req, couch_config:get("chttpd", "docroot")).
-
-handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
- chttpd:serve_file(Req, "favicon.ico", DocumentRoot);
-handle_favicon_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_utils_dir_req(Req) ->
- handle_utils_dir_req(Req, couch_config:get("chttpd", "docroot")).
-
-handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
- "/" ++ UrlPath = chttpd:path(Req),
- case chttpd:partition(UrlPath) of
- {_ActionKey, "/", RelativePath} ->
- % GET /_utils/path or GET /_utils/
- chttpd:serve_file(Req, RelativePath, DocumentRoot);
- {_ActionKey, "", _RelativePath} ->
- % GET /_utils
- RedirectPath = chttpd:path(Req) ++ "/",
- chttpd:send_redirect(Req, RedirectPath)
- end;
-handle_utils_dir_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_sleep_req(#httpd{method='GET'}=Req) ->
- Time = list_to_integer(chttpd:qs_value(Req, "time")),
- receive snicklefart -> ok after Time -> ok end,
- send_json(Req, {[{ok, true}]});
-handle_sleep_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_all_dbs_req(#httpd{method='GET'}=Req) ->
- {ok, DbNames} = fabric:all_dbs(),
- send_json(Req, DbNames);
-handle_all_dbs_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-
-handle_task_status_req(#httpd{method='GET'}=Req) ->
- {Replies, _BadNodes} = gen_server:multi_call(couch_task_status, all),
- Response = lists:flatmap(fun({Node, Tasks}) ->
- [{[{node,Node} | Task]} || Task <- Tasks]
- end, Replies),
- send_json(Req, lists:sort(Response));
-handle_task_status_req(Req) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-handle_replicate_req(#httpd{method='POST', user_ctx=Ctx} = Req) ->
- PostBody = chttpd:json_body_obj(Req),
- try couch_rep:replicate(PostBody, Ctx) of
- {ok, {continuous, RepId}} ->
- send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
- {ok, {cancelled, RepId}} ->
- send_json(Req, 200, {[{ok, true}, {<<"_local_id">>, RepId}]});
- {ok, {JsonResults}} ->
- send_json(Req, {[{ok, true} | JsonResults]});
- {error, {Type, Details}} ->
- send_json(Req, 500, {[{error, Type}, {reason, Details}]});
- {error, not_found} ->
- send_json(Req, 404, {[{error, not_found}]});
- {error, Reason} ->
- send_json(Req, 500, {[{error, Reason}]})
- catch
- throw:{db_not_found, Msg} ->
- send_json(Req, 404, {[{error, db_not_found}, {reason, Msg}]});
- throw:{node_not_connected, Msg} ->
- send_json(Req, 404, {[{error, node_not_connected}, {reason, Msg}]})
- end;
-handle_replicate_req(Req) ->
- send_method_not_allowed(Req, "POST").
-
-
-handle_restart_req(#httpd{method='POST'}=Req) ->
- couch_server_sup:restart_core_server(),
- send_json(Req, 200, {[{ok, true}]});
-handle_restart_req(Req) ->
- send_method_not_allowed(Req, "POST").
-
-
-handle_uuids_req(Req) ->
- couch_httpd_misc_handlers:handle_uuids_req(Req).
-
-
-% Config request handler
-
-
-% GET /_config/
-% GET /_config
-handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
- Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
- case dict:is_key(Section, Acc) of
- true ->
- dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
- false ->
- dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
- end
- end, dict:new(), couch_config:all()),
- KVs = dict:fold(fun(Section, Values, Acc) ->
- [{list_to_binary(Section), {Values}} | Acc]
- end, [], Grouped),
- send_json(Req, 200, {KVs});
-% GET /_config/Section
-handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) ->
- KVs = [{list_to_binary(Key), list_to_binary(Value)}
- || {Key, Value} <- couch_config:get(Section)],
- send_json(Req, 200, {KVs});
-% PUT /_config/Section/Key
-% "value"
-handle_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req) ->
- Value = chttpd:json_body(Req),
- Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
- OldValue = couch_config:get(Section, Key, ""),
- ok = couch_config:set(Section, Key, ?b2l(Value), Persist),
- send_json(Req, 200, list_to_binary(OldValue));
-% GET /_config/Section/Key
-handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) ->
- case couch_config:get(Section, Key, null) of
- null ->
- throw({not_found, unknown_config_value});
- Value ->
- send_json(Req, 200, list_to_binary(Value))
- end;
-% DELETE /_config/Section/Key
-handle_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req) ->
- Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
- case couch_config:get(Section, Key, null) of
- null ->
- throw({not_found, unknown_config_value});
- OldValue ->
- couch_config:delete(Section, Key, Persist),
- send_json(Req, 200, list_to_binary(OldValue))
- end;
-handle_config_req(Req) ->
- send_method_not_allowed(Req, "GET,PUT,DELETE").
-
-% httpd log handlers
-
-handle_log_req(#httpd{method='GET'}=Req) ->
- Bytes = list_to_integer(chttpd:qs_value(Req, "bytes", "1000")),
- Offset = list_to_integer(chttpd:qs_value(Req, "offset", "0")),
- Chunk = couch_log:read(Bytes, Offset),
- {ok, Resp} = start_chunked_response(Req, 200, [
- % send a plaintext response
- {"Content-Type", "text/plain; charset=utf-8"},
- {"Content-Length", integer_to_list(length(Chunk))}
- ]),
- send_chunk(Resp, Chunk),
- send_chunk(Resp, "");
-handle_log_req(Req) ->
- send_method_not_allowed(Req, "GET").
-
-% Note: this resource is exposed on the backdoor interface, but it's in chttpd
-% because it's not in couch trunk
-handle_system_req(Req) ->
- Other = erlang:memory(system) - lists:sum([X || {_,X} <-
- erlang:memory([atom, code, binary, ets])]),
- Memory = [{other, Other} | erlang:memory([atom, atom_used, processes,
- processes_used, binary, code, ets])],
- send_json(Req, {[
- {memory, {Memory}},
- {run_queue, statistics(run_queue)},
- {process_count, erlang:system_info(process_count)},
- {process_limit, erlang:system_info(process_limit)}
- ]}).
diff --git a/apps/chttpd/src/chttpd_oauth.erl b/apps/chttpd/src/chttpd_oauth.erl
deleted file mode 100644
index 84506efe..00000000
--- a/apps/chttpd/src/chttpd_oauth.erl
+++ /dev/null
@@ -1,168 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_oauth).
--include_lib("couch/include/couch_db.hrl").
-
--export([oauth_authentication_handler/1, handle_oauth_req/1, consumer_lookup/2]).
-
-% OAuth auth handler using per-node user db
-oauth_authentication_handler(#httpd{mochi_req=MochiReq}=Req) ->
- serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
- AccessToken = couch_util:get_value("oauth_token", Params),
- TokenSecret = couch_config:get("oauth_token_secrets", AccessToken),
- case oauth:verify(Signature, atom_to_list(MochiReq:get(method)), URL, Params, Consumer, TokenSecret) of
- true ->
- set_user_ctx(Req, AccessToken);
- false ->
- Req
- end
- end, true).
-
-% Look up the consumer key and get the roles to give the consumer
-set_user_ctx(Req, AccessToken) ->
- DbName = couch_config:get("couch_httpd_auth", "authentication_db", "users"),
- ok = chttpd_auth:ensure_users_db_exists(?l2b(DbName)),
- Name = ?l2b(couch_config:get("oauth_token_users", AccessToken)),
- case chttpd_auth:get_user(Name) of
- nil -> Req;
- User ->
- Roles = couch_util:get_value(<<"roles">>, User, []),
- Req#httpd{user_ctx=#user_ctx{name=Name, roles=Roles}}
- end.
-
-% OAuth request_token
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"request_token">>], method=Method}=Req) ->
- serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
- AccessToken = couch_util:get_value("oauth_token", Params),
- TokenSecret = couch_config:get("oauth_token_secrets", AccessToken),
- case oauth:verify(Signature, atom_to_list(Method), URL, Params, Consumer, TokenSecret) of
- true ->
- ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
- false ->
- invalid_signature(Req)
- end
- end, false);
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"authorize">>]}=Req) ->
- {ok, serve_oauth_authorize(Req)};
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>], method='GET'}=Req) ->
- serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
- case oauth:token(Params) of
- "requestkey" ->
- case oauth:verify(Signature, "GET", URL, Params, Consumer, "requestsecret") of
- true ->
- ok(Req, <<"oauth_token=accesskey&oauth_token_secret=accesssecret">>);
- false ->
- invalid_signature(Req)
- end;
- _ ->
- chttpd:send_error(Req, 400, <<"invalid_token">>, <<"Invalid OAuth token.">>)
- end
- end, false);
-handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>]}=Req) ->
- chttpd:send_method_not_allowed(Req, "GET").
-
-invalid_signature(Req) ->
- chttpd:send_error(Req, 400, <<"invalid_signature">>, <<"Invalid signature value.">>).
-
-% This needs to be protected, i.e. force the user to log in using HTTP Basic Auth or form-based login.
-serve_oauth_authorize(#httpd{method=Method}=Req) ->
- case Method of
- 'GET' ->
- % Confirm with the User that they want to authenticate the Consumer
- serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
- AccessToken = couch_util:get_value("oauth_token", Params),
- TokenSecret = couch_config:get("oauth_token_secrets", AccessToken),
- case oauth:verify(Signature, "GET", URL, Params, Consumer, TokenSecret) of
- true ->
- ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
- false ->
- invalid_signature(Req)
- end
- end, false);
- 'POST' ->
- % If the User has confirmed, we direct the User back to the Consumer with a verification code
- serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
- AccessToken = couch_util:get_value("oauth_token", Params),
- TokenSecret = couch_config:get("oauth_token_secrets", AccessToken),
- case oauth:verify(Signature, "POST", URL, Params, Consumer, TokenSecret) of
- true ->
- %redirect(oauth_callback, oauth_token, oauth_verifier),
- ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
- false ->
- invalid_signature(Req)
- end
- end, false);
- _ ->
- chttpd:send_method_not_allowed(Req, "GET,POST")
- end.
-
-serve_oauth(#httpd{mochi_req=MochiReq}=Req, Fun, FailSilently) ->
- % 1. In the HTTP Authorization header as defined in OAuth HTTP Authorization Scheme.
- % 2. As the HTTP POST request body with a content-type of application/x-www-form-urlencoded.
- % 3. Added to the URLs in the query part (as defined by [RFC3986] section 3).
- AuthHeader = case MochiReq:get_header_value("authorization") of
- undefined ->
- "";
- Else ->
- [Head | Tail] = re:split(Else, "\\s", [{parts, 2}, {return, list}]),
- case [string:to_lower(Head) | Tail] of
- ["oauth", Rest] -> Rest;
- _ -> ""
- end
- end,
- HeaderParams = oauth_uri:params_from_header_string(AuthHeader),
- %Realm = couch_util:get_value("realm", HeaderParams),
- Params = proplists:delete("realm", HeaderParams) ++ MochiReq:parse_qs(),
- ?LOG_DEBUG("OAuth Params: ~p", [Params]),
- case couch_util:get_value("oauth_version", Params, "1.0") of
- "1.0" ->
- case couch_util:get_value("oauth_consumer_key", Params, undefined) of
- undefined ->
- case FailSilently of
- true -> Req;
- false -> chttpd:send_error(Req, 400, <<"invalid_consumer">>, <<"Invalid consumer.">>)
- end;
- ConsumerKey ->
- SigMethod = couch_util:get_value("oauth_signature_method", Params),
- case consumer_lookup(ConsumerKey, SigMethod) of
- none ->
- chttpd:send_error(Req, 400, <<"invalid_consumer">>, <<"Invalid consumer (key or signature method).">>);
- Consumer ->
- Signature = couch_util:get_value("oauth_signature", Params),
- URL = chttpd:absolute_uri(Req, MochiReq:get(raw_path)),
- Fun(URL, proplists:delete("oauth_signature", Params),
- Consumer, Signature)
- end
- end;
- _ ->
- chttpd:send_error(Req, 400, <<"invalid_oauth_version">>, <<"Invalid OAuth version.">>)
- end.
-
-consumer_lookup(Key, MethodStr) ->
- SignatureMethod = case MethodStr of
- "PLAINTEXT" -> plaintext;
- "HMAC-SHA1" -> hmac_sha1;
- %"RSA-SHA1" -> rsa_sha1;
- _Else -> undefined
- end,
- case SignatureMethod of
- undefined -> none;
- _SupportedMethod ->
- case couch_config:get("oauth_consumer_secrets", Key, undefined) of
- undefined -> none;
- Secret -> {Key, Secret, SignatureMethod}
- end
- end.
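-
-% Illustrative: given an ini entry [oauth_consumer_secrets] key1 = s3cr3t,
-% consumer_lookup("key1", "HMAC-SHA1") -> {"key1", "s3cr3t", hmac_sha1};
-% an unknown key or signature method returns none.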
-
-ok(#httpd{mochi_req=MochiReq}, Body) ->
- {ok, MochiReq:respond({200, [], Body})}.
diff --git a/apps/chttpd/src/chttpd_rewrite.erl b/apps/chttpd/src/chttpd_rewrite.erl
deleted file mode 100644
index fbf246ab..00000000
--- a/apps/chttpd/src/chttpd_rewrite.erl
+++ /dev/null
@@ -1,418 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-%
-% bind_path is based on bind method from Webmachine
-
-
-%% @doc Module for URL rewriting by pattern matching.
-
--module(chttpd_rewrite).
--export([handle_rewrite_req/3]).
--include_lib("couch/include/couch_db.hrl").
-
--define(SEPARATOR, $\/).
--define(MATCH_ALL, '*').
-
-
-%% doc The http rewrite handler. All rewriting is done from
-%% /dbname/_design/ddocname/_rewrite by default.
-%%
-%% Each rule should be in the rewrites member of the design doc.
-%% Example of a complete rule:
-%%
-%% {
-%% ....
-%% "rewrites": [
-%% {
-%% "from": "",
-%% "to": "index.html",
-%% "method": "GET",
-%% "query": {}
-%% }
-%% ]
-%% }
-%%
-%% from: the path rule used to bind the current uri to the rule. It
-%% uses pattern matching for that.
-%%
-%% to: rule to rewrite a url. It can contain variables that depend on binding
-%% variables discovered during pattern matching and on query args (url args
-%% and the query member).
-%%
-%% method: the method to bind the request method to the rule; "*" by default.
-%% query: query args you want to define; they can contain dynamic variables
-%% by binding the key to the bindings.
-%%
-%%
-%% to and from are paths with patterns. A pattern can be a string starting
-%% with ":" or "*", e.g.:
-%% /somepath/:var/*
-%%
-%% This path is converted to an erlang list by splitting on "/". Each var is
-%% converted to an atom, and "*" is converted to the '*' atom. The pattern
-%% matching is done by splitting the request url on "/" into a list of tokens.
-%% A string pattern will match an equal token. The star atom ('*' in single
-%% quotes) will match any number of tokens, but may only be present as the
-%% last pathterm in a pathspec. If all tokens are matched and all pathterms
-%% are used, then the pathspec matches. It works like webmachine. Each
-%% identified token is reused in the to rule and in the query.
-%%
-%% The pattern matching first matches the request method to a rule (by
-%% default all methods match, since method is "*" by default). Then it tries
-%% to match the path to one rule. If no rule matches, a 404 error is
-%% returned.
-%%
-%% Once a rule is found we rewrite the request url using the "to" and
-%% "query" members. The identified tokens are matched to the rule and
-%% replace the vars. If '*' is found in the rule it will contain the
-%% remaining part of the url, if any.
-%%
-%% Examples:
-%%
-%% Dispatch rule URL TO Tokens
-%%
-%% {"from": "/a/b", /a/b?k=v /some/b?k=v var =:= b
-%% "to": "/some/"} k = v
-%%
-%% {"from": "/a/b", /a/b /some/b?var=b var =:= b
-%% "to": "/some/:var"}
-%%
-%% {"from": "/a", /a /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/*", /a/b/c /some/b/c
-%% "to": "/some/*"}
-%%
-%% {"from": "/a", /a /some
-%% "to": "/some/*"}
-%%
-%% {"from": "/a/:foo/*", /a/b/c /some/b/c?foo=b foo =:= b
-%% "to": "/some/:foo/*"}
-%%
-%% {"from": "/a/:foo", /a/b /some/?k=b&foo=b foo =:= b
-%% "to": "/some",
-%% "query": {
-%% "k": ":foo"
-%% }}
-%%
-%% {"from": "/a", /a?foo=b /some/b foo =:= b
-%% "to": "/some/:foo",
-%% }}
-
-
-
-handle_rewrite_req(#httpd{
- path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts],
- method=Method,
- mochi_req=MochiReq}=Req, _Db, DDoc) ->
-
- % we are in a design handler
- DesignId = <<"_design/", DesignName/binary>>,
- Prefix = <<"/", DbName/binary, "/", DesignId/binary>>,
- QueryList = couch_httpd:qs(Req),
- QueryList1 = [{to_atom(K), V} || {K, V} <- QueryList],
-
- #doc{body={Props}} = DDoc,
-
- % get rules from ddoc
- case couch_util:get_value(<<"rewrites">>, Props) of
- undefined ->
- couch_httpd:send_error(Req, 404, <<"rewrite_error">>,
- <<"Invalid path.">>);
- Rules ->
- % create dispatch list from rules
- DispatchList = [make_rule(Rule) || {Rule} <- Rules],
-
- %% get raw path by matching url to a rule.
- RawPath = case try_bind_path(DispatchList, Method, PathParts,
- QueryList1) of
- no_dispatch_path ->
- throw(not_found);
- {NewPathParts, Bindings} ->
- Parts = [mochiweb_util:quote_plus(X) || X <- NewPathParts],
-
-                % build the new path, re-encoding query args and converting
-                % them to json where needed
- Path = lists:append(
- string:join(Parts, [?SEPARATOR]),
- case Bindings of
- [] -> [];
- _ -> [$?, encode_query(Bindings)]
- end),
-
-                % if the path is relative, detect it and rewrite the path
- case mochiweb_util:safe_relative_path(Path) of
- undefined ->
- ?b2l(Prefix) ++ "/" ++ Path;
- P1 ->
- ?b2l(Prefix) ++ "/" ++ P1
- end
-
- end,
-
- % normalize final path (fix levels "." and "..")
- RawPath1 = ?b2l(iolist_to_binary(normalize_path(RawPath))),
-
- ?LOG_DEBUG("rewrite to ~p ~n", [RawPath1]),
-
- % build a new mochiweb request
- MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
- MochiReq:get(method),
- RawPath1,
- MochiReq:get(version),
- MochiReq:get(headers)),
-
-        % cleanup; this forces mochiweb to reparse the raw uri.
- MochiReq1:cleanup(),
-
- chttpd:handle_request(MochiReq1)
- end.
-
-
-
-%% @doc Try to find a rule matching the current url. If none is found,
-%% no_dispatch_path is returned and the caller raises a 404 not_found error.
-try_bind_path([], _Method, _PathParts, _QueryList) ->
- no_dispatch_path;
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
- [{PathParts1, Method1}, RedirectPath, QueryArgs] = Dispatch,
- case bind_method(Method1, Method) of
- true ->
- case bind_path(PathParts1, PathParts, []) of
- {ok, Remaining, Bindings} ->
- Bindings1 = Bindings ++ QueryList,
-
-                % parse query args from the rule and fill
-                % them in with binding vars where needed
- QueryArgs1 = make_query_list(QueryArgs, Bindings1, []),
-
-                % drop params in Bindings1 that are already in
-                % QueryArgs1
- Bindings2 = lists:foldl(fun({K, V}, Acc) ->
- K1 = to_atom(K),
- KV = case couch_util:get_value(K1, QueryArgs1) of
- undefined -> [{K1, V}];
- _V1 -> []
- end,
- Acc ++ KV
- end, [], Bindings1),
-
- FinalBindings = Bindings2 ++ QueryArgs1,
- NewPathParts = make_new_path(RedirectPath, FinalBindings,
- Remaining, []),
- {NewPathParts, FinalBindings};
- fail ->
- try_bind_path(Rest, Method, PathParts, QueryList)
- end;
- false ->
- try_bind_path(Rest, Method, PathParts, QueryList)
- end.
-
-%% Dynamically rewrite the query list given as the query member in
-%% rewrites. Each value is replaced by a binding or an argument
-%% passed in the url.
-make_query_list([], _Bindings, Acc) ->
- Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Acc) ->
- Value1 = to_json({Value}),
- make_query_list(Rest, Bindings, [{to_atom(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Acc) when is_binary(Value) ->
- Value1 = replace_var(Key, Value, Bindings),
- make_query_list(Rest, Bindings, [{to_atom(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Acc) when is_list(Value) ->
- Value1 = replace_var(Key, Value, Bindings),
- make_query_list(Rest, Bindings, [{to_atom(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Acc) ->
- make_query_list(Rest, Bindings, [{to_atom(Key), Value}|Acc]).
-
-replace_var(Key, Value, Bindings) ->
- case Value of
- <<":", Var/binary>> ->
- get_var(Var, Bindings, Value);
- _ when is_list(Value) ->
- Value1 = lists:foldr(fun(V, Acc) ->
- V1 = case V of
- <<":", VName/binary>> ->
- case get_var(VName, Bindings, V) of
- V2 when is_list(V2) ->
- iolist_to_binary(V2);
- V2 -> V2
- end;
- _ ->
-
- V
- end,
- [V1|Acc]
- end, [], Value),
- to_json(Value1);
- _ when is_binary(Value) ->
- Value;
- _ ->
- case Key of
- <<"key">> -> to_json(Value);
- <<"startkey">> -> to_json(Value);
- <<"endkey">> -> to_json(Value);
- _ ->
- lists:flatten(?JSON_ENCODE(Value))
- end
- end.
-
-
-get_var(VarName, Props, Default) ->
- VarName1 = list_to_atom(binary_to_list(VarName)),
- couch_util:get_value(VarName1, Props, Default).
-
-%% doc: build the new path from bindings. Bindings are query args
-%% (+ dynamic query rewritten if needed) and bindings found in the
-%% bind_path step.
-make_new_path([], _Bindings, _Remaining, Acc) ->
- lists:reverse(Acc);
-make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
- Acc1 = lists:reverse(Acc) ++ Remaining,
- Acc1;
-make_new_path([P|Rest], Bindings, Remaining, Acc) when is_atom(P) ->
- P2 = case couch_util:get_value(P, Bindings) of
- undefined -> << "undefined">>;
- P1 -> P1
- end,
- make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
- make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
-
-%% @doc Check if the request method fits the rule method. If the
-%% method rule is '*', which is the default, all
-%% request methods will bind. It allows us to make rules
-%% that depend on the HTTP method.
-bind_method(?MATCH_ALL, _Method) ->
- true;
-bind_method(Method, Method) ->
- true;
-bind_method(_, _) ->
- false.
-
-
-%% @doc bind path. Using the rule's from pattern we try to bind variables
-%% in the current url by pattern matching
-bind_path([], [], Bindings) ->
- {ok, [], Bindings};
-bind_path([?MATCH_ALL], Rest, Bindings) when is_list(Rest) ->
- {ok, Rest, Bindings};
-bind_path(_, [], _) ->
- fail;
-bind_path([Token|RestToken],[Match|RestMatch],Bindings) when is_atom(Token) ->
- bind_path(RestToken, RestMatch, [{Token, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
- bind_path(RestToken, RestMatch, Bindings);
-bind_path(_, _, _) ->
- fail.
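-
-% For example (rule tokens are atoms produced by path_to_list/3):
-%   bind_path([foo, '*'], [<<"b">>, <<"c">>], []) ->
-%       {ok, [<<"c">>], [{foo, <<"b">>}]}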
-
-
-%% normalize path.
-normalize_path(Path) ->
- "/" ++ string:join(normalize_path1(string:tokens(Path,
- "/"), []), [?SEPARATOR]).
-
-
-normalize_path1([], Acc) ->
- lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
- Acc1 = case Acc of
- [] -> [".."|Acc];
- [T|_] when T =:= ".." -> [".."|Acc];
- [_|R] -> R
- end,
- normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
- normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
- normalize_path1(Rest, [Path|Acc]).
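-
-% For example, normalize_path("/a/./b/../c") -> "/a/c".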
-
-
-%% @doc transform a json rule into erlang terms for pattern matching
-make_rule(Rule) ->
- Method = case couch_util:get_value(<<"method">>, Rule) of
- undefined -> '*';
- M -> list_to_atom(?b2l(M))
- end,
- QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
- undefined -> [];
- {Args} -> Args
- end,
- FromParts = case couch_util:get_value(<<"from">>, Rule) of
- undefined -> ['*'];
- From ->
- parse_path(From)
- end,
- ToParts = case couch_util:get_value(<<"to">>, Rule) of
- undefined ->
- throw({error, invalid_rewrite_target});
- To ->
- parse_path(To)
- end,
- [{FromParts, Method}, ToParts, QueryArgs].
-
-parse_path(Path) ->
- {ok, SlashRE} = re:compile(<<"\\/">>),
- path_to_list(re:split(Path, SlashRE), [], 0).
-
-%% @doc convert a path rule (from or to) to an erlang list.
-%% "*" and path variables starting with ":" are converted
-%% to erlang atoms.
-path_to_list([], Acc, _DotDotCount) ->
- lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
- path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
- path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
- case couch_config:get("httpd", "secure_rewrites", "true") of
- "false" ->
- path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
- _Else ->
- ?LOG_INFO("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
- throw({insecure_rewrite_rule, "too many ../.. segments"})
- end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
- path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
- P1 = case P of
- <<":", Var/binary>> ->
- list_to_atom(binary_to_list(Var));
- _ -> P
- end,
- path_to_list(R, [P1|Acc], DotDotCount).
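-
-% For example, parse_path(<<"/a/:foo/*">>) -> [<<"a">>, foo, '*'].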
-
-encode_query(Props) ->
- Props1 = lists:foldl(fun ({K, V}, Acc) ->
- V1 = case is_list(V) of
- true -> V;
- false when is_binary(V) ->
- V;
- false ->
- mochiweb_util:quote_plus(V)
- end,
- [{K, V1} | Acc]
- end, [], Props),
- lists:flatten(mochiweb_util:urlencode(Props1)).
-
-to_atom(V) when is_atom(V) ->
- V;
-to_atom(V) when is_binary(V) ->
- to_atom(?b2l(V));
-to_atom(V) ->
- list_to_atom(V).
-
-to_json(V) ->
- iolist_to_binary(?JSON_ENCODE(V)).
diff --git a/apps/chttpd/src/chttpd_show.erl b/apps/chttpd/src/chttpd_show.erl
deleted file mode 100644
index 0aba2835..00000000
--- a/apps/chttpd/src/chttpd_show.erl
+++ /dev/null
@@ -1,311 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_show).
-
--export([handle_doc_show_req/3, handle_doc_update_req/3, handle_view_list_req/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--import(chttpd,
- [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
- start_json_response/2,send_chunk/2,last_chunk/1,send_chunked_error/2,
- start_chunked_response/3, send_error/4]).
-
--record(lacc, {
- req,
- resp = nil,
- qserver,
- lname,
- db,
- etag
-}).
-
-% /db/_design/foo/_show/bar/docid
-% show converts a json doc to a response of any content-type.
-% it looks up the doc and then passes it to the query server.
-% then it sends the response from the query server to the http client.
-
-maybe_open_doc(Db, DocId) ->
- case fabric:open_doc(Db, DocId, [conflicts]) of
- {ok, Doc} ->
- Doc;
- {not_found, _} ->
- nil
- end.
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName, DocId]
- }=Req, Db, DDoc) ->
-
- % open the doc
- Doc = maybe_open_doc(Db, DocId),
-
- % we don't handle revs here b/c they are an internal api
- % returns 404 if there is no doc with DocId
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName, DocId|Rest]
- }=Req, Db, DDoc) ->
-
- DocParts = [DocId|Rest],
- DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
-
- % open the doc
- Doc = maybe_open_doc(Db, DocId1),
-
- % we don't handle revs here b/c they are an internal api
- % pass 404 docs to the show function
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName]
- }=Req, Db, DDoc) ->
- % with no docid the doc is nil
- handle_doc_show(Req, Db, DDoc, ShowName, nil);
-
-handle_doc_show_req(Req, _Db, _DDoc) ->
- send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
- handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).
-
-handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
- % get responder for ddoc/showname
- CurrentEtag = show_etag(Req, Doc, DDoc, []),
- chttpd:etag_respond(Req, CurrentEtag, fun() ->
- JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
- JsonDoc = couch_query_servers:json_doc(Doc),
- [<<"resp">>, ExternalResp] =
- couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName],
- [JsonDoc, JsonReq]),
- JsonResp = apply_etag(ExternalResp, CurrentEtag),
- chttpd_external:send_external_response(Req, JsonResp)
- end).
-
-
-show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
- Accept = chttpd:header_value(Req, "Accept"),
- DocPart = case Doc of
- nil -> nil;
- Doc -> chttpd:doc_etag(Doc)
- end,
- couch_httpd:make_etag({couch_httpd:doc_etag(DDoc), DocPart, Accept,
- UserCtx#user_ctx.roles, More}).
-
-% /db/_design/foo/update/bar/docid
-% updates a doc based on a request
-% handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
-% % anything but GET
-% send_method_not_allowed(Req, "POST,PUT,DELETE,ETC");
-
-handle_doc_update_req(#httpd{
- path_parts=[_, _, _, _, UpdateName, DocId]
- }=Req, Db, DDoc) ->
- Doc = maybe_open_doc(Db, DocId),
- send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);
-
-handle_doc_update_req(#httpd{
- path_parts=[_, _, _, _, UpdateName]
- }=Req, Db, DDoc) ->
- send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);
-
-handle_doc_update_req(Req, _Db, _DDoc) ->
- send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).
-
-send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
- JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
- JsonDoc = couch_query_servers:json_doc(Doc),
- Cmd = [<<"updates">>, UpdateName],
- case couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]) of
- [<<"up">>, {NewJsonDoc}, JsonResp] ->
- case chttpd:header_value(Req, "X-Couch-Full-Commit", "false") of
- "true" ->
- Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
- _ ->
- Options = [{user_ctx, Req#httpd.user_ctx}]
- end,
- NewDoc = couch_doc:from_json_obj({NewJsonDoc}),
- Code = 201,
- {ok, _NewRev} = fabric:update_doc(Db, NewDoc, Options);
- [<<"up">>, _Other, JsonResp] ->
- Code = 200
- end,
- JsonResp2 = json_apply_field({<<"code">>, Code}, JsonResp),
- % todo set location field
- chttpd_external:send_external_response(Req, JsonResp2).
-
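For orientation, a hedged sketch of the query-server exchange the clauses above drive; the update name, doc body, and response values below are hypothetical, not from a real session:

    %% prompt sent by send_doc_update_response/6:
    %%   couch_query_servers:ddoc_prompt(DDoc, [<<"updates">>, UpdateName],
    %%                                   [JsonDoc, JsonReq])
    %% a reply carrying a new doc body takes the 201 branch:
    [<<"up">>, {[{<<"_id">>,<<"doc1">>}, {<<"counter">>,2}]},
     {[{<<"body">>,<<"incremented">>}]}]
    %% a reply with nothing to write takes the 200 branch:
    [<<"up">>, null, {[{<<"body">>,<<"no-op">>}]}]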
-
-% view-list request with view and list from same design doc.
-handle_view_list_req(#httpd{method='GET',
- path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
- handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, nil);
-
-% view-list request with view and list from different design docs.
-handle_view_list_req(#httpd{method='GET',
- path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc) ->
- handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, nil);
-
-handle_view_list_req(#httpd{method='GET'}=Req, _Db, _DDoc) ->
- send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
-handle_view_list_req(#httpd{method='POST',
- path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
- ReqBody = couch_httpd:body(Req),
- {Props2} = ?JSON_DECODE(ReqBody),
- Keys = proplists:get_value(<<"keys">>, Props2, nil),
- handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
- {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST',
- path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc) ->
- ReqBody = couch_httpd:body(Req),
- {Props2} = ?JSON_DECODE(ReqBody),
- Keys = proplists:get_value(<<"keys">>, Props2, nil),
- handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
- {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST'}=Req, _Db, _DDoc) ->
- send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
-handle_view_list_req(Req, _Db, _DDoc) ->
- send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) ->
- {ok, VDoc} = fabric:open_doc(Db, <<"_design/", ViewDesignName/binary>>, []),
- Group = couch_view_group:design_doc_to_view_group(VDoc),
- IsReduce = chttpd_view:get_reduce_type(Req),
- ViewType = chttpd_view:extract_view_type(ViewName, Group#group.views,
- IsReduce),
- QueryArgs = chttpd_view:parse_view_params(Req, Keys, ViewType),
- CB = fun list_callback/2,
- Etag = couch_uuids:new(),
- chttpd:etag_respond(Req, Etag, fun() ->
- couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
- Acc0 = #lacc{
- lname = LName,
- req = Req,
- qserver = QServer,
- db = Db,
- etag = Etag
- },
- fabric:query_view(Db, VDoc, ViewName, CB, Acc0, QueryArgs)
- end)
- end).
-
-list_callback({total_and_offset, Total, Offset}, #lacc{resp=nil} = Acc) ->
- start_list_resp({[{<<"total_rows">>, Total}, {<<"offset">>, Offset}]}, Acc);
-list_callback({total_and_offset, _, _}, Acc) ->
- % a sorted=false view where the message came in late. Ignore.
- {ok, Acc};
-list_callback({row, Row}, #lacc{resp=nil} = Acc) ->
- % first row of a reduce view, or a sorted=false view
- {ok, NewAcc} = start_list_resp({[]}, Acc),
- send_list_row(Row, NewAcc);
-list_callback({row, Row}, Acc) ->
- send_list_row(Row, Acc);
-list_callback(complete, Acc) ->
- #lacc{qserver = {Proc, _}, resp = Resp0} = Acc,
- if Resp0 =:= nil ->
- {ok, #lacc{resp = Resp}} = start_list_resp({[]}, Acc);
- true ->
- Resp = Resp0
- end,
- [<<"end">>, Chunk] = couch_query_servers:proc_prompt(Proc, [<<"list_end">>]),
- send_non_empty_chunk(Resp, Chunk),
- couch_httpd:last_chunk(Resp),
- {ok, Resp};
-list_callback({error, Reason}, #lacc{resp = Resp}) ->
- chttpd:send_chunked_error(Resp, {error, Reason}).
-
-start_list_resp(Head, Acc) ->
- #lacc{
- req = Req,
- db = Db,
- qserver = QServer,
- lname = LName,
- etag = Etag
- } = Acc,
-
- % use a separate process because we're already in a receive loop, and
- % json_req_obj calls fabric:get_db_info()
- spawn_monitor(fun() -> exit(chttpd_external:json_req_obj(Req, Db)) end),
- receive {'DOWN', _, _, _, JsonReq} -> ok end,
-
- [<<"start">>,Chunk,JsonResp] = couch_query_servers:ddoc_proc_prompt(QServer,
- [<<"lists">>, LName], [Head, JsonReq]),
- JsonResp2 = apply_etag(JsonResp, Etag),
- #extern_resp_args{
- code = Code,
- ctype = CType,
- headers = ExtHeaders
- } = couch_httpd_external:parse_external_response(JsonResp2),
- JsonHeaders = couch_httpd_external:default_or_content_type(CType, ExtHeaders),
- {ok, Resp} = start_chunked_response(Req, Code, JsonHeaders),
- send_non_empty_chunk(Resp, Chunk),
- {ok, Acc#lacc{resp=Resp}}.
-
-send_list_row(Row, #lacc{qserver = {Proc, _}, resp = Resp} = Acc) ->
- try couch_query_servers:proc_prompt(Proc, [<<"list_row">>, Row]) of
- [<<"chunks">>, Chunk] ->
- send_non_empty_chunk(Resp, Chunk),
- {ok, Acc};
- [<<"end">>, Chunk] ->
- send_non_empty_chunk(Resp, Chunk),
- couch_httpd:last_chunk(Resp),
- {stop, Resp}
- catch Error ->
- chttpd:send_chunked_error(Resp, Error),
- {stop, Resp}
- end.
-
-send_non_empty_chunk(_, []) ->
- ok;
-send_non_empty_chunk(Resp, Chunk) ->
- send_chunk(Resp, Chunk).
-
-% Maybe this is in the proplists API
-% todo move to couch_util
-json_apply_field(H, {L}) ->
- json_apply_field(H, L, []).
-json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
- % drop matching keys
- json_apply_field({Key, NewValue}, Headers, Acc);
-json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
- % something else is next, leave it alone.
- json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
-json_apply_field({Key, NewValue}, [], Acc) ->
- % end of list, add ours
- {[{Key, NewValue}|Acc]}.
-
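A quick shell sketch of json_apply_field/2, treating it as if exported (values illustrative): any pair with the matching key is dropped and the new pair is prepended.

    1> json_apply_field({<<"code">>, 201},
                        {[{<<"code">>, 200}, {<<"ok">>, true}]}).
    {[{<<"code">>,201},{<<"ok">>,true}]}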
-apply_etag({ExternalResponse}, CurrentEtag) ->
- % Here we embark on the delicate task of replacing or creating the
- % headers on the JsonResponse object. We need to control the Etag and
- % Vary headers. If the external function controls the Etag, we'd have to
- % run it to check for a match, which sort of defeats the purpose.
- case couch_util:get_value(<<"headers">>, ExternalResponse, nil) of
- nil ->
- % no JSON headers
- % add our Etag and Vary headers to the response
- {[{<<"headers">>, {[{<<"Etag">>, CurrentEtag}, {<<"Vary">>, <<"Accept">>}]}} | ExternalResponse]};
- JsonHeaders ->
- {[case Field of
- {<<"headers">>, JsonHeaders} -> % add our headers
- JsonHeadersEtagged = json_apply_field({<<"Etag">>, CurrentEtag}, JsonHeaders),
- JsonHeadersVaried = json_apply_field({<<"Vary">>, <<"Accept">>}, JsonHeadersEtagged),
- {<<"headers">>, JsonHeadersVaried};
- _ -> % skip non-header fields
- Field
- end || Field <- ExternalResponse]}
- end.
-
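A matching sketch of apply_etag/2's no-headers branch (the Etag value is hypothetical): a headers object carrying Etag and Vary is prepended to the response.

    1> apply_etag({[{<<"body">>, <<"hello">>}]}, <<"\"abc123\"">>).
    {[{<<"headers">>,
       {[{<<"Etag">>,<<"\"abc123\"">>},{<<"Vary">>,<<"Accept">>}]}},
      {<<"body">>,<<"hello">>}]}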
diff --git a/apps/chttpd/src/chttpd_sup.erl b/apps/chttpd/src/chttpd_sup.erl
deleted file mode 100644
index bfe6be90..00000000
--- a/apps/chttpd/src/chttpd_sup.erl
+++ /dev/null
@@ -1,25 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_sup).
--behaviour(supervisor).
--export([init/1]).
-
--export([start_link/1]).
-
-start_link(Args) ->
- supervisor:start_link({local,?MODULE}, ?MODULE, Args).
-
-init([]) ->
- Mod = chttpd,
- Spec = {Mod, {Mod,start_link,[]}, permanent, 100, worker, [Mod]},
- {ok, {{one_for_one, 3, 10}, [Spec]}}.
diff --git a/apps/chttpd/src/chttpd_view.erl b/apps/chttpd/src/chttpd_view.erl
deleted file mode 100644
index 89f91cb2..00000000
--- a/apps/chttpd/src/chttpd_view.erl
+++ /dev/null
@@ -1,306 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(chttpd_view).
--include_lib("couch/include/couch_db.hrl").
-
--export([handle_view_req/3, handle_temp_view_req/2, get_reduce_type/1,
- parse_view_params/3, view_group_etag/2, view_group_etag/3,
- parse_bool_param/1, extract_view_type/3]).
-
--import(chttpd,
- [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,send_chunk/2,
- start_json_response/2, start_json_response/3, end_json_response/1,
- send_chunked_error/2]).
-
-design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
- Group = couch_view_group:design_doc_to_view_group(DDoc),
- IsReduce = get_reduce_type(Req),
- ViewType = extract_view_type(ViewName, Group#group.views, IsReduce),
- QueryArgs = parse_view_params(Req, Keys, ViewType),
- % TODO proper calculation of etag
- % Etag = view_group_etag(ViewGroup, Db, Keys),
- Etag = couch_uuids:new(),
- couch_stats_collector:increment({httpd, view_reads}),
- chttpd:etag_respond(Req, Etag, fun() ->
- {ok, Resp} = chttpd:start_json_response(Req, 200, [{"Etag",Etag}]),
- CB = fun view_callback/2,
- fabric:query_view(Db, DDoc, ViewName, CB, {nil, Resp}, QueryArgs)
- end).
-
-view_callback({total_and_offset, Total, Offset}, {nil, Resp}) ->
- Chunk = "{\"total_rows\":~p,\"offset\":~p,\"rows\":[\r\n",
- send_chunk(Resp, io_lib:format(Chunk, [Total, Offset])),
- {ok, {"", Resp}};
-view_callback({total_and_offset, _, _}, Acc) ->
- % a sorted=false view where the message came in late. Ignore.
- {ok, Acc};
-view_callback({row, Row}, {nil, Resp}) ->
- % first row of a reduce view, or a sorted=false view
- send_chunk(Resp, ["{\"rows\":[\r\n", ?JSON_ENCODE(Row)]),
- {ok, {",\r\n", Resp}};
-view_callback({row, Row}, {Prepend, Resp}) ->
- send_chunk(Resp, [Prepend, ?JSON_ENCODE(Row)]),
- {ok, {",\r\n", Resp}};
-view_callback(complete, {nil, Resp}) ->
- send_chunk(Resp, "{\"rows\":[]}"),
- end_json_response(Resp),
- {ok, Resp};
-view_callback(complete, {_, Resp}) ->
- send_chunk(Resp, "\r\n]}"),
- end_json_response(Resp),
- {ok, Resp};
-view_callback({error, Reason}, {_, Resp}) ->
- chttpd:send_chunked_error(Resp, {error, Reason}).
-
-extract_view_type(_ViewName, [], _IsReduce) ->
- throw({not_found, missing_named_view});
-extract_view_type(ViewName, [View|Rest], IsReduce) ->
- case lists:member(ViewName, [Name || {Name, _} <- View#view.reduce_funs]) of
- true ->
- if IsReduce -> reduce; true -> red_map end;
- false ->
- case lists:member(ViewName, View#view.map_names) of
- true -> map;
- false -> extract_view_type(ViewName, Rest, IsReduce)
- end
- end.
-
-handle_view_req(#httpd{method='GET',
- path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
- design_doc_view(Req, Db, DDoc, ViewName, nil);
-
-handle_view_req(#httpd{method='POST',
- path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
- {Fields} = chttpd:json_body_obj(Req),
- case couch_util:get_value(<<"keys">>, Fields) of
- Keys when is_list(Keys) ->
- design_doc_view(Req, Db, DDoc, ViewName, Keys);
- _ ->
- throw({bad_request, "`keys` body member must be an array."})
- end;
-
-handle_view_req(Req, _Db, _DDoc) ->
- send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_temp_view_req(Req, _Db) ->
- Msg = <<"Temporary views are not supported in BigCouch">>,
- chttpd:send_error(Req, 403, forbidden, Msg).
-
-reverse_key_default(?MIN_STR) -> ?MAX_STR;
-reverse_key_default(?MAX_STR) -> ?MIN_STR;
-reverse_key_default(Key) -> Key.
-
-get_reduce_type(Req) ->
- list_to_atom(chttpd:qs_value(Req, "reduce", "true")).
-
-parse_view_params(Req, Keys, ViewType) ->
- QueryList = chttpd:qs(Req),
- QueryParams =
- lists:foldl(fun({K, V}, Acc) ->
- parse_view_param(K, V) ++ Acc
- end, [], QueryList),
- IsMultiGet = case Keys of
- nil -> false;
- _ -> true
- end,
- Args = #view_query_args{
- view_type=ViewType,
- multi_get=IsMultiGet,
- keys=Keys
- },
- QueryArgs = lists:foldl(fun({K, V}, Args2) ->
- validate_view_query(K, V, Args2)
- end, Args, lists:reverse(QueryParams)), % Reverse to match QS order.
-
- GroupLevel = QueryArgs#view_query_args.group_level,
- case {ViewType, GroupLevel, IsMultiGet} of
- {reduce, exact, true} ->
- QueryArgs;
- {reduce, _, false} ->
- QueryArgs;
- {reduce, _, _} ->
-            Msg = <<"Multi-key fetches for reduce "
- "view must include `group=true`">>,
- throw({query_parse_error, Msg});
- _ ->
- QueryArgs
- end,
- QueryArgs.
-
-parse_view_param("", _) ->
- [];
-parse_view_param("key", Value) ->
- JsonKey = ?JSON_DECODE(Value),
- [{start_key, JsonKey}, {end_key, JsonKey}];
-parse_view_param("startkey_docid", Value) ->
- [{start_docid, ?l2b(Value)}];
-parse_view_param("endkey_docid", Value) ->
- [{end_docid, ?l2b(Value)}];
-parse_view_param("startkey", Value) ->
- [{start_key, ?JSON_DECODE(Value)}];
-parse_view_param("endkey", Value) ->
- [{end_key, ?JSON_DECODE(Value)}];
-parse_view_param("limit", Value) ->
- [{limit, parse_positive_int_param(Value)}];
-parse_view_param("count", _Value) ->
- throw({query_parse_error, <<"Query parameter 'count' is now 'limit'.">>});
-parse_view_param("stale", "ok") ->
- [{stale, ok}];
-parse_view_param("stale", _Value) ->
- throw({query_parse_error, <<"stale only available as stale=ok">>});
-parse_view_param("update", _Value) ->
- throw({query_parse_error, <<"update=false is now stale=ok">>});
-parse_view_param("descending", Value) ->
- [{descending, parse_bool_param(Value)}];
-parse_view_param("skip", Value) ->
- [{skip, parse_int_param(Value)}];
-parse_view_param("group", Value) ->
- case parse_bool_param(Value) of
- true -> [{group_level, exact}];
- false -> [{group_level, 0}]
- end;
-parse_view_param("group_level", Value) ->
- [{group_level, parse_positive_int_param(Value)}];
-parse_view_param("inclusive_end", Value) ->
- [{inclusive_end, parse_bool_param(Value)}];
-parse_view_param("reduce", Value) ->
- [{reduce, parse_bool_param(Value)}];
-parse_view_param("include_docs", Value) ->
- [{include_docs, parse_bool_param(Value)}];
-parse_view_param("list", Value) ->
- [{list, ?l2b(Value)}];
-parse_view_param("callback", _) ->
- []; % Verified in the JSON response functions
-parse_view_param("show_total_rows", Value) ->
- [{show_total_rows, parse_bool_param(Value)}];
-parse_view_param("sorted", Value) ->
- [{sorted, parse_bool_param(Value)}];
-parse_view_param(Key, Value) ->
- [{extra, {Key, Value}}].
-
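A few shell sketches of the clauses above, again treating the function as exported (inputs illustrative):

    1> parse_view_param("limit", "10").
    [{limit,10}]
    2> parse_view_param("group", "true").
    [{group_level,exact}]
    3> parse_view_param("key", "\"apple\"").
    [{start_key,<<"apple">>},{end_key,<<"apple">>}]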
-validate_view_query(start_key, Value, Args) ->
- case Args#view_query_args.multi_get of
- true ->
- Msg = <<"Query parameter `start_key` is "
-                "not compatible with multi-get">>,
- throw({query_parse_error, Msg});
- _ ->
- Args#view_query_args{start_key=Value}
- end;
-validate_view_query(start_docid, Value, Args) ->
- Args#view_query_args{start_docid=Value};
-validate_view_query(end_key, Value, Args) ->
- case Args#view_query_args.multi_get of
- true->
-            Msg = <<"Query parameter `end_key` is "
-                "not compatible with multi-get">>,
- throw({query_parse_error, Msg});
- _ ->
- Args#view_query_args{end_key=Value}
- end;
-validate_view_query(end_docid, Value, Args) ->
- Args#view_query_args{end_docid=Value};
-validate_view_query(limit, Value, Args) ->
- Args#view_query_args{limit=Value};
-validate_view_query(list, Value, Args) ->
- Args#view_query_args{list=Value};
-validate_view_query(stale, Value, Args) ->
- Args#view_query_args{stale=Value};
-validate_view_query(descending, true, Args) ->
- case Args#view_query_args.direction of
- rev -> Args; % Already reversed
- fwd ->
- Args#view_query_args{
- direction = rev,
- start_docid =
- reverse_key_default(Args#view_query_args.start_docid),
- end_docid =
- reverse_key_default(Args#view_query_args.end_docid)
- }
- end;
-validate_view_query(descending, false, Args) ->
- Args; % Ignore default condition
-validate_view_query(skip, Value, Args) ->
- Args#view_query_args{skip=Value};
-validate_view_query(group_level, Value, Args) ->
- case Args#view_query_args.view_type of
- reduce ->
- Args#view_query_args{group_level=Value};
- _ ->
- Msg = <<"Invalid URL parameter 'group' or "
-                "'group_level' for non-reduce view.">>,
- throw({query_parse_error, Msg})
- end;
-validate_view_query(inclusive_end, Value, Args) ->
- Args#view_query_args{inclusive_end=Value};
-validate_view_query(reduce, _, Args) ->
- case Args#view_query_args.view_type of
- map ->
- Msg = <<"Invalid URL parameter `reduce` for map view.">>,
- throw({query_parse_error, Msg});
- _ ->
- Args
- end;
-validate_view_query(include_docs, true, Args) ->
- case Args#view_query_args.view_type of
- reduce ->
-            Msg = <<"Query parameter `include_docs` "
- "is invalid for reduce views.">>,
- throw({query_parse_error, Msg});
- _ ->
- Args#view_query_args{include_docs=true}
- end;
-validate_view_query(include_docs, _Value, Args) ->
- Args;
-validate_view_query(sorted, false, Args) ->
- Args#view_query_args{sorted=false};
-validate_view_query(sorted, _Value, Args) ->
- Args;
-validate_view_query(extra, _Value, Args) ->
- Args.
-
-view_group_etag(Group, Db) ->
- view_group_etag(Group, Db, nil).
-
-view_group_etag(#group{sig=Sig,current_seq=CurrentSeq}, _Db, Extra) ->
- % ?LOG_ERROR("Group ~p",[Group]),
- % This is not as granular as it could be.
-% If there are updates to the db that do not affect the view index,
- % they will change the Etag. For more granular Etags we'd need to keep
- % track of the last Db seq that caused an index change.
- chttpd:make_etag({Sig, CurrentSeq, Extra}).
-
-parse_bool_param("true") -> true;
-parse_bool_param("false") -> false;
-parse_bool_param(Val) ->
-    Msg = io_lib:format("Invalid value for boolean parameter: ~p", [Val]),
- throw({query_parse_error, ?l2b(Msg)}).
-
-parse_int_param(Val) ->
- case (catch list_to_integer(Val)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Msg = io_lib:format("Invalid value for integer parameter: ~p", [Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
-
-parse_positive_int_param(Val) ->
- case parse_int_param(Val) of
- IntVal when IntVal >= 0 ->
- IntVal;
- _ ->
- Fmt = "Invalid value for positive integer parameter: ~p",
- Msg = io_lib:format(Fmt, [Val]),
- throw({query_parse_error, ?l2b(Msg)})
- end.
diff --git a/apps/fabric/README.md b/apps/fabric/README.md
deleted file mode 100644
index 2d7f1ae2..00000000
--- a/apps/fabric/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-## fabric
-
-Fabric is a collection of proxy functions for [CouchDB][1] operations in a cluster. These functions are used in [BigCouch][2] as the remote procedure endpoints on each of the cluster nodes.
-
-For example, creating a database is a straightforward task in standalone CouchDB, but for BigCouch, each node that will store a shard/partition for the database needs to receive and execute a fabric function. The node handling the request also needs to compile the results from each of the nodes and respond accordingly to the client.
-
-Fabric is used in conjunction with 'Rexi', which is also an application within BigCouch.
-
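As a sketch of the calling convention (the database name and option values here are illustrative), any node can drive a cluster-wide operation with a single fabric call:

    1> fabric:create_db(<<"mydb">>, [{n, 3}, {q, 8}]).
    ok
    2> fabric:get_doc_count(<<"mydb">>).
    {ok,0}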
-### Getting Started
-Dependencies:
- * Erlang R13B-03 (or higher)
-
-Build with rebar:
- make
-
-### License
-[Apache 2.0][3]
-
-### Contact
- * [http://cloudant.com][4]
- * [info@cloudant.com][5]
-
-[1]: http://couchdb.apache.org
-[2]: http://github.com/cloudant/bigcouch
-[3]: http://www.apache.org/licenses/LICENSE-2.0.html
-[4]: http://cloudant.com
-[5]: mailto:info@cloudant.com
diff --git a/apps/fabric/ebin/fabric.appup b/apps/fabric/ebin/fabric.appup
deleted file mode 100644
index ef5dc496..00000000
--- a/apps/fabric/ebin/fabric.appup
+++ /dev/null
@@ -1,3 +0,0 @@
-{"1.0.3",[{"1.0.2",[
- {load_module, fabric_view_changes}
-]}],[{"1.0.2",[]}]}.
diff --git a/apps/fabric/include/fabric.hrl b/apps/fabric/include/fabric.hrl
deleted file mode 100644
index 5e10f5cd..00000000
--- a/apps/fabric/include/fabric.hrl
+++ /dev/null
@@ -1,36 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--include_lib("eunit/include/eunit.hrl").
-
--record(collector, {
- query_args,
- callback,
- counters,
- buffer_size,
- blocked = [],
- total_rows = 0,
- offset = 0,
- rows = [],
- skip,
- limit,
- keys,
- os_proc,
- reducer,
- lang,
- sorted,
- user_acc
-}).
-
--record(view_row, {key, id, value, doc, worker}).
diff --git a/apps/fabric/src/fabric.app.src b/apps/fabric/src/fabric.app.src
deleted file mode 100644
index 0b626ba7..00000000
--- a/apps/fabric/src/fabric.app.src
+++ /dev/null
@@ -1,6 +0,0 @@
-{application, fabric, [
- {description, "Routing and proxying layer for CouchDB cluster"},
- {vsn, "1.0.3"},
- {registered, []},
- {applications, [kernel, stdlib, couch, rexi, mem3]}
-]}.
diff --git a/apps/fabric/src/fabric.erl b/apps/fabric/src/fabric.erl
deleted file mode 100644
index 9f9db032..00000000
--- a/apps/fabric/src/fabric.erl
+++ /dev/null
@@ -1,264 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric).
-
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-% DBs
--export([all_dbs/0, all_dbs/1, create_db/1, create_db/2, delete_db/1,
- delete_db/2, get_db_info/1, get_doc_count/1, set_revs_limit/3,
- set_security/3, get_revs_limit/1, get_security/1]).
-
-% Documents
--export([open_doc/3, open_revs/4, get_missing_revs/2, update_doc/3,
- update_docs/3, att_receiver/2]).
-
-% Views
--export([all_docs/4, changes/4, query_view/3, query_view/4, query_view/6,
- get_view_group_info/2]).
-
-% miscellany
--export([design_docs/1, reset_validation_funs/1, cleanup_index_files/0,
- cleanup_index_files/1]).
-
--include("fabric.hrl").
-
-% db operations
-
-all_dbs() ->
- all_dbs(<<>>).
-
-all_dbs(Prefix) when is_list(Prefix) ->
- all_dbs(list_to_binary(Prefix));
-all_dbs(Prefix) when is_binary(Prefix) ->
- Length = byte_size(Prefix),
- MatchingDbs = ets:foldl(fun(#shard{dbname=DbName}, Acc) ->
- case DbName of
- <<Prefix:Length/binary, _/binary>> ->
- [DbName | Acc];
- _ ->
- Acc
- end
- end, [], partitions),
- {ok, lists:usort(MatchingDbs)}.
-
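The prefix match above folds over the `partitions` ets table; a hypothetical session on a running cluster node:

    1> fabric:all_dbs(<<"user_">>).
    {ok,[<<"user_albums">>,<<"user_profiles">>]}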
-get_db_info(DbName) ->
- fabric_db_info:go(dbname(DbName)).
-
-get_doc_count(DbName) ->
- fabric_db_doc_count:go(dbname(DbName)).
-
-create_db(DbName) ->
- create_db(DbName, []).
-
-create_db(DbName, Options) ->
- fabric_db_create:go(dbname(DbName), opts(Options)).
-
-delete_db(DbName) ->
- delete_db(DbName, []).
-
-delete_db(DbName, Options) ->
- fabric_db_delete:go(dbname(DbName), opts(Options)).
-
-set_revs_limit(DbName, Limit, Options) when is_integer(Limit), Limit > 0 ->
- fabric_db_meta:set_revs_limit(dbname(DbName), Limit, opts(Options)).
-
-get_revs_limit(DbName) ->
- {ok, Db} = fabric_util:get_db(dbname(DbName)),
- try couch_db:get_revs_limit(Db) after catch couch_db:close(Db) end.
-
-set_security(DbName, SecObj, Options) ->
- fabric_db_meta:set_security(dbname(DbName), SecObj, opts(Options)).
-
-get_security(DbName) ->
- {ok, Db} = fabric_util:get_db(dbname(DbName)),
- try couch_db:get_security(Db) after catch couch_db:close(Db) end.
-
-% doc operations
-open_doc(DbName, Id, Options) ->
- fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options)).
-
-open_revs(DbName, Id, Revs, Options) ->
- fabric_doc_open_revs:go(dbname(DbName), docid(Id), Revs, opts(Options)).
-
-get_missing_revs(DbName, IdsRevs) when is_list(IdsRevs) ->
- Sanitized = [idrevs(IdR) || IdR <- IdsRevs],
- fabric_doc_missing_revs:go(dbname(DbName), Sanitized).
-
-update_doc(DbName, Doc, Options) ->
- case update_docs(DbName, [Doc], opts(Options)) of
- {ok, [{ok, NewRev}]} ->
- {ok, NewRev};
- {ok, [Error]} ->
- throw(Error);
- {ok, []} ->
- % replication success
- #doc{revs = {Pos, [RevId | _]}} = doc(Doc),
- {ok, {Pos, RevId}}
- end.
-
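A sketch of the common path (doc body illustrative; RevId stands in for the new revision hash the cluster returns):

    1> Doc = {[{<<"_id">>,<<"doc1">>}, {<<"value">>,1}]}.
    2> fabric:update_doc(<<"mydb">>, Doc, []).
    {ok,{1,RevId}}   %% i.e. {ok, {Pos, RevId}} on success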
-update_docs(DbName, Docs, Options) ->
- try fabric_doc_update:go(dbname(DbName), docs(Docs), opts(Options))
- catch {aborted, PreCommitFailures} ->
- {aborted, PreCommitFailures}
- end.
-
-att_receiver(Req, Length) ->
- fabric_doc_attachments:receiver(Req, Length).
-
-all_docs(DbName, Callback, Acc0, #view_query_args{} = QueryArgs) when
- is_function(Callback, 2) ->
- fabric_view_all_docs:go(dbname(DbName), QueryArgs, Callback, Acc0).
-
-changes(DbName, Callback, Acc0, Options) ->
- % TODO use a keylist for Options instead of #changes_args, BugzID 10281
- Feed = Options#changes_args.feed,
- fabric_view_changes:go(dbname(DbName), Feed, Options, Callback, Acc0).
-
-query_view(DbName, DesignName, ViewName) ->
- query_view(DbName, DesignName, ViewName, #view_query_args{}).
-
-query_view(DbName, DesignName, ViewName, QueryArgs) ->
- Callback = fun default_callback/2,
- query_view(DbName, DesignName, ViewName, Callback, [], QueryArgs).
-
-query_view(DbName, Design, ViewName, Callback, Acc0, QueryArgs) ->
- Db = dbname(DbName), View = name(ViewName),
- case is_reduce_view(Db, Design, View, QueryArgs) of
- true ->
- Mod = fabric_view_reduce;
- false ->
- Mod = fabric_view_map
- end,
- Mod:go(Db, Design, View, QueryArgs, Callback, Acc0).
-
-get_view_group_info(DbName, DesignId) ->
- fabric_group_info:go(dbname(DbName), design_doc(DesignId)).
-
-design_docs(DbName) ->
- QueryArgs = #view_query_args{start_key = <<"_design/">>, include_docs=true},
- Callback = fun({total_and_offset, _, _}, []) ->
- {ok, []};
- ({row, {Props}}, Acc) ->
- case couch_util:get_value(id, Props) of
- <<"_design/", _/binary>> ->
- {ok, [couch_util:get_value(doc, Props) | Acc]};
- _ ->
- {stop, Acc}
- end;
- (complete, Acc) ->
- {ok, lists:reverse(Acc)}
- end,
- fabric:all_docs(dbname(DbName), Callback, [], QueryArgs).
-
-reset_validation_funs(DbName) ->
- [rexi:cast(Node, {fabric_rpc, reset_validation_funs, [Name]}) ||
- #shard{node=Node, name=Name} <- mem3:shards(DbName)].
-
-cleanup_index_files() ->
- {ok, DbNames} = fabric:all_dbs(),
- [cleanup_index_files(Name) || Name <- DbNames].
-
-cleanup_index_files(DbName) ->
- {ok, DesignDocs} = fabric:design_docs(DbName),
-
- ActiveSigs = lists:map(fun(#doc{id = GroupId}) ->
- {ok, Info} = fabric:get_view_group_info(DbName, GroupId),
- binary_to_list(couch_util:get_value(signature, Info))
- end, [couch_doc:from_json_obj(DD) || DD <- DesignDocs]),
-
- FileList = filelib:wildcard([couch_config:get("couchdb", "view_index_dir"),
- "/.shards/*/", couch_util:to_list(DbName), "_design/*"]),
-
- DeleteFiles = if ActiveSigs =:= [] -> FileList; true ->
- {ok, RegExp} = re:compile([$(, string:join(ActiveSigs, "|"), $)]),
- lists:filter(fun(FilePath) ->
- re:run(FilePath, RegExp, [{capture, none}]) == nomatch
- end, FileList)
- end,
- [file:delete(File) || File <- DeleteFiles],
- ok.
-
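The active-signature filter is plain re matching; a self-contained shell sketch with made-up signatures and file names:

    1> {ok, RE} = re:compile("(a1b2c3)").
    2> Files = ["mydb_design/a1b2c3.view", "mydb_design/deadbeef.view"].
    3> [F || F <- Files, re:run(F, RE, [{capture, none}]) =:= nomatch].
    ["mydb_design/deadbeef.view"]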
-%% some simple type validation and transcoding
-
-dbname(DbName) when is_list(DbName) ->
- list_to_binary(DbName);
-dbname(DbName) when is_binary(DbName) ->
- DbName;
-dbname(#db{name=Name}) ->
- Name;
-dbname(DbName) ->
- erlang:error({illegal_database_name, DbName}).
-
-name(Thing) ->
- couch_util:to_binary(Thing).
-
-docid(DocId) when is_list(DocId) ->
- list_to_binary(DocId);
-docid(DocId) when is_binary(DocId) ->
- DocId;
-docid(DocId) ->
- erlang:error({illegal_docid, DocId}).
-
-docs(Docs) when is_list(Docs) ->
- [doc(D) || D <- Docs];
-docs(Docs) ->
- erlang:error({illegal_docs_list, Docs}).
-
-doc(#doc{} = Doc) ->
- Doc;
-doc({_} = Doc) ->
- couch_doc:from_json_obj(Doc);
-doc(Doc) ->
- erlang:error({illegal_doc_format, Doc}).
-
-design_doc(#doc{} = DDoc) ->
- DDoc;
-design_doc(DocId) when is_list(DocId) ->
- design_doc(list_to_binary(DocId));
-design_doc(<<"_design/", _/binary>> = DocId) ->
- DocId;
-design_doc(GroupName) ->
- <<"_design/", GroupName/binary>>.
-
-idrevs({Id, Revs}) when is_list(Revs) ->
- {docid(Id), [rev(R) || R <- Revs]}.
-
-rev(Rev) when is_list(Rev); is_binary(Rev) ->
- couch_doc:parse_rev(Rev);
-rev({Seq, Hash} = Rev) when is_integer(Seq), is_binary(Hash) ->
- Rev.
-
-opts(Options) ->
- case couch_util:get_value(user_ctx, Options) of
- undefined ->
- case erlang:get(user_ctx) of
- #user_ctx{} = Ctx ->
- [{user_ctx, Ctx} | Options];
- _ ->
- Options
- end;
- _ ->
- Options
- end.
-
-default_callback(complete, Acc) ->
- {ok, lists:reverse(Acc)};
-default_callback(Row, Acc) ->
- {ok, [Row | Acc]}.
-
-is_reduce_view(_, _, _, #view_query_args{view_type=Reduce}) ->
- Reduce =:= reduce.
diff --git a/apps/fabric/src/fabric_db_create.erl b/apps/fabric/src/fabric_db_create.erl
deleted file mode 100644
index ccea943d..00000000
--- a/apps/fabric/src/fabric_db_create.erl
+++ /dev/null
@@ -1,79 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_create).
--export([go/2]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(DBNAME_REGEX, "^[a-z][a-z0-9\\_\\$()\\+\\-\\/\\s.]*$").
-
-%% @doc Create a new database, and all its partition files across the cluster
-%% Options is proplist with user_ctx, n, q
-go(DbName, Options) ->
- case re:run(DbName, ?DBNAME_REGEX, [{capture,none}]) of
- match ->
- Shards = mem3:choose_shards(DbName, Options),
- Doc = make_document(Shards),
- Workers = fabric_util:submit_jobs(Shards, create_db, [Options, Doc]),
- Acc0 = fabric_dict:init(Workers, nil),
- case fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {ok, _} ->
- ok;
- Else ->
- Else
- end;
- nomatch ->
- {error, illegal_database_name}
- end.
-
-handle_message(Msg, Shard, Counters) ->
- C1 = fabric_dict:store(Shard, Msg, Counters),
- case fabric_dict:any(nil, C1) of
- true ->
- {ok, C1};
- false ->
- final_answer(C1)
- end.
-
-make_document([#shard{dbname=DbName}|_] = Shards) ->
- {RawOut, ByNodeOut, ByRangeOut} =
- lists:foldl(fun(#shard{node=N, range=[B,E]}, {Raw, ByNode, ByRange}) ->
- Range = ?l2b([couch_util:to_hex(<<B:32/integer>>), "-",
- couch_util:to_hex(<<E:32/integer>>)]),
- Node = couch_util:to_binary(N),
- {[[<<"add">>, Range, Node] | Raw], orddict:append(Node, Range, ByNode),
- orddict:append(Range, Node, ByRange)}
- end, {[], [], []}, Shards),
- #doc{id=DbName, body = {[
- {<<"changelog">>, lists:sort(RawOut)},
- {<<"by_node">>, {[{K,lists:sort(V)} || {K,V} <- ByNodeOut]}},
- {<<"by_range">>, {[{K,lists:sort(V)} || {K,V} <- ByRangeOut]}}
- ]}}.
-
-final_answer(Counters) ->
- Successes = [X || {_, M} = X <- Counters, M == ok orelse M == file_exists],
- case fabric_view:is_progress_possible(Successes) of
- true ->
- case lists:keymember(file_exists, 2, Successes) of
- true ->
- {error, file_exists};
- false ->
- {stop, ok}
- end;
- false ->
- {error, internal_server_error}
- end.
diff --git a/apps/fabric/src/fabric_db_delete.erl b/apps/fabric/src/fabric_db_delete.erl
deleted file mode 100644
index c3000e57..00000000
--- a/apps/fabric/src/fabric_db_delete.erl
+++ /dev/null
@@ -1,55 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_delete).
--export([go/2]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-go(DbName, Options) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, delete_db, [Options, DbName]),
- Acc0 = fabric_dict:init(Workers, nil),
- case fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {ok, ok} ->
- ok;
- {ok, not_found} ->
- erlang:error(database_does_not_exist);
- Error ->
- Error
- end.
-
-handle_message(Msg, Shard, Counters) ->
- C1 = fabric_dict:store(Shard, Msg, Counters),
- case fabric_dict:any(nil, C1) of
- true ->
- {ok, C1};
- false ->
- final_answer(C1)
- end.
-
-final_answer(Counters) ->
- Successes = [X || {_, M} = X <- Counters, M == ok orelse M == not_found],
- case fabric_view:is_progress_possible(Successes) of
- true ->
- case lists:keymember(ok, 2, Successes) of
- true ->
- {stop, ok};
- false ->
- {stop, not_found}
- end;
- false ->
- {error, internal_server_error}
- end.
diff --git a/apps/fabric/src/fabric_db_doc_count.erl b/apps/fabric/src/fabric_db_doc_count.erl
deleted file mode 100644
index faa755e6..00000000
--- a/apps/fabric/src/fabric_db_doc_count.erl
+++ /dev/null
@@ -1,46 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_doc_count).
-
--export([go/1]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, get_doc_count, []),
- Acc0 = {fabric_dict:init(Workers, nil), 0},
- fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0).
-
-handle_message({ok, Count}, Shard, {Counters, Acc}) ->
- case fabric_dict:lookup_element(Shard, Counters) of
- undefined ->
- % already heard from someone else in this range
- {ok, {Counters, Acc}};
- nil ->
- C1 = fabric_dict:store(Shard, ok, Counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, {C2, Count+Acc}};
- false ->
- {stop, Count+Acc}
- end
- end;
-handle_message(_, _, Acc) ->
- {ok, Acc}.
-
diff --git a/apps/fabric/src/fabric_db_info.erl b/apps/fabric/src/fabric_db_info.erl
deleted file mode 100644
index a0acb379..00000000
--- a/apps/fabric/src/fabric_db_info.erl
+++ /dev/null
@@ -1,69 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_info).
-
--export([go/1]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-go(DbName) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, get_db_info, []),
- Acc0 = {fabric_dict:init(Workers, nil), []},
- fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0).
-
-handle_message({ok, Info}, #shard{dbname=Name} = Shard, {Counters, Acc}) ->
- case fabric_dict:lookup_element(Shard, Counters) of
- undefined ->
- % already heard from someone else in this range
- {ok, {Counters, Acc}};
- nil ->
- Seq = couch_util:get_value(update_seq, Info),
- C1 = fabric_dict:store(Shard, Seq, Counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, {C2, [Info|Acc]}};
- false ->
- {stop, [
- {db_name,Name},
- {update_seq, fabric_view_changes:pack_seqs(C2)} |
- merge_results(lists:flatten([Info|Acc]))
- ]}
- end
- end;
-handle_message(_, _, Acc) ->
- {ok, Acc}.
-
-merge_results(Info) ->
- Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
- orddict:new(), Info),
- orddict:fold(fun
- (doc_count, X, Acc) ->
- [{doc_count, lists:sum(X)} | Acc];
- (doc_del_count, X, Acc) ->
- [{doc_del_count, lists:sum(X)} | Acc];
- (purge_seq, X, Acc) ->
- [{purge_seq, lists:sum(X)} | Acc];
- (compact_running, X, Acc) ->
- [{compact_running, lists:member(true, X)} | Acc];
- (disk_size, X, Acc) ->
- [{disk_size, lists:sum(X)} | Acc];
- (disk_format_version, X, Acc) ->
- [{disk_format_version, lists:max(X)} | Acc];
- (_, _, Acc) ->
- Acc
- end, [{instance_start_time, <<"0">>}], Dict).
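merge_results/1 sums or combines the per-shard values; a shell sketch with two shards' worth of (abbreviated) info, treating the function as exported:

    1> merge_results([{doc_count,5}, {doc_count,7},
                      {compact_running,false}, {compact_running,true}]).
    [{doc_count,12},
     {compact_running,true},
     {instance_start_time,<<"0">>}]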
diff --git a/apps/fabric/src/fabric_db_meta.erl b/apps/fabric/src/fabric_db_meta.erl
deleted file mode 100644
index cb46f380..00000000
--- a/apps/fabric/src/fabric_db_meta.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_db_meta).
-
--export([set_revs_limit/3, set_security/3]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-set_revs_limit(DbName, Limit, Options) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, set_revs_limit, [Limit, Options]),
- Waiting = length(Workers) - 1,
- case fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Waiting) of
- {ok, ok} ->
- ok;
- Error ->
- Error
- end.
-
-set_security(DbName, SecObj, Options) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, set_security, [SecObj, Options]),
- Waiting = length(Workers) - 1,
- case fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Waiting) of
- {ok, ok} ->
- ok;
- Error ->
- Error
- end.
-
-handle_message(ok, _, 0) ->
- {stop, ok};
-handle_message(ok, _, Waiting) ->
- {ok, Waiting - 1};
-handle_message(Error, _, _Waiting) ->
-    {error, Error}.
\ No newline at end of file
diff --git a/apps/fabric/src/fabric_dict.erl b/apps/fabric/src/fabric_dict.erl
deleted file mode 100644
index 7db98923..00000000
--- a/apps/fabric/src/fabric_dict.erl
+++ /dev/null
@@ -1,51 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_dict).
--compile(export_all).
-
-% Instead of ets, let's use an ordered keylist. We'll need to revisit if we
-% have >> 100 shards, so a private interface is a good idea. - APK June 2010
-
-init(Keys, InitialValue) ->
- orddict:from_list([{Key, InitialValue} || Key <- Keys]).
-
-
-decrement_all(Dict) ->
- [{K,V-1} || {K,V} <- Dict].
-
-store(Key, Value, Dict) ->
- orddict:store(Key, Value, Dict).
-
-erase(Key, Dict) ->
- orddict:erase(Key, Dict).
-
-update_counter(Key, Incr, Dict0) ->
- orddict:update_counter(Key, Incr, Dict0).
-
-
-lookup_element(Key, Dict) ->
- couch_util:get_value(Key, Dict).
-
-size(Dict) ->
- orddict:size(Dict).
-
-any(Value, Dict) ->
- lists:keymember(Value, 2, Dict).
-
-filter(Fun, Dict) ->
- orddict:filter(Fun, Dict).
-
-fold(Fun, Acc0, Dict) ->
- orddict:fold(Fun, Acc0, Dict).
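Since it is all orddict underneath, the worker-accounting pattern used throughout fabric is easy to see in the shell (the shard names are stand-in atoms):

    1> D0 = fabric_dict:init([shard_a, shard_b], nil).
    [{shard_a,nil},{shard_b,nil}]
    2> D1 = fabric_dict:store(shard_a, ok, D0).
    [{shard_a,ok},{shard_b,nil}]
    3> fabric_dict:any(nil, D1).   %% still waiting on shard_b
    true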
diff --git a/apps/fabric/src/fabric_doc_attachments.erl b/apps/fabric/src/fabric_doc_attachments.erl
deleted file mode 100644
index b66e2ae4..00000000
--- a/apps/fabric/src/fabric_doc_attachments.erl
+++ /dev/null
@@ -1,116 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_attachments).
-
--include("fabric.hrl").
-
-%% couch api calls
--export([receiver/2]).
-
-receiver(_Req, undefined) ->
- <<"">>;
-receiver(_Req, {unknown_transfer_encoding, Unknown}) ->
- exit({unknown_transfer_encoding, Unknown});
-receiver(Req, chunked) ->
- MiddleMan = spawn(fun() -> middleman(Req, chunked) end),
- fun(4096, ChunkFun, ok) ->
- write_chunks(MiddleMan, ChunkFun)
- end;
-receiver(_Req, 0) ->
- <<"">>;
-receiver(Req, Length) when is_integer(Length) ->
- Middleman = spawn(fun() -> middleman(Req, Length) end),
- fun() ->
- Middleman ! {self(), gimme_data},
- receive {Middleman, Data} -> Data end
- end;
-receiver(_Req, Length) ->
- exit({length_not_integer, Length}).
-
-%%
-%% internal
-%%
-
-write_chunks(MiddleMan, ChunkFun) ->
- MiddleMan ! {self(), gimme_data},
- receive
- {MiddleMan, {0, _Footers}} ->
- % MiddleMan ! {self(), done},
- ok;
- {MiddleMan, ChunkRecord} ->
- ChunkFun(ChunkRecord, ok),
- write_chunks(MiddleMan, ChunkFun)
- end.
-
-receive_unchunked_attachment(_Req, 0) ->
- ok;
-receive_unchunked_attachment(Req, Length) ->
- receive {MiddleMan, go} ->
- Data = couch_httpd:recv(Req, 0),
- MiddleMan ! {self(), Data}
- end,
- receive_unchunked_attachment(Req, Length - size(Data)).
-
-middleman(Req, chunked) ->
- % spawn a process to actually receive the uploaded data
- RcvFun = fun(ChunkRecord, ok) ->
- receive {From, go} -> From ! {self(), ChunkRecord} end, ok
- end,
- Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req,4096,RcvFun,ok) end),
-
- % take requests from the DB writers and get data from the receiver
- N = erlang:list_to_integer(couch_config:get("cluster","n")),
- middleman_loop(Receiver, N, dict:new(), 0, []);
-
-middleman(Req, Length) ->
- Receiver = spawn(fun() -> receive_unchunked_attachment(Req, Length) end),
- N = erlang:list_to_integer(couch_config:get("cluster","n")),
- middleman_loop(Receiver, N, dict:new(), 0, []).
-
-middleman_loop(Receiver, N, Counters, Offset, ChunkList) ->
- receive {From, gimme_data} ->
- % figure out how far along this writer (From) is in the list
- {NewCounters, WhichChunk} = case dict:find(From, Counters) of
- {ok, I} ->
- {dict:update_counter(From, 1, Counters), I};
- error ->
- {dict:store(From, 2, Counters), 1}
- end,
- ListIndex = WhichChunk - Offset,
-
- % talk to the receiver to get another chunk if necessary
- ChunkList1 = if ListIndex > length(ChunkList) ->
- Receiver ! {self(), go},
- receive {Receiver, ChunkRecord} -> ChunkList ++ [ChunkRecord] end;
- true -> ChunkList end,
-
- % reply to the writer
- From ! {self(), lists:nth(ListIndex, ChunkList1)},
-
- % check if we can drop a chunk from the head of the list
- SmallestIndex = dict:fold(fun(_, Val, Acc) -> lists:min([Val,Acc]) end,
- WhichChunk+1, NewCounters),
- Size = dict:size(NewCounters),
-
- {NewChunkList, NewOffset} =
- if Size == N andalso (SmallestIndex - Offset) == 2 ->
- {tl(ChunkList1), Offset+1};
- true ->
- {ChunkList1, Offset}
- end,
- middleman_loop(Receiver, N, NewCounters, NewOffset, NewChunkList)
- after 10000 ->
- ok
- end.
diff --git a/apps/fabric/src/fabric_doc_missing_revs.erl b/apps/fabric/src/fabric_doc_missing_revs.erl
deleted file mode 100644
index a4d54192..00000000
--- a/apps/fabric/src/fabric_doc_missing_revs.erl
+++ /dev/null
@@ -1,78 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_missing_revs).
-
--export([go/2]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
-
-go(DbName, AllIdsRevs) ->
- Workers = lists:map(fun({#shard{name=Name, node=Node} = Shard, IdsRevs}) ->
- Ref = rexi:cast(Node, {fabric_rpc, get_missing_revs, [Name, IdsRevs]}),
- Shard#shard{ref=Ref}
- end, group_idrevs_by_shard(DbName, AllIdsRevs)),
- ResultDict = dict:from_list([{Id, {nil,Revs}} || {Id, Revs} <- AllIdsRevs]),
- Acc0 = {length(Workers), ResultDict},
- fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0).
-
-handle_message({rexi_DOWN, _, _, _}, _Worker, Acc0) ->
- skip_message(Acc0);
-handle_message({rexi_EXIT, _, _, _}, _Worker, Acc0) ->
- skip_message(Acc0);
-handle_message({ok, Results}, _Worker, {1, D0}) ->
- D = update_dict(D0, Results),
- {stop, dict:fold(fun force_reply/3, [], D)};
-handle_message({ok, Results}, _Worker, {WaitingCount, D0}) ->
- D = update_dict(D0, Results),
- case dict:fold(fun maybe_reply/3, {stop, []}, D) of
- continue ->
- % still haven't heard about some Ids
- {ok, {WaitingCount - 1, D}};
- {stop, FinalReply} ->
- {stop, FinalReply}
- end.
-
-force_reply(Id, {nil,Revs}, Acc) ->
- % never heard about this ID, assume it's missing
- [{Id, Revs} | Acc];
-force_reply(_, [], Acc) ->
- Acc;
-force_reply(Id, Revs, Acc) ->
- [{Id, Revs} | Acc].
-
-maybe_reply(_, _, continue) ->
- continue;
-maybe_reply(_, {nil, _}, _) ->
- continue;
-maybe_reply(_, [], {stop, Acc}) ->
- {stop, Acc};
-maybe_reply(Id, Revs, {stop, Acc}) ->
- {stop, [{Id, Revs} | Acc]}.
-
-group_idrevs_by_shard(DbName, IdsRevs) ->
- dict:to_list(lists:foldl(fun({Id, Revs}, D0) ->
- lists:foldl(fun(Shard, D1) ->
- dict:append(Shard, {Id, Revs}, D1)
- end, D0, mem3:shards(DbName,Id))
- end, dict:new(), IdsRevs)).
-
-update_dict(D0, KVs) ->
- lists:foldl(fun({K,V,_}, D1) -> dict:store(K, V, D1) end, D0, KVs).
-
-skip_message({1, Dict}) ->
- {stop, dict:fold(fun force_reply/3, [], Dict)};
-skip_message({WaitingCount, Dict}) ->
- {ok, {WaitingCount-1, Dict}}.
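The result shape, sketched with a hypothetical id and rev: a revision no shard knows about comes back as missing.

    1> fabric:get_missing_revs(<<"mydb">>, [{<<"doc1">>, [{1,<<"abc">>}]}]).
    {ok,[{<<"doc1">>,[{1,<<"abc">>}]}]}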
diff --git a/apps/fabric/src/fabric_doc_open.erl b/apps/fabric/src/fabric_doc_open.erl
deleted file mode 100644
index dd4917b9..00000000
--- a/apps/fabric/src/fabric_doc_open.erl
+++ /dev/null
@@ -1,120 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_open).
-
--export([go/3]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName, Id, Options) ->
- Workers = fabric_util:submit_jobs(mem3:shards(DbName,Id), open_doc,
- [Id, [deleted|Options]]),
- SuppressDeletedDoc = not lists:member(deleted, Options),
- R = couch_util:get_value(r, Options, couch_config:get("cluster","r","2")),
- RepairOpts = [{r, integer_to_list(mem3:n(DbName))} | Options],
- Acc0 = {length(Workers), list_to_integer(R), []},
- case fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {ok, Reply} ->
- format_reply(Reply, SuppressDeletedDoc);
- {error, needs_repair, Reply} ->
- spawn(fabric, open_revs, [DbName, Id, all, RepairOpts]),
- format_reply(Reply, SuppressDeletedDoc);
- {error, needs_repair} ->
- % we couldn't determine the correct reply, so we'll run a sync repair
- {ok, Results} = fabric:open_revs(DbName, Id, all, RepairOpts),
- case lists:partition(fun({ok, #doc{deleted=Del}}) -> Del end, Results) of
- {[], []} ->
- {not_found, missing};
- {_DeletedDocs, []} when SuppressDeletedDoc ->
- {not_found, deleted};
- {DeletedDocs, []} ->
- lists:last(lists:sort(DeletedDocs));
- {_, LiveDocs} ->
- lists:last(lists:sort(LiveDocs))
- end;
- Error ->
- Error
- end.
-
-format_reply({ok, #doc{deleted=true}}, true) ->
- {not_found, deleted};
-format_reply(Else, _) ->
- Else.
-
-handle_message({rexi_DOWN, _, _, _}, _Worker, Acc0) ->
- skip_message(Acc0);
-handle_message({rexi_EXIT, _Reason}, _Worker, Acc0) ->
- skip_message(Acc0);
-handle_message(Reply, _Worker, {WaitingCount, R, Replies}) ->
- NewReplies = orddict:update_counter(Reply, 1, Replies),
- Reduced = fabric_util:remove_ancestors(NewReplies, []),
- case lists:dropwhile(fun({_, Count}) -> Count < R end, Reduced) of
- [{QuorumReply, _} | _] ->
- if length(NewReplies) =:= 1 ->
- {stop, QuorumReply};
- true ->
- % we had some disagreement amongst the workers, so repair is useful
- {error, needs_repair, QuorumReply}
- end;
- [] ->
- if WaitingCount =:= 1 ->
- {error, needs_repair};
- true ->
- {ok, {WaitingCount-1, R, NewReplies}}
- end
- end.
-
-skip_message({1, _R, _Replies}) ->
- {error, needs_repair};
-skip_message({WaitingCount, R, Replies}) ->
- {ok, {WaitingCount-1, R, Replies}}.
-
-
-open_doc_test() ->
- Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
- Bar1 = {ok, #doc{revs = {1,[<<"bar">>]}}},
- Baz1 = {ok, #doc{revs = {1,[<<"baz">>]}}},
- NF = {not_found, missing},
- State0 = {3, 2, []},
- State1 = {2, 2, [{Foo1,1}]},
- State2 = {1, 2, [{Bar1,1}, {Foo1,1}]},
- ?assertEqual({ok, State1}, handle_message(Foo1, nil, State0)),
-
- % normal case - quorum reached, no disagreement
- ?assertEqual({stop, Foo1}, handle_message(Foo1, nil, State1)),
-
- % 2nd worker disagrees, voting continues
- ?assertEqual({ok, State2}, handle_message(Bar1, nil, State1)),
-
- % 3rd worker resolves voting, but repair is needed
- ?assertEqual({error, needs_repair, Foo1}, handle_message(Foo1, nil, State2)),
-
- % 2nd worker comes up with descendant of Foo1, voting resolved, run repair
- ?assertEqual({error, needs_repair, Foo2}, handle_message(Foo2, nil, State1)),
-
- % not_found is considered to be an ancestor of everybody
- ?assertEqual({error, needs_repair, Foo1}, handle_message(NF, nil, State1)),
-
- % 3 distinct edit branches result in quorum failure
- ?assertEqual({error, needs_repair}, handle_message(Baz1, nil, State2)),
-
- % bad node concludes voting w/o success, run sync repair to get the result
- ?assertEqual(
- {error, needs_repair},
- handle_message({rexi_DOWN, 1, 2, 3}, nil, State2)
- ).
diff --git a/apps/fabric/src/fabric_doc_open_revs.erl b/apps/fabric/src/fabric_doc_open_revs.erl
deleted file mode 100644
index d0aec6e4..00000000
--- a/apps/fabric/src/fabric_doc_open_revs.erl
+++ /dev/null
@@ -1,284 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_open_revs).
-
--export([go/4]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
--record(state, {
- dbname,
- worker_count,
- reply_count = 0,
- r,
- revs,
- latest,
- replies = []
-}).
-
-go(DbName, Id, Revs, Options) ->
- Workers = fabric_util:submit_jobs(mem3:shards(DbName,Id), open_revs,
- [Id, Revs, Options]),
- R = couch_util:get_value(r, Options, couch_config:get("cluster","r","2")),
- State = #state{
- dbname = DbName,
- worker_count = length(Workers),
- r = list_to_integer(R),
- revs = Revs,
- latest = lists:member(latest, Options),
- replies = case Revs of all -> []; Revs -> [{Rev,[]} || Rev <- Revs] end
- },
- case fabric_util:recv(Workers, #shard.ref, fun handle_message/3, State) of
- {ok, {ok, Reply}} ->
- {ok, Reply};
- Else ->
- Else
- end.
-
-handle_message({rexi_DOWN, _, _, _}, _Worker, State) ->
- skip(State);
-handle_message({rexi_EXIT, _}, _Worker, State) ->
- skip(State);
-handle_message({ok, RawReplies}, _Worker, #state{revs = all} = State) ->
- #state{
- dbname = DbName,
- reply_count = ReplyCount,
- worker_count = WorkerCount,
- replies = All0,
- r = R
- } = State,
- All = lists:foldl(fun(Reply,D) -> orddict:update_counter(Reply,1,D) end,
- All0, RawReplies),
- Reduced = fabric_util:remove_ancestors(All, []),
- Complete = (ReplyCount =:= (WorkerCount - 1)),
- QuorumMet = lists:all(fun({_, C}) -> C >= R end, Reduced),
- case Reduced of All when QuorumMet andalso ReplyCount =:= (R-1) ->
- Repair = false;
- _ ->
- Repair = [D || {{ok,D}, _} <- Reduced]
- end,
- case maybe_reply(DbName, Reduced, Complete, Repair, R) of
- noreply ->
- {ok, State#state{replies = All, reply_count = ReplyCount+1}};
- {reply, FinalReply} ->
- {stop, FinalReply}
- end;
-handle_message({ok, RawReplies0}, _Worker, State) ->
- % we've got an explicit revision list, but if latest=true the workers may
- % return a descendant of the requested revision. Take advantage of the
- % fact that revisions are returned in order to keep track.
- RawReplies = strip_not_found_missing(RawReplies0),
- #state{
- dbname = DbName,
- reply_count = ReplyCount,
- worker_count = WorkerCount,
- replies = All0,
- r = R
- } = State,
- All = lists:zipwith(fun({Rev, D}, Reply) ->
- if Reply =:= error -> {Rev, D}; true ->
- {Rev, orddict:update_counter(Reply, 1, D)}
- end
- end, All0, RawReplies),
- Reduced = [fabric_util:remove_ancestors(X, []) || {_, X} <- All],
- FinalReplies = [choose_winner(X, R) || X <- Reduced],
- Complete = (ReplyCount =:= (WorkerCount - 1)),
- case is_repair_needed(All, FinalReplies) of
- true ->
- Repair = [D || {{ok,D}, _} <- lists:flatten(Reduced)];
- false ->
- Repair = false
- end,
- case maybe_reply(DbName, FinalReplies, Complete, Repair, R) of
- noreply ->
- {ok, State#state{replies = All, reply_count = ReplyCount+1}};
- {reply, FinalReply} ->
- {stop, FinalReply}
- end.
-
-skip(#state{revs=all} = State) ->
- handle_message({ok, []}, nil, State);
-skip(#state{revs=Revs} = State) ->
- handle_message({ok, [error || _Rev <- Revs]}, nil, State).
-
-maybe_reply(_, [], false, _, _) ->
- noreply;
-maybe_reply(DbName, ReplyDict, IsComplete, RepairDocs, R) ->
- case lists:all(fun({_, C}) -> C >= R end, ReplyDict) of
- true ->
- maybe_execute_read_repair(DbName, RepairDocs),
- {reply, unstrip_not_found_missing(orddict:fetch_keys(ReplyDict))};
- false ->
- case IsComplete of false -> noreply; true ->
- maybe_execute_read_repair(DbName, RepairDocs),
- {reply, unstrip_not_found_missing(orddict:fetch_keys(ReplyDict))}
- end
- end.
-
-choose_winner(Options, R) ->
- case lists:dropwhile(fun({_Reply, C}) -> C < R end, Options) of
- [] ->
- case [Elem || {{ok, #doc{}}, _} = Elem <- Options] of
- [] ->
- hd(Options);
- Docs ->
- lists:last(lists:sort(Docs))
- end;
- [QuorumMet | _] ->
- QuorumMet
- end.
-
-% repair needed if any reply other than the winner has been received for a rev
-is_repair_needed([], []) ->
- false;
-is_repair_needed([{_Rev, [Reply]} | Tail1], [Reply | Tail2]) ->
- is_repair_needed(Tail1, Tail2);
-is_repair_needed(_, _) ->
- true.
-
-maybe_execute_read_repair(_Db, false) ->
- ok;
-maybe_execute_read_repair(Db, Docs) ->
- spawn(fun() ->
- [#doc{id=Id} | _] = Docs,
- Ctx = #user_ctx{roles=[<<"_admin">>]},
- Res = fabric:update_docs(Db, Docs, [replicated_changes, {user_ctx,Ctx}]),
- ?LOG_INFO("read_repair ~s ~s ~p", [Db, Id, Res])
- end).
-
-% hackery required so that not_found sorts first
-strip_not_found_missing([]) ->
- [];
-strip_not_found_missing([{{not_found, missing}, Rev} | Rest]) ->
- [{not_found, Rev} | strip_not_found_missing(Rest)];
-strip_not_found_missing([Else | Rest]) ->
- [Else | strip_not_found_missing(Rest)].
-
-unstrip_not_found_missing([]) ->
- [];
-unstrip_not_found_missing([{not_found, Rev} | Rest]) ->
- [{{not_found, missing}, Rev} | unstrip_not_found_missing(Rest)];
-unstrip_not_found_missing([Else | Rest]) ->
- [Else | unstrip_not_found_missing(Rest)].
-
-all_revs_test() ->
- State0 = #state{worker_count = 3, r = 2, revs = all},
- Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
- Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
-
- % an empty worker response does not count as meeting quorum
- ?assertMatch(
- {ok, #state{}},
- handle_message({ok, []}, nil, State0)
- ),
-
- ?assertMatch(
- {ok, #state{}},
- handle_message({ok, [Foo1, Bar1]}, nil, State0)
- ),
- {ok, State1} = handle_message({ok, [Foo1, Bar1]}, nil, State0),
-
- % the normal case - workers agree
- ?assertEqual(
- {stop, [Bar1, Foo1]},
- handle_message({ok, [Foo1, Bar1]}, nil, State1)
- ),
-
- % a case where the 2nd worker has a newer Foo - currently we're considering
- % Foo to have reached quorum and execute_read_repair()
- ?assertEqual(
- {stop, [Bar1, Foo2]},
- handle_message({ok, [Foo2, Bar1]}, nil, State1)
- ),
-
- % a case where quorum has not yet been reached for Foo
- ?assertMatch(
- {ok, #state{}},
- handle_message({ok, [Bar1]}, nil, State1)
- ),
- {ok, State2} = handle_message({ok, [Bar1]}, nil, State1),
-
- % still no quorum, but all workers have responded. We include Foo1 in the
- % response and execute_read_repair()
- ?assertEqual(
- {stop, [Bar1, Foo1]},
- handle_message({ok, [Bar1]}, nil, State2)
- ).
-
-specific_revs_test() ->
- Revs = [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}],
- State0 = #state{
- worker_count = 3,
- r = 2,
- revs = Revs,
- latest = false,
- replies = [{Rev,[]} || Rev <- Revs]
- },
- Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
- Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
- Baz1 = {{not_found, missing}, {1,<<"baz">>}},
- Baz2 = {ok, #doc{revs = {1, [<<"baz">>]}}},
-
- ?assertMatch(
- {ok, #state{}},
- handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State0)
- ),
- {ok, State1} = handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State0),
-
- % the normal case - workers agree
- ?assertEqual(
- {stop, [Foo1, Bar1, Baz1]},
- handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State1)
- ),
-
- % latest=true, worker responds with Foo2 and we return it
- State0L = State0#state{latest = true},
- ?assertMatch(
- {ok, #state{}},
- handle_message({ok, [Foo2, Bar1, Baz1]}, nil, State0L)
- ),
- {ok, State1L} = handle_message({ok, [Foo2, Bar1, Baz1]}, nil, State0L),
- ?assertEqual(
- {stop, [Foo2, Bar1, Baz1]},
- handle_message({ok, [Foo2, Bar1, Baz1]}, nil, State1L)
- ),
-
- % Foo1 is included in the read quorum for Foo2
- ?assertEqual(
- {stop, [Foo2, Bar1, Baz1]},
- handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State1L)
- ),
-
- % {not_found, missing} is included in the quorum for any found revision
- ?assertEqual(
- {stop, [Foo2, Bar1, Baz2]},
- handle_message({ok, [Foo2, Bar1, Baz2]}, nil, State1L)
- ),
-
- % a worker failure is skipped
- ?assertMatch(
- {ok, #state{}},
- handle_message({rexi_EXIT, foo}, nil, State1L)
- ),
- {ok, State2L} = handle_message({rexi_EXIT, foo}, nil, State1L),
- ?assertEqual(
- {stop, [Foo2, Bar1, Baz2]},
- handle_message({ok, [Foo2, Bar1, Baz2]}, nil, State2L)
- ).
diff --git a/apps/fabric/src/fabric_doc_update.erl b/apps/fabric/src/fabric_doc_update.erl
deleted file mode 100644
index 50d02888..00000000
--- a/apps/fabric/src/fabric_doc_update.erl
+++ /dev/null
@@ -1,147 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_doc_update).
-
--export([go/3]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(_, [], _) ->
- {ok, []};
-go(DbName, AllDocs, Opts) ->
- validate_atomic_update(DbName, AllDocs, lists:member(all_or_nothing, Opts)),
- Options = lists:delete(all_or_nothing, Opts),
- GroupedDocs = lists:map(fun({#shard{name=Name, node=Node} = Shard, Docs}) ->
- Ref = rexi:cast(Node, {fabric_rpc, update_docs, [Name, Docs, Options]}),
- {Shard#shard{ref=Ref}, Docs}
- end, group_docs_by_shard(DbName, AllDocs)),
- {Workers, _} = lists:unzip(GroupedDocs),
- W = couch_util:get_value(w, Options, couch_config:get("cluster","w","2")),
- Acc0 = {length(Workers), length(AllDocs), list_to_integer(W), GroupedDocs,
- dict:from_list([{Doc,[]} || Doc <- AllDocs])},
- case fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {ok, Results} ->
- Reordered = couch_util:reorder_results(AllDocs, Results),
- {ok, [R || R <- Reordered, R =/= noreply]};
- Else ->
- Else
- end.
-
-handle_message({rexi_DOWN, _, _, _}, _Worker, Acc0) ->
- skip_message(Acc0);
-handle_message({rexi_EXIT, _}, _Worker, Acc0) ->
- {WaitingCount, _, W, _, DocReplyDict} = Acc0,
- if WaitingCount =:= 1 ->
- {W, Reply} = dict:fold(fun force_reply/3, {W,[]}, DocReplyDict),
- {stop, Reply};
- true ->
- {ok, setelement(1, Acc0, WaitingCount-1)}
- end;
-handle_message({ok, Replies}, Worker, Acc0) ->
- {WaitingCount, DocCount, W, GroupedDocs, DocReplyDict0} = Acc0,
- Docs = couch_util:get_value(Worker, GroupedDocs),
- DocReplyDict = append_update_replies(Docs, Replies, DocReplyDict0),
- case {WaitingCount, dict:size(DocReplyDict)} of
- {1, _} ->
- % last message has arrived, we need to conclude things
- {W, Reply} = dict:fold(fun force_reply/3, {W,[]}, DocReplyDict),
- {stop, Reply};
- {_, DocCount} ->
- % we've got at least one reply for each document, let's take a look
- case dict:fold(fun maybe_reply/3, {stop,W,[]}, DocReplyDict) of
- continue ->
- {ok, {WaitingCount - 1, DocCount, W, GroupedDocs, DocReplyDict}};
- {stop, W, FinalReplies} ->
- {stop, FinalReplies}
- end;
- {_, N} when N < DocCount ->
- % no point in trying to finalize anything yet
- {ok, {WaitingCount - 1, DocCount, W, GroupedDocs, DocReplyDict}}
- end;
-handle_message({missing_stub, Stub}, _, _) ->
- throw({missing_stub, Stub});
-handle_message({not_found, no_db_file} = X, Worker, Acc0) ->
- {_, _, _, GroupedDocs, _} = Acc0,
- Docs = couch_util:get_value(Worker, GroupedDocs),
- handle_message({ok, [X || _D <- Docs]}, Worker, Acc0).
-
-force_reply(Doc, [], {W, Acc}) ->
- {W, [{Doc, {error, internal_server_error}} | Acc]};
-force_reply(Doc, [FirstReply|_] = Replies, {W, Acc}) ->
- case update_quorum_met(W, Replies) of
- {true, Reply} ->
- {W, [{Doc,Reply} | Acc]};
- false ->
- ?LOG_ERROR("write quorum (~p) failed, reply ~p", [W, FirstReply]),
- % TODO make a smarter choice than just picking the first reply
- {W, [{Doc,FirstReply} | Acc]}
- end.
-
-maybe_reply(_, _, continue) ->
- % we didn't meet quorum for all docs, so we're fast-forwarding the fold
- continue;
-maybe_reply(Doc, Replies, {stop, W, Acc}) ->
- case update_quorum_met(W, Replies) of
- {true, Reply} ->
- {stop, W, [{Doc, Reply} | Acc]};
- false ->
- continue
- end.
-
-update_quorum_met(W, Replies) ->
- Counters = lists:foldl(fun(R,D) -> orddict:update_counter(R,1,D) end,
- orddict:new(), Replies),
- case lists:dropwhile(fun({_, Count}) -> Count < W end, Counters) of
- [] ->
- false;
- [{FinalReply, _} | _] ->
- {true, FinalReply}
- end.
-
--spec group_docs_by_shard(binary(), [#doc{}]) -> [{#shard{}, [#doc{}]}].
-group_docs_by_shard(DbName, Docs) ->
- dict:to_list(lists:foldl(fun(#doc{id=Id} = Doc, D0) ->
- lists:foldl(fun(Shard, D1) ->
- dict:append(Shard, Doc, D1)
- end, D0, mem3:shards(DbName,Id))
- end, dict:new(), Docs)).
-
-append_update_replies([], [], DocReplyDict) ->
- DocReplyDict;
-append_update_replies([Doc|Rest], [], Dict0) ->
- % icky, if replicated_changes only errors show up in result
- append_update_replies(Rest, [], dict:append(Doc, noreply, Dict0));
-append_update_replies([Doc|Rest1], [Reply|Rest2], Dict0) ->
- % TODO what if the same document shows up twice in one update_docs call?
- append_update_replies(Rest1, Rest2, dict:append(Doc, Reply, Dict0)).
-
-skip_message(Acc0) ->
- % TODO fix this
- {ok, Acc0}.
-
-validate_atomic_update(_, _, false) ->
- ok;
-validate_atomic_update(_DbName, AllDocs, true) ->
- % TODO actually perform the validation. This requires some hackery, we need
- % to basically extract the prep_and_validate_updates function from couch_db
- % and only run that, without actually writing in case of a success.
- Error = {not_implemented, <<"all_or_nothing is not supported yet">>},
- PreCommitFailures = lists:map(fun(#doc{id=Id, revs = {Pos,Revs}}) ->
- case Revs of [] -> RevId = <<>>; [RevId|_] -> ok end,
- {{Id, {Pos, RevId}}, Error}
- end, AllDocs),
- throw({aborted, PreCommitFailures}).
diff --git a/apps/fabric/src/fabric_group_info.erl b/apps/fabric/src/fabric_group_info.erl
deleted file mode 100644
index d5260271..00000000
--- a/apps/fabric/src/fabric_group_info.erl
+++ /dev/null
@@ -1,66 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_group_info).
-
--export([go/2]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName, GroupId) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, GroupId, []),
- go(DbName, DDoc);
-
-go(DbName, #doc{} = DDoc) ->
- Group = couch_view_group:design_doc_to_view_group(DDoc),
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, group_info, [Group]),
- Acc0 = {fabric_dict:init(Workers, nil), []},
- fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0).
-
-handle_message({ok, Info}, Shard, {Counters, Acc}) ->
- case fabric_dict:lookup_element(Shard, Counters) of
- undefined ->
- % already heard from someone else in this range
- {ok, {Counters, Acc}};
- nil ->
- C1 = fabric_dict:store(Shard, ok, Counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, {C2, [Info|Acc]}};
- false ->
- {stop, merge_results(lists:flatten([Info|Acc]))}
- end
- end;
-handle_message(_, _, Acc) ->
- {ok, Acc}.
-
-merge_results(Info) ->
- Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
- orddict:new(), Info),
- orddict:fold(fun
- (signature, [X|_], Acc) ->
- [{signature, X} | Acc];
- (language, [X|_], Acc) ->
- [{language, X} | Acc];
- (disk_size, X, Acc) ->
- [{disk_size, lists:sum(X)} | Acc];
- (compact_running, X, Acc) ->
- [{compact_running, lists:member(true, X)} | Acc];
- (_, _, Acc) ->
- Acc
- end, [], Dict).
diff --git a/apps/fabric/src/fabric_rpc.erl b/apps/fabric/src/fabric_rpc.erl
deleted file mode 100644
index a7d555e0..00000000
--- a/apps/fabric/src/fabric_rpc.erl
+++ /dev/null
@@ -1,402 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_rpc).
-
--export([get_db_info/1, get_doc_count/1, get_update_seq/1]).
--export([open_doc/3, open_revs/4, get_missing_revs/2, update_docs/3]).
--export([all_docs/2, changes/3, map_view/4, reduce_view/4, group_info/2]).
--export([create_db/3, delete_db/3, reset_validation_funs/1, set_security/3,
- set_revs_limit/3]).
-
--include("fabric.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record (view_acc, {
- db,
- limit,
- include_docs,
- offset = nil,
- total_rows,
- reduce_fun = fun couch_db:enum_docs_reduce_to_count/1,
- group_level = 0
-}).
-
-%% rpc endpoints
-%% call to with_db will supply your M:F with a #db{} and then remaining args
-
-all_docs(DbName, #view_query_args{keys=nil} = QueryArgs) ->
- {ok, Db} = couch_db:open(DbName, []),
- #view_query_args{
- start_key = StartKey,
- start_docid = StartDocId,
- end_key = EndKey,
- end_docid = EndDocId,
- limit = Limit,
- skip = Skip,
- include_docs = IncludeDocs,
- direction = Dir,
- inclusive_end = Inclusive
- } = QueryArgs,
- {ok, Total} = couch_db:get_doc_count(Db),
- Acc0 = #view_acc{
- db = Db,
- include_docs = IncludeDocs,
- limit = Limit+Skip,
- total_rows = Total
- },
- EndKeyType = if Inclusive -> end_key; true -> end_key_gt end,
- Options = [
- {dir, Dir},
- {start_key, if is_binary(StartKey) -> StartKey; true -> StartDocId end},
- {EndKeyType, if is_binary(EndKey) -> EndKey; true -> EndDocId end}
- ],
- {ok, _, Acc} = couch_db:enum_docs(Db, fun view_fold/3, Acc0, Options),
- final_response(Total, Acc#view_acc.offset).
-
-changes(DbName, Args, StartSeq) ->
- #changes_args{style=Style, dir=Dir} = Args,
- case couch_db:open(DbName, []) of
- {ok, Db} ->
- Enum = fun changes_enumerator/2,
- Opts = [{dir,Dir}],
- Acc0 = {Db, StartSeq, Args},
- try
- {ok, {_, LastSeq, _}} =
- couch_db:changes_since(Db, Style, StartSeq, Enum, Opts, Acc0),
- rexi:reply({complete, LastSeq})
- after
- couch_db:close(Db)
- end;
- Error ->
- rexi:reply(Error)
- end.
-
-map_view(DbName, DDoc, ViewName, QueryArgs) ->
- {ok, Db} = couch_db:open(DbName, []),
- #view_query_args{
- limit = Limit,
- skip = Skip,
- keys = Keys,
- include_docs = IncludeDocs,
- stale = Stale,
- view_type = ViewType
- } = QueryArgs,
- MinSeq = if Stale == ok -> 0; true -> couch_db:get_update_seq(Db) end,
- Group0 = couch_view_group:design_doc_to_view_group(DDoc),
- {ok, Pid} = gen_server:call(couch_view, {get_group_server, DbName, Group0}),
- {ok, Group} = couch_view_group:request_group(Pid, MinSeq),
- View = fabric_view:extract_view(Pid, ViewName, Group#group.views, ViewType),
- {ok, Total} = couch_view:get_row_count(View),
- Acc0 = #view_acc{
- db = Db,
- include_docs = IncludeDocs,
- limit = Limit+Skip,
- total_rows = Total,
- reduce_fun = fun couch_view:reduce_to_count/1
- },
- case Keys of
- nil ->
- Options = couch_httpd_view:make_key_options(QueryArgs),
- {ok, _, Acc} = couch_view:fold(View, fun view_fold/3, Acc0, Options);
- _ ->
- Acc = lists:foldl(fun(Key, AccIn) ->
- KeyArgs = QueryArgs#view_query_args{start_key=Key, end_key=Key},
- Options = couch_httpd_view:make_key_options(KeyArgs),
- {_Go, _, Out} = couch_view:fold(View, fun view_fold/3, AccIn,
- Options),
- Out
- end, Acc0, Keys)
- end,
- final_response(Total, Acc#view_acc.offset).
-
-reduce_view(DbName, Group0, ViewName, QueryArgs) ->
- {ok, Db} = couch_db:open(DbName, []),
- #view_query_args{
- group_level = GroupLevel,
- limit = Limit,
- skip = Skip,
- keys = Keys,
- stale = Stale
- } = QueryArgs,
- GroupFun = group_rows_fun(GroupLevel),
- MinSeq = if Stale == ok -> 0; true -> couch_db:get_update_seq(Db) end,
- {ok, Pid} = gen_server:call(couch_view, {get_group_server, DbName, Group0}),
- {ok, #group{views=Views, def_lang=Lang}} = couch_view_group:request_group(
- Pid, MinSeq),
- {NthRed, View} = fabric_view:extract_view(Pid, ViewName, Views, reduce),
- ReduceView = {reduce, NthRed, Lang, View},
- Acc0 = #view_acc{group_level = GroupLevel, limit = Limit+Skip},
- case Keys of
- nil ->
- Options0 = couch_httpd_view:make_key_options(QueryArgs),
- Options = [{key_group_fun, GroupFun} | Options0],
- couch_view:fold_reduce(ReduceView, fun reduce_fold/3, Acc0, Options);
- _ ->
- lists:map(fun(Key) ->
- KeyArgs = QueryArgs#view_query_args{start_key=Key, end_key=Key},
- Options0 = couch_httpd_view:make_key_options(KeyArgs),
- Options = [{key_group_fun, GroupFun} | Options0],
- couch_view:fold_reduce(ReduceView, fun reduce_fold/3, Acc0, Options)
- end, Keys)
- end,
- rexi:reply(complete).
-
-create_db(DbName, Options, Doc) ->
- mem3_util:write_db_doc(Doc),
- rexi:reply(case couch_server:create(DbName, Options) of
- {ok, _} ->
- ok;
- Error ->
- Error
- end).
-
-delete_db(DbName, Options, DocId) ->
- mem3_util:delete_db_doc(DocId),
- rexi:reply(couch_server:delete(DbName, Options)).
-
-get_db_info(DbName) ->
- with_db(DbName, [], {couch_db, get_db_info, []}).
-
-get_doc_count(DbName) ->
- with_db(DbName, [], {couch_db, get_doc_count, []}).
-
-get_update_seq(DbName) ->
- with_db(DbName, [], {couch_db, get_update_seq, []}).
-
-set_security(DbName, SecObj, Options) ->
- with_db(DbName, Options, {couch_db, set_security, [SecObj]}).
-
-set_revs_limit(DbName, Limit, Options) ->
- with_db(DbName, Options, {couch_db, set_revs_limit, [Limit]}).
-
-open_doc(DbName, DocId, Options) ->
- with_db(DbName, Options, {couch_db, open_doc, [DocId, Options]}).
-
-open_revs(DbName, Id, Revs, Options) ->
- with_db(DbName, Options, {couch_db, open_doc_revs, [Id, Revs, Options]}).
-
-get_missing_revs(DbName, IdRevsList) ->
- % reimplement here so we get [] for Ids with no missing revs in response
- rexi:reply(case couch_db:open(DbName, []) of
- {ok, Db} ->
- Ids = [Id1 || {Id1, _Revs} <- IdRevsList],
- {ok, lists:zipwith(fun({Id, Revs}, FullDocInfoResult) ->
- case FullDocInfoResult of
- {ok, #full_doc_info{rev_tree=RevisionTree} = FullInfo} ->
- MissingRevs = couch_key_tree:find_missing(RevisionTree, Revs),
- {Id, MissingRevs, possible_ancestors(FullInfo, MissingRevs)};
- not_found ->
- {Id, Revs, []}
- end
- end, IdRevsList, couch_btree:lookup(Db#db.id_tree, Ids))};
- Error ->
- Error
- end).
-
-update_docs(DbName, Docs0, Options) ->
- case proplists:get_value(replicated_changes, Options) of
- true ->
- X = replicated_changes;
- _ ->
- X = interactive_edit
- end,
- Docs = make_att_readers(Docs0),
- with_db(DbName, Options, {couch_db, update_docs, [Docs, Options, X]}).
-
-group_info(DbName, Group0) ->
- {ok, Pid} = gen_server:call(couch_view, {get_group_server, DbName, Group0}),
- rexi:reply(couch_view_group:request_group_info(Pid)).
-
-reset_validation_funs(DbName) ->
- case couch_db:open(DbName, []) of
- {ok, #db{main_pid = Pid}} ->
- gen_server:cast(Pid, {load_validation_funs, undefined});
- _ ->
- ok
- end.
-
-%%
-%% internal
-%%
-
-with_db(DbName, Options, {M,F,A}) ->
- case couch_db:open(DbName, Options) of
- {ok, Db} ->
- rexi:reply(try
- apply(M, F, [Db | A])
- catch Exception ->
- Exception;
- error:Reason ->
- ?LOG_ERROR("~p ~p ~p~n~p", [?MODULE, {M,F}, Reason,
- erlang:get_stacktrace()]),
- {error, Reason}
- end);
- Error ->
- rexi:reply(Error)
- end.
-
-view_fold(#full_doc_info{} = FullDocInfo, OffsetReds, Acc) ->
- % matches for _all_docs and translates #full_doc_info{} -> KV pair
- case couch_doc:to_doc_info(FullDocInfo) of
- #doc_info{revs=[#rev_info{deleted=false, rev=Rev}|_]} ->
- Id = FullDocInfo#full_doc_info.id,
- Value = {[{rev,couch_doc:rev_to_str(Rev)}]},
- view_fold({{Id,Id}, Value}, OffsetReds, Acc);
- #doc_info{revs=[#rev_info{deleted=true}|_]} ->
- {ok, Acc}
- end;
-view_fold(KV, OffsetReds, #view_acc{offset=nil, total_rows=Total} = Acc) ->
- % calculates the offset for this shard
- #view_acc{reduce_fun=Reduce} = Acc,
- Offset = Reduce(OffsetReds),
- case rexi:sync_reply({total_and_offset, Total, Offset}) of
- ok ->
- view_fold(KV, OffsetReds, Acc#view_acc{offset=Offset});
- stop ->
- exit(normal);
- timeout ->
- exit(timeout)
- end;
-view_fold(_KV, _Offset, #view_acc{limit=0} = Acc) ->
- % we scanned through limit+skip local rows
- {stop, Acc};
-view_fold({{Key,Id}, Value}, _Offset, Acc) ->
- % the normal case
- #view_acc{
- db = Db,
- limit = Limit,
- include_docs = IncludeDocs
- } = Acc,
- Doc = if not IncludeDocs -> undefined; true ->
- case couch_db:open_doc(Db, Id, []) of
- {not_found, deleted} ->
- null;
- {not_found, missing} ->
- undefined;
- {ok, Doc0} ->
- couch_doc:to_json_obj(Doc0, [])
- end
- end,
- case rexi:sync_reply(#view_row{key=Key, id=Id, value=Value, doc=Doc}) of
- ok ->
- {ok, Acc#view_acc{limit=Limit-1}};
- timeout ->
- exit(timeout)
- end.
-
-final_response(Total, nil) ->
- case rexi:sync_reply({total_and_offset, Total, Total}) of ok ->
- rexi:reply(complete);
- stop ->
- ok;
- timeout ->
- exit(timeout)
- end;
-final_response(_Total, _Offset) ->
- rexi:reply(complete).
-
-group_rows_fun(exact) ->
- fun({Key1,_}, {Key2,_}) -> Key1 == Key2 end;
-group_rows_fun(0) ->
- fun(_A, _B) -> true end;
-group_rows_fun(GroupLevel) when is_integer(GroupLevel) ->
- fun({[_|_] = Key1,_}, {[_|_] = Key2,_}) ->
- lists:sublist(Key1, GroupLevel) == lists:sublist(Key2, GroupLevel);
- ({Key1,_}, {Key2,_}) ->
- Key1 == Key2
- end.
-
-reduce_fold(_Key, _Red, #view_acc{limit=0} = Acc) ->
- {stop, Acc};
-reduce_fold(_Key, Red, #view_acc{group_level=0} = Acc) ->
- send(null, Red, Acc);
-reduce_fold(Key, Red, #view_acc{group_level=exact} = Acc) ->
- send(Key, Red, Acc);
-reduce_fold(K, Red, #view_acc{group_level=I} = Acc) when I > 0, is_list(K) ->
- send(lists:sublist(K, I), Red, Acc).
-
-send(Key, Value, #view_acc{limit=Limit} = Acc) ->
- case rexi:sync_reply(#view_row{key=Key, value=Value}) of
- ok ->
- {ok, Acc#view_acc{limit=Limit-1}};
- stop ->
- exit(normal);
- timeout ->
- exit(timeout)
- end.
-
-changes_enumerator(DocInfo, {Db, _Seq, Args}) ->
- #changes_args{include_docs=IncludeDocs, filter=Acc} = Args,
- #doc_info{id=Id, high_seq=Seq, revs=[#rev_info{deleted=Del,rev=Rev}|_]}
- = DocInfo,
- case [X || X <- couch_changes:filter(DocInfo, Acc), X /= null] of
- [] ->
- {ok, {Db, Seq, Args}};
- Results ->
- ChangesRow = changes_row(Db, Seq, Id, Results, Rev, Del, IncludeDocs),
- Go = rexi:sync_reply(ChangesRow),
- {Go, {Db, Seq, Args}}
- end.
-
-changes_row(_, Seq, Id, Results, _, true, true) ->
- #view_row{key=Seq, id=Id, value=Results, doc=deleted};
-changes_row(_, Seq, Id, Results, _, true, false) ->
- #view_row{key=Seq, id=Id, value=Results, doc=deleted};
-changes_row(Db, Seq, Id, Results, Rev, false, true) ->
- #view_row{key=Seq, id=Id, value=Results, doc=doc_member(Db, Id, Rev)};
-changes_row(_, Seq, Id, Results, _, false, false) ->
- #view_row{key=Seq, id=Id, value=Results}.
-
-doc_member(Shard, Id, Rev) ->
- case couch_db:open_doc_revs(Shard, Id, [Rev], []) of
- {ok, [{ok,Doc}]} ->
- couch_doc:to_json_obj(Doc, []);
- Error ->
- Error
- end.
-
-possible_ancestors(_FullInfo, []) ->
- [];
-possible_ancestors(FullInfo, MissingRevs) ->
- #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
- LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
- % Find the revs that are possible parents of this rev
- lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
- % this leaf is a "possible ancenstor" of the missing
- % revs if this LeafPos lessthan any of the missing revs
- case lists:any(fun({MissingPos, _}) ->
- LeafPos < MissingPos end, MissingRevs) of
- true ->
- [{LeafPos, LeafRevId} | Acc];
- false ->
- Acc
- end
- end, [], LeafRevs).
-
-make_att_readers([]) ->
- [];
-make_att_readers([#doc{atts=Atts0} = Doc | Rest]) ->
- % % go through the attachments looking for 'follows' in the data,
- % % replace with function that reads the data from MIME stream.
- Atts = [Att#att{data=make_att_reader(D)} || #att{data=D} = Att <- Atts0],
- [Doc#doc{atts = Atts} | make_att_readers(Rest)].
-
-make_att_reader({follows, Parser}) ->
- fun() ->
- Parser ! {get_bytes, self()},
- receive {bytes, Bytes} -> Bytes end
- end;
-make_att_reader(Else) ->
- Else.
diff --git a/apps/fabric/src/fabric_util.erl b/apps/fabric/src/fabric_util.erl
deleted file mode 100644
index 4ae8e126..00000000
--- a/apps/fabric/src/fabric_util.erl
+++ /dev/null
@@ -1,97 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_util).
-
--export([submit_jobs/3, cleanup/1, recv/4, get_db/1, remove_ancestors/2]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-submit_jobs(Shards, EndPoint, ExtraArgs) ->
- lists:map(fun(#shard{node=Node, name=ShardName} = Shard) ->
- Ref = rexi:cast(Node, {fabric_rpc, EndPoint, [ShardName | ExtraArgs]}),
- Shard#shard{ref = Ref}
- end, Shards).
-
-cleanup(Workers) ->
- [rexi:kill(Node, Ref) || #shard{node=Node, ref=Ref} <- Workers].
-
-recv(Workers, Keypos, Fun, Acc0) ->
- Timeout = case couch_config:get("fabric", "request_timeout", "60000") of
- "infinity" -> infinity;
- N -> list_to_integer(N)
- end,
- rexi_utils:recv(Workers, Keypos, Fun, Acc0, Timeout, infinity).
-
-
-get_db(DbName) ->
- Shards = mem3:shards(DbName),
- case lists:partition(fun(#shard{node = N}) -> N =:= node() end, Shards) of
- {[#shard{name = ShardName}|_], _} ->
- % prefer node-local DBs
- couch_db:open(ShardName, []);
- {[], [#shard{node = Node, name = ShardName}|_]} ->
- % but don't require them
- rpc:call(Node, couch_db, open, [ShardName, []])
- end.
-
-% this presumes the incoming list is sorted, i.e. shorter revlists come first
-remove_ancestors([], Acc) ->
- lists:reverse(Acc);
-remove_ancestors([{{not_found, _}, Count} = Head | Tail], Acc) ->
- % any document is a descendant
- case lists:filter(fun({{ok, #doc{}}, _}) -> true; (_) -> false end, Tail) of
- [{{ok, #doc{}} = Descendant, _} | _] ->
- remove_ancestors(orddict:update_counter(Descendant, Count, Tail), Acc);
- [] ->
- remove_ancestors(Tail, [Head | Acc])
- end;
-remove_ancestors([{{ok, #doc{revs = {Pos, Revs}}}, Count} = Head | Tail], Acc) ->
- Descendants = lists:dropwhile(fun
- ({{ok, #doc{revs = {Pos2, Revs2}}}, _}) ->
- case lists:nthtail(Pos2 - Pos, Revs2) of
- [] ->
- % impossible to tell if Revs2 is a descendant - assume no
- true;
- History ->
- % if Revs2 is a descendant, History is a prefix of Revs
- not lists:prefix(History, Revs)
- end
- end, Tail),
- case Descendants of [] ->
- remove_ancestors(Tail, [Head | Acc]);
- [{Descendant, _} | _] ->
- remove_ancestors(orddict:update_counter(Descendant, Count, Tail), Acc)
- end.
-
-remove_ancestors_test() ->
- Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
- Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
- Bar2 = {not_found, {1,<<"bar">>}},
- ?assertEqual(
- [{Bar1,1}, {Foo1,1}],
- remove_ancestors([{Bar1,1}, {Foo1,1}], [])
- ),
- ?assertEqual(
- [{Bar1,1}, {Foo2,2}],
- remove_ancestors([{Bar1,1}, {Foo1,1}, {Foo2,1}], [])
- ),
- ?assertEqual(
- [{Bar1,2}],
- remove_ancestors([{Bar2,1}, {Bar1,1}], [])
- ).
diff --git a/apps/fabric/src/fabric_view.erl b/apps/fabric/src/fabric_view.erl
deleted file mode 100644
index e5f19b73..00000000
--- a/apps/fabric/src/fabric_view.erl
+++ /dev/null
@@ -1,235 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view).
-
--export([is_progress_possible/1, remove_overlapping_shards/2, maybe_send_row/1,
- maybe_pause_worker/3, maybe_resume_worker/2, transform_row/1, keydict/1,
- extract_view/4]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-%% @doc looks for a fully covered keyrange in the list of counters
--spec is_progress_possible([{#shard{}, term()}]) -> boolean().
-is_progress_possible([]) ->
- false;
-is_progress_possible(Counters) ->
- Ranges = fabric_dict:fold(fun(#shard{range=[X,Y]}, _, A) -> [{X,Y}|A] end,
- [], Counters),
- [{Start, Tail0} | Rest] = lists:ukeysort(1, Ranges),
- Result = lists:foldl(fun
- (_, fail) ->
- % we've already declared failure
- fail;
- (_, complete) ->
- % this is the success condition, we can fast-forward
- complete;
- ({X,_}, Tail) when X > (Tail+1) ->
- % gap in the keyrange, we're dead
- fail;
- ({_,Y}, Tail) ->
- case erlang:max(Tail, Y) of
- End when (End+1) =:= (2 bsl 31) ->
- complete;
- Else ->
- % the normal condition, adding to the tail
- Else
- end
- end, if (Tail0+1) =:= (2 bsl 31) -> complete; true -> Tail0 end, Rest),
- (Start =:= 0) andalso (Result =:= complete).
-
--spec remove_overlapping_shards(#shard{}, [{#shard{}, any()}]) ->
- [{#shard{}, any()}].
-remove_overlapping_shards(#shard{range=[A,B]} = Shard0, Shards) ->
- fabric_dict:filter(fun(#shard{range=[X,Y]} = Shard, _Value) ->
- if Shard =:= Shard0 ->
- % we can't remove ourselves
- true;
- A < B, X >= A, X < B ->
- % lower bound is inside our range
- false;
- A < B, Y > A, Y =< B ->
- % upper bound is inside our range
- false;
- B < A, X >= A orelse B < A, X < B ->
- % target shard wraps the key range, lower bound is inside
- false;
- B < A, Y > A orelse B < A, Y =< B ->
- % target shard wraps the key range, upper bound is inside
- false;
- true ->
- true
- end
- end, Shards).
-
-maybe_pause_worker(Worker, From, State) ->
- #collector{buffer_size = BufferSize, counters = Counters} = State,
- case fabric_dict:lookup_element(Worker, Counters) of
- BufferSize ->
- State#collector{blocked = [{Worker,From} | State#collector.blocked]};
- _Count ->
- gen_server:reply(From, ok),
- State
- end.
-
-maybe_resume_worker(Worker, State) ->
- #collector{buffer_size = Buffer, counters = C, blocked = B} = State,
- case fabric_dict:lookup_element(Worker, C) of
- Count when Count < Buffer/2 ->
- case couch_util:get_value(Worker, B) of
- undefined ->
- State;
- From ->
- gen_server:reply(From, ok),
- State#collector{blocked = lists:keydelete(Worker, 1, B)}
- end;
- _Other ->
- State
- end.
-
-maybe_send_row(#collector{limit=0} = State) ->
- #collector{user_acc=AccIn, callback=Callback} = State,
- {_, Acc} = Callback(complete, AccIn),
- {stop, State#collector{user_acc=Acc}};
-maybe_send_row(State) ->
- #collector{
- callback = Callback,
- counters = Counters,
- skip = Skip,
- limit = Limit,
- user_acc = AccIn
- } = State,
- case fabric_dict:any(0, Counters) of
- true ->
- {ok, State};
- false ->
- try get_next_row(State) of
- {_, NewState} when Skip > 0 ->
- maybe_send_row(NewState#collector{skip=Skip-1, limit=Limit-1});
- {Row, NewState} ->
- case Callback(transform_row(Row), AccIn) of
- {stop, Acc} ->
- {stop, NewState#collector{user_acc=Acc, limit=Limit-1}};
- {ok, Acc} ->
- maybe_send_row(NewState#collector{user_acc=Acc, limit=Limit-1})
- end
- catch complete ->
- {_, Acc} = Callback(complete, AccIn),
- {stop, State#collector{user_acc=Acc}}
- end
- end.
-
-keydict(nil) ->
- undefined;
-keydict(Keys) ->
- {Dict,_} = lists:foldl(fun(K, {D,I}) -> {dict:store(K,I,D), I+1} end,
- {dict:new(),0}, Keys),
- Dict.
-
-%% internal %%
-
-get_next_row(#collector{rows = []}) ->
- throw(complete);
-get_next_row(#collector{reducer = RedSrc} = St) when RedSrc =/= undefined ->
- #collector{
- query_args = #view_query_args{direction=Dir},
- keys = Keys,
- rows = RowDict,
- os_proc = Proc,
- counters = Counters0
- } = St,
- {Key, RestKeys} = find_next_key(Keys, Dir, RowDict),
- case dict:find(Key, RowDict) of
- {ok, Records} ->
- NewRowDict = dict:erase(Key, RowDict),
- Counters = lists:foldl(fun(#view_row{worker=Worker}, CountersAcc) ->
- fabric_dict:update_counter(Worker, -1, CountersAcc)
- end, Counters0, Records),
- Wrapped = [[V] || #view_row{value=V} <- Records],
- {ok, [Reduced]} = couch_query_servers:rereduce(Proc, [RedSrc], Wrapped),
- NewSt = St#collector{keys=RestKeys, rows=NewRowDict, counters=Counters},
- NewState = lists:foldl(fun(#view_row{worker=Worker}, StateAcc) ->
- maybe_resume_worker(Worker, StateAcc)
- end, NewSt, Records),
- {#view_row{key=Key, id=reduced, value=Reduced}, NewState};
- error ->
- get_next_row(St#collector{keys=RestKeys})
- end;
-get_next_row(State) ->
- #collector{rows = [Row|Rest], counters = Counters0} = State,
- Worker = Row#view_row.worker,
- Counters1 = fabric_dict:update_counter(Worker, -1, Counters0),
- NewState = maybe_resume_worker(Worker, State#collector{counters=Counters1}),
- {Row, NewState#collector{rows = Rest}}.
-
-find_next_key(nil, Dir, RowDict) ->
- case lists:sort(sort_fun(Dir), dict:fetch_keys(RowDict)) of
- [] ->
- throw(complete);
- [Key|_] ->
- {Key, nil}
- end;
-find_next_key([], _, _) ->
- throw(complete);
-find_next_key([Key|Rest], _, _) ->
- {Key, Rest}.
-
-transform_row(#view_row{key=Key, id=reduced, value=Value}) ->
- {row, {[{key,Key}, {value,Value}]}};
-transform_row(#view_row{key=Key, id=undefined}) ->
- {row, {[{key,Key}, {error,not_found}]}};
-transform_row(#view_row{key=Key, id=Id, value=Value, doc=undefined}) ->
- {row, {[{id,Id}, {key,Key}, {value,Value}]}};
-transform_row(#view_row{key=Key, id=Id, value=Value, doc={error,Reason}}) ->
- {row, {[{id,Id}, {key,Key}, {value,Value}, {error,Reason}]}};
-transform_row(#view_row{key=Key, id=Id, value=Value, doc=Doc}) ->
- {row, {[{id,Id}, {key,Key}, {value,Value}, {doc,Doc}]}}.
-
-sort_fun(fwd) ->
- fun(A,A) -> true; (A,B) -> couch_view:less_json(A,B) end;
-sort_fun(rev) ->
- fun(A,A) -> true; (A,B) -> couch_view:less_json(B,A) end.
-
-extract_view(Pid, ViewName, [], _ViewType) ->
- ?LOG_ERROR("missing_named_view ~p", [ViewName]),
- exit(Pid, kill),
- exit(missing_named_view);
-extract_view(Pid, ViewName, [View|Rest], ViewType) ->
- case lists:member(ViewName, view_names(View, ViewType)) of
- true ->
- if ViewType == reduce ->
- {index_of(ViewName, view_names(View, reduce)), View};
- true ->
- View
- end;
- false ->
- extract_view(Pid, ViewName, Rest, ViewType)
- end.
-
-view_names(View, Type) when Type == red_map; Type == reduce ->
- [Name || {Name, _} <- View#view.reduce_funs];
-view_names(View, map) ->
- View#view.map_names.
-
-index_of(X, List) ->
- index_of(X, List, 1).
-
-index_of(_X, [], _I) ->
- not_found;
-index_of(X, [X|_Rest], I) ->
- I;
-index_of(X, [_|Rest], I) ->
- index_of(X, Rest, I+1).
diff --git a/apps/fabric/src/fabric_view_all_docs.erl b/apps/fabric/src/fabric_view_all_docs.erl
deleted file mode 100644
index b3436171..00000000
--- a/apps/fabric/src/fabric_view_all_docs.erl
+++ /dev/null
@@ -1,181 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_all_docs).
-
--export([go/4]).
--export([open_doc/3]). % exported for spawn
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName, #view_query_args{keys=nil} = QueryArgs, Callback, Acc0) ->
- Workers = lists:map(fun(#shard{name=Name, node=Node} = Shard) ->
- Ref = rexi:cast(Node, {fabric_rpc, all_docs, [Name, QueryArgs]}),
- Shard#shard{ref = Ref}
- end, mem3:shards(DbName)),
- BufferSize = couch_config:get("fabric", "map_buffer_size", "2"),
- #view_query_args{limit = Limit, skip = Skip} = QueryArgs,
- State = #collector{
- query_args = QueryArgs,
- callback = Callback,
- buffer_size = list_to_integer(BufferSize),
- counters = fabric_dict:init(Workers, 0),
- skip = Skip,
- limit = Limit,
- user_acc = Acc0
- },
- try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, infinity, 5000) of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- Error ->
- Error
- after
- fabric_util:cleanup(Workers)
- end;
-
-go(DbName, QueryArgs, Callback, Acc0) ->
- #view_query_args{
- direction = Dir,
- include_docs = IncludeDocs,
- limit = Limit0,
- skip = Skip0,
- keys = Keys
- } = QueryArgs,
- {_, Ref0} = spawn_monitor(fun() -> exit(fabric:get_doc_count(DbName)) end),
- Monitors0 = [spawn_monitor(?MODULE, open_doc, [DbName, Id, IncludeDocs]) ||
- Id <- Keys],
- Monitors = if Dir=:=fwd -> Monitors0; true -> lists:reverse(Monitors0) end,
- receive {'DOWN', Ref0, _, _, {ok, TotalRows}} ->
- {ok, Acc1} = Callback({total_and_offset, TotalRows, 0}, Acc0),
- {ok, Acc2} = doc_receive_loop(Monitors, Skip0, Limit0, Callback, Acc1),
- Callback(complete, Acc2)
- after 10000 ->
- Callback(timeout, Acc0)
- end.
-
-handle_message({rexi_DOWN, _, _, _}, nil, State) ->
- % TODO see if progress can be made here, possibly by removing all shards
- % from that node and checking is_progress_possible
- {ok, State};
-
-handle_message({rexi_EXIT, _}, Worker, State) ->
- #collector{callback=Callback, counters=Counters0, user_acc=Acc} = State,
- Counters = fabric_dict:erase(Worker, Counters0),
- case fabric_view:is_progress_possible(Counters) of
- true ->
- {ok, State#collector{counters = Counters}};
- false ->
- Callback({error, dead_shards}, Acc),
- {error, dead_shards}
- end;
-
-handle_message({total_and_offset, Tot, Off}, {Worker, From}, State) ->
- #collector{
- callback = Callback,
- counters = Counters0,
- total_rows = Total0,
- offset = Offset0,
- user_acc = AccIn
- } = State,
- case fabric_dict:lookup_element(Worker, Counters0) of
- undefined ->
- % this worker lost the race with other partition copies, terminate
- gen_server:reply(From, stop),
- {ok, State};
- 0 ->
- gen_server:reply(From, ok),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- Counters2 = fabric_view:remove_overlapping_shards(Worker, Counters1),
- Total = Total0 + Tot,
- Offset = Offset0 + Off,
- case fabric_dict:any(0, Counters2) of
- true ->
- {ok, State#collector{
- counters = Counters2,
- total_rows = Total,
- offset = Offset
- }};
- false ->
- FinalOffset = erlang:min(Total, Offset+State#collector.skip),
- {Go, Acc} = Callback({total_and_offset, Total, FinalOffset}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters2),
- total_rows = Total,
- offset = FinalOffset,
- user_acc = Acc
- }}
- end
- end;
-
-handle_message(#view_row{} = Row, {Worker, From}, State) ->
- #collector{query_args = Args, counters = Counters0, rows = Rows0} = State,
- Dir = Args#view_query_args.direction,
- Rows = merge_row(Dir, Row#view_row{worker=Worker}, Rows0),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows=Rows, counters=Counters1},
- State2 = fabric_view:maybe_pause_worker(Worker, From, State1),
- fabric_view:maybe_send_row(State2);
-
-handle_message(complete, Worker, State) ->
- Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
- fabric_view:maybe_send_row(State#collector{counters = Counters}).
-
-
-merge_row(fwd, Row, Rows) ->
- lists:keymerge(#view_row.id, [Row], Rows);
-merge_row(rev, Row, Rows) ->
- lists:rkeymerge(#view_row.id, [Row], Rows).
-
-doc_receive_loop([], _, _, _, Acc) ->
- {ok, Acc};
-doc_receive_loop(_, _, 0, _, Acc) ->
- {ok, Acc};
-doc_receive_loop([{Pid,Ref}|Rest], Skip, Limit, Callback, Acc) when Skip > 0 ->
- receive {'DOWN', Ref, process, Pid, #view_row{}} ->
- doc_receive_loop(Rest, Skip-1, Limit-1, Callback, Acc)
- after 10000 ->
- timeout
- end;
-doc_receive_loop([{Pid,Ref}|Rest], 0, Limit, Callback, AccIn) ->
- receive {'DOWN', Ref, process, Pid, #view_row{} = Row} ->
- case Callback(fabric_view:transform_row(Row), AccIn) of
- {ok, Acc} ->
- doc_receive_loop(Rest, 0, Limit-1, Callback, Acc);
- {stop, Acc} ->
- {ok, Acc}
- end
- after 10000 ->
- timeout
- end.
-
-open_doc(DbName, Id, IncludeDocs) ->
- Row = case fabric:open_doc(DbName, Id, [deleted]) of
- {not_found, missing} ->
- Doc = undefined,
- #view_row{key=Id};
- {ok, #doc{deleted=true, revs=Revs}} ->
- Doc = null,
- {RevPos, [RevId|_]} = Revs,
- Value = {[{rev,couch_doc:rev_to_str({RevPos, RevId})}, {deleted,true}]},
- #view_row{key=Id, id=Id, value=Value};
- {ok, #doc{revs=Revs} = Doc0} ->
- Doc = couch_doc:to_json_obj(Doc0, []),
- {RevPos, [RevId|_]} = Revs,
- Value = {[{rev,couch_doc:rev_to_str({RevPos, RevId})}]},
- #view_row{key=Id, id=Id, value=Value}
- end,
- exit(if IncludeDocs -> Row#view_row{doc=Doc}; true -> Row end).
diff --git a/apps/fabric/src/fabric_view_changes.erl b/apps/fabric/src/fabric_view_changes.erl
deleted file mode 100644
index a4421a92..00000000
--- a/apps/fabric/src/fabric_view_changes.erl
+++ /dev/null
@@ -1,271 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_changes).
-
--export([go/5, start_update_notifier/1, pack_seqs/1]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName, Feed, Options, Callback, Acc0) when Feed == "continuous" orelse
- Feed == "longpoll" ->
- Args = make_changes_args(Options),
- {ok, Acc} = Callback(start, Acc0),
- Notifiers = start_update_notifiers(DbName),
- {Timeout, _} = couch_changes:get_changes_timeout(Args, Callback),
- try
- keep_sending_changes(
- DbName,
- Args,
- Callback,
- get_start_seq(DbName, Args),
- Acc,
- Timeout
- )
- after
- stop_update_notifiers(Notifiers),
- couch_changes:get_rest_db_updated()
- end;
-
-go(DbName, "normal", Options, Callback, Acc0) ->
- Args = make_changes_args(Options),
- {ok, Acc} = Callback(start, Acc0),
- {ok, #collector{counters=Seqs, user_acc=AccOut}} = send_changes(
- DbName,
- Args,
- Callback,
- get_start_seq(DbName, Args),
- Acc
- ),
- Callback({stop, pack_seqs(Seqs)}, AccOut).
-
-keep_sending_changes(DbName, Args, Callback, Seqs, AccIn, Timeout) ->
- #changes_args{limit=Limit, feed=Feed, heartbeat=Heartbeat} = Args,
- {ok, Collector} = send_changes(DbName, Args, Callback, Seqs, AccIn),
- #collector{limit=Limit2, counters=NewSeqs, user_acc=AccOut} = Collector,
- LastSeq = pack_seqs(NewSeqs),
- if Limit > Limit2, Feed == "longpoll" ->
- Callback({stop, LastSeq}, AccOut);
- true ->
- case wait_db_updated(Timeout) of
- updated ->
- keep_sending_changes(
- DbName,
- Args#changes_args{limit=Limit2},
- Callback,
- LastSeq,
- AccOut,
- Timeout
- );
- timeout ->
- case Heartbeat of undefined ->
- Callback({stop, LastSeq}, AccOut);
- _ ->
- {ok, AccTimeout} = Callback(timeout, AccOut),
- keep_sending_changes(DbName, Args#changes_args{limit=Limit2},
- Callback, LastSeq, AccTimeout, Timeout)
- end
- end
- end.
-
-send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn) ->
- AllShards = mem3:shards(DbName),
- Seqs = lists:flatmap(fun({#shard{name=Name, node=N} = Shard, Seq}) ->
- case lists:member(Shard, AllShards) of
- true ->
- Ref = rexi:cast(N, {fabric_rpc, changes, [Name,ChangesArgs,Seq]}),
- [{Shard#shard{ref = Ref}, Seq}];
- false ->
- % Find some replacement shards to cover the missing range
- % TODO It's possible in rare cases of shard merging to end up
- % with overlapping shard ranges from this technique
- lists:map(fun(#shard{name=Name2, node=N2} = NewShard) ->
- Ref = rexi:cast(N2, {fabric_rpc, changes, [Name2,ChangesArgs,0]}),
- {NewShard#shard{ref = Ref}, 0}
- end, find_replacement_shards(Shard, AllShards))
- end
- end, unpack_seqs(PackedSeqs, DbName)),
- {Workers, _} = lists:unzip(Seqs),
- State = #collector{
- query_args = ChangesArgs,
- callback = Callback,
- counters = fabric_dict:init(Workers, 0),
- user_acc = AccIn,
- limit = ChangesArgs#changes_args.limit,
- rows = Seqs % store sequence positions instead
- },
- try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, infinity, 5000)
- after
- fabric_util:cleanup(Workers)
- end.
-
-handle_message({rexi_DOWN, _, _, _}, nil, State) ->
- % TODO see if progress can be made here, possibly by removing all shards
- % from that node and checking is_progress_possible
- {ok, State};
-
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- ?LOG_ERROR("~p rexi_EXIT ~p", [?MODULE, Reason]),
- #collector{
- callback=Callback,
- counters=Counters0,
- rows = Seqs0,
- user_acc=Acc
- } = State,
- Counters = fabric_dict:erase(Worker, Counters0),
- Seqs = fabric_dict:erase(Worker, Seqs0),
- case fabric_view:is_progress_possible(Counters) of
- true ->
- {ok, State#collector{counters = Counters, rows=Seqs}};
- false ->
- Callback({error, dead_shards}, Acc),
- {error, dead_shards}
- end;
-
-handle_message(_, _, #collector{limit=0} = State) ->
- {stop, State};
-
-handle_message(#view_row{key=Seq} = Row0, {Worker, From}, St) ->
- #collector{
- query_args = #changes_args{include_docs=IncludeDocs},
- callback = Callback,
- counters = S0,
- limit = Limit,
- user_acc = AccIn
- } = St,
- case fabric_dict:lookup_element(Worker, S0) of
- undefined ->
- % this worker lost the race with other partition copies, terminate it
- gen_server:reply(From, stop),
- {ok, St};
- _ ->
- S1 = fabric_dict:store(Worker, Seq, S0),
- S2 = fabric_view:remove_overlapping_shards(Worker, S1),
- Row = Row0#view_row{key = pack_seqs(S2)},
- {Go, Acc} = Callback(changes_row(Row, IncludeDocs), AccIn),
- gen_server:reply(From, Go),
- {Go, St#collector{counters=S2, limit=Limit-1, user_acc=Acc}}
- end;
-
-handle_message({complete, EndSeq}, Worker, State) ->
- #collector{
- counters = S0,
- total_rows = Completed % override
- } = State,
- case fabric_dict:lookup_element(Worker, S0) of
- undefined ->
- {ok, State};
- _ ->
- S1 = fabric_dict:store(Worker, EndSeq, S0),
- % unlikely to have overlaps here, but possible w/ filters
- S2 = fabric_view:remove_overlapping_shards(Worker, S1),
- NewState = State#collector{counters=S2, total_rows=Completed+1},
- case fabric_dict:size(S2) =:= (Completed+1) of
- true ->
- {stop, NewState};
- false ->
- {ok, NewState}
- end
- end.
-
-make_changes_args(#changes_args{style=Style, filter=undefined}=Args) ->
- Args#changes_args{filter = Style};
-make_changes_args(Args) ->
- Args.
-
-get_start_seq(_DbName, #changes_args{dir=fwd, since=Since}) ->
- Since;
-get_start_seq(DbName, #changes_args{dir=rev}) ->
- Shards = mem3:shards(DbName),
- Workers = fabric_util:submit_jobs(Shards, get_update_seq, []),
- {ok, Since} = fabric_util:recv(Workers, #shard.ref,
- fun collect_update_seqs/3, fabric_dict:init(Workers, -1)),
- Since.
-
-collect_update_seqs(Seq, Shard, Counters) when is_integer(Seq) ->
- case fabric_dict:lookup_element(Shard, Counters) of
- undefined ->
- % already heard from someone else in this range
- {ok, Counters};
- -1 ->
- C1 = fabric_dict:store(Shard, Seq, Counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- case fabric_dict:any(-1, C2) of
- true ->
- {ok, C2};
- false ->
- {stop, pack_seqs(C2)}
- end
- end.
-
-pack_seqs(Workers) ->
- SeqList = [{N,R,S} || {#shard{node=N, range=R}, S} <- Workers],
- SeqSum = lists:sum(element(2, lists:unzip(Workers))),
- Opaque = couch_util:encodeBase64Url(term_to_binary(SeqList, [compressed])),
- list_to_binary([integer_to_list(SeqSum), $-, Opaque]).
-
-unpack_seqs(0, DbName) ->
- fabric_dict:init(mem3:shards(DbName), 0);
-
-unpack_seqs("0", DbName) ->
- fabric_dict:init(mem3:shards(DbName), 0);
-
-unpack_seqs(Packed, DbName) ->
- {match, [Opaque]} = re:run(Packed, "^([0-9]+-)?(?<opaque>.*)", [{capture,
- [opaque], binary}]),
- % TODO relies on internal structure of fabric_dict as keylist
- lists:map(fun({Node, [A,B], Seq}) ->
- Shard = #shard{node=Node, range=[A,B], dbname=DbName},
- {mem3_util:name_shard(Shard), Seq}
- end, binary_to_term(couch_util:decodeBase64Url(Opaque))).
-
-start_update_notifiers(DbName) ->
- lists:map(fun(#shard{node=Node, name=Name}) ->
- {Node, rexi:cast(Node, {?MODULE, start_update_notifier, [Name]})}
- end, mem3:shards(DbName)).
-
-% rexi endpoint
-start_update_notifier(DbName) ->
- {Caller, _} = get(rexi_from),
- Fun = fun({_, X}) when X == DbName -> Caller ! db_updated; (_) -> ok end,
- Id = {couch_db_update_notifier, make_ref()},
- ok = gen_event:add_sup_handler(couch_db_update, Id, Fun),
- receive {gen_event_EXIT, Id, Reason} ->
- rexi:reply({gen_event_EXIT, DbName, Reason})
- end.
-
-stop_update_notifiers(Notifiers) ->
- [rexi:kill(Node, Ref) || {Node, Ref} <- Notifiers].
-
-changes_row(#view_row{key=Seq, id=Id, value=Value, doc=deleted}, true) ->
- {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {deleted, true}, {doc, null}]}};
-changes_row(#view_row{key=Seq, id=Id, value=Value, doc=deleted}, false) ->
- {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {deleted, true}]}};
-changes_row(#view_row{key=Seq, id=Id, value=Value, doc={error,Reason}}, true) ->
- {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {error,Reason}]}};
-changes_row(#view_row{key=Seq, id=Id, value=Value, doc=Doc}, true) ->
- {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {doc,Doc}]}};
-changes_row(#view_row{key=Seq, id=Id, value=Value}, false) ->
- {change, {[{seq,Seq}, {id,Id}, {changes,Value}]}}.
-
-find_replacement_shards(#shard{range=Range}, AllShards) ->
- % TODO make this moar betta -- we might have split or merged the partition
- [Shard || Shard <- AllShards, Shard#shard.range =:= Range].
-
-wait_db_updated(Timeout) ->
- receive db_updated -> couch_changes:get_rest_db_updated()
- after Timeout -> timeout end.
diff --git a/apps/fabric/src/fabric_view_map.erl b/apps/fabric/src/fabric_view_map.erl
deleted file mode 100644
index e1210a84..00000000
--- a/apps/fabric/src/fabric_view_map.erl
+++ /dev/null
@@ -1,151 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_map).
-
--export([go/6]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName, GroupId, View, Args, Callback, Acc0) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
- go(DbName, DDoc, View, Args, Callback, Acc0);
-
-go(DbName, DDoc, View, Args, Callback, Acc0) ->
- Workers = lists:map(fun(#shard{name=Name, node=Node} = Shard) ->
- Ref = rexi:cast(Node, {fabric_rpc, map_view, [Name, DDoc, View, Args]}),
- Shard#shard{ref = Ref}
- end, mem3:shards(DbName)),
- BufferSize = couch_config:get("fabric", "map_buffer_size", "2"),
- #view_query_args{limit = Limit, skip = Skip, keys = Keys} = Args,
- State = #collector{
- query_args = Args,
- callback = Callback,
- buffer_size = list_to_integer(BufferSize),
- counters = fabric_dict:init(Workers, 0),
- skip = Skip,
- limit = Limit,
- keys = fabric_view:keydict(Keys),
- sorted = Args#view_query_args.sorted,
- user_acc = Acc0
- },
- try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, infinity, 1000 * 60 * 60) of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- Error ->
- Error
- after
- fabric_util:cleanup(Workers)
- end.
-
-handle_message({rexi_DOWN, _, _, _}, nil, State) ->
- % TODO see if progress can be made here, possibly by removing all shards
- % from that node and checking is_progress_possible
- {ok, State};
-
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- ?LOG_ERROR("~p rexi_EXIT ~p", [?MODULE, Reason]),
- #collector{callback=Callback, counters=Counters0, user_acc=Acc} = State,
- Counters = fabric_dict:erase(Worker, Counters0),
- case fabric_view:is_progress_possible(Counters) of
- true ->
- {ok, State#collector{counters = Counters}};
- false ->
- Callback({error, dead_shards}, Acc),
- {error, dead_shards}
- end;
-
-handle_message({total_and_offset, Tot, Off}, {Worker, From}, State) ->
- #collector{
- callback = Callback,
- counters = Counters0,
- total_rows = Total0,
- offset = Offset0,
- user_acc = AccIn
- } = State,
- case fabric_dict:lookup_element(Worker, Counters0) of
- undefined ->
- % this worker lost the race with other partition copies, terminate
- gen_server:reply(From, stop),
- {ok, State};
- 0 ->
- gen_server:reply(From, ok),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- Counters2 = fabric_view:remove_overlapping_shards(Worker, Counters1),
- Total = Total0 + Tot,
- Offset = Offset0 + Off,
- case fabric_dict:any(0, Counters2) of
- true ->
- {ok, State#collector{
- counters = Counters2,
- total_rows = Total,
- offset = Offset
- }};
- false ->
- FinalOffset = erlang:min(Total, Offset+State#collector.skip),
- {Go, Acc} = Callback({total_and_offset, Total, FinalOffset}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters2),
- total_rows = Total,
- offset = FinalOffset,
- user_acc = Acc
- }}
- end
- end;
-
-handle_message(#view_row{}, {_, _}, #collector{limit=0} = State) ->
- #collector{callback=Callback} = State,
- {_, Acc} = Callback(complete, State#collector.user_acc),
- {stop, State#collector{user_acc=Acc}};
-
-handle_message(#view_row{} = Row, {_,From}, #collector{sorted=false} = St) ->
- #collector{callback=Callback, user_acc=AccIn, limit=Limit} = St,
- {Go, Acc} = Callback(fabric_view:transform_row(Row), AccIn),
- gen_server:reply(From, ok),
- {Go, St#collector{user_acc=Acc, limit=Limit-1}};
-
-handle_message(#view_row{} = Row, {Worker, From}, State) ->
- #collector{
- query_args = #view_query_args{direction=Dir},
- counters = Counters0,
- rows = Rows0,
- keys = KeyDict
- } = State,
- Rows = merge_row(Dir, KeyDict, Row#view_row{worker=Worker}, Rows0),
- Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows=Rows, counters=Counters1},
- State2 = fabric_view:maybe_pause_worker(Worker, From, State1),
- fabric_view:maybe_send_row(State2);
-
-handle_message(complete, Worker, State) ->
- Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
- fabric_view:maybe_send_row(State#collector{counters = Counters}).
-
-merge_row(fwd, undefined, Row, Rows) ->
- lists:merge(fun(#view_row{key=KeyA, id=IdA}, #view_row{key=KeyB, id=IdB}) ->
- couch_view:less_json([KeyA, IdA], [KeyB, IdB])
- end, [Row], Rows);
-merge_row(rev, undefined, Row, Rows) ->
- lists:merge(fun(#view_row{key=KeyA, id=IdA}, #view_row{key=KeyB, id=IdB}) ->
- couch_view:less_json([KeyB, IdB], [KeyA, IdA])
- end, [Row], Rows);
-merge_row(_, KeyDict, Row, Rows) ->
- lists:merge(fun(#view_row{key=A, id=IdA}, #view_row{key=B, id=IdB}) ->
- if A =:= B -> IdA < IdB; true ->
- dict:fetch(A, KeyDict) < dict:fetch(B, KeyDict)
- end
- end, [Row], Rows).
diff --git a/apps/fabric/src/fabric_view_reduce.erl b/apps/fabric/src/fabric_view_reduce.erl
deleted file mode 100644
index 6ae564cc..00000000
--- a/apps/fabric/src/fabric_view_reduce.erl
+++ /dev/null
@@ -1,99 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(fabric_view_reduce).
-
--export([go/6]).
-
--include("fabric.hrl").
--include_lib("mem3/include/mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-go(DbName, GroupId, View, Args, Callback, Acc0) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
- go(DbName, DDoc, View, Args, Callback, Acc0);
-
-go(DbName, DDoc, VName, Args, Callback, Acc0) ->
- #group{def_lang=Lang, views=Views} = Group =
- couch_view_group:design_doc_to_view_group(DDoc),
- {NthRed, View} = fabric_view:extract_view(nil, VName, Views, reduce),
- {VName, RedSrc} = lists:nth(NthRed, View#view.reduce_funs),
- Workers = lists:map(fun(#shard{name=Name, node=N} = Shard) ->
- Ref = rexi:cast(N, {fabric_rpc, reduce_view, [Name,Group,VName,Args]}),
- Shard#shard{ref = Ref}
- end, mem3:shards(DbName)),
- BufferSize = couch_config:get("fabric", "reduce_buffer_size", "20"),
- #view_query_args{limit = Limit, skip = Skip} = Args,
- State = #collector{
- query_args = Args,
- callback = Callback,
- buffer_size = list_to_integer(BufferSize),
- counters = fabric_dict:init(Workers, 0),
- keys = Args#view_query_args.keys,
- skip = Skip,
- limit = Limit,
- lang = Group#group.def_lang,
- os_proc = couch_query_servers:get_os_process(Lang),
- reducer = RedSrc,
- rows = dict:new(),
- user_acc = Acc0
- },
- try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, infinity, 1000 * 60 * 60) of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- Error ->
- Error
- after
- fabric_util:cleanup(Workers),
- catch couch_query_servers:ret_os_process(State#collector.os_proc)
- end.
-
-handle_message({rexi_DOWN, _, _, _}, nil, State) ->
- % TODO see if progress can be made here, possibly by removing all shards
- % from that node and checking is_progress_possible
- {ok, State};
-
-handle_message({rexi_EXIT, Reason}, Worker, State) ->
- ?LOG_ERROR("~p rexi_EXIT ~p", [?MODULE, Reason]),
- #collector{callback=Callback, counters=Counters0, user_acc=Acc} = State,
- Counters = fabric_dict:erase(Worker, Counters0),
- case fabric_view:is_progress_possible(Counters) of
- true ->
- {ok, State#collector{counters = Counters}};
- false ->
- Callback({error, dead_shards}, Acc),
- {error, dead_shards}
- end;
-
-handle_message(#view_row{key=Key} = Row, {Worker, From}, State) ->
- #collector{counters = Counters0, rows = Rows0} = State,
- case fabric_dict:lookup_element(Worker, Counters0) of
- undefined ->
- % this worker lost the race with other partition copies, terminate it
- gen_server:reply(From, stop),
- {ok, State};
- _ ->
- Rows = dict:append(Key, Row#view_row{worker=Worker}, Rows0),
- C1 = fabric_dict:update_counter(Worker, 1, Counters0),
- % TODO time this call, if slow don't do it every time
- C2 = fabric_view:remove_overlapping_shards(Worker, C1),
- State1 = State#collector{rows=Rows, counters=C2},
- State2 = fabric_view:maybe_pause_worker(Worker, From, State1),
- fabric_view:maybe_send_row(State2)
- end;
-
-handle_message(complete, Worker, State) ->
- Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
- fabric_view:maybe_send_row(State#collector{counters = Counters}).
diff --git a/apps/mem3/README.md b/apps/mem3/README.md
deleted file mode 100644
index 4fa75906..00000000
--- a/apps/mem3/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-## mem3
-
-Mem3 is the node membership application for clustered [CouchDB][1]. It is used in [BigCouch][2] and tracks two essential pieces of state for the cluster:
-
- 1. member nodes
- 2. node/partition mappings for each database
-
-Both the nodes and partitions are tracked in node-local couch databases. Partitions are heavily used, so an ETS cache is also maintained for low-latency lookups. The nodes and partitions are synchronized via continuous CouchDB replication, which serves as 'gossip' in Dynamo parlance. The partitions ETS cache is kept in sync based on membership and database event listeners.
-
-An important point: BigCouch does not necessarily divide each database into equal partitions across all the nodes of a cluster. For instance, in a 20-node cluster you may need to create a small database with very few documents. For efficiency you might create it with Q=4 and keep the default of N=3, which yields only 12 partitions in total, so 8 nodes will hold none of this database's data. Because of this, we even out partition use across the cluster by altering the 'start' node for each database's partitions.
-
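-For example, in the 20-node scenario above the desired layout could be requested as follows (an illustrative sketch; the exact node assignment depends on cluster state):
-
-    %% 4 ranges x 3 copies = 12 shards, so 8 of the 20 nodes hold no data
-    Shards = mem3:choose_shards(<<"small_db">>, [{q, 4}, {n, 3}]).
-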
-Splitting and merging partitions is an immature feature of the system and will require attention in the near term. We believe we can implement both operations and perform them while the database remains online.
-
-### License
-[Apache 2.0][3]
-
-### Contact
- * [http://cloudant.com][4]
- * [info@cloudant.com][5]
-
-[1]: http://couchdb.apache.org
-[2]: http://github.com/cloudant/bigcouch
-[3]: http://www.apache.org/licenses/LICENSE-2.0.html
-[4]: http://cloudant.com
-[5]: mailto:info@cloudant.com
diff --git a/apps/mem3/ebin/mem3.appup b/apps/mem3/ebin/mem3.appup
deleted file mode 100644
index 6e9ebe71..00000000
--- a/apps/mem3/ebin/mem3.appup
+++ /dev/null
@@ -1,3 +0,0 @@
-{"1.0.1",[{"1.0",[
- {load_module, mem3_httpd}
-]}],[{"1.0",[]}]}.
diff --git a/apps/mem3/include/mem3.hrl b/apps/mem3/include/mem3.hrl
deleted file mode 100644
index ed86d07e..00000000
--- a/apps/mem3/include/mem3.hrl
+++ /dev/null
@@ -1,44 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-% type specification hacked to suppress dialyzer warning re: match spec
--record(shard, {
- name :: binary() | '_',
- node :: node() | '_',
- dbname :: binary(),
- range :: [non_neg_integer() | '$1' | '$2'],
- ref :: reference() | 'undefined' | '_'
-}).
-
-%% types
--type join_type() :: init | join | replace | leave.
--type join_order() :: non_neg_integer().
--type options() :: list().
--type mem_node() :: {join_order(), node(), options()}.
--type mem_node_list() :: [mem_node()].
--type arg_options() :: {test, boolean()}.
--type args() :: [] | [arg_options()].
--type test() :: undefined | node().
--type epoch() :: float().
--type clock() :: {node(), epoch()}.
--type vector_clock() :: [clock()].
--type ping_node() :: node() | nil.
--type gossip_fun() :: call | cast.
-
--type part() :: #shard{}.
--type fullmap() :: [part()].
--type ref_part_map() :: {reference(), part()}.
--type tref() :: reference().
--type np() :: {node(), part()}.
--type beg_acc() :: [integer()].
diff --git a/apps/mem3/src/mem3.app.src b/apps/mem3/src/mem3.app.src
deleted file mode 100644
index 38f85c68..00000000
--- a/apps/mem3/src/mem3.app.src
+++ /dev/null
@@ -1,13 +0,0 @@
-{application, mem3, [
- {description, "CouchDB Cluster Membership"},
- {mod, {mem3_app, []}},
- {vsn, "1.0.1"},
- {registered, [
- mem3_cache,
- mem3_events,
- mem3_nodes,
- mem3_sync,
- mem3_sup
- ]},
- {applications, [kernel, stdlib, sasl, crypto, mochiweb, couch]}
-]}.
diff --git a/apps/mem3/src/mem3.erl b/apps/mem3/src/mem3.erl
deleted file mode 100644
index 1021ee5b..00000000
--- a/apps/mem3/src/mem3.erl
+++ /dev/null
@@ -1,121 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3).
-
--export([start/0, stop/0, restart/0, nodes/0, shards/1, shards/2,
- choose_shards/2, n/1]).
--export([compare_nodelists/0, compare_shards/1]).
-
--include("mem3.hrl").
-
-start() ->
- application:start(mem3).
-
-stop() ->
- application:stop(mem3).
-
-restart() ->
- stop(),
- start().
-
-%% @doc Detailed report of cluster-wide membership state. Queries the state
-%% on all member nodes and builds a dictionary with unique states as the
-%% key and the nodes holding that state as the value. Also reports member
-%% nodes which fail to respond and nodes which are connected but are not
-%% cluster members. Useful for debugging.
--spec compare_nodelists() -> [{{cluster_nodes, [node()]} | bad_nodes
- | non_member_nodes, [node()]}].
-compare_nodelists() ->
- Nodes = mem3:nodes(),
- AllNodes = erlang:nodes([this, visible]),
- {Replies, BadNodes} = gen_server:multi_call(Nodes, mem3_nodes, get_nodelist),
- Dict = lists:foldl(fun({Node, Nodelist}, D) ->
- orddict:append({cluster_nodes, Nodelist}, Node, D)
- end, orddict:new(), Replies),
- [{non_member_nodes, AllNodes -- Nodes}, {bad_nodes, BadNodes} | Dict].
-
--spec compare_shards(DbName::iodata()) -> [{bad_nodes | [#shard{}], [node()]}].
-compare_shards(DbName) when is_list(DbName) ->
- compare_shards(list_to_binary(DbName));
-compare_shards(DbName) ->
- Nodes = mem3:nodes(),
- {Replies, BadNodes} = rpc:multicall(Nodes, mem3, shards, [DbName]),
- GoodNodes = [N || N <- Nodes, not lists:member(N, BadNodes)],
- Dict = lists:foldl(fun({Shards, Node}, D) ->
- orddict:append(Shards, Node, D)
- end, orddict:new(), lists:zip(Replies, GoodNodes)),
- [{bad_nodes, BadNodes} | Dict].
-
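-%% @doc Returns the replica count (N) for DbName, inferred as the number of
-%% shard copies covering one arbitrary document id.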
--spec n(DbName::iodata()) -> integer().
-n(DbName) ->
- length(mem3:shards(DbName, <<"foo">>)).
-
--spec nodes() -> [node()].
-nodes() ->
- mem3_nodes:get_nodelist().
-
--spec shards(DbName::iodata()) -> [#shard{}].
-shards(DbName) when is_list(DbName) ->
- shards(list_to_binary(DbName));
-shards(DbName) ->
- try ets:lookup(partitions, DbName) of
- [] ->
- mem3_util:load_shards_from_disk(DbName);
- Else ->
- Else
- catch error:badarg ->
- mem3_util:load_shards_from_disk(DbName)
- end.
-
--spec shards(DbName::iodata(), DocId::binary()) -> [#shard{}].
-shards(DbName, DocId) when is_list(DbName) ->
- shards(list_to_binary(DbName), DocId);
-shards(DbName, DocId) when is_list(DocId) ->
- shards(DbName, list_to_binary(DocId));
-shards(DbName, DocId) ->
- HashKey = mem3_util:hash(DocId),
- Head = #shard{
- name = '_',
- node = '_',
- dbname = DbName,
- range = ['$1','$2'],
- ref = '_'
- },
- Conditions = [{'<', '$1', HashKey}, {'=<', HashKey, '$2'}],
- try ets:select(partitions, [{Head, Conditions, ['$_']}]) of
- [] ->
- mem3_util:load_shards_from_disk(DbName, DocId);
- Shards ->
- Shards
- catch error:badarg ->
- mem3_util:load_shards_from_disk(DbName, DocId)
- end.
-
--spec choose_shards(DbName::iodata(), Options::list()) -> [#shard{}].
-choose_shards(DbName, Options) when is_list(DbName) ->
- choose_shards(list_to_binary(DbName), Options);
-choose_shards(DbName, Options) ->
- try shards(DbName)
- catch error:E when E==database_does_not_exist; E==badarg ->
- Nodes = mem3:nodes(),
- NodeCount = length(Nodes),
- N = mem3_util:n_val(couch_util:get_value(n, Options), NodeCount),
- Q = mem3_util:to_integer(couch_util:get_value(q, Options,
- couch_config:get("cluster", "q", "8"))),
- % rotate to a random entry in the nodelist for even distribution
- {A, B} = lists:split(crypto:rand_uniform(1,length(Nodes)+1), Nodes),
- RotatedNodes = B ++ A,
- mem3_util:create_partition_map(DbName, N, Q, RotatedNodes)
- end.
diff --git a/apps/mem3/src/mem3_app.erl b/apps/mem3/src/mem3_app.erl
deleted file mode 100644
index 682c6aff..00000000
--- a/apps/mem3/src/mem3_app.erl
+++ /dev/null
@@ -1,23 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_app).
--behaviour(application).
--export([start/2, stop/1]).
-
-start(_Type, []) ->
- mem3_sup:start_link().
-
-stop([]) ->
- ok.
diff --git a/apps/mem3/src/mem3_cache.erl b/apps/mem3/src/mem3_cache.erl
deleted file mode 100644
index 6614c29e..00000000
--- a/apps/mem3/src/mem3_cache.erl
+++ /dev/null
@@ -1,106 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_cache).
--behaviour(gen_server).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
--export([start_link/0]).
-
--record(state, {changes_pid}).
-
--include("mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init([]) ->
- ets:new(partitions, [bag, public, named_table, {keypos,#shard.dbname}]),
- {Pid, _} = spawn_monitor(fun() -> listen_for_changes(0) end),
- {ok, #state{changes_pid = Pid}}.
-
-handle_call(_Call, _From, State) ->
- {noreply, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({'DOWN', _, _, Pid, {badarg, [{ets,delete,[partitions,_]}|_]}},
- #state{changes_pid=Pid} = State) ->
- % fatal error, somebody deleted our ets table
- {stop, ets_table_error, State};
-handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
- ?LOG_INFO("~p changes listener died ~p", [?MODULE, Reason]),
- Seq = case Reason of {seq, EndSeq} -> EndSeq; _ -> 0 end,
- erlang:send_after(5000, self(), {start_listener, Seq}),
- {noreply, State};
-handle_info({start_listener, Seq}, State) ->
- {NewPid, _} = spawn_monitor(fun() -> listen_for_changes(Seq) end),
- {noreply, State#state{changes_pid=NewPid}};
-handle_info(_Msg, State) ->
- {noreply, State}.
-
-terminate(_Reason, #state{changes_pid=Pid}) ->
- exit(Pid, kill),
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%% internal functions
-
-listen_for_changes(Since) ->
- DbName = ?l2b(couch_config:get("mem3", "shard_db", "dbs")),
- {ok, Db} = ensure_exists(DbName),
- Args = #changes_args{
- feed = "continuous",
- since = Since,
- heartbeat = true,
- include_docs = true
- },
- ChangesFun = couch_changes:handle_changes(Args, nil, Db),
- ChangesFun(fun changes_callback/2).
-
-ensure_exists(DbName) ->
- Options = [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}],
- case couch_db:open(DbName, Options) of
- {ok, Db} ->
- {ok, Db};
- _ ->
- couch_server:create(DbName, Options)
- end.
-
-changes_callback(start, _) ->
- {ok, nil};
-changes_callback({stop, EndSeq}, _) ->
- exit({seq, EndSeq});
-changes_callback({change, {Change}, _}, _) ->
- DbName = couch_util:get_value(<<"id">>, Change),
- case couch_util:get_value(deleted, Change, false) of
- true ->
- ets:delete(partitions, DbName);
- false ->
- case couch_util:get_value(doc, Change) of
- {error, Reason} ->
- ?LOG_ERROR("missing partition table for ~s: ~p", [DbName, Reason]);
- {Doc} ->
- ets:delete(partitions, DbName),
- ets:insert(partitions, mem3_util:build_shards(DbName, Doc))
- end
- end,
- {ok, couch_util:get_value(<<"seq">>, Change)};
-changes_callback(timeout, _) ->
- {ok, nil}.
diff --git a/apps/mem3/src/mem3_httpd.erl b/apps/mem3/src/mem3_httpd.erl
deleted file mode 100644
index 6810562e..00000000
--- a/apps/mem3/src/mem3_httpd.erl
+++ /dev/null
@@ -1,53 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_httpd).
-
--export([handle_membership_req/1]).
-
-%% includes
--include("mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
-
-handle_membership_req(#httpd{method='GET',
- path_parts=[<<"_membership">>]} = Req) ->
- ClusterNodes = try mem3:nodes()
- catch _:_ -> [] end,
- couch_httpd:send_json(Req, {[
- {all_nodes, lists:sort([node()|nodes()])},
- {cluster_nodes, lists:sort(ClusterNodes)}
- ]});
-handle_membership_req(#httpd{method='GET',
- path_parts=[<<"_membership">>, <<"parts">>, DbName]} = Req) ->
- ClusterNodes = try mem3:nodes()
- catch _:_ -> [] end,
- Shards = mem3:shards(DbName),
- JsonShards = json_shards(Shards, dict:new()),
- couch_httpd:send_json(Req, {[
- {all_nodes, lists:sort([node()|nodes()])},
- {cluster_nodes, lists:sort(ClusterNodes)},
- {partitions, JsonShards}
- ]}).
-
-%%
-%% internal
-%%
-
-json_shards([], AccIn) ->
- List = dict:to_list(AccIn),
- {lists:sort(List)};
-json_shards([#shard{node=Node, range=[B,_E]} | Rest], AccIn) ->
- HexBeg = couch_util:to_hex(<<B:32/integer>>),
- json_shards(Rest, dict:append(HexBeg, Node, AccIn)).
diff --git a/apps/mem3/src/mem3_nodes.erl b/apps/mem3/src/mem3_nodes.erl
deleted file mode 100644
index f9320598..00000000
--- a/apps/mem3/src/mem3_nodes.erl
+++ /dev/null
@@ -1,134 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_nodes).
--behaviour(gen_server).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
--export([start_link/0, get_nodelist/0]).
-
--include("mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(state, {changes_pid, update_seq, nodes}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_nodelist() ->
- gen_server:call(?MODULE, get_nodelist).
-
-init([]) ->
- {Nodes, UpdateSeq} = initialize_nodelist(),
- {Pid, _} = spawn_monitor(fun() -> listen_for_changes(UpdateSeq) end),
- {ok, #state{changes_pid = Pid, update_seq = UpdateSeq, nodes = Nodes}}.
-
-handle_call(get_nodelist, _From, State) ->
- {reply, State#state.nodes, State};
-handle_call({add_node, Node}, _From, #state{nodes=Nodes} = State) ->
- gen_event:notify(mem3_events, {add_node, Node}),
- {reply, ok, State#state{nodes = lists:umerge([Node], Nodes)}};
-handle_call({remove_node, Node}, _From, #state{nodes=Nodes} = State) ->
- gen_event:notify(mem3_events, {remove_node, Node}),
- {reply, ok, State#state{nodes = lists:delete(Node, Nodes)}};
-handle_call(_Call, _From, State) ->
- {noreply, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
- ?LOG_INFO("~p changes listener died ~p", [?MODULE, Reason]),
- StartSeq = State#state.update_seq,
- Seq = case Reason of {seq, EndSeq} -> EndSeq; _ -> StartSeq end,
- erlang:send_after(5000, self(), start_listener),
- {noreply, State#state{update_seq = Seq}};
-handle_info(start_listener, #state{update_seq = Seq} = State) ->
- {NewPid, _} = spawn_monitor(fun() -> listen_for_changes(Seq) end),
- {noreply, State#state{changes_pid=NewPid}};
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%% internal functions
-
-initialize_nodelist() ->
- DbName = couch_config:get("mem3", "node_db", "nodes"),
- {ok, Db} = ensure_exists(DbName),
- {ok, _, Nodes0} = couch_btree:fold(Db#db.id_tree, fun first_fold/3, [], []),
- % add self if not already present
- case lists:member(node(), Nodes0) of
- true ->
- Nodes = Nodes0;
- false ->
- Doc = #doc{id = couch_util:to_binary(node())},
- {ok, _} = couch_db:update_doc(Db, Doc, []),
- Nodes = [node() | Nodes0]
- end,
- couch_db:close(Db),
- {lists:sort(Nodes), Db#db.update_seq}.
-
-first_fold(#full_doc_info{id = <<"_design/", _/binary>>}, _, Acc) ->
- {ok, Acc};
-first_fold(#full_doc_info{deleted=true}, _, Acc) ->
- {ok, Acc};
-first_fold(#full_doc_info{id=Id}, _, Acc) ->
- {ok, [mem3_util:to_atom(Id) | Acc]}.
-
-listen_for_changes(Since) ->
- DbName = ?l2b(couch_config:get("mem3", "node_db", "nodes")),
- {ok, Db} = ensure_exists(DbName),
- Args = #changes_args{
- feed = "continuous",
- since = Since,
- heartbeat = true,
- include_docs = true
- },
- ChangesFun = couch_changes:handle_changes(Args, nil, Db),
- ChangesFun(fun changes_callback/2).
-
-ensure_exists(DbName) when is_list(DbName) ->
- ensure_exists(list_to_binary(DbName));
-ensure_exists(DbName) ->
- Options = [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}],
- case couch_db:open(DbName, Options) of
- {ok, Db} ->
- {ok, Db};
- _ ->
- couch_server:create(DbName, Options)
- end.
-
-changes_callback(start, _) ->
- {ok, nil};
-changes_callback({stop, EndSeq}, _) ->
- exit({seq, EndSeq});
-changes_callback({change, {Change}, _}, _) ->
- Node = couch_util:get_value(<<"id">>, Change),
- case Node of <<"_design/", _/binary>> -> ok; _ ->
- case couch_util:get_value(deleted, Change, false) of
- false ->
- gen_server:call(?MODULE, {add_node, mem3_util:to_atom(Node)});
- true ->
- gen_server:call(?MODULE, {remove_node, mem3_util:to_atom(Node)})
- end
- end,
- {ok, couch_util:get_value(<<"seq">>, Change)};
-changes_callback(timeout, _) ->
- {ok, nil}.
diff --git a/apps/mem3/src/mem3_rep.erl b/apps/mem3/src/mem3_rep.erl
deleted file mode 100644
index f80eeb3d..00000000
--- a/apps/mem3/src/mem3_rep.erl
+++ /dev/null
@@ -1,106 +0,0 @@
--module(mem3_rep).
-
--export([go/2, changes_enumerator/2, make_local_id/2]).
-
--include("mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--define(CTX, #user_ctx{roles = [<<"_admin">>]}).
-
--record(acc, {revcount = 0, infos = [], seq, localid, source, target}).
-
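-%% @doc Replicates a shard from Source to Target by streaming local changes
-%% and pushing missing revisions in batches of roughly 100 revisions.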
-go(#shard{} = Source, #shard{} = Target) ->
- LocalId = make_local_id(Source, Target),
- {ok, Db} = couch_db:open(Source#shard.name, [{user_ctx,?CTX}]),
- try go(Db, Target, LocalId) after couch_db:close(Db) end.
-
-go(#db{} = Db, #shard{} = Target, LocalId) ->
- Seq = calculate_start_seq(Db, Target, LocalId),
- Acc0 = #acc{source=Db, target=Target, seq=Seq, localid=LocalId},
- Fun = fun ?MODULE:changes_enumerator/2,
- {ok, AccOut} = couch_db:changes_since(Db, all_docs, Seq, Fun, [], Acc0),
- {ok, _} = replicate_batch(AccOut).
-
-make_local_id(#shard{node=SourceNode}, #shard{node=TargetNode}) ->
- S = couch_util:encodeBase64Url(couch_util:md5(term_to_binary(SourceNode))),
- T = couch_util:encodeBase64Url(couch_util:md5(term_to_binary(TargetNode))),
- <<"_local/shard-sync-", S/binary, "-", T/binary>>.
-
-changes_enumerator(DocInfo, #acc{revcount = C} = Acc) when C >= 99 ->
- Seq = DocInfo#doc_info.high_seq,
- replicate_batch(Acc#acc{seq = Seq, infos = [DocInfo | Acc#acc.infos]});
-
-changes_enumerator(DocInfo, #acc{revcount = C, infos = Infos} = Acc) ->
- RevCount = C + length(DocInfo#doc_info.revs),
- Seq = DocInfo#doc_info.high_seq,
- {ok, Acc#acc{seq = Seq, revcount = RevCount, infos = [DocInfo | Infos]}}.
-
-replicate_batch(#acc{target = #shard{node=Node, name=Name}} = Acc) ->
- case find_missing_revs(Acc) of
- [] ->
- ok;
- Missing ->
- ok = save_on_target(Node, Name, open_docs(Acc#acc.source, Missing))
- end,
- update_locals(Acc),
- {ok, Acc#acc{revcount=0, infos=[]}}.
-
-find_missing_revs(Acc) ->
- #acc{target = #shard{node=Node, name=Name}, infos = Infos} = Acc,
- IdsRevs = lists:map(fun(#doc_info{id=Id, revs=RevInfos}) ->
- {Id, [R || #rev_info{rev=R} <- RevInfos]}
- end, Infos),
- rexi_call(Node, {fabric_rpc, get_missing_revs, [Name, IdsRevs]}).
-
-open_docs(Db, Missing) ->
- lists:flatmap(fun({Id, Revs, _}) ->
- {ok, Docs} = couch_db:open_doc_revs(Db, Id, Revs, [latest]),
- [Doc || {ok, Doc} <- Docs]
- end, Missing).
-
-save_on_target(Node, Name, Docs) ->
- Options = [replicated_changes, full_commit, {user_ctx, ?CTX}],
- rexi_call(Node, {fabric_rpc, update_docs, [Name, Docs, Options]}),
- ok.
-
-update_locals(Acc) ->
- #acc{seq=Seq, source=Db, target=Target, localid=Id} = Acc,
- #shard{name=Name, node=Node} = Target,
- Doc = #doc{id = Id, body = {[
- {<<"seq">>, Seq},
- {<<"timestamp">>, list_to_binary(iso8601_timestamp())}
- ]}},
- {ok, _} = couch_db:update_doc(Db, Doc, []),
- Options = [{user_ctx, ?CTX}],
- rexi_call(Node, {fabric_rpc, update_docs, [Name, [Doc], Options]}).
-
-rexi_call(Node, MFA) ->
- Ref = rexi:cast(Node, MFA),
- receive {Ref, {ok, Reply}} ->
- Reply;
- {Ref, Error} ->
- erlang:error(Error)
- after 60000 ->
- erlang:error(timeout)
- end.
-
-calculate_start_seq(Db, #shard{node=Node, name=Name}, LocalId) ->
- case couch_db:open_doc(Db, LocalId, []) of
- {ok, #doc{body = {SProps}}} ->
- try rexi_call(Node, {fabric_rpc, open_doc, [Name, LocalId, []]}) of
- #doc{body = {TProps}} ->
- SourceSeq = couch_util:get_value(<<"seq">>, SProps, 0),
- TargetSeq = couch_util:get_value(<<"seq">>, TProps, 0),
- erlang:min(SourceSeq, TargetSeq)
- catch error:_ ->
- 0
- end;
- _ ->
- 0
- end.
-
-iso8601_timestamp() ->
- {_,_,Micro} = Now = os:timestamp(),
- {{Year,Month,Date},{Hour,Minute,Second}} = calendar:now_to_datetime(Now),
- Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0B.~6.10.0BZ",
- io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second, Micro]).
diff --git a/apps/mem3/src/mem3_sup.erl b/apps/mem3/src/mem3_sup.erl
deleted file mode 100644
index 9f93dd39..00000000
--- a/apps/mem3/src/mem3_sup.erl
+++ /dev/null
@@ -1,35 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sup).
--behaviour(supervisor).
--export([start_link/0, init/1]).
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-init(_Args) ->
- Children = [
- child(mem3_events),
- child(mem3_sync),
- child(mem3_cache),
- child(mem3_nodes)
- ],
- {ok, {{one_for_one,10,1}, Children}}.
-
-child(mem3_events) ->
- MFA = {gen_event, start_link, [{local, mem3_events}]},
- {mem3_events, MFA, permanent, 1000, worker, dynamic};
-child(Child) ->
- {Child, {Child, start_link, []}, permanent, 1000, worker, [Child]}.
diff --git a/apps/mem3/src/mem3_sync.erl b/apps/mem3/src/mem3_sync.erl
deleted file mode 100644
index a1ba4f8b..00000000
--- a/apps/mem3/src/mem3_sync.erl
+++ /dev/null
@@ -1,229 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync).
--behaviour(gen_server).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
--export([start_link/0, get_active/0, get_queue/0, push/2, remove_node/1]).
-
--include("mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
--record(state, {
- active = [],
- count = 0,
- limit,
- dict = dict:new(),
- waiting = [],
- update_notifier
-}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-get_active() ->
- gen_server:call(?MODULE, get_active).
-
-get_queue() ->
- gen_server:call(?MODULE, get_queue).
-
-push(Db, Node) ->
- gen_server:cast(?MODULE, {push, Db, Node}).
-
-remove_node(Node) ->
- gen_server:cast(?MODULE, {remove_node, Node}).
-
-init([]) ->
- process_flag(trap_exit, true),
- Concurrency = couch_config:get("mem3", "sync_concurrency", "10"),
- gen_event:add_handler(mem3_events, mem3_sync_event, []),
- {ok, Pid} = start_update_notifier(),
- spawn(fun initial_sync/0),
- {ok, #state{limit = list_to_integer(Concurrency), update_notifier=Pid}}.
-
-handle_call(get_active, _From, State) ->
- {reply, State#state.active, State};
-
-handle_call(get_queue, _From, State) ->
- {reply, State#state.waiting, State}.
-
-handle_cast({push, DbName, Node}, #state{count=Count, limit=Limit} = State)
- when Count >= Limit ->
- {noreply, add_to_queue(State, DbName, Node)};
-
-handle_cast({push, DbName, Node}, State) ->
- #state{active = L, count = C} = State,
- case is_running(DbName, Node, L) of
- true ->
- {noreply, add_to_queue(State, DbName, Node)};
- false ->
- Pid = start_push_replication(DbName, Node),
- {noreply, State#state{active=[{DbName, Node, Pid}|L], count=C+1}}
- end;
-
-handle_cast({remove_node, Node}, State) ->
- Waiting = [{S,N} || {S,N} <- State#state.waiting, N =/= Node],
- Dict = lists:foldl(fun(DbName,D) -> dict:erase({DbName,Node}, D) end,
- State#state.dict, [S || {S,N} <- State#state.waiting, N =:= Node]),
- {noreply, State#state{dict = Dict, waiting = Waiting}}.
-
-handle_info({'EXIT', Pid, _}, #state{update_notifier=Pid} = State) ->
- {ok, NewPid} = start_update_notifier(),
- {noreply, State#state{update_notifier=NewPid}};
-
-handle_info({'EXIT', Active, normal}, State) ->
- handle_replication_exit(State, Active);
-
-handle_info({'EXIT', Active, Reason}, State) ->
- case lists:keyfind(Active, 3, State#state.active) of
- {OldDbName, OldNode, _} ->
- ?LOG_ERROR("~p replication ~s -> ~p died:~n~p", [?MODULE, OldDbName,
- OldNode, Reason]),
- timer:apply_after(5000, ?MODULE, push, [OldDbName, OldNode]);
- false -> ok end,
- handle_replication_exit(State, Active);
-
-handle_info(Msg, State) ->
- ?LOG_ERROR("unexpected msg at replication manager ~p", [Msg]),
- {noreply, State}.
-
-terminate(_Reason, State) ->
- [exit(Pid, shutdown) || {_,_,Pid} <- State#state.active],
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-handle_replication_exit(#state{waiting=[]} = State, Pid) ->
- NewActive = lists:keydelete(Pid, 3, State#state.active),
- {noreply, State#state{active=NewActive, count=length(NewActive)}};
-handle_replication_exit(State, Pid) ->
- #state{active=Active, limit=Limit, dict=D, waiting=Waiting} = State,
- Active1 = lists:keydelete(Pid, 3, Active),
- Count = length(Active1),
- NewState = if Count < Limit ->
- case next_replication(Active1, Waiting) of
- nil -> % all waiting replications are also active
- State#state{active = Active1, count = Count};
- {DbName, Node, StillWaiting} ->
- NewPid = start_push_replication(DbName, Node),
- State#state{
- active = [{DbName, Node, NewPid} | Active1],
- count = Count+1,
- dict = dict:erase({DbName,Node}, D),
- waiting = StillWaiting
- }
- end;
- true ->
- State#state{active = Active1, count=Count}
- end,
- {noreply, NewState}.
-
-start_push_replication(DbName, Node) ->
- PostBody = {[
- {<<"source">>, DbName},
- {<<"target">>, {[{<<"node">>, Node}, {<<"name">>, DbName}]}},
- {<<"continuous">>, false},
- {<<"async">>, true}
- ]},
- ?LOG_INFO("starting ~s -> ~p internal replication", [DbName, Node]),
- UserCtx = #user_ctx{name = <<"replicator">>, roles = [<<"_admin">>]},
- case (catch couch_rep:replicate(PostBody, UserCtx)) of
- Pid when is_pid(Pid) ->
- link(Pid),
- Pid;
- {db_not_found, _Msg} ->
- case couch_db:open(DbName, []) of
- {ok, Db} ->
- % source exists, let's (re)create the target
- couch_db:close(Db),
- case rpc:call(Node, couch_api, create_db, [DbName, []]) of
- {ok, Target} ->
- ?LOG_INFO("~p successfully created ~s on ~p", [?MODULE, DbName,
- Node]),
- couch_db:close(Target),
- start_push_replication(DbName, Node);
- file_exists ->
- start_push_replication(DbName, Node);
- Error ->
- ?LOG_ERROR("~p couldn't create ~s on ~p because ~p",
- [?MODULE, DbName, Node, Error]),
- exit(shutdown)
- end;
- {not_found, no_db_file} ->
- % source is gone, so this is a hack to skip it
- ?LOG_INFO("~p tried to push ~s to ~p but it was already deleted",
- [?MODULE, DbName, Node]),
- spawn_link(fun() -> ok end)
- end;
- {node_not_connected, _} ->
- % we'll get this one when the node rejoins
- ?LOG_ERROR("~p exiting because ~p is not connected", [?MODULE, Node]),
- spawn_link(fun() -> ok end);
- CatchAll ->
- ?LOG_INFO("~p strange error ~p", [?MODULE, CatchAll]),
- case lists:member(Node, nodes()) of
- true ->
- timer:apply_after(5000, ?MODULE, push, [DbName, Node]);
- false ->
- ok
- end,
- spawn_link(fun() -> ok end)
- end.
-
-add_to_queue(State, DbName, Node) ->
- #state{dict=D, waiting=Waiting} = State,
- case dict:is_key({DbName, Node}, D) of
- true ->
- State;
- false ->
- ?LOG_DEBUG("adding ~s -> ~p to internal queue", [DbName, Node]),
- State#state{
- dict = dict:store({DbName,Node}, ok, D),
- waiting = Waiting ++ [{DbName,Node}]
- }
- end.
-
-initial_sync() ->
- Db1 = ?l2b(couch_config:get("mem3", "node_db", "nodes")),
- Db2 = ?l2b(couch_config:get("mem3", "shard_db", "dbs")),
- Nodes = mem3:nodes(),
- Live = nodes(),
- [[push(Db, N) || Db <- [Db1,Db2]] || N <- Nodes, lists:member(N, Live)].
-
-start_update_notifier() ->
- Db1 = ?l2b(couch_config:get("mem3", "node_db", "nodes")),
- Db2 = ?l2b(couch_config:get("mem3", "shard_db", "dbs")),
- couch_db_update_notifier:start_link(fun
- ({updated, Db}) when Db == Db1; Db == Db2 ->
- Nodes = mem3:nodes(),
- Live = nodes(),
- [?MODULE:push(Db, N) || N <- Nodes, lists:member(N, Live)];
- (_) -> ok end).
-
-%% @doc Finds the next {DbName,Node} pair in the list of waiting replications
-%% which does not correspond to an already running replication
--spec next_replication(list(), list()) -> {binary(),node(),list()} | nil.
-next_replication(Active, Waiting) ->
- case lists:splitwith(fun({S,N}) -> is_running(S,N,Active) end, Waiting) of
- {_, []} ->
- nil;
- {Running, [{DbName,Node}|Rest]} ->
- {DbName, Node, Running ++ Rest}
- end.
-
-is_running(DbName, Node, ActiveList) ->
- [] =/= [true || {S,N,_} <- ActiveList, S=:=DbName, N=:=Node].
diff --git a/apps/mem3/src/mem3_sync_event.erl b/apps/mem3/src/mem3_sync_event.erl
deleted file mode 100644
index 5ee93b7a..00000000
--- a/apps/mem3/src/mem3_sync_event.erl
+++ /dev/null
@@ -1,58 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_sync_event).
--behaviour(gen_event).
-
--export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2,
- code_change/3]).
-
-init(_) ->
- {ok, nil}.
-
-handle_event({add_node, Node}, State) ->
- Db1 = list_to_binary(couch_config:get("mem3", "node_db", "nodes")),
- Db2 = list_to_binary(couch_config:get("mem3", "shard_db", "dbs")),
- [mem3_sync:push(Db, Node) || Db <- [Db1, Db2]],
- {ok, State};
-
-handle_event({nodeup, Node}, State) ->
- case lists:member(Node, mem3:nodes()) of
- true ->
- Db1 = list_to_binary(couch_config:get("mem3", "node_db", "nodes")),
- Db2 = list_to_binary(couch_config:get("mem3", "shard_db", "dbs")),
- [mem3_sync:push(Db, Node) || Db <- [Db1, Db2]];
- false ->
- ok
- end,
- {ok, State};
-
-handle_event({Down, Node}, State) when Down == nodedown; Down == remove_node ->
- mem3_sync:remove_node(Node),
- {ok, State};
-
-handle_event(_Event, State) ->
- {ok, State}.
-
-handle_call(_Request, State) ->
- {ok, ok, State}.
-
-handle_info(_Info, State) ->
- {ok, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/apps/mem3/src/mem3_util.erl b/apps/mem3/src/mem3_util.erl
deleted file mode 100644
index 5e866ccf..00000000
--- a/apps/mem3/src/mem3_util.erl
+++ /dev/null
@@ -1,153 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_util).
-
--export([hash/1, name_shard/1, create_partition_map/4, build_shards/2,
- n_val/2, to_atom/1, to_integer/1, write_db_doc/1, delete_db_doc/1,
- load_shards_from_disk/1, load_shards_from_disk/2]).
-
--define(RINGTOP, 2 bsl 31). % CRC32 space
-
--include("mem3.hrl").
--include_lib("couch/include/couch_db.hrl").
-
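-%% @doc Maps a document id (or any term) onto the 2^32 ring via CRC32.
-%% Examples, as exercised in mem3_util_test:
-%%   hash(0)       =:= 1624516141
-%%   hash(<<"0">>) =:= 4108050209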
-hash(Item) when is_binary(Item) ->
- erlang:crc32(Item);
-hash(Item) ->
- erlang:crc32(term_to_binary(Item)).
-
-name_shard(#shard{dbname = DbName, range=[B,E]} = Shard) ->
- Name = ["shards/", couch_util:to_hex(<<B:32/integer>>), "-",
- couch_util:to_hex(<<E:32/integer>>), "/", DbName],
- Shard#shard{name = ?l2b(Name)}.
-
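-%% @doc Builds the shard map for a new database: Q equal ranges over the
-%% 2^32 ring, each replicated N times and assigned to Nodes round-robin.
-%% Illustrative example: create_partition_map(<<"db">>, 3, 4, [a,b,c])
-%% yields 4 x 3 = 12 named shards over the ranges 00000000-3fffffff,
-%% 40000000-7fffffff, 80000000-bfffffff and c0000000-ffffffff.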
-create_partition_map(DbName, N, Q, Nodes) ->
- UniqueShards = make_key_ranges((?RINGTOP) div Q, 0, []),
- Shards0 = lists:flatten([lists:duplicate(N, S) || S <- UniqueShards]),
- Shards1 = attach_nodes(Shards0, [], Nodes, []),
- [name_shard(S#shard{dbname=DbName}) || S <- Shards1].
-
-make_key_ranges(_, CurrentPos, Acc) when CurrentPos >= ?RINGTOP ->
- Acc;
-make_key_ranges(Increment, Start, Acc) ->
- case Start + 2*Increment of
- X when X > ?RINGTOP ->
- End = ?RINGTOP - 1;
- _ ->
- End = Start + Increment - 1
- end,
- make_key_ranges(Increment, End+1, [#shard{range=[Start, End]} | Acc]).
-
-attach_nodes([], Acc, _, _) ->
- lists:reverse(Acc);
-attach_nodes(Shards, Acc, [], UsedNodes) ->
- attach_nodes(Shards, Acc, lists:reverse(UsedNodes), []);
-attach_nodes([S | Rest], Acc, [Node | Nodes], UsedNodes) ->
- attach_nodes(Rest, [S#shard{node=Node} | Acc], Nodes, [Node | UsedNodes]).
-
-write_db_doc(Doc) ->
- {ok, Db} = couch_db:open(<<"dbs">>, []),
- try
- update_db_doc(Db, Doc)
- catch conflict ->
- ok % assume this is a race with another shard on this node
- after
- couch_db:close(Db)
- end.
-
-update_db_doc(Db, #doc{id=Id, body=Body} = Doc) ->
- case couch_db:open_doc(Db, Id, []) of
- {not_found, _} ->
- {ok, _} = couch_db:update_doc(Db, Doc, []);
- {ok, #doc{body=Body}} ->
- ok;
- {ok, OldDoc} ->
- {ok, _} = couch_db:update_doc(Db, OldDoc#doc{body=Body}, [])
- end.
-
-delete_db_doc(DocId) ->
- {ok, Db} = couch_db:open(<<"dbs">>, []),
- try
- delete_db_doc(Db, DocId)
- catch conflict ->
- ok
- after
- couch_db:close(Db)
- end.
-
-delete_db_doc(Db, DocId) ->
- case couch_db:open_doc(Db, DocId, []) of
- {not_found, _} ->
- ok;
- {ok, OldDoc} ->
- {ok, _} = couch_db:update_doc(Db, OldDoc#doc{deleted=true}, [])
- end.
-
-build_shards(DbName, DocProps) ->
- {ByNode} = couch_util:get_value(<<"by_node">>, DocProps, {[]}),
- lists:flatmap(fun({Node, Ranges}) ->
- lists:map(fun(Range) ->
- [B,E] = string:tokens(?b2l(Range), "-"),
- Beg = httpd_util:hexlist_to_integer(B),
- End = httpd_util:hexlist_to_integer(E),
- name_shard(#shard{
- dbname = DbName,
- node = to_atom(Node),
- range = [Beg, End]
- })
- end, Ranges)
- end, ByNode).
-
-to_atom(Node) when is_binary(Node) ->
- list_to_atom(binary_to_list(Node));
-to_atom(Node) when is_atom(Node) ->
- Node.
-
-to_integer(N) when is_integer(N) ->
- N;
-to_integer(N) when is_binary(N) ->
- list_to_integer(binary_to_list(N));
-to_integer(N) when is_list(N) ->
- list_to_integer(N).
-
-n_val(undefined, NodeCount) ->
- n_val(couch_config:get("cluster", "n", "3"), NodeCount);
-n_val(N, NodeCount) when is_list(N) ->
- n_val(list_to_integer(N), NodeCount);
-n_val(N, NodeCount) when is_integer(NodeCount), N > NodeCount ->
- ?LOG_ERROR("Request to create N=~p DB but only ~p node(s)", [N, NodeCount]),
- NodeCount;
-n_val(N, _) when N < 1 ->
- 1;
-n_val(N, _) ->
- N.
-
-load_shards_from_disk(DbName) when is_binary(DbName) ->
- {ok, Db} = couch_db:open(<<"dbs">>, []),
- try load_shards_from_db(Db, DbName) after couch_db:close(Db) end.
-
-load_shards_from_db(#db{} = ShardDb, DbName) ->
- case couch_db:open_doc(ShardDb, DbName, []) of
- {ok, #doc{body = {Props}}} ->
- ?LOG_INFO("dbs cache miss for ~s", [DbName]),
- build_shards(DbName, Props);
- {not_found, _} ->
- erlang:error(database_does_not_exist)
- end.
-
-load_shards_from_disk(DbName, DocId)->
- Shards = load_shards_from_disk(DbName),
- HashKey = hash(DocId),
- [S || #shard{range = [B,E]} = S <- Shards, B < HashKey, HashKey =< E].
diff --git a/apps/mem3/test/01-config-default.ini b/apps/mem3/test/01-config-default.ini
deleted file mode 100644
index 757f7830..00000000
--- a/apps/mem3/test/01-config-default.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[cluster]
-n=3
diff --git a/apps/mem3/test/mem3_util_test.erl b/apps/mem3/test/mem3_util_test.erl
deleted file mode 100644
index 490521cf..00000000
--- a/apps/mem3/test/mem3_util_test.erl
+++ /dev/null
@@ -1,154 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(mem3_util_test).
-
--include("mem3.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-hash_test() ->
- ?assertEqual(1624516141,mem3_util:hash(0)),
- ?assertEqual(3816901808,mem3_util:hash("0")),
- ?assertEqual(3523407757,mem3_util:hash(<<0>>)),
- ?assertEqual(4108050209,mem3_util:hash(<<"0">>)),
- ?assertEqual(3094724072,mem3_util:hash(zero)),
- ok.
-
-name_shard_test() ->
- Shard1 = #shard{},
- ?assertError(function_clause, mem3_util:name_shard(Shard1)),
-
- Shard2 = #shard{dbname = <<"testdb">>, range = [0,100]},
- #shard{name=Name2} = mem3_util:name_shard(Shard2),
- ?assertEqual(<<"shards/00000000-00000064/testdb">>, Name2),
-
- ok.
-
-create_partition_map_test() ->
- {DbName1, N1, Q1, Nodes1} = {<<"testdb1">>, 3, 4, [a,b,c,d]},
- Map1 = mem3_util:create_partition_map(DbName1, N1, Q1, Nodes1),
- ?assertEqual(12, length(Map1)),
-
- {DbName2, N2, Q2, Nodes2} = {<<"testdb2">>, 1, 1, [a,b,c,d]},
- [#shard{name=Name2,node=Node2}] = Map2 =
- mem3_util:create_partition_map(DbName2, N2, Q2, Nodes2),
- ?assertEqual(1, length(Map2)),
- ?assertEqual(<<"shards/00000000-ffffffff/testdb2">>, Name2),
- ?assertEqual(a, Node2),
- ok.
-
-build_shards_test() ->
- DocProps1 =
- [{<<"changelog">>,
- [[<<"add">>,<<"00000000-1fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"20000000-3fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"40000000-5fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"60000000-7fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"80000000-9fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"a0000000-bfffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"c0000000-dfffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"e0000000-ffffffff">>,
- <<"bigcouch@node.local">>]]},
- {<<"by_node">>,
- {[{<<"bigcouch@node.local">>,
- [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>,
- <<"40000000-5fffffff">>,<<"60000000-7fffffff">>,
- <<"80000000-9fffffff">>,<<"a0000000-bfffffff">>,
- <<"c0000000-dfffffff">>,<<"e0000000-ffffffff">>]}]}},
- {<<"by_range">>,
- {[{<<"00000000-1fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"20000000-3fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"40000000-5fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"60000000-7fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"80000000-9fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"a0000000-bfffffff">>,[<<"bigcouch@node.local">>]},
- {<<"c0000000-dfffffff">>,[<<"bigcouch@node.local">>]},
- {<<"e0000000-ffffffff">>,[<<"bigcouch@node.local">>]}]}}],
- Shards1 = mem3_util:build_shards(<<"testdb1">>, DocProps1),
- ExpectedShards1 =
- [{shard,<<"shards/00000000-1fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [0,536870911],
- undefined},
- {shard,<<"shards/20000000-3fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [536870912,1073741823],
- undefined},
- {shard,<<"shards/40000000-5fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [1073741824,1610612735],
- undefined},
- {shard,<<"shards/60000000-7fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [1610612736,2147483647],
- undefined},
- {shard,<<"shards/80000000-9fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [2147483648,2684354559],
- undefined},
- {shard,<<"shards/a0000000-bfffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [2684354560,3221225471],
- undefined},
- {shard,<<"shards/c0000000-dfffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [3221225472,3758096383],
- undefined},
- {shard,<<"shards/e0000000-ffffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [3758096384,4294967295],
- undefined}],
- ?assertEqual(ExpectedShards1, Shards1),
- ok.
-
-
-%% n_val tests
-
-nval_test() ->
- ?assertEqual(2, mem3_util:n_val(2,4)),
- ?assertEqual(1, mem3_util:n_val(-1,4)),
- ?assertEqual(4, mem3_util:n_val(6,4)),
- ok.
-
-config_01_setup() ->
- Ini = filename:join([code:lib_dir(mem3, test), "01-config-default.ini"]),
- {ok, Pid} = couch_config:start_link([Ini]),
- Pid.
-
-config_teardown(_Pid) ->
- couch_config:stop().
-
-n_val_test_() ->
- {"n_val tests",
- [
- {setup,
- fun config_01_setup/0,
- fun config_teardown/1,
- fun(Pid) ->
- {with, Pid, [
- fun n_val_1/1
- ]}
- end}
- ]
- }.
-
-n_val_1(_Pid) ->
- ?assertEqual(3, mem3_util:n_val(undefined, 4)).
diff --git a/apps/rexi/README.md b/apps/rexi/README.md
deleted file mode 100644
index 995916a6..00000000
--- a/apps/rexi/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-## rexi
-
-Rexi is a tailor-made RPC server application for sending [CouchDB][1] operations to nodes in a cluster. It is used in [BigCouch][2] as the remote-procedure vehicle that executes 'fabric' functions on remote cluster nodes.
-
-Rexi better fits the needs of the BigCouch distributed data store by dropping some unneeded overhead of rex, the RPC server that ships with Erlang/OTP. It is optimized for the case where you need to spawn many remote processes. Cast messages are sent from the origin to the remote rexi server, and local processes are spawned from there, which is vastly more efficient than spawning remote processes from the origin. You still get monitoring of the remote processes, but the request-handling process doesn't get stuck trying to connect to an overloaded or dead node; 'rexi_DOWN' messages will arrive at the client eventually. This mix of low latency and reliable failure detection has proved extremely advantageous, vastly improving the performance of BigCouch.
-
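-As a sketch of the flow described above (illustrative only; 'Nodes', 'm', 'f' and 'Arg' are placeholders), a caller fans out casts and then gathers replies, exit notices, or a timeout:
-
-    Refs = [rexi:cast(Node, {m, f, [Arg]}) || Node <- Nodes],
-    Replies = [receive
-                   {Ref, {rexi_EXIT, Reason}} -> {error, Reason};
-                   {Ref, Reply} -> Reply
-               after 60000 -> timeout
-               end || Ref <- Refs].
-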
-Rexi is used in conjunction with 'fabric', which is also an application within BigCouch, but rexi can be used on a stand-alone basis as well.
-
-### Getting Started
-Dependencies:
- * Erlang R13B-03 (or higher)
-
-Build with rebar:
- make
-
-### License
-[Apache 2.0][3]
-
-### Contact
- * [http://cloudant.com][4]
- * [info@cloudant.com][5]
-
-[1]: http://couchdb.apache.org
-[2]: http://github.com/cloudant/bigcouch
-[3]: http://www.apache.org/licenses/LICENSE-2.0.html
-[4]: http://cloudant.com
-[5]: mailto:info@cloudant.com
diff --git a/apps/rexi/ebin/rexi.appup b/apps/rexi/ebin/rexi.appup
deleted file mode 100644
index 7ed8ad73..00000000
--- a/apps/rexi/ebin/rexi.appup
+++ /dev/null
@@ -1,5 +0,0 @@
-{"1.1",[{"1.0",[
- {load_module, rexi},
- {add_module, rexi_monitor},
- {load_module, rexi_server}
-]}],[{"1.0",[]}]}.
diff --git a/apps/rexi/src/rexi.app.src b/apps/rexi/src/rexi.app.src
deleted file mode 100644
index d56a182f..00000000
--- a/apps/rexi/src/rexi.app.src
+++ /dev/null
@@ -1,7 +0,0 @@
-{application, rexi, [
- {description, "Lightweight RPC server"},
- {vsn, "1.2"},
- {registered, [rexi_sup, rexi_server]},
- {applications, [kernel, stdlib]},
- {mod, {rexi_app,[]}}
-]}.
diff --git a/apps/rexi/src/rexi.erl b/apps/rexi/src/rexi.erl
deleted file mode 100644
index 0ba77bdb..00000000
--- a/apps/rexi/src/rexi.erl
+++ /dev/null
@@ -1,105 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi).
--export([start/0, stop/0, restart/0]).
--export([cast/2, cast/3, kill/2]).
--export([reply/1, sync_reply/1, sync_reply/2]).
--export([async_server_call/2, async_server_call/3]).
-
--define(SERVER, rexi_server).
-
-start() ->
- application:start(rexi).
-
-stop() ->
- application:stop(rexi).
-
-restart() ->
- stop(), start().
-
-%% @equiv cast(Node, self(), MFA)
--spec cast(node(), {atom(), atom(), list()}) -> reference().
-cast(Node, MFA) ->
- cast(Node, self(), MFA).
-
-%% @doc Executes apply(M, F, A) on Node.
-%% You might want to use this instead of rpc:cast/4 for two reasons. First,
-%% the Caller pid and the returned reference are inserted into the remote
-%% process' dictionary as `rexi_from', so it has a way to communicate with you.
-%% Second, the remote process is monitored. If it exits with a Reason other
-%% than normal, Caller will receive a message of the form
-%% `{Ref, {rexi_EXIT, Reason}}' where Ref is the returned reference.
--spec cast(node(), pid(), {atom(), atom(), list()}) -> reference().
-cast(Node, Caller, MFA) ->
- Ref = make_ref(),
- ok = gen_server:cast({?SERVER, Node}, {doit, {Caller,Ref}, MFA}),
- Ref.
-
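-%% On the receiving side, the spawned process can answer the Caller with
-%% rexi:reply/1. An illustrative worker (do_work/1 is hypothetical):
-%%
-%%     f(Arg) ->
-%%         rexi:reply({ok, do_work(Arg)}).
-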
-%% @doc Sends an async kill signal to the remote process associated with Ref.
-%% No rexi_EXIT message will be sent.
--spec kill(node(), reference()) -> ok.
-kill(Node, Ref) ->
- ok = gen_server:cast({?SERVER, Node}, {kill, Ref}).
-
-%% @equiv async_server_call(Server, self(), Request)
--spec async_server_call(pid() | {atom(),node()}, any()) -> reference().
-async_server_call(Server, Request) ->
- async_server_call(Server, self(), Request).
-
-%% @doc Sends a properly formatted gen_server:call Request to the Server and
-%% returns the reference which the Server will include in its reply. The
-%% function acts more like cast() than call() in that the server process
-%% is not monitored. Clients who want to know if the server is alive should
-%% monitor it themselves before calling this function.
--spec async_server_call(pid() | {atom(),node()}, pid(), any()) -> reference().
-async_server_call(Server, Caller, Request) ->
- Ref = make_ref(),
- do_send(Server, {'$gen_call', {Caller,Ref}, Request}),
- Ref.
-
-%% @doc convenience function to reply to the original rexi Caller.
--spec reply(any()) -> any().
-reply(Reply) ->
- {Caller, Ref} = get(rexi_from),
- erlang:send(Caller, {Ref,Reply}).
-
-%% @equiv sync_reply(Reply, 300000)
-sync_reply(Reply) ->
- sync_reply(Reply, 300000).
-
-%% @doc convenience function to reply to caller and wait for response. Message
-%% is of the form {OriginalRef, {self(),reference()}, Reply}, which enables the
-%% original caller to respond back.
--spec sync_reply(any(), pos_integer() | infinity) -> any().
-sync_reply(Reply, Timeout) ->
- {Caller, Ref} = get(rexi_from),
- Tag = make_ref(),
- erlang:send(Caller, {Ref, {self(),Tag}, Reply}),
- receive {Tag, Response} ->
- Response
- after Timeout ->
- timeout
- end.
-
-%% internal functions %%
-
-% send a message as quickly as possible
-do_send(Dest, Msg) ->
- case erlang:send(Dest, Msg, [noconnect]) of
- noconnect ->
- spawn(erlang, send, [Dest, Msg]);
- ok ->
- ok
- end.
diff --git a/apps/rexi/src/rexi_app.erl b/apps/rexi/src/rexi_app.erl
deleted file mode 100644
index 2dd99c23..00000000
--- a/apps/rexi/src/rexi_app.erl
+++ /dev/null
@@ -1,25 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_app).
--behaviour(application).
--export([start/2, stop/1]).
-
--include_lib("eunit/include/eunit.hrl").
-
-start(_Type, StartArgs) ->
- rexi_sup:start_link(StartArgs).
-
-stop(_State) ->
- ok.
diff --git a/apps/rexi/src/rexi_monitor.erl b/apps/rexi/src/rexi_monitor.erl
deleted file mode 100644
index 819b6bce..00000000
--- a/apps/rexi/src/rexi_monitor.erl
+++ /dev/null
@@ -1,54 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_monitor).
--export([start/1, stop/1]).
-
--include_lib("eunit/include/eunit.hrl").
-
-%% @doc spawn_links a process which monitors the supplied list of items and
-%% returns the process ID. If a monitored process exits, the caller will
-%% receive a {rexi_DOWN, MonitoringPid, DeadPid, Reason} message.
--spec start([pid() | atom() | {atom(),node()}]) -> pid().
-start(Procs) ->
- Parent = self(),
- spawn_link(fun() ->
- [erlang:monitor(process, P) || P <- Procs],
- wait_monitors(Parent)
- end).
-
-%% @doc Cleanly shut down the monitoring process and flush all rexi_DOWN
-%% messages from our mailbox.
--spec stop(pid()) -> ok.
-stop(MonitoringPid) ->
- MonitoringPid ! {self(), shutdown},
- flush_down_messages().
-
-%% internal functions %%
-
-wait_monitors(Parent) ->
- receive
- {'DOWN', _, process, Pid, Reason} ->
- Parent ! {rexi_DOWN, self(), Pid, Reason},
- wait_monitors(Parent);
- {Parent, shutdown} ->
- ok
- end.
-
-flush_down_messages() ->
- receive {rexi_DOWN, _, _, _} ->
- flush_down_messages()
- after 0 ->
- ok
- end.
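
The monitoring process above simply converts raw 'DOWN' signals into {rexi_DOWN, ...} messages addressed to its parent, and stop/1 both shuts it down and flushes any stragglers. A sketch of the intended pattern around a batch of workers (WorkerPids and the handler are hypothetical):

    MonPid = rexi_monitor:start(WorkerPids),
    try
        receive
            {rexi_DOWN, MonPid, DeadPid, Reason} ->
                handle_worker_death(DeadPid, Reason)
        end
    after
        rexi_monitor:stop(MonPid)   % flushes any remaining rexi_DOWN messages
    end.
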
diff --git a/apps/rexi/src/rexi_server.erl b/apps/rexi/src/rexi_server.erl
deleted file mode 100644
index eb9b0ca2..00000000
--- a/apps/rexi/src/rexi_server.erl
+++ /dev/null
@@ -1,100 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_server).
--behaviour(gen_server).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
--export([start_link/0, init_p/2]).
-
--include_lib("eunit/include/eunit.hrl").
-
--record(st, {
- workers = ets:new(workers, [private, {keypos,2}])
-}).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-init([]) ->
- {ok, #st{}}.
-
-handle_call(_Request, _From, St) ->
- {reply, ignored, St}.
-
-handle_cast({doit, From, MFA}, #st{workers=Workers} = St) ->
- {LocalPid, Ref} = spawn_monitor(?MODULE, init_p, [From, MFA]),
- {noreply, St#st{workers = add_worker({LocalPid, Ref, From}, Workers)}};
-
-handle_cast({kill, FromRef}, #st{workers=Workers} = St) ->
- case find_worker_from(FromRef, Workers) of
- {Pid, KeyRef, {_, FromRef}} ->
- erlang:demonitor(KeyRef),
- exit(Pid, kill),
- {noreply, St#st{workers = remove_worker(KeyRef, Workers)}};
- false ->
- {noreply, St}
- end.
-
-handle_info({'DOWN', Ref, process, _, normal}, #st{workers=Workers} = St) ->
- {noreply, St#st{workers = remove_worker(Ref, Workers)}};
-
-handle_info({'DOWN', Ref, process, Pid, Reason}, #st{workers=Workers} = St) ->
- case find_worker(Ref, Workers) of
- {Pid, Ref, From} ->
- notify_caller(From, Reason),
- {noreply, St#st{workers = remove_worker(Ref, Workers)}};
- false ->
- {noreply, St}
- end;
-
-handle_info(_Info, St) ->
- {noreply, St}.
-
-terminate(_Reason, St) ->
- ets:foldl(fun({Pid, _, _}, _) -> exit(Pid,kill) end, nil, St#st.workers),
- ok.
-
-code_change(_OldVsn, St, _Extra) ->
- {ok, St}.
-
-%% @doc Initializes a process started by rexi_server.
--spec init_p({pid(), reference()}, {atom(), atom(), list()}) -> any().
-init_p(From, {M,F,A}) ->
- put(rexi_from, From),
- put(initial_call, {M,F,length(A)}),
- try apply(M, F, A) catch _:Reason -> exit(Reason) end.
-
-%% internal
-
-add_worker(Worker, Tab) ->
- ets:insert(Tab, Worker), Tab.
-
-remove_worker(Ref, Tab) ->
- ets:delete(Tab, Ref), Tab.
-
-find_worker(Ref, Tab) ->
- case ets:lookup(Tab, Ref) of [] -> false; [Worker] -> Worker end.
-
-find_worker_from(Ref, Tab) ->
- case ets:match_object(Tab, {'_', '_', {'_', Ref}}) of
- [] ->
- false;
- [Worker] ->
- Worker
- end.
-
-notify_caller({Caller, Ref}, Reason) ->
- Caller ! {Ref, {rexi_EXIT, Reason}}.
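
rexi_server accepts exactly two casts, matching the handle_cast/2 clauses above: {doit, From, MFA} spawns a monitored worker, and {kill, Ref} tears one down by its caller reference. A client-side sketch, assuming Node runs a registered rexi_server and with a purely illustrative MFA:

    Ref = make_ref(),
    From = {self(), Ref},
    MFA = {fabric_rpc, get_doc_count, [ShardName]},   % illustrative MFA
    gen_server:cast({rexi_server, Node}, {doit, From, MFA}),
    %% if the worker dies abnormally we receive {Ref, {rexi_EXIT, Reason}};
    %% to abandon the call early, kill the worker by the same Ref:
    gen_server:cast({rexi_server, Node}, {kill, Ref}).
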
diff --git a/apps/rexi/src/rexi_sup.erl b/apps/rexi/src/rexi_sup.erl
deleted file mode 100644
index 828ee54d..00000000
--- a/apps/rexi/src/rexi_sup.erl
+++ /dev/null
@@ -1,29 +0,0 @@
-% Copyright 2010 Cloudant
-%
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(rexi_sup).
--behaviour(supervisor).
--export([init/1]).
-
--export([start_link/1]).
-
--include_lib("eunit/include/eunit.hrl").
-
-start_link(Args) ->
- supervisor:start_link({local,?MODULE}, ?MODULE, Args).
-
-init([]) ->
- Mod = rexi_server,
- Spec = {Mod, {Mod,start_link,[]}, permanent, 100, worker, [Mod]},
- {ok, {{one_for_one, 3, 10}, [Spec]}}.
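
The tree is deliberately small: one permanent rexi_server under a one_for_one strategy that tolerates at most 3 restarts in 10 seconds. Bringing it up on a node is just:

    %% on each cluster node
    application:start(rexi).
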
diff --git a/apps/rexi/src/rexi_utils.erl b/apps/rexi/src/rexi_utils.erl
deleted file mode 100644
index 3b6102da..00000000
--- a/apps/rexi/src/rexi_utils.erl
+++ /dev/null
@@ -1,53 +0,0 @@
--module(rexi_utils).
-
--export([recv/6]).
-
-%% @doc Set up the receive loop with an overall timeout.
--spec recv([any()], integer(), function(), any(), timeout(), timeout()) ->
- {ok, any()} | timeout | {error, any()}.
-recv(Refs, Keypos, Fun, Acc0, infinity, PerMsgTO) ->
- process_mailbox(Refs, Keypos, Fun, Acc0, nil, PerMsgTO);
-recv(Refs, Keypos, Fun, Acc0, GlobalTimeout, PerMsgTO) ->
- TimeoutRef = erlang:make_ref(),
- TRef = erlang:send_after(GlobalTimeout, self(), {timeout, TimeoutRef}),
- try
- process_mailbox(Refs, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO)
- after
- erlang:cancel_timer(TRef)
- end.
-
-process_mailbox(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
- case process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) of
- {ok, Acc} ->
- process_mailbox(RefList, Keypos, Fun, Acc, TimeoutRef, PerMsgTO);
- {stop, Acc} ->
- {ok, Acc};
- Error ->
- Error
- end.
-
-process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
- receive
- {timeout, TimeoutRef} ->
- timeout;
- {Ref, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- % a non-matching message which we can safely ignore
- {ok, Acc0};
- Worker ->
- Fun(Msg, Worker, Acc0)
- end;
- {Ref, From, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- {ok, Acc0};
- Worker ->
- Fun(Msg, {Worker, From}, Acc0)
- end;
- {rexi_DOWN, _RexiMonPid, ServerPid, Reason} = Msg ->
- io:format("rexi_DOWN ~p ~p~n", [ServerPid, Reason]),
- Fun(Msg, nil, Acc0)
- after PerMsgTO ->
- timeout
- end.
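
recv/6 layers a global timer (which delivers {timeout, TimeoutRef}) over a per-message after clause, and dispatches each {Ref, Msg} to the worker that owns Ref. A sketch that collects one reply per worker, assuming workers are {Ref, Node} tuples so the key position is 1:

    Workers = [{Ref1, Node1}, {Ref2, Node2}],   % hypothetical worker list
    Callback = fun(Msg, _Worker, Acc) ->
        case [Msg | Acc] of
            Replies when length(Replies) =:= length(Workers) ->
                {stop, Replies};   % every worker has answered
            Replies ->
                {ok, Replies}      % keep receiving
        end
    end,
    rexi_utils:recv(Workers, 1, Callback, [], 5000, infinity).
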
diff --git a/configure b/configure
index eb8afd40..75e359ab 100755
--- a/configure
+++ b/configure
@@ -88,4 +88,4 @@ cat > rel/dev$i.config << EOF
EOF
done
-cat rel/bigcouch.config
+./rebar get-deps && cat rel/bigcouch.config
diff --git a/rebar b/rebar
index 1b106d4d..23ae5c9f 100755
--- a/rebar
+++ b/rebar
Binary files differ
diff --git a/rebar.config b/rebar.config
index 2db0ae21..c3ee7d9f 100644
--- a/rebar.config
+++ b/rebar.config
@@ -12,16 +12,25 @@
% License for the specific language governing permissions and limitations under
% the License.
+{deps, [
+ {rexi, ".*", {git, "git://github.com/cloudant/rexi.git", "master"}},
+ {fabric, ".*", {git, "git://github.com/cloudant/fabric.git", "master"}},
+ {mem3, ".*", {git, "git://github.com/cloudant/mem3.git", "master"}},
+ {chttpd, ".*", {git, "git://github.com/cloudant/chttpd.git", "master"}}
+]}.
+% needed for a clean transition to the deps model
+{clean_files, [
+ "apps/rexi/ebin",
+ "apps/fabric/ebin",
+ "apps/mem3/ebin",
+ "apps/chttpd/ebin"
+]}.
{sub_dirs, [
"apps/ibrowse",
"apps/couch",
- "apps/chttpd",
"apps/etap",
- "apps/fabric",
- "apps/mem3",
"apps/mochiweb",
"apps/oauth",
- "apps/rexi",
"rel"
]}.
{cover_enabled, true}.
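
The four applications leave the apps/ tree and become rebar dependencies fetched at build time, with {clean_files, ...} sweeping the ebin directories left over from the in-tree layout. Each entry here tracks the tip of master; pinning to a fixed version uses the same tuple shape, for example (the tag itself is hypothetical):

    {deps, [
        {rexi, ".*", {git, "git://github.com/cloudant/rexi.git", {tag, "1.2"}}}
    ]}.
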
diff --git a/rel/reltool.config b/rel/reltool.config
index 1b5868f4..bfdcde75 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -13,7 +13,7 @@
% the License.
{sys, [
- {lib_dirs, ["../apps"]},
+ {lib_dirs, ["../apps", "../deps"]},
{rel, "bigcouch", "1.2.4", [
kernel,
stdlib,