-- cgit v1.2.3 From 3a1f041e07c75001cf52cbae1391dcd801c17396 Mon Sep 17 00:00:00 2001 From: John Christopher Anderson Date: Tue, 5 Jan 2010 18:11:58 +0000 Subject: merge account branch to apache branch git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/account@896158 13f79535-47bb-0310-9956-ffa450edef68 --- etc/couchdb/default.ini.tpl.in | 5 +- etc/couchdb/local.ini | 6 +- share/www/_sidebar.html | 28 ++- share/www/config.html | 4 +- share/www/couch_tests.html | 8 +- share/www/custom_test.html | 4 +- share/www/database.html | 3 +- share/www/dialog/_admin_party.html | 33 +++ share/www/dialog/_create_admin.html | 50 ++++ share/www/dialog/_login.html | 34 +++ share/www/dialog/_signup.html | 35 +++ share/www/document.html | 9 +- share/www/index.html | 1 + share/www/replicator.html | 2 + share/www/script/couch.js | 62 ++--- share/www/script/couch_test_runner.js | 64 ++++- share/www/script/couch_tests.js | 1 + share/www/script/futon.browse.js | 7 +- share/www/script/futon.js | 128 ++++++++++ share/www/script/jquery.couch.js | 68 +++++- share/www/script/test/cookie_auth.js | 253 +++++++++++++------- share/www/script/test/oauth.js | 13 +- share/www/script/test/users_db.js | 66 ++++++ share/www/style/layout.css | 11 +- src/couchdb/couch_db.hrl | 6 +- src/couchdb/couch_httpd.erl | 28 ++- src/couchdb/couch_httpd_auth.erl | 429 +++++++++++++--------------------- src/couchdb/couch_httpd_db.erl | 34 ++- src/couchdb/couch_httpd_oauth.erl | 3 +- src/couchdb/couch_server.erl | 1 + 30 files changed, 948 insertions(+), 448 deletions(-) create mode 100644 share/www/dialog/_admin_party.html create mode 100644 share/www/dialog/_create_admin.html create mode 100644 share/www/dialog/_login.html create mode 100644 share/www/dialog/_signup.html create mode 100644 share/www/script/test/users_db.js diff --git a/etc/couchdb/default.ini.tpl.in b/etc/couchdb/default.ini.tpl.in index 71656d26..409bfe98 100644 --- a/etc/couchdb/default.ini.tpl.in +++ b/etc/couchdb/default.ini.tpl.in @@ -17,9 +17,8 @@ batch_save_interval = 1000 ; milliseconds after which to save batches [httpd] port = 5984 bind_address = 127.0.0.1 -authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, default_authentication_handler} +authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler} default_handler = {couch_httpd_db, handle_request} -WWW-Authenticate = Basic realm="administrator" [log] file = %localstatelogdir%/couch.log @@ -27,7 +26,6 @@ level = info [couch_httpd_auth] authentication_db = users -secret = replace this with a real secret in your local.ini file require_valid_user = false [query_servers] @@ -70,7 +68,6 @@ _log = {couch_httpd_misc_handlers, handle_log_req} _sleep = {couch_httpd_misc_handlers, handle_sleep_req} _session = {couch_httpd_auth, handle_session_req} _oauth = {couch_httpd_oauth, handle_oauth_req} -_user = {couch_httpd_auth, handle_user_req} [httpd_db_handlers] _view_cleanup = {couch_httpd_db, handle_view_cleanup_req} diff --git a/etc/couchdb/local.ini b/etc/couchdb/local.ini index 0a118770..96fcdc76 100644 --- a/etc/couchdb/local.ini +++ b/etc/couchdb/local.ini @@ -10,14 +10,12 @@ [httpd] ;port = 5984 ;bind_address = 127.0.0.1 +; Uncomment next line to trigger basic-auth popup on unauthorized requests. 
+;WWW-Authenticate = Basic realm="administrator" [log] ;level = debug -[couch_httpd_auth] -;secret = replace this with a real secret - - [update_notification] ;unique notifier name=/full/path/to/exe -with "cmd line arg" diff --git a/share/www/_sidebar.html b/share/www/_sidebar.html index c83b100c..6c7abc99 100644 --- a/share/www/_sidebar.html +++ b/share/www/_sidebar.html @@ -30,7 +30,31 @@ specific language governing permissions and limitations under the License. diff --git a/share/www/config.html b/share/www/config.html index f324c923..8f788041 100644 --- a/share/www/config.html +++ b/share/www/config.html @@ -19,10 +19,12 @@ specific language governing permissions and limitations under the License. + - + + + - + + + + + - + + - + - - - + - + + + + diff --git a/share/www/replicator.html b/share/www/replicator.html index 3516128d..5a09ca16 100644 --- a/share/www/replicator.html +++ b/share/www/replicator.html @@ -19,8 +19,10 @@ specific language governing permissions and limitations under the License. + + '); }; diff --git a/share/www/script/futon.browse.js b/share/www/script/futon.browse.js index 5f687941..65acbdeb 100644 --- a/share/www/script/futon.browse.js +++ b/share/www/script/futon.browse.js @@ -97,7 +97,10 @@ // Page class for browse/database.html CouchDatabasePage: function() { var urlParts = location.search.substr(1).split("/"); - var dbName = decodeURIComponent(urlParts.shift()); + var dbName = decodeURIComponent(urlParts.shift()) + + var dbNameRegExp = new RegExp("[^a-z0-9\_\$\(\)\+\/\-]", "g"); + dbName = dbName.replace(dbNameRegExp, ""); $.futon.storage.declareWithPrefix(dbName + ".", { desc: {}, @@ -119,7 +122,7 @@ if (viewName) { this.redirecting = true; location.href = "database.html?" + encodeURIComponent(dbName) + - "/" + viewName; + "/" + encodeURIComponent(viewName); } } var db = $.couch.db(dbName); @@ -372,7 +375,8 @@ var path = $.couch.encodeDocId(doc._id) + "/_view/" + encodeURIComponent(viewNames[j]); var option = $(document.createElement("option")) - .attr("value", path).text(viewNames[j]).appendTo(optGroup); + .attr("value", path).text(encodeURIComponent(viewNames[j])) + .appendTo(optGroup); if (path == viewName) { option[0].selected = true; } @@ -408,7 +412,7 @@ } var viewCode = resp.views[localViewName]; page.viewLanguage = resp.language || "javascript"; - $("#language").val(page.viewLanguage); + $("#language").val(encodeURIComponent(page.viewLanguage)); page.updateViewEditor(viewCode.map, viewCode.reduce || ""); $("#viewcode button.revert, #viewcode button.save").attr("disabled", "disabled"); page.storedViewCode = viewCode; @@ -420,7 +424,7 @@ page.updateViewEditor(page.storedViewCode.map, page.storedViewCode.reduce || ""); page.viewLanguage = page.storedViewLanguage; - $("#language").val(page.viewLanguage); + $("#language").val(encodeURIComponent(page.viewLanguage)); $("#viewcode button.revert, #viewcode button.save").attr("disabled", "disabled"); page.isDirty = false; if (callback) callback(); @@ -504,7 +508,8 @@ callback({ docid: "Cannot save to " + data.docid + " because its language is \"" + doc.language + - "\", not \"" + page.viewLanguage + "\"." + "\", not \"" + + encodeURIComponent(page.viewLanguage) + "\"." 
});
          return;
        }
diff --git a/share/www/script/futon.format.js b/share/www/script/futon.format.js
index 0d536e36..31880764 100644
--- a/share/www/script/futon.format.js
+++ b/share/www/script/futon.format.js
@@ -16,7 +16,10 @@
   escape: function(string) {
     return string.replace(/&/g, "&amp;")
                  .replace(/</g, "&lt;")
-                 .replace(/>/g, "&gt;");
+                 .replace(/>/g, "&gt;")
+                 .replace(/"/, "&quot;")
+                 .replace(/'/, "&#39;;")
+                 ;
   },

   // JSON pretty printing
diff --git a/share/www/script/futon.js b/share/www/script/futon.js
index 200d6ec5..c4647ed1 100644
--- a/share/www/script/futon.js
+++ b/share/www/script/futon.js
@@ -215,9 +215,10 @@ function $$(node) {
   recentDbs.sort();
   $.each(recentDbs, function(idx, name) {
     if (name) {
+      name = encodeURIComponent(name);
       $("#dbs").append("
  • " + "" + - "" + name + + "" + name + "
  • "); } }); diff --git a/share/www/session.html b/share/www/session.html index 581640b0..0ebd943d 100644 --- a/share/www/session.html +++ b/share/www/session.html @@ -36,7 +36,7 @@ specific language governing permissions and limitations under the License. } m = qp.match(/reason=(.*)/); if (m) { - reason = decodeURIComponent(m[1]); + reason = $.futon.escape(decodeURIComponent(m[1])); } }); if (reason) { -- cgit v1.2.3 From cb8073a4eb1513fac7c97a2164368e4a684a1cf2 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Wed, 3 Nov 2010 10:58:48 +0000 Subject: Merged revision 1030405 from trunk: Replicator fix: add Content-Length header to the request that creates the remote DB. Closes COUCHDB-932. Patch by Dale Harvey. Thanks. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1030406 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_rep_httpc.erl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/couchdb/couch_rep_httpc.erl b/src/couchdb/couch_rep_httpc.erl index 06d4748a..b32e4c77 100644 --- a/src/couchdb/couch_rep_httpc.erl +++ b/src/couchdb/couch_rep_httpc.erl @@ -85,7 +85,8 @@ db_exists(Req, CanonicalUrl, CreateDB) -> end, case CreateDB of true -> - catch ibrowse:send_req(Url, HeadersFun(put), put, [], Options); + Headers = [{"Content-Length", 0} | HeadersFun(put)], + catch ibrowse:send_req(Url, Headers, put, [], Options); _Else -> ok end, case catch ibrowse:send_req(Url, HeadersFun(head), head, [], Options) of -- cgit v1.2.3 From a615efe543c290b4611fa7aa8313af2291df2c3f Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Wed, 3 Nov 2010 11:05:10 +0000 Subject: typo git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1030411 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/futon.format.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/share/www/script/futon.format.js b/share/www/script/futon.format.js index 31880764..8d9b7f5c 100644 --- a/share/www/script/futon.format.js +++ b/share/www/script/futon.format.js @@ -18,7 +18,7 @@ .replace(//g, ">") .replace(/"/, """) - .replace(/'/, "';") + .replace(/'/, "'") ; }, -- cgit v1.2.3 From e29a0ad2dbc9a4fbb1e76741c251693a8e44c7e9 Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Wed, 3 Nov 2010 12:18:41 +0000 Subject: We don't have MOVE requests. No need to track them. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1030432 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/priv/stat_descriptions.cfg.in | 1 - 1 file changed, 1 deletion(-) diff --git a/src/couchdb/priv/stat_descriptions.cfg.in b/src/couchdb/priv/stat_descriptions.cfg.in index 5c972ddf..b80d7684 100644 --- a/src/couchdb/priv/stat_descriptions.cfg.in +++ b/src/couchdb/priv/stat_descriptions.cfg.in @@ -32,7 +32,6 @@ {httpd_request_methods, 'DELETE', "number of HTTP DELETE requests"}. {httpd_request_methods, 'GET', "number of HTTP GET requests"}. {httpd_request_methods, 'HEAD', "number of HTTP HEAD requests"}. -{httpd_request_methods, 'MOVE', "number of HTTP MOVE requests"}. {httpd_request_methods, 'POST', "number of HTTP POST requests"}. {httpd_request_methods, 'PUT', "number of HTTP PUT requests"}. -- cgit v1.2.3 From aa92e73811342ec1398dd77c92017520f9987715 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Wed, 3 Nov 2010 17:04:38 +0000 Subject: Merged revision 1030534 from trunk: Fix ibrowse 2.0.x inactivity timeouts not getting cleared. 
Patch submitted upstream: http://github.com/cmullaparthi/ibrowse/issues/#issue/17 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1030537 13f79535-47bb-0310-9956-ffa450edef68 --- src/ibrowse/ibrowse_http_client.erl | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/ibrowse/ibrowse_http_client.erl b/src/ibrowse/ibrowse_http_client.erl index 16d9b872..2dd209da 100644 --- a/src/ibrowse/ibrowse_http_client.erl +++ b/src/ibrowse/ibrowse_http_client.erl @@ -1713,7 +1713,15 @@ set_inac_timer(State) -> set_inac_timer(State, get_inac_timeout(State)). set_inac_timer(_State, Timeout) when is_integer(Timeout) -> - erlang:send_after(Timeout, self(), timeout); + TimerRef = erlang:send_after(Timeout, self(), timeout), + case erlang:put(inac_timer, TimerRef) of + OldTimer when is_reference(OldTimer) -> + erlang:cancel_timer(OldTimer), + receive timeout -> ok after 0 -> ok end; + _ -> + ok + end, + TimerRef; set_inac_timer(_, _) -> undefined. -- cgit v1.2.3 From 537ce23994d1f180a52d977213e8622e530aaf99 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 4 Nov 2010 21:55:17 +0000 Subject: Merged revision 1031276 from trunk: Replicator: set Content-Length header when posting to _ensure_full_commit. Same reason as for COUCHDB-932. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1031279 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_rep.erl | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl index 28054f34..90b065c0 100644 --- a/src/couchdb/couch_rep.erl +++ b/src/couchdb/couch_rep.erl @@ -655,10 +655,15 @@ commit_to_both(Source, Target, RequiredSeq) -> {SourceStartTime, TargetStartTime}. ensure_full_commit(#http_db{headers = Headers} = Target) -> + Headers1 = [ + {"Content-Length", 0} | + couch_util:proplist_apply_field( + {"Content-Type", "application/json"}, Headers) + ], Req = Target#http_db{ resource = "_ensure_full_commit", method = post, - headers = couch_util:proplist_apply_field({"Content-Type", "application/json"}, Headers) + headers = Headers1 }, {ResultProps} = couch_rep_httpc:request(Req), true = couch_util:get_value(<<"ok">>, ResultProps), @@ -680,11 +685,16 @@ ensure_full_commit(Target) -> end. ensure_full_commit(#http_db{headers = Headers} = Source, RequiredSeq) -> + Headers1 = [ + {"Content-Length", 0} | + couch_util:proplist_apply_field( + {"Content-Type", "application/json"}, Headers) + ], Req = Source#http_db{ resource = "_ensure_full_commit", method = post, qs = [{seq, RequiredSeq}], - headers = couch_util:proplist_apply_field({"Content-Type", "application/json"}, Headers) + headers = Headers1 }, {ResultProps} = couch_rep_httpc:request(Req), case couch_util:get_value(<<"ok">>, ResultProps) of -- cgit v1.2.3 From f91636c7085932952b30424662623dc3c6f7f07f Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sun, 7 Nov 2010 21:09:17 +0000 Subject: Merged revision 1032391 from trunk: Bug fix: the separator for media types in the 'Accept' header is a comma. The semicolon separates a media type from its parameters. A more complete solution, which takes into account Q values, was submitted upstream: https://github.com/mochi/mochiweb/issues/issue/21. 
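
For example, in "Accept: multipart/related, */*" the comma separates the two
media ranges, while a semicolon would only attach parameters (such as q
values) to a single media range. A minimal shell sketch of why the old "; "
separator broke matching (string:tokens/2 splits on every character in the
separator list):

    1> string:tokens("multipart/related, */*", "; ").
    ["multipart/related,","*/*"]
    2> string:tokens("multipart/related, */*", ", ").
    ["multipart/related","*/*"]

With "; " the comma stays glued to the token, so an exact lists:member/2
check for a media type can never match.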
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1032392 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/attachments_multipart.js | 2 +- src/couchdb/couch_httpd_db.erl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/share/www/script/test/attachments_multipart.js b/share/www/script/test/attachments_multipart.js index 2b79e559..5edf4d2c 100644 --- a/share/www/script/test/attachments_multipart.js +++ b/share/www/script/test/attachments_multipart.js @@ -193,7 +193,7 @@ couchTests.attachments_multipart= function(debug) { // a certain rev). xhr = CouchDB.request("GET", "/test_suite_db/multipart?atts_since=[\"" + firstrev + "\"]", - {headers:{"accept": "multipart/related,*/*;"}}); + {headers:{"accept": "multipart/related, */*"}}); T(xhr.status == 200); diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl index cf4e2120..7b09bf57 100644 --- a/src/couchdb/couch_httpd_db.erl +++ b/src/couchdb/couch_httpd_db.erl @@ -576,7 +576,7 @@ db_doc_req(#httpd{method='GET'}=Req, Db, DocId) -> {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options), AcceptedTypes = case couch_httpd:header_value(Req, "Accept") of undefined -> []; - AcceptHeader -> string:tokens(AcceptHeader, "; ") + AcceptHeader -> string:tokens(AcceptHeader, ", ") end, case lists:member("multipart/mixed", AcceptedTypes) of false -> -- cgit v1.2.3 From 9902712bc739a12ae6e0de381341babd4a05c740 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Wed, 10 Nov 2010 13:35:46 +0000 Subject: Merged revision 1033456 from trunk: Updated ibrowse to version 2.1.0. It contains fixes for the following important issues: - https://github.com/cmullaparthi/ibrowse/issues/closed#issue/17 - https://github.com/cmullaparthi/ibrowse/issues/closed#issue/15 - https://github.com/cmullaparthi/ibrowse/issues/closed#issue/19 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1033457 13f79535-47bb-0310-9956-ffa450edef68 --- src/ibrowse/Makefile.am | 2 +- src/ibrowse/ibrowse.app.in | 2 +- src/ibrowse/ibrowse.erl | 33 +++++- src/ibrowse/ibrowse_http_client.erl | 211 +++++++++++++++++++++++------------- src/ibrowse/ibrowse_lib.erl | 2 +- src/ibrowse/ibrowse_test.erl | 45 +++++--- 6 files changed, 201 insertions(+), 94 deletions(-) diff --git a/src/ibrowse/Makefile.am b/src/ibrowse/Makefile.am index 39878f0a..8c5d3f8e 100644 --- a/src/ibrowse/Makefile.am +++ b/src/ibrowse/Makefile.am @@ -10,7 +10,7 @@ ## License for the specific language governing permissions and limitations under ## the License. -ibrowseebindir = $(localerlanglibdir)/ibrowse-2.0.1/ebin +ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.0/ebin ibrowse_file_collection = \ ibrowse.app.in \ diff --git a/src/ibrowse/ibrowse.app.in b/src/ibrowse/ibrowse.app.in index 8fc20663..e8580d10 100644 --- a/src/ibrowse/ibrowse.app.in +++ b/src/ibrowse/ibrowse.app.in @@ -1,6 +1,6 @@ {application, ibrowse, [{description, "HTTP client application"}, - {vsn, "2.0.1"}, + {vsn, "2.1.0"}, {modules, [ ibrowse, ibrowse_http_client, ibrowse_app, diff --git a/src/ibrowse/ibrowse.erl b/src/ibrowse/ibrowse.erl index 7f8d8bcf..1a42f4bc 100644 --- a/src/ibrowse/ibrowse.erl +++ b/src/ibrowse/ibrowse.erl @@ -7,8 +7,8 @@ %%%------------------------------------------------------------------- %% @author Chandrashekhar Mullaparthi %% @copyright 2005-2010 Chandrashekhar Mullaparthi -%% @version 2.0.1 -%% @doc The ibrowse application implements an HTTP 1.1 client. 
This +%% @version 2.1.0 +%% @doc The ibrowse application implements an HTTP 1.1 client in erlang. This %% module implements the API of the HTTP client. There is one named %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is %% one process to handle one TCP connection to a webserver @@ -87,6 +87,7 @@ send_req_direct/6, send_req_direct/7, stream_next/1, + stream_close/1, set_max_sessions/3, set_max_pipeline_size/3, set_dest/3, @@ -201,7 +202,11 @@ send_req(Url, Headers, Method, Body) -> %% dealing with large response bodies and/or slow links. In these %% cases, it might be hard to estimate how long a request will take to %% complete. In such cases, the client might want to timeout if no -%% data has been received on the link for a certain time interval. +%% data has been received on the link for a certain time interval. +%% +%% This value is also used to close connections which are not in use for +%% the specified timeout value. +%% %% %%
  • %% The connect_timeout option is to specify how long the @@ -458,6 +463,8 @@ ensure_bin({Fun, _} = Body) when is_function(Fun) -> Body. spawn_worker_process(Url) -> ibrowse_http_client:start(Url). +%% @doc Same as spawn_worker_process/1 but takes as input a Host and Port +%% instead of a URL. %% @spec spawn_worker_process(Host::string(), Port::integer()) -> {ok, pid()} spawn_worker_process(Host, Port) -> ibrowse_http_client:start({Host, Port}). @@ -468,6 +475,8 @@ spawn_worker_process(Host, Port) -> spawn_link_worker_process(Url) -> ibrowse_http_client:start_link(Url). +%% @doc Same as spawn_worker_process/2 except the the calling process +%% is linked to the worker process which is spawned. %% @spec spawn_link_worker_process(Host::string(), Port::integer()) -> {ok, pid()} spawn_link_worker_process(Host, Port) -> ibrowse_http_client:start_link({Host, Port}). @@ -524,6 +533,21 @@ stream_next(Req_id) -> ok end. +%% @doc Tell ibrowse to close the connection associated with the +%% specified stream. Should be used in conjunction with the +%% stream_to option. Note that all requests in progress on +%% the connection which is serving this Req_id will be aborted, and an +%% error returned. +%% @spec stream_close(Req_id :: req_id()) -> ok | {error, unknown_req_id} +stream_close(Req_id) -> + case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of + [] -> + {error, unknown_req_id}; + [{_, Pid}] -> + catch Pid ! {stream_close, Req_id}, + ok + end. + %% @doc Turn tracing on for the ibrowse process trace_on() -> ibrowse ! {trace, true}. @@ -553,6 +577,9 @@ all_trace_off() -> ibrowse ! all_trace_off, ok. +%% @doc Shows some internal information about load balancing. Info +%% about workers spawned using spawn_worker_process/2 or +%% spawn_link_worker_process/2 is not included. show_dest_status() -> Dests = lists:filter(fun({lb_pid, {Host, Port}, _}) when is_list(Host), is_integer(Port) -> diff --git a/src/ibrowse/ibrowse_http_client.erl b/src/ibrowse/ibrowse_http_client.erl index 2dd209da..5c3d5c9a 100644 --- a/src/ibrowse/ibrowse_http_client.erl +++ b/src/ibrowse/ibrowse_http_client.erl @@ -37,6 +37,7 @@ -include("ibrowse.hrl"). -record(state, {host, port, connect_timeout, + inactivity_timer_ref, use_proxy = false, proxy_auth_digest, ssl_options = [], is_ssl = false, socket, proxy_tunnel_setup = false, @@ -192,6 +193,12 @@ handle_info({stream_next, Req_id}, #state{socket = Socket, handle_info({stream_next, _Req_id}, State) -> {noreply, State}; +handle_info({stream_close, _Req_id}, State) -> + shutting_down(State), + do_close(State), + do_error_reply(State, closing_on_request), + {stop, normal, ok, State}; + handle_info({tcp_closed, _Sock}, State) -> do_trace("TCP connection closed by peer!~n", []), handle_sock_closed(State), @@ -221,6 +228,7 @@ handle_info({req_timedout, From}, State) -> end; handle_info(timeout, State) -> + do_trace("Inactivity timeout triggered. 
Shutting down connection~n", []), shutting_down(State), do_error_reply(State, req_timedout), {stop, normal, State}; @@ -273,8 +281,8 @@ handle_sock_data(Data, #state{status = get_header}=State) -> {stop, normal, State}; State_1 -> active_once(State_1), - set_inac_timer(State_1), - {noreply, State_1} + State_2 = set_inac_timer(State_1), + {noreply, State_2} end; handle_sock_data(Data, #state{status = get_body, @@ -293,8 +301,8 @@ handle_sock_data(Data, #state{status = get_body, {stop, normal, State}; State_1 -> active_once(State_1), - set_inac_timer(State_1), - {noreply, State_1} + State_2 = set_inac_timer(State_1), + {noreply, State_2} end; _ -> case parse_11_response(Data, State) of @@ -314,12 +322,12 @@ handle_sock_data(Data, #state{status = get_body, active_once(State_1) end, State_2 = State_1#state{interim_reply_sent = false}, - set_inac_timer(State_2), - {noreply, State_2}; + State_3 = set_inac_timer(State_2), + {noreply, State_3}; State_1 -> active_once(State_1), - set_inac_timer(State_1), - {noreply, State_1} + State_2 = set_inac_timer(State_1), + {noreply, State_2} end end. @@ -507,29 +515,37 @@ do_send(Req, #state{socket = Sock, is_ssl = false}) -> gen_tcp:send(Sock, Req). %% {fun_arity_0} | %% {fun_arity_1, term()} %% error() = term() -do_send_body(Source, State) when is_function(Source) -> - do_send_body({Source}, State); -do_send_body({Source}, State) when is_function(Source) -> - do_send_body1(Source, Source(), State); -do_send_body({Source, Source_state}, State) when is_function(Source) -> - do_send_body1(Source, Source(Source_state), State); -do_send_body(Body, State) -> +do_send_body(Source, State, TE) when is_function(Source) -> + do_send_body({Source}, State, TE); +do_send_body({Source}, State, TE) when is_function(Source) -> + do_send_body1(Source, Source(), State, TE); +do_send_body({Source, Source_state}, State, TE) when is_function(Source) -> + do_send_body1(Source, Source(Source_state), State, TE); +do_send_body(Body, State, _TE) -> do_send(Body, State). -do_send_body1(Source, Resp, State) -> +do_send_body1(Source, Resp, State, TE) -> case Resp of {ok, Data} -> - do_send(Data, State), - do_send_body({Source}, State); + do_send(maybe_chunked_encode(Data, TE), State), + do_send_body({Source}, State, TE); {ok, Data, New_source_state} -> - do_send(Data, State), - do_send_body({Source, New_source_state}, State); + do_send(maybe_chunked_encode(Data, TE), State), + do_send_body({Source, New_source_state}, State, TE); + eof when TE == true -> + do_send(<<"0\r\n\r\n">>, State), + ok; eof -> ok; Err -> Err end. +maybe_chunked_encode(Data, false) -> + Data; +maybe_chunked_encode(Data, true) -> + [ibrowse_lib:dec2hex(4, size(to_binary(Data))), "\r\n", Data, "\r\n"]. 
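%% Illustration (values assumed, not part of the patch): with the framing
%% above, a 5-byte chunk is sent as hex length, CRLF, data, CRLF --
%%   maybe_chunked_encode(<<"hello">>, true)
%%     = [ibrowse_lib:dec2hex(4, 5), "\r\n", <<"hello">>, "\r\n"]
%%     = ["0005", "\r\n", <<"hello">>, "\r\n"]
%% -- and do_send_body1/4 terminates the stream with "0\r\n\r\n" on eof.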
+ do_close(#state{socket = undefined}) -> ok; do_close(#state{socket = Sock, is_ssl = true, @@ -619,11 +635,13 @@ send_req_1(From, {Req, Body_1} = make_request(connect, Pxy_auth_headers, Path, Path, [], Options, State_1), + TE = is_chunked_encoding_specified(Options), trace_request(Req), case do_send(Req, State) of ok -> - case do_send_body(Body_1, State_1) of + case do_send_body(Body_1, State_1, TE) of ok -> + trace_request_body(Body_1), active_once(State_1), Ref = case Timeout of infinity -> @@ -636,8 +654,8 @@ send_req_1(From, send_timer = Ref, proxy_tunnel_setup = in_progress, tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout}]}, - set_inac_timer(State_1), - {noreply, State_2}; + State_3 = set_inac_timer(State_2), + {noreply, State_3}; Err -> shutting_down(State_1), do_trace("Send failed... Reason: ~p~n", [Err]), @@ -706,10 +724,12 @@ send_req_1(From, AbsPath, RelPath, Body, Options, State_1), trace_request(Req), do_setopts(Socket, Caller_socket_options, Is_ssl), + TE = is_chunked_encoding_specified(Options), case do_send(Req, State_1) of ok -> - case do_send_body(Body_1, State_1) of + case do_send_body(Body_1, State_1, TE) of ok -> + trace_request_body(Body_1), State_2 = inc_pipeline_counter(State_1), active_once(State_2), Ref = case Timeout of @@ -732,8 +752,8 @@ send_req_1(From, _ -> gen_server:reply(From, {ibrowse_req_id, ReqId}) end, - set_inac_timer(State_1), - {noreply, State_3}; + State_4 = set_inac_timer(State_3), + {noreply, State_4}; Err -> shutting_down(State_1), do_trace("Send failed... Reason: ~p~n", [Err]), @@ -759,6 +779,7 @@ maybe_modify_headers(#url{host = Host, port = Port} = Url, false -> case Port of 80 -> Host; + 443 -> Host; _ -> [Host, ":", integer_to_list(Port)] end; {value, {_, Host_h_val}} -> @@ -802,31 +823,42 @@ http_auth_digest(Username, Password) -> make_request(Method, Headers, AbsPath, RelPath, Body, Options, #state{use_proxy = UseProxy, is_ssl = Is_ssl}) -> HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})), + Fun1 = fun({X, Y}) when is_atom(X) -> + {to_lower(atom_to_list(X)), X, Y}; + ({X, Y}) when is_list(X) -> + {to_lower(X), X, Y} + end, + Headers_0 = [Fun1(X) || X <- Headers], Headers_1 = - case get_value(content_length, Headers, false) of - false when (Body == []) or - (Body == <<>>) or - is_tuple(Body) or - is_function(Body) -> - Headers; + case lists:keysearch("content-length", 1, Headers_0) of + false when (Body == []) orelse + (Body == <<>>) orelse + is_tuple(Body) orelse + is_function(Body) -> + Headers_0; false when is_binary(Body) -> - [{"content-length", integer_to_list(size(Body))} | Headers]; - false -> - [{"content-length", integer_to_list(length(Body))} | Headers]; + [{"content-length", "content-length", integer_to_list(size(Body))} | Headers_0]; + false when is_list(Body) -> + [{"content-length", "content-length", integer_to_list(length(Body))} | Headers_0]; _ -> - Headers + %% Content-Length is already specified + Headers_0 end, {Headers_2, Body_1} = - case get_value(transfer_encoding, Options, false) of + case is_chunked_encoding_specified(Options) of false -> - {Headers_1, Body}; - {chunked, ChunkSize} -> - {[{X, Y} || {X, Y} <- Headers_1, - X /= "Content-Length", - X /= "content-length", - X /= content_length] ++ + {[{Y, Z} || {_, Y, Z} <- Headers_1], Body}; + true -> + Chunk_size_1 = case get_value(transfer_encoding, Options) of + chunked -> + 5120; + {chunked, Chunk_size} -> + Chunk_size + end, + {[{Y, Z} || {X, Y, Z} <- Headers_1, + X /= "content-length"] ++ [{"Transfer-Encoding", 
"chunked"}], - chunk_request_body(Body, ChunkSize)} + chunk_request_body(Body, Chunk_size_1)} end, Headers_3 = cons_headers(Headers_2), Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of @@ -842,6 +874,16 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options, end, {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_3, crnl()], Body_1}. +is_chunked_encoding_specified(Options) -> + case get_value(transfer_encoding, Options, false) of + false -> + false; + {chunked, _} -> + true; + chunked -> + true + end. + http_vsn_string({0,9}) -> "HTTP/0.9"; http_vsn_string({1,0}) -> "HTTP/1.0"; http_vsn_string({1,1}) -> "HTTP/1.1". @@ -873,6 +915,9 @@ encode_headers([{Name,Val} | T], Acc) when is_atom(Name) -> encode_headers([], Acc) -> lists:reverse(Acc). +chunk_request_body(Body, _ChunkSize) when is_tuple(Body) orelse + is_function(Body) -> + Body; chunk_request_body(Body, ChunkSize) -> chunk_request_body(Body, ChunkSize, []). @@ -1060,7 +1105,7 @@ upgrade_to_ssl(#state{socket = Socket, send_queued_requests([], State) -> do_trace("Sent all queued requests via SSL connection~n", []), - State#state{tunnel_setup_queue = done}; + State#state{tunnel_setup_queue = []}; send_queued_requests([{From, Url, Headers, Method, Body, Options, Timeout} | Q], State) -> case send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State) of @@ -1217,7 +1262,6 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId, reply_buffer = RepBuf, recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false -> Body = RepBuf, - State_1 = set_cur_request(State), file:close(Fd), ResponseBody = case TmpFilename of undefined -> @@ -1232,9 +1276,9 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId, false -> {ok, SCode, Resp_headers_1, ResponseBody} end, - State_2 = do_reply(State_1, From, StreamTo, ReqId, Resp_format, Reply), + State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply), cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}), - State_2; + set_cur_request(State_1); handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId, response_format = Resp_format, options = Options}, @@ -1245,7 +1289,6 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId, reply_buffer = RepBuf, send_timer = ReqTimer} = State) -> Body = RepBuf, -%% State_1 = set_cur_request(State), {Resp_headers_1, Raw_headers_1} = maybe_add_custom_headers(Resp_headers, Raw_headers, Options), Reply = case get_value(give_raw_headers, Options, false) of true -> @@ -1253,15 +1296,8 @@ handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId, false -> {ok, SCode, Resp_headers_1, Body} end, - State_1 = case get(conn_close) of - "close" -> - do_reply(State, From, StreamTo, ReqId, Resp_format, Reply), - exit(normal); - _ -> - State_1_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply), - cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}), - State_1_1 - end, + State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply), + cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}), set_cur_request(State_1). 
reset_state(State) -> @@ -1353,6 +1389,8 @@ parse_status_line([32 | T], get_prot_vsn, ProtVsn, StatCode) -> parse_status_line(T, get_status_code, ProtVsn, StatCode); parse_status_line([32 | T], get_status_code, ProtVsn, StatCode) -> {ok, lists:reverse(ProtVsn), lists:reverse(StatCode), T}; +parse_status_line([], get_status_code, ProtVsn, StatCode) -> + {ok, lists:reverse(ProtVsn), lists:reverse(StatCode), []}; parse_status_line([H | T], get_prot_vsn, ProtVsn, StatCode) -> parse_status_line(T, get_prot_vsn, [H|ProtVsn], StatCode); parse_status_line([H | T], get_status_code, ProtVsn, StatCode) -> @@ -1710,36 +1748,61 @@ get_stream_chunk_size(Options) -> end. set_inac_timer(State) -> - set_inac_timer(State, get_inac_timeout(State)). - -set_inac_timer(_State, Timeout) when is_integer(Timeout) -> - TimerRef = erlang:send_after(Timeout, self(), timeout), - case erlang:put(inac_timer, TimerRef) of - OldTimer when is_reference(OldTimer) -> - erlang:cancel_timer(OldTimer), - receive timeout -> ok after 0 -> ok end; - _ -> - ok - end, - TimerRef; -set_inac_timer(_, _) -> - undefined. + cancel_timer(State#state.inactivity_timer_ref), + set_inac_timer(State#state{inactivity_timer_ref = undefined}, + get_inac_timeout(State)). + +set_inac_timer(State, Timeout) when is_integer(Timeout) -> + Ref = erlang:send_after(Timeout, self(), timeout), + State#state{inactivity_timer_ref = Ref}; +set_inac_timer(State, _) -> + State. get_inac_timeout(#state{cur_req = #request{options = Opts}}) -> get_value(inactivity_timeout, Opts, infinity); get_inac_timeout(#state{cur_req = undefined}) -> - infinity. + case ibrowse:get_config_value(inactivity_timeout, undefined) of + Val when is_integer(Val) -> + Val; + _ -> + case application:get_env(ibrowse, inactivity_timeout) of + {ok, Val} when is_integer(Val), Val > 0 -> + Val; + _ -> + 10000 + end + end. trace_request(Req) -> case get(my_trace_flag) of true -> %%Avoid the binary operations if trace is not on... - NReq = binary_to_list(list_to_binary(Req)), + NReq = to_binary(Req), do_trace("Sending request: ~n" "--- Request Begin ---~n~s~n" "--- Request End ---~n", [NReq]); _ -> ok end. +trace_request_body(Body) -> + case get(my_trace_flag) of + true -> + %%Avoid the binary operations if trace is not on... + NBody = to_binary(Body), + case size(NBody) > 1024 of + true -> + ok; + false -> + do_trace("Sending request body: ~n" + "--- Request Body Begin ---~n~s~n" + "--- Request Body End ---~n", [NBody]) + end; + false -> + ok + end. + to_integer(X) when is_list(X) -> list_to_integer(X); to_integer(X) when is_integer(X) -> X. + +to_binary(X) when is_list(X) -> list_to_binary(X); +to_binary(X) when is_binary(X) -> X. 
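The new stream_close/1 API pairs with this: a consumer streaming with
{stream_to, {Pid, once}} can abandon a large response early instead of
reading it to the end, at the cost of aborting any other requests in flight
on that connection. A hedged usage sketch (the URL and receive logic are
assumptions, not part of the patch):

    {ibrowse_req_id, ReqId} =
        ibrowse:send_req("http://example.com/big", [], get, [],
                         [{stream_to, {self(), once}}]),
    receive
        {ibrowse_async_headers, ReqId, Status, _Headers} ->
            io:format("got ~s, giving up on the body~n", [Status])
    end,
    ok = ibrowse:stream_close(ReqId).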
diff --git a/src/ibrowse/ibrowse_lib.erl b/src/ibrowse/ibrowse_lib.erl index fbb9c34b..c463c7bd 100644 --- a/src/ibrowse/ibrowse_lib.erl +++ b/src/ibrowse/ibrowse_lib.erl @@ -208,7 +208,7 @@ parse_url(Url) -> parse_url([$:, $/, $/ | _], get_protocol, Url, []) -> {invalid_uri_1, Url}; parse_url([$:, $/, $/ | T], get_protocol, Url, TmpAcc) -> - Prot = list_to_atom(lists:reverse(TmpAcc)), + Prot = list_to_existing_atom(lists:reverse(TmpAcc)), parse_url(T, get_username, Url#url{protocol = Prot}, []); diff --git a/src/ibrowse/ibrowse_test.erl b/src/ibrowse/ibrowse_test.erl index e7d6e59e..3ad76603 100644 --- a/src/ibrowse/ibrowse_test.erl +++ b/src/ibrowse/ibrowse_test.erl @@ -217,14 +217,18 @@ dump_errors(Key, Iod) -> {"http://jigsaw.w3.org/HTTP/300/", get}, {"http://jigsaw.w3.org/HTTP/Basic/", get, [{basic_auth, {"guest", "guest"}}]}, {"http://jigsaw.w3.org/HTTP/CL/", get}, - {"http://www.httpwatch.com/httpgallery/chunked/", get} + {"http://www.httpwatch.com/httpgallery/chunked/", get}, + {"https://github.com", get, [{ssl_options, [{depth, 2}]}]} ]). unit_tests() -> unit_tests([]). unit_tests(Options) -> + application:start(crypto), + application:start(public_key), application:start(ssl), + ibrowse:start(), Options_1 = Options ++ [{connect_timeout, 5000}], {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]), receive @@ -249,32 +253,45 @@ verify_chunked_streaming() -> verify_chunked_streaming([]). verify_chunked_streaming(Options) -> + io:format("~nVerifying that chunked streaming is working...~n", []), Url = "http://www.httpwatch.com/httpgallery/chunked/", - io:format("URL: ~s~n", [Url]), - io:format("Fetching data without streaming...~n", []), + io:format(" URL: ~s~n", [Url]), + io:format(" Fetching data without streaming...~n", []), Result_without_streaming = ibrowse:send_req( Url, [], get, [], [{response_format, binary} | Options]), - io:format("Fetching data with streaming as list...~n", []), + io:format(" Fetching data with streaming as list...~n", []), Async_response_list = do_async_req_list( Url, get, [{response_format, list} | Options]), - io:format("Fetching data with streaming as binary...~n", []), + io:format(" Fetching data with streaming as binary...~n", []), Async_response_bin = do_async_req_list( Url, get, [{response_format, binary} | Options]), - io:format("Fetching data with streaming as binary, {active, once}...~n", []), + io:format(" Fetching data with streaming as binary, {active, once}...~n", []), Async_response_bin_once = do_async_req_list( Url, get, [once, {response_format, binary} | Options]), - compare_responses(Result_without_streaming, Async_response_list, Async_response_bin), - compare_responses(Result_without_streaming, Async_response_list, Async_response_bin_once). + Res1 = compare_responses(Result_without_streaming, Async_response_list, Async_response_bin), + Res2 = compare_responses(Result_without_streaming, Async_response_list, Async_response_bin_once), + case {Res1, Res2} of + {success, success} -> + io:format(" Chunked streaming working~n", []); + _ -> + ok + end. test_chunked_streaming_once() -> test_chunked_streaming_once([]). test_chunked_streaming_once(Options) -> + io:format("~nTesting chunked streaming with the {stream_to, {Pid, once}} option...~n", []), Url = "http://www.httpwatch.com/httpgallery/chunked/", - io:format("URL: ~s~n", [Url]), - io:format("Fetching data with streaming as binary, {active, once}...~n", []), - do_async_req_list(Url, get, [once, {response_format, binary} | Options]). 
+ io:format(" URL: ~s~n", [Url]), + io:format(" Fetching data with streaming as binary, {active, once}...~n", []), + case do_async_req_list(Url, get, [once, {response_format, binary} | Options]) of + {ok, _, _, _} -> + io:format(" Success!~n", []); + Err -> + io:format(" Fail: ~p~n", [Err]) + end. compare_responses({ok, St_code, _, Body}, {ok, St_code, _, Body}, {ok, St_code, _, Body}) -> success; @@ -310,7 +327,7 @@ do_async_req_list(Url, Method, Options) -> {Pid,_} = erlang:spawn_monitor(?MODULE, i_do_async_req_list, [self(), Url, Method, Options ++ [{stream_chunk_size, 1000}]]), - io:format("Spawned process ~p~n", [Pid]), +%% io:format("Spawned process ~p~n", [Pid]), wait_for_resp(Pid). wait_for_resp(Pid) -> @@ -354,7 +371,7 @@ wait_for_async_resp(Req_id, Options, Acc_Stat_code, Acc_Headers, Body) -> maybe_stream_next(Req_id, Options), wait_for_async_resp(Req_id, Options, StatCode, Headers, Body); {ibrowse_async_response_end, Req_id} -> - io:format("Recvd end of response.~n", []), + %% io:format("Recvd end of response.~n", []), Body_1 = list_to_binary(lists:reverse(Body)), {ok, Acc_Stat_code, Acc_Headers, Body_1}; {ibrowse_async_response, Req_id, Data} -> @@ -384,7 +401,7 @@ execute_req(Url, Method, Options) -> {ok, SCode, _H, _B} -> io:format("Status code: ~p~n", [SCode]); Err -> - io:format("Err -> ~p~n", [Err]) + io:format("~p~n", [Err]) end. drv_ue_test() -> -- cgit v1.2.3 From 3a3bb5e8697bc02bbfb793b700c51cdc1e59737e Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Wed, 10 Nov 2010 18:52:31 +0000 Subject: COUCHDB-945 - ensure validation funs are still applied after compaction. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1033642 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/security_validation.js | 21 ++++++++++++++------- src/couchdb/couch_db_updater.erl | 5 +++-- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/share/www/script/test/security_validation.js b/share/www/script/test/security_validation.js index e0ab17d6..dd3b202e 100644 --- a/share/www/script/test/security_validation.js +++ b/share/www/script/test/security_validation.js @@ -136,13 +136,20 @@ couchTests.security_validation = function(debug) { doc.foo=2; T(userDb.save(doc).ok); - // Save a document that's missing an author field. - try { - userDb.save({foo:1}); - T(false && "Can't get here. Should have thrown an error 2"); - } catch (e) { - T(e.error == "forbidden"); - T(userDb.last_req.status == 403); + // Save a document that's missing an author field (before and after compaction) + for (var i=0; i<2; i++) { + try { + userDb.save({foo:1}); + T(false && "Can't get here. Should have thrown an error 2"); + } catch (e) { + T(e.error == "forbidden"); + T(userDb.last_req.status == 403); + } + // compact. 
+ T(db.compact().ok); + T(db.last_req.status == 202); + // compaction isn't instantaneous, loop until done + while (db.info().compact_running) {}; } // Now attempt to update the document as a different user, Jan diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl index 928c305d..3571fd5e 100644 --- a/src/couchdb/couch_db_updater.erl +++ b/src/couchdb/couch_db_updater.erl @@ -186,9 +186,10 @@ handle_cast({compact_done, CompactFilepath}, #db{filepath=Filepath}=Db) -> couch_file:delete(RootDir, Filepath), ok = file:rename(CompactFilepath, Filepath), close_db(Db), - ok = gen_server:call(Db#db.main_pid, {db_updated, NewDb2}, infinity), + NewDb3 = refresh_validate_doc_funs(NewDb2), + ok = gen_server:call(Db#db.main_pid, {db_updated, NewDb3}, infinity), ?LOG_INFO("Compaction for db \"~s\" completed.", [Db#db.name]), - {noreply, NewDb2#db{compactor_pid=nil}}; + {noreply, NewDb3#db{compactor_pid=nil}}; false -> ?LOG_INFO("Compaction file still behind main file " "(update seq=~p. compact update seq=~p). Retrying.", -- cgit v1.2.3 From ddcf3092bf4b31e1698240c9086c56ccc43e9877 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Fri, 12 Nov 2010 12:33:59 +0000 Subject: Backport revision 1034374 from trunk: Added test for COUCHDB-868 - ensure that a pull replication of design documents with attachments works with HTTP basic auth git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1034376 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/replication.js | 156 +++++++++++++++++++++++++++++++++++ 1 file changed, 156 insertions(+) diff --git a/share/www/script/test/replication.js b/share/www/script/test/replication.js index d2b3164b..3c68b858 100644 --- a/share/www/script/test/replication.js +++ b/share/www/script/test/replication.js @@ -527,4 +527,160 @@ couchTests.replication = function(debug) { T(docFoo4 === null); } + // test for COUCHDB-868 - design docs' attachments not getting replicated + // when doing a pull replication with HTTP basic auth + dbA = new CouchDB("test_suite_db_a"); + dbB = new CouchDB("test_suite_db_b"); + var usersDb = new CouchDB("test_suite_auth"); + var lorem = CouchDB.request( + "GET", "/_utils/script/test/lorem.txt").responseText; + var lorem_b64 = CouchDB.request( + "GET", "/_utils/script/test/lorem_b64.txt").responseText; + + usersDb.deleteDb(); + usersDb.createDb(); + dbA.deleteDb(); + dbA.createDb(); + dbB.deleteDb(); + dbB.createDb(); + + var atts_ddoc = { + _id: "_design/i_have_atts", + language: "javascript" + }; + T(dbA.save(atts_ddoc).ok); + + var rev = atts_ddoc._rev; + var att_1_name = "lorem.txt"; + var att_2_name = "lorem.dat"; + var xhr = CouchDB.request( + "PUT", "/" + dbA.name + "/" + atts_ddoc._id + "/" + att_1_name + "?rev=" + rev, { + headers: {"Content-Type": "text/plain;charset=utf-8"}, + body: lorem + }); + rev = JSON.parse(xhr.responseText).rev; + T(xhr.status === 201); + xhr = CouchDB.request( + "PUT", "/" + dbA.name + "/" + atts_ddoc._id + "/" + att_2_name + "?rev=" + rev, { + headers: {"Content-Type": "application/data"}, + body: lorem_b64 + }); + T(xhr.status === 201); + + var fdmananaUserDoc = CouchDB.prepareUserDoc({ + name: "fdmanana", + roles: ["reader"] + }, "qwerty"); + T(usersDb.save(fdmananaUserDoc).ok); + + T(dbA.setSecObj({ + admins: { + names: [], + roles: ["admin"] + }, + readers: { + names: [], + roles: ["reader"] + } + }).ok); + T(dbB.setSecObj({ + admins: { + names: ["fdmanana"], + roles: [] + } + }).ok); + + var server_config = [ + { + section: "couch_httpd_auth", + 
key: "authentication_db", + value: usersDb.name + }, + // to prevent admin party mode + { + section: "admins", + key: "joe", + value: "erlang" + } + ]; + + var test_fun = function() { + T(CouchDB.login("fdmanana", "qwerty").ok); + T(CouchDB.session().userCtx.name === "fdmanana"); + T(CouchDB.session().userCtx.roles.indexOf("_admin") === -1); + + var repResult = CouchDB.replicate( + "http://fdmanana:qwerty@" + host + "/" + dbA.name, + dbB.name + ); + T(repResult.ok === true); + T(repResult.history instanceof Array); + T(repResult.history.length === 1); + T(repResult.history[0].docs_written === 1); + T(repResult.history[0].docs_read === 1); + T(repResult.history[0].doc_write_failures === 0); + + var atts_ddoc_copy = dbB.open(atts_ddoc._id); + T(atts_ddoc_copy !== null); + T(typeof atts_ddoc_copy._attachments === "object"); + T(atts_ddoc_copy._attachments !== null); + T(att_1_name in atts_ddoc_copy._attachments); + T(att_2_name in atts_ddoc_copy._attachments); + + var xhr = CouchDB.request("GET", "/" + dbB.name + "/" + atts_ddoc._id + "/" + att_1_name); + T(xhr.status === 200); + T(xhr.responseText === lorem); + + xhr = CouchDB.request("GET", "/" + dbB.name + "/" + atts_ddoc._id + "/" + att_2_name); + T(xhr.status === 200); + T(xhr.responseText === lorem_b64); + + CouchDB.logout(); + T(CouchDB.login("joe", "erlang").ok); + T(dbA.setSecObj({ + admins: { + names: [], + roles: ["bar"] + }, + readers: { + names: [], + roles: ["foo"] + } + }).ok); + T(dbB.deleteDb().ok === true); + T(dbB.createDb().ok === true); + T(dbB.setSecObj({ + admins: { + names: ["fdmanana"], + roles: [] + } + }).ok); + CouchDB.logout(); + + T(CouchDB.login("fdmanana", "qwerty").ok); + T(CouchDB.session().userCtx.name === "fdmanana"); + T(CouchDB.session().userCtx.roles.indexOf("_admin") === -1); + try { + repResult = CouchDB.replicate( + "http://fdmanana:qwerty@" + host + "/" + dbA.name, + dbB.name + ); + T(false, "replication should have failed"); + } catch(x) { + T(x.error === "db_not_found"); + } + + atts_ddoc_copy = dbB.open(atts_ddoc._id); + T(atts_ddoc_copy === null); + + CouchDB.logout(); + T(CouchDB.login("joe", "erlang").ok); + }; + + run_on_modified_server(server_config, test_fun); + + // cleanup + dbA.deleteDb(); + dbB.deleteDb(); + usersDb.deleteDb(); }; -- cgit v1.2.3 From c65b3efe303bd5a5321afa29424569357506fb8f Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Fri, 12 Nov 2010 12:45:44 +0000 Subject: Merged revision 1034380 from trunk: Use lists:min/1 and lists:max/1 instead of erlang:min/2 and erlang:max/2. The later are not available in earlier OTP releases. Closes COUCHDB-856. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1034381 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_auth_cache.erl | 2 +- src/couchdb/couch_query_servers.erl | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/couchdb/couch_auth_cache.erl b/src/couchdb/couch_auth_cache.erl index 078bfcc1..0800d1ab 100644 --- a/src/couchdb/couch_auth_cache.erl +++ b/src/couchdb/couch_auth_cache.erl @@ -175,7 +175,7 @@ handle_call({new_max_cache_size, NewSize}, _From, State) -> end, NewState = State#state{ max_cache_size = NewSize, - cache_size = erlang:min(NewSize, State#state.cache_size) + cache_size = lists:min([NewSize, State#state.cache_size]) }, {reply, ok, NewState}; diff --git a/src/couchdb/couch_query_servers.erl b/src/couchdb/couch_query_servers.erl index c4f1bf0b..5f97cbd3 100644 --- a/src/couchdb/couch_query_servers.erl +++ b/src/couchdb/couch_query_servers.erl @@ -166,7 +166,7 @@ builtin_sum_rows(KVs) -> builtin_stats(reduce, [[_,First]|Rest]) when is_number(First) -> Stats = lists:foldl(fun([_K,V], {S,C,Mi,Ma,Sq}) when is_number(V) -> - {S+V, C+1, erlang:min(Mi,V), erlang:max(Ma,V), Sq+(V*V)}; + {S+V, C+1, lists:min([Mi, V]), lists:max([Ma, V]), Sq+(V*V)}; (_, _) -> throw({invalid_value, <<"builtin _stats function requires map values to be numbers">>}) @@ -178,7 +178,7 @@ builtin_stats(rereduce, [[_,First]|Rest]) -> {[{sum,Sum0}, {count,Cnt0}, {min,Min0}, {max,Max0}, {sumsqr,Sqr0}]} = First, Stats = lists:foldl(fun([_K,Red], {S,C,Mi,Ma,Sq}) -> {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]} = Red, - {Sum+S, Cnt+C, erlang:min(Min,Mi), erlang:max(Max,Ma), Sqr+Sq} + {Sum+S, Cnt+C, lists:min([Min, Mi]), lists:max([Max, Ma]), Sqr+Sq} end, {Sum0,Cnt0,Min0,Max0,Sqr0}, Rest), {Sum, Cnt, Min, Max, Sqr} = Stats, {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]}. -- cgit v1.2.3 From 341a4fc4f95b81c2c948666308e2856f8060f300 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Fri, 12 Nov 2010 14:30:12 +0000 Subject: Merged revision 1034404 from trunk: Yet another ibrowse fix. Patch submitted upstream: https://github.com/cmullaparthi/ibrowse/issues/issue/20 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1034405 13f79535-47bb-0310-9956-ffa450edef68 --- src/ibrowse/ibrowse_http_client.erl | 10 +++++----- src/ibrowse/ibrowse_lib.erl | 12 ++++-------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/src/ibrowse/ibrowse_http_client.erl b/src/ibrowse/ibrowse_http_client.erl index 5c3d5c9a..5ff323cd 100644 --- a/src/ibrowse/ibrowse_http_client.erl +++ b/src/ibrowse/ibrowse_http_client.erl @@ -544,7 +544,7 @@ do_send_body1(Source, Resp, State, TE) -> maybe_chunked_encode(Data, false) -> Data; maybe_chunked_encode(Data, true) -> - [ibrowse_lib:dec2hex(4, size(to_binary(Data))), "\r\n", Data, "\r\n"]. + [ibrowse_lib:dec2hex(byte_size(to_binary(Data))), "\r\n", Data, "\r\n"]. 
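%% Why the arity change matters (illustrative values): dec2hex/2 always
%% emitted exactly four hex digits, silently truncating the length field of
%% any chunk larger than 16#FFFF bytes --
%%   old: ibrowse_lib:dec2hex(4, 70000) -> "1170"   (wrong framing)
%%   new: ibrowse_lib:dec2hex(70000)    -> "11170"  (width grows as needed)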
 do_close(#state{socket = undefined}) ->  ok;
 do_close(#state{socket = Sock,
@@ -927,23 +927,23 @@ chunk_request_body(Body, _ChunkSize, Acc) when Body == <<>>; Body == [] ->
 chunk_request_body(Body, ChunkSize, Acc) when is_binary(Body),
                                               size(Body) >= ChunkSize ->
     <<ChunkBody:ChunkSize/binary, Rest/binary>> = Body,
-    Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
+    Chunk = [ibrowse_lib:dec2hex(ChunkSize),"\r\n",
              ChunkBody, "\r\n"],
     chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
 chunk_request_body(Body, _ChunkSize, Acc) when is_binary(Body) ->
     BodySize = size(Body),
-    Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
+    Chunk = [ibrowse_lib:dec2hex(BodySize),"\r\n",
              Body, "\r\n"],
     LastChunk = "0\r\n",
     lists:reverse(["\r\n", LastChunk, Chunk | Acc]);
 chunk_request_body(Body, ChunkSize, Acc) when length(Body) >= ChunkSize ->
     {ChunkBody, Rest} = split_list_at(Body, ChunkSize),
-    Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
+    Chunk = [ibrowse_lib:dec2hex(ChunkSize),"\r\n",
              ChunkBody, "\r\n"],
     chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
 chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
     BodySize = length(Body),
-    Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
+    Chunk = [ibrowse_lib:dec2hex(BodySize),"\r\n",
              Body, "\r\n"],
     LastChunk = "0\r\n",
     lists:reverse(["\r\n", LastChunk, Chunk | Acc]).
diff --git a/src/ibrowse/ibrowse_lib.erl b/src/ibrowse/ibrowse_lib.erl
index c463c7bd..e913adbe 100644
--- a/src/ibrowse/ibrowse_lib.erl
+++ b/src/ibrowse/ibrowse_lib.erl
@@ -19,7 +19,7 @@
     url_encode/1,
     decode_rfc822_date/1,
     status_code/1,
-    dec2hex/2,
+    dec2hex/1,
     drv_ue/1,
     drv_ue/2,
     encode_base64/1,
@@ -163,14 +163,10 @@ status_code(507) -> insufficient_storage;
 status_code(X) when is_list(X) -> status_code(list_to_integer(X));
 status_code(_)   -> unknown_status_code.

-%% @doc dec2hex taken from gtk.erl in std dist
-%% M = integer() -- number of hex digits required
+%% @doc Returns a string with the hexadecimal representation of a given decimal.
 %% N = integer() -- the number to represent as hex
-%% @spec dec2hex(M::integer(), N::integer()) -> string()
-dec2hex(M,N) -> dec2hex(M,N,[]).
-
-dec2hex(0,_N,Ack) -> Ack;
-dec2hex(M,N,Ack) -> dec2hex(M-1,N bsr 4,[d2h(N band 15)|Ack]).
+%% @spec dec2hex(N::integer()) -> string()
+dec2hex(N) -> lists:flatten(io_lib:format("~.16B", [N])).

 %% @doc Implements the base64 encoding algorithm. The output data type matches in the input data type.
 %% @spec encode_base64(In) -> Out
-- cgit v1.2.3


From 856d5b1ed17ad621543c2ef0d6d369dee9f206b0 Mon Sep 17 00:00:00 2001
From: Jan Lehnardt
Date: Sun, 14 Nov 2010 23:16:35 +0000
Subject: Allow reduce=false parameter in map-only views.

Patch by Jason Smith. Closes COUCHDB-881.

git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1035098 13f79535-47bb-0310-9956-ffa450edef68
---
 share/www/script/test/view_errors.js | 25 ++++++++++++++++++++++---
 src/couchdb/couch_httpd_view.erl     |  2 ++
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/share/www/script/test/view_errors.js b/share/www/script/test/view_errors.js
index a211c061..c05000b7 100644
--- a/share/www/script/test/view_errors.js
+++ b/share/www/script/test/view_errors.js
@@ -74,9 +74,6 @@ couchTests.view_errors = function(debug) {
       T(e.error == "query_parse_error");
     }

-    // reduce=false on map views doesn't work, so group=true will
-    // never throw for temp reduce views.
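    // After this change (illustrative requests; db and view names taken
    // from the tests below):
    //   GET /test_suite_db/_design/test/_view/no_reduce?reduce=false -> 200
    //   GET /test_suite_db/_design/test/_view/no_reduce?group=true   -> 400
    // i.e. reduce=false becomes an accepted no-op on a map-only view,
    // while group and group_level still require a real reduce function.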
- var designDoc = { _id:"_design/test", language: "javascript", @@ -104,6 +101,15 @@ couchTests.view_errors = function(debug) { db.view("test/no_reduce", {group: true}); T(0 == 1); } catch(e) { + T(db.last_req.status == 400); + T(e.error == "query_parse_error"); + } + + try { + db.view("test/no_reduce", {group_level: 1}); + T(0 == 1); + } catch(e) { + T(db.last_req.status == 400); T(e.error == "query_parse_error"); } @@ -115,10 +121,23 @@ couchTests.view_errors = function(debug) { T(e.error == "query_parse_error"); } + db.view("test/no_reduce", {reduce: false}); + TEquals(200, db.last_req.status, "reduce=false for map views (without" + + " group or group_level) is allowed"); + try { db.view("test/with_reduce", {group: true, reduce: false}); T(0 == 1); } catch(e) { + T(db.last_req.status == 400); + T(e.error == "query_parse_error"); + } + + try { + db.view("test/with_reduce", {group_level: 1, reduce: false}); + T(0 == 1); + } catch(e) { + T(db.last_req.status == 400); T(e.error == "query_parse_error"); } diff --git a/src/couchdb/couch_httpd_view.erl b/src/couchdb/couch_httpd_view.erl index e1a0dfad..cb387d1b 100644 --- a/src/couchdb/couch_httpd_view.erl +++ b/src/couchdb/couch_httpd_view.erl @@ -365,6 +365,8 @@ validate_view_query(group_level, Value, Args) -> end; validate_view_query(inclusive_end, Value, Args) -> Args#view_query_args{inclusive_end=Value}; +validate_view_query(reduce, false, Args) -> + Args; validate_view_query(reduce, _, Args) -> case Args#view_query_args.view_type of map -> -- cgit v1.2.3 From 678648f2c176149ae9cced1c53f6d3b5760595fa Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sun, 14 Nov 2010 23:36:16 +0000 Subject: Merged revision 1035101 from trunk: Added missing semicolons to replication.js test. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1035102 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/replication.js | 40 ++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/share/www/script/test/replication.js b/share/www/script/test/replication.js index 3c68b858..00773f5c 100644 --- a/share/www/script/test/replication.js +++ b/share/www/script/test/replication.js @@ -22,14 +22,14 @@ couchTests.replication = function(debug) { target:"test_suite_db_b"}, {source:"http://" + host + "/test_suite_db_a", target:"http://" + host + "/test_suite_db_b"} - ] + ]; var dbA = new CouchDB("test_suite_db_a", {"X-Couch-Full-Commit":"false"}); var dbB = new CouchDB("test_suite_db_b", {"X-Couch-Full-Commit":"false"}); var numDocs = 10; var xhr; for (var testPair = 0; testPair < dbPairs.length; testPair++) { - var A = dbPairs[testPair].source - var B = dbPairs[testPair].target + var A = dbPairs[testPair].source; + var B = dbPairs[testPair].target; dbA.deleteDb(); dbA.createDb(); @@ -41,7 +41,7 @@ couchTests.replication = function(debug) { test_template: new function () { this.init = function(dbA, dbB) { // before anything has happened - } + }; this.afterAB1 = function(dbA, dbB) { // called after replicating src=A tgt=B first time. 
}; @@ -165,20 +165,20 @@ couchTests.replication = function(debug) { this.afterAB1 = function(dbA, dbB) { var xhr = CouchDB.request("GET", "/test_suite_db_a/bin_doc/foo%2Bbar.txt"); - T(xhr.responseText == "This is a base64 encoded text") + T(xhr.responseText == "This is a base64 encoded text"); xhr = CouchDB.request("GET", "/test_suite_db_b/bin_doc/foo%2Bbar.txt"); - T(xhr.responseText == "This is a base64 encoded text") + T(xhr.responseText == "This is a base64 encoded text"); // and the design-doc xhr = CouchDB.request("GET", "/test_suite_db_a/_design/with_bin/foo%2Bbar.txt"); - T(xhr.responseText == "This is a base64 encoded text") + T(xhr.responseText == "This is a base64 encoded text"); xhr = CouchDB.request("GET", "/test_suite_db_b/_design/with_bin/foo%2Bbar.txt"); - T(xhr.responseText == "This is a base64 encoded text") + T(xhr.responseText == "This is a base64 encoded text"); }; }, @@ -209,8 +209,8 @@ couchTests.replication = function(debug) { var docB = dbB.open("foo", {conflicts: true, deleted_conflicts: true}); // We should have no conflicts this time - T(docA._conflicts === undefined) - T(docB._conflicts === undefined); + T(typeof docA._conflicts === "undefined"); + T(typeof docB._conflicts === "undefined"); // They show up as deleted conflicts instead T(docA._deleted_conflicts[0] == docB._deleted_conflicts[0]); @@ -229,7 +229,7 @@ couchTests.replication = function(debug) { var seqA = result.source_last_seq; T(0 == result.history[0].start_last_seq); - T(result.history[1] === undefined) + T(typeof result.history[1] === "undefined"); for(test in repTests) { if(repTests[test].afterAB1) repTests[test].afterAB1(dbA, dbB); @@ -239,7 +239,7 @@ couchTests.replication = function(debug) { var seqB = result.source_last_seq; T(0 == result.history[0].start_last_seq); - T(result.history[1] === undefined) + T(typeof result.history[1] === "undefined"); for(test in repTests) { if(repTests[test].afterBA1) repTests[test].afterBA1(dbA, dbB); @@ -252,7 +252,7 @@ couchTests.replication = function(debug) { T(seqA < result2.source_last_seq); T(seqA == result2.history[0].start_last_seq); - T(result2.history[1].end_last_seq == seqA) + T(result2.history[1].end_last_seq == seqA); seqA = result2.source_last_seq; @@ -260,11 +260,11 @@ couchTests.replication = function(debug) { if(repTests[test].afterAB2) repTests[test].afterAB2(dbA, dbB); } - result = CouchDB.replicate(B, A) + result = CouchDB.replicate(B, A); T(seqB < result.source_last_seq); T(seqB == result.history[0].start_last_seq); - T(result.history[1].end_last_seq == seqB) + T(result.history[1].end_last_seq == seqB); seqB = result.source_last_seq; @@ -306,21 +306,21 @@ couchTests.replication = function(debug) { var continuousResult = CouchDB.replicate(dbA.name, "test_suite_db_b", { body: {"continuous": true} }); - T(continuousResult.ok) - T(continuousResult._local_id) + T(continuousResult.ok); + T(continuousResult._local_id); var cancelResult = CouchDB.replicate(dbA.name, "test_suite_db_b", { body: {"cancel": true} }); - T(cancelResult.ok) - T(continuousResult._local_id == cancelResult._local_id) + T(cancelResult.ok); + T(continuousResult._local_id == cancelResult._local_id); try { var cancelResult2 = CouchDB.replicate(dbA.name, "test_suite_db_b", { body: {"cancel": true} }); } catch (e) { - T(e.error == "not_found") + T(e.error == "not_found"); } // test replication object option doc_ids -- cgit v1.2.3 From 4b3568a58d37aa48c4a2027a21a34b38782aeec7 Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Mon, 15 Nov 2010 00:06:29 +0000 Subject: Avoid 
lengthy stack traces for log(undefined); Improve log(); Patch by Benjamin Young. Closes COUCHDB-895. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1035105 13f79535-47bb-0310-9956-ffa450edef68 --- THANKS | 1 + share/server/util.js | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/THANKS b/THANKS index 6472bb1a..bd91f7ed 100644 --- a/THANKS +++ b/THANKS @@ -66,5 +66,6 @@ suggesting improvements or submitting changes. Some of these people are: * Lim Yue Chuan * David Davis * Dale Harvey + * Benjamin Young For a list of authors see the `AUTHORS` file. diff --git a/share/server/util.js b/share/server/util.js index 77b934ed..a5dfa127 100644 --- a/share/server/util.js +++ b/share/server/util.js @@ -117,8 +117,10 @@ function respond(obj) { function log(message) { // idea: query_server_config option for log level - if (typeof message != "string") { + if (typeof message == "xml") { + message = message.toXMLString(); + } else if (typeof message != "string") { message = Couch.toJSON(message); } - respond(["log", message]); + respond(["log", String(message)]); }; -- cgit v1.2.3 From 10423ffe29a22f85df08b5391693d8dffff54945 Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Mon, 15 Nov 2010 11:37:46 +0000 Subject: Correct display for docs with %2f in their ID. Closes COUCHDB-948. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1035230 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/document.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/share/www/document.html b/share/www/document.html index ee0f3475..ed4fd4ee 100644 --- a/share/www/document.html +++ b/share/www/document.html @@ -44,7 +44,7 @@ specific language governing permissions and limitations under the License. $(function() { $("h1 a.dbname").text(encodeURIComponent(page.dbName)) .attr("href", "database.html?" + encodeURIComponent(page.db.name)); - $("h1 strong").text(encodeURIComponent(page.docId)); + $("h1 strong").text(encodeURIComponent(page.docId).replace(/%2[Ff]/g, "/")); $("h1 a.raw").attr("href", "/" + encodeURIComponent(page.db.name) + "/" + encodeURIComponent(page.docId)); page.updateFieldListing(); -- cgit v1.2.3 From e55d96443d64f5cf8b654ca523b2476427273bd8 Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Tue, 16 Nov 2010 10:56:11 +0000 Subject: After authentication, redirect to the path the user requested, not the one that is used internally. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1035582 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_httpd.erl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/couchdb/couch_httpd.erl b/src/couchdb/couch_httpd.erl index 8a5c699a..d24822aa 100644 --- a/src/couchdb/couch_httpd.erl +++ b/src/couchdb/couch_httpd.erl @@ -769,7 +769,13 @@ error_headers(#httpd{mochi_req=MochiReq}=Req, Code, ErrorStr, ReasonStr) -> {Code, []}; match -> AuthRedirectBin = ?l2b(AuthRedirect), - UrlReturn = ?l2b(couch_util:url_encode(MochiReq:get(path))), + % Redirect to the path the user requested, not + % the one that is used internally. + UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of + undefined -> MochiReq:get(path); + VHostPath -> VHostPath + end, + UrlReturn = ?l2b(couch_util:url_encode(UrlReturnRaw)), UrlReason = ?l2b(couch_util:url_encode(ReasonStr)), {302, [{"Location", couch_httpd:absolute_uri(Req, <<AuthRedirectBin/binary, "?return=", UrlReturn/binary, "&reason=", UrlReason/binary>>)}]} Else -> Else end.
+reopen(#db{main_pid = Pid, fd_ref_counter = OldRefCntr}) -> + {ok, #db{fd_ref_counter = NewRefCntr} = NewDb} = + gen_server:call(Pid, get_db, infinity), + case NewRefCntr =:= OldRefCntr of + true -> + {ok, NewDb}; + false -> + couch_ref_counter:add(NewRefCntr), + couch_ref_counter:drop(OldRefCntr), + {ok, NewDb} + end. + ensure_full_commit(#db{update_pid=UpdatePid,instance_start_time=StartTime}) -> ok = gen_server:call(UpdatePid, full_commit, infinity), {ok, StartTime}. diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl index 3571fd5e..e5c6019a 100644 --- a/src/couchdb/couch_db_updater.erl +++ b/src/couchdb/couch_db_updater.erl @@ -188,6 +188,7 @@ handle_cast({compact_done, CompactFilepath}, #db{filepath=Filepath}=Db) -> close_db(Db), NewDb3 = refresh_validate_doc_funs(NewDb2), ok = gen_server:call(Db#db.main_pid, {db_updated, NewDb3}, infinity), + couch_db_update_notifier:notify({compacted, NewDb3#db.name}), ?LOG_INFO("Compaction for db \"~s\" completed.", [Db#db.name]), {noreply, NewDb3#db{compactor_pid=nil}}; false -> diff --git a/src/couchdb/couch_view_group.erl b/src/couchdb/couch_view_group.erl index 1a8103a1..aec52543 100644 --- a/src/couchdb/couch_view_group.erl +++ b/src/couchdb/couch_view_group.erl @@ -32,7 +32,8 @@ compactor_pid=nil, waiting_commit=false, waiting_list=[], - ref_counter=nil + ref_counter=nil, + db_update_notifier=nil }). % api methods @@ -75,7 +76,7 @@ start_link(InitArgs) -> end. % init creates a closure which spawns the appropriate view_updater. -init({InitArgs, ReturnPid, Ref}) -> +init({{_, DbName, _} = InitArgs, ReturnPid, Ref}) -> process_flag(trap_exit, true), case prepare_group(InitArgs, false) of {ok, #group{db=Db, fd=Fd, current_seq=Seq}=Group} -> @@ -86,7 +87,15 @@ init({InitArgs, ReturnPid, Ref}) -> _ -> couch_db:monitor(Db), {ok, RefCounter} = couch_ref_counter:start([Fd]), + Server = self(), + {ok, Notifier} = couch_db_update_notifier:start_link( + fun({compacted, DbName1}) when DbName1 =:= DbName -> + ok = gen_server:cast(Server, reopen_db); + (_) -> + ok + end), {ok, #group_state{ + db_update_notifier=Notifier, db_name=couch_db:name(Db), init_args=InitArgs, group=Group, @@ -116,11 +125,11 @@ init({InitArgs, ReturnPid, Ref}) -> handle_call({request_group, RequestSeq}, From, #group_state{ db_name=DbName, - group=#group{current_seq=Seq}=Group, + group=#group{current_seq=Seq, db=OldDb}=Group, updater_pid=nil, waiting_list=WaitList }=State) when RequestSeq > Seq -> - {ok, Db} = couch_db:open_int(DbName, []), + {ok, Db} = reopen_db(DbName, OldDb), Group2 = Group#group{db=Db}, Owner = self(), Pid = spawn_link(fun()-> couch_view_updater:update(Owner, Group2) end), @@ -155,11 +164,11 @@ handle_call(request_group_info, _From, State) -> handle_cast({start_compact, CompactFun}, #group_state{compactor_pid=nil} = State) -> #group_state{ - group = #group{name = GroupId, sig = GroupSig} = Group, + group = #group{name = GroupId, sig = GroupSig, db = OldDb} = Group, init_args = {RootDir, DbName, _} } = State, ?LOG_INFO("View index compaction starting for ~s ~s", [DbName, GroupId]), - {ok, Db} = couch_db:open_int(DbName, []), + {ok, Db} = reopen_db(DbName, OldDb), {ok, Fd} = open_index_file(compact, RootDir, DbName, GroupSig), NewGroup = reset_file(Db, Fd, DbName, Group), Pid = spawn_link(fun() -> CompactFun(Group, NewGroup) end), @@ -222,13 +231,14 @@ handle_cast({compact_done, NewGroup}, State) -> ?LOG_INFO("View index compaction still behind for ~s ~s -- current: ~p " ++ "compact: ~p", [DbName, GroupId, CurrentSeq, 
NewGroup#group.current_seq]), couch_db:close(NewGroup#group.db), - {ok, Db} = couch_db:open_int(DbName, []), Pid = spawn_link(fun() -> + {ok, Db} = couch_db:open_int(DbName, []), {_,Ref} = erlang:spawn_monitor(fun() -> couch_view_updater:update(nil, NewGroup#group{db = Db}) end), receive {'DOWN', Ref, _, _, {new_group, NewGroup2}} -> + couch_db:close(Db), #group{name=GroupId} = NewGroup2, Pid2 = couch_view:get_group_server(DbName, GroupId), gen_server:cast(Pid2, {compact_done, NewGroup2}) @@ -252,7 +262,11 @@ handle_cast({partial_update, Pid, NewGroup}, #group_state{updater_pid=Pid} {noreply, State#group_state{group=NewGroup, waiting_commit=true}}; handle_cast({partial_update, _, _}, State) -> %% message from an old (probably pre-compaction) updater; ignore - {noreply, State}. + {noreply, State}; + +handle_cast(reopen_db, #group_state{group = Group, db_name = DbName} = State) -> + {ok, Db} = reopen_db(DbName, Group#group.db), + {noreply, State#group_state{group = Group#group{db = Db}}}. handle_info(delayed_commit, #group_state{db_name=DbName,group=Group}=State) -> {ok, Db} = couch_db:open_int(DbName, []), @@ -338,6 +352,7 @@ handle_info({'DOWN',_,_,_,_}, State) -> terminate(Reason, #group_state{updater_pid=Update, compactor_pid=Compact}=S) -> + couch_db_update_notifier:stop(S#group_state.db_update_notifier), reply_all(S, Reason), couch_util:shutdown_sync(Update), couch_util:shutdown_sync(Compact), @@ -369,8 +384,8 @@ reply_all(#group_state{waiting_list=WaitList}=State, Reply) -> [catch gen_server:reply(Pid, Reply) || {Pid, _} <- WaitList], State#group_state{waiting_list=[]}. -prepare_group({RootDir, DbName, #group{sig=Sig}=Group}, ForceReset)-> - case couch_db:open_int(DbName, []) of +prepare_group({RootDir, DbName, #group{sig=Sig, db=OldDb}=Group}, ForceReset)-> + case reopen_db(DbName, OldDb) of {ok, Db} -> case open_index_file(RootDir, DbName, Sig) of {ok, Fd} -> @@ -588,4 +603,7 @@ init_group(Db, Fd, #group{def_lang=Lang,views=Views}= Group#group{db=Db, fd=Fd, current_seq=Seq, purge_seq=PurgeSeq, id_btree=IdBtree, views=Views2}. - +reopen_db(DbName, nil) -> + couch_db:open_int(DbName, []); +reopen_db(_DbName, Db) -> + couch_db:reopen(Db). -- cgit v1.2.3 From cde3f3943f0185a8ec98a1a0e6e716202529f239 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 18 Nov 2010 10:07:33 +0000 Subject: Merged revision 1032673 from trunk: Preserve attachment identity length when doing local to local replications. Closes COUCHDB-930. Patch by Juuso Väänänen. Thanks. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1036380 13f79535-47bb-0310-9956-ffa450edef68 --- THANKS | 1 + src/couchdb/couch_db.erl | 5 ++-- test/etap/113-replication-attachment-comp.t | 46 ++++++++++++++++++++++++++++- 3 files changed, 49 insertions(+), 3 deletions(-) diff --git a/THANKS b/THANKS index bd91f7ed..68f8a4cd 100644 --- a/THANKS +++ b/THANKS @@ -66,6 +66,7 @@ suggesting improvements or submitting changes. Some of these people are: * Lim Yue Chuan * David Davis * Dale Harvey + * Juuso Väänänen * Benjamin Young For a list of authors see the `AUTHORS` file.
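The commit above (COUCHDB-930) hinges on a distinction the couch_db.erl hunk that follows makes explicit: an #att record carries both att_len, the size of the attachment as stored in the file (compressed, if attachment compression is on), and disk_len, the length of its identity (that is, uncompressed) form, and the local-to-local copy path was overwriting disk_len with the stored length. A minimal sketch of the observable behaviour, in the test-suite idiom used throughout this log (database, document, and attachment names are hypothetical):

    var src = new CouchDB("test_suite_db_a");
    var tgt = new CouchDB("test_suite_db_b");
    CouchDB.replicate(src.name, tgt.name);
    // attachment stubs report "length", the identity (uncompressed) size;
    // it must survive replication even when the stored bytes are compressed
    var stubA = src.open("doc1")._attachments["readme.txt"];
    var stubB = tgt.open("doc1")._attachments["readme.txt"];
    TEquals(stubA.length, stubB.length, "identity length preserved");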
diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index 9a02e1ee..fe155abe 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -816,11 +816,12 @@ flush_att(Fd, #att{data={Fd0, _}}=Att) when Fd0 == Fd -> % already written to our file, nothing to write Att; -flush_att(Fd, #att{data={OtherFd,StreamPointer}, md5=InMd5}=Att) -> +flush_att(Fd, #att{data={OtherFd,StreamPointer}, md5=InMd5, + disk_len=InDiskLen} = Att) -> {NewStreamData, Len, _IdentityLen, Md5, IdentityMd5} = couch_stream:copy_to_new_stream(OtherFd, StreamPointer, Fd), check_md5(IdentityMd5, InMd5), - Att#att{data={Fd, NewStreamData}, md5=Md5, att_len=Len, disk_len=Len}; + Att#att{data={Fd, NewStreamData}, md5=Md5, att_len=Len, disk_len=InDiskLen}; flush_att(Fd, #att{data=Data}=Att) when is_binary(Data) -> with_stream(Fd, Att, fun(OutputStream) -> diff --git a/test/etap/113-replication-attachment-comp.t b/test/etap/113-replication-attachment-comp.t index 30f602ef..19c48fc6 100755 --- a/test/etap/113-replication-attachment-comp.t +++ b/test/etap/113-replication-attachment-comp.t @@ -30,7 +30,7 @@ test_db_b_name() -> main(_) -> test_util:init_code_path(), - etap:plan(30), + etap:plan(45), case (catch test()) of ok -> etap:end_tests(); @@ -102,6 +102,33 @@ test() -> check_server_can_decompress_att(test_db_b_name()), check_att_stubs(test_db_a_name(), test_db_b_name()), + % + % test local replication + % + + delete_db(test_db_a_name()), + delete_db(test_db_b_name()), + create_db(test_db_a_name()), + create_db(test_db_b_name()), + + % enable compression + couch_config:set("attachments", "compression_level", "8"), + couch_config:set("attachments", "compressible_types", "text/*"), + + % store doc with text attachment in DB A + put_text_att(test_db_a_name()), + + % disable attachment compression + couch_config:set("attachments", "compression_level", "0"), + + % do local-local replication + do_local_replication(test_db_a_name(), test_db_b_name()), + + % verify that DB B has the attachment stored in compressed form + check_att_is_compressed(test_db_b_name()), + check_server_can_decompress_att(test_db_b_name()), + check_att_stubs(test_db_a_name(), test_db_b_name()), + timer:sleep(3000), % to avoid mochiweb socket closed exceptions delete_db(test_db_a_name()), delete_db(test_db_b_name()), @@ -152,6 +179,23 @@ do_push_replication(SourceDbName, TargetDbName) -> etap:is(RepOk, true, "Push replication completed with success"), ok. +do_local_replication(SourceDbName, TargetDbName) -> + RepObj = {[ + {<<"source">>, SourceDbName}, + {<<"target">>, TargetDbName} + ]}, + {ok, {{_, Code, _}, _Headers, Body}} = http:request( + post, + {rep_url(), [], + "application/json", list_to_binary(couch_util:json_encode(RepObj))}, + [], + [{sync, true}]), + etap:is(Code, 200, "Local replication successfully triggered"), + Json = couch_util:json_decode(Body), + RepOk = couch_util:get_nested_json_value(Json, [<<"ok">>]), + etap:is(RepOk, true, "Local replication completed with success"), + ok. + check_att_is_compressed(DbName) -> {ok, {{_, Code, _}, Headers, Body}} = http:request( get, -- cgit v1.2.3 From 9d2d40f25da4f62f4d68172e2c62b4a6eb01e531 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 18 Nov 2010 11:11:41 +0000 Subject: Merged revision 1036407 from trunk: Make sure that after compaction of the authentication database the old reference counter is released. Same type of issue as in COUCHDB-926. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1036408 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/auth_cache.js | 31 +++++++++++++++++++++++++++++++ src/couchdb/couch_auth_cache.erl | 9 +++++++++ 2 files changed, 40 insertions(+) diff --git a/share/www/script/test/auth_cache.js b/share/www/script/test/auth_cache.js index 75827dbd..e48f7370 100644 --- a/share/www/script/test/auth_cache.js +++ b/share/www/script/test/auth_cache.js @@ -238,6 +238,37 @@ couchTests.auth_cache = function(debug) { T(misses_after === misses_before); T(hits_after === (hits_before + 1)); + + // login, compact authentication DB, login again and verify that + // there was a cache hit + hits_before = hits_after; + misses_before = misses_after; + + T(CouchDB.login("johndoe", "123456").ok); + + hits_after = hits(); + misses_after = misses(); + + T(misses_after === (misses_before + 1)); + T(hits_after === hits_before); + + T(CouchDB.logout().ok); + T(authDb.compact().ok); + + while (authDb.info().compact_running); + + hits_before = hits_after; + misses_before = misses_after; + + T(CouchDB.login("johndoe", "123456").ok); + + hits_after = hits(); + misses_after = misses(); + + T(misses_after === misses_before); + T(hits_after === (hits_before + 1)); + + T(CouchDB.logout().ok); } diff --git a/src/couchdb/couch_auth_cache.erl b/src/couchdb/couch_auth_cache.erl index 2fbc4f0e..e0715b88 100644 --- a/src/couchdb/couch_auth_cache.erl +++ b/src/couchdb/couch_auth_cache.erl @@ -135,6 +135,7 @@ handle_db_event({Event, DbName}) -> case Event of deleted -> gen_server:call(?MODULE, auth_db_deleted, infinity); created -> gen_server:call(?MODULE, auth_db_created, infinity); + compacted -> gen_server:call(?MODULE, auth_db_compacted, infinity); _Else -> ok end; false -> @@ -158,6 +159,14 @@ handle_call(auth_db_created, _From, State) -> true = ets:insert(?STATE, {auth_db, open_auth_db()}), {reply, ok, NewState}; +handle_call(auth_db_compacted, _From, State) -> + exec_if_auth_db( + fun(AuthDb) -> + true = ets:insert(?STATE, {auth_db, reopen_auth_db(AuthDb)}) + end + ), + {reply, ok, State}; + handle_call({new_max_cache_size, NewSize}, _From, State) -> case NewSize >= State#state.cache_size of true -> -- cgit v1.2.3 From 19c1bd388cfdbd8954f43ac48fb25af637b7e07c Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 18 Nov 2010 13:27:21 +0000 Subject: Merged revisions 986710, 988909, 997862, 998090, 1005513 and 1036447 from trunk to make the JavaScript test suite work as well on Chrome/Chromium and Safari. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1036449 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/couch.js | 80 +++++++++++++------------- share/www/script/couch_test_runner.js | 19 +++--- share/www/script/jquery.couch.js | 10 ++-- share/www/script/test/attachment_names.js | 4 +- share/www/script/test/attachment_paths.js | 14 +++-- share/www/script/test/attachment_views.js | 4 +- share/www/script/test/attachments.js | 28 ++++----- share/www/script/test/attachments_multipart.js | 2 +- share/www/script/test/basics.js | 10 ++-- share/www/script/test/bulk_docs.js | 10 ++-- share/www/script/test/compact.js | 6 +- share/www/script/test/conflicts.js | 2 +- share/www/script/test/cookie_auth.js | 20 +++---- share/www/script/test/erlang_views.js | 2 +- share/www/script/test/etags_views.js | 2 +- share/www/script/test/list_views.js | 2 +- share/www/script/test/method_override.js | 2 +- share/www/script/test/proxyauth.js | 13 ++--- share/www/script/test/purge.js | 8 +-- share/www/script/test/recreate_doc.js | 2 +- share/www/script/test/reduce.js | 9 +-- share/www/script/test/reduce_builtin.js | 9 +-- share/www/script/test/stats.js | 10 ++-- share/www/script/test/view_multi_key_design.js | 6 +- share/www/script/test/view_sandboxing.js | 4 +- share/www/script/test/view_update_seq.js | 4 +- 26 files changed, 147 insertions(+), 135 deletions(-) diff --git a/share/www/script/couch.js b/share/www/script/couch.js index 33fd82ba..ca860bd5 100644 --- a/share/www/script/couch.js +++ b/share/www/script/couch.js @@ -22,17 +22,17 @@ function CouchDB(name, httpHeaders) { this.last_req = null; this.request = function(method, uri, requestOptions) { - requestOptions = requestOptions || {} - requestOptions.headers = combine(requestOptions.headers, httpHeaders) + requestOptions = requestOptions || {}; + requestOptions.headers = combine(requestOptions.headers, httpHeaders); return CouchDB.request(method, uri, requestOptions); - } + }; // Creates the database on the server this.createDb = function() { this.last_req = this.request("PUT", this.uri); CouchDB.maybeThrowError(this.last_req); return JSON.parse(this.last_req.responseText); - } + }; // Deletes the database on the server this.deleteDb = function() { @@ -42,7 +42,7 @@ function CouchDB(name, httpHeaders) { } CouchDB.maybeThrowError(this.last_req); return JSON.parse(this.last_req.responseText); - } + }; // Save a document to the database this.save = function(doc, options) { @@ -57,7 +57,7 @@ function CouchDB(name, httpHeaders) { var result = JSON.parse(this.last_req.responseText); doc._rev = result.rev; return result; - } + }; // Open a document from the database this.open = function(docId, options) { @@ -68,7 +68,7 @@ function CouchDB(name, httpHeaders) { } CouchDB.maybeThrowError(this.last_req); return JSON.parse(this.last_req.responseText); - } + }; // Deletes a document from the database this.deleteDoc = function(doc) { @@ -79,7 +79,7 @@ function CouchDB(name, httpHeaders) { doc._rev = result.rev; //record rev in input document doc._deleted = true; return result; - } + }; // Deletes an attachment from a document this.deleteDocAttachment = function(doc, attachment_name) { @@ -89,18 +89,18 @@ function CouchDB(name, httpHeaders) { var result = JSON.parse(this.last_req.responseText); doc._rev = result.rev; //record rev in input document return result; - } + }; this.bulkSave = function(docs, options) { // first prepoulate the UUIDs for new documents - var newCount = 0 + var newCount = 0; for (var i=0; i= 400) { @@ -460,7 +460,7 @@ 
CouchDB.maybeThrowError = function(req) { } throw result; } -} +}; CouchDB.params = function(options) { options = options || {}; diff --git a/share/www/script/couch_test_runner.js b/share/www/script/couch_test_runner.js index fbffbbb6..56787e9a 100644 --- a/share/www/script/couch_test_runner.js +++ b/share/www/script/couch_test_runner.js @@ -28,7 +28,7 @@ function patchTest(fun) { var source = fun.toString(); var output = ""; var i = 0; - var testMarker = "T(" + var testMarker = "T("; while (i < source.length) { var testStart = source.indexOf(testMarker, i); if (testStart == -1) { @@ -239,13 +239,13 @@ function saveTestReport(report) { $.couch.info({success : function(node_info) { report.node = node_info; db.saveDoc(report); - }}) + }}); }; var createDb = function() { db.create({success: function() { db.info({success:saveReport}); }}); - } + }; db.info({error: createDb, success:saveReport}); } }; @@ -309,7 +309,7 @@ function T(arg1, arg2, testName) { .find("code").text(message).end() .appendTo($("td.details ol", currentRow)); } - numFailures += 1 + numFailures += 1; } } @@ -318,6 +318,11 @@ function TEquals(expected, actual, testName) { "', got '" + repr(actual) + "'", testName); } +function TEqualsIgnoreCase(expected, actual, testName) { + T(equals(expected.toUpperCase(), actual.toUpperCase()), "expected '" + repr(expected) + + "', got '" + repr(actual) + "'", testName); +} + function equals(a,b) { if (a === b) return true; try { @@ -338,18 +343,18 @@ function repr(val) { } function makeDocs(start, end, templateDoc) { - var templateDocSrc = templateDoc ? JSON.stringify(templateDoc) : "{}" + var templateDocSrc = templateDoc ? JSON.stringify(templateDoc) : "{}"; if (end === undefined) { end = start; start = 0; } - var docs = [] + var docs = []; for (var i = start; i < end; i++) { var newDoc = eval("(" + templateDocSrc + ")"); newDoc._id = (i).toString(); newDoc.integer = i; newDoc.string = (i).toString(); - docs.push(newDoc) + docs.push(newDoc); } return docs; } diff --git a/share/www/script/jquery.couch.js b/share/www/script/jquery.couch.js index ebf7d52a..114e5801 100644 --- a/share/www/script/jquery.couch.js +++ b/share/www/script/jquery.couch.js @@ -36,7 +36,7 @@ } user_doc.type = "user"; if (!user_doc.roles) { - user_doc.roles = [] + user_doc.roles = []; } return user_doc; }; @@ -75,7 +75,7 @@ req.type = "PUT"; req.data = toJSON(value); req.contentType = "application/json"; - req.processData = false + req.processData = false; } ajax(req, options, @@ -115,7 +115,7 @@ user_doc = prepareUserDoc(user_doc, password); $.couch.userDb(function(db) { db.saveDoc(user_doc, options); - }) + }); }, login: function(options) { @@ -167,7 +167,7 @@ doc._attachments["rev-"+doc._rev.split("-")[0]] = { content_type :"application/json", data : Base64.encode(rawDocs[doc._id].raw) - } + }; return true; } } @@ -583,7 +583,7 @@ if (!uuidCache.length) { ajax({url: this.urlPrefix + "/_uuids", data: {count: cacheNum}, async: false}, { success: function(resp) { - uuidCache = resp.uuids + uuidCache = resp.uuids; } }, "Failed to retrieve UUID batch." 
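The TEqualsIgnoreCase helper introduced in couch_test_runner.js above exists because browsers disagree about header-value casing; Safari, for example, reports the charset parameter of a Content-Type header in upper case, so byte-exact comparisons are fragile. A short usage sketch (the URL is illustrative; the same pattern appears in the attachment tests below):

    var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt");
    // passes for "text/plain;charset=utf-8" and "text/plain;charset=UTF-8" alike
    TEqualsIgnoreCase("text/plain;charset=utf-8",
                      xhr.getResponseHeader("Content-Type"));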
diff --git a/share/www/script/test/attachment_names.js b/share/www/script/test/attachment_names.js index d90c24c4..988dd2d2 100644 --- a/share/www/script/test/attachment_names.js +++ b/share/www/script/test/attachment_names.js @@ -24,7 +24,7 @@ couchTests.attachment_names = function(debug) { data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=" } } - } + }; // inline attachments try { @@ -72,7 +72,7 @@ couchTests.attachment_names = function(debug) { data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=" } } - } + }; try { db.save(binAttDoc); diff --git a/share/www/script/test/attachment_paths.js b/share/www/script/test/attachment_paths.js index a2a0f69c..3f6ffb7c 100644 --- a/share/www/script/test/attachment_paths.js +++ b/share/www/script/test/attachment_paths.js @@ -33,7 +33,7 @@ couchTests.attachment_paths = function(debug) { data: "V2UgbGlrZSBwZXJjZW50IHR3byBGLg==" } } - } + }; T(db.save(binAttDoc).ok); @@ -73,7 +73,10 @@ couchTests.attachment_paths = function(debug) { T(binAttDoc._attachments["foo/bar.txt"] !== undefined); T(binAttDoc._attachments["foo%2Fbaz.txt"] !== undefined); T(binAttDoc._attachments["foo/bar2.txt"] !== undefined); - T(binAttDoc._attachments["foo/bar2.txt"].content_type == "text/plain;charset=utf-8"); + TEquals("text/plain;charset=utf-8", // thank you Safari + binAttDoc._attachments["foo/bar2.txt"].content_type.toLowerCase(), + "correct content-type" + ); T(binAttDoc._attachments["foo/bar2.txt"].length == 30); //// now repeat the while thing with a design doc @@ -92,7 +95,7 @@ couchTests.attachment_paths = function(debug) { data: "V2UgbGlrZSBwZXJjZW50IHR3byBGLg==" } } - } + }; T(db.save(binAttDoc).ok); @@ -141,7 +144,10 @@ couchTests.attachment_paths = function(debug) { T(binAttDoc._attachments["foo/bar.txt"] !== undefined); T(binAttDoc._attachments["foo/bar2.txt"] !== undefined); - T(binAttDoc._attachments["foo/bar2.txt"].content_type == "text/plain;charset=utf-8"); + TEquals("text/plain;charset=utf-8", // thank you Safari + binAttDoc._attachments["foo/bar2.txt"].content_type.toLowerCase(), + "correct content-type" + ); T(binAttDoc._attachments["foo/bar2.txt"].length == 30); } }; diff --git a/share/www/script/test/attachment_views.js b/share/www/script/test/attachment_views.js index fd30dcfc..a92a8ad0 100644 --- a/share/www/script/test/attachment_views.js +++ b/share/www/script/test/attachment_views.js @@ -68,11 +68,11 @@ couchTests.attachment_views= function(debug) { } emit(parseInt(doc._id), count); - } + }; var reduceFunction = function(key, values) { return sum(values); - } + }; var result = db.query(mapFunction, reduceFunction); diff --git a/share/www/script/test/attachments.js b/share/www/script/test/attachments.js index 9d89d5d0..e16c384f 100644 --- a/share/www/script/test/attachments.js +++ b/share/www/script/test/attachments.js @@ -24,7 +24,7 @@ couchTests.attachments= function(debug) { data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=" } } - } + }; var save_response = db.save(binAttDoc); T(save_response.ok); @@ -43,7 +43,7 @@ couchTests.attachments= function(debug) { data: "" } } - } + }; T(db.save(binAttDoc2).ok); @@ -68,12 +68,12 @@ couchTests.attachments= function(debug) { T(binAttDoc2._attachments["foo.txt"] !== undefined); T(binAttDoc2._attachments["foo2.txt"] !== undefined); - T(binAttDoc2._attachments["foo2.txt"].content_type == "text/plain;charset=utf-8"); + TEqualsIgnoreCase("text/plain;charset=utf-8", binAttDoc2._attachments["foo2.txt"].content_type); T(binAttDoc2._attachments["foo2.txt"].length == 30); var xhr = CouchDB.request("GET", 
"/test_suite_db/bin_doc2/foo2.txt"); T(xhr.responseText == "This is no base64 encoded text"); - T(xhr.getResponseHeader("Content-Type") == "text/plain;charset=utf-8"); + TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type")); // test without rev, should fail var xhr = CouchDB.request("DELETE", "/test_suite_db/bin_doc2/foo2.txt"); @@ -96,7 +96,7 @@ couchTests.attachments= function(debug) { var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc3/attachment.txt"); T(xhr.responseText == bin_data); - T(xhr.getResponseHeader("Content-Type") == "text/plain;charset=utf-8"); + TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type")); var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc3/attachment.txt", { headers:{"Content-Type":"text/plain;charset=utf-8"}, @@ -113,11 +113,11 @@ couchTests.attachments= function(debug) { var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc3/attachment.txt"); T(xhr.responseText == bin_data); - T(xhr.getResponseHeader("Content-Type") == "text/plain;charset=utf-8"); + TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type")); var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc3/attachment.txt?rev=" + rev); T(xhr.responseText == bin_data); - T(xhr.getResponseHeader("Content-Type") == "text/plain;charset=utf-8"); + TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type")); var xhr = CouchDB.request("DELETE", "/test_suite_db/bin_doc3/attachment.txt?rev=" + rev); T(xhr.status == 200); @@ -129,7 +129,7 @@ couchTests.attachments= function(debug) { var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc3/attachment.txt?rev=" + rev); T(xhr.status == 200); T(xhr.responseText == bin_data); - T(xhr.getResponseHeader("Content-Type") == "text/plain;charset=utf-8"); + TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type")); // empty attachments var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc4/attachment.txt", { @@ -156,7 +156,7 @@ couchTests.attachments= function(debug) { // Attachment sparseness COUCHDB-220 - var docs = [] + var docs = []; for (var i = 0; i < 5; i++) { var doc = { _id: (i).toString(), @@ -166,8 +166,8 @@ couchTests.attachments= function(debug) { data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=" } } - } - docs.push(doc) + }; + docs.push(doc); } var saved = db.bulkSave(docs); @@ -210,7 +210,7 @@ couchTests.attachments= function(debug) { var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc5/lorem.txt"); T(xhr.responseText == lorem); - T(xhr.getResponseHeader("Content-Type") == "text/plain;charset=utf-8"); + TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type")); // test large inline attachment too var lorem_b64 = CouchDB.request("GET", "/_utils/script/test/lorem_b64.txt").responseText; @@ -254,7 +254,7 @@ couchTests.attachments= function(debug) { data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=" } } - } + }; T(db.save(bin_doc6).ok); // stub out the attachment bin_doc6._attachments["foo.txt"] = { stub: true }; @@ -268,6 +268,6 @@ couchTests.attachments= function(debug) { T(db.save(bin_doc6).ok == true); T(false && "Shouldn't get here!"); } catch (e) { - T(e.error == "missing_stub") + T(e.error == "missing_stub"); } }; diff --git a/share/www/script/test/attachments_multipart.js b/share/www/script/test/attachments_multipart.js index 5edf4d2c..fecf9d01 100644 --- a/share/www/script/test/attachments_multipart.js +++ b/share/www/script/test/attachments_multipart.js @@ -58,7 +58,7 
@@ couchTests.attachments_multipart= function(debug) { var result = JSON.parse(xhr.responseText); - T(result.ok) + T(result.ok); diff --git a/share/www/script/test/basics.js b/share/www/script/test/basics.js index 6a3ae471..8885ba6e 100644 --- a/share/www/script/test/basics.js +++ b/share/www/script/test/basics.js @@ -45,7 +45,7 @@ couchTests.basics = function(debug) { // Get the database info, check the db_name T(db.info().db_name == "test_suite_db"); - T(CouchDB.allDbs().indexOf("test_suite_db") != -1) + T(CouchDB.allDbs().indexOf("test_suite_db") != -1); // Get the database info, check the doc_count T(db.info().doc_count == 0); @@ -91,13 +91,13 @@ couchTests.basics = function(debug) { emit(null, doc.b); }; - results = db.query(mapFunction); + var results = db.query(mapFunction); // verify only one document found and the result value (doc.b). T(results.total_rows == 1 && results.rows[0].value == 16); // reopen document we saved earlier - existingDoc = db.open(id); + var existingDoc = db.open(id); T(existingDoc.a==1); @@ -191,12 +191,12 @@ couchTests.basics = function(debug) { T(xhr.status == 404); // Check for invalid document members - bad_docs = [ + var bad_docs = [ ["goldfish", {"_zing": 4}], ["zebrafish", {"_zoom": "hello"}], ["mudfish", {"zane": "goldfish", "_fan": "something smells delicious"}], ["tastyfish", {"_bing": {"wha?": "soda can"}}] - ] + ]; var test_doc = function(info) { var data = JSON.stringify(info[1]); xhr = CouchDB.request("PUT", "/test_suite_db/" + info[0], {body: data}); diff --git a/share/www/script/test/bulk_docs.js b/share/www/script/test/bulk_docs.js index 346aea83..9095e6b3 100644 --- a/share/www/script/test/bulk_docs.js +++ b/share/www/script/test/bulk_docs.js @@ -51,12 +51,12 @@ couchTests.bulk_docs = function(debug) { T(results.length == 5); T(results[0].id == "0"); T(results[0].error == "conflict"); - T(results[0].rev === undefined); // no rev member when a conflict + T(typeof results[0].rev === "undefined"); // no rev member when a conflict // but the rest are not for (i = 1; i < 5; i++) { T(results[i].id == i.toString()); - T(results[i].rev) + T(results[i].rev); T(db.open(docs[i]._id) == null); } @@ -64,7 +64,7 @@ couchTests.bulk_docs = function(debug) { // save doc 0, this will cause a conflict when we save docs[0] var doc = db.open("0"); - docs[0] = db.open("0") + docs[0] = db.open("0"); db.save(doc); docs[0].shooby = "dooby"; @@ -93,8 +93,8 @@ couchTests.bulk_docs = function(debug) { // Regression test for failure on update/delete var newdoc = {"_id": "foobar", "body": "baz"}; T(db.save(newdoc).ok); - update = {"_id": newdoc._id, "_rev": newdoc._rev, "body": "blam"}; - torem = {"_id": newdoc._id, "_rev": newdoc._rev, "_deleted": true}; + var update = {"_id": newdoc._id, "_rev": newdoc._rev, "body": "blam"}; + var torem = {"_id": newdoc._id, "_rev": newdoc._rev, "_deleted": true}; results = db.bulkSave([update, torem]); T(results[0].error == "conflict" || results[1].error == "conflict"); }; diff --git a/share/www/script/test/compact.js b/share/www/script/test/compact.js index 22eeaec1..805a3b08 100644 --- a/share/www/script/test/compact.js +++ b/share/www/script/test/compact.js @@ -26,7 +26,7 @@ couchTests.compact = function(debug) { data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=" } } - } + }; T(db.save(binAttDoc).ok); @@ -51,8 +51,8 @@ couchTests.compact = function(debug) { T(db.ensureFullCommit().ok); restartServer(); var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt"); - T(xhr.responseText == "This is a base64 encoded text") - 
T(xhr.getResponseHeader("Content-Type") == "text/plain") + T(xhr.responseText == "This is a base64 encoded text"); + T(xhr.getResponseHeader("Content-Type") == "text/plain"); T(db.info().doc_count == 1); T(db.info().disk_size < deletesize); diff --git a/share/www/script/test/conflicts.js b/share/www/script/test/conflicts.js index b8b93946..7258bc31 100644 --- a/share/www/script/test/conflicts.js +++ b/share/www/script/test/conflicts.js @@ -44,7 +44,7 @@ couchTests.conflicts = function(debug) { var changes = db.changes(); - T( changes.results.length == 1) + T(changes.results.length == 1); // Now clear out the _rev member and save. This indicates this document is // new, not based on an existing revision. diff --git a/share/www/script/test/cookie_auth.js b/share/www/script/test/cookie_auth.js index 68ec882d..ef915602 100644 --- a/share/www/script/test/cookie_auth.js +++ b/share/www/script/test/cookie_auth.js @@ -65,7 +65,7 @@ couchTests.cookie_auth = function(debug) { }, "eh, Boo-Boo?"); try { - usersDb.save(duplicateJchrisDoc) + usersDb.save(duplicateJchrisDoc); T(false && "Can't create duplicate user names. Should have thrown an error."); } catch (e) { T(e.error == "conflict"); @@ -78,7 +78,7 @@ couchTests.cookie_auth = function(debug) { }, "copperfield"); try { - usersDb.save(underscoreUserDoc) + usersDb.save(underscoreUserDoc); T(false && "Can't create underscore user names. Should have thrown an error."); } catch (e) { T(e.error == "forbidden"); @@ -93,7 +93,7 @@ couchTests.cookie_auth = function(debug) { badIdDoc._id = "org.apache.couchdb:w00x"; try { - usersDb.save(badIdDoc) + usersDb.save(badIdDoc); T(false && "Can't create malformed docids. Should have thrown an error."); } catch (e) { T(e.error == "forbidden"); @@ -125,7 +125,7 @@ couchTests.cookie_auth = function(debug) { T(CouchDB.session().userCtx.name != 'Jason Davies'); // test redirect - xhr = CouchDB.request("POST", "/_session?next=/", { + var xhr = CouchDB.request("POST", "/_session?next=/", { headers: {"Content-Type": "application/x-www-form-urlencoded"}, body: "name=Jason%20Davies&password="+encodeURIComponent(password) }); @@ -135,10 +135,10 @@ couchTests.cookie_auth = function(debug) { // to follow the redirect, ie, the browser follows and does a // GET on the returned Location if (xhr.status == 200) { - T(/Welcome/.test(xhr.responseText)) + T(/Welcome/.test(xhr.responseText)); } else { - T(xhr.status == 302) - T(xhr.getResponseHeader("Location")) + T(xhr.status == 302); + T(xhr.getResponseHeader("Location")); } // test users db validations @@ -151,7 +151,7 @@ couchTests.cookie_auth = function(debug) { jasonUserDoc.foo=3; try { - usersDb.save(jasonUserDoc) + usersDb.save(jasonUserDoc); T(false && "Can't update someone else's user doc. Should have thrown an error."); } catch (e) { T(e.error == "forbidden"); @@ -162,7 +162,7 @@ couchTests.cookie_auth = function(debug) { jchrisUserDoc.roles = ["foo"]; try { - usersDb.save(jchrisUserDoc) + usersDb.save(jchrisUserDoc); T(false && "Can't set roles unless you are admin. Should have thrown an error."); } catch (e) { T(e.error == "forbidden"); @@ -179,7 +179,7 @@ couchTests.cookie_auth = function(debug) { jchrisUserDoc.roles = ["_bar"]; try { - usersDb.save(jchrisUserDoc) + usersDb.save(jchrisUserDoc); T(false && "Can't add system roles to user's db. 
Should have thrown an error."); } catch (e) { T(e.error == "forbidden"); diff --git a/share/www/script/test/erlang_views.js b/share/www/script/test/erlang_views.js index 5e93cb96..7eddab40 100644 --- a/share/www/script/test/erlang_views.js +++ b/share/www/script/test/erlang_views.js @@ -44,7 +44,7 @@ couchTests.erlang_views = function(debug) { // check simple reduction - another doc with same key. var doc = {_id: "2", integer: 1, string: "str2"}; T(db.save(doc).ok); - rfun = "fun(Keys, Values, ReReduce) -> length(Values) end." + rfun = "fun(Keys, Values, ReReduce) -> length(Values) end."; results = db.query(mfun, rfun, null, null, "erlang"); T(results.rows[0].value == 2); diff --git a/share/www/script/test/etags_views.js b/share/www/script/test/etags_views.js index a12734f8..7e1537bd 100644 --- a/share/www/script/test/etags_views.js +++ b/share/www/script/test/etags_views.js @@ -38,7 +38,7 @@ couchTests.etags_views = function(debug) { }) } } - } + }; T(db.save(designDoc).ok); var xhr; var docs = makeDocs(0, 10); diff --git a/share/www/script/test/list_views.js b/share/www/script/test/list_views.js index f826b46f..44afa899 100644 --- a/share/www/script/test/list_views.js +++ b/share/www/script/test/list_views.js @@ -394,7 +394,7 @@ couchTests.list_views = function(debug) { T(/LastKey: 0/.test(xhr.responseText)); // Test we do multi-key requests on lists and views in separate docs. - var url = "/test_suite_db/_design/lists/_list/simpleForm/views/basicView" + var url = "/test_suite_db/_design/lists/_list/simpleForm/views/basicView"; xhr = CouchDB.request("POST", url, { body: '{"keys":[-2,-4,-5,-7]}' }); diff --git a/share/www/script/test/method_override.js b/share/www/script/test/method_override.js index 26e9bee0..0bb4c61f 100644 --- a/share/www/script/test/method_override.js +++ b/share/www/script/test/method_override.js @@ -28,7 +28,7 @@ couchTests.method_override = function(debug) { T(doc.bob == "connie"); xhr = CouchDB.request("POST", "/test_suite_db/fnord?rev=" + doc._rev, {headers:{"X-HTTP-Method-Override" : "DELETE"}}); - T(xhr.status == 200) + T(xhr.status == 200); xhr = CouchDB.request("GET", "/test_suite_db/fnord2", {body: JSON.stringify(doc), headers:{"X-HTTP-Method-Override" : "PUT"}}); // Method Override is ignored when original Method isn't POST diff --git a/share/www/script/test/proxyauth.js b/share/www/script/test/proxyauth.js index 171eef37..91e2f221 100644 --- a/share/www/script/test/proxyauth.js +++ b/share/www/script/test/proxyauth.js @@ -39,7 +39,7 @@ couchTests.proxyauth = function(debug) { db.createDb(); var benoitcUserDoc = CouchDB.prepareUserDoc({ - name: "benoitc@apache.org", + name: "benoitc@apache.org" }, "test"); T(usersDb.save(benoitcUserDoc).ok); @@ -56,7 +56,7 @@ couchTests.proxyauth = function(debug) { CouchDB.logout(); - headers = { + var headers = { "X-Auth-CouchDB-UserName": "benoitc@apache.org", "X-Auth-CouchDB-Roles": "test", "X-Auth-CouchDB-Token": hex_hmac_sha1(secret, "benoitc@apache.org") @@ -72,14 +72,13 @@ couchTests.proxyauth = function(debug) { }), "role": stringFun(function(doc, req) { return req.userCtx['roles'][0]; - }), + }) } - - } + }; db.save(designDoc); - req = CouchDB.request("GET", "/test_suite_db/_design/test/_show/welcome", + var req = CouchDB.request("GET", "/test_suite_db/_design/test/_show/welcome", {headers: headers}); T(req.responseText == "Welcome benoitc@apache.org"); @@ -87,7 +86,7 @@ couchTests.proxyauth = function(debug) { {headers: headers}); T(req.responseText == "test"); - xhr = CouchDB.request("PUT", 
"/_config/couch_httpd_auth/proxy_use_secret",{ + var xhr = CouchDB.request("PUT", "/_config/couch_httpd_auth/proxy_use_secret",{ body : JSON.stringify("true"), headers: {"X-Couch-Persist": "false"} }); diff --git a/share/www/script/test/purge.js b/share/www/script/test/purge.js index a924c348..af72ea4f 100644 --- a/share/www/script/test/purge.js +++ b/share/www/script/test/purge.js @@ -30,7 +30,7 @@ couchTests.purge = function(debug) { all_docs_twice: {map: "function(doc) { emit(doc.integer, null); emit(doc.integer, null) }"}, single_doc: {map: "function(doc) { if (doc._id == \"1\") { emit(1, null) }}"} } - } + }; T(db.save(designDoc).ok); @@ -50,7 +50,7 @@ couchTests.purge = function(debug) { // purge the documents var xhr = CouchDB.request("POST", "/test_suite_db/_purge", { - body: JSON.stringify({"1":[doc1._rev], "2":[doc2._rev]}), + body: JSON.stringify({"1":[doc1._rev], "2":[doc2._rev]}) }); T(xhr.status == 200); @@ -83,13 +83,13 @@ couchTests.purge = function(debug) { var doc4 = db.open("4"); xhr = CouchDB.request("POST", "/test_suite_db/_purge", { - body: JSON.stringify({"3":[doc3._rev]}), + body: JSON.stringify({"3":[doc3._rev]}) }); T(xhr.status == 200); xhr = CouchDB.request("POST", "/test_suite_db/_purge", { - body: JSON.stringify({"4":[doc4._rev]}), + body: JSON.stringify({"4":[doc4._rev]}) }); T(xhr.status == 200); diff --git a/share/www/script/test/recreate_doc.js b/share/www/script/test/recreate_doc.js index a6a64ac0..05843558 100644 --- a/share/www/script/test/recreate_doc.js +++ b/share/www/script/test/recreate_doc.js @@ -51,7 +51,7 @@ couchTests.recreate_doc = function(debug) { data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=" } } - } + }; try { // same as before, but with binary db.save(binAttDoc); diff --git a/share/www/script/test/reduce.js b/share/www/script/test/reduce.js index 9c80fa7f..979a0292 100644 --- a/share/www/script/test/reduce.js +++ b/share/www/script/test/reduce.js @@ -15,14 +15,15 @@ couchTests.reduce = function(debug) { db.deleteDb(); db.createDb(); if (debug) debugger; - var numDocs = 500 + var numDocs = 500; var docs = makeDocs(1,numDocs + 1); db.bulkSave(docs); var summate = function(N) {return (N+1)*N/2;}; var map = function (doc) { emit(doc.integer, doc.integer); - emit(doc.integer, doc.integer)}; + emit(doc.integer, doc.integer); + }; var reduce = function (keys, values) { return sum(values); }; var result = db.query(map, reduce); T(result.rows[0].value == 2*summate(numDocs)); @@ -69,7 +70,7 @@ couchTests.reduce = function(debug) { T(db.info().doc_count == ((i - 1) * 10 * 11) + ((j + 1) * 11)); } - map = function (doc) {emit(doc.keys, 1)}; + map = function (doc) { emit(doc.keys, 1); }; reduce = function (keys, values) { return sum(values); }; var results = db.query(map, reduce, {group:true}); @@ -107,7 +108,7 @@ couchTests.reduce = function(debug) { db.createDb(); - var map = function (doc) {emit(doc.val, doc.val)}; + var map = function (doc) { emit(doc.val, doc.val); }; var reduceCombine = function (keys, values, rereduce) { // This computes the standard deviation of the mapped results var stdDeviation=0.0; diff --git a/share/www/script/test/reduce_builtin.js b/share/www/script/test/reduce_builtin.js index d9635688..c9d41fa4 100644 --- a/share/www/script/test/reduce_builtin.js +++ b/share/www/script/test/reduce_builtin.js @@ -16,7 +16,7 @@ couchTests.reduce_builtin = function(debug) { db.createDb(); if (debug) debugger; - var numDocs = 500 + var numDocs = 500; var docs = makeDocs(1,numDocs + 1); db.bulkSave(docs); @@ -28,13 +28,14 @@ 
couchTests.reduce_builtin = function(debug) { acc += i*i; } return acc; - } + }; // this is the same test as the reduce.js test // only we'll let CouchDB run reduce in Erlang var map = function (doc) { emit(doc.integer, doc.integer); - emit(doc.integer, doc.integer)}; + emit(doc.integer, doc.integer); + }; var result = db.query(map, "_sum"); T(result.rows[0].value == 2*summate(numDocs)); @@ -115,7 +116,7 @@ couchTests.reduce_builtin = function(debug) { T(db.info().doc_count == ((i - 1) * 10 * 11) + ((j + 1) * 11)); } - map = function (doc) {emit(doc.keys, 1)}; + map = function (doc) { emit(doc.keys, 1); }; // with emitted values being 1, count should be the same as sum var builtins = ["_sum", "_count"]; diff --git a/share/www/script/test/stats.js b/share/www/script/test/stats.js index d2fd6eac..6fb0fbba 100644 --- a/share/www/script/test/stats.js +++ b/share/www/script/test/stats.js @@ -30,7 +30,7 @@ couchTests.stats = function(debug) { _id:"_design/test", // turn off couch.js id escaping? language: "javascript", views: { - all_docs: {map: "function(doc) {emit(doc.integer, null);}"}, + all_docs: {map: "function(doc) {emit(doc.integer, null);}"} } }; db.save(designDoc); @@ -163,12 +163,12 @@ couchTests.stats = function(debug) { CouchDB.request("POST", "/test_suite_db", { headers: {"Content-Type": "application/json"}, body: '{"a": "1"}' - }) + }); }, test: function(before, after) { TEquals(before+1, after, "POST'ing new docs increments doc writes."); } - }) + }); runTest("couchdb", "database_writes", { setup: function(db) {db.save({"_id": "test"});}, @@ -247,7 +247,7 @@ couchTests.stats = function(debug) { }); runTest("httpd", "temporary_view_reads", { - run: function(db) {db.query(function(doc) {emit(doc._id)})}, + run: function(db) { db.query(function(doc) { emit(doc._id); }); }, test: function(before, after) { TEquals(before+1, after, "Temporary views have their own counter."); } @@ -261,7 +261,7 @@ couchTests.stats = function(debug) { }); runTest("httpd", "view_reads", { - run: function(db) {db.query(function(doc) {emit(doc._id)});}, + run: function(db) { db.query(function(doc) { emit(doc._id); }); }, test: function(before, after) { TEquals(before, after, "Temporary views don't affect permanent views."); } diff --git a/share/www/script/test/view_multi_key_design.js b/share/www/script/test/view_multi_key_design.js index 5a2f645d..c39e73d9 100644 --- a/share/www/script/test/view_multi_key_design.js +++ b/share/www/script/test/view_multi_key_design.js @@ -34,11 +34,11 @@ couchTests.view_multi_key_design = function(debug) { reduce:"function (keys, values) { return sum(values); };" } } - } + }; T(db.save(designDoc).ok); // Test that missing keys work too - var keys = [101,30,15,37,50] + var keys = [101,30,15,37,50]; var reduce = db.view("test/summate",{group:true},keys).rows; T(reduce.length == keys.length-1); // 101 is missing for(var i=0; i Date: Thu, 18 Nov 2010 14:48:56 +0000 Subject: update NEWS and CHANGES for 1.0.2 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1036473 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 37 +++++++++++++++++++++++++++++++++++++ NEWS | 17 +++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/CHANGES b/CHANGES index a8d82f31..118417c4 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,43 @@ Apache CouchDB CHANGES ====================== +Version 1.0.2 +------------- + +Note: This version has not been released yet. + +Futon: + + * Make test suite work with Safari and Chrome. 
+ +Storage System: + + * Fix leaking file handles after compacting databases views. + * Fix databases forgetting their validation function after compaction. + * Fix occasional timeout errors after compacting large databases. + * Fix occasional timeout errors on systems with slow or heavily loaded IO. + +Log System: + + * Reduce lengthy stack traces. + * Allow logging of native types. + +HTTP Interface: + + * Allow reduce=false parameter in map-only views. + * Support "Q values" in HTTP headers. + +Replicator: + + * Updated ibrowse library to 2.1.0 fixing numerous replication issues. + * Fix authenticated replication of design documents with attachments. + * Various fixes to make replication more resilient for edge-cases. + +View Server: + + * Don't trigger a view update when requesting `_design/doc/_info`. + * Fix for circular references in CommonJS requires. + Version 1.0.1 ------------- diff --git a/NEWS b/NEWS index d69f0dcd..264c5640 100644 --- a/NEWS +++ b/NEWS @@ -7,6 +7,23 @@ For details about backwards incompatible changes, see: Each release section notes when backwards incompatible changes have been made. +Version 1.0.2 +------------- + +Note: This version has not been released yet. + + * Make test suite work with Safari and Chrome. + * Fix leaking file handles after compacting databases views. + * Fix databases forgetting their validation function after compaction. + * Fix occasional timeout errors. + * Reduce lengthy stack traces. + * Allow logging of native types. + * Updated ibrowse library to 2.1.0 fixing numerous replication issues. + * Fix authenticated replication of design documents with attachments. + * Various fixes to make replication more resilient for edge-cases. + * Don't trigger a view update when requesting `_design/doc/_info`. + * Fix for circular references in CommonJS requires. + Version 1.0.1 ------------- -- cgit v1.2.3 From 6f7fb7b6ae631fb49e431b2472c2c6d7a70dbb3b Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Thu, 18 Nov 2010 14:56:25 +0000 Subject: refine descriptions in CHANGES and NEWS git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1036475 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 4 ++-- NEWS | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES b/CHANGES index 118417c4..be4c9786 100644 --- a/CHANGES +++ b/CHANGES @@ -12,7 +12,7 @@ Futon: Storage System: - * Fix leaking file handles after compacting databases views. + * Fix leaking file handles after compacting databases and views. * Fix databases forgetting their validation function after compaction. * Fix occasional timeout errors after compacting large databases. * Fix occasional timeout errors on systems with slow or heavily loaded IO. @@ -25,7 +25,7 @@ Log System: HTTP Interface: * Allow reduce=false parameter in map-only views. - * Support "Q values" in HTTP headers. + * Fix parsing of Accept headers. Replicator: diff --git a/NEWS b/NEWS index 264c5640..c71a74ff 100644 --- a/NEWS +++ b/NEWS @@ -13,7 +13,7 @@ Version 1.0.2 Note: This version has not been released yet. * Make test suite work with Safari and Chrome. - * Fix leaking file handles after compacting databases views. + * Fix leaking file handles after compacting databases and views. * Fix databases forgetting their validation function after compaction. * Fix occasional timeout errors. * Reduce lengthy stack traces.
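The two Log System bullets map to the share/server/util.js change earlier in this log: log() now stringifies whatever it receives instead of assuming a string. A sketch of what a map function can safely do after the change (the document fields are hypothetical):

    function(doc) {
      log("plain strings pass through unchanged");
      log({id: doc._id, type: doc.type}); // objects are serialized via Couch.toJSON
      log(doc.missing_field);             // undefined no longer raises a lengthy stack trace
      emit(doc._id, null);
    }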
-- cgit v1.2.3 From 1c41d9cce36a414e10ff3ab741c504a03f6c297f Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Thu, 18 Nov 2010 15:21:03 +0000 Subject: COUCHDB-957 - Show an error message if trying 'Compact View' without selecting a view, rather than compacting the associated database by mistake. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1036487 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/futon.browse.js | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/share/www/script/futon.browse.js b/share/www/script/futon.browse.js index 65acbdeb..17975de2 100644 --- a/share/www/script/futon.browse.js +++ b/share/www/script/futon.browse.js @@ -153,9 +153,13 @@ db.compact({success: function(resp) { callback() }}); break; case "compact_views": - var groupname = page.viewName.substring(8, - page.viewName.indexOf("/_view")); - db.compactView(groupname, {success: function(resp) { callback() }}); + var idx = page.viewName.indexOf("/_view"); + if (idx == -1) { + alert("Compact Views requires focus on a view!"); + } else { + var groupname = page.viewName.substring(8, idx); + db.compactView(groupname, {success: function(resp) { callback() }}); + } break; case "view_cleanup": db.viewCleanup({success: function(resp) { callback() }}); -- cgit v1.2.3 From a8c8aac826a3d86bef3ca169d7f379cef4c4016f Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 18 Nov 2010 21:07:39 +0000 Subject: Merged revision 1036627 from trunk: Preserve user context when reopening a database. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1036629 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db.erl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index fe155abe..964c4704 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -87,17 +87,17 @@ open(DbName, Options) -> Else -> Else end. -reopen(#db{main_pid = Pid, fd_ref_counter = OldRefCntr}) -> +reopen(#db{main_pid = Pid, fd_ref_counter = OldRefCntr, user_ctx = UserCtx}) -> {ok, #db{fd_ref_counter = NewRefCntr} = NewDb} = gen_server:call(Pid, get_db, infinity), case NewRefCntr =:= OldRefCntr of true -> - {ok, NewDb}; + ok; false -> couch_ref_counter:add(NewRefCntr), - couch_ref_counter:drop(OldRefCntr), - {ok, NewDb} - end. + couch_ref_counter:drop(OldRefCntr) + end, + {ok, NewDb#db{user_ctx = UserCtx}}. ensure_full_commit(#db{update_pid=UpdatePid,instance_start_time=StartTime}) -> ok = gen_server:call(UpdatePid, full_commit, infinity), -- cgit v1.2.3 From 18ec22a5c07daf67e4d0ed8e876545c0b96835be Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Fri, 19 Nov 2010 02:05:45 +0000 Subject: Merged revision 1036705 from trunk: Make sure that after a local database compaction the old database reference counters don't remain unreleased forever while a continuous (or long) replication is going on. Same type of issue as in COUCHDB-926. Thanks Adam Kocoloski for some suggestions.
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1036711 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_rep.erl | 65 ++++++++++++++++++++++++++++---- src/couchdb/couch_rep_missing_revs.erl | 12 +++--- src/couchdb/couch_rep_reader.erl | 18 +++++---- src/couchdb/couch_rep_writer.erl | 9 +++-- test/etap/112-replication-missing-revs.t | 19 ++++++++-- 5 files changed, 94 insertions(+), 29 deletions(-) diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl index 90b065c0..ba387285 100644 --- a/src/couchdb/couch_rep.erl +++ b/src/couchdb/couch_rep.erl @@ -47,7 +47,9 @@ committed_seq = 0, stats = nil, - doc_ids = nil + doc_ids = nil, + source_db_update_notifier = nil, + target_db_update_notifier = nil }). %% convenience function to do a simple replication from the shell @@ -196,7 +198,9 @@ do_init([RepId, {PostProps}, UserCtx] = InitArgs) -> rep_starttime = httpd_util:rfc1123_date(), src_starttime = couch_util:get_value(instance_start_time, SourceInfo), tgt_starttime = couch_util:get_value(instance_start_time, TargetInfo), - doc_ids = DocIds + doc_ids = DocIds, + source_db_update_notifier = source_db_update_notifier(Source), + target_db_update_notifier = target_db_update_notifier(Target) }, {ok, State}. @@ -204,7 +208,21 @@ handle_call(get_result, From, #state{complete=true, listeners=[]} = State) -> {stop, normal, State#state{listeners=[From]}}; handle_call(get_result, From, State) -> Listeners = State#state.listeners, - {noreply, State#state{listeners=[From|Listeners]}}. + {noreply, State#state{listeners=[From|Listeners]}}; + +handle_call(get_source_db, _From, #state{source = Source} = State) -> + {reply, {ok, Source}, State}; + +handle_call(get_target_db, _From, #state{target = Target} = State) -> + {reply, {ok, Target}, State}. + +handle_cast(reopen_source_db, #state{source = Source} = State) -> + {ok, NewSource} = couch_db:reopen(Source), + {noreply, State#state{source = NewSource}}; + +handle_cast(reopen_target_db, #state{target = Target} = State) -> + {ok, NewTarget} = couch_db:reopen(Target), + {noreply, State#state{target = NewTarget}}; handle_cast(do_checkpoint, State) -> {noreply, do_checkpoint(State)}; @@ -422,13 +440,20 @@ do_terminate(State) -> false -> [gen_server:reply(R, retry) || R <- OtherListeners] end, + couch_task_status:update("Finishing"), terminate_cleanup(State). -terminate_cleanup(#state{source=Source, target=Target, stats=Stats}) -> - couch_task_status:update("Finishing"), - close_db(Target), - close_db(Source), - ets:delete(Stats). +terminate_cleanup(State) -> + close_db(State#state.source), + close_db(State#state.target), + stop_db_update_notifier(State#state.source_db_update_notifier), + stop_db_update_notifier(State#state.target_db_update_notifier), + ets:delete(State#state.stats). + +stop_db_update_notifier(nil) -> + ok; +stop_db_update_notifier(Notifier) -> + couch_db_update_notifier:stop(Notifier). has_session_id(_SessionId, []) -> false; @@ -752,3 +777,27 @@ parse_proxy_params(ProxyUrl) -> true -> [{proxy_user, User}, {proxy_password, Passwd}] end. + +source_db_update_notifier(#db{name = DbName}) -> + Server = self(), + {ok, Notifier} = couch_db_update_notifier:start_link( + fun({compacted, DbName1}) when DbName1 =:= DbName -> + ok = gen_server:cast(Server, reopen_source_db); + (_) -> + ok + end), + Notifier; +source_db_update_notifier(_) -> + nil. 
+ +target_db_update_notifier(#db{name = DbName}) -> + Server = self(), + {ok, Notifier} = couch_db_update_notifier:start_link( + fun({compacted, DbName1}) when DbName1 =:= DbName -> + ok = gen_server:cast(Server, reopen_target_db); + (_) -> + ok + end), + Notifier; +target_db_update_notifier(_) -> + nil. diff --git a/src/couchdb/couch_rep_missing_revs.erl b/src/couchdb/couch_rep_missing_revs.erl index 1eff6774..9809ca5e 100644 --- a/src/couchdb/couch_rep_missing_revs.erl +++ b/src/couchdb/couch_rep_missing_revs.erl @@ -24,7 +24,6 @@ -record (state, { changes_loop, changes_from = nil, - target, parent, complete = false, count = 0, @@ -44,11 +43,11 @@ next(Server) -> stop(Server) -> gen_server:call(Server, stop). -init([Parent, Target, ChangesFeed, _PostProps]) -> +init([Parent, _Target, ChangesFeed, _PostProps]) -> process_flag(trap_exit, true), Self = self(), - Pid = spawn_link(fun() -> changes_loop(Self, ChangesFeed, Target) end), - {ok, #state{changes_loop=Pid, target=Target, parent=Parent}}. + Pid = spawn_link(fun() -> changes_loop(Self, ChangesFeed, Parent) end), + {ok, #state{changes_loop=Pid, parent=Parent}}. handle_call({add_missing_revs, {HighSeq, Revs}}, From, State) -> State#state.parent ! {update_stats, missing_revs, length(Revs)}, @@ -133,15 +132,16 @@ handle_changes_loop_exit(normal, State) -> handle_changes_loop_exit(Reason, State) -> {stop, Reason, State#state{changes_loop=nil}}. -changes_loop(OurServer, SourceChangesServer, Target) -> +changes_loop(OurServer, SourceChangesServer, Parent) -> case couch_rep_changes_feed:next(SourceChangesServer) of complete -> exit(normal); Changes -> + {ok, Target} = gen_server:call(Parent, get_target_db, infinity), MissingRevs = get_missing_revs(Target, Changes), gen_server:call(OurServer, {add_missing_revs, MissingRevs}, infinity) end, - changes_loop(OurServer, SourceChangesServer, Target). + changes_loop(OurServer, SourceChangesServer, Parent). get_missing_revs(#http_db{}=Target, Changes) -> Transform = fun({Props}) -> diff --git a/src/couchdb/couch_rep_reader.erl b/src/couchdb/couch_rep_reader.erl index 5c824cbc..0930599c 100644 --- a/src/couchdb/couch_rep_reader.erl +++ b/src/couchdb/couch_rep_reader.erl @@ -60,7 +60,7 @@ init([Parent, Source, MissingRevs_or_DocIds, _PostProps]) -> true -> ok end, Self = self(), ReaderLoop = spawn_link( - fun() -> reader_loop(Self, Source, MissingRevs_or_DocIds) end + fun() -> reader_loop(Self, Parent, Source, MissingRevs_or_DocIds) end ), MissingRevs = case MissingRevs_or_DocIds of Pid when is_pid(Pid) -> @@ -281,12 +281,13 @@ open_doc(#http_db{url = Url} = DbS, DocId) -> [] end. 
-reader_loop(ReaderServer, Source, DocIds) when is_list(DocIds) -> - case Source of +reader_loop(ReaderServer, Parent, Source1, DocIds) when is_list(DocIds) -> + case Source1 of #http_db{} -> [gen_server:call(ReaderServer, {open_remote_doc, Id, nil, nil}, infinity) || Id <- DocIds]; _LocalDb -> + {ok, Source} = gen_server:call(Parent, get_source_db, infinity), Docs = lists:foldr(fun(Id, Acc) -> case couch_db:open_doc(Source, Id) of {ok, Doc} -> @@ -299,7 +300,7 @@ reader_loop(ReaderServer, Source, DocIds) when is_list(DocIds) -> end, exit(complete); -reader_loop(ReaderServer, Source, MissingRevsServer) -> +reader_loop(ReaderServer, Parent, Source, MissingRevsServer) -> case couch_rep_missing_revs:next(MissingRevsServer) of complete -> exit(complete); @@ -312,22 +313,23 @@ reader_loop(ReaderServer, Source, MissingRevsServer) -> #http_db{} -> [gen_server:call(ReaderServer, {open_remote_doc, Id, Seq, Revs}, infinity) || {Id,Seq,Revs} <- SortedIdsRevs], - reader_loop(ReaderServer, Source, MissingRevsServer); + reader_loop(ReaderServer, Parent, Source, MissingRevsServer); _Local -> - Source2 = maybe_reopen_db(Source, HighSeq), + {ok, Source1} = gen_server:call(Parent, get_source_db, infinity), + Source2 = maybe_reopen_db(Source1, HighSeq), lists:foreach(fun({Id,Seq,Revs}) -> {ok, Docs} = couch_db:open_doc_revs(Source2, Id, Revs, [latest]), JustTheDocs = [Doc || {ok, Doc} <- Docs], gen_server:call(ReaderServer, {add_docs, Seq, JustTheDocs}, infinity) end, SortedIdsRevs), - reader_loop(ReaderServer, Source2, MissingRevsServer) + couch_db:close(Source2), + reader_loop(ReaderServer, Parent, Source2, MissingRevsServer) end end. maybe_reopen_db(#db{update_seq=OldSeq} = Db, HighSeq) when HighSeq > OldSeq -> {ok, NewDb} = couch_db:open(Db#db.name, [{user_ctx, Db#db.user_ctx}]), - couch_db:close(Db), NewDb; maybe_reopen_db(Db, _HighSeq) -> Db. diff --git a/src/couchdb/couch_rep_writer.erl b/src/couchdb/couch_rep_writer.erl index dd6396fd..cf98ccfb 100644 --- a/src/couchdb/couch_rep_writer.erl +++ b/src/couchdb/couch_rep_writer.erl @@ -16,10 +16,10 @@ -include("couch_db.hrl"). -start_link(Parent, Target, Reader, _PostProps) -> - {ok, spawn_link(fun() -> writer_loop(Parent, Reader, Target) end)}. +start_link(Parent, _Target, Reader, _PostProps) -> + {ok, spawn_link(fun() -> writer_loop(Parent, Reader) end)}. -writer_loop(Parent, Reader, Target) -> +writer_loop(Parent, Reader) -> case couch_rep_reader:next(Reader) of {complete, nil} -> ok; @@ -28,6 +28,7 @@ writer_loop(Parent, Reader, Target) -> ok; {HighSeq, Docs} -> DocCount = length(Docs), + {ok, Target} = gen_server:call(Parent, get_target_db, infinity), try write_docs(Target, Docs) of {ok, []} -> Parent ! {update_stats, docs_written, DocCount}; @@ -48,7 +49,7 @@ writer_loop(Parent, Reader, Target) -> end, couch_rep_att:cleanup(), couch_util:should_flush(), - writer_loop(Parent, Reader, Target) + writer_loop(Parent, Reader) end. write_docs(#http_db{} = Db, Docs) -> diff --git a/test/etap/112-replication-missing-revs.t b/test/etap/112-replication-missing-revs.t index 750334b9..71971088 100755 --- a/test/etap/112-replication-missing-revs.t +++ b/test/etap/112-replication-missing-revs.t @@ -188,8 +188,21 @@ start_changes_feed(remote, Since, Continuous) -> Db = #http_db{url = "http://127.0.0.1:5984/etap-test-source/"}, couch_rep_changes_feed:start_link(self(), Db, Since, Props). +couch_rep_pid(Db) -> + spawn(fun() -> couch_rep_pid_loop(Db) end). 
+ +couch_rep_pid_loop(Db) -> + receive + {'$gen_call', From, get_target_db} -> + gen_server:reply(From, {ok, Db}) + end, + couch_rep_pid_loop(Db). + start_missing_revs(local, Changes) -> - couch_rep_missing_revs:start_link(self(), get_db(target), Changes, []); + TargetDb = get_db(target), + MainPid = couch_rep_pid(TargetDb), + couch_rep_missing_revs:start_link(MainPid, TargetDb, Changes, []); start_missing_revs(remote, Changes) -> - Db = #http_db{url = "http://127.0.0.1:5984/etap-test-target/"}, - couch_rep_missing_revs:start_link(self(), Db, Changes, []). + TargetDb = #http_db{url = "http://127.0.0.1:5984/etap-test-target/"}, + MainPid = couch_rep_pid(TargetDb), + couch_rep_missing_revs:start_link(MainPid, TargetDb, Changes, []). -- cgit v1.2.3 From 786dd5b875d42c7511572d0a56a3c8b914816b9a Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Fri, 19 Nov 2010 12:47:24 +0000 Subject: Updated CHANGES for release 1.0.2 and fixed one typo. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1036824 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGES b/CHANGES index be4c9786..e8527e43 100644 --- a/CHANGES +++ b/CHANGES @@ -14,7 +14,8 @@ Storage System: * Fix leaking file handles after compacting databases and views. * Fix databases forgetting their validation function after compaction. - * Fix occasional timeout errors after compacting large databases. + * Fix occasional timeout errors after successfully compacting large databases. + * Fix ocassional error when writing to a database that has just been compacted. * Fix occasional timeout errors on systems with slow or heavily loaded IO. Log System: @@ -30,12 +31,13 @@ HTTP Interface: Replicator: * Updated ibrowse library to 2.1.0 fixing numerous replication issues. - * Fix authenticated replication of design documents with attachments. - * Various fixes to make replicated more resilient for edge-cases. + * Fix authenticated replication (with HTTP basic auth) of design documents + with attachments. + * Various fixes to make replication more resilient for edge-cases. View Server: - * Don't trigger a view update when requesting `_design/doc/_info`. + * Don't trigger view updates when requesting `_design/doc/_info`. * Fix for circular references in CommonJS requires. Version 1.0.1 -- cgit v1.2.3 From 0e1b64d8288c97ef66f334eb272865b9347c3594 Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Fri, 19 Nov 2010 13:26:30 +0000 Subject: Undo overzealous escaping. Patch by Gabriel Farrell. Closes COUCHDB-948. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1036833 13f79535-47bb-0310-9956-ffa450edef68 --- THANKS | 1 + share/www/database.html | 2 +- share/www/document.html | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/THANKS b/THANKS index 68f8a4cd..5f57b196 100644 --- a/THANKS +++ b/THANKS @@ -68,5 +68,6 @@ suggesting improvements or submitting changes. Some of these people are: * Dale Harvey * Juuso Väänänen * Benjamin Young + * Gabriel Farrell For a list of authors see the `AUTHORS` file. diff --git a/share/www/database.html b/share/www/database.html index f1d8dbd6..2802ad78 100644 --- a/share/www/database.html +++ b/share/www/database.html @@ -37,7 +37,7 @@ specific language governing permissions and limitations under the License. 
$(function() { if (page.redirecting) return; - $("h1 strong").html('' + page.db.name + ''); + $("h1 strong").text(page.db.name); var viewPath = page.viewName || "_all_docs"; if (viewPath != "_temp_view" && viewPath != "_design_docs") { $("h1 a.raw").attr("href", "/" + encodeURIComponent(page.db.name) + diff --git a/share/www/document.html b/share/www/document.html index ed4fd4ee..b6f42018 100644 --- a/share/www/document.html +++ b/share/www/document.html @@ -42,9 +42,9 @@ specific language governing permissions and limitations under the License. }); $(function() { - $("h1 a.dbname").text(encodeURIComponent(page.dbName)) + $("h1 a.dbname").text(page.dbName) .attr("href", "database.html?" + encodeURIComponent(page.db.name)); - $("h1 strong").text(encodeURIComponent(page.docId).replace(/%2[Ff]/, "/")); + $("h1 strong").text(page.docId); $("h1 a.raw").attr("href", "/" + encodeURIComponent(page.db.name) + "/" + encodeURIComponent(page.docId)); page.updateFieldListing(); -- cgit v1.2.3 From c39629cfa9151bb1776b296f22adf87bcd00b6af Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Sat, 20 Nov 2010 12:41:26 +0000 Subject: Send a user friendly error message when rewrite rules are a String, not a JSON Array. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1037196 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/rewrite.js | 15 ++++++++++++--- src/couchdb/couch_httpd_rewrite.erl | 5 ++++- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/share/www/script/test/rewrite.js b/share/www/script/test/rewrite.js index 66b33d74..ff2d3822 100644 --- a/share/www/script/test/rewrite.js +++ b/share/www/script/test/rewrite.js @@ -365,7 +365,16 @@ couchTests.rewrite = function(debug) { T(result.uuids.length == 1); var first = result.uuids[0]; }); - }); - -} \ No newline at end of file + + // test invalid rewrites + // string + var ddoc = { + _id: "_design/invalid", + rewrites: "[{\"from\":\"foo\",\"to\":\"bar\"}]" + } + db.save(ddoc); + var res = CouchDB.request("GET", "/test_suite_db/_design/invalid/_rewrite/foo"); + TEquals(400, res.status, "should return 400"); + +} diff --git a/src/couchdb/couch_httpd_rewrite.erl b/src/couchdb/couch_httpd_rewrite.erl index ca4ac1f0..6c3d0e3c 100644 --- a/src/couchdb/couch_httpd_rewrite.erl +++ b/src/couchdb/couch_httpd_rewrite.erl @@ -126,7 +126,10 @@ handle_rewrite_req(#httpd{ case couch_util:get_value(<<"rewrites">>, Props) of undefined -> couch_httpd:send_error(Req, 404, <<"rewrite_error">>, - <<"Invalid path.">>); + <<"Invalid path.">>); + Bin when is_binary(Bin) -> + couch_httpd:send_error(Req, 400, <<"rewrite_error">>, + <<"Rewrite rules are a String. They must be a JSON Array.">>); Rules -> % create dispatch list from rules DispatchList = [make_rule(Rule) || {Rule} <- Rules], -- cgit v1.2.3 From eb5f9bf94e98b885c633491a643fbb452b6b3bde Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sat, 20 Nov 2010 14:02:45 +0000 Subject: Merged revision 1037213 from trunk: Bug fix: using undeclared identifier (doc). The function argument is named obj, not doc. 
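An aside on the rewrite-rules change a few commits above: the shape of that fix generalizes to validating any design document field before use, mapping each malformed shape to a specific HTTP error instead of letting a later pattern match crash. A hedged sketch, where validate_rewrites/1 is a hypothetical helper (make_rule/1 is the existing couch_httpd_rewrite function):

    validate_rewrites(undefined) ->
        {error, 404, <<"rewrite_error">>, <<"Invalid path.">>};
    validate_rewrites(Rules) when is_binary(Rules) ->
        %% the field was written as a JSON string, as in the new test case
        {error, 400, <<"rewrite_error">>,
            <<"Rewrite rules are a String. They must be a JSON Array.">>};
    validate_rewrites(Rules) when is_list(Rules) ->
        {ok, [make_rule(Rule) || {Rule} <- Rules]}.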
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1037215 13f79535-47bb-0310-9956-ffa450edef68 --- share/server/util.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/share/server/util.js b/share/server/util.js index a5dfa127..b1da6d60 100644 --- a/share/server/util.js +++ b/share/server/util.js @@ -98,8 +98,8 @@ var Couch = { // seal() is broken in current Spidermonkey seal(obj); for (var propname in obj) { - if (typeof doc[propname] == "object") { - recursivelySeal(doc[propname]); + if (typeof obj[propname] == "object") { + recursivelySeal(obj[propname]); } } } -- cgit v1.2.3 From a281bc40d6545e70e1aae9973e1a8a49aa119fea Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sat, 20 Nov 2010 15:56:40 +0000 Subject: Merged revision 1037245 from trunk: Fix recursive call: recursivelySeal is not defined within the body of the anonymous function declaration. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1037246 13f79535-47bb-0310-9956-ffa450edef68 --- share/server/util.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/share/server/util.js b/share/server/util.js index b1da6d60..71a36a29 100644 --- a/share/server/util.js +++ b/share/server/util.js @@ -99,7 +99,7 @@ var Couch = { seal(obj); for (var propname in obj) { if (typeof obj[propname] == "object") { - recursivelySeal(obj[propname]); + arguments.callee(obj[propname]); } } } -- cgit v1.2.3 From 882be279b64b291045701895d947f256309f5743 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sun, 21 Nov 2010 14:04:47 +0000 Subject: Merged revision 1037448 from trunk: Proper verification of the roles property of a user document. Closes COUCHDB-790. Thanks Gabriel Farrell. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1037449 13f79535-47bb-0310-9956-ffa450edef68 --- share/server/loop.js | 1 + share/server/util.js | 4 ++++ share/www/script/test/users_db.js | 23 ++++++++++++++++++++++- src/couchdb/couch_js_functions.hrl | 2 +- 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/share/server/loop.js b/share/server/loop.js index 300151e9..a988684f 100644 --- a/share/server/loop.js +++ b/share/server/loop.js @@ -26,6 +26,7 @@ function init_sandbox() { sandbox.start = Render.start; sandbox.send = Render.send; sandbox.getRow = Render.getRow; + sandbox.isArray = isArray; } catch (e) { log(e.toSource()); } diff --git a/share/server/util.js b/share/server/util.js index 71a36a29..1b57f041 100644 --- a/share/server/util.js +++ b/share/server/util.js @@ -124,3 +124,7 @@ function log(message) { } respond(["log", String(message)]); }; + +function isArray(obj) { + return toString.call(obj) === "[object Array]"; +} diff --git a/share/www/script/test/users_db.js b/share/www/script/test/users_db.js index 667ff3c1..1e13e5d7 100644 --- a/share/www/script/test/users_db.js +++ b/share/www/script/test/users_db.js @@ -90,6 +90,27 @@ couchTests.users_db = function(debug) { T(s.name == null); T(s.roles.indexOf("_admin") !== -1); T(usersDb.deleteDoc(jchrisWithConflict).ok); + + // you can't change doc from type "user" + jchrisUserDoc = usersDb.open(jchrisUserDoc._id); + jchrisUserDoc.type = "not user"; + try { + usersDb.save(jchrisUserDoc); + T(false && "should only allow us to save doc when type == 'user'"); + } catch(e) { + T(e.reason == "doc.type must be user"); + } + jchrisUserDoc.type = "user"; + + // "roles" must be an array + jchrisUserDoc.roles = "not an array"; + try { + usersDb.save(jchrisUserDoc); + T(false && "should only allow us 
to save doc when roles is an array"); + } catch(e) { + T(e.reason == "doc.roles must be an array"); + } + jchrisUserDoc.roles = []; }; usersDb.deleteDb(); @@ -100,4 +121,4 @@ couchTests.users_db = function(debug) { ); usersDb.deleteDb(); // cleanup -} \ No newline at end of file +} diff --git a/src/couchdb/couch_js_functions.hrl b/src/couchdb/couch_js_functions.hrl index 1f314f6e..32573a90 100644 --- a/src/couchdb/couch_js_functions.hrl +++ b/src/couchdb/couch_js_functions.hrl @@ -31,7 +31,7 @@ throw({forbidden: 'doc.name is required'}); } - if (!(newDoc.roles && (typeof newDoc.roles.length !== 'undefined'))) { + if (newDoc.roles && !isArray(newDoc.roles)) { throw({forbidden: 'doc.roles must be an array'}); } -- cgit v1.2.3 From 02dd55e08c2328201849cfda51f203a17885368d Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Tue, 23 Nov 2010 16:40:23 +0000 Subject: Merged revision 1038193 from trunk: Seal documents before passing them to map functions (JavaScript view server only). This prevents one map function from modifying a document before it's passed to another map function. Has no effect on array fields for some Spidermonkey versions (see https://bugzilla.mozilla.org/show_bug.cgi?id=449657). Closes COUCHDB-925. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1038196 13f79535-47bb-0310-9956-ffa450edef68 --- share/server/util.js | 8 ++- share/server/views.js | 13 +---- share/www/script/test/view_sandboxing.js | 88 ++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+), 13 deletions(-) diff --git a/share/server/util.js b/share/server/util.js index 1b57f041..c5e89f3d 100644 --- a/share/server/util.js +++ b/share/server/util.js @@ -96,7 +96,13 @@ var Couch = { }, recursivelySeal : function(obj) { // seal() is broken in current Spidermonkey - seal(obj); + try { + seal(obj); + } catch (x) { + // Sealing of arrays broken in some SpiderMonkey versions. + // https://bugzilla.mozilla.org/show_bug.cgi?id=449657 + return; + } for (var propname in obj) { if (typeof obj[propname] == "object") { arguments.callee(obj[propname]); diff --git a/share/server/views.js b/share/server/views.js index ffe63377..2a15ee56 100644 --- a/share/server/views.js +++ b/share/server/views.js @@ -105,19 +105,8 @@ var Views = (function() { // ] // - /* - Immutable document support temporarily removed. + Couch.recursivelySeal(doc); - Removed because the seal function no longer works on JS 1.8 arrays, - instead returning an error. The sealing is meant to prevent map - functions from modifying the same document that is passed to other map - functions. However, only map functions in the same design document are - run together, so we have a reasonable expectation they can trust each - other. Any map fun that can't be trusted can be placed in its own - design document, and it cannot affect other map functions. 
- - recursivelySeal(doc); // seal to prevent map functions from changing doc - */ var buf = []; for (var i = 0; i < State.funs.length; i++) { map_results = []; diff --git a/share/www/script/test/view_sandboxing.js b/share/www/script/test/view_sandboxing.js index 61b44954..02951d9f 100644 --- a/share/www/script/test/view_sandboxing.js +++ b/share/www/script/test/view_sandboxing.js @@ -49,4 +49,92 @@ couchTests.view_sandboxing = function(debug) { // the view server var results = db.query(function(doc) { map_results.push(1); emit(null, doc); }); T(results.total_rows == 0); + + // test for COUCHDB-925 + // altering 'doc' variable in map function affects other map functions + var ddoc = { + _id: "_design/foobar", + language: "javascript", + views: { + view1: { + map: + (function(doc) { + if (doc.values) { + doc.values = [666]; + } + if (doc.tags) { + doc.tags.push("qwerty"); + } + if (doc.tokens) { + doc.tokens["c"] = 3; + } + }).toString() + }, + view2: { + map: + (function(doc) { + if (doc.values) { + emit(doc._id, doc.values); + } + if (doc.tags) { + emit(doc._id, doc.tags); + } + if (doc.tokens) { + emit(doc._id, doc.tokens); + } + }).toString() + } + } + }; + var doc1 = { + _id: "doc1", + values: [1, 2, 3] + }; + var doc2 = { + _id: "doc2", + tags: ["foo", "bar"], + tokens: {a: 1, b: 2} + }; + + db.deleteDb(); + db.createDb(); + T(db.save(ddoc).ok); + T(db.save(doc1).ok); + T(db.save(doc2).ok); + + var view1Results = db.view( + "foobar/view1", {bypass_cache: Math.round(Math.random() * 1000)}); + var view2Results = db.view( + "foobar/view2", {bypass_cache: Math.round(Math.random() * 1000)}); + + TEquals(0, view1Results.rows.length, "view1 has 0 rows"); + TEquals(3, view2Results.rows.length, "view2 has 3 rows"); + + TEquals(doc1._id, view2Results.rows[0].key); + TEquals(doc2._id, view2Results.rows[1].key); + TEquals(doc2._id, view2Results.rows[2].key); + + // https://bugzilla.mozilla.org/show_bug.cgi?id=449657 + TEquals(3, view2Results.rows[0].value.length, + "Warning: installed SpiderMonkey version doesn't allow sealing of arrays"); + if (view2Results.rows[0].value.length === 3) { + TEquals(1, view2Results.rows[0].value[0]); + TEquals(2, view2Results.rows[0].value[1]); + TEquals(3, view2Results.rows[0].value[2]); + } + + TEquals(1, view2Results.rows[1].value["a"]); + TEquals(2, view2Results.rows[1].value["b"]); + TEquals('undefined', typeof view2Results.rows[1].value["c"], + "doc2.tokens object was not sealed"); + + TEquals(2, view2Results.rows[2].value.length, + "Warning: installed SpiderMonkey version doesn't allow sealing of arrays"); + if (view2Results.rows[2].value.length === 2) { + TEquals("foo", view2Results.rows[2].value[0]); + TEquals("bar", view2Results.rows[2].value[1]); + } + + // cleanup + db.deleteDb(); }; -- cgit v1.2.3 From 6f301a4b153ff24cf67ee7a718b096af31a6c68e Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Tue, 23 Nov 2010 18:52:25 +0000 Subject: Updated CHANGES for release 1.0.2 to reflect tickets COUCHDB-790 and COUCHDB-925. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1038275 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index e8527e43..1ba777c6 100644 --- a/CHANGES +++ b/CHANGES @@ -39,6 +39,8 @@ View Server: * Don't trigger view updates when requesting `_design/doc/_info`. * Fix for circular references in CommonJS requires. + * Made isArray() function available to functions executed in the query server. 
+ * Documents are now sealed before being passed to map functions. Version 1.0.1 ------------- -- cgit v1.2.3 From 4e20909c3c53e532215657c08848998583dfceff Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Wed, 24 Nov 2010 16:55:17 +0000 Subject: Updating the version for the 1.0.2 release. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1038693 13f79535-47bb-0310-9956-ffa450edef68 --- acinclude.m4.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/acinclude.m4.in b/acinclude.m4.in index fb92b3f4..6ba5a784 100644 --- a/acinclude.m4.in +++ b/acinclude.m4.in @@ -18,7 +18,7 @@ m4_define([LOCAL_PACKAGE_NAME], [Apache CouchDB]) m4_define([LOCAL_BUG_URI], [https://issues.apache.org/jira/browse/COUCHDB]) m4_define([LOCAL_VERSION_MAJOR], [1]) m4_define([LOCAL_VERSION_MINOR], [0]) -m4_define([LOCAL_VERSION_REVISION], [1]) +m4_define([LOCAL_VERSION_REVISION], [2]) m4_define([LOCAL_VERSION_STAGE], []) m4_define([LOCAL_VERSION_RELEASE], []) m4_define([LOCAL_VERSION_PRIMARY], -- cgit v1.2.3 -- cgit v1.2.3 From 8915ae339640551b8d603732ab5b901d66d842a2 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 25 Nov 2010 18:01:37 +0000 Subject: Merged revision 1039118 from trunk: Even if sealing of an array is not allowed in the installed SpiderMonkey version, seal its members. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1039121 13f79535-47bb-0310-9956-ffa450edef68 --- share/server/util.js | 1 - 1 file changed, 1 deletion(-) diff --git a/share/server/util.js b/share/server/util.js index c5e89f3d..b55480b9 100644 --- a/share/server/util.js +++ b/share/server/util.js @@ -101,7 +101,6 @@ var Couch = { } catch (x) { // Sealing of arrays broken in some SpiderMonkey versions. // https://bugzilla.mozilla.org/show_bug.cgi?id=449657 - return; } for (var propname in obj) { if (typeof obj[propname] == "object") { -- cgit v1.2.3 From a9e61d57893b2b967d0fa40f30ed8aed09732ee0 Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Thu, 25 Nov 2010 18:02:27 +0000 Subject: Update the signing key in SVN. Need to make sure that the tarball reflects the tag exactly. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1039124 13f79535-47bb-0310-9956-ffa450edef68 --- Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.am b/Makefile.am index 8e16baf6..65302879 100644 --- a/Makefile.am +++ b/Makefile.am @@ -140,7 +140,7 @@ distcheck-hook: distsign: distcheck check @# @@ unpack archive and run diff -r to double check missing files @# @@ does automake have anything that does this? - gpg --armor --detach-sig --default-key 8FBFCFBF \ + gpg --armor --detach-sig --default-key 43ECCEE1 \ < $(top_srcdir)/$(distdir).tar.gz \ > $(top_srcdir)/$(distdir).tar.gz.asc md5sum $(top_srcdir)/$(distdir).tar.gz \ -- cgit v1.2.3 From 23bcb4a09e504d7957dbf3af958e414ba2534e85 Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Sun, 5 Dec 2010 12:58:02 +0000 Subject: Drop version number after aborting the vote. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1042342 13f79535-47bb-0310-9956-ffa450edef68 --- acinclude.m4.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/acinclude.m4.in b/acinclude.m4.in index 6ba5a784..fb92b3f4 100644 --- a/acinclude.m4.in +++ b/acinclude.m4.in @@ -18,7 +18,7 @@ m4_define([LOCAL_PACKAGE_NAME], [Apache CouchDB]) m4_define([LOCAL_BUG_URI], [https://issues.apache.org/jira/browse/COUCHDB]) m4_define([LOCAL_VERSION_MAJOR], [1]) m4_define([LOCAL_VERSION_MINOR], [0]) -m4_define([LOCAL_VERSION_REVISION], [2]) +m4_define([LOCAL_VERSION_REVISION], [1]) m4_define([LOCAL_VERSION_STAGE], []) m4_define([LOCAL_VERSION_RELEASE], []) m4_define([LOCAL_VERSION_PRIMARY], -- cgit v1.2.3 From d3e93e9f7dfdbfcf6eb52688bf9ddeeba53194ea Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Tue, 7 Dec 2010 20:26:32 +0000 Subject: Merged revision 1043186 from trunk: Replicator improvement: send "unauthorized" error message instead of "db_not_found" when a remote endpoint can not be accessed due to authorization. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1043187 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/replication.js | 2 +- src/couchdb/couch_httpd_misc_handlers.erl | 4 ++- src/couchdb/couch_rep.erl | 46 +++++++++++++++++++++---------- src/couchdb/couch_rep_httpc.erl | 2 ++ 4 files changed, 37 insertions(+), 17 deletions(-) diff --git a/share/www/script/test/replication.js b/share/www/script/test/replication.js index 00773f5c..7cc1f823 100644 --- a/share/www/script/test/replication.js +++ b/share/www/script/test/replication.js @@ -667,7 +667,7 @@ couchTests.replication = function(debug) { ); T(false, "replication should have failed"); } catch(x) { - T(x.error === "db_not_found"); + T(x.error === "unauthorized"); } atts_ddoc_copy = dbB.open(atts_ddoc._id); diff --git a/src/couchdb/couch_httpd_misc_handlers.erl b/src/couchdb/couch_httpd_misc_handlers.erl index 13d770f1..db1b2ca1 100644 --- a/src/couchdb/couch_httpd_misc_handlers.erl +++ b/src/couchdb/couch_httpd_misc_handlers.erl @@ -101,7 +101,9 @@ handle_replicate_req(#httpd{method='POST'}=Req) -> end catch throw:{db_not_found, Msg} -> - send_json(Req, 404, {[{error, db_not_found}, {reason, Msg}]}) + send_json(Req, 404, {[{error, db_not_found}, {reason, Msg}]}); + throw:{unauthorized, Msg} -> + send_json(Req, 404, {[{error, unauthorized}, {reason, Msg}]}) end; handle_replicate_req(Req) -> send_method_not_allowed(Req, "POST"). diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl index ba387285..464bcfa0 100644 --- a/src/couchdb/couch_rep.erl +++ b/src/couchdb/couch_rep.erl @@ -108,8 +108,12 @@ get_result(Server, PostBody, UserCtx) -> end. init(InitArgs) -> - try do_init(InitArgs) - catch throw:{db_not_found, DbUrl} -> {stop, {db_not_found, DbUrl}} end. + try + do_init(InitArgs) + catch + throw:Error -> + {stop, Error} + end. 
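%% Editorial sketch, not part of the patch: catching every throw in init/1
%% works because OTP turns a {stop, Reason} return from init/1 into an
%% {error, Reason} result for the caller of gen_server:start_link/3. The
%% caller can then translate that tuple once, as start_replication_server/1
%% does in the next hunk, e.g.:
%%
%%     case gen_server:start_link(?MODULE, InitArgs, []) of
%%         {ok, Pid} ->
%%             Pid;
%%         {error, {unauthorized, DbUrl}} ->
%%             throw({unauthorized,
%%                 <<"unauthorized to access database ", DbUrl/binary>>});
%%         {error, {db_not_found, DbUrl}} ->
%%             throw({db_not_found, <<"could not open ", DbUrl/binary>>})
%%     end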
do_init([RepId, {PostProps}, UserCtx] = InitArgs) -> process_flag(trap_exit, true), @@ -314,13 +318,19 @@ start_replication_server(Replicator) -> ?LOG_DEBUG("replication ~p already running at ~p", [RepId, Pid]), Pid; {error, {db_not_found, DbUrl}} -> - throw({db_not_found, <<"could not open ", DbUrl/binary>>}) + throw({db_not_found, <<"could not open ", DbUrl/binary>>}); + {error, {unauthorized, DbUrl}} -> + throw({unauthorized, + <<"unauthorized to access database ", DbUrl/binary>>}) end; {error, {already_started, Pid}} -> ?LOG_DEBUG("replication ~p already running at ~p", [RepId, Pid]), Pid; {error, {{db_not_found, DbUrl}, _}} -> - throw({db_not_found, <<"could not open ", DbUrl/binary>>}) + throw({db_not_found, <<"could not open ", DbUrl/binary>>}); + {error, {{unauthorized, DbUrl}, _}} -> + throw({unauthorized, + <<"unauthorized to access database ", DbUrl/binary>>}) end. compare_replication_logs(SrcDoc, TgtDoc) -> @@ -561,18 +571,24 @@ open_db(<<"http://",_/binary>>=Url, _, ProxyParams, CreateTarget) -> open_db(<<"https://",_/binary>>=Url, _, ProxyParams, CreateTarget) -> open_db({[{<<"url">>,Url}]}, [], ProxyParams, CreateTarget); open_db(<>, UserCtx, _ProxyParams, CreateTarget) -> - case CreateTarget of - true -> - ok = couch_httpd:verify_is_server_admin(UserCtx), - couch_server:create(DbName, [{user_ctx, UserCtx}]); - false -> ok - end, + try + case CreateTarget of + true -> + ok = couch_httpd:verify_is_server_admin(UserCtx), + couch_server:create(DbName, [{user_ctx, UserCtx}]); + false -> + ok + end, - case couch_db:open(DbName, [{user_ctx, UserCtx}]) of - {ok, Db} -> - couch_db:monitor(Db), - Db; - {not_found, no_db_file} -> throw({db_not_found, DbName}) + case couch_db:open(DbName, [{user_ctx, UserCtx}]) of + {ok, Db} -> + couch_db:monitor(Db), + Db; + {not_found, no_db_file} -> + throw({db_not_found, DbName}) + end + catch throw:{unauthorized, _} -> + throw({unauthorized, DbName}) end. schedule_checkpoint(#state{checkpoint_scheduled = nil} = State) -> diff --git a/src/couchdb/couch_rep_httpc.erl b/src/couchdb/couch_rep_httpc.erl index b32e4c77..e3a4c4c9 100644 --- a/src/couchdb/couch_rep_httpc.erl +++ b/src/couchdb/couch_rep_httpc.erl @@ -98,6 +98,8 @@ db_exists(Req, CanonicalUrl, CreateDB) -> {ok, "302", RespHeaders, _} -> RedirectUrl = redirect_url(RespHeaders, Req#http_db.url), db_exists(Req#http_db{url = RedirectUrl}, CanonicalUrl); + {ok, "401", _, _} -> + throw({unauthorized, ?l2b(Url)}); Error -> ?LOG_DEBUG("DB at ~s could not be found because ~p", [Url, Error]), throw({db_not_found, ?l2b(Url)}) -- cgit v1.2.3 From 83c42e18687f227f95a7b262d4a307cfb973baac Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Tue, 7 Dec 2010 20:37:56 +0000 Subject: Merged revision 1023274 from trunk: Replicator: fix issues when a peer is accessible via SSL. Closes COUCHDB-491. 
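The new TLS settings live in the [replicator] section (full diff below). A sketch of what a replication endpoint ends up with, assuming hypothetical local.ini values; the option list mirrors what couch_rep_httpc:ssl_options/1 computes for an https URL on OTP R14 or newer:

    %% local.ini (example values, not shipped defaults):
    %%   [replicator]
    %%   verify_ssl_certificates = true
    %%   ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
    %%   ssl_certificate_max_depth = 3
    %%
    %% Resulting ibrowse options for an https peer under that config:
    example_ssl_options() ->
        [{is_ssl, true},
         {ssl_options, [{depth, 3},
                        {verify, verify_peer},
                        {cacertfile, "/etc/ssl/certs/ca-certificates.crt"}]}].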
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1043193 13f79535-47bb-0310-9956-ffa450edef68 --- etc/couchdb/default.ini.tpl.in | 8 +++++++- src/couchdb/couch_rep.erl | 5 ++++- src/couchdb/couch_rep_httpc.erl | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 2 deletions(-) diff --git a/etc/couchdb/default.ini.tpl.in b/etc/couchdb/default.ini.tpl.in index 6b70777e..bb7ccec6 100644 --- a/etc/couchdb/default.ini.tpl.in +++ b/etc/couchdb/default.ini.tpl.in @@ -121,4 +121,10 @@ compressible_types = text/*, application/javascript, application/json, applicat [replicator] max_http_sessions = 10 -max_http_pipeline_size = 10 \ No newline at end of file +max_http_pipeline_size = 10 +; set to true to validate peer certificates +verify_ssl_certificates = false +; file containing a list of peer trusted certificates (PEM format) +; ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt +; maximum peer certificate depth (must be set even if certificate validation is off) +ssl_certificate_max_depth = 3 diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl index 464bcfa0..c804b49d 100644 --- a/src/couchdb/couch_rep.erl +++ b/src/couchdb/couch_rep.erl @@ -564,7 +564,10 @@ open_db({Props}, _UserCtx, ProxyParams, CreateTarget) -> auth = AuthProps, headers = lists:ukeymerge(1, Headers, DefaultHeaders) }, - Db = Db1#http_db{options = Db1#http_db.options ++ ProxyParams}, + Db = Db1#http_db{ + options = Db1#http_db.options ++ ProxyParams ++ + couch_rep_httpc:ssl_options(Db1) + }, couch_rep_httpc:db_exists(Db, CreateTarget); open_db(<<"http://",_/binary>>=Url, _, ProxyParams, CreateTarget) -> open_db({[{<<"url">>,Url}]}, [], ProxyParams, CreateTarget); diff --git a/src/couchdb/couch_rep_httpc.erl b/src/couchdb/couch_rep_httpc.erl index e3a4c4c9..ee46a15e 100644 --- a/src/couchdb/couch_rep_httpc.erl +++ b/src/couchdb/couch_rep_httpc.erl @@ -16,6 +16,7 @@ -export([db_exists/1, db_exists/2, full_url/1, request/1, redirected_request/2, redirect_url/2, spawn_worker_process/1, spawn_link_worker_process/1]). +-export([ssl_options/1]). request(#http_db{} = Req) -> do_request(Req). @@ -246,3 +247,35 @@ oauth_header(Url, QS, Action, Props) -> Params = oauth:signed_params(Method, Url, QSL, Consumer, Token, TokenSecret) -- QSL, {"Authorization", "OAuth " ++ oauth_uri:params_to_header_string(Params)}. + +ssl_options(#http_db{url = Url}) -> + case ibrowse_lib:parse_url(Url) of + #url{protocol = https} -> + Depth = list_to_integer( + couch_config:get("replicator", "ssl_certificate_max_depth", "3") + ), + SslOpts = [{depth, Depth} | + case couch_config:get("replicator", "verify_ssl_certificates") of + "true" -> + ssl_verify_options(true); + _ -> + ssl_verify_options(false) + end], + [{is_ssl, true}, {ssl_options, SslOpts}]; + #url{protocol = http} -> + [] + end. + +ssl_verify_options(Value) -> + ssl_verify_options(Value, erlang:system_info(otp_release)). + +ssl_verify_options(true, OTPVersion) when OTPVersion >= "R14" -> + CAFile = couch_config:get("replicator", "ssl_trusted_certificates_file"), + [{verify, verify_peer}, {cacertfile, CAFile}]; +ssl_verify_options(false, OTPVersion) when OTPVersion >= "R14" -> + [{verify, verify_none}]; +ssl_verify_options(true, _OTPVersion) -> + CAFile = couch_config:get("replicator", "ssl_trusted_certificates_file"), + [{verify, 2}, {cacertfile, CAFile}]; +ssl_verify_options(false, _OTPVersion) -> + [{verify, 0}]. 
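One subtlety in ssl_verify_options/2 above: OTP releases are compared as plain strings, which orders correctly for releases of this era because Erlang compares strings element by element. Illustrative shell checks:

    1> "R14B" >= "R14".
    true
    2> "R13B04" >= "R14".
    false

So an R14B node takes the {verify, verify_peer} branch, while R13B04 falls back to the older numeric verify codes.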
-- cgit v1.2.3 From 268fbeb6ab1e3374127249fa1bea2cd50021bd6b Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 8 Dec 2010 16:11:12 +0000 Subject: Stem new rev tree before comparing with old one, COUCHDB-968 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1043476 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db_updater.erl | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl index e5c6019a..bbba5d4b 100644 --- a/src/couchdb/couch_db_updater.erl +++ b/src/couchdb/couch_db_updater.erl @@ -489,13 +489,13 @@ send_result(Client, Id, OriginalRevs, NewResult) -> % used to send a result to the client catch(Client ! {result, self(), {{Id, OriginalRevs}, NewResult}}). -merge_rev_trees(_MergeConflicts, [], [], AccNewInfos, AccRemoveSeqs, AccSeq) -> +merge_rev_trees(_Limit, _Merge, [], [], AccNewInfos, AccRemoveSeqs, AccSeq) -> {ok, lists:reverse(AccNewInfos), AccRemoveSeqs, AccSeq}; -merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList], +merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList], [OldDocInfo|RestOldInfo], AccNewInfos, AccRemoveSeqs, AccSeq) -> #full_doc_info{id=Id,rev_tree=OldTree,deleted=OldDeleted,update_seq=OldSeq} = OldDocInfo, - NewRevTree = lists:foldl( + NewRevTree0 = lists:foldl( fun({Client, #doc{revs={Pos,[_Rev|PrevRevs]}}=NewDoc}, AccTree) -> if not MergeConflicts -> case couch_key_tree:merge(AccTree, [couch_db:doc_to_tree(NewDoc)]) of @@ -548,10 +548,11 @@ merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList], end end, OldTree, NewDocs), + NewRevTree = couch_key_tree:stem(NewRevTree0, Limit), if NewRevTree == OldTree -> % nothing changed - merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo, AccNewInfos, - AccRemoveSeqs, AccSeq); + merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo, + AccNewInfos, AccRemoveSeqs, AccSeq); true -> % we have updated the document, give it a new seq # NewInfo = #full_doc_info{id=Id,update_seq=AccSeq+1,rev_tree=NewRevTree}, @@ -559,8 +560,8 @@ merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList], 0 -> AccRemoveSeqs; _ -> [OldSeq | AccRemoveSeqs] end, - merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo, - [NewInfo|AccNewInfos], RemoveSeqs, AccSeq+1) + merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo, + [NewInfo|AccNewInfos], RemoveSeqs, AccSeq+1) end. @@ -583,7 +584,8 @@ update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) -> #db{ fulldocinfo_by_id_btree = DocInfoByIdBTree, docinfo_by_seq_btree = DocInfoBySeqBTree, - update_seq = LastSeq + update_seq = LastSeq, + revs_limit = RevsLimit } = Db, Ids = [Id || [{_Client, #doc{id=Id}}|_] <- DocsList], % lookup up the old documents, if they exist. @@ -596,11 +598,9 @@ update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) -> end, Ids, OldDocLookups), % Merge the new docs into the revision trees. - {ok, NewDocInfos0, RemoveSeqs, NewSeq} = merge_rev_trees( + {ok, NewFullDocInfos, RemoveSeqs, NewSeq} = merge_rev_trees(RevsLimit, MergeConflicts, DocsList, OldDocInfos, [], [], LastSeq), - NewFullDocInfos = stem_full_doc_infos(Db, NewDocInfos0), - % All documents are now ready to write. 
{ok, Db2} = update_local_docs(Db, NonRepDocs), -- cgit v1.2.3 From 2b6049692b0b76fa60c1d0e1adc99fe7b0888054 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 8 Dec 2010 16:11:19 +0000 Subject: Prefer values from old tree when merging, COUCHDB-968 This commit represents a substantial refactor of the key tree merging logic, some of which is not strictly necessary for the resolution of COUCHDB-968. Two etap test cases checking the ability to merge in a non-linear tree are removed because the functionality is no longer supported. CouchDB only ever merged a linear revision history into an existing revision tree. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1043478 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_key_tree.erl | 91 ++++++++++++++++++++---------------------- test/etap/060-kt-merging.t | 14 +------ 2 files changed, 45 insertions(+), 60 deletions(-) diff --git a/src/couchdb/couch_key_tree.erl b/src/couchdb/couch_key_tree.erl index 4fe09bf3..bc3775c4 100644 --- a/src/couchdb/couch_key_tree.erl +++ b/src/couchdb/couch_key_tree.erl @@ -43,50 +43,53 @@ merge(A, B) -> merge_one([], Insert, OutAcc, ConflictsAcc) -> {ok, [Insert | OutAcc], ConflictsAcc}; -merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, OutAcc, ConflictsAcc) -> - if Start =< StartInsert -> - StartA = Start, - StartB = StartInsert, - TreeA = Tree, - TreeB = TreeInsert; - true -> - StartB = Start, - StartA = StartInsert, - TreeB = Tree, - TreeA = TreeInsert - end, - case merge_at([TreeA], StartB - StartA, TreeB) of - {ok, [CombinedTrees], Conflicts} -> - merge_one(Rest, {StartA, CombinedTrees}, OutAcc, Conflicts or ConflictsAcc); +merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, Acc, HasConflicts) -> + case merge_at([Tree], StartInsert - Start, [TreeInsert]) of + {ok, [Merged], Conflicts} -> + MergedStart = lists:min([Start, StartInsert]), + merge_one(Rest, {MergedStart, Merged}, Acc, Conflicts or HasConflicts); no -> - merge_one(Rest, {StartB, TreeB}, [{StartA, TreeA} | OutAcc], ConflictsAcc) + AccOut = [{Start, Tree} | Acc], + merge_one(Rest, {StartInsert, TreeInsert}, AccOut, HasConflicts) end. 
+merge_at(_Ours, _Place, []) -> + no; merge_at([], _Place, _Insert) -> no; -merge_at([{Key, Value, SubTree}|Sibs], 0, {InsertKey, InsertValue, InsertSubTree}) -> - if Key == InsertKey -> - {Merge, Conflicts} = merge_simple(SubTree, InsertSubTree), - {ok, [{Key, Value, Merge} | Sibs], Conflicts}; - true -> - case merge_at(Sibs, 0, {InsertKey, InsertValue, InsertSubTree}) of - {ok, Merged, Conflicts} -> - {ok, [{Key, Value, SubTree} | Merged], Conflicts}; - no -> - no - end - end; -merge_at([{Key, Value, SubTree}|Sibs], Place, Insert) -> - case merge_at(SubTree, Place - 1,Insert) of +merge_at([{Key, Value, SubTree}|Sibs], Place, InsertTree) when Place > 0 -> + % inserted starts later than committed, need to drill into committed subtree + case merge_at(SubTree, Place - 1, InsertTree) of {ok, Merged, Conflicts} -> {ok, [{Key, Value, Merged} | Sibs], Conflicts}; no -> - case merge_at(Sibs, Place, Insert) of + case merge_at(Sibs, Place, InsertTree) of {ok, Merged, Conflicts} -> {ok, [{Key, Value, SubTree} | Merged], Conflicts}; no -> no end + end; +merge_at(OurTree, Place, [{Key, Value, SubTree}]) when Place < 0 -> + % inserted starts earlier than committed, need to drill into insert subtree + case merge_at(OurTree, Place + 1, SubTree) of + {ok, Merged, Conflicts} -> + {ok, [{Key, Value, Merged}], Conflicts}; + no -> + no + end; +merge_at([{Key, Value, SubTree}|Sibs], 0, [{Key, _Value, InsertSubTree}]) -> + {Merged, Conflicts} = merge_simple(SubTree, InsertSubTree), + {ok, [{Key, Value, Merged} | Sibs], Conflicts}; +merge_at([{OurKey, _, _} | _], 0, [{Key, _, _}]) when OurKey > Key -> + % siblings keys are ordered, no point in continuing + no; +merge_at([Tree | Sibs], 0, InsertTree) -> + case merge_at(Sibs, 0, InsertTree) of + {ok, Merged, Conflicts} -> + {ok, [Tree | Merged], Conflicts}; + no -> + no end. % key tree functions @@ -94,22 +97,16 @@ merge_simple([], B) -> {B, false}; merge_simple(A, []) -> {A, false}; -merge_simple([ATree | ANextTree], [BTree | BNextTree]) -> - {AKey, AValue, ASubTree} = ATree, - {BKey, _BValue, BSubTree} = BTree, - if - AKey == BKey -> - %same key - {MergedSubTree, Conflict1} = merge_simple(ASubTree, BSubTree), - {MergedNextTree, Conflict2} = merge_simple(ANextTree, BNextTree), - {[{AKey, AValue, MergedSubTree} | MergedNextTree], Conflict1 or Conflict2}; - AKey < BKey -> - {MTree, _} = merge_simple(ANextTree, [BTree | BNextTree]), - {[ATree | MTree], true}; - true -> - {MTree, _} = merge_simple([ATree | ANextTree], BNextTree), - {[BTree | MTree], true} - end. +merge_simple([{Key, Value, SubA} | NextA], [{Key, _, SubB} | NextB]) -> + {MergedSubTree, Conflict1} = merge_simple(SubA, SubB), + {MergedNextTree, Conflict2} = merge_simple(NextA, NextB), + {[{Key, Value, MergedSubTree} | MergedNextTree], Conflict1 or Conflict2}; +merge_simple([{A, _, _} = Tree | Next], [{B, _, _} | _] = Insert) when A < B -> + {Merged, _} = merge_simple(Next, Insert), + {[Tree | Merged], true}; +merge_simple(Ours, [Tree | Next]) -> + {Merged, _} = merge_simple(Ours, Next), + {[Tree | Merged], true}. find_missing(_Tree, []) -> []; diff --git a/test/etap/060-kt-merging.t b/test/etap/060-kt-merging.t index d6b13d6d..73744e52 100755 --- a/test/etap/060-kt-merging.t +++ b/test/etap/060-kt-merging.t @@ -15,7 +15,7 @@ main(_) -> test_util:init_code_path(), - etap:plan(16), + etap:plan(14), case (catch test()) of ok -> etap:end_tests(); @@ -88,24 +88,12 @@ test() -> "Merging a tree with a stem." 
), - etap:is( - {TwoChildSibs, no_conflicts}, - couch_key_tree:merge(Stemmed1b, TwoChildSibs), - "Merging in the opposite direction." - ), - etap:is( {TwoChildSibs2, no_conflicts}, couch_key_tree:merge(TwoChildSibs2, Stemmed1bb), "Merging a stem at a deeper level." ), - etap:is( - {TwoChildSibs2, no_conflicts}, - couch_key_tree:merge(Stemmed1bb, TwoChildSibs2), - "Merging a deeper level in opposite order." - ), - etap:is( {TwoChild, no_conflicts}, couch_key_tree:merge(TwoChild, Stemmed1aa), -- cgit v1.2.3 From 3191597a70d955369d5cb3df9f602dd0e2b61bc0 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 8 Dec 2010 16:11:25 +0000 Subject: Usort the infos during compaction to remove dupes, COUCHDB-968 This is not a bulletproof solution; it only removes dupes when they appear in the same batch of 1000 updates. However, for dupes that show up in _all_docs the probability of that happening is quite high. If the dupes are only in _changes a user may need to compact twice, once to get the dupes ordered together and a second time to remove them. A more complete solution would be to trigger the compaction in "retry" mode, but this is significantly slower. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1043479 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db_updater.erl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl index bbba5d4b..8630ff4e 100644 --- a/src/couchdb/couch_db_updater.erl +++ b/src/couchdb/couch_db_updater.erl @@ -775,7 +775,10 @@ copy_rev_tree_attachments(SrcDb, DestFd, Tree) -> end, Tree). -copy_docs(Db, #db{fd=DestFd}=NewDb, InfoBySeq, Retry) -> +copy_docs(Db, #db{fd=DestFd}=NewDb, InfoBySeq0, Retry) -> + % COUCHDB-968, make sure we prune duplicates during compaction + InfoBySeq = lists:usort(fun(#doc_info{id=A}, #doc_info{id=B}) -> A =< B end, + InfoBySeq0), Ids = [Id || #doc_info{id=Id} <- InfoBySeq], LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids), -- cgit v1.2.3 From ae228c7f3177fd79c5dc38b53802999be4152e04 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 8 Dec 2010 16:11:33 +0000 Subject: Change key_tree merge to take path as 2nd arg, add type specs git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1043480 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db.erl | 2 +- src/couchdb/couch_db_updater.erl | 6 +-- src/couchdb/couch_key_tree.erl | 36 +++++++++--------- test/etap/060-kt-merging.t | 81 +++++++++++++++++----------------------- 4 files changed, 56 insertions(+), 69 deletions(-) diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index 964c4704..27a3953b 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -555,7 +555,7 @@ prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldI {ok, #full_doc_info{rev_tree=OldTree}} -> NewRevTree = lists:foldl( fun(NewDoc, AccTree) -> - {NewTree, _} = couch_key_tree:merge(AccTree, [couch_db:doc_to_tree(NewDoc)]), + {NewTree, _} = couch_key_tree:merge(AccTree, couch_db:doc_to_tree(NewDoc)), NewTree end, OldTree, Bucket), diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl index 8630ff4e..bd404c64 100644 --- a/src/couchdb/couch_db_updater.erl +++ b/src/couchdb/couch_db_updater.erl @@ -498,7 +498,7 @@ merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList], NewRevTree0 = lists:foldl( fun({Client, #doc{revs={Pos,[_Rev|PrevRevs]}}=NewDoc}, AccTree) -> if not
MergeConflicts -> - case couch_key_tree:merge(AccTree, [couch_db:doc_to_tree(NewDoc)]) of + case couch_key_tree:merge(AccTree, couch_db:doc_to_tree(NewDoc)) of {_NewTree, conflicts} when (not OldDeleted) -> send_result(Client, Id, {Pos-1,PrevRevs}, conflict), AccTree; @@ -529,7 +529,7 @@ merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList], NewDoc#doc{revs={OldPos, [OldRev]}}), NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}}, {NewTree2, _} = couch_key_tree:merge(AccTree, - [couch_db:doc_to_tree(NewDoc2)]), + couch_db:doc_to_tree(NewDoc2)), % we changed the rev id, this tells the caller we did send_result(Client, Id, {Pos-1,PrevRevs}, {ok, {OldPos + 1, NewRevId}}), @@ -543,7 +543,7 @@ merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList], end; true -> {NewTree, _} = couch_key_tree:merge(AccTree, - [couch_db:doc_to_tree(NewDoc)]), + couch_db:doc_to_tree(NewDoc)), NewTree end end, diff --git a/src/couchdb/couch_key_tree.erl b/src/couchdb/couch_key_tree.erl index bc3775c4..985aebc0 100644 --- a/src/couchdb/couch_key_tree.erl +++ b/src/couchdb/couch_key_tree.erl @@ -16,31 +16,27 @@ -export([map/2, get_all_leafs/1, count_leafs/1, remove_leafs/2, get_all_leafs_full/1,stem/2,map_leafs/2]). -% a key tree looks like this: -% Tree -> [] or [{Key, Value, ChildTree} | SiblingTree] -% ChildTree -> Tree -% SiblingTree -> [] or [{SiblingKey, Value, Tree} | Tree] -% And each Key < SiblingKey - +% Tree::term() is really a tree(), but we don't want to require R13B04 yet +-type branch() :: {Key::term(), Value::term(), Tree::term()}. +-type path() :: {Start::pos_integer(), branch()}. +-type tree() :: [branch()]. % sorted by key % partial trees arranged by how much they are cut off. -merge(A, B) -> - {Merged, HasConflicts} = - lists:foldl( - fun(InsertTree, {AccTrees, AccConflicts}) -> - {ok, Merged, Conflicts} = merge_one(AccTrees, InsertTree, [], false), - {Merged, Conflicts or AccConflicts} - end, - {A, false}, B), - if HasConflicts or - ((length(Merged) =/= length(A)) and (length(Merged) =/= length(B))) -> +-spec merge([path()], path()) -> {[path()], conflicts | no_conflicts}. +merge(Paths, Path) -> + {ok, Merged, HasConflicts} = merge_one(Paths, Path, [], false), + if HasConflicts -> + Conflicts = conflicts; + (length(Merged) =/= length(Paths)) and (length(Merged) =/= 1) -> Conflicts = conflicts; true -> Conflicts = no_conflicts end, {lists:sort(Merged), Conflicts}. +-spec merge_one(Original::[path()], Inserted::path(), [path()], bool()) -> + {ok, Merged::[path()], NewConflicts::bool()}. merge_one([], Insert, OutAcc, ConflictsAcc) -> {ok, [Insert | OutAcc], ConflictsAcc}; merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, Acc, HasConflicts) -> @@ -53,6 +49,8 @@ merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, Acc, HasConflicts) -> merge_one(Rest, {StartInsert, TreeInsert}, AccOut, HasConflicts) end. +-spec merge_at(tree(), Place::integer(), tree()) -> + {ok, Merged::tree(), HasConflicts::bool()} | no. merge_at(_Ours, _Place, []) -> no; merge_at([], _Place, _Insert) -> @@ -93,6 +91,8 @@ merge_at([Tree | Sibs], 0, InsertTree) -> end. % key tree functions + +-spec merge_simple(tree(), tree()) -> {Merged::tree(), NewConflicts::bool()}. 
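%% Editorial example, not part of the patch: with the new signature the
%% second argument is a single path, typically one replicated revision
%% history, merged into the existing list of paths. Reusing fixtures from
%% the updated 060-kt-merging.t below:
%%
%%     OneChild  = {0, {"1", "foo", [{"1a", "bar", []}]}},
%%     Stemmed1a = {1, {"1a", "bar", [{"1aa", "bar", []}]}},
%%     {[{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
%%      no_conflicts} = couch_key_tree:merge([OneChild], Stemmed1a),
%%
%% the stemmed path lines up under the existing "1a" branch, so the trees
%% unify instead of producing a conflicting sibling.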
merge_simple([], B) -> {B, false}; merge_simple(A, []) -> @@ -156,7 +156,7 @@ remove_leafs(Trees, Keys) -> fun({PathPos, Path},TreeAcc) -> [SingleTree] = lists:foldl( fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path), - {NewTrees, _} = merge(TreeAcc, [{PathPos + 1 - length(Path), SingleTree}]), + {NewTrees, _} = merge(TreeAcc, {PathPos + 1 - length(Path), SingleTree}), NewTrees end, [], FilteredPaths), {NewTree, RemovedKeys}. @@ -318,7 +318,7 @@ stem(Trees, Limit) -> fun({PathPos, Path},TreeAcc) -> [SingleTree] = lists:foldl( fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path), - {NewTrees, _} = merge(TreeAcc, [{PathPos + 1 - length(Path), SingleTree}]), + {NewTrees, _} = merge(TreeAcc, {PathPos + 1 - length(Path), SingleTree}), NewTrees end, [], Paths2). diff --git a/test/etap/060-kt-merging.t b/test/etap/060-kt-merging.t index 73744e52..5a8571ac 100755 --- a/test/etap/060-kt-merging.t +++ b/test/etap/060-kt-merging.t @@ -15,7 +15,7 @@ main(_) -> test_util:init_code_path(), - etap:plan(14), + etap:plan(12), case (catch test()) of ok -> etap:end_tests(); @@ -26,101 +26,88 @@ main(_) -> ok. test() -> - EmptyTree = [], - One = [{0, {"1","foo",[]}}], + One = {0, {"1","foo",[]}}, TwoSibs = [{0, {"1","foo",[]}}, {0, {"2","foo",[]}}], - OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}], - TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}], - TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, - {"1b", "bar", []}]}}], - TwoChildSibs2 = [{0, {"1","foo", [{"1a", "bar", []}, - {"1b", "bar", [{"1bb", "boo", []}]}]}}], - Stemmed1b = [{1, {"1a", "bar", []}}], - Stemmed1a = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}], - Stemmed1aa = [{2, {"1aa", "bar", []}}], - Stemmed1bb = [{2, {"1bb", "boo", []}}], + OneChild = {0, {"1","foo",[{"1a", "bar", []}]}}, + TwoChild = {0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}, + TwoChildSibs = {0, {"1","foo", [{"1a", "bar", []}, + {"1b", "bar", []}]}}, + TwoChildSibs2 = {0, {"1","foo", [{"1a", "bar", []}, + {"1b", "bar", [{"1bb", "boo", []}]}]}}, + Stemmed1b = {1, {"1a", "bar", []}}, + Stemmed1a = {1, {"1a", "bar", [{"1aa", "bar", []}]}}, + Stemmed1aa = {2, {"1aa", "bar", []}}, + Stemmed1bb = {2, {"1bb", "boo", []}}, etap:is( - {EmptyTree, no_conflicts}, - couch_key_tree:merge(EmptyTree, EmptyTree), - "Merging two empty trees yields an empty tree." - ), - - etap:is( - {One, no_conflicts}, - couch_key_tree:merge(EmptyTree, One), + {[One], no_conflicts}, + couch_key_tree:merge([], One), "The empty tree is the identity for merge." ), - etap:is( - {One, no_conflicts}, - couch_key_tree:merge(One, EmptyTree), - "Merging is commutative." - ), - etap:is( {TwoSibs, no_conflicts}, - couch_key_tree:merge(One, TwoSibs), + couch_key_tree:merge(TwoSibs, One), "Merging a prefix of a tree with the tree yields the tree." ), etap:is( - {One, no_conflicts}, - couch_key_tree:merge(One, One), + {[One], no_conflicts}, + couch_key_tree:merge([One], One), "Merging is reflexive." ), etap:is( - {TwoChild, no_conflicts}, - couch_key_tree:merge(TwoChild, TwoChild), + {[TwoChild], no_conflicts}, + couch_key_tree:merge([TwoChild], TwoChild), "Merging two children is still reflexive." 
), etap:is( - {TwoChildSibs, no_conflicts}, - couch_key_tree:merge(TwoChildSibs, TwoChildSibs), + {[TwoChildSibs], no_conflicts}, + couch_key_tree:merge([TwoChildSibs], TwoChildSibs), "Merging a tree to itself is itself."), etap:is( - {TwoChildSibs, no_conflicts}, - couch_key_tree:merge(TwoChildSibs, Stemmed1b), + {[TwoChildSibs], no_conflicts}, + couch_key_tree:merge([TwoChildSibs], Stemmed1b), "Merging a tree with a stem." ), etap:is( - {TwoChildSibs2, no_conflicts}, - couch_key_tree:merge(TwoChildSibs2, Stemmed1bb), + {[TwoChildSibs2], no_conflicts}, + couch_key_tree:merge([TwoChildSibs2], Stemmed1bb), "Merging a stem at a deeper level." ), etap:is( - {TwoChild, no_conflicts}, - couch_key_tree:merge(TwoChild, Stemmed1aa), + {[TwoChild], no_conflicts}, + couch_key_tree:merge([TwoChild], Stemmed1aa), "Merging a single tree with a deeper stem." ), etap:is( - {TwoChild, no_conflicts}, - couch_key_tree:merge(TwoChild, Stemmed1a), + {[TwoChild], no_conflicts}, + couch_key_tree:merge([TwoChild], Stemmed1a), "Merging a larger stem." ), etap:is( - {Stemmed1a, no_conflicts}, - couch_key_tree:merge(Stemmed1a, Stemmed1aa), + {[Stemmed1a], no_conflicts}, + couch_key_tree:merge([Stemmed1a], Stemmed1aa), "More merging." ), - Expect1 = OneChild ++ Stemmed1aa, + Expect1 = [OneChild, Stemmed1aa], etap:is( {Expect1, conflicts}, - couch_key_tree:merge(OneChild, Stemmed1aa), + couch_key_tree:merge([OneChild], Stemmed1aa), "Merging should create conflicts." ), etap:is( - {TwoChild, no_conflicts}, + {[TwoChild], no_conflicts}, couch_key_tree:merge(Expect1, TwoChild), "Merge should have no conflicts." ), -- cgit v1.2.3 From 2f5e45e43780f4a479f2413fbb5cbbde23e77f3a Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 8 Dec 2010 17:42:08 +0000 Subject: Fix compactor crash when .compact file is missing header Users affected by COUCHDB-968 can take advantage of this patch to force a compaction in retry mode and thus remove the duplicates. Simply create an empty dbname.couch.compact file in the database_dir before triggering the compaction for dbname. 
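The repair procedure described above is easy to script. A hedged sketch, with example paths and an example database name rather than anything from the patch: create the empty .compact file next to the database file, then trigger compaction as usual.

    %% From an attached Erlang shell or escript; assumes the default
    %% database_dir and a database named "dbname".
    force_retry_compaction() ->
        ok = file:write_file("/var/lib/couchdb/dbname.couch.compact", <<>>).
    %% afterwards, e.g.:
    %%   curl -X POST http://127.0.0.1:5984/dbname/_compact \
    %%        -H "Content-Type: application/json"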
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1043543 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db_updater.erl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl index bd404c64..633ae230 100644 --- a/src/couchdb/couch_db_updater.erl +++ b/src/couchdb/couch_db_updater.erl @@ -869,7 +869,12 @@ start_copy_compact(#db{name=Name,filepath=Filepath}=Db) -> {ok, Fd} -> couch_task_status:add_task(<<"Database Compaction">>, <<Name/binary, " retry">>, <<"Starting">>), Retry = true, - {ok, Header} = couch_file:read_header(Fd); + case couch_file:read_header(Fd) of + {ok, Header} -> + ok; + no_valid_header -> + ok = couch_file:write_header(Fd, Header=#db_header{}) + end; {error, enoent} -> couch_task_status:add_task(<<"Database Compaction">>, Name, <<"Starting">>), {ok, Fd} = couch_file:open(CompactFile, [create]), -- cgit v1.2.3 From ee5887d79531336d81f06cb052f3ab809ac65685 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 8 Dec 2010 18:54:54 +0000 Subject: update CHANGES and NEWS for COUCHDB-968 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1043600 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 3 +++ NEWS | 2 ++ 2 files changed, 5 insertions(+) diff --git a/CHANGES b/CHANGES index 1ba777c6..062fc250 100644 --- a/CHANGES +++ b/CHANGES @@ -17,6 +17,9 @@ Storage System: * Fix occasional timeout errors after successfully compacting large databases. * Fix ocassional error when writing to a database that has just been compacted. * Fix occasional timeout errors on systems with slow or heavily loaded IO. + * Fix for frequently edited documents in multi-master deployments being + duplicated in _changes and _all_docs. See COUCHDDB-968 for details on how + to repair. Log System: diff --git a/NEWS b/NEWS index c71a74ff..bed858b1 100644 --- a/NEWS +++ b/NEWS @@ -23,6 +23,8 @@ Note: This version has not been released yet. * Various fixes to make replicated more resilient for edge-cases. * Don't trigger a view update when requesting `_design/doc/_info`. * Fix for circular references in CommonJS requires. + * Fix for frequently edited documents in multi-master deployments being + duplicated in _changes and _all_docs. Version 1.0.1 ------------- -- cgit v1.2.3 From dbfddc84d17e18d157a7e7e319acc000a206267b Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Fri, 10 Dec 2010 11:03:48 +0000 Subject: Merged revision 1043524 from trunk Calculate and verify MD5 digests outside of a couch_file server This has a significant positive impact on the performance, both for readers and writers, when there are several requests in parallel accessing the same database or view index file.
$ node tests/compare_write_and_read.js --wclients 100 --rclients 200 \ -name1 md5_out -name2 trunk \ -url1 http://localhost:5984/ -url2 http://localhost:5985/ \ --duration 120 run 1) http://graphs.mikeal.couchone.com/#/graph/5c859b3e7d1b9bd0488cfe271105130c run 2) http://graphs.mikeal.couchone.com/#/graph/5c859b3e7d1b9bd0488cfe2711051bba Closes COUCHDB-980 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1044284 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_file.erl | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/src/couchdb/couch_file.erl b/src/couchdb/couch_file.erl index 0a891712..fbfd6c6a 100644 --- a/src/couchdb/couch_file.erl +++ b/src/couchdb/couch_file.erl @@ -120,7 +120,19 @@ pread_binary(Fd, Pos) -> pread_iolist(Fd, Pos) -> - gen_server:call(Fd, {pread_iolist, Pos}, infinity). + case gen_server:call(Fd, {pread_iolist, Pos}, infinity) of + {ok, IoList, <<>>} -> + {ok, IoList}; + {ok, IoList, Md5} -> + case couch_util:md5(IoList) of + Md5 -> + {ok, IoList}; + _ -> + exit({file_corruption, <<"file corruption">>}) + end; + Error -> + Error + end. %%---------------------------------------------------------------------- %% Purpose: The length of a file, in bytes. @@ -298,15 +310,10 @@ handle_call({pread_iolist, Pos}, _From, File) -> <<1:1/integer,Len:31/integer>> -> % an MD5-prefixed term {Md5AndIoList, _} = read_raw_iolist_int(File, NextPos, Len+16), {Md5, IoList} = extract_md5(Md5AndIoList), - case couch_util:md5(IoList) of - Md5 -> - {reply, {ok, IoList}, File}; - _ -> - {stop, file_corruption, {error,file_corruption}, File} - end; + {reply, {ok, IoList, Md5}, File}; <<0:1/integer,Len:31/integer>> -> {Iolist, _} = read_raw_iolist_int(File, NextPos, Len), - {reply, {ok, Iolist}, File} + {reply, {ok, Iolist, <<>>}, File} end; handle_call({pread, Pos, Bytes}, _From, #file{fd=Fd,tail_append_begin=TailAppendBegin}=File) -> {ok, Bin} = file:pread(Fd, Pos, Bytes), -- cgit v1.2.3 From f7e889f785652f9b24128267e1f9c01643971403 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Fri, 10 Dec 2010 11:15:55 +0000 Subject: Updated CHANGES for 1.0.2 to reflect COUCHDB-980 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1044290 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 062fc250..213287e3 100644 --- a/CHANGES +++ b/CHANGES @@ -20,6 +20,8 @@ Storage System: * Fix for frequently edited documents in multi-master deployments being duplicated in _changes and _all_docs. See COUCHDDB-968 for details on how to repair. + * Significantly higher read and write throughput against database and + view index files. Log System: -- cgit v1.2.3 From 4ad46e6122a4c7731264bd11a920eb0ec76348b8 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 14 Dec 2010 20:31:46 +0000 Subject: Fix OOME when compacting doc w/ lots of conflicts, COUCHDB-888 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1049257 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db_updater.erl | 31 ++++++++----------------------- 1 file changed, 8 insertions(+), 23 deletions(-) diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl index 633ae230..2cce4b69 100644 --- a/src/couchdb/couch_db_updater.erl +++ b/src/couchdb/couch_db_updater.erl @@ -765,16 +765,6 @@ copy_doc_attachments(#db{fd=SrcFd}=SrcDb, {Pos,_RevId}, SrcSp, DestFd) -> end, BinInfos), {BodyData, NewBinInfos}. 
-copy_rev_tree_attachments(SrcDb, DestFd, Tree) -> - couch_key_tree:map( - fun(Rev, {IsDel, Sp, Seq}, leaf) -> - DocBody = copy_doc_attachments(SrcDb, Rev, Sp, DestFd), - {IsDel, DocBody, Seq}; - (_, _, branch) -> - ?REV_MISSING - end, Tree). - - copy_docs(Db, #db{fd=DestFd}=NewDb, InfoBySeq0, Retry) -> % COUCHDB-968, make sure we prune duplicates during compaction InfoBySeq = lists:usort(fun(#doc_info{id=A}, #doc_info{id=B}) -> A =< B end, @@ -782,22 +772,17 @@ copy_docs(Db, #db{fd=DestFd}=NewDb, InfoBySeq0, Retry) -> Ids = [Id || #doc_info{id=Id} <- InfoBySeq], LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids), - % write out the attachments - NewFullDocInfos0 = lists:map( - fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) -> - Info#full_doc_info{rev_tree=copy_rev_tree_attachments(Db, DestFd, RevTree)} - end, LookupResults), - % write out the docs - % we do this in 2 stages so the docs are written out contiguously, making - % view indexing and replication faster. NewFullDocInfos1 = lists:map( - fun(#full_doc_info{rev_tree=RevTree}=Info) -> - Info#full_doc_info{rev_tree=couch_key_tree:map_leafs( - fun(_Key, {IsDel, DocBody, Seq}) -> + fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) -> + Info#full_doc_info{rev_tree=couch_key_tree:map( + fun(Rev, {IsDel, Sp, Seq}, leaf) -> + DocBody = copy_doc_attachments(Db, Rev, Sp, DestFd), {ok, Pos} = couch_file:append_term_md5(DestFd, DocBody), - {IsDel, Pos, Seq} + {IsDel, Pos, Seq}; + (_, _, branch) -> + ?REV_MISSING end, RevTree)} - end, NewFullDocInfos0), + end, LookupResults), NewFullDocInfos = stem_full_doc_infos(Db, NewFullDocInfos1), NewDocInfos = [couch_doc:to_doc_info(Info) || Info <- NewFullDocInfos], -- cgit v1.2.3 From 13080df6ef65af7fb70ce92c45e5dd549c4c662e Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sat, 18 Dec 2010 13:25:14 +0000 Subject: Merged revision 1050633 from trunk Upgrade ibrowse to version 2.1.1 This ibrowse release includes a few important fixes. See https://github.com/cmullaparthi/ibrowse for the list of fixes. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1050635 13f79535-47bb-0310-9956-ffa450edef68 --- src/ibrowse/Makefile.am | 2 +- src/ibrowse/ibrowse.app.in | 2 +- src/ibrowse/ibrowse.erl | 2 +- src/ibrowse/ibrowse_http_client.erl | 45 +++++++++++++------------------------ src/ibrowse/ibrowse_lib.erl | 19 ---------------- 5 files changed, 19 insertions(+), 51 deletions(-) diff --git a/src/ibrowse/Makefile.am b/src/ibrowse/Makefile.am index 8c5d3f8e..deddd5a9 100644 --- a/src/ibrowse/Makefile.am +++ b/src/ibrowse/Makefile.am @@ -10,7 +10,7 @@ ## License for the specific language governing permissions and limitations under ## the License. 
-ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.0/ebin +ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.1/ebin ibrowse_file_collection = \ ibrowse.app.in \ diff --git a/src/ibrowse/ibrowse.app.in b/src/ibrowse/ibrowse.app.in index e8580d10..aee0f205 100644 --- a/src/ibrowse/ibrowse.app.in +++ b/src/ibrowse/ibrowse.app.in @@ -1,6 +1,6 @@ {application, ibrowse, [{description, "HTTP client application"}, - {vsn, "2.1.0"}, + {vsn, "2.1.1"}, {modules, [ ibrowse, ibrowse_http_client, ibrowse_app, diff --git a/src/ibrowse/ibrowse.erl b/src/ibrowse/ibrowse.erl index 1a42f4bc..6e20cfb0 100644 --- a/src/ibrowse/ibrowse.erl +++ b/src/ibrowse/ibrowse.erl @@ -7,7 +7,7 @@ %%%------------------------------------------------------------------- %% @author Chandrashekhar Mullaparthi %% @copyright 2005-2010 Chandrashekhar Mullaparthi -%% @version 2.1.0 +%% @version 2.1.1 %% @doc The ibrowse application implements an HTTP 1.1 client in erlang. This %% module implements the API of the HTTP client. There is one named %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is diff --git a/src/ibrowse/ibrowse_http_client.erl b/src/ibrowse/ibrowse_http_client.erl index 5ff323cd..0135a49b 100644 --- a/src/ibrowse/ibrowse_http_client.erl +++ b/src/ibrowse/ibrowse_http_client.erl @@ -69,7 +69,7 @@ ]). -define(DEFAULT_STREAM_CHUNK_SIZE, 1024*1024). - +-define(dec2hex(X), erlang:integer_to_list(X, 16)). %%==================================================================== %% External functions %%==================================================================== @@ -197,7 +197,7 @@ handle_info({stream_close, _Req_id}, State) -> shutting_down(State), do_close(State), do_error_reply(State, closing_on_request), - {stop, normal, ok, State}; + {stop, normal, State}; handle_info({tcp_closed, _Sock}, State) -> do_trace("TCP connection closed by peer!~n", []), @@ -369,15 +369,6 @@ accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf {error, Reason} -> {error, {file_write_error, Reason}} end; -%% accumulate_response(<<>>, #state{cur_req = #request{caller_controls_socket = Ccs}, -%% socket = Socket} = State) -> -%% case Ccs of -%% true -> -%% do_setopts(Socket, [{active, once}], State); -%% false -> -%% ok -%% end, -%% State; accumulate_response(Data, #state{reply_buffer = RepBuf, rep_buf_size = RepBufSize, streamed_size = Streamed_size, @@ -544,7 +535,7 @@ do_send_body1(Source, Resp, State, TE) -> maybe_chunked_encode(Data, false) -> Data; maybe_chunked_encode(Data, true) -> - [ibrowse_lib:dec2hex(byte_size(to_binary(Data))), "\r\n", Data, "\r\n"]. + [?dec2hex(size(to_binary(Data))), "\r\n", Data, "\r\n"]. 
do_close(#state{socket = undefined}) -> ok; do_close(#state{socket = Sock, @@ -683,8 +674,7 @@ send_req_1(From, path = RelPath} = Url, Headers, Method, Body, Options, Timeout, #state{status = Status, - socket = Socket, - is_ssl = Is_ssl} = State) -> + socket = Socket} = State) -> ReqId = make_req_id(), Resp_format = get_value(response_format, Options, list), Caller_socket_options = get_value(socket_options, Options, []), @@ -723,7 +713,7 @@ send_req_1(From, Headers_1, AbsPath, RelPath, Body, Options, State_1), trace_request(Req), - do_setopts(Socket, Caller_socket_options, Is_ssl), + do_setopts(Socket, Caller_socket_options, State_1), TE = is_chunked_encoding_specified(Options), case do_send(Req, State_1) of ok -> @@ -831,17 +821,14 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options, Headers_0 = [Fun1(X) || X <- Headers], Headers_1 = case lists:keysearch("content-length", 1, Headers_0) of - false when (Body == []) orelse - (Body == <<>>) orelse - is_tuple(Body) orelse - is_function(Body) -> - Headers_0; - false when is_binary(Body) -> - [{"content-length", "content-length", integer_to_list(size(Body))} | Headers_0]; - false when is_list(Body) -> - [{"content-length", "content-length", integer_to_list(length(Body))} | Headers_0]; + false when (Body =:= [] orelse Body =:= <<>>) andalso + (Method =:= post orelse Method =:= put) -> + [{"content-length", "Content-Length", "0"} | Headers_0]; + false when is_binary(Body) orelse is_list(Body) -> + [{"content-length", "Content-Length", integer_to_list(iolist_size(Body))} | Headers_0]; _ -> - %% Content-Length is already specified + %% Content-Length is already specified or Body is a + %% function or function/state pair Headers_0 end, {Headers_2, Body_1} = @@ -927,23 +914,23 @@ chunk_request_body(Body, _ChunkSize, Acc) when Body == <<>>; Body == [] -> chunk_request_body(Body, ChunkSize, Acc) when is_binary(Body), size(Body) >= ChunkSize -> <> = Body, - Chunk = [ibrowse_lib:dec2hex(ChunkSize),"\r\n", + Chunk = [?dec2hex(ChunkSize),"\r\n", ChunkBody, "\r\n"], chunk_request_body(Rest, ChunkSize, [Chunk | Acc]); chunk_request_body(Body, _ChunkSize, Acc) when is_binary(Body) -> BodySize = size(Body), - Chunk = [ibrowse_lib:dec2hex(BodySize),"\r\n", + Chunk = [?dec2hex(BodySize),"\r\n", Body, "\r\n"], LastChunk = "0\r\n", lists:reverse(["\r\n", LastChunk, Chunk | Acc]); chunk_request_body(Body, ChunkSize, Acc) when length(Body) >= ChunkSize -> {ChunkBody, Rest} = split_list_at(Body, ChunkSize), - Chunk = [ibrowse_lib:dec2hex(ChunkSize),"\r\n", + Chunk = [?dec2hex(ChunkSize),"\r\n", ChunkBody, "\r\n"], chunk_request_body(Rest, ChunkSize, [Chunk | Acc]); chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) -> BodySize = length(Body), - Chunk = [ibrowse_lib:dec2hex(BodySize),"\r\n", + Chunk = [?dec2hex(BodySize),"\r\n", Body, "\r\n"], LastChunk = "0\r\n", lists:reverse(["\r\n", LastChunk, Chunk | Acc]). diff --git a/src/ibrowse/ibrowse_lib.erl b/src/ibrowse/ibrowse_lib.erl index e913adbe..696d0f69 100644 --- a/src/ibrowse/ibrowse_lib.erl +++ b/src/ibrowse/ibrowse_lib.erl @@ -19,9 +19,6 @@ url_encode/1, decode_rfc822_date/1, status_code/1, - dec2hex/1, - drv_ue/1, - drv_ue/2, encode_base64/1, decode_base64/1, get_value/2, @@ -33,17 +30,6 @@ get_trace_status(Host, Port) -> ibrowse:get_config_value({trace, Host, Port}, false). -drv_ue(Str) -> - [{port, Port}| _] = ets:lookup(ibrowse_table, port), - drv_ue(Str, Port). -drv_ue(Str, Port) -> - case erlang:port_control(Port, 1, Str) of - [] -> - Str; - Res -> - Res - end. 
- %% @doc URL-encodes a string based on RFC 1738. Returns a flat list. %% @spec url_encode(Str) -> UrlEncodedStr %% Str = string() @@ -163,11 +149,6 @@ status_code(507) -> insufficient_storage; status_code(X) when is_list(X) -> status_code(list_to_integer(X)); status_code(_) -> unknown_status_code. -%% @doc Returns a string with the hexadecimal representation of a given decimal. -%% N = integer() -- the number to represent as hex -%% @spec dec2hex(N::integer()) -> string() -dec2hex(N) -> lists:flatten(io_lib:format("~.16B", [N])). %% @doc Implements the base64 encoding algorithm. The output data type matches the input data type. %% @spec encode_base64(In) -> Out %% In = string() | binary() -- cgit v1.2.3 From b07ccd6dc1c36c9d8e61de237b18aebd1b5de06d Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Mon, 20 Dec 2010 12:34:52 +0000 Subject: Merged revision 1051082 from trunk Upgraded ibrowse to version 2.1.2 (released today) This version fixes a blocking issue (which rarely happens) when using the same connection (with or without pipelining) for multiple requests using the option {stream_to, {pid(), once}}. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1051084 13f79535-47bb-0310-9956-ffa450edef68 --- src/ibrowse/Makefile.am | 2 +- src/ibrowse/ibrowse.app.in | 2 +- src/ibrowse/ibrowse.erl | 2 +- src/ibrowse/ibrowse_http_client.erl | 35 +++++++++++--- src/ibrowse/ibrowse_test.erl | 93 +++++++++++++++++++++++++++++++++++-- 5 files changed, 121 insertions(+), 13 deletions(-) diff --git a/src/ibrowse/Makefile.am b/src/ibrowse/Makefile.am index deddd5a9..4cebe5d1 100644 --- a/src/ibrowse/Makefile.am +++ b/src/ibrowse/Makefile.am @@ -10,7 +10,7 @@ ## License for the specific language governing permissions and limitations under ## the License. -ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.1/ebin +ibrowseebindir = $(localerlanglibdir)/ibrowse-2.1.2/ebin ibrowse_file_collection = \ ibrowse.app.in \ diff --git a/src/ibrowse/ibrowse.app.in b/src/ibrowse/ibrowse.app.in index aee0f205..c8e42271 100644 --- a/src/ibrowse/ibrowse.app.in +++ b/src/ibrowse/ibrowse.app.in @@ -1,6 +1,6 @@ {application, ibrowse, [{description, "HTTP client application"}, - {vsn, "2.1.1"}, + {vsn, "2.1.2"}, {modules, [ ibrowse, ibrowse_http_client, ibrowse_app, diff --git a/src/ibrowse/ibrowse.erl b/src/ibrowse/ibrowse.erl index 6e20cfb0..e1051504 100644 --- a/src/ibrowse/ibrowse.erl +++ b/src/ibrowse/ibrowse.erl @@ -7,7 +7,7 @@ %%%------------------------------------------------------------------- %% @author Chandrashekhar Mullaparthi %% @copyright 2005-2010 Chandrashekhar Mullaparthi -%% @version 2.1.1 +%% @version 2.1.2 %% @doc The ibrowse application implements an HTTP 1.1 client in erlang. This %% module implements the API of the HTTP client. There is one named %% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver.
There is diff --git a/src/ibrowse/ibrowse_http_client.erl b/src/ibrowse/ibrowse_http_client.erl index 0135a49b..ea759488 100644 --- a/src/ibrowse/ibrowse_http_client.erl +++ b/src/ibrowse/ibrowse_http_client.erl @@ -191,6 +191,14 @@ handle_info({stream_next, Req_id}, #state{socket = Socket, {noreply, State}; handle_info({stream_next, _Req_id}, State) -> + _Cur_req_id = case State#state.cur_req of + #request{req_id = Cur} -> + Cur; + _ -> + undefined + end, +%% io:format("Ignoring stream_next as ~1000.p is not cur req (~1000.p)~n", +%% [_Req_id, _Cur_req_id]), {noreply, State}; handle_info({stream_close, _Req_id}, State) -> @@ -625,7 +633,7 @@ send_req_1(From, Path = [Server_host, $:, integer_to_list(Server_port)], {Req, Body_1} = make_request(connect, Pxy_auth_headers, Path, Path, - [], Options, State_1), + [], Options, State_1, undefined), TE = is_chunked_encoding_specified(Options), trace_request(Req), case do_send(Req, State) of @@ -711,7 +719,8 @@ send_req_1(From, Headers_1 = maybe_modify_headers(Url, Method, Options, Headers, State_1), {Req, Body_1} = make_request(Method, Headers_1, - AbsPath, RelPath, Body, Options, State_1), + AbsPath, RelPath, Body, Options, State_1, + ReqId), trace_request(Req), do_setopts(Socket, Caller_socket_options, State_1), TE = is_chunked_encoding_specified(Options), @@ -811,7 +820,7 @@ http_auth_digest(Username, Password) -> ibrowse_lib:encode_base64(Username ++ [$: | Password]). make_request(Method, Headers, AbsPath, RelPath, Body, Options, - #state{use_proxy = UseProxy, is_ssl = Is_ssl}) -> + #state{use_proxy = UseProxy, is_ssl = Is_ssl}, ReqId) -> HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})), Fun1 = fun({X, Y}) when is_atom(X) -> {to_lower(atom_to_list(X)), X, Y}; @@ -847,7 +856,13 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options, [{"Transfer-Encoding", "chunked"}], chunk_request_body(Body, Chunk_size_1)} end, - Headers_3 = cons_headers(Headers_2), + Headers_3 = case lists:member({include_ibrowse_req_id, true}, Options) of + true -> + [{"x-ibrowse-request-id", io_lib:format("~1000.p",[ReqId])} | Headers_2]; + false -> + Headers_2 + end, + Headers_4 = cons_headers(Headers_3), Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of true -> case Is_ssl of @@ -859,7 +874,7 @@ make_request(Method, Headers, AbsPath, RelPath, Body, Options, false -> RelPath end, - {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_3, crnl()], Body_1}. + {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_4, crnl()], Body_1}. is_chunked_encoding_specified(Options) -> case get_value(transfer_encoding, Options, false) of @@ -1303,11 +1318,17 @@ reset_state(State) -> transfer_encoding = undefined }. -set_cur_request(#state{reqs = Reqs} = State) -> +set_cur_request(#state{reqs = Reqs, socket = Socket} = State) -> case queue:to_list(Reqs) of [] -> State#state{cur_req = undefined}; - [NextReq | _] -> + [#request{caller_controls_socket = Ccs} = NextReq | _] -> + case Ccs of + true -> + do_setopts(Socket, [{active, once}], State); + _ -> + ok + end, State#state{cur_req = NextReq} end. diff --git a/src/ibrowse/ibrowse_test.erl b/src/ibrowse/ibrowse_test.erl index 3ad76603..b8e0a4a5 100644 --- a/src/ibrowse/ibrowse_test.erl +++ b/src/ibrowse/ibrowse_test.erl @@ -20,7 +20,8 @@ test_chunked_streaming_once/0, i_do_async_req_list/4, test_stream_once/3, - test_stream_once/4 + test_stream_once/4, + test_20122010/0 ]). 
test_stream_once(Url, Method, Options) -> @@ -218,7 +219,8 @@ dump_errors(Key, Iod) -> {"http://jigsaw.w3.org/HTTP/Basic/", get, [{basic_auth, {"guest", "guest"}}]}, {"http://jigsaw.w3.org/HTTP/CL/", get}, {"http://www.httpwatch.com/httpgallery/chunked/", get}, - {"https://github.com", get, [{ssl_options, [{depth, 2}]}]} + {"https://github.com", get, [{ssl_options, [{depth, 2}]}]}, + {local_test_fun, test_20122010, []} ]). unit_tests() -> @@ -228,6 +230,7 @@ unit_tests(Options) -> application:start(crypto), application:start(public_key), application:start(ssl), + (catch ibrowse_test_server:start_server(8181, tcp)), ibrowse:start(), Options_1 = Options ++ [{connect_timeout, 5000}], {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]), @@ -242,7 +245,9 @@ unit_tests(Options) -> end. unit_tests_1(Parent, Options) -> - lists:foreach(fun({Url, Method}) -> + lists:foreach(fun({local_test_fun, Fun_name, Args}) -> + execute_req(local_test_fun, Fun_name, Args); + ({Url, Method}) -> execute_req(Url, Method, Options); ({Url, Method, X_Opts}) -> execute_req(Url, Method, X_Opts ++ Options) @@ -394,6 +399,10 @@ maybe_stream_next(Req_id, Options) -> ok end. +execute_req(local_test_fun, Method, Args) -> + io:format(" ~-54.54w: ", [Method]), + Result = (catch apply(?MODULE, Method, Args)), + io:format("~p~n", [Result]); execute_req(Url, Method, Options) -> io:format("~7.7w, ~50.50s: ", [Method, Url]), Result = (catch ibrowse:send_req(Url, [], Method, [], Options)), @@ -430,3 +439,81 @@ ue_test(Data) -> log_msg(Fmt, Args) -> io:format("~s -- " ++ Fmt, [ibrowse_lib:printable_date() | Args]). + +%%------------------------------------------------------------------------------ +%% +%%------------------------------------------------------------------------------ + +test_20122010() -> + {ok, Pid} = ibrowse:spawn_worker_process("http://localhost:8181"), + Expected_resp = <<"1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36-37-38-39-40-41-42-43-44-45-46-47-48-49-50-51-52-53-54-55-56-57-58-59-60-61-62-63-64-65-66-67-68-69-70-71-72-73-74-75-76-77-78-79-80-81-82-83-84-85-86-87-88-89-90-91-92-93-94-95-96-97-98-99-100">>, + Test_parent = self(), + Fun = fun() -> + do_test_20122010(Pid, Expected_resp, Test_parent) + end, + Pids = [erlang:spawn_monitor(Fun) || _ <- lists:seq(1,10)], + wait_for_workers(Pids). + +wait_for_workers([{Pid, _Ref} | Pids]) -> + receive + {Pid, success} -> + wait_for_workers(Pids) + after 5000 -> + test_failed + end; +wait_for_workers([]) -> + success. + +do_test_20122010(Pid, Expected_resp, Test_parent) -> + {ibrowse_req_id, Req_id} = ibrowse:send_req_direct( + Pid, + "http://localhost:8181/ibrowse_stream_once_chunk_pipeline_test", + [], get, [], + [{stream_to, {self(), once}}, + {include_ibrowse_req_id, true}]), + do_trace("~p -- sent request ~1000.p~n", [self(), Req_id]), + Req_id_str = lists:flatten(io_lib:format("~1000.p",[Req_id])), + receive + {ibrowse_async_headers, Req_id, "200", Headers} -> + case lists:keysearch("x-ibrowse-request-id", 1, Headers) of + {value, {_, Req_id_str}} -> + ok; + {value, {_, Req_id_1}} -> + do_trace("~p -- Sent req-id: ~1000.p. 
Recvd: ~1000.p~n", + [self(), Req_id, Req_id_1]), + exit(req_id_mismatch) + end + after 5000 -> + do_trace("~p -- response headers not received~n", [self()]), + exit({timeout, test_failed}) + end, + do_trace("~p -- response headers received~n", [self()]), + ok = ibrowse:stream_next(Req_id), + case do_test_20122010_1(Expected_resp, Req_id, []) of + true -> + Test_parent ! {self(), success}; + false -> + Test_parent ! {self(), failed} + end. + +do_test_20122010_1(Expected_resp, Req_id, Acc) -> + receive + {ibrowse_async_response, Req_id, Body_part} -> + ok = ibrowse:stream_next(Req_id), + do_test_20122010_1(Expected_resp, Req_id, [Body_part | Acc]); + {ibrowse_async_response_end, Req_id} -> + Acc_1 = list_to_binary(lists:reverse(Acc)), + Result = Acc_1 == Expected_resp, + do_trace("~p -- End of response. Result: ~p~n", [self(), Result]), + Result + after 1000 -> + exit({timeout, test_failed}) + end. + +do_trace(Fmt, Args) -> + do_trace(get(my_trace_flag), Fmt, Args). + +do_trace(true, Fmt, Args) -> + io:format("~s -- " ++ Fmt, [ibrowse_lib:printable_date() | Args]); +do_trace(_, _, _) -> + ok. -- cgit v1.2.3 From 7b10d890600660d116b0fd1891d682982a5d106c Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Wed, 22 Dec 2010 19:11:07 +0000 Subject: Merged revision 1052031 from trunk: Make sure attachments get compressed when their MIME type lists parameters Closes COUCHDB-996. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1052035 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_util.erl | 4 +-- test/etap/140-attachment-comp.t | 54 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 55 insertions(+), 3 deletions(-) diff --git a/src/couchdb/couch_util.erl b/src/couchdb/couch_util.erl index 7a8ae055..ed6d2b25 100644 --- a/src/couchdb/couch_util.erl +++ b/src/couchdb/couch_util.erl @@ -418,8 +418,8 @@ compressible_att_type(MimeType) -> ), lists:any( fun(TypeExp) -> - Regexp = "^\\s*" ++ - re:replace(TypeExp, "\\*", ".*", [{return, list}]) ++ "\\s*$", + Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"), + "(?:\\s*;.*?)?\\s*", $$], case re:run(MimeType, Regexp, [caseless]) of {match, _} -> true; diff --git a/test/etap/140-attachment-comp.t b/test/etap/140-attachment-comp.t index 98d37abc..81a24bcb 100755 --- a/test/etap/140-attachment-comp.t +++ b/test/etap/140-attachment-comp.t @@ -22,7 +22,7 @@ test_db_name() -> main(_) -> test_util:init_code_path(), - etap:plan(78), + etap:plan(86), case (catch test()) of ok -> etap:end_tests(); @@ -75,6 +75,8 @@ test() -> "compress" ), + test_compressible_type_with_parameters(), + timer:sleep(3000), % to avoid mochiweb socket closed exceptions couch_server:delete(test_db_name(), []), couch_server_sup:stop(), @@ -698,6 +700,56 @@ test_create_already_compressed_att_with_invalid_content_encoding( ), ok. 
+test_compressible_type_with_parameters() -> + {ok, {{_, Code, _}, _Headers, _Body}} = http:request( + put, + {db_url() ++ "/testdoc5/readme.txt", [], + "text/plain; charset=UTF-8", test_text_data()}, + [], + [{sync, true}]), + etap:is(Code, 201, "Created text attachment with MIME type " + "'text/plain; charset=UTF-8' using the standalone api"), + {ok, {{_, Code2, _}, Headers2, Body}} = http:request( + get, + {db_url() ++ "/testdoc5/readme.txt", [{"Accept-Encoding", "gzip"}]}, + [], + [{sync, true}]), + etap:is(Code2, 200, "HTTP response code is 200"), + Gziped = lists:member({"content-encoding", "gzip"}, Headers2), + etap:is(Gziped, true, "received body is gziped"), + Uncompressed = binary_to_list(zlib:gunzip(list_to_binary(Body))), + etap:is(Uncompressed, test_text_data(), "received data is gzipped"), + {ok, {{_, Code3, _}, _Headers3, Body3}} = http:request( + get, + {db_url() ++ "/testdoc5?att_encoding_info=true", []}, + [], + [{sync, true}]), + etap:is(Code3, 200, "HTTP response code is 200"), + Json = couch_util:json_decode(Body3), + {TextAttJson} = couch_util:get_nested_json_value( + Json, + [<<"_attachments">>, <<"readme.txt">>] + ), + TextAttLength = couch_util:get_value(<<"length">>, TextAttJson), + etap:is( + TextAttLength, + length(test_text_data()), + "text attachment stub length matches the uncompressed length" + ), + TextAttEncoding = couch_util:get_value(<<"encoding">>, TextAttJson), + etap:is( + TextAttEncoding, + <<"gzip">>, + "text attachment stub has the encoding field set to gzip" + ), + TextAttEncLength = couch_util:get_value(<<"encoded_length">>, TextAttJson), + etap:is( + TextAttEncLength, + iolist_size(zlib:gzip(test_text_data())), + "text attachment stub encoded_length matches the compressed length" + ), + ok. + test_png_data() -> {ok, Data} = file:read_file( test_util:source_file("share/www/image/logo.png") -- cgit v1.2.3 From 3c69bd3dfb926b010d6734495ba641fe22f19997 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Thu, 23 Dec 2010 11:25:48 +0000 Subject: Merged revision 1052227 from trunk: Fix: replicator didn't use the HTTP settings defined in the .ini config Issue found by Randall Leeds. Thanks. 
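As a usage sketch of what this fix enables (not part of the patch): assuming an Erlang shell attached to the node, and couch_config:set/4 behaving as elsewhere in this tree (the fourth argument persists the value to local.ini), the replicator's per-host HTTP limits can now be tuned through the config instead of the old hard-coded constants. The values 30 and 60 below are arbitrary:

    %% A sketch, not part of the patch: raise the per-host limits that
    %% the replicator now reads from the [replicator] config section.
    ok = couch_config:set("replicator", "max_http_sessions", "30", true),
    ok = couch_config:set("replicator", "max_http_pipeline_size", "60", true).

The diff below also registers a couch_config callback, so values changed this way are pushed to ibrowse without restarting the server.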
Closes COUCHDB-992 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1052228 13f79535-47bb-0310-9956-ffa450edef68 --- etc/couchdb/default.ini.tpl.in | 4 ++-- src/couchdb/couch_db.hrl | 8 +------- src/couchdb/couch_rep_httpc.erl | 14 ++++++++++++++ src/couchdb/couch_rep_reader.erl | 8 -------- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/etc/couchdb/default.ini.tpl.in b/etc/couchdb/default.ini.tpl.in index bb7ccec6..064bed4b 100644 --- a/etc/couchdb/default.ini.tpl.in +++ b/etc/couchdb/default.ini.tpl.in @@ -120,8 +120,8 @@ compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to d compressible_types = text/*, application/javascript, application/json, application/xml [replicator] -max_http_sessions = 10 -max_http_pipeline_size = 10 +max_http_sessions = 20 +max_http_pipeline_size = 50 ; set to true to validate peer certificates verify_ssl_certificates = false ; file containing a list of peer trusted certificates (PEM format) diff --git a/src/couchdb/couch_db.hrl b/src/couchdb/couch_db.hrl index a35745ef..d9a8697c 100644 --- a/src/couchdb/couch_db.hrl +++ b/src/couchdb/couch_db.hrl @@ -264,13 +264,7 @@ body = nil, options = [ {response_format,binary}, - {inactivity_timeout, 30000}, - {max_sessions, list_to_integer( - couch_config:get("replicator", "max_http_sessions", "10") - )}, - {max_pipeline_size, list_to_integer( - couch_config:get("replicator", "max_http_pipeline_size", "10") - )} + {inactivity_timeout, 30000} ], retries = 10, pause = 500, diff --git a/src/couchdb/couch_rep_httpc.erl b/src/couchdb/couch_rep_httpc.erl index ee46a15e..e535c0d5 100644 --- a/src/couchdb/couch_rep_httpc.erl +++ b/src/couchdb/couch_rep_httpc.erl @@ -92,6 +92,7 @@ db_exists(Req, CanonicalUrl, CreateDB) -> end, case catch ibrowse:send_req(Url, HeadersFun(head), head, [], Options) of {ok, "200", _, _} -> + config_http(CanonicalUrl), Req#http_db{url = CanonicalUrl}; {ok, "301", RespHeaders, _} -> RedirectUrl = redirect_url(RespHeaders, Req#http_db.url), @@ -106,6 +107,19 @@ db_exists(Req, CanonicalUrl, CreateDB) -> throw({db_not_found, ?l2b(Url)}) end. +config_http(Url) -> + #url{host = Host, port = Port} = ibrowse_lib:parse_url(Url), + ok = ibrowse:set_max_sessions(Host, Port, list_to_integer( + couch_config:get("replicator", "max_http_sessions", "20"))), + ok = ibrowse:set_max_pipeline_size(Host, Port, list_to_integer( + couch_config:get("replicator", "max_http_pipeline_size", "50"))), + ok = couch_config:register( + fun("replicator", "max_http_sessions", MaxSessions) -> + ibrowse:set_max_sessions(Host, Port, list_to_integer(MaxSessions)); + ("replicator", "max_http_pipeline_size", PipeSize) -> + ibrowse:set_max_pipeline_size(Host, Port, list_to_integer(PipeSize)) + end). + redirect_url(RespHeaders, OrigUrl) -> MochiHeaders = mochiweb_headers:make(RespHeaders), RedUrl = mochiweb_headers:get_value("Location", MochiHeaders), diff --git a/src/couchdb/couch_rep_reader.erl b/src/couchdb/couch_rep_reader.erl index 0930599c..a7ae45a8 100644 --- a/src/couchdb/couch_rep_reader.erl +++ b/src/couchdb/couch_rep_reader.erl @@ -21,11 +21,8 @@ -define (BUFFER_SIZE, 1000). -define (MAX_CONCURRENT_REQUESTS, 100). --define (MAX_CONNECTIONS, 20). --define (MAX_PIPELINE_SIZE, 50). -include("couch_db.hrl"). --include("../ibrowse/ibrowse.hrl"). 
-record (state, { parent, @@ -53,11 +50,6 @@ next(Pid) -> init([Parent, Source, MissingRevs_or_DocIds, _PostProps]) -> process_flag(trap_exit, true), - if is_record(Source, http_db) -> - #url{host=Host, port=Port} = ibrowse_lib:parse_url(Source#http_db.url), - ibrowse:set_max_sessions(Host, Port, ?MAX_CONNECTIONS), - ibrowse:set_max_pipeline_size(Host, Port, ?MAX_PIPELINE_SIZE); - true -> ok end, Self = self(), ReaderLoop = spawn_link( fun() -> reader_loop(Self, Parent, Source, MissingRevs_or_DocIds) end -- cgit v1.2.3 From 1a93bfd193c6b235a2296feff461c43ef64c1316 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 29 Dec 2010 03:19:05 +0000 Subject: Skip recursive path merging, COUCHDB-968 This patch ensures that we only ever merge a linear path into the tree. It relies on the stemming code to collapse paths that could have been merged together by a recursive use of merge_one. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1053518 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_key_tree.erl | 2 +- test/etap/060-kt-merging.t | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/couchdb/couch_key_tree.erl b/src/couchdb/couch_key_tree.erl index 985aebc0..7c84865c 100644 --- a/src/couchdb/couch_key_tree.erl +++ b/src/couchdb/couch_key_tree.erl @@ -43,7 +43,7 @@ merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, Acc, HasConflicts) -> case merge_at([Tree], StartInsert - Start, [TreeInsert]) of {ok, [Merged], Conflicts} -> MergedStart = lists:min([Start, StartInsert]), - merge_one(Rest, {MergedStart, Merged}, Acc, Conflicts or HasConflicts); + {ok, Rest ++ [{MergedStart, Merged} | Acc], Conflicts or HasConflicts}; no -> AccOut = [{Start, Tree} | Acc], merge_one(Rest, {StartInsert, TreeInsert}, AccOut, HasConflicts) diff --git a/test/etap/060-kt-merging.t b/test/etap/060-kt-merging.t index 5a8571ac..971e49bf 100755 --- a/test/etap/060-kt-merging.t +++ b/test/etap/060-kt-merging.t @@ -15,7 +15,7 @@ main(_) -> test_util:init_code_path(), - etap:plan(12), + etap:plan(14), case (catch test()) of ok -> etap:end_tests(); @@ -106,10 +106,13 @@ test() -> "Merging should create conflicts." ), + {MultiPaths, NoConflicts} = couch_key_tree:merge(Expect1, TwoChild), + etap:is(NoConflicts, no_conflicts, "Merge should have no conflicts."), + etap:is(length(MultiPaths), 2, "Should have two paths before stemming."), etap:is( - {[TwoChild], no_conflicts}, - couch_key_tree:merge(Expect1, TwoChild), - "Merge should have no conflicts." + couch_key_tree:stem(MultiPaths, 10), + [TwoChild], + "Stemming should collapse the paths." ), ok. 
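For reference, a standalone sketch of the behaviour the updated assertions pin down, with the tree fixtures written out. The literals are assumed to match their definitions at the top of 060-kt-merging.t; a path has the shape {StartDepth, {Key, Value, ChildTree}}:

    %% Fixture values as defined in 060-kt-merging.t (assumed unchanged):
    OneChild   = {1, {"1", "foo", [{"1a", "bar", []}]}},
    TwoChild   = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
    Stemmed1aa = {3, {"1aa", "bar", []}},
    %% A stem that starts too deep cannot be grafted onto OneChild, so
    %% merge/2 keeps both paths and reports a conflict:
    {Expect1, conflicts} = couch_key_tree:merge([OneChild], Stemmed1aa),
    %% Without recursive path merging the two paths stay separate...
    {MultiPaths, no_conflicts} = couch_key_tree:merge(Expect1, TwoChild),
    2 = length(MultiPaths),
    %% ...and a stemming pass collapses them back into a single path:
    [TwoChild] = couch_key_tree:stem(MultiPaths, 10).

This is exactly the sequence the updated test asserts; the next commit below folds the stemming step into a new merge/3.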
-- cgit v1.2.3 From 85d358f89ecb88560a6b8f263da8c24df1ebec98 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 29 Dec 2010 03:19:17 +0000 Subject: Stem revision trees after merging a path, COUCHDB-968 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1053519 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db.erl | 3 ++- src/couchdb/couch_db_updater.erl | 10 +++++----- src/couchdb/couch_key_tree.erl | 8 +++++++- test/etap/060-kt-merging.t | 33 +++++++++++++++------------------ 4 files changed, 29 insertions(+), 25 deletions(-) diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index 27a3953b..f005a2ea 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -555,7 +555,8 @@ prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldI {ok, #full_doc_info{rev_tree=OldTree}} -> NewRevTree = lists:foldl( fun(NewDoc, AccTree) -> - {NewTree, _} = couch_key_tree:merge(AccTree, couch_db:doc_to_tree(NewDoc)), + {NewTree, _} = couch_key_tree:merge(AccTree, + couch_db:doc_to_tree(NewDoc), Db#db.revs_limit), NewTree end, OldTree, Bucket), diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl index 2cce4b69..eb1a3edc 100644 --- a/src/couchdb/couch_db_updater.erl +++ b/src/couchdb/couch_db_updater.erl @@ -495,10 +495,11 @@ merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList], [OldDocInfo|RestOldInfo], AccNewInfos, AccRemoveSeqs, AccSeq) -> #full_doc_info{id=Id,rev_tree=OldTree,deleted=OldDeleted,update_seq=OldSeq} = OldDocInfo, - NewRevTree0 = lists:foldl( + NewRevTree = lists:foldl( fun({Client, #doc{revs={Pos,[_Rev|PrevRevs]}}=NewDoc}, AccTree) -> if not MergeConflicts -> - case couch_key_tree:merge(AccTree, couch_db:doc_to_tree(NewDoc)) of + case couch_key_tree:merge(AccTree, couch_db:doc_to_tree(NewDoc), + Limit) of {_NewTree, conflicts} when (not OldDeleted) -> send_result(Client, Id, {Pos-1,PrevRevs}, conflict), AccTree; @@ -529,7 +530,7 @@ merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList], NewDoc#doc{revs={OldPos, [OldRev]}}), NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}}, {NewTree2, _} = couch_key_tree:merge(AccTree, - couch_db:doc_to_tree(NewDoc2)), + couch_db:doc_to_tree(NewDoc2), Limit), % we changed the rev id, this tells the caller we did send_result(Client, Id, {Pos-1,PrevRevs}, {ok, {OldPos + 1, NewRevId}}), @@ -543,12 +544,11 @@ merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList], end; true -> {NewTree, _} = couch_key_tree:merge(AccTree, - couch_db:doc_to_tree(NewDoc)), + couch_db:doc_to_tree(NewDoc), Limit), NewTree end end, OldTree, NewDocs), - NewRevTree = couch_key_tree:stem(NewRevTree0, Limit), if NewRevTree == OldTree -> % nothing changed merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo, diff --git a/src/couchdb/couch_key_tree.erl b/src/couchdb/couch_key_tree.erl index 7c84865c..6701da58 100644 --- a/src/couchdb/couch_key_tree.erl +++ b/src/couchdb/couch_key_tree.erl @@ -12,7 +12,7 @@ -module(couch_key_tree). --export([merge/2, find_missing/2, get_key_leafs/2, get_full_key_paths/2, get/2]). +-export([merge/3, find_missing/2, get_key_leafs/2, get_full_key_paths/2, get/2]). -export([map/2, get_all_leafs/1, count_leafs/1, remove_leafs/2, get_all_leafs_full/1,stem/2,map_leafs/2]). @@ -23,6 +23,12 @@ % partial trees arranged by how much they are cut off. +-spec merge([path()], path(), pos_integer()) -> {[path()], + conflicts | no_conflicts}. 
+merge(Paths, Path, Depth) -> + {Merged, Conflicts} = merge(Paths, Path), + {stem(Merged, Depth), Conflicts}. + -spec merge([path()], path()) -> {[path()], conflicts | no_conflicts}. merge(Paths, Path) -> {ok, Merged, HasConflicts} = merge_one(Paths, Path, [], false), diff --git a/test/etap/060-kt-merging.t b/test/etap/060-kt-merging.t index 971e49bf..0e481a52 100755 --- a/test/etap/060-kt-merging.t +++ b/test/etap/060-kt-merging.t @@ -15,7 +15,7 @@ main(_) -> test_util:init_code_path(), - etap:plan(14), + etap:plan(12), case (catch test()) of ok -> etap:end_tests(); @@ -42,77 +42,74 @@ test() -> etap:is( {[One], no_conflicts}, - couch_key_tree:merge([], One), + couch_key_tree:merge([], One, 10), "The empty tree is the identity for merge." ), etap:is( {TwoSibs, no_conflicts}, - couch_key_tree:merge(TwoSibs, One), + couch_key_tree:merge(TwoSibs, One, 10), "Merging a prefix of a tree with the tree yields the tree." ), etap:is( {[One], no_conflicts}, - couch_key_tree:merge([One], One), + couch_key_tree:merge([One], One, 10), "Merging is reflexive." ), etap:is( {[TwoChild], no_conflicts}, - couch_key_tree:merge([TwoChild], TwoChild), + couch_key_tree:merge([TwoChild], TwoChild, 10), "Merging two children is still reflexive." ), etap:is( {[TwoChildSibs], no_conflicts}, - couch_key_tree:merge([TwoChildSibs], TwoChildSibs), + couch_key_tree:merge([TwoChildSibs], TwoChildSibs, 10), "Merging a tree to itself is itself."), etap:is( {[TwoChildSibs], no_conflicts}, - couch_key_tree:merge([TwoChildSibs], Stemmed1b), + couch_key_tree:merge([TwoChildSibs], Stemmed1b, 10), "Merging a tree with a stem." ), etap:is( {[TwoChildSibs2], no_conflicts}, - couch_key_tree:merge([TwoChildSibs2], Stemmed1bb), + couch_key_tree:merge([TwoChildSibs2], Stemmed1bb, 10), "Merging a stem at a deeper level." ), etap:is( {[TwoChild], no_conflicts}, - couch_key_tree:merge([TwoChild], Stemmed1aa), + couch_key_tree:merge([TwoChild], Stemmed1aa, 10), "Merging a single tree with a deeper stem." ), etap:is( {[TwoChild], no_conflicts}, - couch_key_tree:merge([TwoChild], Stemmed1a), + couch_key_tree:merge([TwoChild], Stemmed1a, 10), "Merging a larger stem." ), etap:is( {[Stemmed1a], no_conflicts}, - couch_key_tree:merge([Stemmed1a], Stemmed1aa), + couch_key_tree:merge([Stemmed1a], Stemmed1aa, 10), "More merging." ), Expect1 = [OneChild, Stemmed1aa], etap:is( {Expect1, conflicts}, - couch_key_tree:merge([OneChild], Stemmed1aa), + couch_key_tree:merge([OneChild], Stemmed1aa, 10), "Merging should create conflicts." ), - {MultiPaths, NoConflicts} = couch_key_tree:merge(Expect1, TwoChild), - etap:is(NoConflicts, no_conflicts, "Merge should have no conflicts."), - etap:is(length(MultiPaths), 2, "Should have two paths before stemming."), etap:is( - couch_key_tree:stem(MultiPaths, 10), - [TwoChild], - "Stemming should collapse the paths." + {[TwoChild], no_conflicts}, + couch_key_tree:merge(Expect1, TwoChild, 10), + "Merge should have no conflicts." ), ok. 
-- cgit v1.2.3 From 08d71849464a8e1cc869b385591fa00b3ad0f843 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 29 Dec 2010 16:01:17 +0000 Subject: Ignore closed connection after _changes are downloaded Closes COUCHDB-993 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1053659 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_rep_changes_feed.erl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/couchdb/couch_rep_changes_feed.erl b/src/couchdb/couch_rep_changes_feed.erl index 7f7d3a38..4d1afcb8 100644 --- a/src/couchdb/couch_rep_changes_feed.erl +++ b/src/couchdb/couch_rep_changes_feed.erl @@ -204,6 +204,9 @@ handle_info({ibrowse_async_response_end, Id}, #state{reqid=Id} = State) -> handle_info({'EXIT', From, normal}, #state{changes_loop=From} = State) -> handle_feed_completion(State); +handle_info({'EXIT', From, normal}, #state{conn=From, complete=true} = State) -> + {noreply, State}; + handle_info({'EXIT', From, Reason}, #state{changes_loop=From} = State) -> ?LOG_ERROR("changes_loop died with reason ~p", [Reason]), {stop, changes_loop_died, State}; -- cgit v1.2.3 From 50ee63aa35b3d7a180cfeacc190924bb7fac3273 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Mon, 3 Jan 2011 21:45:47 +0000 Subject: Crash if duplicates are detected during view compaction, COUCHDB-999 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1054784 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_view_compactor.erl | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/couchdb/couch_view_compactor.erl b/src/couchdb/couch_view_compactor.erl index 895556bf..9a47f5f8 100644 --- a/src/couchdb/couch_view_compactor.erl +++ b/src/couchdb/couch_view_compactor.erl @@ -48,18 +48,22 @@ compact_group(Group, EmptyGroup) -> TaskName = <>, couch_task_status:add_task(<<"View Group Compaction">>, TaskName, <<"">>), - Fun = fun(KV, {Bt, Acc, TotalCopied}) -> + Fun = fun({DocId, _ViewIdKeys} = KV, {Bt, Acc, TotalCopied, LastId}) -> + if DocId =:= LastId -> % COUCHDB-999 + Msg = "Duplicates of ~s detected in ~s ~s - rebuild required", + exit(io_lib:format(Msg, [DocId, DbName, GroupId])); + true -> ok end, if TotalCopied rem 10000 =:= 0 -> couch_task_status:update("Copied ~p of ~p Ids (~p%)", [TotalCopied, Count, (TotalCopied*100) div Count]), {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV|Acc])), - {ok, {Bt2, [], TotalCopied+1}}; + {ok, {Bt2, [], TotalCopied+1, DocId}}; true -> - {ok, {Bt, [KV|Acc], TotalCopied+1}} + {ok, {Bt, [KV|Acc], TotalCopied+1, DocId}} end end, - {ok, _, {Bt3, Uncopied, _Total}} = couch_btree:foldl(IdBtree, Fun, - {EmptyIdBtree, [], 0}), + {ok, _, {Bt3, Uncopied, _Total, _LastId}} = couch_btree:foldl(IdBtree, Fun, + {EmptyIdBtree, [], 0, nil}), {ok, NewIdBtree} = couch_btree:add(Bt3, lists:reverse(Uncopied)), NewViews = lists:map(fun({View, EmptyView}) -> -- cgit v1.2.3 From c13fc058e0e38a40d38163c158444e9d9ab4f291 Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Mon, 3 Jan 2011 23:53:04 +0000 Subject: Updated CHANGES and NEWS before 1.0.2 release. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1054833 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 12 +++++++++--- NEWS | 4 +--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/CHANGES b/CHANGES index 213287e3..bfabc018 100644 --- a/CHANGES +++ b/CHANGES @@ -4,11 +4,10 @@ Apache CouchDB CHANGES Version 1.0.2 ------------- -Note: This version has not been released yet. 
- Futon: * Make test suite work with Safari and Chrome. + * Fixed animated progress spinner. Storage System: @@ -17,6 +16,8 @@ Storage System: * Fix occasional timeout errors after successfully compacting large databases. * Fix occasional error when writing to a database that has just been compacted. * Fix occasional timeout errors on systems with slow or heavily loaded IO. + * Fix for OOME when compactions include documents with many conflicts. + * Fix for missing attachment compression when MIME types included parameters. * Fix for frequently edited documents in multi-master deployments being duplicated in _changes and _all_docs. See COUCHDB-968 for details on how to repair. @@ -35,7 +36,9 @@ HTTP Interface: Replicator: - * Updated ibrowse library to 2.1.0 fixing numerous replication issues. + * Updated ibrowse library to 2.1.2 fixing numerous replication issues. + * Make sure that the replicator respects HTTP settings defined in the config. + * Fix error when the ibrowse connection closes unexpectedly. * Fix authenticated replication (with HTTP basic auth) of design documents with attachments. * Various fixes to make replication more resilient for edge-cases. @@ -46,6 +49,9 @@ View Server: * Fix for circular references in CommonJS requires. * Made isArray() function available to functions executed in the query server. * Documents are now sealed before being passed to map functions. + * Force view compaction failure when duplicated document data exists. When + this error is seen in the logs users should rebuild their views from + scratch to fix the issue. See COUCHDB-999 for details. Version 1.0.1 ------------- diff --git a/NEWS b/NEWS index bed858b1..1a70a0f1 100644 --- a/NEWS +++ b/NEWS @@ -10,15 +10,13 @@ Each release section notes when backwards incompatible changes have been made. Version 1.0.2 ------------- -Note: This version has not been released yet. - * Make test suite work with Safari and Chrome. * Fix leaking file handles after compacting databases and views. * Fix databases forgetting their validation function after compaction. * Fix occasional timeout errors. * Reduce lengthy stack traces. * Allow logging of native types. - * Updated ibrowse library to 2.1.0 fixing numerous replication issues. + * Updated ibrowse library to 2.1.2 fixing numerous replication issues. * Fix authenticated replication of design documents with attachments. * Various fixes to make replication more resilient for edge-cases. * Don't trigger a view update when requesting `_design/doc/_info`.
-- cgit v1.2.3 From 2a1f5e8b4a8bf80c632d44b6c81b0d9dfbd55174 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Fri, 7 Jan 2011 17:17:18 +0000 Subject: Merged revision 1056395 from trunk Applied 2 more ibrowse fixes already submitted upstream https://github.com/cmullaparthi/ibrowse/pull/24 https://github.com/cmullaparthi/ibrowse/pull/25 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1056397 13f79535-47bb-0310-9956-ffa450edef68 --- src/ibrowse/ibrowse_http_client.erl | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/ibrowse/ibrowse_http_client.erl b/src/ibrowse/ibrowse_http_client.erl index ea759488..5dce321c 100644 --- a/src/ibrowse/ibrowse_http_client.erl +++ b/src/ibrowse/ibrowse_http_client.erl @@ -287,10 +287,14 @@ handle_sock_data(Data, #state{status = get_header}=State) -> {error, _Reason} -> shutting_down(State), {stop, normal, State}; - State_1 -> - active_once(State_1), - State_2 = set_inac_timer(State_1), - {noreply, State_2} + #state{socket = Socket, status = Status, cur_req = CurReq} = State_1 -> + case {Status, CurReq} of + {get_header, #request{caller_controls_socket = true}} -> + do_setopts(Socket, [{active, once}], State_1); + _ -> + active_once(State_1) + end, + {noreply, set_inac_timer(State_1)} end; handle_sock_data(Data, #state{status = get_body, @@ -683,6 +687,7 @@ send_req_1(From, Headers, Method, Body, Options, Timeout, #state{status = Status, socket = Socket} = State) -> + cancel_timer(State#state.inactivity_timer_ref, {eat_message, timeout}), ReqId = make_req_id(), Resp_format = get_value(response_format, Options, list), Caller_socket_options = get_value(socket_options, Options, []), -- cgit v1.2.3 From 5af920c4c08ba65cfd025c29846b162664e5ea6d Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Mon, 10 Jan 2011 19:37:10 +0000 Subject: spell javascript correctly in loadScript(url) git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1057330 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/couch_test_runner.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/share/www/script/couch_test_runner.js b/share/www/script/couch_test_runner.js index 56787e9a..55a6533f 100644 --- a/share/www/script/couch_test_runner.js +++ b/share/www/script/couch_test_runner.js @@ -18,7 +18,7 @@ function loadScript(url) { if((url.substr(0, 7) == "http://") || (url.substr(0, 2) == "//") || (url.substr(0, 5) == "data:") - || (url.substr(0, 11) == "javsacript:")) { + || (url.substr(0, 11) == "javascript:")) { throw "Not loading remote test scripts"; } if (typeof document != "undefined") document.write(''); -- cgit v1.2.3 From 6ce291d3d3ccac65c082c787e9540fd187c225be Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Tue, 11 Jan 2011 00:20:35 +0000 Subject: Avoid overzealous URI encoding. Fix raw view document link. Closes COUCHDB-998 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1057422 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/database.html | 4 ++++ share/www/script/futon.browse.js | 8 ++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/share/www/database.html b/share/www/database.html index 2802ad78..9a9f121e 100644 --- a/share/www/database.html +++ b/share/www/database.html @@ -40,6 +40,10 @@ specific language governing permissions and limitations under the License. 
$("h1 strong").text(page.db.name); var viewPath = page.viewName || "_all_docs"; if (viewPath != "_temp_view" && viewPath != "_design_docs") { + viewPath = $.map(viewPath.split("/"), function (part) { + return encodeURIComponent(part); + }).join("/"); + $("h1 a.raw").attr("href", "/" + encodeURIComponent(page.db.name) + "/" + viewPath); } diff --git a/share/www/script/futon.browse.js b/share/www/script/futon.browse.js index 17975de2..6b3c979c 100644 --- a/share/www/script/futon.browse.js +++ b/share/www/script/futon.browse.js @@ -116,7 +116,7 @@ var viewName = (urlParts.length > 0) ? urlParts.join("/") : null; if (viewName) { - $.futon.storage.set("view", viewName); + $.futon.storage.set("view", decodeURIComponent(viewName)); } else { viewName = $.futon.storage.get("view"); if (viewName) { @@ -128,6 +128,7 @@ var db = $.couch.db(dbName); this.dbName = dbName; + viewName = decodeURIComponent(viewName); this.viewName = viewName; this.viewLanguage = "javascript"; this.db = db; @@ -578,7 +579,7 @@ this.updateDesignDocLink = function() { if (viewName && /^_design/.test(viewName)) { - var docId = "_design/" + decodeURIComponent(viewName.split("/")[1]); + var docId = "_design/" + encodeURIComponent(decodeURIComponent(viewName).split("/")[1]); $("#designdoc-link").attr("href", "document.html?" + encodeURIComponent(dbName) + "/" + $.couch.encodeDocId(docId)).text(docId); } else { @@ -774,8 +775,7 @@ if (page.isDirty) { db.query(currentMapCode, currentReduceCode, page.viewLanguage, options); } else { - var viewParts = viewName.split('/'); - + var viewParts = decodeURIComponent(viewName).split('/'); if ($.futon.storage.get("stale")) { options.stale = "ok"; } -- cgit v1.2.3 From 7ad660be8252a21c69f1265d70f876fe6d955e3b Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Tue, 11 Jan 2011 01:41:40 +0000 Subject: Update version info before preparing the 1.0.2 release. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1057442 13f79535-47bb-0310-9956-ffa450edef68 --- acinclude.m4.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/acinclude.m4.in b/acinclude.m4.in index fb92b3f4..6ba5a784 100644 --- a/acinclude.m4.in +++ b/acinclude.m4.in @@ -18,7 +18,7 @@ m4_define([LOCAL_PACKAGE_NAME], [Apache CouchDB]) m4_define([LOCAL_BUG_URI], [https://issues.apache.org/jira/browse/COUCHDB]) m4_define([LOCAL_VERSION_MAJOR], [1]) m4_define([LOCAL_VERSION_MINOR], [0]) -m4_define([LOCAL_VERSION_REVISION], [1]) +m4_define([LOCAL_VERSION_REVISION], [2]) m4_define([LOCAL_VERSION_STAGE], []) m4_define([LOCAL_VERSION_RELEASE], []) m4_define([LOCAL_VERSION_PRIMARY], -- cgit v1.2.3 From 2a3b0a8d47430053541a84a3eeacaaef505308b1 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Tue, 11 Jan 2011 21:29:55 +0000 Subject: Merged revision 1057875 from trunk: Make the doc multipart GET APIs always send attachments compressed For attachments that are stored in compressed (gzip) form, make sure the document multipart/related and multipart/mixed APIs don't decompress the attachments before sending them through the socket. This is to avoid multipart parser issues when the attachment's identity length is unknown or lost due to a local to local replication triggered by CouchDB versions up to 1.0.1 Closes COUCHDB-1022. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1057878 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/attachments_multipart.js | 6 +++--- src/couchdb/couch_httpd_db.erl | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/share/www/script/test/attachments_multipart.js b/share/www/script/test/attachments_multipart.js index fecf9d01..f173d2bb 100644 --- a/share/www/script/test/attachments_multipart.js +++ b/share/www/script/test/attachments_multipart.js @@ -29,17 +29,17 @@ couchTests.attachments_multipart= function(debug) { "_attachments":{ "foo.txt": { "follows":true, - "content_type":"text/plain", + "content_type":"application/test", "length":21 }, "bar.txt": { "follows":true, - "content_type":"text/plain", + "content_type":"application/test", "length":20 }, "baz.txt": { "follows":true, - "content_type":"text/plain", + "content_type":"application/test", "length":19 } } diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl index 7b09bf57..1767d9cc 100644 --- a/src/couchdb/couch_httpd_db.erl +++ b/src/couchdb/couch_httpd_db.erl @@ -736,34 +736,34 @@ send_doc_efficiently(Req, #doc{atts=Atts}=Doc, Headers, Options) -> JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [attachments, follows|Options])), {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream( - Boundary,JsonBytes, Atts,false), + Boundary,JsonBytes, Atts, true), CType = {<<"Content-Type">>, ContentType}, {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len), couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts, - fun(Data) -> couch_httpd:send(Resp, Data) end, false) + fun(Data) -> couch_httpd:send(Resp, Data) end, true) end; false -> send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options)) end. 
-send_docs_multipart(Req, Results, Options) -> +send_docs_multipart(Req, Results, Options1) -> OuterBoundary = couch_uuids:random(), InnerBoundary = couch_uuids:random(), + Options = [attachments, follows, att_encoding_info | Options1], CType = {"Content-Type", "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""}, {ok, Resp} = start_chunked_response(Req, 200, [CType]), couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>), lists:foreach( fun({ok, #doc{atts=Atts}=Doc}) -> - JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, - [attachments,follows|Options])), + JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)), {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream( - InnerBoundary, JsonBytes, Atts, false), + InnerBoundary, JsonBytes, Atts, true), couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ", ContentType/binary, "\r\n\r\n">>), couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts, fun(Data) -> couch_httpd:send_chunk(Resp, Data) - end, false), + end, true), couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>); ({{not_found, missing}, RevId}) -> RevStr = couch_doc:rev_to_str(RevId), -- cgit v1.2.3 From 43da5dcc08fd83b6255f14d7a97ea177018f07e6 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Wed, 12 Jan 2011 04:55:16 +0000 Subject: Preserve purge metadata during compaction, thanks Mike Leddy Closes COUCHDB-1021 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1057988 13f79535-47bb-0310-9956-ffa450edef68 --- THANKS | 1 + share/www/script/test/purge.js | 8 ++++++++ src/couchdb/couch_db_updater.erl | 14 +++++++++++--- 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/THANKS b/THANKS index 5f57b196..15072e2a 100644 --- a/THANKS +++ b/THANKS @@ -69,5 +69,6 @@ suggesting improvements or submitting changes. Some of these people are: * Juuso Väänänen * Benjamin Young * Gabriel Farrell + * Mike Leddy For a list of authors see the `AUTHORS` file. diff --git a/share/www/script/test/purge.js b/share/www/script/test/purge.js index af72ea4f..f8f45138 100644 --- a/share/www/script/test/purge.js +++ b/share/www/script/test/purge.js @@ -76,6 +76,14 @@ couchTests.purge = function(debug) { } T(db.view("test/single_doc").total_rows == 0); + // purge sequences are preserved after compaction (COUCHDB-1021) + T(db.compact().ok); + T(db.last_req.status == 202); + // compaction isn't instantaneous, loop until done + while (db.info().compact_running) {}; + var compactInfo = db.info(); + T(compactInfo.purge_seq == newInfo.purge_seq); + // purge documents twice in a row without loading views // (causes full view rebuilds) diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl index eb1a3edc..e660800d 100644 --- a/src/couchdb/couch_db_updater.erl +++ b/src/couchdb/couch_db_updater.erl @@ -847,7 +847,7 @@ copy_compact(Db, NewDb0, Retry) -> commit_data(NewDb4#db{update_seq=Db#db.update_seq}). 
-start_copy_compact(#db{name=Name,filepath=Filepath}=Db) -> +start_copy_compact(#db{name=Name,filepath=Filepath,header=#db_header{purge_seq=PurgeSeq}}=Db) -> CompactFile = Filepath ++ ".compact", ?LOG_DEBUG("Compaction process spawned for db \"~s\"", [Name]), case couch_file:open(CompactFile) of @@ -867,8 +867,16 @@ start_copy_compact(#db{name=Name,filepath=Filepath}=Db) -> ok = couch_file:write_header(Fd, Header=#db_header{}) end, NewDb = init_db(Name, CompactFile, Fd, Header), + NewDb2 = if PurgeSeq > 0 -> + {ok, PurgedIdsRevs} = couch_db:get_last_purged(Db), + {ok, Pointer} = couch_file:append_term(Fd, PurgedIdsRevs), + NewDb#db{header=Header#db_header{purge_seq=PurgeSeq, purged_docs=Pointer}}; + true -> + NewDb + end, unlink(Fd), - NewDb2 = copy_compact(Db, NewDb, Retry), - close_db(NewDb2), + + NewDb3 = copy_compact(Db, NewDb2, Retry), + close_db(NewDb3), gen_server:cast(Db#db.update_pid, {compact_done, CompactFile}). -- cgit v1.2.3 From 3674f2a2ef778bdf211426d9e804192e22cd26ad Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Wed, 12 Jan 2011 10:45:14 +0000 Subject: ensure write_streamed_attachment bails on negative LenLeft values While Filipe has identified the fix for COUCHDB-1021, this patch will ensure that no other bug will cause negative values to be passed to this function, in turn leading to database inflation problems, etc. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1058058 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index f005a2ea..b7055b5c 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -920,7 +920,7 @@ with_stream(Fd, #att{md5=InMd5,type=Type,encoding=Enc}=Att, Fun) -> write_streamed_attachment(_Stream, _F, 0) -> ok; -write_streamed_attachment(Stream, F, LenLeft) -> +write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 -> Bin = F(), ok = couch_stream:write(Stream, Bin), write_streamed_attachment(Stream, F, LenLeft - size(Bin)). -- cgit v1.2.3 From 8432c0e8f31a683b13419dc591edca49933d1f81 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 18 Jan 2011 00:42:36 +0000 Subject: Avoid bug in rev tree logic during attachment uploads This is a workaround for a bug in couch_key_tree, described in COUCHDB-902, which would cause uploads to fail with spurious conflicts. A patch for the key tree itself will be landing on trunk. Thanks Bob Dionne, Klaus Trainer. 
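The workaround itself lands in couch_httpd_db.erl at the end of this commit: before calling couch_db:update_doc/3, the attachment handler prunes the document's revision list down to its head, apparently so the key-tree merge only ever sees a single-revision path and never exercises the COUCHDB-902 merge bug. A minimal sketch of the pruning step, reusing the names that appear in the patch below:

    #doc{atts=Atts, revs={Pos, Revs}} = Doc,
    PrunedRevs = case Revs of [] -> []; [Hd|_] -> [Hd] end,
    DocEdited = Doc#doc{revs = {Pos, PrunedRevs},
                        atts = NewAtt ++ [A || A <- Atts, A#att.name /= FileName]},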
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1060149 13f79535-47bb-0310-9956-ffa450edef68 --- share/Makefile.am | 1 + share/www/script/couch_tests.js | 1 + share/www/script/test/attachment_conflicts.js | 56 +++++++++++++++++++++++++++ src/couchdb/couch_httpd_db.erl | 4 +- 4 files changed, 61 insertions(+), 1 deletion(-) create mode 100644 share/www/script/test/attachment_conflicts.js diff --git a/share/Makefile.am b/share/Makefile.am index 752fa9f9..f72db769 100644 --- a/share/Makefile.am +++ b/share/Makefile.am @@ -114,6 +114,7 @@ nobase_dist_localdata_DATA = \ www/script/test/all_docs.js \ www/script/test/attachments.js \ www/script/test/attachments_multipart.js \ + www/script/test/attachment_conflicts.js \ www/script/test/attachment_names.js \ www/script/test/attachment_paths.js \ www/script/test/attachment_views.js \ diff --git a/share/www/script/couch_tests.js b/share/www/script/couch_tests.js index c5257ea6..896b3538 100644 --- a/share/www/script/couch_tests.js +++ b/share/www/script/couch_tests.js @@ -32,6 +32,7 @@ loadTest("basics.js"); loadTest("all_docs.js"); loadTest("attachments.js"); loadTest("attachments_multipart.js"); +loadTest("attachment_conflicts.js"); loadTest("attachment_names.js"); loadTest("attachment_paths.js"); loadTest("attachment_views.js"); diff --git a/share/www/script/test/attachment_conflicts.js b/share/www/script/test/attachment_conflicts.js new file mode 100644 index 00000000..c400277e --- /dev/null +++ b/share/www/script/test/attachment_conflicts.js @@ -0,0 +1,56 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +// Do some edit conflict detection tests for attachments. 
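// Shape of the test below: seed db A, replicate to B, edit the doc on
// both sides, replicate again so B holds a conflict, then PUT an
// attachment against B's current revision using If-Match and expect a
// plain 201. Before the rev-list pruning in couch_httpd_db.erl (later in
// this commit) such uploads could fail with a spurious conflict.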
+couchTests.attachment_conflicts = function(debug) { + + var dbA = new CouchDB("test_suite_db_a", {"X-Couch-Full-Commit":"false"}); + var dbB = new CouchDB("test_suite_db_b", {"X-Couch-Full-Commit":"false"}); + dbA.deleteDb(); + dbA.createDb(); + dbB.deleteDb(); + dbB.createDb(); + + if (debug) debugger; + + T(dbA.save({"_id":"doc", "foo":"bar"}).ok); + + // create conflict + T(CouchDB.replicate("test_suite_db_a", "test_suite_db_b").ok); + + var doc = dbA.open("doc"); + var rev11 = doc._rev; + T(dbA.save({"_id":"doc", "foo":"bar2","_rev":rev11}).ok); + + doc = dbB.open("doc"); + var rev12 = doc._rev; + T(dbB.save({"_id":"doc", "foo":"bar3","_rev":rev12}).ok); + + T(CouchDB.replicate("test_suite_db_a", "test_suite_db_b").ok); + + // the attachment + var bin_data = "JHAPDO*AU£PN ){(3u[d 93DQ9¡€])} ææøo'∂ƒæ≤çæππ•¥∫¶®#†π¶®¥π€ª®˙π8np"; + + doc = dbB.open("doc"); + var rev13 = doc._rev; + + // test that we can attach to conflicting documents + var xhr = CouchDB.request("PUT", "/test_suite_db_b/doc/attachment.txt", { + headers: { + "Content-Type": "text/plain;charset=utf-8", + "If-Match": rev13 + }, + body: bin_data + }); + T(xhr.status == 201); + +}; diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl index 1767d9cc..217a2d03 100644 --- a/src/couchdb/couch_httpd_db.erl +++ b/src/couchdb/couch_httpd_db.erl @@ -1020,8 +1020,10 @@ db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileN end end, - #doc{atts=Atts} = Doc, + #doc{atts=Atts, revs = {Pos, Revs}} = Doc, DocEdited = Doc#doc{ + % prune revision list as a workaround for key tree bug (COUCHDB-902) + revs = {Pos, case Revs of [] -> []; [Hd|_] -> [Hd] end}, atts = NewAtt ++ [A || A <- Atts, A#att.name /= FileName] }, {ok, UpdatedRev} = couch_db:update_doc(Db, DocEdited, []), -- cgit v1.2.3 From c9c334db9fe384265df316459555fe2411fd8231 Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Thu, 20 Jan 2011 00:50:18 +0000 Subject: Update CHANGES and NEWS before the 1.0.2 release. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1061083 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 7 +++++++ NEWS | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/CHANGES b/CHANGES index bfabc018..fdfd31d2 100644 --- a/CHANGES +++ b/CHANGES @@ -8,6 +8,8 @@ Futon: * Make test suite work with Safari and Chrome. * Fixed animated progress spinner. + * Fix raw view document link due to overzealous URI encoding. + * Spell javascript correctly in loadScript(uri). Storage System: @@ -18,6 +20,9 @@ Storage System: * Fix occasional timeout errors on systems with slow or heavily loaded IO. * Fix for OOME when compactions include documents with many conflicts. * Fix for missing attachment compression when MIME types included parameters. + * Preserve purge metadata during compaction to avoid spurious view rebuilds. + * Fix spurious conflicts introduced when uploading an attachment after + a doc has been in a conflict. See COUCHDB-902 for details. * Fix for frequently edited documents in multi-master deployments being duplicated in _changes and _all_docs. See COUCHDB-968 for details on how to repair. @@ -33,6 +38,8 @@ HTTP Interface: * Allow reduce=false parameter in map-only views. * Fix parsing of Accept headers. + * Fix for multipart GET APIs when an attachment was created during a + local-local replication. See COUCHDB-1022 for details. Replicator: diff --git a/NEWS b/NEWS index 1a70a0f1..9550856e 100644 --- a/NEWS +++ b/NEWS @@ -18,11 +18,16 @@ Version 1.0.2 * Allow logging of native types.
* Updated ibrowse library to 2.1.2 fixing numerous replication issues. * Fix authenticated replication of design documents with attachments. + * Fix multipart GET APIs by always sending attachments in compressed + form when the source attachment is compressed on disk. Fixes a possible + edge case when an attachment underwent local-local replication. * Various fixes to make replication more resilient in edge cases. * Don't trigger a view update when requesting `_design/doc/_info`. * Fix for circular references in CommonJS requires. * Fix for frequently edited documents in multi-master deployments being duplicated in _changes and _all_docs. + * Fix spurious conflict generation during attachment uploads. + * Fix for various bugs in Futon. Version 1.0.1 ------------- -- cgit v1.2.3 From eb30d14d3465d400103367be6f5f9d89fa16e105 Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Thu, 20 Jan 2011 01:15:55 +0000 Subject: Fix bug that allows invalid UTF-8 after valid escapes. Merges r991073 from trunk to branches/1.0.x Fixes COUCHDB-875 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.0.x@1061088 13f79535-47bb-0310-9956-ffa450edef68 --- src/mochiweb/mochijson2.erl | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/src/mochiweb/mochijson2.erl b/src/mochiweb/mochijson2.erl index 111c37bd..1ed2b88f 100644 --- a/src/mochiweb/mochijson2.erl +++ b/src/mochiweb/mochijson2.erl @@ -405,8 +405,22 @@ tokenize_string(B, S=#decoder{offset=O}, Acc) -> Acc1 = lists:reverse(xmerl_ucs:to_utf8(C), Acc), tokenize_string(B, ?ADV_COL(S, 6), Acc1) end; - <<_:O/binary, C, _/binary>> -> - tokenize_string(B, ?INC_CHAR(S, C), [C | Acc]) + <<_:O/binary, C1, _/binary>> when C1 < 128 -> + tokenize_string(B, ?INC_CHAR(S, C1), [C1 | Acc]); + <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223, + C2 >= 128, C2 =< 191 -> + tokenize_string(B, ?ADV_COL(S, 2), [C2, C1 | Acc]); + <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239, + C2 >= 128, C2 =< 191, + C3 >= 128, C3 =< 191 -> + tokenize_string(B, ?ADV_COL(S, 3), [C3, C2, C1 | Acc]); + <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244, + C2 >= 128, C2 =< 191, + C3 >= 128, C3 =< 191, + C4 >= 128, C4 =< 191 -> + tokenize_string(B, ?ADV_COL(S, 4), [C4, C3, C2, C1 | Acc]); + _ -> + throw(invalid_utf8) end. tokenize_number(B, S) -> @@ -653,7 +667,9 @@ test_input_validation() -> <>, <>, % we don't support code points > 10FFFF per RFC 3629 - <> + <>, + %% escape characters trigger a different code path + <> ], lists:foreach(fun(X) -> ok = try decode(X) catch invalid_utf8 -> ok end -- cgit v1.2.3
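The new guards mirror RFC 3629's byte ranges: ASCII lead bytes below 128, two-byte lead bytes 194-223 (192 and 193 would only produce overlong encodings), three-byte lead bytes 224-239, four-byte lead bytes 240-244 (capping code points at U+10FFFF), and continuation bytes always in 128-191. A self-contained sketch of the same byte-range check outside the tokenizer; the module name utf8_check is illustrative and not part of mochiweb, and, like the patch, it does not tighten the second-byte ranges for lead bytes 224, 237, 240 and 244:

    -module(utf8_check).
    -export([valid/1]).

    %% Walks a binary with the same byte-range rules the patch adds to
    %% tokenize_string/3; returns true for well-formed input, false otherwise.
    valid(<<>>) ->
        true;
    valid(<<C1, Rest/binary>>) when C1 < 128 ->
        valid(Rest);                                  % one byte, ASCII
    valid(<<C1, C2, Rest/binary>>) when C1 >= 194, C1 =< 223,
                                        C2 >= 128, C2 =< 191 ->
        valid(Rest);                                  % two bytes, U+0080..U+07FF
    valid(<<C1, C2, C3, Rest/binary>>) when C1 >= 224, C1 =< 239,
                                            C2 >= 128, C2 =< 191,
                                            C3 >= 128, C3 =< 191 ->
        valid(Rest);                                  % three bytes
    valid(<<C1, C2, C3, C4, Rest/binary>>) when C1 >= 240, C1 =< 244,
                                                C2 >= 128, C2 =< 191,
                                                C3 >= 128, C3 =< 191,
                                                C4 >= 128, C4 =< 191 ->
        valid(Rest);                                  % four bytes, =< U+10FFFF
    valid(_) ->
        false.

For example, utf8_check:valid(<<16#C2, 16#A3>>) is true (a pound sign), while utf8_check:valid(<<16#C0, 16#80>>) is false (an overlong encoding of NUL that the old catch-all clause would have passed through).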