From 571edb05e32a2ae70ca7e1b1086ba937d2941a66 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Tue, 17 May 2011 19:13:42 +0000 Subject: port Filipe's fix and test for COUCHDB-885 to 1.1.x git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1104475 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/replication.js | 56 ++++++++++++++++++++++++++++++++++++ src/couchdb/couch_rep_writer.erl | 4 +-- 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/share/www/script/test/replication.js b/share/www/script/test/replication.js index 7f92891e..bde4ad11 100644 --- a/share/www/script/test/replication.js +++ b/share/www/script/test/replication.js @@ -785,6 +785,62 @@ couchTests.replication = function(debug) { TEquals('string', typeof repResult._local_id); + // COUCHDB-885 - push replication of a doc with attachment causes a + // conflict in the target. + dbA = new CouchDB("test_suite_db_a"); + dbB = new CouchDB("test_suite_db_b"); + + dbA.deleteDb(); + dbA.createDb(); + dbB.deleteDb(); + dbB.createDb(); + + var doc = { + _id: "doc1" + }; + TEquals(true, dbA.save(doc).ok); + + repResult = CouchDB.replicate( + dbA.name, + CouchDB.protocol + host + "/" + dbB.name + ); + TEquals(true, repResult.ok); + TEquals(true, repResult.history instanceof Array); + TEquals(1, repResult.history.length); + TEquals(1, repResult.history[0].docs_written); + TEquals(1, repResult.history[0].docs_read); + TEquals(0, repResult.history[0].doc_write_failures); + + doc["_attachments"] = { + "hello.txt": { + "content_type": "text/plain", + "data": "aGVsbG8gd29ybGQ=" // base64:encode("hello world") + }, + "foo.dat": { + "content_type": "not/compressible", + "data": "aSBhbSBub3QgZ3ppcGVk" // base64:encode("i am not gziped") + } + }; + + TEquals(true, dbA.save(doc).ok); + repResult = CouchDB.replicate( + dbA.name, + CouchDB.protocol + host + "/" + dbB.name + ); + TEquals(true, repResult.ok); + TEquals(true, repResult.history instanceof Array); + TEquals(2, repResult.history.length); + TEquals(1, repResult.history[0].docs_written); + TEquals(1, repResult.history[0].docs_read); + TEquals(0, repResult.history[0].doc_write_failures); + + var copy = dbB.open(doc._id, {conflicts: true, deleted_conflicts: true}); + T(copy !== null); + TEquals("undefined", typeof copy._conflicts); + TEquals("undefined", typeof copy._deleted_conflicts); + // end of test for COUCHDB-885 + + // cleanup dbA.deleteDb(); dbB.deleteDb(); diff --git a/src/couchdb/couch_rep_writer.erl b/src/couchdb/couch_rep_writer.erl index 12d6dec5..2b722e8e 100644 --- a/src/couchdb/couch_rep_writer.erl +++ b/src/couchdb/couch_rep_writer.erl @@ -64,7 +64,7 @@ write_bulk_docs(_Db, []) -> []; write_bulk_docs(#http_db{headers = Headers} = Db, Docs) -> JsonDocs = [ - couch_doc:to_json_obj(Doc, [revs, att_gzip_length]) || Doc <- Docs + couch_doc:to_json_obj(Doc, [revs]) || Doc <- Docs ], Request = Db#http_db{ resource = "_bulk_docs", @@ -84,7 +84,7 @@ write_multi_part_doc(#http_db{headers=Headers} = Db, #doc{atts=Atts} = Doc) -> JsonBytes = ?JSON_ENCODE( couch_doc:to_json_obj( Doc, - [follows, att_encoding_info, attachments] + [follows, att_encoding_info, attachments, revs] ) ), Boundary = couch_uuids:random(), -- cgit v1.2.3 From 589a5043e34c3bb98eb97b780fd015e0d1296152 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Tue, 17 May 2011 19:18:57 +0000 Subject: Added extra assertions to the test for COUCHDB-885 This is to verify the attachments really exist in the target and have the right data and metadata. 
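These assertions hinge on two document-open options, attachments and att_encoding_info, which make CouchDB return the attachment bodies (base64) and their on-disk encoding inline with the document. A minimal sketch of that read path, using the same couch.js CouchDB wrapper as the test and the database/document names from the test above, shown here only for illustration:

    // Open the replicated copy with attachment bodies and encoding metadata inline.
    var target = new CouchDB("test_suite_db_b");
    var copy = target.open("doc1", {
      attachments: true,        // include base64-encoded attachment data
      att_encoding_info: true   // expose the stored encoding, e.g. "gzip"
    });
    // copy._attachments["hello.txt"].data     === "aGVsbG8gd29ybGQ="
    // copy._attachments["hello.txt"].encoding === "gzip"    (compressible content type)
    // copy._attachments["foo.dat"].encoding   is undefined  (stored as identity)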
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1104478 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/replication.js | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/share/www/script/test/replication.js b/share/www/script/test/replication.js index bde4ad11..ea6713e8 100644 --- a/share/www/script/test/replication.js +++ b/share/www/script/test/replication.js @@ -838,6 +838,12 @@ couchTests.replication = function(debug) { T(copy !== null); TEquals("undefined", typeof copy._conflicts); TEquals("undefined", typeof copy._deleted_conflicts); + TEquals("text/plain", copy._attachments["hello.txt"]["content_type"]); + TEquals("aGVsbG8gd29ybGQ=", copy._attachments["hello.txt"]["data"]); + TEquals("gzip", copy._attachments["hello.txt"]["encoding"]); + TEquals("not/compressible", copy._attachments["foo.dat"]["content_type"]); + TEquals("aSBhbSBub3QgZ3ppcGVk", copy._attachments["foo.dat"]["data"]); + TEquals("undefined", typeof copy._attachments["foo.dat"]["encoding"]); // end of test for COUCHDB-885 -- cgit v1.2.3 From 61c777b873004f795060b5f432cce02402bdf026 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Tue, 17 May 2011 19:24:56 +0000 Subject: Add missing doc open option to the test for COUCHDB-885 These were forgotten when backporting the attachment related assertions from 1.0.x. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1104481 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/replication.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/share/www/script/test/replication.js b/share/www/script/test/replication.js index ea6713e8..a08c0b66 100644 --- a/share/www/script/test/replication.js +++ b/share/www/script/test/replication.js @@ -834,7 +834,9 @@ couchTests.replication = function(debug) { TEquals(1, repResult.history[0].docs_read); TEquals(0, repResult.history[0].doc_write_failures); - var copy = dbB.open(doc._id, {conflicts: true, deleted_conflicts: true}); + var copy = dbB.open(doc._id, { + conflicts: true, deleted_conflicts: true, attachments: true, + att_encoding_info: true}); T(copy !== null); TEquals("undefined", typeof copy._conflicts); TEquals("undefined", typeof copy._deleted_conflicts); -- cgit v1.2.3 From d618f75fe229d6ca4ebe24822ba498baf80278dc Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Tue, 17 May 2011 20:31:32 +0000 Subject: backport oauth fix - COUCHDB-1144 git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1104530 13f79535-47bb-0310-9956-ffa450edef68 --- src/erlang-oauth/oauth_uri.erl | 66 ++++++++++++++++++++++++------------------ test/etap/190-oauth.t | 31 ++++++++++++++++++++ test/etap/Makefile.am | 1 + 3 files changed, 70 insertions(+), 28 deletions(-) create mode 100755 test/etap/190-oauth.t diff --git a/src/erlang-oauth/oauth_uri.erl b/src/erlang-oauth/oauth_uri.erl index 3bdc9076..5023f983 100644 --- a/src/erlang-oauth/oauth_uri.erl +++ b/src/erlang-oauth/oauth_uri.erl @@ -6,14 +6,6 @@ -import(lists, [concat/1]). --define(is_uppercase_alpha(C), C >= $A, C =< $Z). --define(is_lowercase_alpha(C), C >= $a, C =< $z). --define(is_alpha(C), ?is_uppercase_alpha(C); ?is_lowercase_alpha(C)). --define(is_digit(C), C >= $0, C =< $9). --define(is_alphanumeric(C), ?is_alpha(C); ?is_digit(C)). --define(is_unreserved(C), ?is_alphanumeric(C); C =:= $-; C =:= $_; C =:= $.; C =:= $~). --define(is_hex(C), ?is_digit(C); C >= $A, C =< $F). 
- normalize(URI) -> case http_uri:parse(URI) of @@ -66,23 +58,41 @@ intersperse(_, [X]) -> [X]; intersperse(Sep, [X|Xs]) -> [X, Sep|intersperse(Sep, Xs)]. -decode(Chars) -> - decode(Chars, []). - -decode([], Decoded) -> - lists:reverse(Decoded); -decode([$%,A,B|Etc], Decoded) when ?is_hex(A), ?is_hex(B) -> - decode(Etc, [erlang:list_to_integer([A,B], 16)|Decoded]); -decode([C|Etc], Decoded) when ?is_unreserved(C) -> - decode(Etc, [C|Decoded]). - -encode(Chars) -> - encode(Chars, []). - -encode([], Encoded) -> - lists:flatten(lists:reverse(Encoded)); -encode([C|Etc], Encoded) when ?is_unreserved(C) -> - encode(Etc, [C|Encoded]); -encode([C|Etc], Encoded) -> - Value = io_lib:format("%~2.2.0s", [erlang:integer_to_list(C, 16)]), - encode(Etc, [Value|Encoded]). +-define(is_alphanum(C), C >= $A, C =< $Z; C >= $a, C =< $z; C >= $0, C =< $9). + +encode(Term) when is_integer(Term) -> + integer_to_list(Term); +encode(Term) when is_atom(Term) -> + encode(atom_to_list(Term)); +encode(Term) when is_list(Term) -> + encode(lists:reverse(Term, []), []). + +encode([X | T], Acc) when ?is_alphanum(X); X =:= $-; X =:= $_; X =:= $.; X =:= $~ -> + encode(T, [X | Acc]); +encode([X | T], Acc) -> + NewAcc = [$%, dec2hex(X bsr 4), dec2hex(X band 16#0f) | Acc], + encode(T, NewAcc); +encode([], Acc) -> + Acc. + +decode(Str) when is_list(Str) -> + decode(Str, []). + +decode([$%, A, B | T], Acc) -> + decode(T, [(hex2dec(A) bsl 4) + hex2dec(B) | Acc]); +decode([X | T], Acc) -> + decode(T, [X | Acc]); +decode([], Acc) -> + lists:reverse(Acc, []). + +-compile({inline, [{dec2hex, 1}, {hex2dec, 1}]}). + +dec2hex(N) when N >= 10 andalso N =< 15 -> + N + $A - 10; +dec2hex(N) when N >= 0 andalso N =< 9 -> + N + $0. + +hex2dec(C) when C >= $A andalso C =< $F -> + C - $A + 10; +hex2dec(C) when C >= $0 andalso C =< $9 -> + C - $0. diff --git a/test/etap/190-oauth.t b/test/etap/190-oauth.t new file mode 100755 index 00000000..09922049 --- /dev/null +++ b/test/etap/190-oauth.t @@ -0,0 +1,31 @@ +#!/usr/bin/env escript +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +main(_) -> + test_util:init_code_path(), + etap:plan(1), + case (catch test()) of + ok -> + etap:end_tests(); + Other -> + etap:diag(io_lib:format("Test died abnormally: ~p", [Other])), + etap:bail(Other) + end, + ok. + +test() -> + etap:is( + oauth_uri:params_from_string("realm=http://localhost:5984"), + [{"realm","http://localhost:5984"}], + "decode should handle non-percent encoded input."), + ok. diff --git a/test/etap/Makefile.am b/test/etap/Makefile.am index 9ba3fcfa..1b14b9e1 100644 --- a/test/etap/Makefile.am +++ b/test/etap/Makefile.am @@ -85,4 +85,5 @@ EXTRA_DIST = \ 173-os-daemon-cfg-register.t \ 180-http-proxy.ini \ 180-http-proxy.t \ + 190-oauth.t \ 200-view-group-no-db-leaks.t -- cgit v1.2.3 From aea9ddf21eedc566afa01892778081425fc131ac Mon Sep 17 00:00:00 2001 From: Paul Joseph Davis Date: Tue, 17 May 2011 23:14:25 +0000 Subject: Backported os daemon tests from trunk. 
This reduces the chance that these tests fail due to timing differences by avoding booting of an Erlang VM as an os daemon. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1104622 13f79535-47bb-0310-9956-ffa450edef68 --- test/etap/172-os-daemon-errors.1.es | 22 ---------------------- test/etap/172-os-daemon-errors.1.sh | 17 +++++++++++++++++ test/etap/172-os-daemon-errors.2.es | 16 ---------------- test/etap/172-os-daemon-errors.2.sh | 15 +++++++++++++++ test/etap/172-os-daemon-errors.3.es | 17 ----------------- test/etap/172-os-daemon-errors.3.sh | 15 +++++++++++++++ test/etap/172-os-daemon-errors.4.es | 17 ----------------- test/etap/172-os-daemon-errors.4.sh | 15 +++++++++++++++ test/etap/172-os-daemon-errors.t | 8 ++++---- test/etap/Makefile.am | 8 ++++---- 10 files changed, 70 insertions(+), 80 deletions(-) delete mode 100644 test/etap/172-os-daemon-errors.1.es create mode 100644 test/etap/172-os-daemon-errors.1.sh delete mode 100755 test/etap/172-os-daemon-errors.2.es create mode 100755 test/etap/172-os-daemon-errors.2.sh delete mode 100755 test/etap/172-os-daemon-errors.3.es create mode 100755 test/etap/172-os-daemon-errors.3.sh delete mode 100755 test/etap/172-os-daemon-errors.4.es create mode 100755 test/etap/172-os-daemon-errors.4.sh diff --git a/test/etap/172-os-daemon-errors.1.es b/test/etap/172-os-daemon-errors.1.es deleted file mode 100644 index a9defba1..00000000 --- a/test/etap/172-os-daemon-errors.1.es +++ /dev/null @@ -1,22 +0,0 @@ -#! /usr/bin/env escript - -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - -% Please do not make this file executable as that's the error being tested. - -loop() -> - timer:sleep(5000), - loop(). - -main([]) -> - loop(). diff --git a/test/etap/172-os-daemon-errors.1.sh b/test/etap/172-os-daemon-errors.1.sh new file mode 100644 index 00000000..345c8b40 --- /dev/null +++ b/test/etap/172-os-daemon-errors.1.sh @@ -0,0 +1,17 @@ +#!/bin/sh -e +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# Please do not make this file executable as that's the error being tested. + +sleep 5 diff --git a/test/etap/172-os-daemon-errors.2.es b/test/etap/172-os-daemon-errors.2.es deleted file mode 100755 index 52de0401..00000000 --- a/test/etap/172-os-daemon-errors.2.es +++ /dev/null @@ -1,16 +0,0 @@ -#! /usr/bin/env escript - -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. 
You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - -main([]) -> - init:stop(). diff --git a/test/etap/172-os-daemon-errors.2.sh b/test/etap/172-os-daemon-errors.2.sh new file mode 100755 index 00000000..256ee793 --- /dev/null +++ b/test/etap/172-os-daemon-errors.2.sh @@ -0,0 +1,15 @@ +#!/bin/sh -e +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. + +exit 1 diff --git a/test/etap/172-os-daemon-errors.3.es b/test/etap/172-os-daemon-errors.3.es deleted file mode 100755 index 64229800..00000000 --- a/test/etap/172-os-daemon-errors.3.es +++ /dev/null @@ -1,17 +0,0 @@ -#! /usr/bin/env escript - -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -% License for the specific language governing permissions and limitations under -% the License. - -main([]) -> - timer:sleep(1000), - init:stop(). diff --git a/test/etap/172-os-daemon-errors.3.sh b/test/etap/172-os-daemon-errors.3.sh new file mode 100755 index 00000000..f5a13684 --- /dev/null +++ b/test/etap/172-os-daemon-errors.3.sh @@ -0,0 +1,15 @@ +#!/bin/sh -e +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. + +sleep 1 diff --git a/test/etap/172-os-daemon-errors.4.es b/test/etap/172-os-daemon-errors.4.es deleted file mode 100755 index 577f3410..00000000 --- a/test/etap/172-os-daemon-errors.4.es +++ /dev/null @@ -1,17 +0,0 @@ -#! /usr/bin/env escript - -% Licensed under the Apache License, Version 2.0 (the "License"); you may not -% use this file except in compliance with the License. You may obtain a copy of -% the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, software -% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -% License for the specific language governing permissions and limitations under -% the License. - -main([]) -> - timer:sleep(2000), - init:stop(). diff --git a/test/etap/172-os-daemon-errors.4.sh b/test/etap/172-os-daemon-errors.4.sh new file mode 100755 index 00000000..5bc10e83 --- /dev/null +++ b/test/etap/172-os-daemon-errors.4.sh @@ -0,0 +1,15 @@ +#!/bin/sh -e +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. + +sleep 2 diff --git a/test/etap/172-os-daemon-errors.t b/test/etap/172-os-daemon-errors.t index 287a0812..bde5c6ff 100755 --- a/test/etap/172-os-daemon-errors.t +++ b/test/etap/172-os-daemon-errors.t @@ -30,16 +30,16 @@ config_files() -> ]). bad_perms() -> - test_util:source_file("test/etap/172-os-daemon-errors.1.es"). + test_util:source_file("test/etap/172-os-daemon-errors.1.sh"). die_on_boot() -> - test_util:source_file("test/etap/172-os-daemon-errors.2.es"). + test_util:source_file("test/etap/172-os-daemon-errors.2.sh"). die_quickly() -> - test_util:source_file("test/etap/172-os-daemon-errors.3.es"). + test_util:source_file("test/etap/172-os-daemon-errors.3.sh"). can_reboot() -> - test_util:source_file("test/etap/172-os-daemon-errors.4.es"). + test_util:source_file("test/etap/172-os-daemon-errors.4.sh"). main(_) -> test_util:init_code_path(), diff --git a/test/etap/Makefile.am b/test/etap/Makefile.am index 1b14b9e1..ce52d430 100644 --- a/test/etap/Makefile.am +++ b/test/etap/Makefile.am @@ -77,10 +77,10 @@ EXTRA_DIST = \ 170-os-daemons.t \ 171-os-daemons-config.es \ 171-os-daemons-config.t \ - 172-os-daemon-errors.1.es \ - 172-os-daemon-errors.2.es \ - 172-os-daemon-errors.3.es \ - 172-os-daemon-errors.4.es \ + 172-os-daemon-errors.1.sh \ + 172-os-daemon-errors.2.sh \ + 172-os-daemon-errors.3.sh \ + 172-os-daemon-errors.4.sh \ 172-os-daemon-errors.t \ 173-os-daemon-cfg-register.t \ 180-http-proxy.ini \ -- cgit v1.2.3 From a9ce63984c13d126f633a6ab8a243910feac6bc8 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Wed, 18 May 2011 10:54:39 +0000 Subject: Avoid assertion failure in replication.js due to timing issues git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1124185 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/replication.js | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/share/www/script/test/replication.js b/share/www/script/test/replication.js index a08c0b66..5e85847e 100644 --- a/share/www/script/test/replication.js +++ b/share/www/script/test/replication.js @@ -12,6 +12,20 @@ couchTests.replication = function(debug) { if (debug) debugger; + + function waitForSeq(sourceDb, targetDb) { + var targetSeq, + sourceSeq = sourceDb.info().update_seq, + t0 = new Date(), + t1, + ms = 3000; + + do { + targetSeq = targetDb.info().update_seq; + t1 = new Date(); + } while (((t1 - t0) <= ms) && targetSeq < sourceSeq); + } + var host = CouchDB.host; var dbPairs = [ {source:"test_suite_db_a", @@ -768,6 +782,7 @@ couchTests.replication = function(debug) { var tasksAfter = JSON.parse(xhr.responseText); 
TEquals(tasks.length, tasksAfter.length); + waitForSeq(dbA, dbB); T(dbB.open("30") !== null); repResult = CouchDB.replicate( -- cgit v1.2.3 From 4361a832c18be3f9d96e7943f6e1bbce92a94cad Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Fri, 20 May 2011 10:56:30 +0000 Subject: Replication manager: don't update doc if new state == current state This is to avoid unncessary updates. This is a backport of revision 1125317 (trunk). git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1125320 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_rep.erl | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl index 5c9fbce6..e4b4264e 100644 --- a/src/couchdb/couch_rep.erl +++ b/src/couchdb/couch_rep.erl @@ -883,13 +883,18 @@ update_rep_doc({Props} = _RepDoc, KVs) -> update_rep_doc(RepDb, #doc{body = {RepDocBody}} = RepDoc, KVs) -> NewRepDocBody = lists:foldl( - fun({<<"_replication_state">> = K, _V} = KV, Body) -> - Body1 = lists:keystore(K, 1, Body, KV), - {Mega, Secs, _} = erlang:now(), - UnixTime = Mega * 1000000 + Secs, - lists:keystore( - <<"_replication_state_time">>, 1, - Body1, {<<"_replication_state_time">>, UnixTime}); + fun({<<"_replication_state">> = K, State} = KV, Body) -> + case couch_util:get_value(K, Body) of + State -> + Body; + _ -> + Body1 = lists:keystore(K, 1, Body, KV), + {Mega, Secs, _} = erlang:now(), + UnixTime = Mega * 1000000 + Secs, + lists:keystore( + <<"_replication_state_time">>, 1, + Body1, {<<"_replication_state_time">>, UnixTime}) + end; ({K, _V} = KV, Body) -> lists:keystore(K, 1, Body, KV) end, -- cgit v1.2.3 From 5b8d4522255662ce2b8637680ed1a3db24c2bcef Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Fri, 20 May 2011 10:57:28 +0000 Subject: Replication manager: allow edition of replication documents Replication documents that are not in the triggered state can now be edited by users. This is to make it simpler for Futon users to restart replications - they can just edit the document, remove its state field, and save it, instead of recreating it or adding a similar document but with a different _id. This is a backport of revision 1125319 (trunk). 
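In practice the workflow this enables looks like the following sketch (the couch.js CouchDB wrapper is assumed, the replication database is assumed to be the default _replicator, and the document id is hypothetical):

    // Restart a completed or errored replication by clearing its state fields;
    // only documents in the "triggered" state remain writable solely by the replicator.
    var repDb = new CouchDB("_replicator");
    var repDoc = repDb.open("my_rep");           // hypothetical replication doc id
    delete repDoc._replication_state;
    delete repDoc._replication_state_time;
    repDb.save(repDoc);                          // the replication manager picks it up and re-triggers it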
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1125321 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/replicator_db.js | 3 ++- src/couchdb/couch_js_functions.hrl | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/share/www/script/test/replicator_db.js b/share/www/script/test/replicator_db.js index 2810352c..48e5d5c6 100644 --- a/share/www/script/test/replicator_db.js +++ b/share/www/script/test/replicator_db.js @@ -676,7 +676,8 @@ couchTests.replicator_db = function(debug) { var repDoc = { _id: "foo_rep_doc", source: dbA.name, - target: dbB.name + target: dbB.name, + continuous: true }; T(CouchDB.login("fdmanana", "qwerty").ok); diff --git a/src/couchdb/couch_js_functions.hrl b/src/couchdb/couch_js_functions.hrl index 0cc49d62..1e3ed4e9 100644 --- a/src/couchdb/couch_js_functions.hrl +++ b/src/couchdb/couch_js_functions.hrl @@ -140,8 +140,10 @@ var isReplicator = (userCtx.roles.indexOf('_replicator') >= 0); var isAdmin = (userCtx.roles.indexOf('_admin') >= 0); - if (oldDoc && !newDoc._deleted && !isReplicator) { - reportError('Only the replicator can edit replication documents.'); + if (oldDoc && !newDoc._deleted && !isReplicator && + (oldDoc._replication_state === 'triggered')) { + reportError('Only the replicator can edit replication documents ' + + 'that are in the triggered state.'); } if (!newDoc._deleted) { -- cgit v1.2.3 From 86c8ffc732052ba5eb942330ad431d1a42297034 Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sat, 21 May 2011 12:29:38 +0000 Subject: Merged revision 1125680 from trunk Use RFC3339 timestamps in replication documents As recently proposed by Max Odgen, RFC3339 timestamps are now used instead of Unix timestamps. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1125682 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/replicator_db.js | 20 ++++++++++---------- src/couchdb/couch_rep.erl | 24 +++++++++++++++++++++--- 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/share/www/script/test/replicator_db.js b/share/www/script/test/replicator_db.js index 48e5d5c6..c28e067d 100644 --- a/share/www/script/test/replicator_db.js +++ b/share/www/script/test/replicator_db.js @@ -121,7 +121,7 @@ couchTests.replicator_db = function(debug) { T(repDoc1.source === repDoc.source); T(repDoc1.target === repDoc.target); T(repDoc1._replication_state === "completed", "simple"); - T(typeof repDoc1._replication_state_time === "number"); + T(typeof repDoc1._replication_state_time === "string"); T(typeof repDoc1._replication_id === "string"); } @@ -173,7 +173,7 @@ couchTests.replicator_db = function(debug) { T(repDoc1.source === repDoc.source); T(repDoc1.target === repDoc.target); T(repDoc1._replication_state === "completed", "filtered"); - T(typeof repDoc1._replication_state_time === "number"); + T(typeof repDoc1._replication_state_time === "string"); T(typeof repDoc1._replication_id === "string"); } @@ -217,7 +217,7 @@ couchTests.replicator_db = function(debug) { T(repDoc1.source === repDoc.source); T(repDoc1.target === repDoc.target); T(repDoc1._replication_state === "triggered"); - T(typeof repDoc1._replication_state_time === "number"); + T(typeof repDoc1._replication_state_time === "string"); T(typeof repDoc1._replication_id === "string"); // add a design doc to source, it will be replicated to target @@ -332,7 +332,7 @@ couchTests.replicator_db = function(debug) { T(repDoc1_copy.source === repDoc1.source); T(repDoc1_copy.target === repDoc1.target); 
T(repDoc1_copy._replication_state === "completed"); - T(typeof repDoc1_copy._replication_state_time === "number"); + T(typeof repDoc1_copy._replication_state_time === "string"); T(typeof repDoc1_copy._replication_id === "string"); var newDoc = { @@ -363,7 +363,7 @@ couchTests.replicator_db = function(debug) { T(repDoc2_copy.source === repDoc1.source); T(repDoc2_copy.target === repDoc1.target); T(repDoc2_copy._replication_state === "completed"); - T(typeof repDoc2_copy._replication_state_time === "number"); + T(typeof repDoc2_copy._replication_state_time === "string"); T(typeof repDoc2_copy._replication_id === "string"); T(repDoc2_copy._replication_id === repDoc1_copy._replication_id); } @@ -400,7 +400,7 @@ couchTests.replicator_db = function(debug) { repDoc1 = repDb.open("foo_dup_rep_doc_1"); T(repDoc1 !== null); T(repDoc1._replication_state === "completed", "identical"); - T(typeof repDoc1._replication_state_time === "number"); + T(typeof repDoc1._replication_state_time === "string"); T(typeof repDoc1._replication_id === "string"); repDoc2 = repDb.open("foo_dup_rep_doc_2"); @@ -444,7 +444,7 @@ couchTests.replicator_db = function(debug) { repDoc1 = repDb.open("foo_dup_cont_rep_doc_1"); T(repDoc1 !== null); T(repDoc1._replication_state === "triggered"); - T(typeof repDoc1._replication_state_time === "number"); + T(typeof repDoc1._replication_state_time === "string"); T(typeof repDoc1._replication_id === "string"); repDoc2 = repDb.open("foo_dup_cont_rep_doc_2"); @@ -470,7 +470,7 @@ couchTests.replicator_db = function(debug) { repDoc1 = repDb.open("foo_dup_cont_rep_doc_1"); T(repDoc1 !== null); T(repDoc1._replication_state === "triggered"); - T(typeof repDoc1._replication_state_time === "number"); + T(typeof repDoc1._replication_state_time === "string"); var newDoc2 = { _id: "foo5000", @@ -739,7 +739,7 @@ couchTests.replicator_db = function(debug) { T(repDoc1.target === repDoc.target); T(repDoc1._replication_state === "completed", "replication document with bad replication id failed"); - T(typeof repDoc1._replication_state_time === "number"); + T(typeof repDoc1._replication_state_time === "string"); T(typeof repDoc1._replication_id === "string"); T(repDoc1._replication_id !== "1234abc"); } @@ -930,7 +930,7 @@ couchTests.replicator_db = function(debug) { var repDoc1 = repDb.open(repDoc._id); T(repDoc1 !== null); T(repDoc1._replication_state === "error"); - T(typeof repDoc1._replication_state_time === "number"); + T(typeof repDoc1._replication_state_time === "string"); T(typeof repDoc1._replication_id === "string"); } diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl index e4b4264e..49a82e5d 100644 --- a/src/couchdb/couch_rep.erl +++ b/src/couchdb/couch_rep.erl @@ -889,11 +889,9 @@ update_rep_doc(RepDb, #doc{body = {RepDocBody}} = RepDoc, KVs) -> Body; _ -> Body1 = lists:keystore(K, 1, Body, KV), - {Mega, Secs, _} = erlang:now(), - UnixTime = Mega * 1000000 + Secs, lists:keystore( <<"_replication_state_time">>, 1, - Body1, {<<"_replication_state_time">>, UnixTime}) + Body1, {<<"_replication_state_time">>, timestamp()}) end; ({K, _V} = KV, Body) -> lists:keystore(K, 1, Body, KV) @@ -909,6 +907,26 @@ update_rep_doc(RepDb, #doc{body = {RepDocBody}} = RepDoc, KVs) -> [] ). +% RFC3339 timestamps. +% Note: doesn't include the time seconds fraction (RFC3339 says it's optional). 
+timestamp() -> + {{Year, Month, Day}, {Hour, Min, Sec}} = calendar:now_to_local_time(now()), + UTime = erlang:universaltime(), + LocalTime = calendar:universal_time_to_local_time(UTime), + DiffSecs = calendar:datetime_to_gregorian_seconds(LocalTime) - + calendar:datetime_to_gregorian_seconds(UTime), + zone(DiffSecs div 3600, (DiffSecs rem 3600) div 60), + iolist_to_binary( + io_lib:format("~4..0w-~2..0w-~2..0wT~2..0w:~2..0w:~2..0w~s", + [Year, Month, Day, Hour, Min, Sec, + zone(DiffSecs div 3600, (DiffSecs rem 3600) div 60)])). + +zone(Hr, Min) when Hr >= 0, Min >= 0 -> + io_lib:format("+~2..0w:~2..0w", [Hr, Min]); +zone(Hr, Min) -> + io_lib:format("-~2..0w:~2..0w", [abs(Hr), abs(Min)]). + + maybe_set_triggered({RepProps} = RepDoc, RepId) -> case couch_util:get_value(<<"_replication_state">>, RepProps) of <<"triggered">> -> -- cgit v1.2.3 From 6f2e87fb5a36bece84a9df3d0cd5c7b87ec60bce Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Sat, 21 May 2011 22:01:38 +0000 Subject: Merged revision 1125828 from trunk Add missing option to multipart/related GETs This is necessary as the client needs to be able to know if attachments are encoded or not. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1125830 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/attachments_multipart.js | 7 +++++-- src/couchdb/couch_httpd_db.erl | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/share/www/script/test/attachments_multipart.js b/share/www/script/test/attachments_multipart.js index 7f587357..13a5abf4 100644 --- a/share/www/script/test/attachments_multipart.js +++ b/share/www/script/test/attachments_multipart.js @@ -39,7 +39,7 @@ couchTests.attachments_multipart= function(debug) { }, "baz.txt": { "follows":true, - "content_type":"application/test", + "content_type":"text/plain", "length":19 } } @@ -78,12 +78,15 @@ couchTests.attachments_multipart= function(debug) { // now edit an attachment - var doc = db.open("multipart"); + var doc = db.open("multipart", {att_encoding_info: true}); var firstrev = doc._rev; T(doc._attachments["foo.txt"].stub == true); T(doc._attachments["bar.txt"].stub == true); T(doc._attachments["baz.txt"].stub == true); + TEquals("undefined", typeof doc._attachments["foo.txt"].encoding); + TEquals("undefined", typeof doc._attachments["bar.txt"].encoding); + TEquals("gzip", doc._attachments["baz.txt"].encoding); //lets change attachment bar delete doc._attachments["bar.txt"].stub; // remove stub member (or could set to false) diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl index 0dbebb6e..8336cac0 100644 --- a/src/couchdb/couch_httpd_db.erl +++ b/src/couchdb/couch_httpd_db.erl @@ -762,7 +762,7 @@ send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req, true -> Boundary = couch_uuids:random(), JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, - [attachments, follows|Options])), + [attachments, follows, att_encoding_info | Options])), {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream( Boundary,JsonBytes, Atts, true), CType = {<<"Content-Type">>, ContentType}, -- cgit v1.2.3 From ee6bb16b53069e6b213d1314f78cde2bd648b069 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Mon, 23 May 2011 09:51:04 +0000 Subject: COUCHDB-1156 - send Accept header so Futon doesn't display HTML where it expects JSON (Thanks Dale Harvey for patch) git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1126411 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/jquery.couch.js | 8 +++++++- 1 
file changed, 7 insertions(+), 1 deletion(-) diff --git a/share/www/script/jquery.couch.js b/share/www/script/jquery.couch.js index edae18fc..9bc1363b 100644 --- a/share/www/script/jquery.couch.js +++ b/share/www/script/jquery.couch.js @@ -624,8 +624,14 @@ }; function ajax(obj, options, errorMessage, ajaxOptions) { + + var defaultAjaxOpts = { + contentType: "application/json", + headers:{"Accept": "application/json"} + }; + options = $.extend({successStatus: 200}, options); - ajaxOptions = $.extend({contentType: "application/json"}, ajaxOptions); + ajaxOptions = $.extend(defaultAjaxOpts, ajaxOptions); errorMessage = errorMessage || "Unknown error"; $.ajax($.extend($.extend({ type: "GET", dataType: "json", cache : !$.browser.msie, -- cgit v1.2.3 From 1a4f2d8c5ef87933192125b3feb98eaaa33a7bbc Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Mon, 23 May 2011 10:56:08 +0000 Subject: Merged revision 1126426 from trunk Fix timing issues in the doc PUT multipart/related API Two issues were present: 1) the handler replied to the request before the multipart parser consumed all the request's data, causing a subsequent request in the same connection to consume the remaining data from the multipart/related request; 2) the data function passed to the multipart parser could consume, and discard, all or part of the data from a subsequent request in the same connection. This closes COUCHDB-1174. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1126428 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_doc.erl | 17 +++++++++++------ src/couchdb/couch_httpd_db.erl | 17 +++++++++++++---- 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/src/couchdb/couch_doc.erl b/src/couchdb/couch_doc.erl index e3d66145..e2690f3e 100644 --- a/src/couchdb/couch_doc.erl +++ b/src/couchdb/couch_doc.erl @@ -461,16 +461,17 @@ atts_to_mp([Att | RestAtts], Boundary, WriteFun, doc_from_multi_part_stream(ContentType, DataFun) -> - Self = self(), + Parent = self(), Parser = spawn_link(fun() -> - couch_httpd:parse_multipart_request(ContentType, DataFun, - fun(Next)-> mp_parse_doc(Next, []) end), - unlink(Self) + {<<"--">>, _, _} = couch_httpd:parse_multipart_request( + ContentType, DataFun, + fun(Next) -> mp_parse_doc(Next, []) end), + unlink(Parent), + Parent ! {self(), finished} end), Parser ! {get_doc_bytes, self()}, receive {doc_bytes, DocBytes} -> - erlang:put(mochiweb_request_recv, true), Doc = from_json_obj(?JSON_DECODE(DocBytes)), % go through the attachments looking for 'follows' in the data, % replace with function that reads the data from MIME stream. @@ -484,7 +485,11 @@ doc_from_multi_part_stream(ContentType, DataFun) -> (A) -> A end, Doc#doc.atts), - {ok, Doc#doc{atts=Atts2}} + WaitFun = fun() -> + receive {Parser, finished} -> ok end, + erlang:put(mochiweb_request_recv, true) + end, + {ok, Doc#doc{atts=Atts2}, WaitFun} end. 
mp_parse_doc({headers, H}, []) -> diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl index 8336cac0..468ae3f0 100644 --- a/src/couchdb/couch_httpd_db.erl +++ b/src/couchdb/couch_httpd_db.erl @@ -687,10 +687,12 @@ db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) -> RespHeaders = [{"Location", Loc}], case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of ("multipart/related;" ++ _) = ContentType -> - {ok, Doc0} = couch_doc:doc_from_multi_part_stream(ContentType, - fun() -> receive_request_data(Req) end), + {ok, Doc0, WaitFun} = couch_doc:doc_from_multi_part_stream( + ContentType, fun() -> receive_request_data(Req) end), Doc = couch_doc_from_req(Req, DocId, Doc0), - update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType); + Result = update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType), + WaitFun(), + Result; _Else -> case couch_httpd:qs_value(Req, "batch") of "ok" -> @@ -825,7 +827,14 @@ send_ranges_multipart(Req, ContentType, Len, Att, Ranges) -> {ok, Resp}. receive_request_data(Req) -> - {couch_httpd:recv(Req, 0), fun() -> receive_request_data(Req) end}. + receive_request_data(Req, couch_httpd:body_length(Req)). + +receive_request_data(Req, LenLeft) when LenLeft > 0 -> + Len = erlang:min(4096, LenLeft), + Data = couch_httpd:recv(Req, Len), + {Data, fun() -> receive_request_data(Req, LenLeft - iolist_size(Data)) end}; +receive_request_data(_Req, _) -> + throw(<<"expected more data">>). make_content_range(From, To, Len) -> ?l2b(io_lib:format("bytes ~B-~B/~B", [From, To, Len])). -- cgit v1.2.3 From d952ac01cb4cd4ae5ceb0c8cc079acf595ff9747 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Mon, 23 May 2011 21:26:00 +0000 Subject: Fix authentication. Jquery append "*.*" to accept by default so if we test text/html first it will alway be true. Then test first if application/json was given and then test if text/html then others. Backported from trunk (related to COUCHDB-1175). git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1126759 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_httpd.erl | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/src/couchdb/couch_httpd.erl b/src/couchdb/couch_httpd.erl index 73d214e8..7c7781f6 100644 --- a/src/couchdb/couch_httpd.erl +++ b/src/couchdb/couch_httpd.erl @@ -764,24 +764,29 @@ error_headers(#httpd{mochi_req=MochiReq}=Req, Code, ErrorStr, ReasonStr) -> % send the browser popup header no matter what if we are require_valid_user {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]}; _False -> - case MochiReq:accepts_content_type("text/html") of - false -> - {Code, []}; + case MochiReq:accepts_content_type("application/json") of true -> - % Redirect to the path the user requested, not - % the one that is used internally. - UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of - undefined -> - MochiReq:get(path); - VHostPath -> - VHostPath - end, - RedirectLocation = lists:flatten([ - AuthRedirect, - "?return=", couch_util:url_encode(UrlReturnRaw), - "&reason=", couch_util:url_encode(ReasonStr) - ]), - {302, [{"Location", absolute_uri(Req, RedirectLocation)}]} + {Code, []}; + false -> + case MochiReq:accepts_content_type("text/html") of + true -> + % Redirect to the path the user requested, not + % the one that is used internally. 
+ UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of + undefined -> + MochiReq:get(path); + VHostPath -> + VHostPath + end, + RedirectLocation = lists:flatten([ + AuthRedirect, + "?return=", couch_util:url_encode(UrlReturnRaw), + "&reason=", couch_util:url_encode(ReasonStr) + ]), + {302, [{"Location", absolute_uri(Req, RedirectLocation)}]}; + false -> + {Code, []} + end end end end; -- cgit v1.2.3 From 172a751ce84a46d2f121a1c57f6d5554447c7bee Mon Sep 17 00:00:00 2001 From: Filipe David Borba Manana Date: Wed, 25 May 2011 19:01:03 +0000 Subject: Backported revision 1127632 from trunk Force non admins to supply a user_ctx in replication documents This is to prevent users deleting replication documents added by other users and to make it clear who triggers which replications. git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1127634 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/replicator_db.js | 238 +++++++++++++++++++++++++++++- src/couchdb/couch_js_functions.hrl | 30 ++-- src/couchdb/couch_rep.erl | 15 +- src/couchdb/couch_replication_manager.erl | 6 +- 4 files changed, 264 insertions(+), 25 deletions(-) diff --git a/share/www/script/test/replicator_db.js b/share/www/script/test/replicator_db.js index c28e067d..4434124e 100644 --- a/share/www/script/test/replicator_db.js +++ b/share/www/script/test/replicator_db.js @@ -186,7 +186,10 @@ couchTests.replicator_db = function(debug) { _id: "foo_cont_rep_doc", source: "http://" + host + "/" + dbA.name, target: dbB.name, - continuous: true + continuous: true, + user_ctx: { + roles: ["_admin"] + } }; T(repDb.save(repDoc).ok); @@ -220,10 +223,8 @@ couchTests.replicator_db = function(debug) { T(typeof repDoc1._replication_state_time === "string"); T(typeof repDoc1._replication_id === "string"); - // add a design doc to source, it will be replicated to target - // when the "user_ctx" property is not defined in the replication doc, - // the replication will be done under an _admin context, therefore - // design docs will be replicated + // Design documents are only replicated to local targets if the respective + // replication document has a user_ctx filed with the "_admin" role in it. 
var ddoc = { _id: "_design/foobar", language: "javascript" @@ -303,8 +304,7 @@ couchTests.replicator_db = function(debug) { T(copy === null); copy = dbB.open("_design/mydesign"); - T(copy !== null); - T(copy.language === "javascript"); + T(copy === null); } @@ -713,6 +713,225 @@ couchTests.replicator_db = function(debug) { } + function test_user_ctx_validation() { + populate_db(dbA, docs1); + populate_db(dbB, []); + populate_db(usersDb, []); + + var joeUserDoc = CouchDB.prepareUserDoc({ + name: "joe", + roles: ["erlanger", "bar"] + }, "erly"); + var fdmananaUserDoc = CouchDB.prepareUserDoc({ + name: "fdmanana", + roles: ["a", "b", "c"] + }, "qwerty"); + + TEquals(true, usersDb.save(joeUserDoc).ok); + TEquals(true, usersDb.save(fdmananaUserDoc).ok); + + T(dbB.setSecObj({ + admins: { + names: [], + roles: ["god"] + }, + readers: { + names: [], + roles: ["foo"] + } + }).ok); + + TEquals(true, CouchDB.login("joe", "erly").ok); + TEquals("joe", CouchDB.session().userCtx.name); + TEquals(-1, CouchDB.session().userCtx.roles.indexOf("_admin")); + + var repDoc = { + _id: "foo_rep", + source: CouchDB.protocol + host + "/" + dbA.name, + target: dbB.name + }; + + try { + repDb.save(repDoc); + T(false, "Should have failed, user_ctx missing."); + } catch (x) { + TEquals("forbidden", x.error); + } + + repDoc.user_ctx = { + name: "john", + roles: ["erlanger"] + }; + + try { + repDb.save(repDoc); + T(false, "Should have failed, wrong user_ctx.name."); + } catch (x) { + TEquals("forbidden", x.error); + } + + repDoc.user_ctx = { + name: "joe", + roles: ["bar", "god", "erlanger"] + }; + + try { + repDb.save(repDoc); + T(false, "Should have failed, a bad role in user_ctx.roles."); + } catch (x) { + TEquals("forbidden", x.error); + } + + // user_ctx.roles might contain only a subset of the user's roles + repDoc.user_ctx = { + name: "joe", + roles: ["erlanger"] + }; + + TEquals(true, repDb.save(repDoc).ok); + CouchDB.logout(); + + waitForRep(repDb, repDoc, "error"); + var repDoc1 = repDb.open(repDoc._id); + T(repDoc1 !== null); + TEquals(repDoc.source, repDoc1.source); + TEquals(repDoc.target, repDoc1.target); + TEquals("error", repDoc1._replication_state); + TEquals("string", typeof repDoc1._replication_id); + TEquals("string", typeof repDoc1._replication_state_time); + + TEquals(true, CouchDB.login("fdmanana", "qwerty").ok); + TEquals("fdmanana", CouchDB.session().userCtx.name); + TEquals(-1, CouchDB.session().userCtx.roles.indexOf("_admin")); + + try { + T(repDb.deleteDoc(repDoc1).ok); + T(false, "Shouldn't be able to delete replication document."); + } catch (x) { + TEquals("forbidden", x.error); + } + + CouchDB.logout(); + TEquals(true, CouchDB.login("joe", "erly").ok); + TEquals("joe", CouchDB.session().userCtx.name); + TEquals(-1, CouchDB.session().userCtx.roles.indexOf("_admin")); + + T(repDb.deleteDoc(repDoc1).ok); + CouchDB.logout(); + + for (var i = 0; i < docs1.length; i++) { + var doc = docs1[i]; + var copy = dbB.open(doc._id); + + TEquals(null, copy); + } + + T(dbB.setSecObj({ + admins: { + names: [], + roles: ["god", "erlanger"] + }, + readers: { + names: [], + roles: ["foo"] + } + }).ok); + + TEquals(true, CouchDB.login("joe", "erly").ok); + TEquals("joe", CouchDB.session().userCtx.name); + TEquals(-1, CouchDB.session().userCtx.roles.indexOf("_admin")); + + repDoc = { + _id: "foo_rep_2", + source: CouchDB.protocol + host + "/" + dbA.name, + target: dbB.name, + user_ctx: { + name: "joe", + roles: ["erlanger"] + } + }; + + TEquals(true, repDb.save(repDoc).ok); + CouchDB.logout(); + + 
waitForRep(repDb, repDoc, "complete"); + repDoc1 = repDb.open(repDoc._id); + T(repDoc1 !== null); + TEquals(repDoc.source, repDoc1.source); + TEquals(repDoc.target, repDoc1.target); + TEquals("completed", repDoc1._replication_state); + TEquals("string", typeof repDoc1._replication_id); + TEquals("string", typeof repDoc1._replication_state_time); + + for (var i = 0; i < docs1.length; i++) { + var doc = docs1[i]; + var copy = dbB.open(doc._id); + + T(copy !== null); + TEquals(doc.value, copy.value); + } + + // Admins don't need to supply a user_ctx property in replication docs. + // If they do not, the implicit user_ctx "user_ctx": {name: null, roles: []} + // is used, meaning that design documents will not be replicated into + // local targets + T(dbB.setSecObj({ + admins: { + names: [], + roles: [] + }, + readers: { + names: [], + roles: [] + } + }).ok); + + var ddoc = { _id: "_design/foo" }; + TEquals(true, dbA.save(ddoc).ok); + + repDoc = { + _id: "foo_rep_3", + source: CouchDB.protocol + host + "/" + dbA.name, + target: dbB.name + }; + + TEquals(true, repDb.save(repDoc).ok); + waitForRep(repDb, repDoc, "complete"); + repDoc1 = repDb.open(repDoc._id); + T(repDoc1 !== null); + TEquals(repDoc.source, repDoc1.source); + TEquals(repDoc.target, repDoc1.target); + TEquals("completed", repDoc1._replication_state); + TEquals("string", typeof repDoc1._replication_id); + TEquals("string", typeof repDoc1._replication_state_time); + + var ddoc_copy = dbB.open(ddoc._id); + T(ddoc_copy === null); + + repDoc = { + _id: "foo_rep_4", + source: CouchDB.protocol + host + "/" + dbA.name, + target: dbB.name, + user_ctx: { + roles: ["_admin"] + } + }; + + TEquals(true, repDb.save(repDoc).ok); + waitForRep(repDb, repDoc, "complete"); + repDoc1 = repDb.open(repDoc._id); + T(repDoc1 !== null); + TEquals(repDoc.source, repDoc1.source); + TEquals(repDoc.target, repDoc1.target); + TEquals("completed", repDoc1._replication_state); + TEquals("string", typeof repDoc1._replication_id); + TEquals("string", typeof repDoc1._replication_state_time); + + ddoc_copy = dbB.open(ddoc._id); + T(ddoc_copy !== null); + } + + function rep_doc_with_bad_rep_id() { populate_db(dbA, docs1); populate_db(dbB, []); @@ -1111,6 +1330,11 @@ couchTests.replicator_db = function(debug) { value: usersDb.name } ]); + + repDb.deleteDb(); + restartServer(); + run_on_modified_server(server_config_2, test_user_ctx_validation); + repDb.deleteDb(); restartServer(); run_on_modified_server(server_config_2, test_replication_credentials_delegation); diff --git a/src/couchdb/couch_js_functions.hrl b/src/couchdb/couch_js_functions.hrl index 1e3ed4e9..d07eead5 100644 --- a/src/couchdb/couch_js_functions.hrl +++ b/src/couchdb/couch_js_functions.hrl @@ -182,12 +182,6 @@ } if (newDoc.user_ctx) { - if (!isAdmin) { - reportError('Delegated replications (use of the ' + - '`user_ctx\\' property) can only be triggered by ' + - 'administrators.'); - } - var user_ctx = newDoc.user_ctx; if ((typeof user_ctx !== 'object') || (user_ctx === null)) { @@ -204,24 +198,40 @@ 'non-empty string or null.'); } + if (!isAdmin && (user_ctx.name !== userCtx.name)) { + reportError('The given `user_ctx.name\\' is not valid'); + } + if (user_ctx.roles && !isArray(user_ctx.roles)) { reportError('The `user_ctx.roles\\' property must be ' + 'an array of strings.'); } - if (user_ctx.roles) { + if (!isAdmin && user_ctx.roles) { for (var i = 0; i < user_ctx.roles.length; i++) { var role = user_ctx.roles[i]; if (typeof role !== 'string' || role.length === 0) { reportError('Roles must be 
non-empty strings.'); } - if (role[0] === '_') { - reportError('System roles (starting with an ' + - 'underscore) are not allowed.'); + if (userCtx.roles.indexOf(role) === -1) { + reportError('Invalid role (`' + role + + '\\') in the `user_ctx\\''); } } } + } else { + if (!isAdmin) { + reportError('The `user_ctx\\' property is missing (it is ' + + 'optional for admins only).'); + } + } + } else { + if (!isAdmin) { + if (!oldDoc.user_ctx || (oldDoc.user_ctx.name !== userCtx.name)) { + reportError('Replication documents can only be deleted by ' + + 'admins or by the users who created them.'); + } } } } diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl index 49a82e5d..fd323f7f 100644 --- a/src/couchdb/couch_rep.erl +++ b/src/couchdb/couch_rep.erl @@ -899,13 +899,14 @@ update_rep_doc(RepDb, #doc{body = {RepDocBody}} = RepDoc, KVs) -> RepDocBody, KVs ), - % might not succeed - when the replication doc is deleted right - % before this update (not an error) - couch_db:update_doc( - RepDb, - RepDoc#doc{body = {NewRepDocBody}}, - [] - ). + case NewRepDocBody of + RepDocBody -> + ok; + _ -> + % might not succeed - when the replication doc is deleted right + % before this update (not an error) + couch_db:update_doc(RepDb, RepDoc#doc{body = {NewRepDocBody}}, []) + end. % RFC3339 timestamps. % Note: doesn't include the time seconds fraction (RFC3339 says it's optional). diff --git a/src/couchdb/couch_replication_manager.erl b/src/couchdb/couch_replication_manager.erl index 6101c9c5..6537c8b2 100644 --- a/src/couchdb/couch_replication_manager.erl +++ b/src/couchdb/couch_replication_manager.erl @@ -253,7 +253,7 @@ process_update(State, {Change}) -> rep_user_ctx({RepDoc}) -> case get_value(<<"user_ctx">>, RepDoc) of undefined -> - #user_ctx{roles = [<<"_admin">>]}; + #user_ctx{}; {UserCtx} -> #user_ctx{ name = get_value(<<"name">>, UserCtx, null), @@ -307,6 +307,10 @@ start_replication(Server, {RepProps} = RepDoc, RepId, UserCtx, MaxRetries) -> ok = gen_server:call(Server, {triggered, RepId}, infinity), couch_rep:get_result(Pid, RepId, RepDoc, UserCtx); Error -> + couch_rep:update_rep_doc( + RepDoc, + [{<<"_replication_state">>, <<"error">>}, + {<<"_replication_id">>, ?l2b(element(1, RepId))}]), keep_retrying( Server, RepId, RepDoc, UserCtx, Error, ?INITIAL_WAIT, MaxRetries) end. -- cgit v1.2.3 From 12e69371d6d708022c3f28548e6f5ca501328719 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Thu, 26 May 2011 18:59:58 +0000 Subject: COUCHDB-1163 - fix internal state of documents affected by COUCHDB-885 (patch by Paul Davis) git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1128037 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_key_tree.erl | 49 +++++++++++++++++++++++++++++++----------- 1 file changed, 36 insertions(+), 13 deletions(-) diff --git a/src/couchdb/couch_key_tree.erl b/src/couchdb/couch_key_tree.erl index bc723cc2..e5d3549f 100644 --- a/src/couchdb/couch_key_tree.erl +++ b/src/couchdb/couch_key_tree.erl @@ -16,6 +16,8 @@ -export([map/2, get_all_leafs/1, count_leafs/1, remove_leafs/2, get_all_leafs_full/1,stem/2,map_leafs/2]). +-include("couch_db.hrl"). + % Tree::term() is really a tree(), but we don't want to require R13B04 yet -type branch() :: {Key::term(), Value::term(), Tree::term()}. -type path() :: {Start::pos_integer(), branch()}. 
@@ -82,9 +84,9 @@ merge_at(OurTree, Place, [{Key, Value, SubTree}]) when Place < 0 -> no -> no end; -merge_at([{Key, Value, SubTree}|Sibs], 0, [{Key, _Value, InsertSubTree}]) -> +merge_at([{Key, V1, SubTree}|Sibs], 0, [{Key, V2, InsertSubTree}]) -> {Merged, Conflicts} = merge_simple(SubTree, InsertSubTree), - {ok, [{Key, Value, Merged} | Sibs], Conflicts}; + {ok, [{Key, value_pref(V1, V2), Merged} | Sibs], Conflicts}; merge_at([{OurKey, _, _} | _], 0, [{Key, _, _}]) when OurKey > Key -> % siblings keys are ordered, no point in continuing no; @@ -103,9 +105,10 @@ merge_simple([], B) -> {B, false}; merge_simple(A, []) -> {A, false}; -merge_simple([{Key, Value, SubA} | NextA], [{Key, _, SubB} | NextB]) -> +merge_simple([{Key, V1, SubA} | NextA], [{Key, V2, SubB} | NextB]) -> {MergedSubTree, Conflict1} = merge_simple(SubA, SubB), {MergedNextTree, Conflict2} = merge_simple(NextA, NextB), + Value = value_pref(V1, V2), {[{Key, Value, MergedSubTree} | MergedNextTree], Conflict1 or Conflict2}; merge_simple([{A, _, _} = Tree | Next], [{B, _, _} | _] = Insert) when A < B -> {Merged, _} = merge_simple(Next, Insert), @@ -157,14 +160,18 @@ remove_leafs(Trees, Keys) -> % filter out any that are in the keys list. {FilteredPaths, RemovedKeys} = filter_leafs(Paths, Keys, [], []), + SortedPaths = lists:sort( + [{Pos + 1 - length(Path), Path} || {Pos, Path} <- FilteredPaths] + ), + % convert paths back to trees NewTree = lists:foldl( - fun({PathPos, Path},TreeAcc) -> + fun({StartPos, Path},TreeAcc) -> [SingleTree] = lists:foldl( fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path), - {NewTrees, _} = merge(TreeAcc, {PathPos + 1 - length(Path), SingleTree}), + {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}), NewTrees - end, [], FilteredPaths), + end, [], SortedPaths), {NewTree, RemovedKeys}. @@ -314,19 +321,35 @@ map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) -> stem(Trees, Limit) -> - % flatten each branch in a tree into a tree path - Paths = get_all_leafs_full(Trees), - - Paths2 = [{Pos, lists:sublist(Path, Limit)} || {Pos, Path} <- Paths], + % flatten each branch in a tree into a tree path, sort by starting rev # + Paths = lists:sort(lists:map(fun({Pos, Path}) -> + StemmedPath = lists:sublist(Path, Limit), + {Pos + 1 - length(StemmedPath), StemmedPath} + end, get_all_leafs_full(Trees))), % convert paths back to trees lists:foldl( - fun({PathPos, Path},TreeAcc) -> + fun({StartPos, Path},TreeAcc) -> [SingleTree] = lists:foldl( fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path), - {NewTrees, _} = merge(TreeAcc, {PathPos + 1 - length(Path), SingleTree}), + {NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}), NewTrees - end, [], Paths2). + end, [], Paths). + + +value_pref(Tuple, _) when is_tuple(Tuple), + (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) -> + Tuple; +value_pref(_, Tuple) when is_tuple(Tuple), + (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) -> + Tuple; +value_pref(?REV_MISSING, Other) -> + Other; +value_pref(Other, ?REV_MISSING) -> + Other; +value_pref(Last, _) -> + Last. + % Tests moved to test/etap/06?-*.t -- cgit v1.2.3 From 052284d890277ebe27fc4dea06636669484f4586 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Thu, 26 May 2011 19:00:07 +0000 Subject: COUCHDB-1173 - return Content-MD5 when fetching attachments where possible. 
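A sketch of how a client might consume that header once this change is in place (the test-suite request helper is assumed; database, document, and attachment names are hypothetical). The header is only sent when the bytes on the wire are the ones CouchDB holds a digest for, i.e. identity-encoded attachments or requests that accept the stored encoding, so clients should treat it as optional:

    var xhr = CouchDB.request("GET", "/test_suite_db/some_doc/some_att.txt");
    var md5 = xhr.getResponseHeader("Content-MD5");   // base64-encoded MD5 of the body, when known
    if (md5) {
      // recompute an MD5 over the received body and compare it with the header value
    }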
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1128038 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_httpd_db.erl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl index 468ae3f0..2930462b 100644 --- a/src/couchdb/couch_httpd_db.erl +++ b/src/couchdb/couch_httpd_db.erl @@ -1004,7 +1004,13 @@ db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNa {identity, Ranges} when is_list(Ranges) -> send_ranges_multipart(Req, Type, Len, Att, Ranges); _ -> - {ok, Resp} = start_response_length(Req, 200, Headers, Len), + Headers1 = Headers ++ + if Enc =:= identity orelse ReqAcceptsAttEnc =:= true -> + [{"Content-MD5", base64:encode(Att#att.md5)}]; + true -> + [] + end, + {ok, Resp} = start_response_length(Req, 200, Headers1, Len), AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp}) end end -- cgit v1.2.3 From 7c719fe3b113f03e37aced91ab28957e2c4aeab3 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Thu, 26 May 2011 19:00:19 +0000 Subject: COUCHDB-1178 - fix make_first_doc_on_disk function_clause error git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1128039 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/couch_tests.js | 1 + share/www/script/test/regression.js | 48 +++++++++++++++++++++++++++++++++++++ src/couchdb/couch_db.erl | 2 ++ 3 files changed, 51 insertions(+) create mode 100644 share/www/script/test/regression.js diff --git a/share/www/script/couch_tests.js b/share/www/script/couch_tests.js index eb573526..4c5c00d6 100644 --- a/share/www/script/couch_tests.js +++ b/share/www/script/couch_tests.js @@ -76,6 +76,7 @@ loadTest("reduce.js"); loadTest("reduce_builtin.js"); loadTest("reduce_false.js"); loadTest("reduce_false_temp.js"); +loadTest("regression.js"); loadTest("replication.js"); loadTest("replicator_db.js"); loadTest("rev_stemming.js"); diff --git a/share/www/script/test/regression.js b/share/www/script/test/regression.js new file mode 100644 index 00000000..abe42b40 --- /dev/null +++ b/share/www/script/test/regression.js @@ -0,0 +1,48 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. 
+ +couchTests.regression = function(debug) { + var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"}); + db.deleteDb(); + db.createDb(); + if (debug) debugger; + + // COUCHDB-1178 + { + var r1 = {"_id":"doc","foo":"bar"}; + var r2 = {"_id":"doc","foo":"baz","_rev":"1-4c6114c65e295552ab1019e2b046b10e"}; + var r3 = {"_id":"doc","foo":"bam","_rev":"2-cfcd6781f13994bde69a1c3320bfdadb"}; + var r4 = {"_id":"doc","foo":"bat","_rev":"3-cc2f3210d779aef595cd4738be0ef8ff"}; + + T(db.save({"_id":"_design/couchdb-1178","validate_doc_update":"function(){}"}).ok); + T(db.save(r1).ok); + T(db.save(r2).ok); + T(db.save(r3).ok); + + T(db.compact().ok); + while (db.info().compact_running) {}; + + TEquals({"_id":"doc", + "_rev":"3-cc2f3210d779aef595cd4738be0ef8ff", + "foo":"bam", + "_revisions":{"start":3, + "ids":["cc2f3210d779aef595cd4738be0ef8ff", + "cfcd6781f13994bde69a1c3320bfdadb", + "4c6114c65e295552ab1019e2b046b10e"]}}, + db.open("doc", {"revs": true})); + + TEquals([], db.bulkSave([r4, r3, r2], {"new_edits":false}), "no failures"); + } + + // cleanup + db.deleteDb(); +}; \ No newline at end of file diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index 1e7addaf..40d84618 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -733,6 +733,8 @@ update_docs(Db, Docs, Options, interactive_edit) -> % for the doc. make_first_doc_on_disk(_Db, _Id, _Pos, []) -> nil; +make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #doc{}} | RestPath]) -> + make_first_doc_on_disk(Db, Id, Pos-1, RestPath); make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) -> make_first_doc_on_disk(Db, Id, Pos - 1, RestPath); make_first_doc_on_disk(Db, Id, Pos, [{_Rev, {IsDel, Sp, _Seq}} |_]=DocPath) -> -- cgit v1.2.3 From d5aa8379b531c664bbad0a364fc19729f1e30315 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Thu, 26 May 2011 19:00:29 +0000 Subject: COUCHDB-1177 - don't read more of an attachment than Content-Length states. (original patch by Paul Davis, awesomeness enhanced by Robert Newson). git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1128040 13f79535-47bb-0310-9956-ffa450edef68 --- src/couchdb/couch_db.erl | 7 ++++++- src/couchdb/couch_httpd_db.erl | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index 40d84618..17402c81 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -923,10 +923,15 @@ with_stream(Fd, #att{md5=InMd5,type=Type,encoding=Enc}=Att, Fun) -> write_streamed_attachment(_Stream, _F, 0) -> ok; write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 -> - Bin = F(), + Bin = read_next_chunk(F, LenLeft), ok = couch_stream:write(Stream, Bin), write_streamed_attachment(Stream, F, LenLeft - size(Bin)). +read_next_chunk(F, _) when is_function(F, 0) -> + F(); +read_next_chunk(F, LenLeft) when is_function(F, 1) -> + F(lists:min([LenLeft, 16#2000])). + enum_docs_since_reduce_to_count(Reds) -> couch_btree:final_reduce( fun couch_db_updater:btree_by_seq_reduce/2, Reds). 
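The two hunks in this commit cooperate: write_streamed_attachment/3 no longer calls its reader blindly, it asks read_next_chunk/2 for at most the bytes still owed by the declared Content-Length, capped at 16#2000 (8 KiB) per call, and the couch_httpd_db.erl hunk that follows supplies an arity-1 reader (fun(Size) -> couch_httpd:recv(Req, Size) end) so no more than Size bytes are pulled from the socket; the arity-0 clause keeps zero-argument readers working. Below is a minimal, self-contained sketch of that bounded-copy pattern; the module name, demo data, and stand-in reader are illustrative assumptions, not CouchDB code.

%% bounded_copy_sketch: copy exactly LenLeft bytes from a size-aware reader,
%% in the style of write_streamed_attachment/read_next_chunk above.
-module(bounded_copy_sketch).
-export([demo/0]).

copy(_ReadFun, _WriteFun, 0) ->
    ok;
copy(ReadFun, WriteFun, LenLeft) when LenLeft > 0 ->
    %% Never request more than is still owed, capped at 8 KiB per call.
    Bin = ReadFun(lists:min([LenLeft, 16#2000])),
    ok = WriteFun(Bin),
    copy(ReadFun, WriteFun, LenLeft - byte_size(Bin)).

demo() ->
    %% A fake source holding more data than the declared length of 10 bytes;
    %% the trailing bytes must never be consumed.
    Source = <<"0123456789EXTRA-BYTES-THAT-MUST-NOT-BE-CONSUMED">>,
    Tab = ets:new(source_state, [set, public]),
    true = ets:insert(Tab, {offset, 0}),
    ReadFun = fun(Size) ->
        [{offset, Off}] = ets:lookup(Tab, offset),
        Chunk = binary:part(Source, Off, erlang:min(Size, byte_size(Source) - Off)),
        true = ets:insert(Tab, {offset, Off + byte_size(Chunk)}),
        Chunk
    end,
    WriteFun = fun(Bin) -> io:format("wrote ~p bytes~n", [byte_size(Bin)]), ok end,
    ok = copy(ReadFun, WriteFun, 10),
    true = ets:delete(Tab),
    ok.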
diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl index 2930462b..f51fde09 100644 --- a/src/couchdb/couch_httpd_db.erl +++ b/src/couchdb/couch_httpd_db.erl @@ -1067,7 +1067,7 @@ db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileN end, - fun() -> couch_httpd:recv(Req, 0) end + fun(Size) -> couch_httpd:recv(Req, Size) end end, att_len = case couch_httpd:header_value(Req,"Content-Length") of undefined -> -- cgit v1.2.3 From 66fa2a431d1ff3e01ff026d7e2daf7dcb1eb21d1 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Fri, 27 May 2011 08:12:47 +0000 Subject: COUCHDB-1074 - fix variable substitution in rewriter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - key= ":key", startkey=[":a", ":b"] - variable substitution via query arguments - variable substituin via reversed path matching variables The variable substition is now a lot easier than the old one. Variables are decoded from the query if they are json, and we recode them only at the end. (Patch by BenoƮt Chesneau) git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1128189 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/rewrite.js | 25 ++++- src/couchdb/couch_httpd_rewrite.erl | 192 ++++++++++++++++++++---------------- 2 files changed, 132 insertions(+), 85 deletions(-) diff --git a/share/www/script/test/rewrite.js b/share/www/script/test/rewrite.js index 86905f8f..bb188773 100644 --- a/share/www/script/test/rewrite.js +++ b/share/www/script/test/rewrite.js @@ -119,6 +119,10 @@ couchTests.rewrite = function(debug) { "query": { "startkey": ":start", "endkey": ":end" + }, + "formats": { + "start": "int", + "end": "int" } }, { @@ -163,6 +167,18 @@ couchTests.rewrite = function(debug) { "key": [":a", ":b"] } }, + { + "from": "simpleForm/complexView7/:a/:b", + "to": "_view/complexView3", + "query": { + "key": [":a", ":b"], + "include_docs": ":doc" + }, + "format": { + "doc": "bool" + } + + }, { "from": "/", "to": "_view/basicView", @@ -348,14 +364,14 @@ couchTests.rewrite = function(debug) { T(!(/Key: 1/.test(xhr.responseText))); T(/FirstKey: 3/.test(xhr.responseText)); T(/LastKey: 8/.test(xhr.responseText)); - + // get with query params xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/basicViewPath/3/8"); T(xhr.status == 200, "with query params"); T(!(/Key: 1/.test(xhr.responseText))); T(/FirstKey: 3/.test(xhr.responseText)); T(/LastKey: 8/.test(xhr.responseText)); - + // get with query params xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/complexView"); T(xhr.status == 200, "with query params"); @@ -380,6 +396,11 @@ couchTests.rewrite = function(debug) { xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/complexView6?a=test&b=essai"); T(xhr.status == 200, "with query params"); T(/Value: doc 4/.test(xhr.responseText)); + + xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/complexView7/test/essai?doc=true"); + T(xhr.status == 200, "with query params"); + var result = JSON.parse(xhr.responseText); + T(typeof(result.rows[0].doc) === "object"); // test path relative to server designDoc.rewrites.push({ diff --git a/src/couchdb/couch_httpd_rewrite.erl b/src/couchdb/couch_httpd_rewrite.erl index a8297ae1..e24cb5db 100644 --- a/src/couchdb/couch_httpd_rewrite.erl +++ b/src/couchdb/couch_httpd_rewrite.erl @@ -117,8 +117,7 @@ handle_rewrite_req(#httpd{ % we are in a design handler DesignId = <<"_design/", 
DesignName/binary>>, Prefix = <<"/", DbName/binary, "/", DesignId/binary>>, - QueryList = couch_httpd:qs(Req), - QueryList1 = [{to_binding(K), V} || {K, V} <- QueryList], + QueryList = lists:map(fun decode_query_value/1, couch_httpd:qs(Req)), #doc{body={Props}} = DDoc, @@ -130,10 +129,11 @@ handle_rewrite_req(#httpd{ Rules -> % create dispatch list from rules DispatchList = [make_rule(Rule) || {Rule} <- Rules], + Method1 = couch_util:to_binary(Method), %% get raw path by matching url to a rule. - RawPath = case try_bind_path(DispatchList, couch_util:to_binary(Method), PathParts, - QueryList1) of + RawPath = case try_bind_path(DispatchList, Method1, + PathParts, QueryList) of no_dispatch_path -> throw(not_found); {NewPathParts, Bindings} -> @@ -141,12 +141,13 @@ handle_rewrite_req(#httpd{ % build new path, reencode query args, eventually convert % them to json - Path = lists:append( - string:join(Parts, [?SEPARATOR]), - case Bindings of - [] -> []; - _ -> [$?, encode_query(Bindings)] - end), + Bindings1 = maybe_encode_bindings(Bindings), + Path = binary_to_list( + iolist_to_binary([ + string:join(Parts, [?SEPARATOR]), + [["?", mochiweb_util:urlencode(Bindings1)] + || Bindings1 =/= [] ] + ])), % if path is relative detect it and rewrite path case mochiweb_util:safe_relative_path(Path) of @@ -193,7 +194,7 @@ quote_plus(X) -> try_bind_path([], _Method, _PathParts, _QueryList) -> no_dispatch_path; try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) -> - [{PathParts1, Method1}, RedirectPath, QueryArgs] = Dispatch, + [{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch, case bind_method(Method1, Method) of true -> case bind_path(PathParts1, PathParts, []) of @@ -201,7 +202,8 @@ try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) -> Bindings1 = Bindings ++ QueryList, % we parse query args from the rule and fill % it eventually with bindings vars - QueryArgs1 = make_query_list(QueryArgs, Bindings1, []), + QueryArgs1 = make_query_list(QueryArgs, Bindings1, + Formats, []), % remove params in QueryLists1 that are already in % QueryArgs1 Bindings2 = lists:foldl(fun({K, V}, Acc) -> @@ -227,61 +229,79 @@ try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) -> %% rewriting dynamically the quey list given as query member in %% rewrites. Each value is replaced by one binding or an argument %% passed in url. -make_query_list([], _Bindings, Acc) -> +make_query_list([], _Bindings, _Formats, Acc) -> Acc; -make_query_list([{Key, {Value}}|Rest], Bindings, Acc) -> - Value1 = to_json({Value}), - make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]); -make_query_list([{Key, Value}|Rest], Bindings, Acc) when is_binary(Value) -> - Value1 = replace_var(Key, Value, Bindings), - make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]); -make_query_list([{Key, Value}|Rest], Bindings, Acc) when is_list(Value) -> - Value1 = replace_var(Key, Value, Bindings), - make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]); -make_query_list([{Key, Value}|Rest], Bindings, Acc) -> - make_query_list(Rest, Bindings, [{to_binding(Key), Value}|Acc]). 
- -replace_var(Key, Value, Bindings) -> - case Value of - <<":", Var/binary>> -> - get_var(Var, Bindings, Value); - <<"*">> -> - get_var(Value, Bindings, Value); - _ when is_list(Value) -> - Value1 = lists:foldr(fun(V, Acc) -> - V1 = case V of - <<":", VName/binary>> -> - case get_var(VName, Bindings, V) of - V2 when is_list(V2) -> - iolist_to_binary(V2); - V2 -> V2 - end; - <<"*">> -> - get_var(V, Bindings, V); - _ -> - V - end, - [V1|Acc] - end, [], Value), - to_json(Value1); - _ when is_binary(Value) -> - Value; - _ -> - case Key of - <<"key">> -> to_json(Value); - <<"startkey">> -> to_json(Value); - <<"start_key">> -> to_json(Value); - <<"endkey">> -> to_json(Value); - <<"end_key">> -> to_json(Value); - _ -> - lists:flatten(?JSON_ENCODE(Value)) - end +make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) -> + Value1 = {Value}, + make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]); +make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) -> + Value1 = replace_var(Value, Bindings, Formats), + make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]); +make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) -> + Value1 = replace_var(Value, Bindings, Formats), + make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]); +make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) -> + make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]). + +replace_var(<<"*">>=Value, Bindings, Formats) -> + get_var(Value, Bindings, Value, Formats); +replace_var(<<":", Var/binary>> = Value, Bindings, Formats) -> + get_var(Var, Bindings, Value, Formats); +replace_var(Value, _Bindings, _Formats) when is_binary(Value) -> + Value; +replace_var(Value, Bindings, Formats) when is_list(Value) -> + lists:reverse(lists:foldl(fun + (<<":", Var/binary>>=Value1, Acc) -> + [get_var(Var, Bindings, Value1, Formats)|Acc]; + (Value1, Acc) -> + [Value1|Acc] + end, [], Value)); +replace_var(Value, _Bindings, _Formats) -> + Value. + +maybe_json(Key, Value) -> + case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>, + <<"endkey">>, <<"end_key">>, <<"keys">>]) of + true -> + ?JSON_ENCODE(Value); + false -> + Value end. - -get_var(VarName, Props, Default) -> +get_var(VarName, Props, Default, Formats) -> VarName1 = to_binding(VarName), - couch_util:get_value(VarName1, Props, Default). + Val = couch_util:get_value(VarName1, Props, Default), + maybe_format(VarName, Val, Formats). + +maybe_format(VarName, Value, Formats) -> + case couch_util:get_value(VarName, Formats) of + undefined -> + Value; + Format -> + format(Format, Value) + end. + +format(<<"int">>, Value) when is_integer(Value) -> + Value; +format(<<"int">>, Value) when is_binary(Value) -> + format(<<"int">>, ?b2l(Value)); +format(<<"int">>, Value) when is_list(Value) -> + case (catch list_to_integer(Value)) of + IntVal when is_integer(IntVal) -> + IntVal; + _ -> + Value + end; +format(<<"bool">>, Value) when is_binary(Value) -> + format(<<"bool">>, ?b2l(Value)); +format(<<"bool">>, Value) when is_list(Value) -> + case string:to_lower(Value) of + "true" -> true; + "false" -> false; + _ -> Value + end; +format(_Format, Value) -> + Value. %% doc: build new patch from bindings. 
bindings are query args %% (+ dynamic query rewritten if needed) and bindings found in @@ -297,7 +317,8 @@ make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) -> make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) -> P2 = case couch_util:get_value({bind, P}, Bindings) of undefined -> << "undefined">>; - P1 -> P1 + P1 -> + iolist_to_binary(P1) end, make_new_path(Rest, Bindings, Remaining, [P2|Acc]); make_new_path([P|Rest], Bindings, Remaining, Acc) -> @@ -374,7 +395,11 @@ make_rule(Rule) -> To -> parse_path(To) end, - [{FromParts, Method}, ToParts, QueryArgs]. + Formats = case couch_util:get_value(<<"formats">>, Rule) of + undefined -> []; + {Fmts} -> Fmts + end, + [{FromParts, Method}, ToParts, QueryArgs, Formats]. parse_path(Path) -> {ok, SlashRE} = re:compile(<<"\\/">>), @@ -407,21 +432,25 @@ path_to_list([P|R], Acc, DotDotCount) -> end, path_to_list(R, [P1|Acc], DotDotCount). -encode_query(Props) -> - Props1 = lists:foldl(fun ({{bind, K}, V}, Acc) -> - case K of - <<"*">> -> Acc; - _ -> - V1 = case is_list(V) orelse is_binary(V) of - true -> V; - false -> - % probably it's a number - quote_plus(V) - end, - [{K, V1} | Acc] - end - end, [], Props), - lists:flatten(mochiweb_util:urlencode(Props1)). +maybe_encode_bindings([]) -> + []; +maybe_encode_bindings(Props) -> + lists:foldl(fun + ({{bind, <<"*">>}, _V}, Acc) -> + Acc; + ({{bind, K}, V}, Acc) -> + V1 = iolist_to_binary(maybe_json(K, V)), + [{K, V1}|Acc] + end, [], Props). + +decode_query_value({K,V}) -> + case lists:member(K, ["key", "startkey", "start_key", + "endkey", "end_key", "keys"]) of + true -> + {to_binding(K), ?JSON_DECODE(V)}; + false -> + {to_binding(K), ?l2b(V)} + end. to_binding({bind, V}) -> {bind, V}; @@ -429,6 +458,3 @@ to_binding(V) when is_list(V) -> to_binding(?l2b(V)); to_binding(V) -> {bind, V}. - -to_json(V) -> - iolist_to_binary(?JSON_ENCODE(V)). -- cgit v1.2.3 From 3c2b710d8b50ae48b5a65abeebc7d08fab84bebd Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Fri, 27 May 2011 08:20:24 +0000 Subject: update CHANGES git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1128191 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGES b/CHANGES index a32797f5..7c522b96 100644 --- a/CHANGES +++ b/CHANGES @@ -31,6 +31,8 @@ Replicator: * Added `_replicator` database to manage replications. * Fixed issues when an endpoint is a remote database accessible via SSL. * Added support for continuous by-doc-IDs replication. + * Fix issue where revision info was omitted when replicating attachments. + * Integrity of attachment replication is now verified by MD5. Storage System: @@ -50,6 +52,9 @@ Futon: * Added a "change password"-feature to Futon. +URL Rewriter & Vhosts: + + * Fix for variable substituion Version 1.0.1 ------------- -- cgit v1.2.3 From 57bb30a736a7e0635c3cab394b946b1acbbed266 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Sat, 28 May 2011 16:15:14 +0000 Subject: COUCHDB-1173 - pass md5 in stub so replication verifies the transfer. 
git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1128698 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/attachments_multipart.js | 2 ++ src/couchdb/couch_db.erl | 2 +- src/couchdb/couch_doc.erl | 20 ++++++++++++++++---- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/share/www/script/test/attachments_multipart.js b/share/www/script/test/attachments_multipart.js index 13a5abf4..4f4590fc 100644 --- a/share/www/script/test/attachments_multipart.js +++ b/share/www/script/test/attachments_multipart.js @@ -90,6 +90,7 @@ couchTests.attachments_multipart= function(debug) { //lets change attachment bar delete doc._attachments["bar.txt"].stub; // remove stub member (or could set to false) + delete doc._attachments["bar.txt"].digest; // remove the digest (it's for the gzip form) doc._attachments["bar.txt"].length = 18; doc._attachments["bar.txt"].follows = true; //lets delete attachment baz: @@ -107,6 +108,7 @@ couchTests.attachments_multipart= function(debug) { "this is 18 chars l" + "\r\n--abc123--" }); + TEquals(201, xhr.status); xhr = CouchDB.request("GET", "/test_suite_db/multipart/bar.txt"); diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl index 17402c81..85f7e291 100644 --- a/src/couchdb/couch_db.erl +++ b/src/couchdb/couch_db.erl @@ -812,7 +812,7 @@ doc_flush_atts(Doc, Fd) -> Doc#doc{atts=[flush_att(Fd, Att) || Att <- Doc#doc.atts]}. check_md5(_NewSig, <<>>) -> ok; -check_md5(Sig1, Sig2) when Sig1 == Sig2 -> ok; +check_md5(Sig, Sig) -> ok; check_md5(_, _) -> throw(md5_mismatch). flush_att(Fd, #att{data={Fd0, _}}=Att) when Fd0 == Fd -> diff --git a/src/couchdb/couch_doc.erl b/src/couchdb/couch_doc.erl index e2690f3e..5cd6ac80 100644 --- a/src/couchdb/couch_doc.erl +++ b/src/couchdb/couch_doc.erl @@ -87,8 +87,14 @@ to_json_attachments(Atts, OutputData, DataToFollow, ShowEncInfo) -> fun(#att{disk_len=DiskLen, att_len=AttLen, encoding=Enc}=Att) -> {Att#att.name, {[ {<<"content_type">>, Att#att.type}, - {<<"revpos">>, Att#att.revpos} - ] ++ + {<<"revpos">>, Att#att.revpos}] ++ + case Att#att.md5 of + <<>> -> + []; + Md5 -> + EncodedMd5 = base64:encode(Md5), + [{<<"digest">>, <<"md5-",EncodedMd5/binary>>}] + end ++ if not OutputData orelse Att#att.data == stub -> [{<<"length">>, DiskLen}, {<<"stub">>, true}]; true -> @@ -199,6 +205,12 @@ transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc) -> transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) -> Atts = lists:map(fun({Name, {BinProps}}) -> + Md5 = case couch_util:get_value(<<"digest">>, BinProps) of + <<"md5-",EncodedMd5/binary>> -> + base64:decode(EncodedMd5); + _ -> + <<>> + end, case couch_util:get_value(<<"stub">>, BinProps) of true -> Type = couch_util:get_value(<<"content_type">>, BinProps), @@ -206,7 +218,7 @@ transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) -> DiskLen = couch_util:get_value(<<"length">>, BinProps), {Enc, EncLen} = att_encoding_info(BinProps), #att{name=Name, data=stub, type=Type, att_len=EncLen, - disk_len=DiskLen, encoding=Enc, revpos=RevPos}; + disk_len=DiskLen, encoding=Enc, revpos=RevPos, md5=Md5}; _ -> Type = couch_util:get_value(<<"content_type">>, BinProps, ?DEFAULT_ATTACHMENT_CONTENT_TYPE), @@ -216,7 +228,7 @@ transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) -> DiskLen = couch_util:get_value(<<"length">>, BinProps), {Enc, EncLen} = att_encoding_info(BinProps), #att{name=Name, data=follows, type=Type, encoding=Enc, - att_len=EncLen, disk_len=DiskLen, revpos=RevPos}; + att_len=EncLen, disk_len=DiskLen, 
revpos=RevPos, md5=Md5}; _ -> Value = couch_util:get_value(<<"data">>, BinProps), Bin = base64:decode(Value), -- cgit v1.2.3 From 31e900ee642e672cec0b7b561c4bb596e1acf9a5 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Sat, 28 May 2011 16:50:10 +0000 Subject: COUCHDB-1171 Multiple requests to _changes feed causes {error, system_limit} "Too many processes" git-svn-id: https://svn.apache.org/repos/asf/couchdb/branches/1.1.x@1128704 13f79535-47bb-0310-9956-ffa450edef68 --- share/www/script/test/changes.js | 4 ++++ src/couchdb/couch_httpd_db.erl | 11 ++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/share/www/script/test/changes.js b/share/www/script/test/changes.js index 5998f48c..ea22bfb3 100644 --- a/share/www/script/test/changes.js +++ b/share/www/script/test/changes.js @@ -503,6 +503,10 @@ couchTests.changes = function(debug) { TEquals("0", resp.results[0].id); TEquals("1", resp.results[1].id); + TEquals(0, CouchDB.requestStats('httpd', 'clients_requesting_changes').current); + CouchDB.request("GET", "/" + db.name + "/_changes"); + TEquals(0, CouchDB.requestStats('httpd', 'clients_requesting_changes').current); + // cleanup db.deleteDb(); }; diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl index f51fde09..e3638b25 100644 --- a/src/couchdb/couch_httpd_db.erl +++ b/src/couchdb/couch_httpd_db.erl @@ -114,11 +114,16 @@ handle_changes_req1(Req, Db) -> FeedChangesFun(MakeCallback(Resp)) end end, - couch_stats_collector:track_process_count( + couch_stats_collector:increment( {httpd, clients_requesting_changes} ), - WrapperFun(ChangesFun). - + try + WrapperFun(ChangesFun) + after + couch_stats_collector:decrement( + {httpd, clients_requesting_changes} + ) + end. handle_compact_req(#httpd{method='POST',path_parts=[DbName,_,Id|_]}=Req, Db) -> ok = couch_db:check_is_admin(Db), -- cgit v1.2.3
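The final change swaps couch_stats_collector:track_process_count/1 for an explicit increment paired with a decrement in an after clause, so the clients_requesting_changes gauge is released as soon as the (potentially long-lived) changes handler returns or throws, instead of relying on per-caller tracking, which the COUCHDB-1171 report ties to {error, system_limit} ("Too many processes"). A minimal sketch of that guard pattern follows; with_gauge/2 and the stand-in counter functions are assumed names, not part of couch_stats_collector.

%% gauge_guard_sketch: bump a gauge around a long-running handler and
%% guarantee the matching decrement with try ... after, mirroring the
%% handle_changes_req1/2 change above. increment/1 and decrement/1 are
%% trivial stand-ins for couch_stats_collector.
-module(gauge_guard_sketch).
-export([with_gauge/2, demo/0]).

with_gauge(StatKey, HandlerFun) when is_function(HandlerFun, 0) ->
    increment(StatKey),
    try
        HandlerFun()            % may block for a long time, return, or throw
    after
        decrement(StatKey)      % runs on normal return and on any exception
    end.

increment(StatKey) -> io:format("inc ~p~n", [StatKey]).
decrement(StatKey) -> io:format("dec ~p~n", [StatKey]).

demo() ->
    ok = with_gauge({httpd, clients_requesting_changes},
                    fun() -> timer:sleep(10), ok end),
    %% The decrement still happens when the handler exits abnormally.
    {'EXIT', _} = (catch with_gauge({httpd, clients_requesting_changes},
                                    fun() -> exit(client_disconnected) end)),
    ok.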