-rw-r--r--  etc/couchdb/default.ini.tpl.in | 2
-rw-r--r--  etc/couchdb/local.ini | 6
-rw-r--r--  share/server/render.js | 18
-rw-r--r--  share/server/util.js | 2
-rw-r--r--  share/server/views.js | 6
-rw-r--r--  share/www/couch_tests.html | 2
-rw-r--r--  share/www/dialog/_compact_database.html | 2
-rw-r--r--  share/www/script/couch.js | 52
-rw-r--r--  share/www/script/couch_test_runner.js | 2
-rw-r--r--  share/www/script/futon.browse.js | 14
-rw-r--r--  share/www/script/jquery.couch.js | 4
-rw-r--r--  share/www/script/jquery.form.js | 52
-rw-r--r--  share/www/script/jquery.js | 148
-rw-r--r--  share/www/script/test/all_docs.js | 18
-rw-r--r--  share/www/script/test/attachment_names.js | 54
-rw-r--r--  share/www/script/test/attachment_paths.js | 12
-rw-r--r--  share/www/script/test/attachment_views.js | 2
-rw-r--r--  share/www/script/test/attachments.js | 30
-rw-r--r--  share/www/script/test/basics.js | 16
-rw-r--r--  share/www/script/test/batch_save.js | 14
-rw-r--r--  share/www/script/test/bulk_docs.js | 8
-rw-r--r--  share/www/script/test/changes.js | 62
-rw-r--r--  share/www/script/test/compact.js | 4
-rw-r--r--  share/www/script/test/conflicts.js | 2
-rw-r--r--  share/www/script/test/delayed_commits.js | 58
-rw-r--r--  share/www/script/test/design_docs.js | 12
-rw-r--r--  share/www/script/test/design_options.js | 4
-rw-r--r--  share/www/script/test/design_paths.js | 2
-rw-r--r--  share/www/script/test/etags_views.js | 10
-rw-r--r--  share/www/script/test/invalid_docids.js | 2
-rw-r--r--  share/www/script/test/list_views.js | 32
-rw-r--r--  share/www/script/test/purge.js | 26
-rw-r--r--  share/www/script/test/reduce.js | 6
-rw-r--r--  share/www/script/test/reduce_builtin.js | 12
-rw-r--r--  share/www/script/test/reduce_false.js | 2
-rw-r--r--  share/www/script/test/replication.js | 58
-rw-r--r--  share/www/script/test/rev_stemming.js | 36
-rw-r--r--  share/www/script/test/security_validation.js | 54
-rw-r--r--  share/www/script/test/show_documents.js | 32
-rw-r--r--  share/www/script/test/stats.js | 29
-rw-r--r--  share/www/script/test/uuids.js | 12
-rw-r--r--  share/www/script/test/view_collation.js | 10
-rw-r--r--  share/www/script/test/view_errors.js | 32
-rw-r--r--  share/www/script/test/view_multi_key_design.js | 2
-rw-r--r--  share/www/script/test/view_multi_key_temp.js | 2
-rw-r--r--  share/www/script/test/view_offsets.js | 10
-rw-r--r--  share/www/script/test/view_pagination.js | 4
-rw-r--r--  share/www/style/layout.css | 2
-rw-r--r--  src/couchdb/couch_batch_save.erl | 24
-rw-r--r--  src/couchdb/couch_batch_save_sup.erl | 4
-rw-r--r--  src/couchdb/couch_btree.erl | 30
-rw-r--r--  src/couchdb/couch_config.erl | 6
-rw-r--r--  src/couchdb/couch_config_writer.erl | 22
-rw-r--r--  src/couchdb/couch_db.erl | 84
-rw-r--r--  src/couchdb/couch_db.hrl | 8
-rw-r--r--  src/couchdb/couch_db_update_notifier_sup.erl | 8
-rw-r--r--  src/couchdb/couch_db_updater.erl | 126
-rw-r--r--  src/couchdb/couch_doc.erl | 32
-rw-r--r--  src/couchdb/couch_erl_driver.c | 4
-rw-r--r--  src/couchdb/couch_external_manager.erl | 2
-rw-r--r--  src/couchdb/couch_external_server.erl | 2
-rw-r--r--  src/couchdb/couch_file.erl | 24
-rw-r--r--  src/couchdb/couch_httpd.erl | 40
-rw-r--r--  src/couchdb/couch_httpd_db.erl | 88
-rw-r--r--  src/couchdb/couch_httpd_external.erl | 10
-rw-r--r--  src/couchdb/couch_httpd_misc_handlers.erl | 8
-rw-r--r--  src/couchdb/couch_httpd_show.erl | 40
-rw-r--r--  src/couchdb/couch_httpd_view.erl | 40
-rw-r--r--  src/couchdb/couch_js.c | 44
-rw-r--r--  src/couchdb/couch_key_tree.erl | 46
-rw-r--r--  src/couchdb/couch_log.erl | 4
-rw-r--r--  src/couchdb/couch_query_servers.erl | 30
-rw-r--r--  src/couchdb/couch_ref_counter.erl | 8
-rw-r--r--  src/couchdb/couch_rep.erl | 216
-rw-r--r--  src/couchdb/couch_server.erl | 12
-rw-r--r--  src/couchdb/couch_server_sup.erl | 6
-rw-r--r--  src/couchdb/couch_stats_aggregator.erl | 48
-rw-r--r--  src/couchdb/couch_stats_collector.erl | 16
-rw-r--r--  src/couchdb/couch_stream.erl | 6
-rw-r--r--  src/couchdb/couch_task_status.erl | 2
-rw-r--r--  src/couchdb/couch_util.erl | 12
-rw-r--r--  src/couchdb/couch_view.erl | 44
-rw-r--r--  src/couchdb/couch_view_compactor.erl | 28
-rw-r--r--  src/couchdb/couch_view_group.erl | 64
-rw-r--r--  src/couchdb/couch_view_updater.erl | 16
-rw-r--r--  src/couchdb/curlhelper.c | 4
-rw-r--r--  src/ibrowse/ibrowse.app | 8
-rw-r--r--  src/ibrowse/ibrowse.erl | 42
-rw-r--r--  src/ibrowse/ibrowse_app.erl | 8
-rw-r--r--  src/ibrowse/ibrowse_http_client.erl | 16
-rw-r--r--  src/ibrowse/ibrowse_lb.erl | 6
-rw-r--r--  src/ibrowse/ibrowse_lib.erl | 20
-rw-r--r--  src/ibrowse/ibrowse_sup.erl | 4
-rw-r--r--  src/ibrowse/ibrowse_test.erl | 4
-rw-r--r--  src/mochiweb/mochijson.erl | 8
-rw-r--r--  src/mochiweb/mochijson2.erl | 10
-rw-r--r--  src/mochiweb/mochinum.erl | 8
-rw-r--r--  src/mochiweb/mochiweb_charref.erl | 2
-rw-r--r--  src/mochiweb/mochiweb_cookies.erl | 22
-rw-r--r--  src/mochiweb/mochiweb_echo.erl | 2
-rw-r--r--  src/mochiweb/mochiweb_headers.erl | 4
-rw-r--r--  src/mochiweb/mochiweb_html.erl | 2
-rw-r--r--  src/mochiweb/mochiweb_request.erl | 22
-rw-r--r--  src/mochiweb/mochiweb_skel.erl | 6
-rw-r--r--  src/mochiweb/mochiweb_socket_server.erl | 14
-rw-r--r--  src/mochiweb/mochiweb_util.erl | 2
-rw-r--r--  test/couch_config_test.erl | 6
-rw-r--r--  test/couch_config_writer_test.erl | 6
-rwxr-xr-x  test/etap/010-file-basics.t | 14
-rwxr-xr-x  test/etap/011-file-headers.t | 39
-rwxr-xr-x  test/etap/020-btree-basics.t | 8
-rwxr-xr-x  test/etap/021-btree-reductions.t | 12
-rwxr-xr-x  test/etap/030-doc-from-json.t | 10
-rwxr-xr-x  test/etap/031-doc-to-json.t | 4
-rwxr-xr-x  test/etap/040-util.t | 8
-rwxr-xr-x  test/etap/050-stream.t | 8
-rwxr-xr-x  test/etap/060-kt-merging.t | 10
-rwxr-xr-x  test/etap/061-kt-missing-leaves.t | 2
-rwxr-xr-x  test/etap/062-kt-remove-leaves.t | 10
-rwxr-xr-x  test/etap/063-kt-get-leaves.t | 14
-rwxr-xr-x  test/etap/064-kt-counting.t | 6
-rwxr-xr-x  test/etap/070-couch-db.t | 8
-rwxr-xr-x  test/etap/080-config-get-set.t | 42
-rwxr-xr-x  test/etap/081-config-override.t | 62
-rwxr-xr-x  test/etap/082-config-register.t | 12
-rwxr-xr-x  test/etap/083-config-no-files.t | 4
-rwxr-xr-x  test/etap/090-task-status.t | 18
-rwxr-xr-x  test/etap/100-ref-counter.t | 8
-rw-r--r--  test/query_server_spec.rb | 50
-rw-r--r--  test/test.js | 6
130 files changed, 1382 insertions, 1384 deletions
diff --git a/etc/couchdb/default.ini.tpl.in b/etc/couchdb/default.ini.tpl.in
index 25509615..c4a87363 100644
--- a/etc/couchdb/default.ini.tpl.in
+++ b/etc/couchdb/default.ini.tpl.in
@@ -72,7 +72,7 @@ _temp_view = {couch_httpd_view, handle_temp_view_req}
_changes = {couch_httpd_db, handle_changes_req}
; The external module takes an optional argument allowing you to narrow it to a
-; single script. Otherwise the script name is inferred from the first path section
+; single script. Otherwise the script name is inferred from the first path section
; after _external's own path.
; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
; _external = {couch_httpd_external, handle_external_req}
diff --git a/etc/couchdb/local.ini b/etc/couchdb/local.ini
index bd72f3e3..f2dbc4fb 100644
--- a/etc/couchdb/local.ini
+++ b/etc/couchdb/local.ini
@@ -19,9 +19,9 @@
; To create an admin account uncomment the '[admins]' section below and add a
; line in the format 'username = password'. When you next start CouchDB, it
-; will change the password to a hash (so that your passwords don't linger
-; around in plain-text files). You can add more admin accounts with more
-; 'username = password' lines. Don't forget to restart CouchDB after
+; will change the password to a hash (so that your passwords don't linger
+; around in plain-text files). You can add more admin accounts with more
+; 'username = password' lines. Don't forget to restart CouchDB after
; changing this.
;[admins]
;admin = mysecretpassword
diff --git a/share/server/render.js b/share/server/render.js
index b3a24bde..304d0fc7 100644
--- a/share/server/render.js
+++ b/share/server/render.js
@@ -121,7 +121,7 @@ respondWith = function(req, responders) {
var provides = [];
for (key in responders) {
if (mimesByKey[key]) {
- provides = provides.concat(mimesByKey[key]);
+ provides = provides.concat(mimesByKey[key]);
}
}
var bestMime = Mimeparse.bestMatch(provides, accept);
@@ -130,7 +130,7 @@ respondWith = function(req, responders) {
bestKey = req.query.format;
}
var rFunc = responders[bestKey || responders.fallback || "html"];
- if (rFunc) {
+ if (rFunc) {
if (isShow) {
var resp = maybeWrapResponse(rFunc());
resp["headers"] = resp["headers"] || {};
@@ -141,7 +141,7 @@ respondWith = function(req, responders) {
respTail = rFunc();
}
} else {
- throw({code:406, body:"Not Acceptable: "+accept});
+ throw({code:406, body:"Not Acceptable: "+accept});
}
};
@@ -198,7 +198,7 @@ function sendStart(label) {
startResp = startResp || {};
startResp["headers"] = startResp["headers"] || {};
startResp["headers"]["Content-Type"] = startResp["headers"]["Content-Type"] || respCT;
-
+
respond(["start", chunks, startResp]);
chunks = [];
startResp = {};
@@ -221,7 +221,7 @@ function getRow() {
gotRow = true;
sendStart();
} else {
- blowChunks()
+ blowChunks()
}
var line = readline();
var json = eval(line);
@@ -247,7 +247,7 @@ function getRow() {
var isShow = false;
var Render = (function() {
var row_info;
-
+
return {
show : function(funSrc, doc, req) {
isShow = true;
@@ -295,11 +295,11 @@ function runListRenderFunction(renderFun, args, funSrc, htmlErrors) {
getRow();
}
if (typeof resp != "undefined") {
- chunks.push(resp);
+ chunks.push(resp);
} else if (respTail) {
- chunks.push(respTail);
+ chunks.push(respTail);
}
- blowChunks("end");
+ blowChunks("end");
} catch(e) {
respondError(e, funSrc, htmlErrors);
}
diff --git a/share/server/util.js b/share/server/util.js
index 13b8a779..28c02fbc 100644
--- a/share/server/util.js
+++ b/share/server/util.js
@@ -94,7 +94,7 @@ function recursivelySeal(obj) {
// prints the object as JSON, and rescues and logs any toJSON() related errors
function respond(obj) {
try {
- print(toJSON(obj));
+ print(toJSON(obj));
} catch(e) {
log("Error converting object to JSON: " + e.toString());
}
diff --git a/share/server/views.js b/share/server/views.js
index 5e193cbc..9e421237 100644
--- a/share/server/views.js
+++ b/share/server/views.js
@@ -27,7 +27,7 @@ sum = function(values) {
}
var Views = (function() {
-
+
function runReduce(reduceFuns, keys, values, rereduce) {
for (var i in reduceFuns) {
reduceFuns[i] = compileFunction(reduceFuns[i]);
@@ -51,7 +51,7 @@ var Views = (function() {
if (query_config && query_config.reduce_limit &&
reduce_length > 200 && ((reduce_length * 2) > line.length)) {
var reduce_preview = "Current output: '"+(reduce_line.substring(0,100) + "'... (first 100 of "+reduce_length+' bytes)');
-
+
throw {
error:"reduce_overflow_error",
reason: "Reduce output must shrink more rapidly: "+reduce_preview+""
@@ -60,7 +60,7 @@ var Views = (function() {
print("[true," + reduce_line + "]");
}
};
-
+
return {
reduce : function(reduceFuns, kvs) {
var keys = new Array(kvs.length);
diff --git a/share/www/couch_tests.html b/share/www/couch_tests.html
index 5d1b4178..0d2e4c3d 100644
--- a/share/www/couch_tests.html
+++ b/share/www/couch_tests.html
@@ -26,7 +26,7 @@ specific language governing permissions and limitations under the License.
<script src="script/futon.js?0.9.0"></script>
<script src="script/couch_test_runner.js"></script>
<script>
- $(function() {
+ $(function() {
updateTestsListing();
$("#toolbar button.run").click(runAllTests);
$("#toolbar button.load").click(function() {
diff --git a/share/www/dialog/_compact_database.html b/share/www/dialog/_compact_database.html
index efa5d345..56b09978 100644
--- a/share/www/dialog/_compact_database.html
+++ b/share/www/dialog/_compact_database.html
@@ -17,7 +17,7 @@ specific language governing permissions and limitations under the License.
<fieldset>
<p class="help">
Compacting a database removes deleted documents and previous revisions.
- It is an <strong>irreversible operation</strong> and my take
+ It is an <strong>irreversible operation</strong> and my take
a while to complete for large databases.
</p>
</fieldset>
diff --git a/share/www/script/couch.js b/share/www/script/couch.js
index c4c1ae9f..1f6a7444 100644
--- a/share/www/script/couch.js
+++ b/share/www/script/couch.js
@@ -16,11 +16,11 @@
function CouchDB(name, httpHeaders) {
this.name = name;
this.uri = "/" + encodeURIComponent(name) + "/";
-
+
// The XMLHttpRequest object from the most recent request. Callers can
// use this to check result http status and headers.
this.last_req = null;
-
+
this.request = function(method, uri, requestOptions) {
requestOptions = requestOptions || {}
requestOptions.headers = combine(requestOptions.headers, httpHeaders)
@@ -48,7 +48,7 @@ function CouchDB(name, httpHeaders) {
if (doc._id == undefined)
doc._id = CouchDB.newUuids(1)[0];
- this.last_req = this.request("PUT", this.uri +
+ this.last_req = this.request("PUT", this.uri +
encodeURIComponent(doc._id) + encodeOptions(options),
{body: JSON.stringify(doc)});
CouchDB.maybeThrowError(this.last_req);
@@ -84,7 +84,7 @@ function CouchDB(name, httpHeaders) {
doc._rev = result.rev; //record rev in input document
return result;
}
-
+
this.bulkSave = function(docs, options) {
// first prepoulate the UUIDs for new documents
var newCount = 0
@@ -119,7 +119,7 @@ function CouchDB(name, httpHeaders) {
return results;
}
}
-
+
this.ensureFullCommit = function() {
this.last_req = this.request("POST", this.uri + "_ensure_full_commit");
CouchDB.maybeThrowError(this.last_req);
@@ -130,7 +130,7 @@ function CouchDB(name, httpHeaders) {
this.query = function(mapFun, reduceFun, options, keys) {
var body = {language: "javascript"};
if(keys) {
- body.keys = keys ;
+ body.keys = keys ;
}
if (typeof(mapFun) != "string")
mapFun = mapFun.toSource ? mapFun.toSource() : "(" + mapFun.toString() + ")";
@@ -154,15 +154,15 @@ function CouchDB(name, httpHeaders) {
this.view = function(viewname, options, keys) {
var viewParts = viewname.split('/');
- var viewPath = this.uri + "_design/" + viewParts[0] + "/_view/"
+ var viewPath = this.uri + "_design/" + viewParts[0] + "/_view/"
+ viewParts[1] + encodeOptions(options);
if(!keys) {
- this.last_req = this.request("GET", viewPath);
+ this.last_req = this.request("GET", viewPath);
} else {
this.last_req = this.request("POST", viewPath, {
headers: {"Content-Type": "application/json"},
body: JSON.stringify({keys:keys})
- });
+ });
}
if (this.last_req.status == 404)
return null;
@@ -183,7 +183,7 @@ function CouchDB(name, httpHeaders) {
CouchDB.maybeThrowError(this.last_req);
return JSON.parse(this.last_req.responseText);
}
-
+
this.viewCleanup = function() {
this.last_req = this.request("POST", this.uri + "_view_cleanup");
CouchDB.maybeThrowError(this.last_req);
@@ -192,17 +192,17 @@ function CouchDB(name, httpHeaders) {
this.allDocs = function(options,keys) {
if(!keys) {
- this.last_req = this.request("GET", this.uri + "_all_docs" + encodeOptions(options));
+ this.last_req = this.request("GET", this.uri + "_all_docs" + encodeOptions(options));
} else {
this.last_req = this.request("POST", this.uri + "_all_docs" + encodeOptions(options), {
headers: {"Content-Type": "application/json"},
body: JSON.stringify({keys:keys})
- });
+ });
}
CouchDB.maybeThrowError(this.last_req);
return JSON.parse(this.last_req.responseText);
}
-
+
this.designDocs = function() {
return this.allDocs({startkey:"_design", endkey:"_design0"});
};
@@ -210,12 +210,12 @@ function CouchDB(name, httpHeaders) {
this.allDocsBySeq = function(options,keys) {
var req = null;
if(!keys) {
- req = this.request("GET", this.uri + "_all_docs_by_seq" + encodeOptions(options));
+ req = this.request("GET", this.uri + "_all_docs_by_seq" + encodeOptions(options));
} else {
req = this.request("POST", this.uri + "_all_docs_by_seq" + encodeOptions(options), {
headers: {"Content-Type": "application/json"},
body: JSON.stringify({keys:keys})
- });
+ });
}
CouchDB.maybeThrowError(req);
return JSON.parse(req.responseText);
@@ -226,7 +226,7 @@ function CouchDB(name, httpHeaders) {
CouchDB.maybeThrowError(this.last_req);
return JSON.parse(this.last_req.responseText);
}
-
+
this.setDbProperty = function(propId, propValue) {
this.last_req = this.request("PUT", this.uri + propId,{
body:JSON.stringify(propValue)
@@ -234,13 +234,13 @@ function CouchDB(name, httpHeaders) {
CouchDB.maybeThrowError(this.last_req);
return JSON.parse(this.last_req.responseText);
}
-
+
this.getDbProperty = function(propId) {
this.last_req = this.request("GET", this.uri + propId);
CouchDB.maybeThrowError(this.last_req);
return JSON.parse(this.last_req.responseText);
}
-
+
this.setAdmins = function(adminsArray) {
this.last_req = this.request("PUT", this.uri + "_admins",{
body:JSON.stringify(adminsArray)
@@ -248,13 +248,13 @@ function CouchDB(name, httpHeaders) {
CouchDB.maybeThrowError(this.last_req);
return JSON.parse(this.last_req.responseText);
}
-
+
this.getAdmins = function() {
this.last_req = this.request("GET", this.uri + "_admins");
CouchDB.maybeThrowError(this.last_req);
return JSON.parse(this.last_req.responseText);
}
-
+
// Convert a options object to an url query string.
// ex: {key:'value',key2:'value2'} becomes '?key="value"&key2="value2"'
function encodeOptions(options) {
@@ -278,26 +278,26 @@ function CouchDB(name, httpHeaders) {
function toJSON(obj) {
return obj !== null ? JSON.stringify(obj) : null;
}
-
+
function combine(object1, object2) {
if (!object2)
return object1;
if (!object1)
return object2;
-
+
for (var name in object2)
object1[name] = object2[name];
-
+
return object1;
}
-
-
+
+
}
// this is the XMLHttpRequest object from last request made by the following
// CouchDB.* functions (except for calls to request itself).
// Use this from callers to check HTTP status or header values of requests.
-CouchDB.last_req = null;
+CouchDB.last_req = null;
CouchDB.allDbs = function() {
diff --git a/share/www/script/couch_test_runner.js b/share/www/script/couch_test_runner.js
index c396cd16..ef7d42aa 100644
--- a/share/www/script/couch_test_runner.js
+++ b/share/www/script/couch_test_runner.js
@@ -12,7 +12,7 @@
// *********************** Test Framework of Sorts ************************* //
-function loadScript(url) {
+function loadScript(url) {
if (typeof document != "undefined") document.write('<script src="'+url+'"></script>');
};
diff --git a/share/www/script/futon.browse.js b/share/www/script/futon.browse.js
index e518c9ab..37afe1d4 100644
--- a/share/www/script/futon.browse.js
+++ b/share/www/script/futon.browse.js
@@ -49,7 +49,7 @@
var dbsOnPage = dbs.slice(offset, offset + maxPerPage);
$.each(dbsOnPage, function(idx, dbName) {
- $("#databases tbody.content").append("<tr>" +
+ $("#databases tbody.content").append("<tr>" +
"<th><a href='database.html?" + encodeURIComponent(dbName) + "'>" +
dbName + "</a></th>" +
"<td class='size'></td><td class='count'></td>" +
@@ -504,7 +504,7 @@
resp.rows = resp.rows.reverse();
}
var has_reduce_prev = resp.total_rows === undefined && (descending_reverse ? resp.rows.length > per_page : options.startkey !== undefined);
- if (resp.rows !== null && (has_reduce_prev || (descending_reverse ?
+ if (resp.rows !== null && (has_reduce_prev || (descending_reverse ?
(resp.total_rows - resp.offset > per_page) :
(resp.offset > 0)))) {
$("#paging a.prev").attr("href", "#" + (resp.offset - per_page)).click(function() {
@@ -527,8 +527,8 @@
$("#paging a.prev").removeAttr("href");
}
var has_reduce_next = resp.total_rows === undefined && (descending_reverse ? options.startkey !== undefined : resp.rows.length > per_page);
- if (resp.rows !== null && (has_reduce_next || (descending_reverse ?
- (resp.offset - resp.total_rows < per_page) :
+ if (resp.rows !== null && (has_reduce_next || (descending_reverse ?
+ (resp.offset - resp.total_rows < per_page) :
(resp.total_rows - resp.offset > per_page)))) {
$("#paging a.next").attr("href", "#" + (resp.offset + per_page)).click(function() {
var opt = {
@@ -967,16 +967,16 @@
}
function _renderAttachmentItem(name, attachment) {
- var attachmentHref = db.uri + encodeDocId(docId)
+ var attachmentHref = db.uri + encodeDocId(docId)
+ "/" + encodeAttachment(name);
var li = $("<li></li>");
$("<a href='' title='Download file' target='_top'></a>").text(name)
.attr("href", attachmentHref)
.wrapInner("<tt></tt>").appendTo(li);
- $("<span>()</span>").text("" + $.futon.formatSize(attachment.length) +
+ $("<span>()</span>").text("" + $.futon.formatSize(attachment.length) +
", " + attachment.content_type).addClass("info").appendTo(li);
if (name == "tests.js") {
- li.find('span.info').append(', <a href="/_utils/couch_tests.html?'
+ li.find('span.info').append(', <a href="/_utils/couch_tests.html?'
+ attachmentHref + '">open in test runner</a>');
}
_initAttachmentItem(name, attachment, li);
diff --git a/share/www/script/jquery.couch.js b/share/www/script/jquery.couch.js
index 559fafd4..3c629e1d 100644
--- a/share/www/script/jquery.couch.js
+++ b/share/www/script/jquery.couch.js
@@ -92,7 +92,7 @@
$.ajax({
type: "POST", url: this.uri + "_compact",
contentType: "application/json",
- dataType: "json", data: "", processData: false,
+ dataType: "json", data: "", processData: false,
complete: function(req) {
var resp = $.httpData(req, "json");
if (req.status == 202) {
@@ -200,7 +200,7 @@
});
});
}
- });
+ });
} else {
alert("please provide an eachApp function for allApps()");
}
diff --git a/share/www/script/jquery.form.js b/share/www/script/jquery.form.js
index 91eb688d..e35ef0ef 100644
--- a/share/www/script/jquery.form.js
+++ b/share/www/script/jquery.form.js
@@ -13,7 +13,7 @@
;(function($) {
/*
- Usage Note:
+ Usage Note:
-----------
Do not use both ajaxSubmit and ajaxForm on the same form. These
functions are intended to be exclusive. Use ajaxSubmit if you want
@@ -36,13 +36,13 @@
target: '#output'
});
});
-
+
When using ajaxForm, the ajaxSubmit function will be invoked for you
- at the appropriate time.
+ at the appropriate time.
*/
/**
- * ajaxSubmit() provides a mechanism for immediately submitting
+ * ajaxSubmit() provides a mechanism for immediately submitting
* an HTML form using AJAX.
*/
$.fn.ajaxSubmit = function(options) {
@@ -73,8 +73,8 @@ $.fn.ajaxSubmit = function(options) {
if (options.beforeSerialize && options.beforeSerialize(this, options) === false) {
log('ajaxSubmit: submit aborted via beforeSerialize callback');
return this;
- }
-
+ }
+
var a = this.formToArray(options.semantic);
if (options.data) {
options.extraData = options.data;
@@ -82,7 +82,7 @@ $.fn.ajaxSubmit = function(options) {
if(options.data[n] instanceof Array) {
for (var k in options.data[n])
a.push( { name: n, value: options.data[n][k] } )
- }
+ }
else
a.push( { name: n, value: options.data[n] } );
}
@@ -92,14 +92,14 @@ $.fn.ajaxSubmit = function(options) {
if (options.beforeSubmit && options.beforeSubmit(a, this, options) === false) {
log('ajaxSubmit: submit aborted via beforeSubmit callback');
return this;
- }
+ }
// fire vetoable 'validate' event
this.trigger('form-submit-validate', [a, this, options, veto]);
if (veto.veto) {
log('ajaxSubmit: submit vetoed via form-submit-validate trigger');
return this;
- }
+ }
var q = $.param(a);
@@ -137,7 +137,7 @@ $.fn.ajaxSubmit = function(options) {
found = true;
// options.iframe allows user to force iframe mode
- if (options.iframe || found) {
+ if (options.iframe || found) {
// hack to fix Safari hang (thanks to Tim Molendijk for this)
// see: http://groups.google.com/group/jquery-dev/browse_thread/thread/36395b7ab510dd5d
if ($.browser.safari && options.closeKeepAlive)
@@ -156,12 +156,12 @@ $.fn.ajaxSubmit = function(options) {
// private function for handling file uploads (hat tip to YAHOO!)
function fileUpload() {
var form = $form[0];
-
+
if ($(':input[name=submit]', form).length) {
alert('Error: Form elements must not be named "submit".');
return;
}
-
+
var opts = $.extend({}, $.ajaxSettings, options);
var s = jQuery.extend(true, {}, $.extend(true, {}, $.ajaxSettings), opts);
@@ -169,7 +169,7 @@ $.fn.ajaxSubmit = function(options) {
var $io = $('<iframe id="' + id + '" name="' + id + '" />');
var io = $io[0];
- if ($.browser.msie || $.browser.opera)
+ if ($.browser.msie || $.browser.opera)
io.src = 'javascript:false;document.write("");';
$io.css({ position: 'absolute', top: '-1000px', left: '-1000px' });
@@ -182,8 +182,8 @@ $.fn.ajaxSubmit = function(options) {
getAllResponseHeaders: function() {},
getResponseHeader: function() {},
setRequestHeader: function() {},
- abort: function() {
- this.aborted = 1;
+ abort: function() {
+ this.aborted = 1;
$io.attr('src','about:blank'); // abort op in progress
}
};
@@ -199,7 +199,7 @@ $.fn.ajaxSubmit = function(options) {
}
if (xhr.aborted)
return;
-
+
var cbInvoked = 0;
var timedOut = 0;
@@ -226,7 +226,7 @@ $.fn.ajaxSubmit = function(options) {
method: 'POST',
action: opts.url
});
-
+
// ie borks in some cases when setting encoding
if (! options.skipEncodingOverride) {
$form.attr({
@@ -247,7 +247,7 @@ $.fn.ajaxSubmit = function(options) {
extraInputs.push(
$('<input type="hidden" name="'+n+'" value="'+options.extraData[n]+'" />')
.appendTo(form)[0]);
-
+
// add iframe to doc and submit the form
$io.appendTo('body');
io.attachEvent ? io.attachEvent('onload', cb) : io.addEventListener('load', cb, false);
@@ -263,7 +263,7 @@ $.fn.ajaxSubmit = function(options) {
function cb() {
if (cbInvoked++) return;
-
+
io.detachEvent ? io.detachEvent('onload', cb) : io.removeEventListener('load', cb, false);
var operaHack = 0;
@@ -274,7 +274,7 @@ $.fn.ajaxSubmit = function(options) {
var data, doc;
doc = io.contentWindow ? io.contentWindow.document : io.contentDocument ? io.contentDocument : io.document;
-
+
if (doc.body == null && !operaHack && $.browser.opera) {
// In Opera 9.2.x the iframe DOM is not always traversable when
// the onload callback fires so we give Opera 100ms to right itself
@@ -283,7 +283,7 @@ $.fn.ajaxSubmit = function(options) {
setTimeout(cb, 100);
return;
}
-
+
xhr.responseText = doc.body ? doc.body.innerHTML : null;
xhr.responseXML = doc.XMLDocument ? doc.XMLDocument : doc;
xhr.getResponseHeader = function(header){
@@ -348,7 +348,7 @@ $.fn.ajaxSubmit = function(options) {
* The options argument for ajaxForm works exactly as it does for ajaxSubmit. ajaxForm merely
* passes the options argument along after properly binding events for submit elements and
* the form itself.
- */
+ */
$.fn.ajaxForm = function(options) {
return this.ajaxFormUnbind().bind('submit.form-plugin',function() {
$(this).ajaxSubmit(options);
@@ -594,10 +594,10 @@ $.fn.resetForm = function() {
/**
* Enables or disables any matching elements.
*/
-$.fn.enable = function(b) {
+$.fn.enable = function(b) {
if (b == undefined) b = true;
- return this.each(function() {
- this.disabled = !b
+ return this.each(function() {
+ this.disabled = !b
});
};
@@ -607,7 +607,7 @@ $.fn.enable = function(b) {
*/
$.fn.selected = function(select) {
if (select == undefined) select = true;
- return this.each(function() {
+ return this.each(function() {
var t = this.type;
if (t == 'checkbox' || t == 'radio')
this.checked = select;
diff --git a/share/www/script/jquery.js b/share/www/script/jquery.js
index 3a4badd0..9ee8702c 100644
--- a/share/www/script/jquery.js
+++ b/share/www/script/jquery.js
@@ -11,7 +11,7 @@
*/
(function(){
-var
+var
// Will speed up references to window, and allows munging its name.
window = this,
// Will speed up references to undefined, and allows munging its name.
@@ -399,13 +399,13 @@ jQuery.fn = jQuery.prototype = {
},
val: function( value ) {
- if ( value === undefined ) {
+ if ( value === undefined ) {
var elem = this[0];
if ( elem ) {
if( jQuery.nodeName( elem, 'option' ) )
return (elem.attributes.value || {}).specified ? elem.value : elem.text;
-
+
// We need to handle select boxes special
if ( jQuery.nodeName( elem, "select" ) ) {
var index = elem.selectedIndex,
@@ -434,7 +434,7 @@ jQuery.fn = jQuery.prototype = {
}
}
- return values;
+ return values;
}
// Everything else, we just grab the value
@@ -513,13 +513,13 @@ jQuery.fn = jQuery.prototype = {
if ( first )
for ( var i = 0, l = this.length; i < l; i++ )
callback.call( root(this[i], first), i > 0 ? extra.cloneNode(true) : fragment );
-
+
if ( scripts )
jQuery.each( scripts, evalScript );
}
return this;
-
+
function root( elem, cur ) {
return table && jQuery.nodeName(elem, "table") && jQuery.nodeName(cur, "tr") ?
(elem.getElementsByTagName("tbody")[0] ||
@@ -586,7 +586,7 @@ jQuery.extend = jQuery.fn.extend = function() {
// Recurse if we're merging object values
if ( deep && copy && typeof copy === "object" && !copy.nodeType )
- target[ name ] = jQuery.extend( deep,
+ target[ name ] = jQuery.extend( deep,
// Never move original objects, clone them
src || ( copy.length != null ? [ ] : { } )
, copy );
@@ -923,7 +923,7 @@ jQuery.extend({
// IE completely kills leading whitespace when innerHTML is used
if ( !jQuery.support.leadingWhitespace && /^\s/.test( elem ) )
div.insertBefore( context.createTextNode( elem.match(/^\s*/)[0] ), div.firstChild );
-
+
elem = jQuery.makeArray( div.childNodes );
}
@@ -944,7 +944,7 @@ jQuery.extend({
fragment.appendChild( ret[i] );
}
}
-
+
return scripts;
}
@@ -1322,16 +1322,16 @@ jQuery.extend({
},
queue: function( elem, type, data ) {
if ( elem ){
-
+
type = (type || "fx") + "queue";
-
+
var q = jQuery.data( elem, type );
-
+
if ( !q || jQuery.isArray(data) )
q = jQuery.data( elem, type, jQuery.makeArray(data) );
else if( data )
q.push( data );
-
+
}
return q;
},
@@ -1339,10 +1339,10 @@ jQuery.extend({
dequeue: function( elem, type ){
var queue = jQuery.queue( elem, type ),
fn = queue.shift();
-
+
if( !type || type === "fx" )
fn = queue[0];
-
+
if( fn !== undefined )
fn.call(elem);
}
@@ -1384,7 +1384,7 @@ jQuery.fn.extend({
return this.each(function(){
var queue = jQuery.queue( this, type, data );
-
+
if( type == "fx" && queue.length == 1 )
queue[0].call(this);
});
@@ -1412,19 +1412,19 @@ var Sizzle = function(selector, context, results, seed) {
if ( context.nodeType !== 1 && context.nodeType !== 9 )
return [];
-
+
if ( !selector || typeof selector !== "string" ) {
return results;
}
var parts = [], m, set, checkSet, check, mode, extra, prune = true;
-
+
// Reset the position of the chunker regexp (start from head)
chunker.lastIndex = 0;
-
+
while ( (m = chunker.exec(selector)) !== null ) {
parts.push( m[1] );
-
+
if ( m[2] ) {
extra = RegExp.rightContext;
break;
@@ -1525,7 +1525,7 @@ Sizzle.find = function(expr, context, isXML){
for ( var i = 0, l = Expr.order.length; i < l; i++ ) {
var type = Expr.order[i], match;
-
+
if ( (match = Expr.match[ type ].exec( expr )) ) {
var left = RegExp.leftContext;
@@ -1770,7 +1770,7 @@ var Expr = Sizzle.selectors = {
},
ATTR: function(match){
var name = match[1].replace(/\\/g, "");
-
+
if ( Expr.attrMap[name] ) {
match[1] = Expr.attrMap[name];
}
@@ -1796,7 +1796,7 @@ var Expr = Sizzle.selectors = {
} else if ( Expr.match.POS.test( match[0] ) ) {
return true;
}
-
+
return match;
},
POS: function(match){
@@ -1894,7 +1894,7 @@ var Expr = Sizzle.selectors = {
var type = match[1], parent = elem.parentNode;
var doneName = match[0];
-
+
if ( parent && (!parent[ doneName ] || !elem.nodeIndex) ) {
var count = 1;
@@ -2004,7 +2004,7 @@ var makeArray = function(array, results) {
results.push.apply( results, array );
return results;
}
-
+
return array;
};
@@ -2115,7 +2115,7 @@ if ( document.querySelectorAll ) (function(){
if ( div.querySelectorAll && div.querySelectorAll(".TEST").length === 0 ) {
return;
}
-
+
Sizzle = function(query, context, extra, seed){
context = context || document;
@@ -2126,7 +2126,7 @@ if ( document.querySelectorAll ) (function(){
return makeArray( context.querySelectorAll(query), extra );
} catch(e){}
}
-
+
return oldSizzle(query, context, extra, seed);
};
@@ -2368,7 +2368,7 @@ jQuery.event = {
// Get the current list of functions bound to this event
var handlers = events[type];
-
+
if ( jQuery.event.specialAll[type] )
jQuery.event.specialAll[type].setup.call(elem, data, namespaces);
@@ -2441,7 +2441,7 @@ jQuery.event = {
// Handle the removal of namespaced events
if ( namespace.test(events[type][handle].type) )
delete events[type][handle];
-
+
if ( jQuery.event.specialAll[type] )
jQuery.event.specialAll[type].teardown.call(elem, namespaces);
@@ -2508,11 +2508,11 @@ jQuery.event = {
// don't do events on text and comment nodes
if ( !elem || elem.nodeType == 3 || elem.nodeType == 8 )
return undefined;
-
+
// Clean up in case it is reused
event.result = undefined;
event.target = elem;
-
+
// Clone the incoming data, if any
data = jQuery.makeArray(data);
data.unshift( event );
@@ -2559,7 +2559,7 @@ jQuery.event = {
// Cache this now, all = true means, any handler
all = !namespaces.length && !event.exclusive;
-
+
var namespace = RegExp("(^|\\.)" + namespaces.slice().sort().join(".*\\.") + "(\\.|$)");
handlers = ( jQuery.data(this, "events") || {} )[event.type];
@@ -2657,7 +2657,7 @@ jQuery.event = {
teardown: function() {}
}
},
-
+
specialAll: {
live: {
setup: function( selector, namespaces ){
@@ -2666,12 +2666,12 @@ jQuery.event = {
teardown: function( namespaces ){
if ( namespaces.length ) {
var remove = 0, name = RegExp("(^|\\.)" + namespaces[0] + "(\\.|$)");
-
+
jQuery.each( (jQuery.data(this, "events").live || {}), function(){
if ( name.test(this.type) )
remove++;
});
-
+
if ( remove < 1 )
jQuery.event.remove( this, namespaces[0], liveHandler );
}
@@ -2684,7 +2684,7 @@ jQuery.Event = function( src ){
// Allow instantiation without the 'new' keyword
if( !this.preventDefault )
return new jQuery.Event(src);
-
+
// Event object
if( src && src.type ){
this.originalEvent = src;
@@ -2696,7 +2696,7 @@ jQuery.Event = function( src ){
// timeStamp is buggy for some events on Firefox(#3843)
// So we won't rely on the native value
this.timeStamp = now();
-
+
// Mark it as fixed
this[expando] = true;
};
@@ -2752,7 +2752,7 @@ var withinElement = function(event) {
while ( parent && parent != this )
try { parent = parent.parentNode; }
catch(e) { parent = this; }
-
+
if( parent != this ){
// set the correct event type
event.type = event.data;
@@ -2760,9 +2760,9 @@ var withinElement = function(event) {
jQuery.event.handle.apply( this, arguments );
}
};
-
-jQuery.each({
- mouseover: 'mouseenter',
+
+jQuery.each({
+ mouseover: 'mouseenter',
mouseout: 'mouseleave'
}, function( orig, fix ){
jQuery.event.special[ fix ] = {
@@ -2772,7 +2772,7 @@ jQuery.each({
teardown: function(){
jQuery.event.remove( this, orig, withinElement );
}
- };
+ };
});
jQuery.fn.extend({
@@ -2811,7 +2811,7 @@ jQuery.fn.extend({
event.stopPropagation();
jQuery.event.trigger( event, data, this[0] );
return event.result;
- }
+ }
},
toggle: function( fn ) {
@@ -2854,7 +2854,7 @@ jQuery.fn.extend({
return this;
},
-
+
live: function( type, fn ){
var proxy = jQuery.event.proxy( fn );
proxy.guid += this.selector + type;
@@ -2863,7 +2863,7 @@ jQuery.fn.extend({
return this;
},
-
+
die: function( type, fn ){
jQuery(document).unbind( liveConvert(type, this.selector), fn ? { guid: fn.guid + this.selector + type } : null );
return this;
@@ -2983,12 +2983,12 @@ jQuery.each( ("blur,focus,load,resize,scroll,unload,click,dblclick," +
// Prevent memory leaks in IE
// And prevent errors on refresh with events like mouseover in other browsers
// Window isn't included so as not to unbind existing unload events
-jQuery( window ).bind( 'unload', function(){
+jQuery( window ).bind( 'unload', function(){
for ( var id in jQuery.cache )
// Skip the window
if ( id != 1 && jQuery.cache[ id ].handle )
jQuery.event.remove( jQuery.cache[ id ].handle.elem );
-});
+});
(function(){
jQuery.support = {};
@@ -3012,32 +3012,32 @@ jQuery( window ).bind( 'unload', function(){
jQuery.support = {
// IE strips leading whitespace when .innerHTML is used
leadingWhitespace: div.firstChild.nodeType == 3,
-
+
// Make sure that tbody elements aren't automatically inserted
// IE will insert them into empty tables
tbody: !div.getElementsByTagName("tbody").length,
-
+
// Make sure that you can get all elements in an <object> element
// IE 7 always returns no results
objectAll: !!div.getElementsByTagName("object")[0]
.getElementsByTagName("*").length,
-
+
// Make sure that link elements get serialized correctly by innerHTML
// This requires a wrapper element in IE
htmlSerialize: !!div.getElementsByTagName("link").length,
-
+
// Get the style information from getAttribute
// (IE uses .cssText insted)
style: /red/.test( a.getAttribute("style") ),
-
+
// Make sure that URLs aren't manipulated
// (IE normalizes it by default)
hrefNormalized: a.getAttribute("href") === "/a",
-
+
// Make sure that element opacity exists
// (IE uses filter instead)
opacity: a.style.opacity === "0.5",
-
+
// Verify style float existence
// (IE uses styleFloat instead of cssFloat)
cssFloat: !!a.style.cssFloat,
@@ -3047,14 +3047,14 @@ jQuery( window ).bind( 'unload', function(){
noCloneEvent: true,
boxModel: null
};
-
+
script.type = "text/javascript";
try {
script.appendChild( document.createTextNode( "window." + id + "=1;" ) );
} catch(e){}
root.insertBefore( script, root.firstChild );
-
+
// Make sure that the execution of code works by injecting a script
// tag with appendChild/createTextNode
// (IE doesn't support this, fails, and uses .text instead)
@@ -3199,7 +3199,7 @@ jQuery.each( "ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess,ajaxSend".sp
var jsc = now();
jQuery.extend({
-
+
get: function( url, data, callback, type ) {
// shift arguments if data argument was ommited
if ( jQuery.isFunction( data ) ) {
@@ -3585,7 +3585,7 @@ jQuery.extend({
if ( xml && data.documentElement.tagName == "parsererror" )
throw "parsererror";
-
+
// Allow a pre-filtering function to sanitize the response
// s != null is checked to keep backwards compatibility
if( s && s.dataFilter )
@@ -3602,7 +3602,7 @@ jQuery.extend({
if ( type == "json" )
data = window["eval"]("(" + data + ")");
}
-
+
return data;
},
@@ -3666,30 +3666,30 @@ jQuery.fn.extend({
} else {
for ( var i = 0, l = this.length; i < l; i++ ){
var old = jQuery.data(this[i], "olddisplay");
-
+
this[i].style.display = old || "";
-
+
if ( jQuery.css(this[i], "display") === "none" ) {
var tagName = this[i].tagName, display;
-
+
if ( elemdisplay[ tagName ] ) {
display = elemdisplay[ tagName ];
} else {
var elem = jQuery("<" + tagName + " />").appendTo("body");
-
+
display = elem.css("display");
if ( display === "none" )
display = "block";
-
+
elem.remove();
-
+
elemdisplay[ tagName ] = display;
}
-
+
this[i].style.display = jQuery.data(this[i], "olddisplay", display);
}
}
-
+
return this;
}
},
@@ -3732,11 +3732,11 @@ jQuery.fn.extend({
var optall = jQuery.speed(speed, easing, callback);
return this[ optall.queue === false ? "each" : "queue" ](function(){
-
+
var opt = jQuery.extend({}, optall), p,
hidden = this.nodeType == 1 && jQuery(this).is(":hidden"),
self = this;
-
+
for ( p in prop ) {
if ( prop[p] == "hide" && hidden || prop[p] == "show" && !hidden )
return opt.complete.call(this);
@@ -3990,7 +3990,7 @@ jQuery.fx.prototype = {
if ( this.options.hide || this.options.show )
for ( var p in this.options.curAnim )
jQuery.attr(this.elem.style, p, this.options.orig[p]);
-
+
// Execute the complete function
this.options.complete.call( this.elem );
}
@@ -4044,7 +4044,7 @@ if ( document.documentElement["getBoundingClientRect"] )
left = box.left + (self.pageXOffset || jQuery.boxModel && docElem.scrollLeft || body.scrollLeft) - clientLeft;
return { top: top, left: left };
};
-else
+else
jQuery.fn.offset = function() {
if ( !this[0] ) return { top: 0, left: 0 };
if ( this[0] === this[0].ownerDocument.body ) return jQuery.offset.bodyOffset( this[0] );
@@ -4134,7 +4134,7 @@ jQuery.fn.extend({
parentOffset = /^body|html$/i.test(offsetParent[0].tagName) ? { top: 0, left: 0 } : offsetParent.offset();
// Subtract element margins
- // note: when an element has margin: auto the offsetLeft and marginLeft
+ // note: when an element has margin: auto the offsetLeft and marginLeft
// are the same in Safari causing offset.left to incorrectly be 0
offset.top -= num( this, 'marginTop' );
offset.left -= num( this, 'marginLeft' );
@@ -4165,7 +4165,7 @@ jQuery.fn.extend({
// Create scrollLeft and scrollTop methods
jQuery.each( ['Left', 'Top'], function(i, name) {
var method = 'scroll' + name;
-
+
jQuery.fn[ method ] = function(val) {
if (!this[0]) return null;
@@ -4210,7 +4210,7 @@ jQuery.each([ "Height", "Width" ], function(i, name){
(margin ?
num(this, "margin" + tl) + num(this, "margin" + br) : 0);
};
-
+
var type = name.toLowerCase();
jQuery.fn[ type ] = function( size ) {
diff --git a/share/www/script/test/all_docs.js b/share/www/script/test/all_docs.js
index 3dd3aa53..fcec3b43 100644
--- a/share/www/script/test/all_docs.js
+++ b/share/www/script/test/all_docs.js
@@ -15,7 +15,7 @@ couchTests.all_docs = function(debug) {
db.deleteDb();
db.createDb();
if (debug) debugger;
-
+
// Create some more documents.
// Notice the use of the ok member on the return result.
T(db.save({_id:"0",a:1,b:1}).ok);
@@ -32,7 +32,7 @@ couchTests.all_docs = function(debug) {
for(var i=0; i < rows.length; i++) {
T(rows[i].id >= "0" && rows[i].id <= "4");
}
-
+
// Check _all_docs with descending=true
var desc = db.allDocs({descending:true});
T(desc.total_rows == desc.rows.length);
@@ -40,7 +40,7 @@ couchTests.all_docs = function(debug) {
// Check _all_docs offset
var all = db.allDocs({startkey:"2"});
T(all.offset == 2);
-
+
// check that the docs show up in the seq view in the order they were created
var all_seq = db.allDocsBySeq();
var ids = ["0","3","1","2"];
@@ -48,7 +48,7 @@ couchTests.all_docs = function(debug) {
var row = all_seq.rows[i];
T(row.id == ids[i]);
};
-
+
// it should work in reverse as well
all_seq = db.allDocsBySeq({descending:true});
ids = ["2","1","3","0"];
@@ -56,13 +56,13 @@ couchTests.all_docs = function(debug) {
var row = all_seq.rows[i];
T(row.id == ids[i]);
};
-
+
// check that deletions also show up right
var doc1 = db.open("1");
var deleted = db.deleteDoc(doc1);
T(deleted.ok);
all_seq = db.allDocsBySeq();
-
+
// the deletion should make doc id 1 have the last seq num
T(all_seq.rows.length == 4);
T(all_seq.rows[3].id == "1");
@@ -70,13 +70,13 @@ couchTests.all_docs = function(debug) {
// is this a bug?
// T(all_seq.rows.length == all_seq.total_rows);
-
+
// do an update
var doc2 = db.open("3");
doc2.updated = "totally";
db.save(doc2);
all_seq = db.allDocsBySeq();
-
+
// the update should make doc id 3 have the last seq num
T(all_seq.rows.length == 4);
T(all_seq.rows[3].id == "3");
@@ -90,7 +90,7 @@ couchTests.all_docs = function(debug) {
// and on the deleted one, no doc
T(all_seq.rows[2].value.deleted);
T(!all_seq.rows[2].doc);
-
+
// test the all docs collates sanely
db.save({_id: "Z", foo: "Z"});
db.save({_id: "a", foo: "a"});
diff --git a/share/www/script/test/attachment_names.js b/share/www/script/test/attachment_names.js
index 3c694dd0..f9c846eb 100644
--- a/share/www/script/test/attachment_names.js
+++ b/share/www/script/test/attachment_names.js
@@ -10,21 +10,21 @@
// License for the specific language governing permissions and limitations under
// the License.
-couchTests.attachment_names = function(debug) {
- var db = new CouchDB("test_suite_db");
- db.deleteDb();
- db.createDb();
- if (debug) debugger;
-
- var binAttDoc = {
- _id: "bin_doc",
- _attachments:{
- "foo\x80txt": {
- content_type:"text/plain",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- }
- }
+couchTests.attachment_names = function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var binAttDoc = {
+ _id: "bin_doc",
+ _attachments:{
+ "foo\x80txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ }
// inline attachments
try {
@@ -38,7 +38,7 @@ couchTests.attachment_names = function(debug) {
// standalone docs
var bin_data = "JHAPDO*AU£PN ){(3u[d 93DQ9¡€])} ææøo'∂ƒæ≤çæππ•¥∫¶®#†π¶®¥π€ª®˙π8np";
-
+
var xhr = (CouchDB.request("PUT", "/test_suite_db/bin_doc3/attachment\x80txt", {
headers:{"Content-Type":"text/plain;charset=utf-8"},
body:bin_data
@@ -64,15 +64,15 @@ couchTests.attachment_names = function(debug) {
// leading underscores
- var binAttDoc = {
- _id: "bin_doc2",
- _attachments:{
- "_foo.txt": {
- content_type:"text/plain",
- data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
- }
- }
- }
+ var binAttDoc = {
+ _id: "bin_doc2",
+ _attachments:{
+ "_foo.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ }
try {
db.save(binAttDoc);
@@ -80,8 +80,8 @@ couchTests.attachment_names = function(debug) {
} catch (e) {
TEquals("bad_request", e.error, "attachment_name: leading underscore");
TEquals("Attachment name can't start with '_'", e.reason, "attachment_name: leading underscore");
- }
-
+ }
+
// todo: form uploads, waiting for cmlenz' test case for form uploads
};
diff --git a/share/www/script/test/attachment_paths.js b/share/www/script/test/attachment_paths.js
index ef9fa869..245d2949 100644
--- a/share/www/script/test/attachment_paths.js
+++ b/share/www/script/test/attachment_paths.js
@@ -44,14 +44,14 @@ couchTests.attachment_paths = function(debug) {
// lets try it with an escaped attachment id...
// weird that it's at two urls
var xhr = CouchDB.request("GET", "/"+dbName+"/bin_doc/foo%2Fbar.txt");
- T(xhr.status == 200);
+ T(xhr.status == 200);
// xhr.responseText == "This is a base64 encoded text"
var xhr = CouchDB.request("GET", "/"+dbName+"/bin_doc/foo/baz.txt");
T(xhr.status == 404);
var xhr = CouchDB.request("GET", "/"+dbName+"/bin_doc/foo%252Fbaz.txt");
- T(xhr.status == 200);
+ T(xhr.status == 200);
T(xhr.responseText == "We like percent two F.");
// require a _rev to PUT
@@ -59,7 +59,7 @@ couchTests.attachment_paths = function(debug) {
headers:{"Content-Type":"text/plain;charset=utf-8"},
body:"Just some text"
});
- T(xhr.status == 409);
+ T(xhr.status == 409);
var xhr = CouchDB.request("PUT", "/"+dbName+"/bin_doc/foo/bar2.txt?rev=" + binAttDoc._rev, {
body:"This is no base64 encoded text",
@@ -77,7 +77,7 @@ couchTests.attachment_paths = function(debug) {
T(binAttDoc._attachments["foo/bar2.txt"].length == 30);
//// now repeat the while thing with a design doc
-
+
// first just save a regular doc with an attachment that has a slash in the url.
// (also gonna run an encoding check case)
var binAttDoc = {
@@ -120,7 +120,7 @@ couchTests.attachment_paths = function(debug) {
T(xhr.status == 404);
var xhr = CouchDB.request("GET", "/"+dbName+"/_design%2Fbin_doc/foo%252Fbaz.txt");
- T(xhr.status == 200);
+ T(xhr.status == 200);
T(xhr.responseText == "We like percent two F.");
// require a _rev to PUT
@@ -128,7 +128,7 @@ couchTests.attachment_paths = function(debug) {
headers:{"Content-Type":"text/plain;charset=utf-8"},
body:"Just some text"
});
- T(xhr.status == 409);
+ T(xhr.status == 409);
var xhr = CouchDB.request("PUT", "/"+dbName+"/_design%2Fbin_doc/foo/bar2.txt?rev=" + binAttDoc._rev, {
body:"This is no base64 encoded text",
diff --git a/share/www/script/test/attachment_views.js b/share/www/script/test/attachment_views.js
index b97a4130..903fbc4c 100644
--- a/share/www/script/test/attachment_views.js
+++ b/share/www/script/test/attachment_views.js
@@ -73,7 +73,7 @@ couchTests.attachment_views= function(debug) {
var reduceFunction = function(key, values) {
return sum(values);
}
-
+
var result = db.query(mapFunction, reduceFunction);
T(result.rows.length == 1);
diff --git a/share/www/script/test/attachments.js b/share/www/script/test/attachments.js
index e68cd444..6af6ae8f 100644
--- a/share/www/script/test/attachments.js
+++ b/share/www/script/test/attachments.js
@@ -33,7 +33,7 @@ couchTests.attachments= function(debug) {
T(xhr.responseText == "This is a base64 encoded text");
T(xhr.getResponseHeader("Content-Type") == "text/plain");
T(xhr.getResponseHeader("Etag") == '"' + save_response.rev + '"');
-
+
// empty attachment
var binAttDoc2 = {
_id: "bin_doc2",
@@ -70,7 +70,7 @@ couchTests.attachments= function(debug) {
var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc2/foo2.txt");
T(xhr.responseText == "This is no base64 encoded text");
T(xhr.getResponseHeader("Content-Type") == "text/plain;charset=utf-8");
-
+
// test without rev, should fail
var xhr = CouchDB.request("DELETE", "/test_suite_db/bin_doc2/foo2.txt");
T(xhr.status == 409);
@@ -78,8 +78,8 @@ couchTests.attachments= function(debug) {
// test with rev, should not fail
var xhr = CouchDB.request("DELETE", "/test_suite_db/bin_doc2/foo2.txt?rev=" + rev);
T(xhr.status == 200);
-
-
+
+
// test binary data
var bin_data = "JHAPDO*AU£PN ){(3u[d 93DQ9¡€])} ææøo'∂ƒæ≤çæππ•¥∫¶®#†π¶®¥π€ª®˙π8np";
var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc3/attachment.txt", {
@@ -88,11 +88,11 @@ couchTests.attachments= function(debug) {
});
T(xhr.status == 201);
var rev = JSON.parse(xhr.responseText).rev;
-
+
var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc3/attachment.txt");
T(xhr.responseText == bin_data);
T(xhr.getResponseHeader("Content-Type") == "text/plain;charset=utf-8");
-
+
var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc3/attachment.txt", {
headers:{"Content-Type":"text/plain;charset=utf-8"},
body:bin_data
@@ -116,7 +116,7 @@ couchTests.attachments= function(debug) {
var xhr = CouchDB.request("DELETE", "/test_suite_db/bin_doc3/attachment.txt?rev=" + rev);
T(xhr.status == 200);
-
+
var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc3/attachment.txt");
T(xhr.status == 404);
@@ -137,7 +137,7 @@ couchTests.attachments= function(debug) {
var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc4/attachment.txt");
T(xhr.status == 200);
T(xhr.responseText.length == 0);
-
+
// overwrite previsously empty attachment
var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc4/attachment.txt?rev=" + rev, {
headers:{"Content-Type":"text/plain;charset=utf-8"},
@@ -148,8 +148,8 @@ couchTests.attachments= function(debug) {
var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc4/attachment.txt");
T(xhr.status == 200);
T(xhr.responseText == "This is a string");
-
-
+
+
// Attachment sparseness COUCHDB-220
var docs = []
@@ -167,7 +167,7 @@ couchTests.attachments= function(debug) {
}
db.bulkSave(docs);
-
+
var before = db.info().disk_size;
// Compact it.
@@ -175,14 +175,14 @@ couchTests.attachments= function(debug) {
T(db.last_req.status == 202);
// compaction isn't instantaneous, loop until done
while (db.info().compact_running) {};
-
+
var after = db.info().disk_size;
-
+
// Compaction should reduce the database slightly, but not
// orders of magnitude (unless attachments introduce sparseness)
T(after > before * 0.1, "before: " + before + " after: " + after);
-
-
+
+
// test large attachments - COUCHDB-366
var lorem = CouchDB.request("GET", "/_utils/script/test/lorem.txt").responseText;
diff --git a/share/www/script/test/basics.js b/share/www/script/test/basics.js
index 47e94d2c..62ac2df9 100644
--- a/share/www/script/test/basics.js
+++ b/share/www/script/test/basics.js
@@ -13,7 +13,7 @@
// Do some basic tests.
couchTests.basics = function(debug) {
var result = JSON.parse(CouchDB.request("GET", "/").responseText);
- T(result.couchdb == "Welcome");
+ T(result.couchdb == "Welcome");
var db = new CouchDB("test_suite_db");
db.deleteDb();
@@ -31,11 +31,11 @@ couchTests.basics = function(debug) {
// creating a new DB should return Location header
xhr = CouchDB.request("DELETE", "/test_suite_db");
xhr = CouchDB.request("PUT", "/test_suite_db");
- TEquals("/test_suite_db",
+ TEquals("/test_suite_db",
xhr.getResponseHeader("Location").substr(-14),
"should return Location header to newly created document");
- TEquals("http://",
+ TEquals("http://",
xhr.getResponseHeader("Location").substr(0, 7),
"should return absolute Location header to newly created document");
@@ -66,7 +66,7 @@ couchTests.basics = function(debug) {
// make sure you can do a seq=true option
var doc = db.open(id, {local_seq:true});
T(doc._local_seq == 1);
-
+
// Create some more documents.
// Notice the use of the ok member on the return result.
@@ -161,11 +161,11 @@ couchTests.basics = function(debug) {
var xhr = CouchDB.request("PUT", "/test_suite_db/newdoc", {
body: JSON.stringify({"a":1})
});
- TEquals("/test_suite_db/newdoc",
+ TEquals("/test_suite_db/newdoc",
xhr.getResponseHeader("Location").substr(-21),
"should return Location header to newly created document");
- TEquals("http://",
+ TEquals("http://",
xhr.getResponseHeader("Location").substr(0, 7),
"should return absolute Location header to newly created document");
@@ -182,12 +182,12 @@ couchTests.basics = function(debug) {
]
var test_doc = function(info) {
var data = JSON.stringify(info[1]);
-
+
xhr = CouchDB.request("PUT", "/test_suite_db/" + info[0], {body: data});
T(xhr.status == 500);
result = JSON.parse(xhr.responseText);
T(result.error == "doc_validation");
-
+
xhr = CouchDB.request("POST", "/test_suite_db/", {body: data});
T(xhr.status == 500);
result = JSON.parse(xhr.responseText);
diff --git a/share/www/script/test/batch_save.js b/share/www/script/test/batch_save.js
index 77aa6635..d2721901 100644
--- a/share/www/script/test/batch_save.js
+++ b/share/www/script/test/batch_save.js
@@ -19,32 +19,32 @@ couchTests.batch_save = function(debug) {
// commit should work fine with no batches
T(db.ensureFullCommit().ok);
-
+
// PUT a doc with ?batch=ok
T(db.save({_id:"0",a:1,b:1}, {batch : "ok"}).ok);
// test that response is 200 Accepted
T(db.last_req.status == 202);
T(db.last_req.statusText == "Accepted");
-
+
T(db.allDocs().total_rows == 0);
restartServer();
-
+
// lost the updates
T(db.allDocs().total_rows == 0);
-
+
T(db.save({_id:"0",a:1,b:1}, {batch : "ok"}).ok);
T(db.save({_id:"1",a:1,b:1}, {batch : "ok"}).ok);
T(db.save({_id:"2",a:1,b:1}, {batch : "ok"}).ok);
T(db.ensureFullCommit().ok);
T(db.allDocs().total_rows == 3);
-
+
// repeat the tests for POST
var resp = db.request("POST", db.uri + "?batch=ok", {body: JSON.stringify({a:1})});
T(JSON.parse(resp.responseText).ok);
-
+
// test that response is 200 Accepted
T(resp.status == 202);
T(resp.statusText == "Accepted");
@@ -59,5 +59,5 @@ couchTests.batch_save = function(debug) {
T(db.ensureFullCommit().ok);
T(db.allDocs().total_rows == 5);
-
+
};
diff --git a/share/www/script/test/bulk_docs.js b/share/www/script/test/bulk_docs.js
index 8e73ded4..b4c0ef9d 100644
--- a/share/www/script/test/bulk_docs.js
+++ b/share/www/script/test/bulk_docs.js
@@ -34,7 +34,7 @@ couchTests.bulk_docs = function(debug) {
T(results.length == 5);
for (i = 0; i < 5; i++) {
T(results[i].id == i.toString());
-
+
// set the delete flag to delete the docs in the next step
docs[i]._deleted = true;
}
@@ -72,7 +72,7 @@ couchTests.bulk_docs = function(debug) {
// Now save the bulk docs, When we use all_or_nothing, we don't get conflict
// checking, all docs are saved regardless of conflict status, or none are
// saved.
- results = db.bulkSave(docs,{all_or_nothing:true});
+ results = db.bulkSave(docs,{all_or_nothing:true});
T(results.error === undefined);
var doc = db.open("0", {conflicts:true});
@@ -88,8 +88,8 @@ couchTests.bulk_docs = function(debug) {
T(results[0].id != "");
T(results[0].rev != "");
-
-
+
+
// Regression test for failure on update/delete
var newdoc = {"_id": "foobar", "body": "baz"};
T(db.save(newdoc).ok);
diff --git a/share/www/script/test/changes.js b/share/www/script/test/changes.js
index 98f31d6f..6ae035fa 100644
--- a/share/www/script/test/changes.js
+++ b/share/www/script/test/changes.js
@@ -16,98 +16,98 @@ couchTests.changes = function(debug) {
db.deleteDb();
db.createDb();
if (debug) debugger;
-
+
var req = CouchDB.request("GET", "/test_suite_db/_changes");
var resp = JSON.parse(req.responseText);
-
+
T(resp.results.length == 0 && resp.last_seq==0)
-
+
var docFoo = {_id:"foo", bar:1};
db.save(docFoo);
-
+
req = CouchDB.request("GET", "/test_suite_db/_changes");
var resp = JSON.parse(req.responseText);
-
+
T(resp.results.length == 1 && resp.last_seq==1)
T(resp.results[0].changes[0].rev == docFoo._rev)
-
+
req = CouchDB.request("GET", "/test_suite_db/_changes?continuous=true&timeout=10");
var resp = JSON.parse(req.responseText);
T(resp.results.length == 1 && resp.last_seq==1)
T(resp.results[0].changes[0].rev == docFoo._rev)
-
+
var xhr;
-
+
try {
xhr = CouchDB.newXhr();
- } catch (err) {
+ } catch (err) {
}
-
+
if (xhr) {
// Only test the continuous stuff if we have a real XHR object
// with real async support.
-
+
var sleep = function(msecs) {
// by making a slow sync request, we allow the waiting XHR request data
// to be received.
var req = CouchDB.request("GET", "/_sleep?time=" + msecs);
T(JSON.parse(req.responseText).ok == true);
}
-
+
var parse_changes_line = function(line) {
if (line.charAt(line.length-1) == ",") {
line = line.substring(0, line.length-1);
}
return JSON.parse(line);
}
-
-
+
+
xhr.open("GET", "/test_suite_db/_changes?continuous=true", true);
xhr.send("");
-
+
var docBar = {_id:"bar", bar:1};
db.save(docBar);
-
+
sleep(100);
var lines = xhr.responseText.split("\n");
-
+
T(lines[0]='{"results":[');
-
+
var change = parse_changes_line(lines[1]);
-
+
T(change.seq == 1)
T(change.id == "foo")
-
+
change = parse_changes_line(lines[2]);
-
+
T(change.seq == 2)
T(change.id == "bar")
T(change.changes[0].rev == docBar._rev)
-
+
var docBaz = {_id:"baz", baz:1};
db.save(docBaz);
-
+
sleep(100);
var lines = xhr.responseText.split("\n");
-
+
change = parse_changes_line(lines[3]);
-
+
T(change.seq == 3);
T(change.id == "baz");
T(change.changes[0].rev == docBaz._rev);
-
-
+
+
xhr = CouchDB.newXhr();
-
+
//verify the hearbeat newlines are sent
xhr.open("GET", "/test_suite_db/_changes?continuous=true&heartbeat=10", true);
xhr.send("");
-
+
sleep(100);
-
+
var str = xhr.responseText;
-
+
T(str.charAt(str.length - 1) == "\n")
T(str.charAt(str.length - 2) == "\n")
}
diff --git a/share/www/script/test/compact.js b/share/www/script/test/compact.js
index a3b55d85..2add707e 100644
--- a/share/www/script/test/compact.js
+++ b/share/www/script/test/compact.js
@@ -43,7 +43,7 @@ couchTests.compact = function(debug) {
T(db.last_req.status == 202);
// compaction isn't instantaneous, loop until done
while (db.info().compact_running) {};
-
+
T(db.ensureFullCommit().ok);
restartServer();
var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt");
@@ -51,5 +51,5 @@ couchTests.compact = function(debug) {
T(xhr.getResponseHeader("Content-Type") == "text/plain")
T(db.info().doc_count == 1);
T(db.info().disk_size < deletesize);
-
+
};
diff --git a/share/www/script/test/conflicts.js b/share/www/script/test/conflicts.js
index dfd7e8b6..d556acd7 100644
--- a/share/www/script/test/conflicts.js
+++ b/share/www/script/test/conflicts.js
@@ -41,7 +41,7 @@ couchTests.conflicts = function(debug) {
} catch (e) {
T(e.error == "conflict");
}
-
+
var bySeq = db.allDocsBySeq();
T( bySeq.rows.length == 1)
diff --git a/share/www/script/test/delayed_commits.js b/share/www/script/test/delayed_commits.js
index 0ead2d84..1cc0b339 100644
--- a/share/www/script/test/delayed_commits.js
+++ b/share/www/script/test/delayed_commits.js
@@ -15,43 +15,43 @@ couchTests.delayed_commits = function(debug) {
db.deleteDb();
db.createDb();
if (debug) debugger;
-
+
// By default, couchdb doesn't fully commit documents to disk right away,
- // it waits about a second to batch the full commit flush along with any
+ // it waits about a second to batch the full commit flush along with any
// other updates. If it crashes or is restarted you may lose the most
// recent commits.
-
+
T(db.save({_id:"1",a:2,b:4}).ok);
T(db.open("1") != null);
-
+
restartServer();
-
+
T(db.open("1") == null); // lost the update.
// note if we waited > 1 sec before the restart, the doc would likely
// commit.
-
-
+
+
// Retry the same thing but with full commits on.
-
+
var db2 = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"true"});
-
+
T(db2.save({_id:"1",a:2,b:4}).ok);
T(db2.open("1") != null);
-
+
restartServer();
-
+
T(db2.open("1") != null);
-
+
// You can update but without committing immediately, and then ensure
// everything is commited in the last step.
-
+
T(db.save({_id:"2",a:2,b:4}).ok);
T(db.open("2") != null);
T(db.ensureFullCommit().ok);
restartServer();
-
+
T(db.open("2") != null);
-
+
// However, it's possible even when flushed, that the server crashed between
// the update and the commit, and you don't want to check to make sure
// every doc you updated actually made it to disk. So record the instance
@@ -59,37 +59,37 @@ couchTests.delayed_commits = function(debug) {
// after the flush (the instance start time is returned by the flush
// operation). if they are the same, we know everything was updated
// safely.
-
+
// First try it with a crash.
-
+
var instanceStartTime = db.info().instance_start_time;
-
+
T(db.save({_id:"3",a:2,b:4}).ok);
T(db.open("3") != null);
-
+
restartServer();
-
+
var commitResult = db.ensureFullCommit();
T(commitResult.ok && commitResult.instance_start_time != instanceStartTime);
// start times don't match, meaning the server lost our change
-
+
T(db.open("3") == null); // yup lost it
-
+
// retry with no server restart
-
+
var instanceStartTime = db.info().instance_start_time;
-
+
T(db.save({_id:"4",a:2,b:4}).ok);
T(db.open("4") != null);
-
+
var commitResult = db.ensureFullCommit();
T(commitResult.ok && commitResult.instance_start_time == instanceStartTime);
// Successful commit, start times match!
-
+
restartServer();
-
+
T(db.open("4") != null);
-
+
// Now test that when we exceed the max_dbs_open, pending commits are safely
// written.
T(db.save({_id:"5",foo:"bar"}).ok);
@@ -111,5 +111,5 @@ couchTests.delayed_commits = function(debug) {
dbi.deleteDb();
}
});
-
+
};
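The delayed_commits hunks spell out the pattern the test depends on: read the database's instance_start_time before writing, flush with _ensure_full_commit afterwards, and treat the write as durable only if the start time returned by the flush matches, meaning the server did not crash or restart in between. A standalone sketch of that check, under the same hypothetical host and database assumptions as above:

// Sketch only: detect a restart between an update and its commit.
const base = "http://127.0.0.1:5984/test_suite_db";
const jsonHeaders = { "Content-Type": "application/json" };

async function putAndConfirm(id, doc) {
  const before = (await (await fetch(base)).json()).instance_start_time;

  await fetch(base + "/" + encodeURIComponent(id), {
    method: "PUT", headers: jsonHeaders, body: JSON.stringify(doc),
  });

  const commit = await (await fetch(base + "/_ensure_full_commit", {
    method: "POST", headers: jsonHeaders,
  })).json();

  // Matching start times mean no restart happened between the write and the
  // flush, so the document is known to have reached disk.
  return commit.ok && commit.instance_start_time === before;
}

putAndConfirm("doc-4", { a: 2, b: 4 }).then(function (ok) {
  console.log("durable:", ok);
});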
diff --git a/share/www/script/test/design_docs.js b/share/www/script/test/design_docs.js
index b1ff8432..403f4e43 100644
--- a/share/www/script/test/design_docs.js
+++ b/share/www/script/test/design_docs.js
@@ -74,16 +74,16 @@ function() {
T(db.ensureFullCommit().ok);
restartServer();
};
-
+
// test when language not specified, Javascript is implied
var designDoc2 = {
_id:"_design/test2",
- // language: "javascript",
+ // language: "javascript",
views: {
single_doc: {map: "function(doc) { if (doc._id == \"1\") { emit(1, null) }}"}
}
};
-
+
T(db.save(designDoc2).ok);
T(db.view("test2/single_doc").total_rows == 1);
@@ -113,14 +113,14 @@ function() {
T(db.deleteDoc(designDoc).ok);
T(db.open(designDoc._id) == null);
T(db.view("test/no_docs") == null);
-
+
T(db.ensureFullCommit().ok);
restartServer();
T(db.open(designDoc._id) == null);
T(db.view("test/no_docs") == null);
-
+
// trigger ddoc cleanup
T(db.viewCleanup().ok);
-
+
});
};
diff --git a/share/www/script/test/design_options.js b/share/www/script/test/design_options.js
index 4d7684c6..952ecc74 100644
--- a/share/www/script/test/design_options.js
+++ b/share/www/script/test/design_options.js
@@ -26,7 +26,7 @@ couchTests.design_options = function(debug) {
language: "javascript",
options: {
include_design: true,
- local_seq: true
+ local_seq: true
},
views: {
data: {"map": map},
@@ -63,7 +63,7 @@ couchTests.design_options = function(debug) {
T(db.save(designDoc).ok);
rows = db.view("bango/data").rows;
T(rows.length == 0);
-
+
// should also have local_seq in the view
var resp = db.save({});
rows = db.view("fu/with_seq").rows;
diff --git a/share/www/script/test/design_paths.js b/share/www/script/test/design_paths.js
index 7722a188..df47fbf2 100644
--- a/share/www/script/test/design_paths.js
+++ b/share/www/script/test/design_paths.js
@@ -18,7 +18,7 @@ couchTests.design_paths = function(debug) {
var dbName = encodeURIComponent(dbNames[i]);
db.deleteDb();
db.createDb();
-
+
// create a ddoc w bulk_docs
db.bulkSave([{
_id : "_design/test",
diff --git a/share/www/script/test/etags_views.js b/share/www/script/test/etags_views.js
index 018bdc25..1356cdb5 100644
--- a/share/www/script/test/etags_views.js
+++ b/share/www/script/test/etags_views.js
@@ -15,7 +15,7 @@ couchTests.etags_views = function(debug) {
db.deleteDb();
db.createDb();
if (debug) debugger;
-
+
var designDoc = {
_id:"_design/etags",
language: "javascript",
@@ -43,7 +43,7 @@ couchTests.etags_views = function(debug) {
var xhr;
var docs = makeDocs(0, 10);
db.bulkSave(docs);
-
+
// verify get w/Etag on map view
xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView");
T(xhr.status == 200);
@@ -53,7 +53,7 @@ couchTests.etags_views = function(debug) {
});
T(xhr.status == 304);
// TODO GET with keys (when that is available)
-
+
// reduce view
xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/withReduce");
T(xhr.status == 200);
@@ -62,7 +62,7 @@ couchTests.etags_views = function(debug) {
headers: {"if-none-match": etag}
});
T(xhr.status == 304);
-
+
// all docs
xhr = CouchDB.request("GET", "/test_suite_db/_all_docs");
T(xhr.status == 200);
@@ -79,7 +79,7 @@ couchTests.etags_views = function(debug) {
xhr = CouchDB.request("GET", "/test_suite_db/_all_docs_by_seq", {
headers: {"if-none-match": etag}
});
- T(xhr.status == 304);
+ T(xhr.status == 304);
// list etag
// in the list test for now
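The etags_views hunks are whitespace cleanup around the ETag behaviour being tested: a view (or _all_docs) response carries an ETag, and repeating the request with If-None-Match set to that value yields 304 until the underlying index changes. A small hedged sketch of the conditional GET, same local-server assumption as above:

// Sketch only: conditional GET against a view using its ETag.
const viewUrl =
  "http://127.0.0.1:5984/test_suite_db/_design/etags/_view/basicView";

async function conditionalGet() {
  const first = await fetch(viewUrl);
  const etag = first.headers.get("ETag");         // opaque, quoted value

  const second = await fetch(viewUrl, {
    headers: { "If-None-Match": etag },
  });
  console.log(first.status, second.status);       // expected: 200 304
}

conditionalGet().catch(console.error);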
diff --git a/share/www/script/test/invalid_docids.js b/share/www/script/test/invalid_docids.js
index 4fc4bbf5..a9de0e83 100644
--- a/share/www/script/test/invalid_docids.js
+++ b/share/www/script/test/invalid_docids.js
@@ -19,7 +19,7 @@ couchTests.invalid_docids = function(debug) {
// Test _local explicitly first.
T(db.save({"_id": "_local/foo"}).ok);
T(db.open("_local/foo")._id == "_local/foo");
-
+
//Test non-string
try {
db.save({"_id": 1});
diff --git a/share/www/script/test/list_views.js b/share/www/script/test/list_views.js
index bbe0814c..f9268479 100644
--- a/share/www/script/test/list_views.js
+++ b/share/www/script/test/list_views.js
@@ -15,7 +15,7 @@ couchTests.list_views = function(debug) {
db.deleteDb();
db.createDb();
if (debug) debugger;
-
+
var designDoc = {
_id:"_design/lists",
language: "javascript",
@@ -44,12 +44,12 @@ couchTests.list_views = function(debug) {
var row;
while(row = getRow()) {
log("row: "+toJSON(row));
- send(row.key);
+ send(row.key);
};
return "tail";
}),
basicJSON : stringFun(function(head, req) {
- start({"headers":{"Content-Type" : "application/json"}});
+ start({"headers":{"Content-Type" : "application/json"}});
send('{"head":'+toJSON(head)+', ');
send('"req":'+toJSON(req)+', ');
send('"rows":[');
@@ -144,7 +144,7 @@ couchTests.list_views = function(debug) {
send("head");
var row;
while(row = getRow()) {
- send(row.key);
+ send(row.key);
};
getRow();
getRow();
@@ -165,13 +165,13 @@ couchTests.list_views = function(debug) {
};
T(db.save(designDoc).ok);
-
+
var docs = makeDocs(0, 10);
db.bulkSave(docs);
-
+
var view = db.view('lists/basicView');
T(view.total_rows == 10);
-
+
// standard get
var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/basicBasic/basicView");
T(xhr.status == 200, "standard get should be 200");
@@ -214,7 +214,7 @@ couchTests.list_views = function(debug) {
T(!(/Key: 1/.test(xhr.responseText)));
T(/FirstKey: 3/.test(xhr.responseText));
T(/LastKey: 9/.test(xhr.responseText));
-
+
// with 0 rows
var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/basicView?startkey=30");
T(xhr.status == 200, "0 rows");
@@ -231,19 +231,19 @@ couchTests.list_views = function(debug) {
T(xhr.status == 200, "reduce 0 rows");
T(/Total Rows/.test(xhr.responseText));
T(/LastKey: undefined/.test(xhr.responseText));
-
+
// when there is a reduce present, but not used
var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?reduce=false");
T(xhr.status == 200, "reduce false");
T(/Total Rows/.test(xhr.responseText));
T(/Key: 1/.test(xhr.responseText));
-
+
// when there is a reduce present, and used
xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?group=true");
T(xhr.status == 200, "group reduce");
T(/Key: 1/.test(xhr.responseText));
-
+
// there should be etags on reduce as well
var etag = xhr.getResponseHeader("etag");
T(etag, "Etags should be served with reduce lists");
@@ -251,11 +251,11 @@ couchTests.list_views = function(debug) {
headers: {"if-none-match": etag}
});
T(xhr.status == 304);
-
+
// verify the etags expire correctly
var docs = makeDocs(11, 12);
db.bulkSave(docs);
-
+
xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?group=true", {
headers: {"if-none-match": etag}
});
@@ -284,7 +284,7 @@ couchTests.list_views = function(debug) {
});
T(xhr.status == 400);
T(/query_parse_error/.test(xhr.responseText));
-
+
var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/rowError/basicView");
T(/ReferenceError/.test(xhr.responseText));
@@ -292,7 +292,7 @@ couchTests.list_views = function(debug) {
// now with extra qs params
var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/qsParams/basicView?foo=blam");
T(xhr.responseText.match(/blam/));
-
+
var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/stopIter/basicView");
// T(xhr.getResponseHeader("Content-Type") == "text/plain");
T(xhr.responseText.match(/^head 0 1 2 tail$/) && "basic stop");
@@ -305,7 +305,7 @@ couchTests.list_views = function(debug) {
T(xhr.responseText.match(/^head 0 1 2 tail$/) && "reduce stop");
xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/stopIter2/withReduce?group=true");
T(xhr.responseText.match(/^head 0 1 2 tail$/) && "reduce stop 2");
-
+
// with accept headers for HTML
xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/acceptSwitch/basicView", {
headers: {
diff --git a/share/www/script/test/purge.js b/share/www/script/test/purge.js
index 0b47c0d0..0951bb7a 100644
--- a/share/www/script/test/purge.js
+++ b/share/www/script/test/purge.js
@@ -15,7 +15,7 @@ couchTests.purge = function(debug) {
db.deleteDb();
db.createDb();
if (debug) debugger;
-
+
/*
purge is not to be confused with a document deletion. It removes the
document and all edit history from the local instance of the database.
@@ -31,7 +31,7 @@ couchTests.purge = function(debug) {
single_doc: {map: "function(doc) { if (doc._id == \"1\") { emit(1, null) }}"}
}
}
-
+
T(db.save(designDoc).ok);
db.bulkSave(makeDocs(1, numDocs + 1));
@@ -43,11 +43,11 @@ couchTests.purge = function(debug) {
T(rows[(2*i)+1].key == i+1);
}
T(db.view("test/single_doc").total_rows == 1);
-
+
var info = db.info();
var doc1 = db.open("1");
var doc2 = db.open("2");
-
+
// purge the documents
var xhr = CouchDB.request("POST", "/test_suite_db/_purge", {
body: JSON.stringify({"1":[doc1._rev], "2":[doc2._rev]}),
@@ -63,35 +63,35 @@ couchTests.purge = function(debug) {
var result = JSON.parse(xhr.responseText);
T(result.purged["1"][0] == doc1._rev);
T(result.purged["2"][0] == doc2._rev);
-
+
T(db.open("1") == null);
T(db.open("2") == null);
-
+
var rows = db.view("test/all_docs_twice").rows;
for (var i = 2; i < numDocs; i++) {
T(rows[2*(i-2)].key == i+1);
T(rows[(2*(i-2))+1].key == i+1);
}
T(db.view("test/single_doc").total_rows == 0);
-
+
// purge documents twice in a row without loading views
// (causes full view rebuilds)
-
+
var doc3 = db.open("3");
var doc4 = db.open("4");
-
+
xhr = CouchDB.request("POST", "/test_suite_db/_purge", {
body: JSON.stringify({"3":[doc3._rev]}),
});
-
+
T(xhr.status == 200);
-
+
xhr = CouchDB.request("POST", "/test_suite_db/_purge", {
body: JSON.stringify({"4":[doc4._rev]}),
});
-
+
T(xhr.status == 200);
-
+
var rows = db.view("test/all_docs_twice").rows;
for (var i = 4; i < numDocs; i++) {
T(rows[2*(i-4)].key == i+1);
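Alongside the purge.js hunks: unlike a deletion, _purge removes the documents and their entire edit history from the local database, and views that referenced them are rebuilt. The request body maps each doc id to the revisions to purge, and the response echoes what was removed under "purged". A hedged sketch of the request shape, same local-server assumption:

// Sketch only: purge one revision of a single document.
const base = "http://127.0.0.1:5984/test_suite_db";

async function purge(id, rev) {
  const resp = await fetch(base + "/_purge", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ [id]: [rev] }),        // {"docid": ["rev"]}
  });
  const result = await resp.json();
  // result.purged lists the revisions actually removed, keyed by doc id.
  console.log(resp.status, result.purged);
  return result;
}

// Usage: rev must be a real revision of the doc, e.g. purge("1", doc._rev)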
diff --git a/share/www/script/test/reduce.js b/share/www/script/test/reduce.js
index c8bdcd92..84230998 100644
--- a/share/www/script/test/reduce.js
+++ b/share/www/script/test/reduce.js
@@ -159,11 +159,11 @@ couchTests.reduce = function(debug) {
docs.push({val:100});
db.bulkSave(docs);
}
-
+
var results = db.query(map, reduceCombine);
-
+
var difference = results.rows[0].value.stdDeviation - 28.722813232690143;
// account for floating point rounding error
T(Math.abs(difference) < 0.0000000001);
-
+
};
diff --git a/share/www/script/test/reduce_builtin.js b/share/www/script/test/reduce_builtin.js
index c3d00339..3dc26862 100644
--- a/share/www/script/test/reduce_builtin.js
+++ b/share/www/script/test/reduce_builtin.js
@@ -15,11 +15,11 @@ couchTests.reduce_builtin = function(debug) {
db.deleteDb();
db.createDb();
if (debug) debugger;
-
+
var numDocs = 500
var docs = makeDocs(1,numDocs + 1);
db.bulkSave(docs);
-
+
var summate = function(N) {return (N+1)*N/2;};
// this is the same test as the reduce.js test
@@ -42,7 +42,7 @@ couchTests.reduce_builtin = function(debug) {
T(result.rows[0].value == 18);
result = db.query(map, "_count", {startkey: 4, endkey: 5});
T(result.rows[0].value == 4);
-
+
result = db.query(map, "_sum", {startkey: 4, endkey: 6});
T(result.rows[0].value == 30);
result = db.query(map, "_count", {startkey: 4, endkey: 6});
@@ -57,7 +57,7 @@ couchTests.reduce_builtin = function(debug) {
result = db.query(map, "_sum", {startkey: i, endkey: numDocs - i});
T(result.rows[0].value == 2*(summate(numDocs-i) - summate(i-1)));
}
-
+
db.deleteDb();
db.createDb();
@@ -88,7 +88,7 @@ couchTests.reduce_builtin = function(debug) {
for (var b=0; b < builtins.length; b++) {
var fun = builtins[b];
var results = db.query(map, fun, {group:true});
-
+
//group by exact key match
T(equals(results.rows[0], {key:["a"],value:20*i}));
T(equals(results.rows[1], {key:["a","b"],value:20*i}));
@@ -114,6 +114,6 @@ couchTests.reduce_builtin = function(debug) {
T(equals(results.rows[4], {key:["d","a"],value:10*i}));
T(equals(results.rows[5], {key:["d","b"],value:10*i}));
T(equals(results.rows[6], {key:["d","c"],value:10*i}));
- };
+ };
}

}
\ No newline at end of file
diff --git a/share/www/script/test/reduce_false.js b/share/www/script/test/reduce_false.js
index 22ef2e8b..e4928cc4 100644
--- a/share/www/script/test/reduce_false.js
+++ b/share/www/script/test/reduce_false.js
@@ -34,7 +34,7 @@ couchTests.reduce_false = function(debug) {
// Test that the reduce works
var res = db.view('test/summate');
T(res.rows.length == 1 && res.rows[0].value == summate(5));
-
+
//Test that we get our docs back
res = db.view('test/summate', {reduce: false});
T(res.rows.length == 5);
diff --git a/share/www/script/test/replication.js b/share/www/script/test/replication.js
index 405b0b1d..c9caa5ee 100644
--- a/share/www/script/test/replication.js
+++ b/share/www/script/test/replication.js
@@ -35,7 +35,7 @@ couchTests.replication = function(debug) {
dbA.createDb();
dbB.deleteDb();
dbB.createDb();
-
+
var repTests = {
// copy and paste and put your code in. delete unused steps.
test_template: new function () {
@@ -49,20 +49,20 @@ couchTests.replication = function(debug) {
// called after replicating src=B tgt=A first time.
};
this.afterAB2 = function(dbA, dbB) {
- // called after replicating src=A tgt=B second time.
+ // called after replicating src=A tgt=B second time.
};
this.afterBA2 = function(dbA, dbB) {
// etc...
};
},
-
+
simple_test: new function () {
this.init = function(dbA, dbB) {
var docs = makeDocs(0, numDocs);
dbA.bulkSave(docs);
};
-
- this.afterAB1 = function(dbA, dbB) {
+
+ this.afterAB1 = function(dbA, dbB) {
for (var j = 0; j < numDocs; j++) {
var docA = dbA.open("" + j);
var docB = dbB.open("" + j);
@@ -70,13 +70,13 @@ couchTests.replication = function(debug) {
}
};
},
-
+
deletes_test: new function () {
// make sure deletes are replicated
this.init = function(dbA, dbB) {
T(dbA.save({_id:"foo1",value:"a"}).ok);
};
-
+
this.afterAB1 = function(dbA, dbB) {
var docA = dbA.open("foo1");
var docB = dbB.open("foo1");
@@ -84,13 +84,13 @@ couchTests.replication = function(debug) {
dbA.deleteDoc(docA);
};
-
+
this.afterAB2 = function(dbA, dbB) {
T(dbA.open("foo1") == null);
T(dbB.open("foo1") == null);
};
},
-
+
deleted_test : new function() {
// docs created and deleted on a single node are also replicated
this.init = function(dbA, dbB) {
@@ -98,7 +98,7 @@ couchTests.replication = function(debug) {
var docA = dbA.open("del1");
dbA.deleteDoc(docA);
};
-
+
this.afterAB1 = function(dbA, dbB) {
var rows = dbB.allDocsBySeq().rows;
var rowCnt = 0;
@@ -111,13 +111,13 @@ couchTests.replication = function(debug) {
T(rowCnt == 1);
};
},
-
+
slashes_in_ids_test: new function () {
// make sure docs with slashes in id replicate properly
this.init = function(dbA, dbB) {
dbA.save({ _id:"abc/def", val:"one" });
};
-
+
this.afterAB1 = function(dbA, dbB) {
var docA = dbA.open("abc/def");
var docB = dbB.open("abc/def");
@@ -137,7 +137,7 @@ couchTests.replication = function(debug) {
T(docA._rev == docB._rev);
};
},
-
+
attachments_test: new function () {
// Test attachments
this.init = function(dbA, dbB) {
@@ -161,34 +161,34 @@ couchTests.replication = function(debug) {
}
});
};
-
+
this.afterAB1 = function(dbA, dbB) {
- var xhr = CouchDB.request("GET",
+ var xhr = CouchDB.request("GET",
"/test_suite_db_a/bin_doc/foo%2Bbar.txt");
T(xhr.responseText == "This is a base64 encoded text")
- xhr = CouchDB.request("GET",
+ xhr = CouchDB.request("GET",
"/test_suite_db_b/bin_doc/foo%2Bbar.txt");
T(xhr.responseText == "This is a base64 encoded text")
// and the design-doc
- xhr = CouchDB.request("GET",
+ xhr = CouchDB.request("GET",
"/test_suite_db_a/_design/with_bin/foo%2Bbar.txt");
T(xhr.responseText == "This is a base64 encoded text")
- xhr = CouchDB.request("GET",
+ xhr = CouchDB.request("GET",
"/test_suite_db_b/_design/with_bin/foo%2Bbar.txt");
T(xhr.responseText == "This is a base64 encoded text")
};
},
-
+
conflicts_test: new function () {
// test conflicts
this.init = function(dbA, dbB) {
dbA.save({_id:"foo",value:"a"});
dbB.save({_id:"foo",value:"b"});
};
-
+
this.afterBA1 = function(dbA, dbB) {
var docA = dbA.open("foo", {conflicts: true});
var docB = dbB.open("foo", {conflicts: true});
@@ -202,7 +202,7 @@ couchTests.replication = function(debug) {
// delete a conflict.
dbA.deleteDoc({_id:"foo", _rev:docA._conflicts[0]});
};
-
+
this.afterBA2 = function(dbA, dbB) {
// open documents and include the conflict meta data
var docA = dbA.open("foo", {conflicts: true});
@@ -223,7 +223,7 @@ couchTests.replication = function(debug) {
}
var result = CouchDB.replicate(A, B);
-
+
var seqA = result.source_last_seq;
T(0 == result.history[0].start_last_seq);
T(result.history[1] === undefined)
@@ -233,7 +233,7 @@ couchTests.replication = function(debug) {
}
result = CouchDB.replicate(B, A);
-
+
var seqB = result.source_last_seq;
T(0 == result.history[0].start_last_seq);
T(result.history[1] === undefined)
@@ -243,14 +243,14 @@ couchTests.replication = function(debug) {
}
var result2 = CouchDB.replicate(A, B);
-
+
// each successful replication produces a new session id
T(result2.session_id != result.session_id);
-
+
T(seqA < result2.source_last_seq);
T(seqA == result2.history[0].start_last_seq);
T(result2.history[1].end_last_seq == seqA)
-
+
seqA = result2.source_last_seq;
for(test in repTests) {
@@ -258,17 +258,17 @@ couchTests.replication = function(debug) {
}
result = CouchDB.replicate(B, A)
-
+
T(seqB < result.source_last_seq);
T(seqB == result.history[0].start_last_seq);
T(result.history[1].end_last_seq == seqB)
-
+
seqB = result.source_last_seq;
for(test in repTests) {
if(repTests[test].afterBA2) repTests[test].afterBA2(dbA, dbB);
}
-
+
// do an replication where nothing has changed
result2 = CouchDB.replicate(B, A);
T(result2.no_changes == true);
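The replication.js hunks are whitespace-only; the assertions around them read the fields returned by the _replicate endpoint: source_last_seq, a fresh session_id per successful run, a history entry with start_last_seq/end_last_seq for the checkpointed range, and no_changes when nothing needed copying. A hedged sketch of triggering one replication and reading those fields (local server, hypothetical database names):

// Sketch only: run a one-shot replication and inspect its result.
async function replicate(source, target) {
  const resp = await fetch("http://127.0.0.1:5984/_replicate", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ source: source, target: target }),
  });
  const result = await resp.json();
  console.log("session:", result.session_id,
              "source_last_seq:", result.source_last_seq,
              "history[0]:", result.history && result.history[0]);
  return result;
}

replicate("test_suite_db_a", "test_suite_db_b").catch(console.error);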
diff --git a/share/www/script/test/rev_stemming.js b/share/www/script/test/rev_stemming.js
index 3832b520..6dc94f70 100644
--- a/share/www/script/test/rev_stemming.js
+++ b/share/www/script/test/rev_stemming.js
@@ -18,11 +18,11 @@ couchTests.rev_stemming = function(debug) {
dbB.deleteDb();
dbB.createDb();
if (debug) debugger;
-
+
var newLimit = 5;
-
+
T(db.getDbProperty("_revs_limit") == 1000);
-
+
var doc = {_id:"foo",foo:0}
for( var i=0; i < newLimit + 1; i++) {
doc.foo++;
@@ -30,30 +30,30 @@ couchTests.rev_stemming = function(debug) {
}
var doc0 = db.open("foo", {revs:true});
T(doc0._revisions.ids.length == newLimit + 1);
-
+
var docBar = {_id:"bar",foo:0}
for( var i=0; i < newLimit + 1; i++) {
docBar.foo++;
T(db.save(docBar).ok);
}
T(db.open("bar", {revs:true})._revisions.ids.length == newLimit + 1);
-
+
T(db.setDbProperty("_revs_limit", newLimit).ok);
-
+
for( var i=0; i < newLimit + 1; i++) {
doc.foo++;
T(db.save(doc).ok);
}
doc0 = db.open("foo", {revs:true});
T(doc0._revisions.ids.length == newLimit);
-
-
+
+
// If you replicate after you make more edits than the limit, you'll
// cause a spurious edit conflict.
CouchDB.replicate("test_suite_db_a", "test_suite_db_b");
var docB1 = dbB.open("foo",{conflicts:true})
T(docB1._conflicts == null);
-
+
for( var i=0; i < newLimit - 1; i++) {
doc.foo++;
T(db.save(doc).ok);
@@ -69,30 +69,30 @@ couchTests.rev_stemming = function(debug) {
doc.foo++;
T(db.save(doc).ok);
}
-
+
CouchDB.replicate("test_suite_db_a", "test_suite_db_b");
-
+
var docB2 = dbB.open("foo",{conflicts:true});
-
+
// we have a conflict, but the previous replicated rev is always the losing
// conflict
T(docB2._conflicts[0] == docB1._rev)
-
+
// We having already updated bar before setting the limit, so it's still got
// a long rev history. compact to stem the revs.
-
+
T(db.open("bar", {revs:true})._revisions.ids.length == newLimit + 1);
-
+
T(db.compact().ok);
-
+
// compaction isn't instantaneous, loop until done
while (db.info().compact_running) {};
-
+
// force reload because ETags don't honour compaction
var req = db.request("GET", "/test_suite_db_a/bar?revs=true", {
headers:{"if-none-match":"pommes"}
});
-
+
var finalDoc = JSON.parse(req.responseText);
TEquals(newLimit, finalDoc._revisions.ids.length,
"should return a truncated revision list");
diff --git a/share/www/script/test/security_validation.js b/share/www/script/test/security_validation.js
index 1c185c01..05dff613 100644
--- a/share/www/script/test/security_validation.js
+++ b/share/www/script/test/security_validation.js
@@ -16,7 +16,7 @@ couchTests.security_validation = function(debug) {
// specifically for this testing. It is a WWWW-Authenticate scheme named
// X-Couch-Test-Auth, and the user names and passwords are hard coded
// on the server-side.
- //
+ //
// We could have used Basic authentication, however the XMLHttpRequest
// implementation for Firefox and Safari, and probably other browsers are
// broken (Firefox always prompts the user on 401 failures, Safari gives
@@ -45,7 +45,7 @@ couchTests.security_validation = function(debug) {
{section:"httpd",
key: "WWW-Authenticate",
value: "X-Couch-Test-Auth"}],
-
+
function () {
// try saving document usin the wrong credentials
var wrongPasswordDb = new CouchDB("test_suite_db",
@@ -60,8 +60,8 @@ couchTests.security_validation = function(debug) {
T(wrongPasswordDb.last_req.status == 401);
}
- // test force_login=true.
- var resp = wrongPasswordDb.request("GET", "/_whoami?force_login=true");
+ // test force_login=true.
+ var resp = wrongPasswordDb.request("GET", "/_whoami?force_login=true");
var err = JSON.parse(resp.responseText);
T(err.error == "unauthorized");
T(resp.status == 401);
@@ -110,7 +110,7 @@ couchTests.security_validation = function(debug) {
T(user.name == "Damien Katz");
// test that the roles are listed properly
TEquals(user.roles, []);
-
+
// update the document
var doc = userDb.open("testdoc");
@@ -126,7 +126,7 @@ couchTests.security_validation = function(debug) {
T(userDb.last_req.status == 403);
}
- // Now attempt to update the document as a different user, Jan
+ // Now attempt to update the document as a different user, Jan
var user2Db = new CouchDB("test_suite_db",
{"WWW-Authenticate": "X-Couch-Test-Auth Jan Lehnardt:apple"}
);
@@ -161,7 +161,7 @@ couchTests.security_validation = function(debug) {
}
// Now delete document
- T(user2Db.deleteDoc(doc).ok);
+ T(user2Db.deleteDoc(doc).ok);
// now test bulk docs
var docs = [{_id:"bahbah",author:"Damien Katz",foo:"bar"},{_id:"fahfah",foo:"baz"}];
@@ -173,11 +173,11 @@ couchTests.security_validation = function(debug) {
T(results[0].error == undefined)
T(results[1].rev === undefined)
T(results[1].error == "forbidden")
-
+
T(db.open("bahbah"));
T(db.open("fahfah") == null);
-
-
+
+
// now all or nothing with a failure
var docs = [{_id:"booboo",author:"Damien Katz",foo:"bar"},{_id:"foofoo",foo:"baz"}];
@@ -188,23 +188,23 @@ couchTests.security_validation = function(debug) {
T(results.errors[0].error == "forbidden");
T(db.open("booboo") == null);
T(db.open("foofoo") == null);
-
-
+
+
// Now test replication
var AuthHeaders = {"WWW-Authenticate": "X-Couch-Test-Auth Christopher Lenz:dog food"};
var host = CouchDB.host;
var dbPairs = [
{source:"test_suite_db_a",
target:"test_suite_db_b"},
-
+
{source:"test_suite_db_a",
target:{url: "http://" + host + "/test_suite_db_b",
headers: AuthHeaders}},
-
+
{source:{url:"http://" + host + "/test_suite_db_a",
headers: AuthHeaders},
target:"test_suite_db_b"},
-
+
{source:{url:"http://" + host + "/test_suite_db_a",
headers: AuthHeaders},
target:{url:"http://" + host + "/test_suite_db_b",
@@ -225,7 +225,7 @@ couchTests.security_validation = function(debug) {
adminDbA.createDb();
adminDbB.deleteDb();
adminDbB.createDb();
-
+
// save and replicate a documents that will and will not pass our design
// doc validation function.
dbA.save({_id:"foo1",value:"a",author:"Noah Slater"});
@@ -239,44 +239,44 @@ couchTests.security_validation = function(debug) {
T(dbB.open("foo1"));
T(dbA.open("foo2"));
T(dbB.open("foo2"));
-
+
// save the design doc to dbA
delete designDoc._rev; // clear rev from previous saves
adminDbA.save(designDoc);
// no affect on already saved docs
T(dbA.open("bad1"));
-
+
// Update some docs on dbB. Since the design hasn't replicated, anything
// is allowed.
-
+
// this edit will fail validation on replication to dbA (no author)
T(dbB.save({_id:"bad2",value:"a"}).ok);
-
+
// this edit will fail security on replication to dbA (wrong author
// replicating the change)
var foo1 = dbB.open("foo1");
foo1.value = "b";
dbB.save(foo1);
-
+
// this is a legal edit
var foo2 = dbB.open("foo2");
foo2.value = "b";
dbB.save(foo2);
-
+
var results = CouchDB.replicate(B, A, {headers:AuthHeaders});
-
+
T(results.ok);
-
+
T(results.history[0].docs_written == 1);
T(results.history[0].doc_write_failures == 2);
-
+
// bad2 should not be on dbA
T(dbA.open("bad2") == null);
-
+
// The edit to foo1 should not have replicated.
T(dbA.open("foo1").value == "a");
-
+
// The edit to foo2 should have replicated.
T(dbA.open("foo2").value == "b");
}
diff --git a/share/www/script/test/show_documents.js b/share/www/script/test/show_documents.js
index 64a6788f..7201ae81 100644
--- a/share/www/script/test/show_documents.js
+++ b/share/www/script/test/show_documents.js
@@ -16,12 +16,12 @@ couchTests.show_documents = function(debug) {
db.deleteDb();
db.createDb();
if (debug) debugger;
-
+
var designDoc = {
_id:"_design/template",
language: "javascript",
shows: {
- "hello" : stringFun(function(doc, req) {
+ "hello" : stringFun(function(doc, req) {
if (doc) {
return "Hello World";
} else {
@@ -77,7 +77,7 @@ couchTests.show_documents = function(debug) {
if (req.headers["Accept"].match(/image/)) {
return {
// a 16x16 px version of the CouchDB logo
- "base64" :
+ "base64" :
["iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAsV",
"BMVEUAAAD////////////////////////5ur3rEBn////////////////wDBL/",
"AADuBAe9EB3IEBz/7+//X1/qBQn2AgP/f3/ilpzsDxfpChDtDhXeCA76AQH/v7",
@@ -129,7 +129,7 @@ couchTests.show_documents = function(debug) {
}
};
T(db.save(designDoc).ok);
-
+
var doc = {"word":"plankton", "name":"Rusty"}
var resp = db.save(doc);
T(resp.ok);
@@ -139,7 +139,7 @@ couchTests.show_documents = function(debug) {
var xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/");
T(xhr.status == 404, 'Should be missing');
T(JSON.parse(xhr.responseText).reason == "Invalid path.");
-
+
// hello template world
xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/hello/"+docid);
T(xhr.responseText == "Hello World");
@@ -151,7 +151,7 @@ couchTests.show_documents = function(debug) {
// // error stacktraces
// xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/render-error/"+docid);
// T(JSON.parse(xhr.responseText).error == "render_error");
-
+
// hello template world (no docid)
xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/hello");
T(xhr.responseText == "Empty World");
@@ -159,21 +159,21 @@ couchTests.show_documents = function(debug) {
// // hello template world (non-existing docid)
xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/hello/nonExistingDoc");
T(xhr.responseText == "New World");
-
+
// show with doc
xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid);
T(xhr.responseText == "Just Rusty");
-
+
// show with missing doc
xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/missingdoc");
T(xhr.status == 404, 'Doc should be missing');
T(xhr.responseText == "No such doc");
-
+
// show with missing func
xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/missing/"+docid);
T(xhr.status == 404, "function is missing");
-
+
// missing design doc
xhr = CouchDB.request("GET", "/test_suite_db/_design/missingddoc/_show/just-name/"+docid);
T(xhr.status == 404);
@@ -200,7 +200,7 @@ couchTests.show_documents = function(debug) {
T("Accept" == xhr.getResponseHeader("Vary"));
// accept header switching
- // different mime has different etag
+ // different mime has different etag
xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/accept-switch/"+docid, {
headers: {"Accept": "text/html;text/plain;*/*"}
});
@@ -227,7 +227,7 @@ couchTests.show_documents = function(debug) {
headers: {"if-none-match": etag}
});
// should be 304
- T(xhr.status == 304);
+ T(xhr.status == 304);
// update the doc
doc.name = "Crusty";
@@ -237,7 +237,7 @@ couchTests.show_documents = function(debug) {
xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid, {
headers: {"if-none-match": etag}
});
- // status is 200
+ // status is 200
T(xhr.status == 200);
// get new etag and request again
@@ -251,7 +251,7 @@ couchTests.show_documents = function(debug) {
// update design doc (but not function)
designDoc.isChanged = true;
T(db.save(designDoc).ok);
-
+
xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid, {
headers: {"if-none-match": etag}
});
@@ -269,7 +269,7 @@ couchTests.show_documents = function(debug) {
xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid, {
headers: {"if-none-match": etag}
});
- // status is 200
+ // status is 200
T(xhr.status == 200);
@@ -287,7 +287,7 @@ couchTests.show_documents = function(debug) {
});
var ct = xhr.getResponseHeader("Content-Type");
T(/charset=utf-8/.test(ct))
- T(/text\/html/.test(ct))
+ T(/text\/html/.test(ct))
T(xhr.responseText == "Ha ha, you said \"plankton\".");
// now with xml
diff --git a/share/www/script/test/stats.js b/share/www/script/test/stats.js
index 8a21dd88..7c9b5da3 100644
--- a/share/www/script/test/stats.js
+++ b/share/www/script/test/stats.js
@@ -76,7 +76,7 @@ couchTests.stats = function(debug) {
})
},
};
-
+
var request_count_tests = {
'should increase the request count for every request': function(name) {
var requests = requestStatsTest("httpd", "requests").current + 1;
@@ -88,7 +88,7 @@ couchTests.stats = function(debug) {
TEquals(requests + 1, new_requests, name);
}
};
-
+
var database_read_count_tests = {
'should increase database reads counter when a document is read': function(name) {
var db = new CouchDB("test_suite_db");
@@ -186,7 +186,7 @@ couchTests.stats = function(debug) {
TEquals(reads + 1 , new_reads, name);
}
};
-
+
var http_requests_by_method_tests = {
'should count GET requests': function(name) {
var requests = requestStatsTest("httpd_request_methods", "GET").current;
@@ -199,7 +199,7 @@ couchTests.stats = function(debug) {
CouchDB.request("POST", "/");
var new_requests = requestStatsTest("httpd_request_methods", "GET").current;
- TEquals(requests + 1, new_requests, name);
+ TEquals(requests + 1, new_requests, name);
},
'should count POST requests': function(name) {
var requests = requestStatsTest("httpd_request_methods", "POST").current;
@@ -229,7 +229,7 @@ couchTests.stats = function(debug) {
var doc = {"_id":"test"};
db.save(doc);
-
+
var updates = requestStatsTest("couchdb", "database_writes").current;
db.save(doc);
var new_updates = requestStatsTest("couchdb", "database_writes").current;
@@ -243,7 +243,7 @@ couchTests.stats = function(debug) {
var doc = {"_id":"test"};
db.save(doc);
-
+
var deletes = requestStatsTest("couchdb", "database_writes").current;
db.deleteDoc(doc);
var new_deletes = requestStatsTest("couchdb", "database_writes").current;
@@ -275,7 +275,7 @@ couchTests.stats = function(debug) {
var docs = makeDocs(5);
db.bulkSave(docs);
-
+
var new_bulks = requestStatsTest("httpd", "bulk_requests").current;
TEquals(bulks + 1, new_bulks, name);
@@ -378,7 +378,7 @@ couchTests.stats = function(debug) {
var options = {};
options.headers = {"Accept": "application/json"};
var summary = JSON.parse(CouchDB.request("GET", "/_stats", options).responseText);
- var aggregates = ["mean", "min", "max", "stddev",
+ var aggregates = ["mean", "min", "max", "stddev",
"current"];
for(var i in aggregates) {
@@ -386,12 +386,12 @@ couchTests.stats = function(debug) {
}
}
};
-
+
var tests = [
open_databases_tests,
- request_count_tests,
- database_read_count_tests,
- view_read_count_tests,
+ request_count_tests,
+ database_read_count_tests,
+ view_read_count_tests,
http_requests_by_method_tests,
document_write_count_tests,
response_codes_tests,
@@ -404,7 +404,7 @@ couchTests.stats = function(debug) {
tests[testGroup][test](test);
}
};
-
+
function createAndRequestView(db) {
var designDoc = {
_id:"_design/test", // turn off couch.js id escaping?
@@ -414,7 +414,7 @@ couchTests.stats = function(debug) {
}
};
db.save(designDoc);
-
+
db.view("test/all_docs_twice");
}
@@ -422,4 +422,3 @@ couchTests.stats = function(debug) {
return CouchDB.requestStats(module, key, true);
}
}
-
\ No newline at end of file
diff --git a/share/www/script/test/uuids.js b/share/www/script/test/uuids.js
index 6f701884..f4b95898 100644
--- a/share/www/script/test/uuids.js
+++ b/share/www/script/test/uuids.js
@@ -14,20 +14,20 @@ couchTests.uuids = function(debug) {
var testHashBustingHeaders = function(xhr) {
T(xhr.getResponseHeader("Cache-Control").match(/no-cache/));
T(xhr.getResponseHeader("Pragma") == "no-cache");
-
+
var currentTime = new Date();
var expiresHeader = Date.parse(xhr.getResponseHeader("Expires"));
- var dateHeader = Date.parse(xhr.getResponseHeader("Date"));
-
+ var dateHeader = Date.parse(xhr.getResponseHeader("Date"));
+
T(expiresHeader < currentTime);
T(currentTime - dateHeader < 3000);
};
-
+
var db = new CouchDB("test_suite_db");
db.deleteDb();
db.createDb();
if (debug) debugger;
-
+
// a single UUID without an explicit count
var xhr = CouchDB.request("GET", "/_uuids");
T(xhr.status == 200);
@@ -55,7 +55,7 @@ couchTests.uuids = function(debug) {
T(seen[id] === undefined);
seen[id] = 1;
}
-
+
// ensure we return a 405 on POST
xhr = CouchDB.request("POST", "/_uuids?count=1000");
T(xhr.status == 405);
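The uuids.js hunks cover the _uuids endpoint: GET returns {"uuids": [...]}, count controls how many, the response is marked uncacheable (Cache-Control: no-cache, Pragma, an already-expired Expires), and POST is rejected with 405. A short hedged sketch, same local-server assumption as above:

// Sketch only: fetch a batch of server-generated UUIDs.
async function getUuids(count) {
  const resp = await fetch("http://127.0.0.1:5984/_uuids?count=" + count);
  console.log(resp.status, resp.headers.get("Cache-Control")); // 200, no-cache
  const body = await resp.json();                 // {uuids: ["...", ...]}
  return body.uuids;
}

getUuids(10).then(function (ids) { console.log(ids.length, "uuids"); });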
diff --git a/share/www/script/test/view_collation.js b/share/www/script/test/view_collation.js
index f59204c7..09681cc9 100644
--- a/share/www/script/test/view_collation.js
+++ b/share/www/script/test/view_collation.js
@@ -85,14 +85,14 @@ couchTests.view_collation = function(debug) {
rows = db.query(queryFun, null, queryOptions).rows;
T(rows.length == 1 && equals(rows[0].key, values[i]));
}
-
+
// test inclusive_end=true (the default)
// the inclusive_end=true functionality is limited to endkey currently
// if you need inclusive_start=false for startkey, please do implement. ;)
var rows = db.query(queryFun, null, {endkey : "b", inclusive_end:true}).rows;
T(rows[rows.length-1].key == "b")
// descending=true
- var rows = db.query(queryFun, null, {endkey : "b",
+ var rows = db.query(queryFun, null, {endkey : "b",
descending:true, inclusive_end:true}).rows;
T(rows[rows.length-1].key == "b")
@@ -100,13 +100,13 @@ couchTests.view_collation = function(debug) {
var rows = db.query(queryFun, null, {endkey : "b", inclusive_end:false}).rows;
T(rows[rows.length-1].key == "aa")
// descending=true
- var rows = db.query(queryFun, null, {endkey : "b",
+ var rows = db.query(queryFun, null, {endkey : "b",
descending:true, inclusive_end:false}).rows;
T(rows[rows.length-1].key == "B")
-
+
// inclusive_end=false overrides endkey_docid
var rows = db.query(queryFun, null, {
- endkey : "b", endkey_docid: "b",
+ endkey : "b", endkey_docid: "b",
inclusive_end:false}).rows;
T(rows[rows.length-1].key == "aa")
};
diff --git a/share/www/script/test/view_errors.js b/share/www/script/test/view_errors.js
index c9ef6d0c..545115cf 100644
--- a/share/www/script/test/view_errors.js
+++ b/share/www/script/test/view_errors.js
@@ -15,14 +15,14 @@ couchTests.view_errors = function(debug) {
db.deleteDb();
db.createDb();
if (debug) debugger;
-
-
+
+
run_on_modified_server(
[{section: "couchdb",
key: "os_process_timeout",
value: "500"}],
- function() {
+ function() {
var doc = {integer: 1, string: "1", array: [1, 2, 3]};
T(db.save(doc).ok);
@@ -47,37 +47,37 @@ couchTests.view_errors = function(debug) {
emit([doc._id, doc.undef], null);
});
T(results.total_rows == 0);
-
+
// querying a view with invalid params should give a resonable error message
var xhr = CouchDB.request("POST", "/test_suite_db/_temp_view?startkey=foo", {
headers: {"Content-Type": "application/json"},
- body: JSON.stringify({language: "javascript",
+ body: JSON.stringify({language: "javascript",
map : "function(doc){emit(doc.integer)}"
})
});
T(JSON.parse(xhr.responseText).error == "invalid_json");
-
+
// views should ignore Content-Type, like the rest of CouchDB
var xhr = CouchDB.request("POST", "/test_suite_db/_temp_view", {
headers: {"Content-Type": "application/x-www-form-urlencoded"},
- body: JSON.stringify({language: "javascript",
+ body: JSON.stringify({language: "javascript",
map : "function(doc){}"
})
});
T(xhr.status == 200);
-
+
var map = function (doc) {emit(doc.integer, doc.integer);};
-
+
try {
db.query(map, null, {group: true});
T(0 == 1);
} catch(e) {
T(e.error == "query_parse_error");
}
-
+
// reduce=false on map views doesn't work, so group=true will
// never throw for temp reduce views.
-
+
var designDoc = {
_id:"_design/test",
language: "javascript",
@@ -89,7 +89,7 @@ couchTests.view_errors = function(debug) {
}
};
T(db.save(designDoc).ok);
-
+
var designDoc2 = {
_id:"_design/testbig",
language: "javascript",
@@ -100,14 +100,14 @@ couchTests.view_errors = function(debug) {
}
};
T(db.save(designDoc2).ok);
-
+
try {
db.view("test/no_reduce", {group: true});
T(0 == 1);
} catch(e) {
T(e.error == "query_parse_error");
}
-
+
try {
db.view("test/no_reduce", {reduce: true});
T(0 == 1);
@@ -122,7 +122,7 @@ couchTests.view_errors = function(debug) {
} catch(e) {
T(e.error == "query_parse_error");
}
-
+
var designDoc3 = {
_id:"_design/infinite",
language: "javascript",
@@ -138,7 +138,7 @@ couchTests.view_errors = function(debug) {
} catch(e) {
T(e.error == "os_process_error");
}
-
+
// Check error responses for invalid multi-get bodies.
var path = "/test_suite_db/_design/test/_view/no_reduce";
var xhr = CouchDB.request("POST", path, {body: "[]"});
diff --git a/share/www/script/test/view_multi_key_design.js b/share/www/script/test/view_multi_key_design.js
index c2833910..d0ba7374 100644
--- a/share/www/script/test/view_multi_key_design.js
+++ b/share/www/script/test/view_multi_key_design.js
@@ -53,7 +53,7 @@ couchTests.view_multi_key_design = function(debug) {
T(keys.indexOf(rows[i].key) != -1);
T(rows[i].key == rows[i].value);
}
-
+
var reduce = db.view("test/summate",{group:true},keys).rows;
T(reduce.length == keys.length);
for(var i=0; i<reduce.length; i++) {
diff --git a/share/www/script/test/view_multi_key_temp.js b/share/www/script/test/view_multi_key_temp.js
index 545e2520..0e42ce67 100644
--- a/share/www/script/test/view_multi_key_temp.js
+++ b/share/www/script/test/view_multi_key_temp.js
@@ -28,7 +28,7 @@ couchTests.view_multi_key_temp = function(debug) {
T(keys.indexOf(rows[i].key) != -1);
T(rows[i].key == rows[i].value);
}
-
+
var reduce = db.query(queryFun, reduceFun, {group:true}, keys).rows;
for(var i=0; i<reduce.length; i++) {
T(keys.indexOf(reduce[i].key) != -1);
diff --git a/share/www/script/test/view_offsets.js b/share/www/script/test/view_offsets.js
index e32d070b..31dee8e9 100644
--- a/share/www/script/test/view_offsets.js
+++ b/share/www/script/test/view_offsets.js
@@ -11,12 +11,12 @@
// the License.
couchTests.view_offsets = function(debug) {
- if (debug) debugger;
+ if (debug) debugger;
var db = new CouchDB("test_suite_db");
db.deleteDb();
db.createDb();
-
+
var designDoc = {
_id : "_design/test",
views : {
@@ -26,7 +26,7 @@ couchTests.view_offsets = function(debug) {
}
};
T(db.save(designDoc).ok);
-
+
var docs = [
{_id : "a1", letter : "a", number : 1, foo: "bar"},
{_id : "a2", letter : "a", number : 2, foo: "bar"},
@@ -88,8 +88,8 @@ couchTests.view_offsets = function(debug) {
];
db.bulkSave(docs);
- var res = db.view("test/offset", {
- startkey: ["b",4], startkey_docid: "b4", endkey: ["b"],
+ var res = db.view("test/offset", {
+ startkey: ["b",4], startkey_docid: "b4", endkey: ["b"],
limit: 2, descending: true, skip: 1
})
diff --git a/share/www/script/test/view_pagination.js b/share/www/script/test/view_pagination.js
index 21eab888..f6154d36 100644
--- a/share/www/script/test/view_pagination.js
+++ b/share/www/script/test/view_pagination.js
@@ -71,7 +71,7 @@ couchTests.view_pagination = function(debug) {
T(queryResults.rows[j].key == i + j);
}
}
-
+
// test endkey_docid
var queryResults = db.query(function(doc) { emit(null, null);}, null, {
startkey: null,
@@ -79,7 +79,7 @@ couchTests.view_pagination = function(debug) {
endkey: null,
endkey_docid: 40
});
-
+
T(queryResults.rows.length == 35)
T(queryResults.total_rows == docs.length)
T(queryResults.offset == 1)
diff --git a/share/www/style/layout.css b/share/www/style/layout.css
index d6e66d0a..24348f2a 100644
--- a/share/www/style/layout.css
+++ b/share/www/style/layout.css
@@ -30,7 +30,7 @@ h1 { background: #333; border-right: 2px solid #111;
line-height: 1.8em; margin: 0 0 1em; padding: 0 0 0 1em; position: relative;
}
h1 :link, h1 :visited, h1 strong { padding: .4em .5em; }
-h1 :link, h1 :visited {
+h1 :link, h1 :visited {
background: url(../image/path.gif) 100% 50% no-repeat;
color: #bbb; cursor: pointer; padding-right: 2.2em;
text-shadow: #333 2px 2px 1px;
diff --git a/src/couchdb/couch_batch_save.erl b/src/couchdb/couch_batch_save.erl
index 43a6f2dd..c1e5b866 100644
--- a/src/couchdb/couch_batch_save.erl
+++ b/src/couchdb/couch_batch_save.erl
@@ -46,10 +46,10 @@ start_link(BatchSize, BatchInterval) ->
eventually_save_doc(DbName, Doc, UserCtx) ->
% find or create a process for the {DbName, UserCtx} pair
{ok, Pid} = batch_pid_for_db_and_user(DbName, UserCtx),
- % hand it the document
+ % hand it the document
?LOG_DEBUG("sending doc to batch ~p",[Pid]),
ok = send_doc_to_batch(Pid, Doc).
-
+
%%--------------------------------------------------------------------
%% Function: commit_now(DbName) -> committed
%% Description: Commits all docs for the DB. Does not reply until
@@ -72,8 +72,8 @@ commit_now(DbName, UserCtx) ->
%%--------------------------------------------------------------------
% commit_all() ->
% committed = gen_server:call(couch_batch_save, commit_now, infinity).
-%
-
+%
+
%%====================================================================
%% gen_server callbacks
%%====================================================================
@@ -102,9 +102,9 @@ handle_call({make_pid, DbName, UserCtx}, _From, #batch_state{
batch_size=BatchSize,
batch_interval=BatchInterval
}=State) ->
- % Create the pid in a serialized process.
+ % Create the pid in a serialized process.
% We checked before to see that we need the Pid, but the check is outside
- % the gen_server for parellelism. We check again here to ensure we don't
+ % the gen_server for parellelism. We check again here to ensure we don't
% make a duplicate.
Resp = case ets:lookup(couch_batch_save_by_db, {DbName,UserCtx}) of
[{_, Pid}] ->
@@ -114,8 +114,8 @@ handle_call({make_pid, DbName, UserCtx}, _From, #batch_state{
% no match
% start and record the doc collector process
?LOG_DEBUG("making a batch pid ~p",[{DbName, UserCtx}]),
- Pid = spawn_link(fun() ->
- doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, new)
+ Pid = spawn_link(fun() ->
+ doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, new)
end),
true = ets:insert_new(couch_batch_save_by_db, {{DbName, UserCtx}, Pid}),
{ok, Pid}
@@ -168,7 +168,7 @@ code_change(_OldVsn, State, _Extra) ->
commit_user_docs(_DbName, _UserCtx, []) ->
{ok, []};
-
+
commit_user_docs(DbName, UserCtx, Docs) ->
?LOG_INFO("Committing ~p batch docs to ~p",[length(Docs), DbName]),
case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
@@ -194,7 +194,7 @@ commit_every_ms(Pid, BatchInterval) ->
send_commit(Pid) ->
Pid ! {self(), commit},
- receive
+ receive
{Pid, committed} ->
ok
end.
@@ -225,7 +225,7 @@ send_doc_to_batch(Pid, Doc) ->
end.
% the loop that holds documents between commits
-doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, new) ->
+doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, new) ->
% start a process that triggers commit every BatchInterval milliseconds
_IntervalPid = spawn_link(fun() -> commit_every_ms(self(), BatchInterval) end),
doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, []);
@@ -233,7 +233,7 @@ doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, new) ->
doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, Docs) when length(Docs) >= BatchSize->
collector_commit(DbName, UserCtx, BatchInterval, Docs),
exit(normal);
-
+
doc_collector(DbName, UserCtx, {BatchSize, BatchInterval}, Docs) ->
receive
{From, add_doc, Doc} ->
diff --git a/src/couchdb/couch_batch_save_sup.erl b/src/couchdb/couch_batch_save_sup.erl
index 42cf1aba..678e0a89 100644
--- a/src/couchdb/couch_batch_save_sup.erl
+++ b/src/couchdb/couch_batch_save_sup.erl
@@ -27,9 +27,9 @@ init([]) ->
exit(Self, reload_config)
end),
- BatchSize = list_to_integer(couch_config:get("couchdb",
+ BatchSize = list_to_integer(couch_config:get("couchdb",
"batch_save_size","1000")),
- BatchInterval = list_to_integer(couch_config:get("couchdb",
+ BatchInterval = list_to_integer(couch_config:get("couchdb",
"batch_save_interval","1000")),
Batch = {batch, {couch_batch_save, start_link, [BatchSize, BatchInterval]},
diff --git a/src/couchdb/couch_btree.erl b/src/couchdb/couch_btree.erl
index c9079781..8df3cd66 100644
--- a/src/couchdb/couch_btree.erl
+++ b/src/couchdb/couch_btree.erl
@@ -39,7 +39,7 @@ less(#btree{less=Less}, A, B) ->
% pass in 'nil' for State if a new Btree.
open(State, Fd) ->
{ok, #btree{root=State, fd=Fd}}.
-
+
set_options(Bt, []) ->
Bt;
set_options(Bt, [{split, Extract}|Rest]) ->
@@ -68,7 +68,7 @@ final_reduce(Reduce, {[], Reductions}) ->
final_reduce(Reduce, {KVs, Reductions}) ->
Red = Reduce(reduce, KVs),
final_reduce(Reduce, {[], [Red | Reductions]}).
-
+
fold_reduce(Bt, StartKey, EndKey, KeyGroupFun, Fun, Acc) ->
fold_reduce(Bt, fwd, StartKey, EndKey, KeyGroupFun, Fun, Acc).
@@ -189,7 +189,7 @@ lookup(Bt, {Pointer, _Reds}, Keys) ->
lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
{ok, lists:reverse(Output)};
-
+
lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when size(NodeTuple) < LowerBound ->
{ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
@@ -238,7 +238,7 @@ complete_root(Bt, KPs) ->
{ok, ResultKeyPointers, Bt2} = write_node(Bt, kp_node, KPs),
complete_root(Bt2, ResultKeyPointers).
-%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
+%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
% It is inaccurate as it does not account for compression when blocks are
% written. Plus with the "case size(term_to_binary(InList)) of" code it's
% probably really inefficient.
@@ -277,7 +277,7 @@ modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
{NodeType, NodeList} = get_node(Bt, Pointer)
end,
NodeTuple = list_to_tuple(NodeList),
-
+
{ok, NewNodeList, QueryOutput2, Bt2} =
case NodeType of
kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
@@ -320,7 +320,7 @@ write_node(Bt, NodeType, NodeList) ->
ANodeList <- NodeListList
],
{ok, ResultList, Bt}.
-
+
modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
modify_node(Bt, nil, Actions, QueryOutput);
modify_kpnode(Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
@@ -350,15 +350,15 @@ modify_kpnode(Bt, NodeTuple, LowerBound,
LowerBound, N - 1, ResultNode)),
modify_kpnode(Bt2, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
end.
-
+
bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
Tail;
bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]).
-
+
bounded_tuple_to_list(Tuple, Start, End, Tail) ->
bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
-
+
bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End ->
lists:reverse(Acc, Tail);
bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) ->
@@ -426,10 +426,10 @@ modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} |
end.
-reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _KeyEnd, GroupedKey, GroupedKVsAcc,
+reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _KeyEnd, GroupedKey, GroupedKVsAcc,
GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
- {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_node(Bt, Dir, {P, _R}, KeyStart, KeyEnd, GroupedKey, GroupedKVsAcc,
+ {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
+reduce_stream_node(Bt, Dir, {P, _R}, KeyStart, KeyEnd, GroupedKey, GroupedKVsAcc,
GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
case get_node(Bt, P) of
{kp_node, NodeList} ->
@@ -475,7 +475,7 @@ reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc,
reduce_stream_kv_node2(Bt, RestKVs, Key,
[assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
_ ->
-
+
case KeyGroupFun(GroupedKey, Key) of
true ->
reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
@@ -531,7 +531,7 @@ reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, KeyEnd,
GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
{Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
KeyGroupFun(GroupedKey, Key) end, NodeList),
- {GroupedNodes, UngroupedNodes} =
+ {GroupedNodes, UngroupedNodes} =
case Grouped0 of
[] ->
{Grouped0, Ungrouped0};
@@ -542,7 +542,7 @@ reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, KeyEnd,
GroupedReds = [R || {_, {_,R}} <- GroupedNodes],
case UngroupedNodes of
[{_Key, NodeInfo}|RestNodes] ->
- {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
+ {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, KeyEnd, GroupedKey,
GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, KeyEnd, GroupedKey2,
diff --git a/src/couchdb/couch_config.erl b/src/couchdb/couch_config.erl
index 5b93c4ec..a182b80c 100644
--- a/src/couchdb/couch_config.erl
+++ b/src/couchdb/couch_config.erl
@@ -54,7 +54,7 @@ get(Section) ->
get(Section, Key) ->
?MODULE:get(Section, Key, undefined).
-
+
get(Section, Key, Default) when is_binary(Section) and is_binary(Key) ->
?MODULE:get(?b2l(Section), ?b2l(Key), Default);
get(Section, Key, Default) ->
@@ -194,9 +194,9 @@ parse_ini_file(IniFile) ->
{ok, [ValueName|LineValues]} -> % yeehaw, got a line!
RemainingLine = couch_util:implode(LineValues, "="),
% removes comments
- {ok, [LineValue | _Rest]} =
+ {ok, [LineValue | _Rest]} =
regexp:split(RemainingLine, " ;|\t;"),
- {AccSectionName,
+ {AccSectionName,
[{{AccSectionName, ValueName}, LineValue} | AccValues]}
end
end
diff --git a/src/couchdb/couch_config_writer.erl b/src/couchdb/couch_config_writer.erl
index e47b9052..9861f842 100644
--- a/src/couchdb/couch_config_writer.erl
+++ b/src/couchdb/couch_config_writer.erl
@@ -54,7 +54,7 @@ save_to_file({{Section, Option}, Value}, File) ->
_ ->
NewFileContents2
end,
-
+
ok = file:write_file(File, list_to_binary(NewFileContents)),
ok.
@@ -66,7 +66,7 @@ save_loop({{Section, Option}, Value}, [Line|Rest], OldCurrentSection, Contents,
NewCurrentSection = parse_module(Line, OldCurrentSection),
% if the current Section is the one we want to change, try to match
% each line with the Option
- NewContents =
+ NewContents =
case NewCurrentSection of
Section ->
case OldCurrentSection of
@@ -87,21 +87,21 @@ save_loop({{Section, Option}, Value}, [Line|Rest], OldCurrentSection, Contents,
end;
_ -> % we got into a new [section]
{NewLine, DoneOptions2} = append_var_to_section(
- {{Section, Option}, Value},
- Line,
- OldCurrentSection,
+ {{Section, Option}, Value},
+ Line,
+ OldCurrentSection,
DoneOptions),
NewLine
end;
_ -> % we are reading [NewCurrentSection]
{NewLine, DoneOptions2} = append_var_to_section(
- {{Section, Option}, Value},
- Line,
- OldCurrentSection,
+ {{Section, Option}, Value},
+ Line,
+ OldCurrentSection,
DoneOptions),
NewLine
end,
- % clumsy way to only append a newline character if the line is not empty. We need this to
+ % clumsy way to only append a newline character if the line is not empty. We need this to
% avoid having a newline inserted at the top of the target file each time we save it.
Contents2 = case Contents of "" -> ""; _ -> Contents ++ "\n" end,
% go to next line
@@ -110,7 +110,7 @@ save_loop({{Section, Option}, Value}, [Line|Rest], OldCurrentSection, Contents,
save_loop({{Section, Option}, Value}, [], OldSection, NewFileContents, DoneOptions) ->
case lists:member(Option, DoneOptions) of
% append Deferred Option
- false when Section == OldSection ->
+ false when Section == OldSection ->
{NewFileContents ++ "\n" ++ Option ++ " = " ++ Value ++ "\n", DoneOptions};
% we're out of new lines, just return the new file's contents
_ -> {NewFileContents, DoneOptions}
@@ -131,7 +131,7 @@ append_var_to_section({{Section, Option}, Value}, Line, OldCurrentSection, DoneO
_ ->
{Line, DoneOptions}
end.
-
+
%% @spec parse_module(Line::string(), OldSection::string()) -> string()
%% @doc Tries to match a line against a pattern specifying a ini module or
%% section ("[Section]"). Returns OldSection if no match is found.
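
A minimal shell sketch of the writer API shown in this hunk, not part of the patch: save_to_file/2 takes a {{Section, Option}, Value} triple of plain strings, rewrites the target ini file, and returns ok. The file path below is hypothetical.

    1> couch_config_writer:save_to_file({{"httpd", "port"}, "5985"},
                                        "/usr/local/etc/couchdb/local.ini").
    ok
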
diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl
index 2f0fa847..33b4d542 100644
--- a/src/couchdb/couch_db.erl
+++ b/src/couchdb/couch_db.erl
@@ -140,7 +140,7 @@ get_doc_info(Db, Id) ->
Else ->
Else
end.
-
+
% returns {ok, DocInfo} or not_found
get_full_doc_info(Db, Id) ->
[Result] = get_full_doc_infos(Db, [Id]),
@@ -154,13 +154,13 @@ increment_update_seq(#db{update_pid=UpdatePid}) ->
purge_docs(#db{update_pid=UpdatePid}, IdsRevs) ->
gen_server:call(UpdatePid, {purge_docs, IdsRevs}).
-
+
get_committed_update_seq(#db{committed_update_seq=Seq}) ->
Seq.
get_update_seq(#db{update_seq=Seq})->
Seq.
-
+
get_purge_seq(#db{header=#db_header{purge_seq=PurgeSeq}})->
PurgeSeq.
@@ -230,7 +230,7 @@ set_revs_limit(_Db, _Limit) ->
name(#db{name=Name}) ->
Name.
-
+
update_doc(Db, Doc, Options) ->
case update_docs(Db, [Doc], Options) of
{ok, [{ok, NewRev}]} ->
@@ -241,7 +241,7 @@ update_doc(Db, Doc, Options) ->
update_docs(Db, Docs) ->
update_docs(Db, Docs, []).
-
+
% group_alike_docs groups the sorted documents into sublist buckets, by id.
% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
group_alike_docs(Docs) ->
@@ -375,7 +375,7 @@ update_docs(#db{update_pid=UpdatePid}=Db, Docs, Options) ->
prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
- Errors2 = [{{Id, {Pos, Rev}}, Error} ||
+ Errors2 = [{{Id, {Pos, Rev}}, Error} ||
{#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
{lists:reverse(AccPrepped), lists:reverse(Errors2)};
prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
@@ -406,9 +406,9 @@ prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldI
fun(#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, {AccValidated, AccErrors2}) ->
case dict:find({Pos, RevId}, LeafRevsFullDict) of
{ok, {Start, Path}} ->
- % our unflushed doc is a leaf node. Go back on the path
+ % our unflushed doc is a leaf node. Go back on the path
% to find the previous rev that's on disk.
- PrevRevResult =
+ PrevRevResult =
case couch_doc:has_stubs(Doc) of
true ->
[_PrevRevFull | [PrevRevFull | _]=PrevPath] = Path,
@@ -420,14 +420,14 @@ prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldI
Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
{ok, Doc2, fun() -> DiskDoc end}
end;
- false ->
+ false ->
{ok, Doc,
fun() ->
make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
end}
end,
case PrevRevResult of
- {ok, NewDoc, LoadPrevRevFun} ->
+ {ok, NewDoc, LoadPrevRevFun} ->
case validate_doc_update(Db, NewDoc, LoadPrevRevFun) of
ok ->
{[NewDoc | AccValidated], AccErrors2};
@@ -450,7 +450,7 @@ prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldI
update_docs(Db, Docs, Options, replicated_changes) ->
couch_stats_collector:increment({couchdb, database_writes}),
DocBuckets = group_alike_docs(Docs),
-
+
case (Db#db.validate_doc_funs /= []) orelse
lists:any(
fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
@@ -459,7 +459,7 @@ update_docs(Db, Docs, Options, replicated_changes) ->
true ->
Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
ExistingDocs = get_full_doc_infos(Db, Ids),
-
+
{DocBuckets2, DocErrors} =
prep_and_validate_replicated_updates(Db, DocBuckets, ExistingDocs, [], []),
DocBuckets3 = [Bucket || [_|_]=Bucket <- DocBuckets2]; % remove empty buckets
@@ -469,7 +469,7 @@ update_docs(Db, Docs, Options, replicated_changes) ->
end,
{ok, []} = write_and_commit(Db, DocBuckets3, [merge_conflicts | Options]),
{ok, DocErrors};
-
+
update_docs(Db, Docs, Options, interactive_edit) ->
couch_stats_collector:increment({couchdb, database_writes}),
AllOrNothing = lists:member(all_or_nothing, Options),
@@ -485,7 +485,7 @@ update_docs(Db, Docs, Options, interactive_edit) ->
end
end, Docs),
DocBuckets = group_alike_docs(Docs2),
-
+
case (Db#db.validate_doc_funs /= []) orelse
lists:any(
fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) ->
@@ -497,16 +497,16 @@ update_docs(Db, Docs, Options, interactive_edit) ->
% lookup the doc by id and get the most recent
Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
ExistingDocInfos = get_full_doc_infos(Db, Ids),
-
+
{DocBucketsPrepped, Failures} =
case AllOrNothing of
true ->
- prep_and_validate_replicated_updates(Db, DocBuckets,
+ prep_and_validate_replicated_updates(Db, DocBuckets,
ExistingDocInfos, [], []);
false ->
prep_and_validate_updates(Db, DocBuckets, ExistingDocInfos, [], [])
end,
-
+
% strip out any empty buckets
DocBuckets2 = [Bucket || [_|_] = Bucket <- DocBucketsPrepped];
false ->
@@ -517,7 +517,7 @@ update_docs(Db, Docs, Options, interactive_edit) ->
if (AllOrNothing) and (Failures /= []) ->
{aborted, Failures};
true ->
- Options2 = if AllOrNothing -> [merge_conflicts];
+ Options2 = if AllOrNothing -> [merge_conflicts];
true -> [] end ++ Options,
{ok, CommitFailures} = write_and_commit(Db, DocBuckets2, Options2),
FailDict = dict:from_list(CommitFailures ++ Failures),
@@ -575,24 +575,24 @@ doc_flush_binaries(Doc, Fd) ->
flush_binary(Fd, {Fd0, StreamPointer, Len}) when Fd0 == Fd ->
% already written to our file, nothing to write
{Fd, StreamPointer, Len};
-
+
flush_binary(Fd, {OtherFd, StreamPointer, Len}) when is_tuple(StreamPointer) ->
- {NewStreamData, Len} =
+ {NewStreamData, Len} =
couch_stream:old_copy_to_new_stream(OtherFd, StreamPointer, Len, Fd),
{Fd, NewStreamData, Len};
flush_binary(Fd, {OtherFd, StreamPointer, Len}) ->
- {NewStreamData, Len} =
+ {NewStreamData, Len} =
couch_stream:copy_to_new_stream(OtherFd, StreamPointer, Fd),
{Fd, NewStreamData, Len};
-
+
flush_binary(Fd, Bin) when is_binary(Bin) ->
with_stream(Fd, fun(OutputStream) ->
couch_stream:write(OutputStream, Bin)
end);
-
+
flush_binary(Fd, {StreamFun, undefined}) when is_function(StreamFun) ->
- with_stream(Fd, fun(OutputStream) ->
+ with_stream(Fd, fun(OutputStream) ->
% StreamFun(MaxChunkSize, WriterFun) must call WriterFun
% once for each chunk of the attachment,
StreamFun(4096,
@@ -606,19 +606,19 @@ flush_binary(Fd, {StreamFun, undefined}) when is_function(StreamFun) ->
couch_stream:write(OutputStream, Bin)
end, ok)
end);
-
+
flush_binary(Fd, {Fun, Len}) when is_function(Fun) ->
- with_stream(Fd, fun(OutputStream) ->
+ with_stream(Fd, fun(OutputStream) ->
write_streamed_attachment(OutputStream, Fun, Len)
end).
-
+
with_stream(Fd, Fun) ->
{ok, OutputStream} = couch_stream:open(Fd),
Fun(OutputStream),
{StreamInfo, Len} = couch_stream:close(OutputStream),
{Fd, StreamInfo, Len}.
-
+
write_streamed_attachment(_Stream, _F, 0) ->
ok;
write_streamed_attachment(Stream, F, LenLeft) ->
@@ -656,14 +656,14 @@ changes_since(Db, Style, StartSeq, Fun, Acc) ->
Infos = [DocInfo];
all_docs ->
% make each rev it's own doc info
- Infos = [DocInfo#doc_info{revs=[RevInfo]} ||
+ Infos = [DocInfo#doc_info{revs=[RevInfo]} ||
#rev_info{seq=RevSeq}=RevInfo <- Revs, StartSeq < RevSeq]
end,
Fun(Infos, Acc2)
end, Acc).
count_changes_since(Db, SinceSeq) ->
- {ok, Changes} =
+ {ok, Changes} =
couch_btree:fold_reduce(Db#db.docinfo_by_seq_btree,
SinceSeq + 1, % startkey
ok, % endkey
@@ -673,7 +673,7 @@ count_changes_since(Db, SinceSeq) ->
end,
0),
Changes.
-
+
enum_docs_since(Db, SinceSeq, Direction, InFun, Acc) ->
couch_btree:fold(Db#db.docinfo_by_seq_btree, SinceSeq + 1, Direction, InFun, Acc).
@@ -698,13 +698,13 @@ init({DbName, Filepath, Fd, Options}) ->
terminate(Reason, _Db) ->
couch_util:terminate_linked(Reason),
ok.
-
+
handle_call({open_ref_count, OpenerPid}, _, #db{fd_ref_counter=RefCntr}=Db) ->
ok = couch_ref_counter:add(RefCntr, OpenerPid),
{reply, {ok, Db}, Db};
-handle_call(is_idle, _From, #db{fd_ref_counter=RefCntr, compactor_pid=Compact,
+handle_call(is_idle, _From, #db{fd_ref_counter=RefCntr, compactor_pid=Compact,
waiting_delayed_commit=Delay}=Db) ->
- % Idle means no referrers. Unless in the middle of a compaction file switch,
+ % Idle means no referrers. Unless in the middle of a compaction file switch,
% there are always at least 2 referrers, couch_db_updater and us.
{reply, (Delay == nil) and (Compact == nil) and (couch_ref_counter:count(RefCntr) == 2), Db};
handle_call({db_updated, NewDb}, _From, #db{fd_ref_counter=OldRefCntr}) ->
@@ -782,7 +782,7 @@ open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) ->
Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
{ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}};
open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
- #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
+ #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
DocInfo = couch_doc:to_doc_info(FullDocInfo),
{[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
@@ -799,11 +799,11 @@ doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTre
case lists:member(revs_info, Options) of
false -> [];
true ->
- {[{Pos, RevPath}],[]} =
+ {[{Pos, RevPath}],[]} =
couch_key_tree:get_full_key_paths(RevTree, [Rev]),
-
+
[{revs_info, Pos, lists:map(
- fun({Rev1, {true, _Sp, _UpdateSeq}}) ->
+ fun({Rev1, {true, _Sp, _UpdateSeq}}) ->
{Rev1, deleted};
({Rev1, {false, _Sp, _UpdateSeq}}) ->
{Rev1, available};
@@ -849,7 +849,7 @@ doc_to_tree_simple(Doc, [RevId]) ->
doc_to_tree_simple(Doc, [RevId | Rest]) ->
[{RevId, ?REV_MISSING, doc_to_tree_simple(Doc, Rest)}].
-
+
make_doc(#db{fd=Fd}, Id, Deleted, Bp, RevisionPath) ->
{BodyData, BinValues} =
case Bp of
@@ -867,6 +867,6 @@ make_doc(#db{fd=Fd}, Id, Deleted, Bp, RevisionPath) ->
attachments = BinValues,
deleted = Deleted
}.
-
-
-
+
+
+
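
The couch_db.erl hunks above touch the public single-document and changes APIs. Below is a minimal sketch of how they compose, not part of the patch; it assumes an already opened #db{} handle and uses main_only, which reports only the winning revision per document (all_docs would emit one entry per leaf revision, as the changes_since hunk shows):

    -module(db_api_sketch).
    -export([save_and_list_changes/2]).
    -include("couch_db.hrl").

    %% update_doc/3 wraps update_docs/3 for a single #doc{}; changes_since/5
    %% folds lists of #doc_info{} records from StartSeq onward and returns
    %% {ok, FinalAcc}.
    save_and_list_changes(Db, Doc) ->
        {ok, _NewRev} = couch_db:update_doc(Db, Doc, []),
        couch_db:changes_since(Db, main_only, 0,
            fun(DocInfos, Acc) ->
                Ids = [Id || #doc_info{id = Id} <- DocInfos],
                {ok, Acc ++ Ids}
            end, []).
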
diff --git a/src/couchdb/couch_db.hrl b/src/couchdb/couch_db.hrl
index 905b489b..abb301eb 100644
--- a/src/couchdb/couch_db.hrl
+++ b/src/couchdb/couch_db.hrl
@@ -21,7 +21,7 @@
-define(l2b(V), list_to_binary(V)).
-define(DEFAULT_ATTACHMENT_CONTENT_TYPE, <<"application/octet-stream">>).
-
+
-define(LOG_DEBUG(Format, Args),
case couch_log:debug_on() of
true -> error_logger:info_report(couch_debug, {Format, Args});
@@ -69,7 +69,7 @@
req_body = undefined,
design_url_handlers
}).
-
+
-record(doc,
{
@@ -91,7 +91,7 @@
% couch_db:open_doc(Db, Id, Options).
meta = []
}).
-
+
-record(user_ctx,
@@ -112,7 +112,7 @@
-define(LATEST_DISK_VERSION, 3).
-record(db_header,
- {disk_version = ?LATEST_DISK_VERSION,
+ {disk_version = ?LATEST_DISK_VERSION,
update_seq = 0,
unused = 0,
fulldocinfo_by_id_btree_state = nil,
diff --git a/src/couchdb/couch_db_update_notifier_sup.erl b/src/couchdb/couch_db_update_notifier_sup.erl
index 69d6b1b0..76400637 100644
--- a/src/couchdb/couch_db_update_notifier_sup.erl
+++ b/src/couchdb/couch_db_update_notifier_sup.erl
@@ -33,11 +33,11 @@ init([]) ->
ok = couch_config:register(
fun("update_notification", Key, Value) -> reload_config(Key, Value) end
),
-
+
UpdateNotifierExes = couch_config:get("update_notification"),
-
+
{ok,
- {{one_for_one, 10, 3600},
+ {{one_for_one, 10, 3600},
lists:map(fun({Name, UpdateNotifierExe}) ->
{Name,
{couch_db_update_notifier, start_link, [UpdateNotifierExe]},
@@ -47,7 +47,7 @@ init([]) ->
[couch_db_update_notifier]}
end, UpdateNotifierExes)}}.
-%% @doc when update_notification configuration changes, terminate the process
+%% @doc when update_notification configuration changes, terminate the process
%% for that notifier and start a new one with the updated config
reload_config(Id, Exe) ->
ChildSpec = {
diff --git a/src/couchdb/couch_db_updater.erl b/src/couchdb/couch_db_updater.erl
index 6fef29eb..b715a3bf 100644
--- a/src/couchdb/couch_db_updater.erl
+++ b/src/couchdb/couch_db_updater.erl
@@ -32,7 +32,7 @@ init({MainPid, DbName, Filepath, Fd, Options}) ->
ok = couch_file:upgrade_old_header(Fd, <<$g, $m, $k, 0>>), % 09 UPGRADE CODE
{ok, Header} = couch_file:read_header(Fd)
end,
-
+
Db = init_db(DbName, Filepath, Fd, Header),
Db2 = refresh_validate_doc_funs(Db),
{ok, Db2#db{main_pid=MainPid}}.
@@ -90,7 +90,7 @@ handle_call({purge_docs, IdRevs}, _From, Db) ->
} = Db,
DocLookups = couch_btree:lookup(DocInfoByIdBTree,
[Id || {Id, _Revs} <- IdRevs]),
-
+
NewDocInfos = lists:zipwith(
fun({_Id, Revs}, {ok, #full_doc_info{rev_tree=Tree}=FullDocInfo}) ->
case couch_key_tree:remove_leafs(Tree, Revs) of
@@ -103,17 +103,17 @@ handle_call({purge_docs, IdRevs}, _From, Db) ->
nil
end,
IdRevs, DocLookups),
-
+
SeqsToRemove = [Seq
|| {#full_doc_info{update_seq=Seq},_} <- NewDocInfos],
-
+
FullDocInfoToUpdate = [FullInfo
|| {#full_doc_info{rev_tree=Tree}=FullInfo,_}
<- NewDocInfos, Tree /= []],
-
+
IdRevsPurged = [{Id, Revs}
|| {#full_doc_info{id=Id}, Revs} <- NewDocInfos],
-
+
{DocInfoToUpdate, NewSeq} = lists:mapfoldl(
fun(#full_doc_info{rev_tree=Tree}=FullInfo, SeqAcc) ->
Tree2 = couch_key_tree:map_leafs( fun(RevInfo) ->
@@ -122,27 +122,27 @@ handle_call({purge_docs, IdRevs}, _From, Db) ->
{couch_doc:to_doc_info(FullInfo#full_doc_info{rev_tree=Tree2}),
SeqAcc + 1}
end, LastSeq, FullDocInfoToUpdate),
-
+
IdsToRemove = [Id || {#full_doc_info{id=Id,rev_tree=[]},_}
<- NewDocInfos],
-
+
{ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree,
DocInfoToUpdate, SeqsToRemove),
{ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree,
FullDocInfoToUpdate, IdsToRemove),
{ok, Pointer} = couch_file:append_term(Fd, IdRevsPurged),
-
+
Db2 = commit_data(
Db#db{
fulldocinfo_by_id_btree = DocInfoByIdBTree2,
docinfo_by_seq_btree = DocInfoBySeqBTree2,
update_seq = NewSeq + 1,
header=Header#db_header{purge_seq=PurgeSeq+1, purged_docs=Pointer}}),
-
+
ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
couch_db_update_notifier:notify({updated, Db#db.name}),
{reply, {ok, Db2#db.update_seq, IdRevsPurged}, Db2}.
-
+
handle_cast(start_compact, Db) ->
case Db#db.compactor_pid of
@@ -168,10 +168,10 @@ handle_cast({compact_done, CompactFilepath}, #db{filepath=Filepath}=Db) ->
{ok, LocalDocs} = couch_btree:foldl(Db#db.local_docs_btree,
fun(Value, _Offset, Acc) -> {ok, [Value | Acc]} end, []),
{ok, NewLocalBtree} = couch_btree:add(NewDb#db.local_docs_btree, LocalDocs),
-
+
NewDb2 = commit_data( NewDb#db{local_docs_btree=NewLocalBtree,
main_pid = Db#db.main_pid,filepath = Filepath}),
-
+
?LOG_DEBUG("CouchDB swapping files ~s and ~s.",
[Filepath, CompactFilepath]),
file:delete(Filepath),
@@ -198,25 +198,25 @@ code_change(_OldVsn, State, _Extra) ->
btree_by_seq_split(#doc_info{id=Id, high_seq=KeySeq, revs=Revs}) ->
- RevInfos = [{Rev, Seq, Bp} ||
+ RevInfos = [{Rev, Seq, Bp} ||
#rev_info{rev=Rev,seq=Seq,deleted=false,body_sp=Bp} <- Revs],
- DeletedRevInfos = [{Rev, Seq, Bp} ||
+ DeletedRevInfos = [{Rev, Seq, Bp} ||
#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp=Bp} <- Revs],
{KeySeq,{Id, RevInfos, DeletedRevInfos}}.
-
+
btree_by_seq_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
#doc_info{
id = Id,
high_seq=KeySeq,
- revs =
- [#rev_info{rev=Rev,seq=Seq,deleted=false,body_sp = Bp} ||
- {Rev, Seq, Bp} <- RevInfos] ++
- [#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp = Bp} ||
+ revs =
+ [#rev_info{rev=Rev,seq=Seq,deleted=false,body_sp = Bp} ||
+ {Rev, Seq, Bp} <- RevInfos] ++
+ [#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp = Bp} ||
{Rev, Seq, Bp} <- DeletedRevInfos]};
btree_by_seq_join(KeySeq,{Id, Rev, Bp, Conflicts, DelConflicts, Deleted}) ->
% 09 UPGRADE CODE
% this is the 0.9.0 and earlier by_seq record. It's missing the body pointers
- % and individual seq nums for conflicts that are currently in the index,
+ % and individual seq nums for conflicts that are currently in the index,
% meaning the filtered _changes api will not work except for on main docs.
% Simply compact a 0.9.0 database to upgrade the index.
#doc_info{
@@ -252,7 +252,7 @@ btree_by_id_join(Id, {HighSeq, Deleted, DiskTree}) ->
% This is fixed by compacting the database.
{IsDeleted, BodyPointer, HighSeq}
end, DiskTree),
-
+
#full_doc_info{id=Id, update_seq=HighSeq, deleted=Deleted==1, rev_tree=Tree}.
btree_by_id_reduce(reduce, FullDocInfos) ->
@@ -262,7 +262,7 @@ btree_by_id_reduce(reduce, FullDocInfos) ->
btree_by_id_reduce(rereduce, Reds) ->
{lists:sum([Count || {Count,_} <- Reds]),
lists:sum([DelCount || {_, DelCount} <- Reds])}.
-
+
btree_by_seq_reduce(reduce, DocInfos) ->
% count the number of documents
length(DocInfos);
@@ -293,16 +293,16 @@ init_db(DbName, Filepath, Fd, Header0) ->
_ -> throw({database_disk_version_error, "Incorrect disk header version"})
end,
Less = fun less_docid/2,
-
+
{ok, FsyncOptions} = couch_util:parse_term(
- couch_config:get("couchdb", "fsync_options",
+ couch_config:get("couchdb", "fsync_options",
"[before_header, after_header, on_file_open]")),
-
+
case lists:member(on_file_open, FsyncOptions) of
true -> ok = couch_file:sync(Fd);
_ -> ok
end,
-
+
{ok, IdBtree} = couch_btree:open(Header#db_header.fulldocinfo_by_id_btree_state, Fd,
[{split, fun(X) -> btree_by_id_split(X) end},
{join, fun(X,Y) -> btree_by_id_join(X,Y) end},
@@ -347,7 +347,7 @@ init_db(DbName, Filepath, Fd, Header0) ->
close_db(#db{fd_ref_counter = RefCntr}) ->
couch_ref_counter:drop(RefCntr).
-
+
refresh_validate_doc_funs(Db) ->
{ok, DesignDocs} = couch_db:get_design_docs(Db),
@@ -424,7 +424,7 @@ merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList],
0 -> AccRemoveSeqs;
_ -> [OldSeq | AccRemoveSeqs]
end,
- merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo,
+ merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo,
[NewInfo|AccNewInfos], RemoveSeqs, NewConflicts, AccSeq+1)
end.
@@ -443,7 +443,7 @@ new_index_entries([FullDocInfo|RestInfos], AccById, AccBySeq) ->
stem_full_doc_infos(#db{revs_limit=Limit}, DocInfos) ->
[Info#full_doc_info{rev_tree=couch_key_tree:stem(Tree, Limit)} ||
#full_doc_info{rev_tree=Tree}=Info <- DocInfos].
-
+
update_docs_int(Db, DocsList, Options) ->
#db{
@@ -461,9 +461,9 @@ update_docs_int(Db, DocsList, Options) ->
{[Docs | DocsListAcc], NonRepDocsAcc}
end
end, {[], []}, DocsList),
-
- Ids = [Id || [#doc{id=Id}|_] <- DocsList2],
-
+
+ Ids = [Id || [#doc{id=Id}|_] <- DocsList2],
+
% lookup up the old documents, if they exist.
OldDocLookups = couch_btree:lookup(DocInfoByIdBTree, Ids),
OldDocInfos = lists:zipwith(
@@ -473,23 +473,23 @@ update_docs_int(Db, DocsList, Options) ->
#full_doc_info{id=Id}
end,
Ids, OldDocLookups),
-
+
% Merge the new docs into the revision trees.
{ok, NewDocInfos0, RemoveSeqs, Conflicts, NewSeq} = merge_rev_trees(
lists:member(merge_conflicts, Options),
DocsList2, OldDocInfos, [], [], [], LastSeq),
-
+
NewFullDocInfos = stem_full_doc_infos(Db, NewDocInfos0),
-
+
% All documents are now ready to write.
-
+
{ok, LocalConflicts, Db2} = update_local_docs(Db, NonRepDocs),
-
+
% Write out the document summaries (the bodies are stored in the nodes of
% the trees, the attachments are already written to disk)
{ok, FlushedFullDocInfos} = flush_trees(Db2, NewFullDocInfos, []),
-
- {IndexFullDocInfos, IndexDocInfos} =
+
+ {IndexFullDocInfos, IndexDocInfos} =
new_index_entries(FlushedFullDocInfos, [], []),
% and the indexes
@@ -500,7 +500,7 @@ update_docs_int(Db, DocsList, Options) ->
fulldocinfo_by_id_btree = DocInfoByIdBTree2,
docinfo_by_seq_btree = DocInfoBySeqBTree2,
update_seq = NewSeq},
-
+
% Check if we just updated any design documents, and update the validation
% funs if we did.
case [1 || <<"_design/",_/binary>> <- Ids] of
@@ -509,8 +509,8 @@ update_docs_int(Db, DocsList, Options) ->
_ ->
Db4 = refresh_validate_doc_funs(Db3)
end,
-
- {ok, LocalConflicts ++ Conflicts,
+
+ {ok, LocalConflicts ++ Conflicts,
commit_data(Db4, not lists:member(full_commit, Options))}.
@@ -534,13 +534,13 @@ update_local_docs(#db{local_docs_btree=Btree}=Db, Docs) ->
false ->
{conflict, {Id, {0, RevStr}}}
end
-
+
end, Docs, OldDocLookups),
BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
BtreeIdsUpdate = [ByIdDocInfo || {update, ByIdDocInfo} <- BtreeEntries],
Conflicts = [{conflict, IdRev} || {conflict, IdRev} <- BtreeEntries],
-
+
{ok, Btree2} =
couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),
@@ -580,14 +580,14 @@ commit_data(#db{fd=Fd,header=OldHeader,fsync_options=FsyncOptions}=Db, Delay) ->
true -> ok = couch_file:sync(Fd);
_ -> ok
end,
-
+
ok = couch_file:write_header(Fd, Header),
-
+
case lists:member(after_header, FsyncOptions) of
true -> ok = couch_file:sync(Fd);
_ -> ok
end,
-
+
Db#db{waiting_delayed_commit=nil,
header=Header,
committed_update_seq=Db#db.update_seq}
@@ -622,11 +622,11 @@ copy_rev_tree_attachments(SrcFd, DestFd, [{RevId, _, SubTree} | RestTree]) ->
% inner node, only copy info/data from leaf nodes
[{RevId, ?REV_MISSING, copy_rev_tree_attachments(SrcFd, DestFd, SubTree)} | copy_rev_tree_attachments(SrcFd, DestFd, RestTree)].
-
+
copy_docs(#db{fd=SrcFd}=Db, #db{fd=DestFd}=NewDb, InfoBySeq, Retry) ->
Ids = [Id || #doc_info{id=Id} <- InfoBySeq],
LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids),
-
+
% write out the attachments
NewFullDocInfos0 = lists:map(
fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) ->
@@ -656,7 +656,7 @@ copy_docs(#db{fd=SrcFd}=Db, #db{fd=DestFd}=NewDb, InfoBySeq, Retry) ->
Existing = couch_btree:lookup(NewDb#db.fulldocinfo_by_id_btree, Ids),
[Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
end,
-
+
{ok, DocInfoBTree} = couch_btree:add_remove(
NewDb#db.docinfo_by_seq_btree, NewDocInfos, RemoveSeqs),
{ok, FullDocInfoBTree} = couch_btree:add_remove(
@@ -665,14 +665,14 @@ copy_docs(#db{fd=SrcFd}=Db, #db{fd=DestFd}=NewDb, InfoBySeq, Retry) ->
docinfo_by_seq_btree=DocInfoBTree}.
-
+
copy_compact(Db, NewDb0, Retry) ->
FsyncOptions = [Op || Op <- NewDb0#db.fsync_options, Op == before_header],
NewDb = NewDb0#db{fsync_options=FsyncOptions},
TotalChanges = couch_db:count_changes_since(Db, NewDb#db.update_seq),
EnumBySeqFun =
fun(#doc_info{high_seq=Seq}=DocInfo, _Offset, {AccNewDb, AccUncopied, TotalCopied}) ->
- couch_task_status:update("Copied ~p of ~p changes (~p%)",
+ couch_task_status:update("Copied ~p of ~p changes (~p%)",
[TotalCopied, TotalChanges, (TotalCopied*100) div TotalChanges]),
if TotalCopied rem 1000 == 0 ->
NewDb2 = copy_docs(Db, AccNewDb, lists:reverse([DocInfo | AccUncopied]), Retry),
@@ -681,20 +681,20 @@ copy_compact(Db, NewDb0, Retry) ->
true ->
{ok, {NewDb2#db{update_seq=Seq}, [], TotalCopied + 1}}
end;
- true ->
+ true ->
{ok, {AccNewDb, [DocInfo | AccUncopied], TotalCopied + 1}}
end
end,
-
+
couch_task_status:set_update_frequency(500),
-
+
{ok, {NewDb2, Uncopied, TotalChanges}} =
couch_btree:foldl(Db#db.docinfo_by_seq_btree, NewDb#db.update_seq + 1, EnumBySeqFun, {NewDb, [], 0}),
-
- couch_task_status:update("Flushing"),
-
+
+ couch_task_status:update("Flushing"),
+
NewDb3 = copy_docs(Db, NewDb2, lists:reverse(Uncopied), Retry),
-
+
% copy misc header values
if NewDb3#db.admins /= Db#db.admins ->
{ok, Ptr} = couch_file:append_term(NewDb3#db.fd, Db#db.admins),
@@ -702,7 +702,7 @@ copy_compact(Db, NewDb0, Retry) ->
true ->
NewDb4 = NewDb3
end,
-
+
commit_data(NewDb4#db{update_seq=Db#db.update_seq}).
start_copy_compact(#db{name=Name,filepath=Filepath}=Db) ->
@@ -721,7 +721,7 @@ start_copy_compact(#db{name=Name,filepath=Filepath}=Db) ->
end,
NewDb = init_db(Name, CompactFile, Fd, Header),
NewDb2 = copy_compact(Db, NewDb, Retry),
-
+
gen_server:cast(Db#db.update_pid, {compact_done, CompactFile}),
close_db(NewDb2).
-
+
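
One small detail from the init_db hunk above worth calling out: the fsync_options ini value is an Erlang term written as a string, parsed with couch_util:parse_term/1. A minimal shell sketch, not part of the patch (it only assumes couch_util is on the code path):

    1> couch_util:parse_term("[before_header, after_header, on_file_open]").
    {ok,[before_header,after_header,on_file_open]}
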
diff --git a/src/couchdb/couch_doc.erl b/src/couchdb/couch_doc.erl
index b9747a01..6c9a119c 100644
--- a/src/couchdb/couch_doc.erl
+++ b/src/couchdb/couch_doc.erl
@@ -34,7 +34,7 @@ to_json_revisions(Options, Start, RevIds) ->
case lists:member(revs, Options) of
false -> [];
true ->
- [{<<"_revisions">>, {[{<<"start">>, Start},
+ [{<<"_revisions">>, {[{<<"start">>, Start},
{<<"ids">>, RevIds}]}}]
end.
@@ -115,10 +115,10 @@ to_json_attachments(Attachments, Options) ->
to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
meta=Meta}=Doc,Options)->
- {[{<<"_id">>, Id}]
+ {[{<<"_id">>, Id}]
++ to_json_rev(Start, RevIds)
++ to_json_body(Del, Body)
- ++ to_json_revisions(Options, Start, RevIds)
+ ++ to_json_revisions(Options, Start, RevIds)
++ to_json_meta(Meta)
++ to_json_attachments(Doc#doc.attachments, Options)
}.
@@ -133,13 +133,13 @@ parse_rev(Rev) when is_binary(Rev) ->
parse_rev(?b2l(Rev));
parse_rev(Rev) when is_list(Rev) ->
SplitRev = lists:splitwith(fun($-) -> false; (_) -> true end, Rev),
- case SplitRev of
+ case SplitRev of
{Pos, [$- | RevId]} -> {list_to_integer(Pos), ?l2b(RevId)};
_Else -> throw({bad_request, <<"Invalid rev format">>})
end;
parse_rev(_BadRev) ->
throw({bad_request, <<"Invalid rev format">>}).
-
+
parse_revs([]) ->
[];
parse_revs([Rev | Rest]) ->
@@ -161,20 +161,20 @@ validate_docid(Id) ->
transfer_fields([], #doc{body=Fields}=Doc) ->
% convert fields back to json object
Doc#doc{body={lists:reverse(Fields)}};
-
+
transfer_fields([{<<"_id">>, Id} | Rest], Doc) ->
validate_docid(Id),
transfer_fields(Rest, Doc#doc{id=Id});
-
+
transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc) ->
{Pos, RevId} = parse_rev(Rev),
transfer_fields(Rest,
Doc#doc{revs={Pos, [RevId]}});
-
+
transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc) ->
% we already got the rev from the _revisions
transfer_fields(Rest,Doc);
-
+
transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) ->
Bins = lists:flatmap(fun({Name, {BinProps}}) ->
case proplists:get_value(<<"stub">>, BinProps) of
@@ -190,7 +190,7 @@ transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) ->
end
end, JsonBins),
transfer_fields(Rest, Doc#doc{attachments=Bins});
-
+
transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
RevIds = proplists:get_value(<<"ids">>, Props),
Start = proplists:get_value(<<"start">>, Props),
@@ -204,7 +204,7 @@ transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
[throw({doc_validation, "RevId isn't a string"}) ||
RevId <- RevIds, not is_binary(RevId)],
transfer_fields(Rest, Doc#doc{revs={Start, RevIds}});
-
+
transfer_fields([{<<"_deleted">>, B} | Rest], Doc) when (B==true) or (B==false) ->
transfer_fields(Rest, Doc#doc{deleted=B});
@@ -222,7 +222,7 @@ transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc) ->
transfer_fields([{<<"_",Name/binary>>, _} | _], _) ->
throw({doc_validation,
?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
-
+
transfer_fields([Field | Rest], #doc{body=Fields}=Doc) ->
transfer_fields(Rest, Doc#doc{body=[Field|Fields]}).
@@ -237,11 +237,11 @@ max_seq([#rev_info{seq=Seq}|Rest], Max) ->
to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree}) ->
RevInfosAndPath =
- [{#rev_info{deleted=Del,body_sp=Bp,seq=Seq,rev={Pos,RevId}}, Path} ||
- {{Del, Bp, Seq},{Pos, [RevId|_]}=Path} <-
+ [{#rev_info{deleted=Del,body_sp=Bp,seq=Seq,rev={Pos,RevId}}, Path} ||
+ {{Del, Bp, Seq},{Pos, [RevId|_]}=Path} <-
couch_key_tree:get_all_leafs(Tree)],
SortedRevInfosAndPath = lists:sort(
- fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA},
+ fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA},
{#rev_info{deleted=DeletedB,rev=RevB}, _PathB}) ->
% sort descending by {not deleted, rev}
{not DeletedA, RevA} > {not DeletedB, RevB}
@@ -282,7 +282,7 @@ get_validate_doc_fun(#doc{body={Props}}) ->
Lang, FunSrc, EditDoc, DiskDoc, Ctx)
end
end.
-
+
has_stubs(#doc{attachments=Bins}) ->
has_stubs(Bins);
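
A minimal sketch of the rev-string helpers touched above, not part of the patch: revision strings have the form "Pos-RevId", parse_rev/1 returns {Pos, RevId} with Pos as an integer and RevId as a binary, and anything else throws {bad_request, <<"Invalid rev format">>}. The rev ids below are made up.

    -module(rev_parse_sketch).
    -export([demo/0]).

    demo() ->
        {1, <<"abc">>} = couch_doc:parse_rev(<<"1-abc">>),
        [{2, <<"def">>}, {3, <<"ghi">>}] =
            couch_doc:parse_revs([<<"2-def">>, "3-ghi"]),   % binaries or lists
        ok.
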
diff --git a/src/couchdb/couch_erl_driver.c b/src/couchdb/couch_erl_driver.c
index 18fabc2b..0569f0c3 100644
--- a/src/couchdb/couch_erl_driver.c
+++ b/src/couchdb/couch_erl_driver.c
@@ -56,7 +56,7 @@ static ErlDrvData couch_drv_start(ErlDrvPort port, char *buff)
return ERL_DRV_ERROR_GENERAL;
pData->port = port;
-
+
pData->coll = ucol_open("", &status);
if (U_FAILURE(status)) {
couch_drv_stop((ErlDrvData)pData);
@@ -140,7 +140,7 @@ static int couch_drv_control(ErlDrvData drv_data, unsigned int command, char *pB
return return_control_result(&response, sizeof(response), rbuf, rlen);
}
-
+
default:
return -1;
}
diff --git a/src/couchdb/couch_external_manager.erl b/src/couchdb/couch_external_manager.erl
index 034e0c50..1becaa9c 100644
--- a/src/couchdb/couch_external_manager.erl
+++ b/src/couchdb/couch_external_manager.erl
@@ -19,7 +19,7 @@
-include("couch_db.hrl").
start_link() ->
- gen_server:start_link({local, couch_external_manager},
+ gen_server:start_link({local, couch_external_manager},
couch_external_manager, [], []).
execute(UrlName, JsonReq) ->
diff --git a/src/couchdb/couch_external_server.erl b/src/couchdb/couch_external_server.erl
index d81c4f85..107e27d6 100644
--- a/src/couchdb/couch_external_server.erl
+++ b/src/couchdb/couch_external_server.erl
@@ -14,7 +14,7 @@
-behaviour(gen_server).
-export([start_link/2, stop/1, execute/2]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
-include("couch_db.hrl").
diff --git a/src/couchdb/couch_file.erl b/src/couchdb/couch_file.erl
index 2021bf5b..65863ee2 100644
--- a/src/couchdb/couch_file.erl
+++ b/src/couchdb/couch_file.erl
@@ -36,7 +36,7 @@
open(Filepath) ->
open(Filepath, []).
-
+
open(Filepath, Options) ->
case gen_server:start_link(couch_file,
{Filepath, Options, self(), Ref = make_ref()}, []) of
@@ -76,7 +76,7 @@ append_term(Fd, Term) ->
%% serialized term. Use pread_term to read the term back.
%% or {error, Reason}.
%%----------------------------------------------------------------------
-
+
append_binary(Fd, Bin) ->
Size = iolist_size(Bin),
gen_server:call(Fd, {append_bin, [<<Size:32/integer>>, Bin]}, infinity).
@@ -89,7 +89,7 @@ append_binary(Fd, Bin) ->
%% or {error, Reason}.
%%----------------------------------------------------------------------
-
+
pread_term(Fd, Pos) ->
{ok, Bin} = pread_binary(Fd, Pos),
{ok, binary_to_term(Bin)}.
@@ -178,14 +178,14 @@ read_header(Fd) ->
Else ->
Else
end.
-
+
write_header(Fd, Data) ->
Bin = term_to_binary(Data),
Md5 = erlang:md5(Bin),
% now we assemble the final header binary and write to disk
FinalBin = <<Md5/binary, Bin/binary>>,
gen_server:call(Fd, {write_header, FinalBin}, infinity).
-
+
@@ -301,7 +301,7 @@ handle_call({upgrade_old_header, Prefix}, _From, #file{fd=Fd}=File) ->
handle_call(find_header, _From, #file{fd=Fd}=File) ->
{ok, Pos} = file:position(Fd, eof),
{reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}.
-
+
% 09 UPGRADE CODE
-define(HEADER_SIZE, 2048). % size of each segment of the doubly written header
@@ -349,7 +349,7 @@ read_old_header(Fd, Prefix) ->
_ ->
Result
end.
-
+
% 09 UPGRADE CODE
extract_header(Prefix, Bin) ->
SizeOfPrefix = size(Prefix),
@@ -373,7 +373,7 @@ extract_header(Prefix, Bin) ->
_ ->
unknown_header_type
end.
-
+
% 09 UPGRADE CODE
write_old_header(Fd, Prefix, Data) ->
@@ -401,7 +401,7 @@ write_old_header(Fd, Prefix, Data) ->
ok = file:pwrite(Fd, 0, DblWriteBin),
ok = file:sync(Fd).
-
+
handle_cast(close, Fd) ->
{stop,normal,Fd}.
@@ -422,14 +422,14 @@ find_header(Fd, Block) ->
_Error ->
find_header(Fd, Block -1)
end.
-
+
load_header(Fd, Block) ->
{ok, <<1>>} = file:pread(Fd, Block*?SIZE_BLOCK, 1),
{ok, <<HeaderLen:32/integer>>} = file:pread(Fd, (Block*?SIZE_BLOCK) + 1, 4),
TotalBytes = calculate_total_read_len(1, HeaderLen),
- {ok, <<RawBin:TotalBytes/binary>>} =
+ {ok, <<RawBin:TotalBytes/binary>>} =
file:pread(Fd, (Block*?SIZE_BLOCK) + 5, TotalBytes),
- <<Md5Sig:16/binary, HeaderBin/binary>> =
+ <<Md5Sig:16/binary, HeaderBin/binary>> =
iolist_to_binary(remove_block_prefixes(1, RawBin)),
Md5Sig = erlang:md5(HeaderBin),
{ok, HeaderBin}.
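
The write_header/load_header hunks above amount to a simple framing scheme: md5(term_to_binary(Term)) is prepended to the serialized term, and the reader re-hashes the payload and pattern-matches the digest, so corruption surfaces as a badmatch. A standalone sketch of that round trip, not part of the patch and ignoring the on-disk block prefixes:

    -module(header_frame_sketch).
    -export([roundtrip/1]).

    roundtrip(Term) ->
        Bin = term_to_binary(Term),
        Framed = <<(erlang:md5(Bin))/binary, Bin/binary>>,
        <<Md5Sig:16/binary, HeaderBin/binary>> = Framed,
        Md5Sig = erlang:md5(HeaderBin),      % corrupted payload => badmatch here
        binary_to_term(HeaderBin).
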
diff --git a/src/couchdb/couch_httpd.erl b/src/couchdb/couch_httpd.erl
index 91877cca..5c0869c1 100644
--- a/src/couchdb/couch_httpd.erl
+++ b/src/couchdb/couch_httpd.erl
@@ -34,17 +34,17 @@ start_link() ->
BindAddress = couch_config:get("httpd", "bind_address", any),
Port = couch_config:get("httpd", "port", "5984"),
-
+
DefaultSpec = "{couch_httpd_db, handle_request}",
DefaultFun = make_arity_1_fun(
couch_config:get("httpd", "default_handler", DefaultSpec)
),
-
+
UrlHandlersList = lists:map(
fun({UrlKey, SpecStr}) ->
{?l2b(UrlKey), make_arity_1_fun(SpecStr)}
end, couch_config:get("httpd_global_handlers")),
-
+
DbUrlHandlersList = lists:map(
fun({UrlKey, SpecStr}) ->
{?l2b(UrlKey), make_arity_2_fun(SpecStr)}
@@ -65,7 +65,7 @@ start_link() ->
end,
% and off we go
-
+
{ok, Pid} = case mochiweb_http:start([
{loop, Loop},
{name, ?MODULE},
@@ -93,7 +93,7 @@ start_link() ->
{ok, Pid}.
-% SpecStr is a string like "{my_module, my_fun}"
+% SpecStr is a string like "{my_module, my_fun}"
% or "{my_module, my_fun, <<"my_arg">>}"
make_arity_1_fun(SpecStr) ->
case couch_util:parse_term(SpecStr) of
@@ -110,11 +110,11 @@ make_arity_2_fun(SpecStr) ->
{ok, {Mod, Fun}} ->
fun(Arg1, Arg2) -> apply(Mod, Fun, [Arg1, Arg2]) end
end.
-
+
stop() ->
mochiweb_http:stop(?MODULE).
-
+
handle_request(MochiReq, DefaultFun,
UrlHandlers, DbUrlHandlers, DesignUrlHandlers) ->
@@ -125,7 +125,7 @@ handle_request(MochiReq, DefaultFun,
% removed, but URL quoting left intact
RawUri = MochiReq:get(raw_path),
{"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
-
+
HandlerKey =
case mochiweb_util:partition(Path, "/") of
{"", "", ""} ->
@@ -139,19 +139,19 @@ handle_request(MochiReq, DefaultFun,
MochiReq:get(version),
mochiweb_headers:to_list(MochiReq:get(headers))
]),
-
+
Method1 =
case MochiReq:get(method) of
% already an atom
Meth when is_atom(Meth) -> Meth;
-
+
% Non standard HTTP verbs aren't atoms (COPY, MOVE etc) so convert when
% possible (if any module references the atom, then it's existing).
Meth -> couch_util:to_existing_atom(Meth)
end,
-
+
increment_method_stats(Method1),
-
+
% alias HEAD to GET as mochiweb takes care of stripping the body
Method = case Method1 of
'HEAD' -> 'GET';
@@ -264,13 +264,13 @@ header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
MochiReq:get_primary_header_value(Key).
-
+
serve_file(#httpd{mochi_req=MochiReq}, RelativePath, DocumentRoot) ->
{ok, MochiReq:serve_file(RelativePath, DocumentRoot, server_header())}.
qs_value(Req, Key) ->
qs_value(Req, Key, undefined).
-
+
qs_value(Req, Key, Default) ->
proplists:get_value(Key, qs(Req), Default).
@@ -319,7 +319,7 @@ json_body(Httpd) ->
json_body_obj(Httpd) ->
case json_body(Httpd) of
{Props} -> {Props};
- _Else ->
+ _Else ->
throw({bad_request, "Request body must be a JSON object"})
end.
@@ -457,7 +457,7 @@ end_jsonp() ->
end,
put(jsonp, undefined),
Resp.
-
+
validate_callback(CallBack) when is_binary(CallBack) ->
validate_callback(binary_to_list(CallBack));
validate_callback([]) ->
@@ -507,10 +507,10 @@ error_info(Error) ->
send_error(_Req, {already_sent, Resp, _Error}) ->
{ok, Resp};
-
+
send_error(Req, Error) ->
{Code, ErrorStr, ReasonStr} = error_info(Error),
- if Code == 401 ->
+ if Code == 401 ->
case couch_config:get("httpd", "WWW-Authenticate", nil) of
nil ->
Headers = [];
@@ -524,7 +524,7 @@ send_error(Req, Error) ->
send_error(Req, Code, ErrorStr, ReasonStr) ->
send_error(Req, Code, [], ErrorStr, ReasonStr).
-
+
send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
send_json(Req, Code, Headers,
{[{<<"error">>, ErrorStr},
@@ -538,7 +538,7 @@ send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
send_chunked_error(Resp, Error) ->
{Code, ErrorStr, ReasonStr} = error_info(Error),
JsonError = {[{<<"code">>, Code},
- {<<"error">>, ErrorStr},
+ {<<"error">>, ErrorStr},
{<<"reason">>, ReasonStr}]},
send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
send_chunk(Resp, []).
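
As the comment in the hunk above notes, HTTP handlers are configured as spec strings such as "{my_module, my_fun}" or "{my_module, my_fun, <<"my_arg">>}". A minimal sketch of what make_arity_1_fun/1 produces for the two-element form, not part of the patch; my_handler_module and handle_req are hypothetical names:

    Handler = make_arity_1_fun("{my_handler_module, handle_req}"),
    %% behaves like the closure below (cf. the make_arity_2_fun clause shown
    %% in the hunk, which wraps apply/3 the same way):
    Equivalent = fun(Req) -> apply(my_handler_module, handle_req, [Req]) end,
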
diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl
index edb2f310..c00fd873 100644
--- a/src/couchdb/couch_httpd_db.erl
+++ b/src/couchdb/couch_httpd_db.erl
@@ -13,7 +13,7 @@
-module(couch_httpd_db).
-include("couch_db.hrl").
--export([handle_request/1, handle_compact_req/2, handle_design_req/2,
+-export([handle_request/1, handle_compact_req/2, handle_design_req/2,
db_req/2, couch_doc_open/4,handle_changes_req/2,
update_doc_result_to_json/1, update_doc_result_to_json/2,
handle_design_info_req/2, handle_view_cleanup_req/2]).
@@ -28,7 +28,7 @@
rev = nil,
open_revs = []
}).
-
+
% Database request handlers
handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
db_url_handlers=DbUrlHandlers}=Req)->
@@ -86,9 +86,9 @@ handle_changes_req(#httpd{method='GET',path_parts=[DbName|_]}=Req, Db) ->
couch_db_update_notifier:stop(Notify),
get_rest_db_updated() % clean out any remaining update messages
end;
-
+
"false" ->
- {ok, {LastSeq, _Prepend}} =
+ {ok, {LastSeq, _Prepend}} =
send_changes(Req, Resp, Db, StartSeq, <<"">>),
send_chunk(Resp, io_lib:format("\n],\n\"last_seq\":~w}\n", [LastSeq])),
send_chunk(Resp, "")
@@ -106,7 +106,7 @@ wait_db_updated(Timeout, TimeoutFun) ->
stop -> stop
end
end.
-
+
get_rest_db_updated() ->
receive db_updated -> get_rest_db_updated()
after 0 -> updated
@@ -127,7 +127,7 @@ keep_sending_changes(#httpd{user_ctx=UserCtx,path_parts=[DbName|_]}=Req, Resp, D
send_changes(Req, Resp, Db, StartSeq, Prepend0) ->
Style = list_to_existing_atom(
couch_httpd:qs_value(Req, "style", "main_only")),
- couch_db:changes_since(Db, Style, StartSeq,
+ couch_db:changes_since(Db, Style, StartSeq,
fun([#doc_info{id=Id, high_seq=Seq}|_]=DocInfos, {_, Prepend}) ->
FilterFun =
fun(#doc_info{revs=[#rev_info{rev=Rev}|_]}) ->
@@ -139,7 +139,7 @@ send_changes(Req, Resp, Db, StartSeq, Prepend0) ->
[] ->
{ok, {Seq, Prepend}};
_ ->
- send_chunk(Resp,
+ send_chunk(Resp,
[Prepend, ?JSON_ENCODE({[{seq,Seq}, {id, Id},
{changes,Results}]})]),
{ok, {Seq, <<",\n">>}}
@@ -172,7 +172,7 @@ handle_design_req(#httpd{
}=Req, Db) ->
Handler = couch_util:dict_find(Action, DesignUrlHandlers, fun db_req/2),
Handler(Req, Db);
-
+
handle_design_req(Req, Db) ->
db_req(Req, Db).
@@ -188,7 +188,7 @@ handle_design_info_req(#httpd{
{name, DesignName},
{view_index, {GroupInfoList}}
]});
-
+
handle_design_info_req(Req, _Db) ->
send_method_not_allowed(Req, "GET").
@@ -244,7 +244,7 @@ db_req(#httpd{method='POST',path_parts=[DbName]}=Req, Db) ->
_Normal ->
% normal
{ok, NewRev} = couch_db:update_doc(Db, Doc#doc{id=DocId}, []),
- DocUrl = absolute_uri(Req,
+ DocUrl = absolute_uri(Req,
binary_to_list(<<"/",DbName/binary,"/",DocId/binary>>)),
send_json(Req, 201, [{"Location", DocUrl}], {[
{ok, true},
@@ -265,7 +265,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) -
{ok, true},
{instance_start_time, DbStartTime}
]});
-
+
db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
send_method_not_allowed(Req, "POST");
@@ -311,14 +311,14 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
Docs, Results),
send_json(Req, 201, DocResults);
{aborted, Errors} ->
- ErrorsJson =
+ ErrorsJson =
lists:map(fun update_doc_result_to_json/1, Errors),
send_json(Req, 417, ErrorsJson)
end;
false ->
Docs = [couch_doc:from_json_obj(JsonObj) || JsonObj <- DocsArray],
{ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes),
- ErrorsJson =
+ ErrorsJson =
lists:map(fun update_doc_result_to_json/1, Errors),
send_json(Req, 201, ErrorsJson)
end;
@@ -328,7 +328,7 @@ db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
{IdsRevs} = couch_httpd:json_body_obj(Req),
IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
-
+
case couch_db:purge_docs(Db, IdsRevs2) of
{ok, PurgeSeq, PurgedIdsRevs} ->
PurgedIdsRevs2 = [{Id, couch_doc:rev_to_strs(Revs)} || {Id, Revs} <- PurgedIdsRevs],
@@ -339,7 +339,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
+
db_req(#httpd{method='GET',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
all_docs_view(Req, Db, nil);
@@ -357,7 +357,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
db_req(#httpd{path_parts=[_,<<"_all_docs">>]}=Req, _Db) ->
send_method_not_allowed(Req, "GET,HEAD,POST");
-
+
db_req(#httpd{method='GET',path_parts=[_,<<"_all_docs_by_seq">>]}=Req, Db) ->
#view_query_args{
start_key = StartKey,
@@ -450,18 +450,18 @@ db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
send_method_not_allowed(Req, "PUT,GET");
-% Special case to enable using an unencoded slash in the URL of design docs,
+% Special case to enable using an unencoded slash in the URL of design docs,
% as slashes in document IDs must otherwise be URL encoded.
db_req(#httpd{method='GET',mochi_req=MochiReq, path_parts=[DbName,<<"_design/",_/binary>>|_]}=Req, _Db) ->
PathFront = "/" ++ couch_httpd:quote(binary_to_list(DbName)) ++ "/",
RawSplit = regexp:split(MochiReq:get(raw_path),"_design%2F"),
{ok, [PathFront|PathTail]} = RawSplit,
- couch_httpd:send_redirect(Req, PathFront ++ "_design/" ++
+ couch_httpd:send_redirect(Req, PathFront ++ "_design/" ++
mochiweb_util:join(PathTail, "_design%2F"));
db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
db_doc_req(Req, Db, <<"_design/",Name/binary>>);
-
+
db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
@@ -472,7 +472,7 @@ db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
db_attachment_req(Req, Db, DocId, FileNameParts).
-all_docs_view(Req, Db, Keys) ->
+all_docs_view(Req, Db, Keys) ->
#view_query_args{
start_key = StartKey,
start_docid = StartDocId,
@@ -483,17 +483,17 @@ all_docs_view(Req, Db, Keys) ->
} = QueryArgs = couch_httpd_view:parse_view_params(Req, Keys, map),
{ok, Info} = couch_db:get_db_info(Db),
CurrentEtag = couch_httpd:make_etag(proplists:get_value(update_seq, Info)),
- couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
-
+ couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+
TotalRowCount = proplists:get_value(doc_count, Info),
StartId = if is_binary(StartKey) -> StartKey;
true -> StartDocId
end,
FoldAccInit = {Limit, SkipCount, undefined, []},
-
+
case Keys of
nil ->
- PassedEndFun =
+ PassedEndFun =
case Dir of
fwd ->
fun(ViewKey, _ViewId) ->
@@ -504,7 +504,7 @@ all_docs_view(Req, Db, Keys) ->
couch_db_updater:less_docid(ViewKey, EndKey)
end
end,
-
+
FoldlFun = couch_httpd_view:make_view_fold_fun(Req, QueryArgs, CurrentEtag, Db,
TotalRowCount, #view_fold_helper_funs{
reduce_count = fun couch_db:enum_docs_reduce_to_count/1,
@@ -518,7 +518,7 @@ all_docs_view(Req, Db, Keys) ->
{ok, Acc}
end
end,
- {ok, FoldResult} = couch_db:enum_docs(Db, StartId, Dir,
+ {ok, FoldResult} = couch_db:enum_docs(Db, StartId, Dir,
AdapterFun, FoldAccInit),
couch_httpd_view:finish_view_fold(Req, TotalRowCount, {ok, FoldResult});
_ ->
@@ -554,7 +554,7 @@ all_docs_view(Req, Db, Keys) ->
Acc
end
end, {ok, FoldAccInit}, Keys),
- couch_httpd_view:finish_view_fold(Req, TotalRowCount, {ok, FoldResult})
+ couch_httpd_view:finish_view_fold(Req, TotalRowCount, {ok, FoldResult})
end
end).
@@ -580,7 +580,7 @@ db_doc_req(#httpd{method='GET'}=Req, Db, DocId) ->
[] ->
Doc = couch_doc_open(Db, DocId, Rev, Options),
DiskEtag = couch_httpd:doc_etag(Doc),
- couch_httpd:etag_respond(Req, DiskEtag, fun() ->
+ couch_httpd:etag_respond(Req, DiskEtag, fun() ->
Headers = case Doc#doc.meta of
[] -> [{"Etag", DiskEtag}]; % output etag only when we have no meta
_ -> []
@@ -668,10 +668,10 @@ db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
% open old doc
Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
% save new doc
- {ok, NewTargetRev} = couch_db:update_doc(Db,
- Doc#doc{id=TargetDocId, revs=TargetRevs}, []),
+ {ok, NewTargetRev} = couch_db:update_doc(Db,
+ Doc#doc{id=TargetDocId, revs=TargetRevs}, []),
% respond
- send_json(Req, 201,
+ send_json(Req, 201,
[{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}],
update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}));
@@ -698,7 +698,7 @@ update_doc(Req, Db, DocId, Json) ->
update_doc(Req, Db, DocId, Json, Headers) ->
#doc{deleted=Deleted} = Doc = couch_doc_from_req(Req, DocId, Json),
-
+
case couch_httpd:header_value(Req, "X-Couch-Full-Commit", "false") of
"true" ->
Options = [full_commit];
@@ -729,7 +729,7 @@ couch_doc_from_req(Req, DocId, Json) ->
Revs = {Pos, [Rev]}
end,
Doc#doc{id=DocId, revs=Revs}.
-
+
% Useful for debugging
% couch_doc_open(Db, DocId) ->
@@ -758,13 +758,13 @@ couch_doc_open(Db, DocId, Rev, Options) ->
db_attachment_req(#httpd{method='GET'}=Req, Db, DocId, FileNameParts) ->
FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts),"/")),
#doc_query_args{
- rev=Rev,
+ rev=Rev,
options=Options
} = parse_doc_query(Req),
#doc{
attachments=Attachments
} = Doc = couch_doc_open(Db, DocId, Rev, Options),
-
+
case proplists:get_value(FileName, Attachments) of
undefined ->
throw({not_found, "Document is missing attachment"});
@@ -789,9 +789,9 @@ db_attachment_req(#httpd{method=Method}=Req, Db, DocId, FileNameParts)
when (Method == 'PUT') or (Method == 'DELETE') ->
FileName = validate_attachment_name(
mochiweb_util:join(
- lists:map(fun binary_to_list/1,
+ lists:map(fun binary_to_list/1,
FileNameParts),"/")),
-
+
NewAttachment = case Method of
'DELETE' ->
[];
@@ -807,12 +807,12 @@ db_attachment_req(#httpd{method=Method}=Req, Db, DocId, FileNameParts)
list_to_binary(CType)
end,
case couch_httpd:header_value(Req,"Content-Length") of
- undefined ->
- {fun(MaxChunkSize, ChunkFun, InitState) ->
- couch_httpd:recv_chunked(Req, MaxChunkSize,
- ChunkFun, InitState)
+ undefined ->
+ {fun(MaxChunkSize, ChunkFun, InitState) ->
+ couch_httpd:recv_chunked(Req, MaxChunkSize,
+ ChunkFun, InitState)
end, undefined};
- Length ->
+ Length ->
{fun() -> couch_httpd:recv(Req, 0) end,
list_to_integer(Length)}
end
@@ -901,7 +901,7 @@ extract_header_rev(Req, ExplicitRev) ->
parse_copy_destination_header(Req) ->
Destination = couch_httpd:header_value(Req, "Destination"),
case regexp:match(Destination, "\\?") of
- nomatch ->
+ nomatch ->
{list_to_binary(Destination), {0, []}};
{match, _, _} ->
{ok, [DocId, RevQueryOptions]} = regexp:split(Destination, "\\?"),
@@ -911,7 +911,7 @@ parse_copy_destination_header(Req) ->
end.
validate_attachment_names(Doc) ->
- lists:foreach(fun({Name, _}) ->
+ lists:foreach(fun({Name, _}) ->
validate_attachment_name(Name)
end, Doc#doc.attachments).
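
The attachment PUT hunk above ties back to flush_binary/2 in couch_db.erl earlier in this diff: a body with no Content-Length is handed on as {StreamFun, undefined}, where StreamFun(MaxChunkSize, ChunkFun, InitState) drives couch_httpd:recv_chunked/4, while a sized body becomes {ReaderFun, Len} and is drained by write_streamed_attachment/3. A tiny in-memory stand-in for the sized form, not part of the patch:

    Data = <<"hello attachment">>,
    %% a {ReaderFun, Len} pair of the shape the updater expects; the real
    %% ReaderFun in this hunk is fun() -> couch_httpd:recv(Req, 0) end
    Att = {fun() -> Data end, byte_size(Data)},
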
diff --git a/src/couchdb/couch_httpd_external.erl b/src/couchdb/couch_httpd_external.erl
index 949bd83a..709d8337 100644
--- a/src/couchdb/couch_httpd_external.erl
+++ b/src/couchdb/couch_httpd_external.erl
@@ -34,7 +34,7 @@ handle_external_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
handle_external_req(Req, _) ->
send_error(Req, 404, <<"external_server_error">>, <<"Broken assumption">>).
-% handle_external_req/3
+% handle_external_req/3
% for this type of config usage:
% _action = {couch_httpd_external, handle_external_req, <<"action">>}
% with urls like
@@ -44,7 +44,7 @@ handle_external_req(HttpReq, Db, Name) ->
process_external_req(HttpReq, Db, Name) ->
- Response = couch_external_manager:execute(binary_to_list(Name),
+ Response = couch_external_manager:execute(binary_to_list(Name),
json_req_obj(HttpReq, Db)),
case Response of
@@ -54,7 +54,7 @@ process_external_req(HttpReq, Db, Name) ->
send_external_response(HttpReq, Response)
end.
-json_req_obj(#httpd{mochi_req=Req,
+json_req_obj(#httpd{mochi_req=Req,
method=Verb,
path_parts=Path,
req_body=ReqBody
@@ -99,7 +99,7 @@ send_external_response(#httpd{mochi_req=MochiReq}, Response) ->
ctype = CType,
headers = Headers
} = parse_external_response(Response),
- Resp = MochiReq:respond({Code,
+ Resp = MochiReq:respond({Code,
default_or_content_type(CType, Headers ++ couch_httpd:server_header()), Data}),
{ok, Resp}.
@@ -120,7 +120,7 @@ parse_external_response({Response}) ->
Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
{<<"base64">>, Value} ->
Args#extern_resp_args{
- data=couch_util:decodeBase64(Value),
+ data=couch_util:decodeBase64(Value),
ctype="application/binary"
};
{<<"headers">>, {Headers}} ->
diff --git a/src/couchdb/couch_httpd_misc_handlers.erl b/src/couchdb/couch_httpd_misc_handlers.erl
index a49bbef6..e7c04997 100644
--- a/src/couchdb/couch_httpd_misc_handlers.erl
+++ b/src/couchdb/couch_httpd_misc_handlers.erl
@@ -16,7 +16,7 @@
handle_all_dbs_req/1,handle_replicate_req/1,handle_restart_req/1,
handle_uuids_req/1,handle_config_req/1,handle_log_req/1,
handle_task_status_req/1,handle_sleep_req/1,handle_whoami_req/1]).
-
+
-export([increment_update_seq_req/2]).
@@ -41,7 +41,7 @@ handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
couch_httpd:serve_file(Req, "favicon.ico", DocumentRoot);
handle_favicon_req(Req, _) ->
send_method_not_allowed(Req, "GET,HEAD").
-
+
handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
"/" ++ UrlPath = couch_httpd:path(Req),
case couch_httpd:partition(UrlPath) of
@@ -83,7 +83,7 @@ fix_db_url(UrlBin) ->
$/ -> Url;
_ -> Url ++ "/"
end).
-
+
get_rep_endpoint(_Req, {Props}) ->
Url = proplists:get_value(<<"url">>, Props),
@@ -136,7 +136,7 @@ handle_uuids_req(Req) ->
% Config request handler
-
+
% GET /_config/
% GET /_config
handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
diff --git a/src/couchdb/couch_httpd_show.erl b/src/couchdb/couch_httpd_show.erl
index 9b65c076..854b3d80 100644
--- a/src/couchdb/couch_httpd_show.erl
+++ b/src/couchdb/couch_httpd_show.erl
@@ -11,7 +11,7 @@
% the License.
-module(couch_httpd_show).
-
+
-export([handle_doc_show_req/2, handle_view_list_req/2]).
@@ -21,7 +21,7 @@
[send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
start_json_response/2,send_chunk/2,send_chunked_error/2,
start_chunked_response/3, send_error/4]).
-
+
handle_doc_show_req(#httpd{
method='GET',
path_parts=[_DbName, _Design, DesignName, _Show, ShowName, DocId]
@@ -93,7 +93,7 @@ send_view_list_response(Lang, ListSrc, ViewName, DesignId, Req, Db, Keys) ->
Stale = couch_httpd_view:get_stale_type(Req),
Reduce = couch_httpd_view:get_reduce_type(Req),
case couch_view:get_map_view(Db, DesignId, ViewName, Stale) of
- {ok, View, Group} ->
+ {ok, View, Group} ->
QueryArgs = couch_httpd_view:parse_view_params(Req, Keys, map),
output_map_list(Req, Lang, ListSrc, View, Group, Db, QueryArgs, Keys);
{not_found, _Reason} ->
@@ -139,7 +139,7 @@ output_map_list(#httpd{mochi_req=MReq}=Req, Lang, ListSrc, View, Group, Db, Quer
StartListRespFun = make_map_start_resp_fun(QueryServer, Db),
SendListRowFun = make_map_send_row_fun(QueryServer),
-
+
FoldlFun = couch_httpd_view:make_view_fold_fun(Req, QueryArgs, CurrentEtag, Db, RowCount,
#view_fold_helper_funs{
reduce_count = fun couch_view:reduce_to_count/1,
@@ -200,7 +200,7 @@ make_reduce_start_resp_fun(QueryServer, _Req, Db, _CurrentEtag) ->
end.
start_list_resp(QueryServer, Req, Db, Head, Etag) ->
- [<<"start">>,Chunks,JsonResp] = couch_query_servers:render_list_head(QueryServer,
+ [<<"start">>,Chunks,JsonResp] = couch_query_servers:render_list_head(QueryServer,
Req, Db, Head),
JsonResp2 = apply_etag(JsonResp, Etag),
#extern_resp_args{
@@ -266,9 +266,9 @@ output_reduce_list(#httpd{mochi_req=MReq}=Req, Lang, ListSrc, View, Group, Db, Q
couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
StartListRespFun = make_reduce_start_resp_fun(QueryServer, Req, Db, CurrentEtag),
SendListRowFun = make_reduce_send_row_fun(QueryServer, Db),
-
- {ok, GroupRowsFun, RespFun} = couch_httpd_view:make_reduce_fold_funs(Req,
- GroupLevel, QueryArgs, CurrentEtag,
+
+ {ok, GroupRowsFun, RespFun} = couch_httpd_view:make_reduce_fold_funs(Req,
+ GroupLevel, QueryArgs, CurrentEtag,
#reduce_fold_helper_funs{
start_response = StartListRespFun,
send_row = SendListRowFun
@@ -300,9 +300,9 @@ output_reduce_list(#httpd{mochi_req=MReq}=Req, Lang, ListSrc, View, Group, Db, Q
couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
StartListRespFun = make_reduce_start_resp_fun(QueryServer, Req, Db, CurrentEtag),
SendListRowFun = make_reduce_send_row_fun(QueryServer, Db),
-
+
{ok, GroupRowsFun, RespFun} = couch_httpd_view:make_reduce_fold_funs(Req,
- GroupLevel, QueryArgs, CurrentEtag,
+ GroupLevel, QueryArgs, CurrentEtag,
#reduce_fold_helper_funs{
start_response = StartListRespFun,
send_row = SendListRowFun
@@ -319,7 +319,7 @@ output_reduce_list(#httpd{mochi_req=MReq}=Req, Lang, ListSrc, View, Group, Db, Q
finish_list(Req, QueryServer, Etag, FoldResult, StartFun, TotalRows) ->
case FoldResult of
{_, _, undefined, _} ->
- {ok, Resp, BeginBody} =
+ {ok, Resp, BeginBody} =
render_head_for_empty_list(StartFun, Req, Etag, TotalRows),
[<<"end">>, Chunks] = couch_query_servers:render_list_tail(QueryServer),
Chunk = BeginBody ++ ?b2l(?l2b(Chunks)),
@@ -343,8 +343,8 @@ send_doc_show_response(Lang, ShowSrc, DocId, nil, #httpd{mochi_req=MReq}=Req, Db
Hlist = mochiweb_headers:to_list(Headers),
Accept = proplists:get_value('Accept', Hlist),
CurrentEtag = couch_httpd:make_etag({Lang, ShowSrc, nil, Accept}),
- couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
- [<<"resp">>, ExternalResp] = couch_query_servers:render_doc_show(Lang, ShowSrc,
+ couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+ [<<"resp">>, ExternalResp] = couch_query_servers:render_doc_show(Lang, ShowSrc,
DocId, nil, Req, Db),
JsonResp = apply_etag(ExternalResp, CurrentEtag),
couch_httpd_external:send_external_response(Req, JsonResp)
@@ -356,9 +356,9 @@ send_doc_show_response(Lang, ShowSrc, DocId, #doc{revs=Revs}=Doc, #httpd{mochi_r
Hlist = mochiweb_headers:to_list(Headers),
Accept = proplists:get_value('Accept', Hlist),
CurrentEtag = couch_httpd:make_etag({Lang, ShowSrc, Revs, Accept}),
- % We know our etag now
- couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
- [<<"resp">>, ExternalResp] = couch_query_servers:render_doc_show(Lang, ShowSrc,
+ % We know our etag now
+ couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+ [<<"resp">>, ExternalResp] = couch_query_servers:render_doc_show(Lang, ShowSrc,
DocId, Doc, Req, Db),
JsonResp = apply_etag(ExternalResp, CurrentEtag),
couch_httpd_external:send_external_response(Req, JsonResp)
@@ -378,9 +378,9 @@ set_or_replace_header({Key, NewValue}, [], Acc) ->
[{Key, NewValue}|Acc].
apply_etag({ExternalResponse}, CurrentEtag) ->
- % Here we embark on the delicate task of replacing or creating the
- % headers on the JsonResponse object. We need to control the Etag and
- % Vary headers. If the external function controls the Etag, we'd have to
+ % Here we embark on the delicate task of replacing or creating the
+ % headers on the JsonResponse object. We need to control the Etag and
+ % Vary headers. If the external function controls the Etag, we'd have to
% run it to check for a match, which sort of defeats the purpose.
case proplists:get_value(<<"headers">>, ExternalResponse, nil) of
nil ->
@@ -397,4 +397,4 @@ apply_etag({ExternalResponse}, CurrentEtag) ->
Field
end || Field <- ExternalResponse]}
end.
-
+
diff --git a/src/couchdb/couch_httpd_view.erl b/src/couchdb/couch_httpd_view.erl
index 0feb2fac..c0d7be7f 100644
--- a/src/couchdb/couch_httpd_view.erl
+++ b/src/couchdb/couch_httpd_view.erl
@@ -83,12 +83,12 @@ handle_temp_view_req(#httpd{method='POST'}=Req, Db) ->
case proplists:get_value(<<"reduce">>, Props, null) of
null ->
QueryArgs = parse_view_params(Req, Keys, map),
- {ok, View, Group} = couch_view:get_temp_map_view(Db, Language,
+ {ok, View, Group} = couch_view:get_temp_map_view(Db, Language,
DesignOptions, MapSrc),
output_map_view(Req, View, Group, Db, QueryArgs, Keys);
RedSrc ->
QueryArgs = parse_view_params(Req, Keys, reduce),
- {ok, View, Group} = couch_view:get_temp_reduce_view(Db, Language,
+ {ok, View, Group} = couch_view:get_temp_reduce_view(Db, Language,
DesignOptions, MapSrc, RedSrc),
output_reduce_view(Req, View, Group, QueryArgs, Keys)
end;
@@ -105,7 +105,7 @@ output_map_view(Req, View, Group, Db, QueryArgs, nil) ->
start_docid = StartDocId
} = QueryArgs,
CurrentEtag = view_group_etag(Group),
- couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+ couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
{ok, RowCount} = couch_view:get_row_count(View),
Start = {StartKey, StartDocId},
FoldlFun = make_view_fold_fun(Req, QueryArgs, CurrentEtag, Db, RowCount, #view_fold_helper_funs{reduce_count=fun couch_view:reduce_to_count/1}),
@@ -113,7 +113,7 @@ output_map_view(Req, View, Group, Db, QueryArgs, nil) ->
FoldResult = couch_view:fold(View, Start, Dir, FoldlFun, FoldAccInit),
finish_view_fold(Req, RowCount, FoldResult)
end);
-
+
output_map_view(Req, View, Group, Db, QueryArgs, Keys) ->
#view_query_args{
limit = Limit,
@@ -122,7 +122,7 @@ output_map_view(Req, View, Group, Db, QueryArgs, Keys) ->
start_docid = StartDocId
} = QueryArgs,
CurrentEtag = view_group_etag(Group, Keys),
- couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+ couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
{ok, RowCount} = couch_view:get_row_count(View),
FoldAccInit = {Limit, SkipCount, undefined, []},
FoldResult = lists:foldl(
@@ -132,7 +132,7 @@ output_map_view(Req, View, Group, Db, QueryArgs, Keys) ->
QueryArgs#view_query_args{
start_key = Key,
end_key = Key
- }, CurrentEtag, Db, RowCount,
+ }, CurrentEtag, Db, RowCount,
#view_fold_helper_funs{
reduce_count = fun couch_view:reduce_to_count/1
}),
@@ -156,11 +156,11 @@ output_reduce_view(Req, View, Group, QueryArgs, nil) ->
couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
{ok, GroupRowsFun, RespFun} = make_reduce_fold_funs(Req, GroupLevel, QueryArgs, CurrentEtag, #reduce_fold_helper_funs{}),
FoldAccInit = {Limit, Skip, undefined, []},
- {ok, {_, _, Resp, _}} = couch_view:fold_reduce(View, Dir, {StartKey, StartDocId},
+ {ok, {_, _, Resp, _}} = couch_view:fold_reduce(View, Dir, {StartKey, StartDocId},
{EndKey, EndDocId}, GroupRowsFun, RespFun, FoldAccInit),
finish_reduce_fold(Req, Resp)
end);
-
+
output_reduce_view(Req, View, Group, QueryArgs, Keys) ->
#view_query_args{
limit = Limit,
@@ -177,7 +177,7 @@ output_reduce_view(Req, View, Group, QueryArgs, Keys) ->
fun(Key, {Resp, RedAcc}) ->
% run the reduce once for each key in keys, with limit etc reapplied for each key
FoldAccInit = {Limit, Skip, Resp, RedAcc},
- {_, {_, _, Resp2, RedAcc2}} = couch_view:fold_reduce(View, Dir, {Key, StartDocId},
+ {_, {_, _, Resp2, RedAcc2}} = couch_view:fold_reduce(View, Dir, {Key, StartDocId},
{Key, EndDocId}, GroupRowsFun, RespFun, FoldAccInit),
% Switch to comma
{Resp2, RedAcc2}
@@ -198,7 +198,7 @@ get_reduce_type(Req) ->
parse_view_params(Req, Keys, ViewType) ->
QueryList = couch_httpd:qs(Req),
- QueryParams =
+ QueryParams =
lists:foldl(fun({K, V}, Acc) ->
parse_view_param(K, V) ++ Acc
end, [], QueryList),
@@ -360,13 +360,13 @@ make_view_fold_fun(Req, QueryArgs, Etag, Db, TotalViewCount, HelperFuns) ->
inclusive_end = InclusiveEnd,
direction = Dir
} = QueryArgs,
-
+
#view_fold_helper_funs{
passed_end = PassedEndFun,
start_response = StartRespFun,
send_row = SendRowFun,
reduce_count = ReduceCountFun
- } = apply_default_helper_funs(HelperFuns,
+ } = apply_default_helper_funs(HelperFuns,
{Dir, EndKey, EndDocId, InclusiveEnd}),
#view_query_args{
@@ -390,12 +390,12 @@ make_view_fold_fun(Req, QueryArgs, Etag, Db, TotalViewCount, HelperFuns) ->
Offset = ReduceCountFun(OffsetReds),
{ok, Resp2, RowFunAcc0} = StartRespFun(Req, Etag,
TotalViewCount, Offset, RowFunAcc),
- {Go, RowFunAcc2} = SendRowFun(Resp2, Db, {{Key, DocId}, Value},
+ {Go, RowFunAcc2} = SendRowFun(Resp2, Db, {{Key, DocId}, Value},
IncludeDocs, RowFunAcc0),
{Go, {AccLimit - 1, 0, Resp2, RowFunAcc2}};
{_, AccLimit, _, Resp} when (AccLimit > 0) ->
% rendering all other rows
- {Go, RowFunAcc2} = SendRowFun(Resp, Db, {{Key, DocId}, Value},
+ {Go, RowFunAcc2} = SendRowFun(Resp, Db, {{Key, DocId}, Value},
IncludeDocs, RowFunAcc),
{Go, {AccLimit - 1, 0, Resp, RowFunAcc2}}
end
@@ -439,7 +439,7 @@ make_reduce_fold_funs(Req, GroupLevel, _QueryArgs, Etag, HelperFuns) ->
when is_integer(GroupLevel), is_list(Key) ->
% group_level and we haven't responded yet
{ok, Resp2, RowAcc} = StartRespFun(Req, Etag, RowAcc0),
- {Go, RowAcc2} = SendRowFun(Resp2, {lists:sublist(Key, GroupLevel), Red}, RowAcc),
+ {Go, RowAcc2} = SendRowFun(Resp2, {lists:sublist(Key, GroupLevel), Red}, RowAcc),
{Go, {AccLimit - 1, 0, Resp2, RowAcc2}};
(Key, Red, {AccLimit, 0, Resp, RowAcc})
when is_integer(GroupLevel), is_list(Key) ->
@@ -551,15 +551,15 @@ json_reduce_start_resp(Req, Etag, _Acc0) ->
send_json_reduce_row(Resp, {Key, Value}, RowFront) ->
send_chunk(Resp, RowFront ++ ?JSON_ENCODE({[{key, Key}, {value, Value}]})),
- {ok, ",\r\n"}.
+ {ok, ",\r\n"}.
view_group_etag(Group) ->
view_group_etag(Group, nil).
-
+
view_group_etag(#group{sig=Sig,current_seq=CurrentSeq}, Extra) ->
% This is not as granular as it could be.
    % If there are updates to the db that do not affect the view index,
- % they will change the Etag. For more granular Etags we'd need to keep
+ % they will change the Etag. For more granular Etags we'd need to keep
% track of the last Db seq that caused an index change.
couch_httpd:make_etag({Sig, CurrentSeq, Extra}).
@@ -591,10 +591,10 @@ view_row_with_doc(Db, {{Key, DocId}, Value}, Rev) ->
{not_found, deleted} ->
{[{id, DocId}, {key, Key}, {value, Value}]};
Doc ->
- JsonDoc = couch_doc:to_json_obj(Doc, []),
+ JsonDoc = couch_doc:to_json_obj(Doc, []),
{[{id, DocId}, {key, Key}, {value, Value}, {doc, JsonDoc}]}
end.
-
+
finish_view_fold(Req, TotalRows, FoldResult) ->
case FoldResult of
{ok, {_, _, undefined, _}} ->
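
view_group_etag/2 above derives the ETag from the view group signature, the group's current update sequence, and (for keyed requests) the POSTed keys, so any database update that advances the index invalidates cached responses; the comment notes this is coarser than strictly necessary. couch_httpd:make_etag/1 itself is not shown in this diff; a plausible stand-in that turns an arbitrary term into an opaque quoted tag might look like this (an assumption, not the shipped helper):

    make_etag(Term) ->
        <<Sig:128/integer>> = erlang:md5(term_to_binary(Term)),
        %% entity tags are quoted strings on the wire
        "\"" ++ lists:flatten(io_lib:format("~.36b", [Sig])) ++ "\"".
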
diff --git a/src/couchdb/couch_js.c b/src/couchdb/couch_js.c
index d95b9db0..43f2da12 100644
--- a/src/couchdb/couch_js.c
+++ b/src/couchdb/couch_js.c
@@ -489,12 +489,12 @@ char* JSValToChar(JSContext* context, jsval* arg) {
jsmsg = JS_ValueToString(context,*arg);
len = JS_GetStringLength(jsmsg);
tmp = JS_GetStringBytes(jsmsg);
-
+
c = (char*)malloc(len+1);
c[len] = '\0';
int i;
-
+
for(i = 0;i < len;i++) {
c[i] = tmp[i];
}
@@ -541,11 +541,11 @@ struct curl_slist* generateCurlHeaders(JSContext* context,jsval* arg) {
}
JSObject* iterator = JS_NewPropertyIterator(context,header_obj);
-
+
jsval *jsProperty = JS_malloc(context,sizeof(jsval));
jsval *jsValue = JS_malloc(context,sizeof(jsval));
jsid *jsId = JS_malloc(context,sizeof(jsid));
-
+
while(JS_NextProperty(context,iterator,jsId) == JS_TRUE) {
if(*jsId == JSVAL_VOID) {
@@ -569,7 +569,7 @@ struct curl_slist* generateCurlHeaders(JSContext* context,jsval* arg) {
append_Buffer(bTmp,"",1);
slist = curl_slist_append(slist,bTmp->data);
-
+
free_Buffer(bTmp);
free(jsPropertyValue);
free(jsPropertyName);
@@ -595,7 +595,7 @@ GetHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
// Run GC
JS_MaybeGC(context);
-
+
// Init Curl
if((handle = curl_easy_init()) == NULL) {
return JS_FALSE;
@@ -616,7 +616,7 @@ GetHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
curl_easy_setopt(handle,CURLOPT_WRITEHEADER,b);
curl_easy_setopt(handle,CURLOPT_URL,url);
curl_easy_setopt(handle,CURLOPT_HTTPGET,1);
- curl_easy_setopt(handle,CURLOPT_FOLLOWLOCATION,1);
+ curl_easy_setopt(handle,CURLOPT_FOLLOWLOCATION,1);
curl_easy_setopt(handle,CURLOPT_NOPROGRESS,1);
curl_easy_setopt(handle,CURLOPT_IPRESOLVE,CURL_IPRESOLVE_V4);
@@ -654,7 +654,7 @@ GetHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
/* Shrink the buffer to the real size and store its value in rval */
shrink_Buffer(b);
BufferToJSVal(context,b,rval);
-
+
// Free Buffer
free_Buffer(b);
@@ -679,7 +679,7 @@ HeadHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval
// Run GC
JS_MaybeGC(context);
-
+
// Init Curl
if((handle = curl_easy_init()) == NULL) {
return JS_FALSE;
@@ -741,7 +741,7 @@ HeadHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval
/* Shrink the buffer to the real size and store its value in rval */
shrink_Buffer(b);
BufferToJSVal(context,b,rval);
-
+
// Free Buffer
free_Buffer(b);
@@ -803,7 +803,7 @@ PostHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval
struct curl_slist *slist = generateCurlHeaders(context,argv+2); // Initialize Headers
if(slist != NULL) {
- curl_easy_setopt(handle,CURLOPT_HTTPHEADER,slist);
+ curl_easy_setopt(handle,CURLOPT_HTTPHEADER,slist);
}
int exitcode;
@@ -858,17 +858,17 @@ PutHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
// Allocate buffer that will store the get resultant
b = init_Buffer();
-
+
// Allocate data buffer and move data into them
b_data = (BufferCount)malloc(sizeof(Buffer) + sizeof(int));
b_data->buffer = init_Buffer();
b_data->pos = 0;
-
+
data = JSValToChar(context,(argv+1));
readlen = strlen(data);
-
-
+
+
// TODO: remove strlen
append_Buffer(b_data->buffer,data,readlen);
@@ -893,7 +893,7 @@ PutHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
curl_easy_setopt(handle,CURLOPT_URL,url);
curl_easy_setopt(handle,CURLOPT_UPLOAD,1);
curl_easy_setopt(handle,CURLOPT_INFILESIZE,readlen);
-
+
// Curl structure
@@ -908,11 +908,11 @@ PutHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
// Use only ipv4
curl_easy_setopt(handle,CURLOPT_IPRESOLVE,CURL_IPRESOLVE_V4);
-
+
// Perform
int exitcode;
-
+
if((exitcode = curl_easy_perform(handle)) != 0) {
if(slist != NULL)
curl_slist_free_all(slist);
@@ -939,7 +939,7 @@ PutHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
shrink_Buffer(b);
BufferToJSVal(context,b,rval);
-
+
free_Buffer(b);
if(rval == NULL) {
@@ -1023,7 +1023,7 @@ DelHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
shrink_Buffer(b);
BufferToJSVal(context,b,rval);
-
+
if(rval == NULL) {
curl_easy_cleanup(handle);
return JS_FALSE;
@@ -1105,7 +1105,7 @@ CopyHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval
shrink_Buffer(b);
BufferToJSVal(context,b,rval);
-
+
if(rval == NULL) {
curl_easy_cleanup(handle);
return JS_FALSE;
@@ -1187,7 +1187,7 @@ MoveHttp(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval
shrink_Buffer(b);
BufferToJSVal(context,b,rval);
-
+
if(rval == NULL) {
curl_easy_cleanup(handle);
return JS_FALSE;
diff --git a/src/couchdb/couch_key_tree.erl b/src/couchdb/couch_key_tree.erl
index d08f5ede..3177087d 100644
--- a/src/couchdb/couch_key_tree.erl
+++ b/src/couchdb/couch_key_tree.erl
@@ -26,14 +26,14 @@
% partial trees arranged by how much they are cut off.
merge(A, B) ->
- {Merged, HasConflicts} =
+ {Merged, HasConflicts} =
lists:foldl(
fun(InsertTree, {AccTrees, AccConflicts}) ->
{ok, Merged, Conflicts} = merge_one(AccTrees, InsertTree, [], false),
{Merged, Conflicts or AccConflicts}
end,
{A, false}, B),
- if HasConflicts or
+ if HasConflicts or
((length(Merged) /= length(A)) and (length(Merged) /= length(B))) ->
Conflicts = conflicts;
true ->
@@ -61,7 +61,7 @@ merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, OutAcc, ConflictsAcc)
no ->
merge_one(Rest, {StartB, TreeB}, [{StartA, TreeA} | OutAcc], ConflictsAcc)
end.
-
+
merge_at([], _Place, _Insert) ->
no;
merge_at([{Key, Value, SubTree}|Sibs], 0, {InsertKey, InsertValue, InsertSubTree}) ->
@@ -120,7 +120,7 @@ find_missing([{Start, {Key, Value, SubTree}} | RestTree], SeachKeys) ->
ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos < Start],
Missing = find_missing_simple(Start, [{Key, Value, SubTree}], PossibleKeys),
find_missing(RestTree, ImpossibleKeys ++ Missing).
-
+
find_missing_simple(_Pos, _Tree, []) ->
[];
find_missing_simple(_Pos, [], SeachKeys) ->
@@ -128,7 +128,7 @@ find_missing_simple(_Pos, [], SeachKeys) ->
find_missing_simple(Pos, [{Key, _, SubTree} | RestTree], SeachKeys) ->
PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos >= Pos],
ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SeachKeys, KeyPos < Pos],
-
+
SrcKeys2 = PossibleKeys -- [{Pos, Key}],
SrcKeys3 = find_missing_simple(Pos + 1, SubTree, SrcKeys2),
ImpossibleKeys ++ find_missing_simple(Pos, RestTree, SrcKeys3).
@@ -145,15 +145,15 @@ filter_leafs([{Pos, [{LeafKey, _}|_]} = Path |Rest], Keys, FilteredAcc, RemovedK
% this did match a key, remove both the node and the input key
filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc])
end.
-
+
% Removes any branches from the tree whose leaf node(s) are in the Keys
remove_leafs(Trees, Keys) ->
% flatten each branch in a tree into a tree path
Paths = get_all_leafs_full(Trees),
-
+
% filter out any that are in the keys list.
{FilteredPaths, RemovedKeys} = filter_leafs(Paths, Keys, [], []),
-
+
% convert paths back to trees
NewTree = lists:foldl(
fun({PathPos, Path},TreeAcc) ->
@@ -170,7 +170,7 @@ remove_leafs(Trees, Keys) ->
% are returned.
get_key_leafs(Tree, Keys) ->
get_key_leafs(Tree, Keys, []).
-
+
get_key_leafs(_, [], Acc) ->
{Acc, []};
get_key_leafs([], Keys, Acc) ->
@@ -178,14 +178,14 @@ get_key_leafs([], Keys, Acc) ->
get_key_leafs([{Pos, Tree}|Rest], Keys, Acc) ->
{Gotten, RemainingKeys} = get_key_leafs_simple(Pos, [Tree], Keys, []),
get_key_leafs(Rest, RemainingKeys, Gotten ++ Acc).
-
+
get_key_leafs_simple(_Pos, _Tree, [], _KeyPathAcc) ->
{[], []};
get_key_leafs_simple(_Pos, [], KeysToGet, _KeyPathAcc) ->
{[], KeysToGet};
get_key_leafs_simple(Pos, [{Key, _Value, SubTree}=Tree | RestTree], KeysToGet, KeyPathAcc) ->
case lists:delete({Pos, Key}, KeysToGet) of
- KeysToGet -> % same list, key not found
+ KeysToGet -> % same list, key not found
{LeafsFound, KeysToGet2} = get_key_leafs_simple(Pos + 1, SubTree, KeysToGet, [Key | KeyPathAcc]),
{RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet2, KeyPathAcc),
{LeafsFound ++ RestLeafsFound, KeysRemaining};
@@ -201,10 +201,10 @@ get(Tree, KeysToGet) ->
{KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet),
FixedResults = [ {Value, {Pos, [Key0 || {Key0, _} <- Path]}} || {Pos, [{_Key, Value}|_]=Path} <- KeyPaths],
{FixedResults, KeysNotFound}.
-
+
get_full_key_paths(Tree, Keys) ->
get_full_key_paths(Tree, Keys, []).
-
+
get_full_key_paths(_, [], Acc) ->
{Acc, []};
get_full_key_paths([], Keys, Acc) ->
@@ -212,8 +212,8 @@ get_full_key_paths([], Keys, Acc) ->
get_full_key_paths([{Pos, Tree}|Rest], Keys, Acc) ->
{Gotten, RemainingKeys} = get_full_key_paths(Pos, [Tree], Keys, []),
get_full_key_paths(Rest, RemainingKeys, Gotten ++ Acc).
-
-
+
+
get_full_key_paths(_Pos, _Tree, [], _KeyPathAcc) ->
{[], []};
get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) ->
@@ -233,12 +233,12 @@ get_full_key_paths(Pos, [{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPath
get_all_leafs_full(Tree) ->
get_all_leafs_full(Tree, []).
-
+
get_all_leafs_full([], Acc) ->
Acc;
get_all_leafs_full([{Pos, Tree} | Rest], Acc) ->
get_all_leafs_full(Rest, get_all_leafs_full_simple(Pos, [Tree], []) ++ Acc).
-
+
get_all_leafs_full_simple(_Pos, [], _KeyPathAcc) ->
[];
get_all_leafs_full_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
@@ -253,7 +253,7 @@ get_all_leafs([], Acc) ->
Acc;
get_all_leafs([{Pos, Tree}|Rest], Acc) ->
get_all_leafs(Rest, get_all_leafs_simple(Pos, [Tree], []) ++ Acc).
-
+
get_all_leafs_simple(_Pos, [], _KeyPathAcc) ->
[];
get_all_leafs_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
@@ -266,7 +266,7 @@ count_leafs([]) ->
0;
count_leafs([{_Pos,Tree}|Rest]) ->
count_leafs_simple([Tree]) + count_leafs(Rest).
-
+
count_leafs_simple([]) ->
0;
count_leafs_simple([{_Key, _Value, []} | RestTree]) ->
@@ -274,7 +274,7 @@ count_leafs_simple([{_Key, _Value, []} | RestTree]) ->
count_leafs_simple([{_Key, _Value, SubTree} | RestTree]) ->
count_leafs_simple(SubTree) + count_leafs_simple(RestTree).
-
+
map(_Fun, []) ->
[];
map(Fun, [{Pos, Tree}|Rest]) ->
@@ -287,7 +287,7 @@ map_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
Value2 = Fun({Pos, Key}, Value),
[{Key, Value2, map_simple(Fun, Pos + 1, SubTree)} | map_simple(Fun, Pos, RestTree)].
-
+
map_leafs(_Fun, []) ->
[];
map_leafs(Fun, [{Pos, Tree}|Rest]) ->
@@ -306,9 +306,9 @@ map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
stem(Trees, Limit) ->
% flatten each branch in a tree into a tree path
Paths = get_all_leafs_full(Trees),
-
+
Paths2 = [{Pos, lists:sublist(Path, Limit)} || {Pos, Path} <- Paths],
-
+
% convert paths back to trees
lists:foldl(
fun({PathPos, Path},TreeAcc) ->
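
stem/2 above flattens every branch into a leaf-first path, truncates each path to the newest Limit revisions with lists:sublist/2, and folds the truncated paths back into trees with merge/2. A worked example with made-up revision ids (real keys are revision id binaries):

    %% one linear branch r1 -> r2 -> r3, rooted at position 1
    Tree = [{1, {r1, v1, [{r2, v2, [{r3, v3, []}]}]}}],
    %% keeping the two newest revisions drops r1 and re-roots the branch at position 2
    [{2, {r2, v2, [{r3, v3, []}]}}] = couch_key_tree:stem(Tree, 2).
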
diff --git a/src/couchdb/couch_log.erl b/src/couchdb/couch_log.erl
index 14c262d0..b5507bb6 100644
--- a/src/couchdb/couch_log.erl
+++ b/src/couchdb/couch_log.erl
@@ -43,7 +43,7 @@ stop() ->
init([]) ->
% read config and register for configuration changes
-
+
% just stop if one of the config settings change. couch_server_sup
% will restart us and then we will pick up the new settings.
ok = couch_config:register(
@@ -52,7 +52,7 @@ init([]) ->
("log", "level") ->
?MODULE:stop()
end),
-
+
Filename = couch_config:get("log", "file", "couchdb.log"),
Level = couch_config:get("log", "level", "info"),
diff --git a/src/couchdb/couch_query_servers.erl b/src/couchdb/couch_query_servers.erl
index 5a1dc90a..077a7518 100644
--- a/src/couchdb/couch_query_servers.erl
+++ b/src/couchdb/couch_query_servers.erl
@@ -18,7 +18,7 @@
-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3,stop/0]).
-export([start_doc_map/2, map_docs/2, stop_doc_map/1]).
-export([reduce/3, rereduce/3,validate_doc_update/5]).
--export([render_doc_show/6, start_view_list/2,
+-export([render_doc_show/6, start_view_list/2,
render_list_head/4, render_list_row/3, render_list_tail/1]).
% -export([test/0]).
@@ -42,7 +42,7 @@ map_docs({_Lang, Pid}, Docs) ->
Results = lists:map(
fun(Doc) ->
Json = couch_doc:to_json_obj(Doc, []),
-
+
FunsResults = couch_os_process:prompt(Pid, [<<"map_doc">>, Json]),
% the results are a json array of function map yields like this:
% [FunResults1, FunResults2 ...]
@@ -90,7 +90,7 @@ rereduce(Lang, RedSrcs, ReducedValues) ->
{ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
Result;
(FunSrc, Values) ->
- [true, [Result]] =
+ [true, [Result]] =
couch_os_process:prompt(Pid, [<<"rereduce">>, [FunSrc], Values]),
Result
end, RedSrcs, Grouped)
@@ -121,7 +121,7 @@ os_reduce(_Lang, [], _KVs) ->
{ok, []};
os_reduce(Lang, OsRedSrcs, KVs) ->
Pid = get_os_process(Lang),
- OsResults = try couch_os_process:prompt(Pid,
+ OsResults = try couch_os_process:prompt(Pid,
[<<"reduce">>, OsRedSrcs, KVs]) of
[true, Reductions] -> Reductions
after
@@ -143,22 +143,22 @@ builtin_reduce(rereduce, [<<"_count">>|BuiltinReds], KVs, Acc) ->
builtin_sum_rows(KVs) ->
lists:foldl(fun
- ([_Key, Value], Acc) when is_number(Value) ->
+ ([_Key, Value], Acc) when is_number(Value) ->
Acc + Value;
- (_Else, _Acc) ->
+ (_Else, _Acc) ->
throw({invalid_value, <<"builtin _sum function requires map values to be numbers">>})
end, 0, KVs).
-
+
validate_doc_update(Lang, FunSrc, EditDoc, DiskDoc, Ctx) ->
Pid = get_os_process(Lang),
JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]),
JsonDiskDoc =
if DiskDoc == nil ->
null;
- true ->
+ true ->
couch_doc:to_json_obj(DiskDoc, [revs])
end,
- try couch_os_process:prompt(Pid,
+ try couch_os_process:prompt(Pid,
[<<"validate">>, FunSrc, JsonEditDoc, JsonDiskDoc, Ctx]) of
1 ->
ok;
@@ -181,7 +181,7 @@ render_doc_show(Lang, ShowSrc, DocId, Doc, Req, Db) ->
{DocId, nil} -> {{append_docid(DocId, JsonReqIn)}, null};
_ -> {{append_docid(DocId, JsonReqIn)}, couch_doc:to_json_obj(Doc, [revs])}
end,
- try couch_os_process:prompt(Pid,
+ try couch_os_process:prompt(Pid,
[<<"show">>, ShowSrc, JsonDoc, JsonReq]) of
FormResp ->
FormResp
@@ -209,18 +209,18 @@ render_list_row({_Lang, Pid}, _, {Key, Value}) ->
render_list_tail({Lang, Pid}) ->
JsonResp = couch_os_process:prompt(Pid, [<<"list_end">>]),
ok = ret_os_process(Lang, Pid),
- JsonResp.
-
+ JsonResp.
+
init([]) ->
-
+
% read config and register for configuration changes
-
+
% just stop if one of the config settings change. couch_server_sup
% will restart us and then we will pick up the new settings.
-
+
ok = couch_config:register(
fun("query_servers" ++ _, _) ->
?MODULE:stop()
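
builtin_reduce/4 and builtin_sum_rows/1 above evaluate the _sum and _count reductions natively instead of shipping them to the external query server; builtin_sum_rows also rejects non-numeric map values with an invalid_value error. The important asymmetry is between the reduce and rereduce phases: the first works on emitted [Key, Value] rows, later passes combine already-reduced values. A condensed sketch (error handling omitted):

    %% _sum: both phases add numbers; rows arrive as [Key, Value] pairs
    sum(reduce, KVs)      -> lists:sum([V || [_K, V] <- KVs]);
    sum(rereduce, Values) -> lists:sum(Values).

    %% _count: count rows first, then sum the partial counts
    count(reduce, KVs)      -> length(KVs);
    count(rereduce, Counts) -> lists:sum(Counts).
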
diff --git a/src/couchdb/couch_ref_counter.erl b/src/couchdb/couch_ref_counter.erl
index 0fbec729..59ede9c9 100644
--- a/src/couchdb/couch_ref_counter.erl
+++ b/src/couchdb/couch_ref_counter.erl
@@ -18,11 +18,11 @@
start(ChildProcs) ->
gen_server:start(couch_ref_counter, {self(), ChildProcs}, []).
-
-
+
+
drop(RefCounterPid) ->
drop(RefCounterPid, self()).
-
+
drop(RefCounterPid, Pid) ->
gen_server:cast(RefCounterPid, {drop, Pid}).
@@ -42,7 +42,7 @@ count(RefCounterPid) ->
{
referrers=dict:new() % a dict of each ref counting proc.
}).
-
+
init({Pid, ChildProcs}) ->
[link(ChildProc) || ChildProc <- ChildProcs],
Referrers = dict:from_list([{Pid, {erlang:monitor(process, Pid), 1}}]),
diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl
index f5b560e9..a503684b 100644
--- a/src/couchdb/couch_rep.erl
+++ b/src/couchdb/couch_rep.erl
@@ -12,7 +12,7 @@
-module(couch_rep).
-behaviour(gen_server).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
code_change/3]).
-export([replicate/2]).
@@ -24,24 +24,24 @@
-include("couch_db.hrl").
-include("../ibrowse/ibrowse.hrl").
-%% @spec replicate(Source::binary(), Target::binary()) ->
+%% @spec replicate(Source::binary(), Target::binary()) ->
%% {ok, Stats} | {error, Reason}
-%% @doc Triggers a replication. Stats is a JSON Object with the following
+%% @doc Triggers a replication. Stats is a JSON Object with the following
%% keys: session_id (UUID), source_last_seq (integer), and history (array).
%% Each element of the history is an Object with keys start_time, end_time,
%% start_last_seq, end_last_seq, missing_checked, missing_found, docs_read,
%% and docs_written.
%%
%% The supervisor will try to restart the replication in case of any error
-%% other than shutdown. Just call this function again to listen for the
+%% other than shutdown. Just call this function again to listen for the
%% result of the retry.
replicate(Source, Target) ->
-
+
{ok, HostName} = inet:gethostname(),
RepId = couch_util:to_hex(
erlang:md5(term_to_binary([HostName, Source, Target]))),
Args = [?MODULE, [RepId, Source,Target], []],
-
+
Replicator = {RepId,
{gen_server, start_link, Args},
transient,
@@ -49,31 +49,31 @@ replicate(Source, Target) ->
worker,
[?MODULE]
},
-
+
Server = case supervisor:start_child(couch_rep_sup, Replicator) of
- {ok, Pid} ->
+ {ok, Pid} ->
?LOG_INFO("starting new replication ~p at ~p", [RepId, Pid]),
Pid;
{error, already_present} ->
case supervisor:restart_child(couch_rep_sup, RepId) of
- {ok, Pid} ->
+ {ok, Pid} ->
?LOG_INFO("starting replication ~p at ~p", [RepId, Pid]),
Pid;
- {error, running} ->
+ {error, running} ->
%% this error occurs if multiple replicators are racing
%% each other to start and somebody else won. Just grab
%% the Pid by calling start_child again.
- {error, {already_started, Pid}} =
+ {error, {already_started, Pid}} =
supervisor:start_child(couch_rep_sup, Replicator),
?LOG_INFO("replication ~p already running at ~p", [RepId, Pid]),
Pid
end;
- {error, {already_started, Pid}} ->
+ {error, {already_started, Pid}} ->
?LOG_INFO("replication ~p already running at ~p", [RepId, Pid]),
Pid
end,
-
- case gen_server:call(Server, get_result, infinity) of
+
+ case gen_server:call(Server, get_result, infinity) of
retry -> replicate(Source, Target);
Else -> Else
end.
@@ -87,7 +87,7 @@ replicate(Source, Target) ->
headers
}).
-
+
-record(state, {
context,
current_seq,
@@ -103,19 +103,19 @@ replicate(Source, Target) ->
init([RepId, Source, Target]) ->
process_flag(trap_exit, true),
-
+
{ok, DbSrc, SrcName} = open_db(Source),
{ok, DbTgt, TgtName} = open_db(Target),
-
+
DocKey = ?l2b(?LOCAL_DOC_PREFIX ++ RepId),
-
+
{ok, InfoSrc} = get_db_info(DbSrc),
{ok, InfoTgt} = get_db_info(DbTgt),
-
+
ReplicationStartTime = httpd_util:rfc1123_date(),
SrcInstanceStartTime = proplists:get_value(instance_start_time, InfoSrc),
TgtInstanceStartTime = proplists:get_value(instance_start_time, InfoTgt),
-
+
RepRecDocSrc =
case open_doc(DbSrc, DocKey, []) of
{ok, SrcDoc} ->
@@ -123,7 +123,7 @@ init([RepId, Source, Target]) ->
SrcDoc;
_ -> #doc{id=DocKey}
end,
-
+
RepRecDocTgt =
case open_doc(DbTgt, DocKey, []) of
{ok, TgtDoc} ->
@@ -131,11 +131,11 @@ init([RepId, Source, Target]) ->
TgtDoc;
_ -> #doc{id=DocKey}
end,
-
+
#doc{body={RepRecProps}} = RepRecDocSrc,
#doc{body={RepRecPropsTgt}} = RepRecDocTgt,
-
- case proplists:get_value(<<"session_id">>, RepRecProps) ==
+
+ case proplists:get_value(<<"session_id">>, RepRecProps) ==
proplists:get_value(<<"session_id">>, RepRecPropsTgt) of
true ->
% if the records have the same session id,
@@ -150,7 +150,7 @@ init([RepId, Source, Target]) ->
OldSeqNum = 0,
OldHistory = []
end,
-
+
Context = [
{start_seq, OldSeqNum},
{history, OldHistory},
@@ -160,20 +160,20 @@ init([RepId, Source, Target]) ->
{src_record, RepRecDocSrc},
{tgt_record, RepRecDocTgt}
],
-
+
Stats = ets:new(replication_stats, [set, private]),
ets:insert(Stats, {total_revs,0}),
ets:insert(Stats, {missing_revs, 0}),
ets:insert(Stats, {docs_read, 0}),
ets:insert(Stats, {docs_written, 0}),
ets:insert(Stats, {doc_write_failures, 0}),
-
+
couch_task_status:add_task("Replication", <<SrcName/binary, " -> ",
TgtName/binary>>, "Starting"),
-
+
Parent = self(),
Pid = spawn_link(fun() -> enum_docs_since(Parent,DbSrc,DbTgt,{OldSeqNum,0}) end),
-
+
State = #state{
context = Context,
current_seq = OldSeqNum,
@@ -182,7 +182,7 @@ init([RepId, Source, Target]) ->
target = DbTgt,
stats = Stats
},
-
+
{ok, State}.
handle_call(get_result, From, #state{listeners=L,done=true} = State) ->
{stop, normal, State#state{listeners=[From|L]}};
@@ -200,14 +200,14 @@ handle_call({replicate_doc, {Id, Revs}}, {Pid,_}, #state{enum_pid=Pid} = State)
} = State,
ets:update_counter(Stats, missing_revs, length(Revs)),
-
+
%% get document(s)
{ok, DocResults} = open_doc_revs(Source, Id, Revs, [latest]),
Docs = [RevDoc || {ok, RevDoc} <- DocResults],
ets:update_counter(Stats, docs_read, length(Docs)),
-
+
%% save them (maybe in a buffer)
- {NewBuffer, NewContext} =
+ {NewBuffer, NewContext} =
case should_flush(lists:flatlength([Docs|Buffer])) of
true ->
Docs2 = lists:flatten([Docs|Buffer]),
@@ -227,7 +227,7 @@ handle_call({replicate_doc, {Id, Revs}}, {Pid,_}, #state{enum_pid=Pid} = State)
false ->
{[Docs | Buffer], Context}
end,
-
+
{reply, ok, State#state{context=NewContext, docs_buffer=NewBuffer}};
handle_call({fin, {LastSeq, RevsCount}}, {Pid,_}, #state{enum_pid=Pid} = State) ->
@@ -255,7 +255,7 @@ handle_info({'EXIT', Pid, Reason}, #state{enum_pid=Pid} = State) ->
Parent = self(),
NewPid = spawn_link(fun() -> enum_docs_since(Parent,Src,Tgt,{Seq,0}) end),
{noreply, State#state{enum_pid=NewPid}};
-
+
%% if any linked process dies, respawn the enumerator to get things going again
handle_info({'EXIT', _From, normal}, State) ->
{noreply, State};
@@ -277,7 +277,7 @@ terminate(normal, State) ->
target = Target,
stats = Stats
} = State,
-
+
try update_docs(Target, lists:flatten(Buffer), [], replicated_changes) of
{ok, Errors} ->
dump_update_errors(Errors),
@@ -289,18 +289,18 @@ terminate(normal, State) ->
?LOG_ERROR("attachment request failed during final write", []),
exit({internal_server_error, replication_link_failure})
end,
-
+
couch_task_status:update("Finishing"),
-
+
{ok, NewRepHistory, _} = do_checkpoint(Source, Target, Context, Seq, Stats),
ets:delete(Stats),
close_db(Target),
-
+
[Original|Rest] = Listeners,
gen_server:reply(Original, {ok, NewRepHistory}),
-
- %% maybe trigger another replication. If this replicator uses a local
- %% source Db, changes to that Db since we started will not be included in
+
+ %% maybe trigger another replication. If this replicator uses a local
+ %% source Db, changes to that Db since we started will not be included in
%% this pass.
case up_to_date(Source, Seq) of
true ->
@@ -319,9 +319,9 @@ terminate(Reason, State) ->
target = Target,
stats = Stats
} = State,
-
+
[gen_server:reply(L, {error, Reason}) || L <- Listeners],
-
+
ets:delete(Stats),
close_db(Target),
close_db(Source).
@@ -345,19 +345,19 @@ dump_update_errors([{{Id, Rev}, Error}|Rest]) ->
attachment_loop(ReqId, Conn) ->
couch_util:should_flush(),
- receive
+ receive
{From, {set_req_id, NewId}} ->
%% we learn the ReqId to listen for
From ! {self(), {ok, NewId}},
attachment_loop(NewId, Conn);
{ibrowse_async_headers, ReqId, Status, Headers} ->
%% we got header, give the controlling process a chance to react
- receive
- {From, gimme_status} ->
+ receive
+ {From, gimme_status} ->
%% send status/headers to controller
From ! {self(), {status, Status, Headers}},
receive
- {From, continue} ->
+ {From, continue} ->
%% normal case
attachment_loop(ReqId, Conn);
{From, fail} ->
@@ -382,7 +382,7 @@ attachment_loop(ReqId, Conn) ->
?LOG_ERROR("streaming attachment failed with ~p", [Err]),
catch ibrowse:stop_worker_process(Conn),
exit(attachment_request_failed);
- {ibrowse_async_response, ReqId, Data} ->
+ {ibrowse_async_response, ReqId, Data} ->
receive {From, gimme_data} -> From ! {self(), Data} end,
attachment_loop(ReqId, Conn);
{ibrowse_async_response_end, ReqId} ->
@@ -396,7 +396,7 @@ attachment_stub_converter(DbS, Id, Rev, {Name, {stub, Type, Length}}) ->
Url = lists:flatten([DbUrl, url_encode(Id), "/", url_encode(?b2l(Name)),
"?rev=", ?b2l(couch_doc:rev_to_str({Pos,RevId}))]),
?LOG_DEBUG("Attachment URL ~s", [Url]),
- {ok, RcvFun} = make_attachment_stub_receiver(Url, Headers, Name,
+ {ok, RcvFun} = make_attachment_stub_receiver(Url, Headers, Name,
Type, Length),
{Name, {Type, {RcvFun, Length}}}.
@@ -404,21 +404,21 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length) ->
make_attachment_stub_receiver(Url, Headers, Name, Type, Length, 10, 1000).
make_attachment_stub_receiver(Url, _Headers, _Name, _Type, _Length, 0, _Pause) ->
- ?LOG_ERROR("streaming attachment request failed after 10 retries: ~s",
+ ?LOG_ERROR("streaming attachment request failed after 10 retries: ~s",
[Url]),
exit({attachment_request_failed, ?l2b(["failed to replicate ", Url])});
-
+
make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause) ->
%% start the process that receives attachment data from ibrowse
#url{host=Host, port=Port} = ibrowse_lib:parse_url(Url),
{ok, Conn} = ibrowse:spawn_link_worker_process(Host, Port),
Pid = spawn_link(fun() -> attachment_loop(nil, Conn) end),
-
+
%% make the async request
Opts = [{stream_to, Pid}, {response_format, binary}],
- ReqId =
+ ReqId =
case ibrowse:send_req_direct(Conn, Url, Headers, get, [], Opts, infinity) of
- {ibrowse_req_id, X} ->
+ {ibrowse_req_id, X} ->
X;
{error, Reason} ->
?LOG_INFO("retrying couch_rep attachment request in ~p " ++
@@ -428,11 +428,11 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause)
make_attachment_stub_receiver(Url, Headers, Name, Type, Length,
Retries-1, 2*Pause)
end,
-
+
%% tell our receiver about the ReqId it needs to look for
Pid ! {self(), {set_req_id, ReqId}},
- receive
- {Pid, {ok, ReqId}} ->
+ receive
+ {Pid, {ok, ReqId}} ->
ok;
{'EXIT', Pid, _Reason} ->
catch ibrowse:stop_worker_process(Conn),
@@ -440,19 +440,19 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause)
make_attachment_stub_receiver(Url, Headers, Name, Type, Length,
Retries-1, 2*Pause)
end,
-
+
%% wait for headers to ensure that we have a 200 status code
%% this is where we follow redirects etc
- Pid ! {self(), gimme_status},
+ Pid ! {self(), gimme_status},
receive
{'EXIT', Pid, attachment_request_failed} ->
catch ibrowse:stop_worker_process(Conn),
make_attachment_stub_receiver(Url, Headers, Name, Type, Length,
Retries-1, Pause);
- {Pid, {status, StreamStatus, StreamHeaders}} ->
+ {Pid, {status, StreamStatus, StreamHeaders}} ->
?LOG_DEBUG("streaming attachment Status ~p Headers ~p",
[StreamStatus, StreamHeaders]),
-
+
ResponseCode = list_to_integer(StreamStatus),
if
ResponseCode >= 200, ResponseCode < 300 ->
@@ -461,10 +461,10 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause)
%% this function goes into the streaming attachment code.
%% It gets executed by the replication gen_server, so it can't
%% be the one to actually receive the ibrowse data.
- {ok, fun() ->
- Pid ! {self(), gimme_data},
- receive
- {Pid, Data} ->
+ {ok, fun() ->
+ Pid ! {self(), gimme_data},
+ receive
+ {Pid, Data} ->
Data;
{'EXIT', Pid, attachment_request_failed} ->
throw(attachment_write_failed)
@@ -473,25 +473,25 @@ make_attachment_stub_receiver(Url, Headers, Name, Type, Length, Retries, Pause)
ResponseCode >= 300, ResponseCode < 400 ->
% follow the redirect
Pid ! {self(), stop_ok},
- RedirectUrl = mochiweb_headers:get_value("Location",
+ RedirectUrl = mochiweb_headers:get_value("Location",
mochiweb_headers:make(StreamHeaders)),
catch ibrowse:stop_worker_process(Conn),
make_attachment_stub_receiver(RedirectUrl, Headers, Name, Type,
Length, Retries - 1, Pause);
- ResponseCode >= 400, ResponseCode < 500 ->
+ ResponseCode >= 400, ResponseCode < 500 ->
% an error... log and fail
- ?LOG_ERROR("streaming attachment failed with code ~p: ~s",
+ ?LOG_ERROR("streaming attachment failed with code ~p: ~s",
[ResponseCode, Url]),
Pid ! {self(), fail},
exit(attachment_request_failed);
ResponseCode == 500 ->
% an error... log and retry
- ?LOG_INFO("retrying couch_rep attachment request in ~p " ++
+ ?LOG_INFO("retrying couch_rep attachment request in ~p " ++
"seconds due to 500 response: ~s", [Pause/1000, Url]),
Pid ! {self(), fail},
catch ibrowse:stop_worker_process(Conn),
timer:sleep(Pause),
- make_attachment_stub_receiver(Url, Headers, Name, Type, Length,
+ make_attachment_stub_receiver(Url, Headers, Name, Type, Length,
Retries - 1, 2*Pause)
end
end.
@@ -522,28 +522,28 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) ->
{src_record, #doc{body={LastRepRecord}}=RepRecDocSrc},
{tgt_record, RepRecDocTgt}
] = Context,
-
+
case NewSeqNum == StartSeqNum andalso OldHistory /= [] of
true ->
% nothing changed, don't record results
{ok, {[{<<"no_changes">>, true} | LastRepRecord]}, Context};
false ->
% something changed, record results for incremental replication,
-
+
% commit changes to both src and tgt. The src because if changes
- % we replicated are lost, we'll record the a seq number ahead
+ % we replicated are lost, we'll record the a seq number ahead
% of what was committed. If those changes are lost and the seq number
% reverts to a previous committed value, we will skip future changes
% when new doc updates are given our already replicated seq nums.
-
+
% commit the src async
ParentPid = self(),
- SrcCommitPid = spawn_link(fun() ->
+ SrcCommitPid = spawn_link(fun() ->
ParentPid ! {self(), ensure_full_commit(Source)} end),
-
+
% commit tgt sync
{ok, TgtInstanceStartTime2} = ensure_full_commit(Target),
-
+
SrcInstanceStartTime2 =
receive
{SrcCommitPid, {ok, Timestamp}} ->
@@ -551,7 +551,7 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) ->
{'EXIT', SrcCommitPid, {http_request_failed, _}} ->
exit(replication_link_failure)
end,
-
+
RecordSeqNum =
if SrcInstanceStartTime2 == SrcInstanceStartTime andalso
TgtInstanceStartTime2 == TgtInstanceStartTime ->
@@ -562,7 +562,7 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) ->
"replication is redone and documents reexamined.", []),
StartSeqNum
end,
-
+
NewHistoryEntry = {
[{<<"start_time">>, list_to_binary(ReplicationStartTime)},
{<<"end_time">>, list_to_binary(httpd_util:rfc1123_date())},
@@ -582,11 +582,11 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) ->
{<<"source_last_seq">>, RecordSeqNum},
{<<"history">>, HistEntries}]},
- {ok, {SrcRevPos,SrcRevId}} = update_doc(Source,
+ {ok, {SrcRevPos,SrcRevId}} = update_doc(Source,
RepRecDocSrc#doc{body=NewRepHistory}, []),
{ok, {TgtRevPos,TgtRevId}} = update_doc(Target,
RepRecDocTgt#doc{body=NewRepHistory}, []),
-
+
NewContext = [
{start_seq, StartSeqNum},
{history, OldHistory},
@@ -596,9 +596,9 @@ do_checkpoint(Source, Target, Context, NewSeqNum, Stats) ->
{src_record, RepRecDocSrc#doc{revs={SrcRevPos,[SrcRevId]}}},
{tgt_record, RepRecDocTgt#doc{revs={TgtRevPos,[TgtRevId]}}}
],
-
+
{ok, NewRepHistory, NewContext}
-
+
end.
do_http_request(Url, Action, Headers) ->
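
The checkpoint code above commits both databases, then compares each instance_start_time from before and after the commit; only if neither database restarted does it record the new source sequence, otherwise it keeps the old start sequence so the possibly-lost changes are re-examined on the next run. The decision stated as a tiny helper (illustrative only):

    record_seq(NewSeqNum, _StartSeqNum, {SrcT, SrcT}, {TgtT, TgtT}) ->
        NewSeqNum;                    % both instance start times unchanged
    record_seq(_NewSeqNum, StartSeqNum, _Src, _Tgt) ->
        StartSeqNum.                  % a database restarted; play it safe
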
@@ -610,7 +610,7 @@ do_http_request(Url, Action, Headers, JsonBody) ->
do_http_request(Url, Action, Headers, Body, Retries, Pause) when is_binary(Url) ->
do_http_request(?b2l(Url), Action, Headers, Body, Retries, Pause);
do_http_request(Url, Action, _Headers, _JsonBody, 0, _Pause) ->
- ?LOG_ERROR("couch_rep HTTP ~p request failed after 10 retries: ~s",
+ ?LOG_ERROR("couch_rep HTTP ~p request failed after 10 retries: ~s",
[Action, Url]),
exit({http_request_failed, ?l2b(["failed to replicate ", Url])});
do_http_request(Url, Action, Headers, JsonBody, Retries, Pause) ->
@@ -637,27 +637,27 @@ do_http_request(Url, Action, Headers, JsonBody, Retries, Pause) ->
ResponseCode >= 200, ResponseCode < 300 ->
?JSON_DECODE(ResponseBody);
ResponseCode >= 300, ResponseCode < 400 ->
- RedirectUrl = mochiweb_headers:get_value("Location",
+ RedirectUrl = mochiweb_headers:get_value("Location",
mochiweb_headers:make(ResponseHeaders)),
do_http_request(RedirectUrl, Action, Headers, JsonBody, Retries-1,
Pause);
- ResponseCode >= 400, ResponseCode < 500 ->
- ?JSON_DECODE(ResponseBody);
+ ResponseCode >= 400, ResponseCode < 500 ->
+ ?JSON_DECODE(ResponseBody);
ResponseCode == 500 ->
- ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds " ++
+ ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds " ++
"due to 500 error: ~s", [Action, Pause/1000, Url]),
timer:sleep(Pause),
do_http_request(Url, Action, Headers, JsonBody, Retries - 1, 2*Pause)
end;
{error, Reason} ->
- ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds due to " ++
+ ?LOG_INFO("retrying couch_rep HTTP ~p request in ~p seconds due to " ++
"{error, ~p}: ~s", [Action, Pause/1000, Reason, Url]),
timer:sleep(Pause),
do_http_request(Url, Action, Headers, JsonBody, Retries - 1, 2*Pause)
end.
ensure_full_commit(#http_db{uri=DbUrl, headers=Headers}) ->
- {ResultProps} = do_http_request(DbUrl ++ "_ensure_full_commit", post,
+ {ResultProps} = do_http_request(DbUrl ++ "_ensure_full_commit", post,
Headers, true),
true = proplists:get_value(<<"ok">>, ResultProps),
{ok, proplists:get_value(<<"instance_start_time">>, ResultProps)};
@@ -672,22 +672,22 @@ enum_docs_since(Pid, DbSource, DbTarget, {StartSeq, RevsCount}) ->
SrcRevsList = lists:map(fun(#doc_info{id=Id,revs=RevInfos}) ->
SrcRevs = [Rev || #rev_info{rev=Rev} <- RevInfos],
{Id, SrcRevs}
- end, DocInfoList),
+ end, DocInfoList),
{ok, MissingRevs} = get_missing_revs(DbTarget, SrcRevsList),
-
+
%% do we need to check for success here?
- [gen_server:call(Pid, {replicate_doc, Info}, infinity)
+ [gen_server:call(Pid, {replicate_doc, Info}, infinity)
|| Info <- MissingRevs ],
-
+
#doc_info{high_seq=LastSeq} = lists:last(DocInfoList),
RevsCount2 = RevsCount + length(SrcRevsList),
gen_server:cast(Pid, {increment_update_seq, LastSeq}),
-
+
enum_docs_since(Pid, DbSource, DbTarget, {LastSeq, RevsCount2})
end.
-
+
get_db_info(#http_db{uri=DbUrl, headers=Headers}) ->
{DbProps} = do_http_request(DbUrl, get, Headers),
{ok, [{list_to_atom(?b2l(K)), V} || {K,V} <- DbProps]};
@@ -695,14 +695,14 @@ get_db_info(Db) ->
couch_db:get_db_info(Db).
get_doc_info_list(#http_db{uri=DbUrl, headers=Headers}, StartSeq) ->
- Url = DbUrl ++ "_all_docs_by_seq?limit=100&startkey="
+ Url = DbUrl ++ "_all_docs_by_seq?limit=100&startkey="
++ integer_to_list(StartSeq),
{Results} = do_http_request(Url, get, Headers),
lists:map(fun({RowInfoList}) ->
{RowValueProps} = proplists:get_value(<<"value">>, RowInfoList),
Seq = proplists:get_value(<<"key">>, RowInfoList),
- Revs =
- [#rev_info{rev=couch_doc:parse_rev(proplists:get_value(<<"rev">>, RowValueProps)), deleted = proplists:get_value(<<"deleted">>, RowValueProps, false)} |
+ Revs =
+ [#rev_info{rev=couch_doc:parse_rev(proplists:get_value(<<"rev">>, RowValueProps)), deleted = proplists:get_value(<<"deleted">>, RowValueProps, false)} |
[#rev_info{rev=Rev,deleted=false} || Rev <- couch_doc:parse_revs(proplists:get_value(<<"conflicts">>, RowValueProps, []))] ++
[#rev_info{rev=Rev,deleted=true} || Rev <- couch_doc:parse_revs(proplists:get_value(<<"deleted_conflicts">>, RowValueProps, []))]],
#doc_info{
@@ -712,11 +712,11 @@ get_doc_info_list(#http_db{uri=DbUrl, headers=Headers}, StartSeq) ->
}
end, proplists:get_value(<<"rows">>, Results));
get_doc_info_list(DbSource, StartSeq) ->
- {ok, {_Count, DocInfoList}} = couch_db:enum_docs_since(DbSource, StartSeq,
+ {ok, {_Count, DocInfoList}} = couch_db:enum_docs_since(DbSource, StartSeq,
fun (_, _, {100, DocInfoList}) ->
{stop, {100, DocInfoList}};
- (DocInfo, _, {Count, DocInfoList}) ->
- {ok, {Count+1, [DocInfo|DocInfoList]}}
+ (DocInfo, _, {Count, DocInfoList}) ->
+ {ok, {Count+1, [DocInfo|DocInfoList]}}
end, {0, []}),
lists:reverse(DocInfoList).
@@ -742,14 +742,14 @@ open_doc(#http_db{uri=DbUrl, headers=Headers}, DocId, Options) ->
open_doc(Db, DocId, Options) ->
couch_db:open_doc(Db, DocId, Options).
-open_doc_revs(#http_db{uri=DbUrl, headers=Headers} = DbS, DocId, Revs0,
+open_doc_revs(#http_db{uri=DbUrl, headers=Headers} = DbS, DocId, Revs0,
[latest]) ->
Revs = couch_doc:rev_to_strs(Revs0),
BaseUrl = DbUrl ++ url_encode(DocId) ++ "?revs=true&latest=true",
-
+
%% MochiWeb expects URLs < 8KB long, so maybe split into multiple requests
MaxN = trunc((8192 - length(BaseUrl))/14),
-
+
JsonResults = case length(Revs) > MaxN of
false ->
Url = ?l2b(BaseUrl ++ "&open_revs=" ++ ?JSON_ENCODE(Revs)),
@@ -766,7 +766,7 @@ open_doc_revs(#http_db{uri=DbUrl, headers=Headers} = DbS, DocId, Revs0,
Acc ++ do_http_request(?l2b(BaseUrl ++ "&open_revs=" ++
?JSON_ENCODE(lists:reverse(Rest))), get, Headers)
end,
-
+
Results =
lists:map(
fun({[{<<"missing">>, Rev}]}) ->
@@ -791,7 +791,7 @@ should_flush(DocCount) when DocCount > ?BUFFER_NDOCS ->
should_flush(_DocCount) ->
MeAndMyLinks = [self()|
[P || P <- element(2,process_info(self(),links)), is_pid(P)]],
-
+
case length(MeAndMyLinks)/2 > ?BUFFER_NATTACHMENTS of
true -> true;
false ->
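
Both the attachment receiver and do_http_request above use the same retry discipline: on a transient failure (a connection error or a 500 response) sleep for Pause milliseconds, then retry with one attempt fewer and the pause doubled, giving up when the retries run out. A generic sketch of that loop, with Request standing in for any fun returning {ok, Result} or {error, Reason}:

    with_retries(_Request, 0, _Pause) ->
        exit(request_failed);
    with_retries(Request, Retries, Pause) ->
        case Request() of
            {ok, Result} ->
                Result;
            {error, _Reason} ->
                timer:sleep(Pause),
                with_retries(Request, Retries - 1, 2 * Pause)
        end.

With the module's defaults of 10 retries and a 1000 ms initial pause, the accumulated sleeping is bounded at roughly 17 minutes before the process gives up.
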
diff --git a/src/couchdb/couch_server.erl b/src/couchdb/couch_server.erl
index 3bf59724..7c79683e 100644
--- a/src/couchdb/couch_server.erl
+++ b/src/couchdb/couch_server.erl
@@ -42,7 +42,7 @@ start(_Type, _Args) ->
restart() ->
stop(),
start().
-
+
stop() ->
couch_server_sup:stop().
@@ -127,7 +127,7 @@ hash_admin_passwords() ->
init([]) ->
% read config and register for configuration changes
-
+
% just stop if one of the config settings change. couch_server_sup
% will restart us and then we will pick up the new settings.
@@ -292,7 +292,7 @@ handle_call({delete, DbName, _Options}, _From, Server) ->
case check_dbname(Server, DbNameList) of
ok ->
FullFilepath = get_full_filename(Server, DbNameList),
- Server2 =
+ Server2 =
case ets:lookup(couch_dbs_by_name, DbName) of
[] -> Server;
[{_, {Pid, LruTime}}] ->
@@ -303,11 +303,11 @@ handle_call({delete, DbName, _Options}, _From, Server) ->
true = ets:delete(couch_dbs_by_lru, LruTime),
Server#server{dbs_open=Server#server.dbs_open - 1}
end,
-
+
%% Delete any leftover .compact files. If we don't do this a subsequent
%% request for this DB will try to open the .compact file and use it.
file:delete(FullFilepath ++ ".compact"),
-
+
case file:delete(FullFilepath) of
ok ->
couch_db_update_notifier:notify({deleted, DbName}),
@@ -326,7 +326,7 @@ handle_cast(Msg, _Server) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
+
handle_info({'EXIT', _Pid, config_change}, _Server) ->
exit(kill);
handle_info({'EXIT', Pid, _Reason}, #server{dbs_open=DbsOpen}=Server) ->
diff --git a/src/couchdb/couch_server_sup.erl b/src/couchdb/couch_server_sup.erl
index 4c77dbe1..663c8ee0 100644
--- a/src/couchdb/couch_server_sup.erl
+++ b/src/couchdb/couch_server_sup.erl
@@ -83,7 +83,7 @@ start_server(IniFiles) ->
ok = couch_util:start_driver(LibDir),
BaseChildSpecs =
- {{one_for_all, 10, 3600},
+ {{one_for_all, 10, 3600},
[{couch_config,
{couch_server_sup, couch_config_start_link_wrapper, [IniFiles, ConfigPid]},
permanent,
@@ -130,7 +130,7 @@ start_server(IniFiles) ->
start_primary_services() ->
supervisor:start_link({local, couch_primary_services}, couch_server_sup,
- {{one_for_one, 10, 3600},
+ {{one_for_one, 10, 3600},
[{couch_log,
{couch_log, start_link, []},
permanent,
@@ -168,7 +168,7 @@ start_secondary_services() ->
DaemonChildSpecs = [
begin
{ok, {Module, Fun, Args}} = couch_util:parse_term(SpecStr),
-
+
{list_to_atom(Name),
{Module, Fun, Args},
permanent,
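
The supervisor specs in this file all use the {Strategy, MaxRestarts, MaxSeconds} form: {one_for_one, 10, 3600} tolerates at most ten child restarts per hour before the supervisor itself gives up, and the one_for_all variant additionally restarts every sibling when one child dies. For reference, a stripped-down init/1 returning a spec of this shape; only the couch_log child is shown, and the brutal_kill shutdown value is an illustrative choice rather than something taken from this diff:

    init(_Args) ->
        {ok, {{one_for_one, 10, 3600},
              [{couch_log,
                {couch_log, start_link, []},
                permanent,
                brutal_kill,
                worker,
                [couch_log]}]}}.
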
diff --git a/src/couchdb/couch_stats_aggregator.erl b/src/couchdb/couch_stats_aggregator.erl
index 821bf60f..2e8ea380 100644
--- a/src/couchdb/couch_stats_aggregator.erl
+++ b/src/couchdb/couch_stats_aggregator.erl
@@ -18,7 +18,7 @@
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
--export([start/0, stop/0,
+-export([start/0, stop/0,
get/1, get/2, get_json/1, get_json/2, all/0,
time_passed/0, clear_aggregates/1]).
@@ -34,7 +34,7 @@
start() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
+
stop() ->
gen_server:call(?MODULE, stop).
@@ -47,18 +47,18 @@ get_json(Key) ->
gen_server:call(?MODULE, {get_json, Key}).
get_json(Key, Time) ->
gen_server:call(?MODULE, {get_json, Key, Time}).
-
+
time_passed() ->
gen_server:call(?MODULE, time_passed).
-clear_aggregates(Time) ->
+clear_aggregates(Time) ->
gen_server:call(?MODULE, {clear_aggregates, Time}).
all() ->
gen_server:call(?MODULE, all).
% GEN_SERVER
-
+
init(_) ->
ets:new(?MODULE, [named_table, set, protected]),
init_timers(),
@@ -86,13 +86,13 @@ handle_call(time_passed, _, OldState) ->
% the foldls below could probably be refactored into a less code-duping form
% update aggregates on incremental counters
- NextState = lists:foldl(fun(Counter, State) ->
+ NextState = lists:foldl(fun(Counter, State) ->
{Key, Value} = Counter,
update_aggregates_loop(Key, Value, State, incremental)
end, OldState, ?COLLECTOR:all(incremental)),
% update aggregates on absolute value counters
- NewState = lists:foldl(fun(Counter, State) ->
+ NewState = lists:foldl(fun(Counter, State) ->
{Key, Value} = Counter,
% clear the counter, we've got the important bits in State
?COLLECTOR:clear(Key),
@@ -117,7 +117,7 @@ handle_call(stop, _, State) ->
% Stats = [{Key, TimesProplist}]
% TimesProplist = [{Time, Aggregates}]
% Aggregates = #aggregates{}
-%
+%
% [
% {Key, [
% {TimeA, #aggregates{}},
@@ -126,7 +126,7 @@ handle_call(stop, _, State) ->
% {TimeD, #aggregates{}}
% ]
% },
-%
+%
% ]
%% clear the aggregates record for a specific Time = 60 | 300 | 900
@@ -134,7 +134,7 @@ do_clear_aggregates(Time, #state{aggregates=Stats}) ->
NewStats = lists:map(fun({Key, TimesProplist}) ->
{Key, case proplists:lookup(Time, TimesProplist) of
% do have stats for this key, if we don't, return Stat unmodified
- none ->
+ none ->
TimesProplist;
% there are stats, let's unset the Time one
{_Time, _Stat} ->
@@ -177,12 +177,12 @@ update_aggregates_loop(Key, Values, State, CounterType) ->
% {'900',{aggregates,1,1,1,0,0,1,1}}]}]
[{_Key, StatsList}] = case proplists:lookup(Key, AllStats) of
none -> [{Key, [
- {'0', empty},
+ {'0', empty},
{'60', empty},
{'300', empty},
{'900', empty}
]}];
- AllStatsMatch ->
+ AllStatsMatch ->
[AllStatsMatch]
end,
@@ -236,7 +236,7 @@ update_aggregates(Value, Stat, CounterType) ->
incremental -> Value - Current;
absolute -> Value
end,
- % Knuth, The Art of Computer Programming, vol. 2, p. 232.
+ % Knuth, The Art of Computer Programming, vol. 2, p. 232.
NewCount = Count + 1,
NewMean = Mean + (NewValue - Mean) / NewCount, % NewCount is never 0.
NewVariance = Variance + (NewValue - Mean) * (NewValue - NewMean),
@@ -288,29 +288,29 @@ do_get_all(#state{aggregates=Stats}=State) ->
init_descriptions() ->
- % ets is probably overkill here, but I didn't manage to keep the
+ % ets is probably overkill here, but I didn't manage to keep the
% descriptions in the gen_server state. Which means there is probably
% a bug in one of the handle_call() functions most likely the one that
% handles the time_passed message. But don't tell anyone, the math is
% correct :) -- Jan
- % Style guide for descriptions: Start with a lowercase letter & do not add
+ % Style guide for descriptions: Start with a lowercase letter & do not add
% a trailing full-stop / period.
-
+
% please keep this in alphabetical order
ets:insert(?MODULE, {{couchdb, database_writes}, <<"number of times a database was changed">>}),
ets:insert(?MODULE, {{couchdb, database_reads}, <<"number of times a document was read from a database">>}),
ets:insert(?MODULE, {{couchdb, open_databases}, <<"number of open databases">>}),
ets:insert(?MODULE, {{couchdb, open_os_files}, <<"number of file descriptors CouchDB has open">>}),
ets:insert(?MODULE, {{couchdb, request_time}, <<"length of a request inside CouchDB without MochiWeb">>}),
-
+
ets:insert(?MODULE, {{httpd, bulk_requests}, <<"number of bulk requests">>}),
ets:insert(?MODULE, {{httpd, requests}, <<"number of HTTP requests">>}),
ets:insert(?MODULE, {{httpd, temporary_view_reads}, <<"number of temporary view reads">>}),
ets:insert(?MODULE, {{httpd, view_reads}, <<"number of view reads">>}),
ets:insert(?MODULE, {{httpd, clients_requesting_changes}, <<"Number of clients currently requesting continuous _changes">>}),
-
+
ets:insert(?MODULE, {{httpd_request_methods, 'COPY'}, <<"number of HTTP COPY requests">>}),
ets:insert(?MODULE, {{httpd_request_methods, 'DELETE'}, <<"number of HTTP DELETE requests">>}),
ets:insert(?MODULE, {{httpd_request_methods, 'GET'}, <<"number of HTTP GET requests">>}),
@@ -318,7 +318,7 @@ init_descriptions() ->
ets:insert(?MODULE, {{httpd_request_methods, 'MOVE'}, <<"number of HTTP MOVE requests">>}),
ets:insert(?MODULE, {{httpd_request_methods, 'POST'}, <<"number of HTTP POST requests">>}),
ets:insert(?MODULE, {{httpd_request_methods, 'PUT'}, <<"number of HTTP PUT requests">>}),
-
+
ets:insert(?MODULE, {{httpd_status_codes, '200'}, <<"number of HTTP 200 OK responses">>}),
ets:insert(?MODULE, {{httpd_status_codes, '201'}, <<"number of HTTP 201 Created responses">>}),
ets:insert(?MODULE, {{httpd_status_codes, '202'}, <<"number of HTTP 202 Accepted responses">>}),
@@ -338,12 +338,12 @@ init_descriptions() ->
% Timer
init_timers() ->
-
+
% OTP docs on timer: http://erlang.org/doc/man/timer.html
% start() -> ok
- % Starts the timer server. Normally, the server does not need to be
- % started explicitly. It is started dynamically if it is needed. This is
- % useful during development, but in a target system the server should be
+ % Starts the timer server. Normally, the server does not need to be
+ % started explicitly. It is started dynamically if it is needed. This is
+ % useful during development, but in a target system the server should be
% started explicitly. Use configuration parameters for kernel for this.
%
% TODO: Add timer_start to kernel start options.
@@ -361,7 +361,7 @@ init_timers() ->
% Unused gen_server behaviour API functions that we need to declare.
-
+
%% @doc Unused
handle_cast(foo, State) ->
{noreply, State}.
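
update_aggregates/3 above uses the running mean/variance recurrence cited from Knuth (TAOCP vol. 2, p. 232): each sample moves the mean by (X - Mean)/N and adds (X - Mean)*(X - NewMean) to a variance accumulator, and dividing that accumulator by N - 1 gives the sample variance. A self-contained sketch with a worked fold:

    update({Count, Mean, VarSum}, X) ->
        NewCount = Count + 1,
        NewMean  = Mean + (X - Mean) / NewCount,
        {NewCount, NewMean, VarSum + (X - Mean) * (X - NewMean)}.

    %% lists:foldl(fun(X, Acc) -> update(Acc, X) end, {0, 0, 0}, [2, 4, 6])
    %% returns {3, 4.0, 8.0}: mean 4.0, sample variance 8.0 / (3 - 1) = 4.0.
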
diff --git a/src/couchdb/couch_stats_collector.erl b/src/couchdb/couch_stats_collector.erl
index 9139f6cb..cec8138e 100644
--- a/src/couchdb/couch_stats_collector.erl
+++ b/src/couchdb/couch_stats_collector.erl
@@ -22,7 +22,7 @@
terminate/2, code_change/3]).
--export([start/0, stop/0, get/1,
+-export([start/0, stop/0, get/1,
increment/1, decrement/1,
track_process_count/1, track_process_count/2,
record/2, clear/1,
@@ -38,15 +38,15 @@
start() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
+
stop() ->
gen_server:call(?MODULE, stop).
get(Key) ->
case ets:lookup(?HIT_COUNTER_TABLE, Key) of
- [] ->
+ [] ->
case ets:lookup(?ABSOLUTE_VALUE_COUNTER_TABLE, Key) of
- [] ->
+ [] ->
0;
Result2 -> extract_value_from_ets_result(Key, Result2)
end;
@@ -62,7 +62,7 @@ increment(Key) ->
ok;
_ -> ok
end.
-
+
decrement(Key) ->
case catch ets:update_counter(?HIT_COUNTER_TABLE, Key, -1) of
{'EXIT', {badarg, _}} ->
@@ -70,7 +70,7 @@ decrement(Key) ->
ok;
_ -> ok
end.
-
+
record(Key, Value) ->
ets:insert(?ABSOLUTE_VALUE_COUNTER_TABLE, {Key, Value}).
@@ -78,7 +78,7 @@ clear(Key) ->
true = ets:delete(?ABSOLUTE_VALUE_COUNTER_TABLE, Key).
all() ->
- lists:append(ets:tab2list(?HIT_COUNTER_TABLE),
+ lists:append(ets:tab2list(?HIT_COUNTER_TABLE),
ets:tab2list(?ABSOLUTE_VALUE_COUNTER_TABLE)).
all(Type) ->
@@ -123,7 +123,7 @@ extract_value_from_ets_result(_Key, Result) ->
% Unused gen_server behaviour API functions that we need to declare.
-
+
%% @doc Unused
handle_cast(foo, State) ->
{noreply, State}.
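
increment/1 and decrement/1 above rely on ets:update_counter/3, which raises badarg when the key has never been counted; catching that turns the first hit into a plain insert. The same pattern in isolation (the table is assumed to be a set-type ets table created elsewhere):

    bump(Table, Key) ->
        case catch ets:update_counter(Table, Key, 1) of
            {'EXIT', {badarg, _}} ->
                true = ets:insert(Table, {Key, 1}),
                ok;
            _ ->
                ok
        end.
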
diff --git a/src/couchdb/couch_stream.erl b/src/couchdb/couch_stream.erl
index e61d6605..54234ee5 100644
--- a/src/couchdb/couch_stream.erl
+++ b/src/couchdb/couch_stream.erl
@@ -67,7 +67,7 @@ old_copy_to_new_stream(Fd, Pos, Len, DestFd) ->
end, ok),
close(Dest).
-% 09 UPGRADE CODE
+% 09 UPGRADE CODE
old_foldl(_Fd, null, 0, _Fun, Acc) ->
Acc;
old_foldl(Fd, OldPointer, Len, Fun, Acc) when is_tuple(OldPointer)->
@@ -119,7 +119,7 @@ handle_call(close, _From, Stream) ->
written_pointers = Written,
buffer_len = BufferLen,
buffer_list = Buffer} = Stream,
-
+
case Buffer of
[] ->
Result = {lists:reverse(Written), WrittenLen};
@@ -137,7 +137,7 @@ code_change(_OldVsn, State, _Extra) ->
handle_info(_Info, State) ->
{noreply, State}.
-
+
% 09 UPGRADE CODE
diff --git a/src/couchdb/couch_task_status.erl b/src/couchdb/couch_task_status.erl
index ee7bdff5..28758a00 100644
--- a/src/couchdb/couch_task_status.erl
+++ b/src/couchdb/couch_task_status.erl
@@ -59,7 +59,7 @@ set_update_frequency(Msecs) ->
update(StatusText) ->
update("~s", [StatusText]).
-
+
update(Format, Data) ->
{LastUpdateTime, Frequency} = get(task_status_update),
case timer:now_diff(Now = now(), LastUpdateTime) >= Frequency of
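
update/2 above throttles task status updates: the process dictionary remembers when the last message was pushed, and a new one is only forwarded once the configured number of microseconds has elapsed. A generic version of that guard, with the actual reporting call abstracted into Fun because the internal message format is not shown in this diff:

    maybe_report(Fun, FreqMicros) ->
        Last = case get(last_report) of
                   undefined -> {0, 0, 0};
                   T -> T
               end,
        case timer:now_diff(Now = now(), Last) >= FreqMicros of
            true ->
                put(last_report, Now),
                Fun();
            false ->
                ok
        end.
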
diff --git a/src/couchdb/couch_util.erl b/src/couchdb/couch_util.erl
index 1a2929e4..7cf19354 100644
--- a/src/couchdb/couch_util.erl
+++ b/src/couchdb/couch_util.erl
@@ -56,7 +56,7 @@ terminate_linked(Reason) ->
new_uuid() ->
list_to_binary(to_hex(crypto:rand_bytes(16))).
-
+
to_hex([]) ->
[];
to_hex(Bin) when is_binary(Bin) ->
@@ -73,7 +73,7 @@ parse_term(Bin) when is_binary(Bin)->
parse_term(List) ->
{ok, Tokens, _} = erl_scan:string(List ++ "."),
erl_parse:parse_term(Tokens).
-
+
% returns a random integer
rand32() ->
@@ -193,15 +193,15 @@ collate(A, B, Options) when is_binary(A), is_binary(B) ->
should_flush() ->
should_flush(?FLUSH_MAX_MEM).
-
+
should_flush(MemThreshHold) ->
{memory, ProcMem} = process_info(self(), memory),
- BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
+ BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
0, element(2,process_info(self(), binary))),
if ProcMem+BinMem > 2*MemThreshHold ->
garbage_collect(),
{memory, ProcMem2} = process_info(self(), memory),
- BinMem2 = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
+ BinMem2 = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
0, element(2,process_info(self(), binary))),
if ProcMem2+BinMem2 > MemThreshHold ->
true;
@@ -230,7 +230,7 @@ encodeBase64(Bs) when list(Bs) ->
encodeBase64(list_to_binary(Bs), <<>>);
encodeBase64(Bs) ->
encodeBase64(Bs, <<>>).
-
+
encodeBase64(<<B:3/binary, Bs/binary>>, Acc) ->
<<C1:6, C2:6, C3:6, C4:6>> = B,
encodeBase64(Bs, <<Acc/binary, (enc(C1)), (enc(C2)), (enc(C3)), (enc(C4))>>);
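
should_flush/1 above decides whether the calling process holds too much memory by adding its heap size to the bytes of refc binaries it references, garbage-collecting once, and measuring again. The probe on its own (Threshold in bytes):

    over_threshold(Threshold) ->
        {memory, ProcMem} = process_info(self(), memory),
        {binary, Bins} = process_info(self(), binary),
        BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Acc + Size end, 0, Bins),
        ProcMem + BinMem > Threshold.
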
diff --git a/src/couchdb/couch_view.erl b/src/couchdb/couch_view.erl
index 87feea12..93c3a493 100644
--- a/src/couchdb/couch_view.erl
+++ b/src/couchdb/couch_view.erl
@@ -24,14 +24,14 @@
-record(server,{
root_dir = []}).
-
+
start_link() ->
gen_server:start_link({local, couch_view}, couch_view, [], []).
get_temp_updater(DbName, Language, DesignOptions, MapSrc, RedSrc) ->
% make temp group
% do we need to close this db?
- {ok, _Db, Group} =
+ {ok, _Db, Group} =
couch_view_group:open_temp_group(DbName, Language, DesignOptions, MapSrc, RedSrc),
case gen_server:call(couch_view, {get_group_server, DbName, Group}) of
{ok, Pid} ->
@@ -44,7 +44,7 @@ get_group_server(DbName, GroupId) ->
% get signature for group
case couch_view_group:open_db_group(DbName, GroupId) of
% do we need to close this db?
- {ok, _Db, Group} ->
+ {ok, _Db, Group} ->
case gen_server:call(couch_view, {get_group_server, DbName, Group}) of
{ok, Pid} ->
Pid;
@@ -54,7 +54,7 @@ get_group_server(DbName, GroupId) ->
Error ->
throw(Error)
end.
-
+
get_group(Db, GroupId, Stale) ->
MinUpdateSeq = case Stale of
ok -> 0;
@@ -73,23 +73,23 @@ get_group_info(Db, GroupId) ->
couch_view_group:request_group_info(
get_group_server(couch_db:name(Db), GroupId)).
-cleanup_index_files(Db) ->
+cleanup_index_files(Db) ->
% load all ddocs
{ok, DesignDocs} = couch_db:get_design_docs(Db),
-
+
% make unique list of group sigs
Sigs = lists:map(fun(#doc{id = GroupId} = DDoc) ->
{ok, Info} = get_group_info(Db, GroupId),
?b2l(proplists:get_value(signature, Info))
end, [DD||DD <- DesignDocs, DD#doc.deleted == false]),
-
+
FileList = list_index_files(Db),
-
+
% regex that matches all ddocs
RegExp = "("++ string:join(Sigs, "|") ++")",
% filter out the ones in use
- DeleteFiles = lists:filter(fun(FilePath) ->
+ DeleteFiles = lists:filter(fun(FilePath) ->
regexp:first_match(FilePath, RegExp)==nomatch
end, FileList),
% delete unused files
@@ -108,7 +108,7 @@ get_row_count(#view{btree=Bt}) ->
{ok, Count}.
get_temp_reduce_view(Db, Language, DesignOptions, MapSrc, RedSrc) ->
- {ok, #group{views=[View]}=Group} =
+ {ok, #group{views=[View]}=Group} =
get_temp_group(Db, Language, DesignOptions, MapSrc, RedSrc),
{ok, {temp_reduce, View}, Group}.
@@ -161,7 +161,7 @@ fold_reduce({temp_reduce, #view{btree=Bt}}, Dir, StartKey, EndKey, GroupFun, Fun
couch_btree:fold_reduce(Bt, Dir, StartKey, EndKey, GroupFun,
WrapperFun, Acc);
-fold_reduce({reduce, NthRed, Lang, #view{btree=Bt, reduce_funs=RedFuns}}, Dir, StartKey, EndKey, GroupFun, Fun, Acc) ->
+fold_reduce({reduce, NthRed, Lang, #view{btree=Bt, reduce_funs=RedFuns}}, Dir, StartKey, EndKey, GroupFun, Fun, Acc) ->
PreResultPadding = lists:duplicate(NthRed - 1, []),
PostResultPadding = lists:duplicate(length(RedFuns) - NthRed, []),
{_Name, FunSrc} = lists:nth(NthRed,RedFuns),
@@ -180,7 +180,7 @@ fold_reduce({reduce, NthRed, Lang, #view{btree=Bt, reduce_funs=RedFuns}}, Dir, S
end,
couch_btree:fold_reduce(Bt, Dir, StartKey, EndKey, GroupFun,
WrapperFun, Acc).
-
+
get_key_pos(_Key, [], _N) ->
0;
get_key_pos(Key, [{Key1,_Value}|_], N) when Key == Key1 ->
@@ -215,7 +215,7 @@ get_map_view0(Name, [#view{map_names=MapNames}=View|Rest]) ->
end.
reduce_to_count(Reductions) ->
- {Count, _} =
+ {Count, _} =
couch_btree:final_reduce(
fun(reduce, KVs) ->
Count = lists:sum(
@@ -226,9 +226,9 @@ reduce_to_count(Reductions) ->
{lists:sum([Count0 || {Count0, _} <- Reds]), []}
end, Reductions),
Count.
-
-
+
+
fold_fun(_Fun, [], _, Acc) ->
{ok, Acc};
fold_fun(Fun, [KV|Rest], {KVReds, Reds}, Acc) ->
@@ -258,7 +258,7 @@ init([]) ->
fun("couchdb", "view_index_dir")->
exit(Self, config_change)
end),
-
+
couch_db_update_notifier:start_link(
fun({deleted, DbName}) ->
gen_server:cast(couch_view, {reset_indexes, DbName});
@@ -279,11 +279,11 @@ terminate(Reason, _Srv) ->
ok.
-handle_call({get_group_server, DbName,
+handle_call({get_group_server, DbName,
#group{name=GroupId,sig=Sig}=Group}, _From, #server{root_dir=Root}=Server) ->
case ets:lookup(group_servers_by_sig, {DbName, Sig}) of
[] ->
- ?LOG_DEBUG("Spawning new group server for view group ~s in database ~s.",
+ ?LOG_DEBUG("Spawning new group server for view group ~s in database ~s.",
[GroupId, DbName]),
case (catch couch_view_group:start_link({Root, DbName, Group})) of
{ok, NewPid} ->
@@ -325,12 +325,12 @@ handle_info({'EXIT', FromPid, Reason}, Server) ->
delete_from_ets(FromPid, DbName, GroupId)
end,
{noreply, Server}.
-
+
add_to_ets(Pid, DbName, Sig) ->
true = ets:insert(couch_groups_by_updater, {Pid, {DbName, Sig}}),
true = ets:insert(group_servers_by_sig, {{DbName, Sig}, Pid}),
true = ets:insert(couch_groups_by_db, {DbName, Sig}).
-
+
delete_from_ets(Pid, DbName, Sig) ->
true = ets:delete(couch_groups_by_updater, Pid),
true = ets:delete(group_servers_by_sig, {DbName, Sig}),
@@ -356,7 +356,7 @@ nuke_dir(Dir) ->
ok = nuke_dir(Full)
end
end,
- Files),
+ Files),
ok = file:del_dir(Dir)
end.
@@ -400,7 +400,7 @@ less_same_type(A, B) when is_list(A) ->
less_list(A, B);
less_same_type(A, B) ->
A < B.
-
+
less_props([], [_|_]) ->
true;
less_props(_, []) ->
diff --git a/src/couchdb/couch_view_compactor.erl b/src/couchdb/couch_view_compactor.erl
index 63c0ff75..22e58223 100644
--- a/src/couchdb/couch_view_compactor.erl
+++ b/src/couchdb/couch_view_compactor.erl
@@ -34,20 +34,20 @@ compact_group(Group, EmptyGroup) ->
name = GroupId,
views = Views
} = Group,
-
+
#group{
db = Db,
id_btree = EmptyIdBtree,
views = EmptyViews
} = EmptyGroup,
-
+
{ok, {Count, _}} = couch_btree:full_reduce(Db#db.fulldocinfo_by_id_btree),
-
+
<<"_design", ShortName/binary>> = GroupId,
DbName = couch_db:name(Db),
TaskName = <<DbName/binary, ShortName/binary>>,
couch_task_status:add_task(<<"View Group Compaction">>, TaskName, <<"">>),
-
+
Fun = fun(KV, {Bt, Acc, TotalCopied}) ->
if TotalCopied rem 10000 == 0 ->
couch_task_status:update("Copied ~p of ~p Ids (~p%)",
@@ -58,27 +58,27 @@ compact_group(Group, EmptyGroup) ->
{ok, {Bt, [KV|Acc], TotalCopied+1}}
end
end,
- {ok, {Bt3, Uncopied, _Total}} = couch_btree:foldl(IdBtree, Fun,
+ {ok, {Bt3, Uncopied, _Total}} = couch_btree:foldl(IdBtree, Fun,
{EmptyIdBtree, [], 0}),
{ok, NewIdBtree} = couch_btree:add(Bt3, lists:reverse(Uncopied)),
-
+
NewViews = lists:map(fun({View, EmptyView}) ->
compact_view(View, EmptyView)
end, lists:zip(Views, EmptyViews)),
-
+
NewGroup = EmptyGroup#group{
- id_btree=NewIdBtree,
- views=NewViews,
+ id_btree=NewIdBtree,
+ views=NewViews,
current_seq=Seq
},
-
+
Pid = couch_view:get_group_server(DbName, GroupId),
gen_server:cast(Pid, {compact_done, NewGroup}).
%% @spec compact_view(View, EmptyView, Retry) -> CompactView
compact_view(View, EmptyView) ->
{ok, Count} = couch_view:get_row_count(View),
-
+
%% Key is {Key,DocId}
Fun = fun(KV, {Bt, Acc, TotalCopied}) ->
if TotalCopied rem 10000 == 0 ->
@@ -86,12 +86,12 @@ compact_view(View, EmptyView) ->
[View#view.id_num, TotalCopied, Count, (TotalCopied*100) div Count]),
{ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV|Acc])),
{ok, {Bt2, [], TotalCopied + 1}};
- true ->
+ true ->
{ok, {Bt, [KV|Acc], TotalCopied + 1}}
end
end,
-
- {ok, {Bt3, Uncopied, _Total}} = couch_btree:foldl(View#view.btree, Fun,
+
+ {ok, {Bt3, Uncopied, _Total}} = couch_btree:foldl(View#view.btree, Fun,
{EmptyView#view.btree, [], 0}),
{ok, NewBt} = couch_btree:add(Bt3, lists:reverse(Uncopied)),
EmptyView#view{btree = NewBt}.
diff --git a/src/couchdb/couch_view_group.erl b/src/couchdb/couch_view_group.erl
index 0b390b22..cc2f37a6 100644
--- a/src/couchdb/couch_view_group.erl
+++ b/src/couchdb/couch_view_group.erl
@@ -22,7 +22,7 @@
terminate/2, code_change/3]).
-include("couch_db.hrl").
-
+
-record(group_state, {
type,
db_name,
@@ -70,7 +70,7 @@ start_link(InitArgs) ->
{InitArgs, self(), Ref = make_ref()}, []) of
{ok, Pid} ->
{ok, Pid};
- ignore ->
+ ignore ->
receive
{Ref, Pid, Error} ->
case process_info(self(), trap_exit) of
@@ -83,7 +83,7 @@ start_link(InitArgs) ->
Error
end.
-% init creates a closure which spawns the appropriate view_updater.
+% init creates a closure which spawns the appropriate view_updater.
init({InitArgs, ReturnPid, Ref}) ->
process_flag(trap_exit, true),
case prepare_group(InitArgs, false) of
@@ -118,7 +118,7 @@ init({InitArgs, ReturnPid, Ref}) ->
% If the request sequence is higher than our current high_target seq, we set
% that as the highest sequence. If the updater is not running, we launch it.
-handle_call({request_group, RequestSeq}, From,
+handle_call({request_group, RequestSeq}, From,
#group_state{
db_name=DbName,
group=#group{current_seq=Seq}=Group,
@@ -128,13 +128,13 @@ handle_call({request_group, RequestSeq}, From,
{ok, Db} = couch_db:open(DbName, []),
Group2 = Group#group{db=Db},
Pid = spawn_link(fun()-> couch_view_updater:update(Group2) end),
-
+
{noreply, State#group_state{
updater_pid=Pid,
group=Group2,
waiting_list=[{From,RequestSeq}|WaitList]
}, infinity};
-
+
% If the request sequence is less than or equal to the seq_id of a known Group,
% we respond with that Group.
@@ -159,7 +159,7 @@ handle_call(request_group_info, _From, #group_state{
GroupInfo = get_group_info(Group, CompactorPid),
{reply, {ok, GroupInfo}, State}.
-handle_cast({start_compact, CompactFun}, #group_state{ compactor_pid=nil,
+handle_cast({start_compact, CompactFun}, #group_state{ compactor_pid=nil,
group=Group, init_args={view, RootDir, DbName, GroupId} } = State) ->
?LOG_INFO("Starting view group compaction", []),
{ok, Db} = couch_db:open(DbName, []),
@@ -171,10 +171,10 @@ handle_cast({start_compact, _}, State) ->
%% compact already running, this is a no-op
{noreply, State};
-handle_cast({compact_done, #group{fd=NewFd, current_seq=NewSeq} = NewGroup},
- #group_state{
+handle_cast({compact_done, #group{fd=NewFd, current_seq=NewSeq} = NewGroup},
+ #group_state{
group = #group{current_seq=OldSeq, sig=GroupSig} = Group,
- init_args = {view, RootDir, DbName, _GroupId},
+ init_args = {view, RootDir, DbName, _GroupId},
updater_pid = nil,
ref_counter = RefCounter
} = State) when NewSeq >= OldSeq ->
@@ -183,7 +183,7 @@ handle_cast({compact_done, #group{fd=NewFd, current_seq=NewSeq} = NewGroup},
CompactName = index_file_name(compact, RootDir, DbName, GroupSig),
file:delete(FileName),
ok = file:rename(CompactName, FileName),
-
+
%% cleanup old group
couch_ref_counter:drop(RefCounter),
{ok, NewRefCounter} = couch_ref_counter:start([NewFd]),
@@ -191,20 +191,20 @@ handle_cast({compact_done, #group{fd=NewFd, current_seq=NewSeq} = NewGroup},
nil -> ok;
Else -> couch_db:close(Else)
end,
-
+
erlang:send_after(1000, self(), delayed_commit),
{noreply, State#group_state{
- group=NewGroup,
+ group=NewGroup,
ref_counter=NewRefCounter,
compactor_pid=nil
}};
-handle_cast({compact_done, NewGroup}, #group_state{
+handle_cast({compact_done, NewGroup}, #group_state{
init_args={view, _RootDir, DbName, GroupId} } = State) ->
?LOG_INFO("View index compaction still behind main file", []),
couch_db:close(NewGroup#group.db),
{ok, Db} = couch_db:open(DbName, []),
- Pid = spawn_link(fun() ->
- {_,Ref} = erlang:spawn_monitor(fun() ->
+ Pid = spawn_link(fun() ->
+ {_,Ref} = erlang:spawn_monitor(fun() ->
couch_view_updater:update(NewGroup#group{db = Db})
end),
receive
@@ -258,8 +258,8 @@ handle_info({'EXIT', FromPid, {new_group, #group{db=Db}=Group}},
{noreply, State#group_state{waiting_commit=true,
waiting_list=StillWaiting, group=Group2, updater_pid=Pid}}
end;
-
-handle_info({'EXIT', FromPid, reset},
+
+handle_info({'EXIT', FromPid, reset},
#group_state{
init_args=InitArgs,
updater_pid=UpPid,
@@ -274,10 +274,10 @@ handle_info({'EXIT', FromPid, reset},
Error ->
{stop, normal, reply_all(State, Error)}
end;
-
+
handle_info({'EXIT', _FromPid, normal}, State) ->
{noreply, State};
-
+
handle_info({'EXIT', FromPid, {{nocatch, Reason}, _Trace}}, State) ->
?LOG_DEBUG("Uncaught throw() in linked pid: ~p", [{FromPid, Reason}]),
{stop, Reason, State};
@@ -285,7 +285,7 @@ handle_info({'EXIT', FromPid, {{nocatch, Reason}, _Trace}}, State) ->
handle_info({'EXIT', FromPid, Reason}, State) ->
?LOG_DEBUG("Exit from linked pid: ~p", [{FromPid, Reason}]),
{stop, Reason, State};
-
+
handle_info({'DOWN',_,_,_,_}, State) ->
?LOG_INFO("Shutting down view group server, monitored db is closing.", []),
{stop, normal, reply_all(State, shutdown)}.
@@ -305,13 +305,13 @@ code_change(_OldVsn, State, _Extra) ->
% reply_with_group/3
% for each item in the WaitingList {Pid, Seq}
% if the Seq is =< GroupSeq, reply
-reply_with_group(Group=#group{current_seq=GroupSeq}, [{Pid, Seq}|WaitList],
+reply_with_group(Group=#group{current_seq=GroupSeq}, [{Pid, Seq}|WaitList],
StillWaiting, RefCounter) when Seq =< GroupSeq ->
gen_server:reply(Pid, {ok, Group, RefCounter}),
reply_with_group(Group, WaitList, StillWaiting, RefCounter);
% else
-% put it in the continuing waiting list
+% put it in the continuing waiting list
reply_with_group(Group, [{Pid, Seq}|WaitList], StillWaiting, RefCounter) ->
reply_with_group(Group, WaitList, [{Pid, Seq}|StillWaiting], RefCounter);
@@ -351,7 +351,7 @@ prepare_group({RootDir, DbName, #group{sig=Sig}=Group}, ForceReset)->
Else
end.
-get_index_header_data(#group{current_seq=Seq, purge_seq=PurgeSeq,
+get_index_header_data(#group{current_seq=Seq, purge_seq=PurgeSeq,
id_btree=IdBtree,views=Views}) ->
ViewStates = [couch_btree:get_state(Btree) || #view{btree=Btree} <- Views],
#index_header{seq=Seq,
@@ -364,7 +364,7 @@ hex_sig(GroupSig) ->
design_root(RootDir, DbName) ->
RootDir ++ "/." ++ ?b2l(DbName) ++ "_design/".
-
+
index_file_name(RootDir, DbName, GroupSig) ->
design_root(RootDir, DbName) ++ hex_sig(GroupSig) ++".view".
@@ -390,17 +390,17 @@ open_temp_group(DbName, Language, DesignOptions, MapSrc, RedSrc) ->
reduce_funs= if RedSrc==[] -> []; true -> [{<<"_temp">>, RedSrc}] end},
{ok, Db, #group{
- name = <<"_temp">>,
+ name = <<"_temp">>,
db=Db,
- views=[View],
- def_lang=Language,
+ views=[View],
+ def_lang=Language,
design_options=DesignOptions,
sig = erlang:md5(term_to_binary({[View], Language, DesignOptions}))
}};
Error ->
Error
end.
-
+
open_db_group(DbName, GroupId) ->
case couch_db:open(DbName, []) of
{ok, Db} ->
@@ -425,7 +425,7 @@ get_group_info(#group{
{signature, ?l2b(hex_sig(GroupSig))},
{language, Lang},
{disk_size, Size},
- {compact_running, CompactorPid /= nil}
+ {compact_running, CompactorPid /= nil}
].
% maybe move to another module
@@ -490,11 +490,11 @@ init_group(Db, Fd, #group{def_lang=Lang,views=Views}=Group, IndexHeader) ->
Views2 = lists:zipwith(
fun(BtreeState, #view{reduce_funs=RedFuns}=View) ->
FunSrcs = [FunSrc || {_Name, FunSrc} <- RedFuns],
- ReduceFun =
+ ReduceFun =
fun(reduce, KVs) ->
KVs2 = couch_view:expand_dups(KVs,[]),
KVs3 = couch_view:detuple_kvs(KVs2,[]),
- {ok, Reduced} = couch_query_servers:reduce(Lang, FunSrcs,
+ {ok, Reduced} = couch_query_servers:reduce(Lang, FunSrcs,
KVs3),
{length(KVs3), Reduced};
(rereduce, Reds) ->
diff --git a/src/couchdb/couch_view_updater.erl b/src/couchdb/couch_view_updater.erl
index 11dfb544..97ce3c31 100644
--- a/src/couchdb/couch_view_updater.erl
+++ b/src/couchdb/couch_view_updater.erl
@@ -18,7 +18,7 @@
update(#group{db=#db{name=DbName}=Db,name=GroupName,current_seq=Seq,purge_seq=PurgeSeq}=Group) ->
couch_task_status:add_task(<<"View Group Indexer">>, <<DbName/binary," ",GroupName/binary>>, <<"Starting index update">>),
-
+
DbPurgeSeq = couch_db:get_purge_seq(Db),
Group2 =
if DbPurgeSeq == PurgeSeq ->
@@ -30,7 +30,7 @@ update(#group{db=#db{name=DbName}=Db,name=GroupName,current_seq=Seq,purge_seq=Pu
couch_task_status:update(<<"Resetting view index due to lost purge entries.">>),
exit(reset)
end,
-
+
ViewEmptyKVs = [{View, []} || View <- Group2#group.views],
% compute on all docs modified since we last computed.
TotalChanges = couch_db:count_changes_since(Db, Seq),
@@ -95,9 +95,9 @@ process_doc(Db, DocInfo, {Docs, #group{sig=Sig,name=GroupId,design_options=Desig
% This fun computes once for each document
#doc_info{id=DocId, revs=[#rev_info{deleted=Deleted}|_]} = DocInfo,
- IncludeDesign = proplists:get_value(<<"include_design">>,
+ IncludeDesign = proplists:get_value(<<"include_design">>,
DesignOptions, false),
- LocalSeq = proplists:get_value(<<"local_seq">>,
+ LocalSeq = proplists:get_value(<<"local_seq">>,
DesignOptions, false),
DocOpts = case LocalSeq of
true ->
@@ -113,15 +113,15 @@ process_doc(Db, DocInfo, {Docs, #group{sig=Sig,name=GroupId,design_options=Desig
if Deleted ->
{Docs, [{DocId, []} | DocIdViewIdKeys]};
true ->
- {ok, Doc} = couch_db:open_doc_int(Db, DocInfo,
+ {ok, Doc} = couch_db:open_doc_int(Db, DocInfo,
DocOpts),
{[Doc | Docs], DocIdViewIdKeys}
end,
-
+
case couch_util:should_flush() of
true ->
{Group1, Results} = view_compute(Group, Docs2),
- {ViewKVs3, DocIdViewIdKeys3} = view_insert_query_results(Docs2,
+ {ViewKVs3, DocIdViewIdKeys3} = view_insert_query_results(Docs2,
Results, ViewKVs, DocIdViewIdKeys2),
{ok, Group2} = write_changes(Group1, ViewKVs3, DocIdViewIdKeys3,
DocInfo#doc_info.high_seq),
@@ -159,7 +159,7 @@ view_insert_doc_query_results(#doc{id=DocId}=Doc, [ResultKVs|RestResults], [{Vie
[{Key,Value},{PrevKey,PrevVal}|AccRest]
end;
(KV, []) ->
- [KV]
+ [KV]
end, [], lists:sort(ResultKVs)),
NewKVs = [{{Key, DocId}, Value} || {Key, Value} <- ResultKVs2],
NewViewKVsAcc = [{View, NewKVs ++ KVs} | ViewKVsAcc],
diff --git a/src/couchdb/curlhelper.c b/src/couchdb/curlhelper.c
index 99b2e6ab..116612cd 100644
--- a/src/couchdb/curlhelper.c
+++ b/src/couchdb/curlhelper.c
@@ -38,7 +38,7 @@ Buffer init_Buffer() {
}
void free_Buffer(Buffer b) {
- if(b == NULL)
+ if(b == NULL)
return;
if(b->data != NULL)
free(b->data);
@@ -186,7 +186,7 @@ int set_List(List l, int pos, void* ptr) {
}
*(l->elements + pos) = ptr;
-
+
return TRUE;
}
diff --git a/src/ibrowse/ibrowse.app b/src/ibrowse/ibrowse.app
index 5e4621d3..a3d23ae7 100644
--- a/src/ibrowse/ibrowse.app
+++ b/src/ibrowse/ibrowse.app
@@ -1,10 +1,10 @@
{application, ibrowse,
[{description, "HTTP client application"},
{vsn, "1.5.0"},
- {modules, [ ibrowse,
- ibrowse_http_client,
- ibrowse_app,
- ibrowse_sup,
+ {modules, [ ibrowse,
+ ibrowse_http_client,
+ ibrowse_app,
+ ibrowse_sup,
ibrowse_lib,
ibrowse_lb ]},
{registered, []},
diff --git a/src/ibrowse/ibrowse.erl b/src/ibrowse/ibrowse.erl
index 0d3478b3..56f0ef4d 100644
--- a/src/ibrowse/ibrowse.erl
+++ b/src/ibrowse/ibrowse.erl
@@ -21,14 +21,14 @@
%% <p>Here are a few sample invocations.</p>
%%
%% <code>
-%% ibrowse:send_req("http://intranet/messenger/", [], get).
+%% ibrowse:send_req("http://intranet/messenger/", [], get).
%% <br/><br/>
-%%
-%% ibrowse:send_req("http://www.google.com/", [], get, [],
+%%
+%% ibrowse:send_req("http://www.google.com/", [], get, [],
%% [{proxy_user, "XXXXX"},
%% {proxy_password, "XXXXX"},
%% {proxy_host, "proxy"},
-%% {proxy_port, 8080}], 1000).
+%% {proxy_port, 8080}], 1000).
%% <br/><br/>
%%
%%ibrowse:send_req("http://www.erlang.org/download/otp_src_R10B-3.tar.gz", [], get, [],
@@ -48,7 +48,7 @@
%% ibrowse:send_req("http://www.bbc.co.uk", [], trace).
%%
%% <br/><br/>
-%% ibrowse:send_req("http://www.google.com", [], get, [],
+%% ibrowse:send_req("http://www.google.com", [], get, [],
%% [{stream_to, self()}]).
%% </code>
%%
@@ -110,7 +110,7 @@
get_value/3,
do_trace/2
]).
-
+
-record(state, {trace = false}).
-include("ibrowse.hrl").
@@ -158,7 +158,7 @@ stop() ->
send_req(Url, Headers, Method) ->
send_req(Url, Headers, Method, [], []).
-%% @doc Same as send_req/3.
+%% @doc Same as send_req/3.
%% If a list is specified for the body it has to be a flat list. The body can also be a fun/0 or a fun/1. <br/>
%% If fun/0, the connection handling process will repeatedly call the fun until it returns an error or eof. <pre>Fun() = {ok, Data} | eof</pre><br/>
%% If fun/1, the connection handling process will repeatedly call the fun with the supplied state until it returns an error or eof. <pre>Fun(State) = {ok, Data} | {ok, Data, NewState} | eof</pre>
@@ -168,7 +168,7 @@ send_req(Url, Headers, Method) ->
send_req(Url, Headers, Method, Body) ->
send_req(Url, Headers, Method, Body, []).
-%% @doc Same as send_req/4.
+%% @doc Same as send_req/4.
%% For a description of SSL Options, look in the ssl manpage. If the
%% HTTP Version to use is not specified, the default is 1.1.
%% <br/>
@@ -181,7 +181,7 @@ send_req(Url, Headers, Method, Body) ->
%% used to specify what should go in the <code>Host</code> header in
%% the request.</p>
%% <ul>
-%% <li>When both the options <code>save_response_to_file</code> and <code>stream_to</code>
+%% <li>When both the options <code>save_response_to_file</code> and <code>stream_to</code>
%% are specified, the former takes precedence.</li>
%%
%% <li>For the <code>save_response_to_file</code> option, the response body is saved to
@@ -211,21 +211,21 @@ send_req(Url, Headers, Method, Body) ->
%% ibrowse:send_req("http://www.example.com/cgi-bin/request", [], get, [], [{connect_timeout, 100}], 1000).
%% </code>
%% In the above invocation, if the connection isn't established within
-%% 100 milliseconds, the request will fail with
+%% 100 milliseconds, the request will fail with
%% <code>{error, conn_failed}</code>.<br/>
%% If connection setup succeeds, the total time allowed for the
%% request to complete will be 1000 milliseconds minus the time taken
%% for connection setup.
%% </li>
%% </ul>
-%%
+%%
%% @spec send_req(Url::string(), Headers::headerList(), Method::method(), Body::body(), Options::optionList()) -> response()
%% optionList() = [option()]
%% option() = {max_sessions, integer()} |
%% {response_format,response_format()}|
%% {stream_chunk_size, integer()} |
%% {max_pipeline_size, integer()} |
-%% {trace, boolean()} |
+%% {trace, boolean()} |
%% {is_ssl, boolean()} |
%% {ssl_options, [SSLOpt]} |
%% {pool_name, atom()} |
@@ -257,7 +257,7 @@ send_req(Url, Headers, Method, Body) ->
send_req(Url, Headers, Method, Body, Options) ->
send_req(Url, Headers, Method, Body, Options, 30000).
-%% @doc Same as send_req/5.
+%% @doc Same as send_req/5.
%% All timeout values are in milliseconds.
%% @spec send_req(Url, Headers::headerList(), Method::method(), Body::body(), Options::optionList(), Timeout) -> response()
%% Timeout = integer() | infinity
@@ -282,7 +282,7 @@ send_req(Url, Headers, Method, Body, Options, Timeout) ->
true -> {get_value(ssl_options, Options_1, []), true}
end,
case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
- Max_sessions,
+ Max_sessions,
Max_pipeline_size,
{SSLOptions, IsSSL}) of
{ok, Conn_Pid} ->
@@ -333,7 +333,7 @@ set_dest(_Host, _Port, [H | _]) ->
exit({invalid_option, H});
set_dest(_, _, []) ->
ok.
-
+
%% @doc Set the maximum number of connections allowed to a specific Host:Port.
%% @spec set_max_sessions(Host::string(), Port::integer(), Max::integer()) -> ok
set_max_sessions(Host, Port, Max) when is_integer(Max), Max > 0 ->
@@ -432,7 +432,7 @@ send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, Timeout) ->
%% caller. Should be used in conjunction with the
%% <code>stream_to</code> option
%% @spec stream_next(Req_id :: req_id()) -> ok | {error, unknown_req_id}
-stream_next(Req_id) ->
+stream_next(Req_id) ->
case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
[] ->
{error, unknown_req_id};
@@ -451,7 +451,7 @@ trace_off() ->
%% @doc Turn tracing on for all connections to the specified HTTP
%% server. Host is whatever is specified as the domain name in the URL
%% @spec trace_on(Host, Port) -> ok
-%% Host = string()
+%% Host = string()
%% Port = integer()
trace_on(Host, Port) ->
ibrowse ! {trace, true, Host, Port},
@@ -554,7 +554,7 @@ import_config(Filename) ->
case file:consult(Filename) of
{ok, Terms} ->
ets:delete_all_objects(ibrowse_conf),
- Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options})
+ Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options})
when is_list(Host), is_integer(Port),
is_integer(MaxSess), MaxSess > 0,
is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
@@ -564,7 +564,7 @@ import_config(Filename) ->
lists:foreach(
fun({X, Y}) ->
ets:insert(ibrowse_conf,
- #ibrowse_conf{key = X,
+ #ibrowse_conf{key = X,
value = Y})
end, I);
({K, V}) ->
@@ -663,7 +663,7 @@ handle_info(all_trace_off, State) ->
ets:foldl(Fun, undefined, ibrowse_lb),
ets:select_delete(ibrowse_conf, [{{ibrowse_conf,{trace,'$1','$2'},true},[],['true']}]),
{noreply, State};
-
+
handle_info({trace, Bool}, State) ->
put(my_trace_flag, Bool),
{noreply, State};
@@ -680,7 +680,7 @@ handle_info({trace, Bool, Host, Port}, State) ->
ets:insert(ibrowse_conf, #ibrowse_conf{key = {trace, Host, Port},
value = Bool}),
{noreply, State};
-
+
handle_info(_Info, State) ->
{noreply, State}.
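
The edoc above already carries several sample invocations; for completeness, this is the shape of a typical synchronous call and how its result is usually matched, with a placeholder URL and the connect_timeout/overall-timeout values from the documentation (illustrative only, not part of this patch):

    case ibrowse:send_req("http://example.com/", [], get, [],
                          [{connect_timeout, 100}], 1000) of
        {ok, Status, _Headers, Body} ->
            io:format("HTTP ~s, ~p bytes~n", [Status, iolist_size(Body)]);
        {error, Reason} ->
            io:format("request failed: ~p~n", [Reason])
    end.
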
diff --git a/src/ibrowse/ibrowse_app.erl b/src/ibrowse/ibrowse_app.erl
index f5e523c2..8c83e8f1 100644
--- a/src/ibrowse/ibrowse_app.erl
+++ b/src/ibrowse/ibrowse_app.erl
@@ -1,7 +1,7 @@
%%%-------------------------------------------------------------------
%%% File : ibrowse_app.erl
%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
-%%% Description :
+%%% Description :
%%%
%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
%%%-------------------------------------------------------------------
@@ -42,11 +42,11 @@
%% Func: start/2
%% Returns: {ok, Pid} |
%% {ok, Pid, State} |
-%% {error, Reason}
+%% {error, Reason}
%%--------------------------------------------------------------------
start(_Type, _StartArgs) ->
case ibrowse_sup:start_link() of
- {ok, Pid} ->
+ {ok, Pid} ->
{ok, Pid};
Error ->
Error
@@ -54,7 +54,7 @@ start(_Type, _StartArgs) ->
%%--------------------------------------------------------------------
%% Func: stop/1
-%% Returns: any
+%% Returns: any
%%--------------------------------------------------------------------
stop(_State) ->
ok.
diff --git a/src/ibrowse/ibrowse_http_client.erl b/src/ibrowse/ibrowse_http_client.erl
index 3cacf391..43aa51f4 100644
--- a/src/ibrowse/ibrowse_http_client.erl
+++ b/src/ibrowse/ibrowse_http_client.erl
@@ -51,10 +51,10 @@
}).
-record(request, {url, method, options, from,
- stream_to, caller_controls_socket = false,
+ stream_to, caller_controls_socket = false,
req_id,
stream_chunk_size,
- save_response_to_file = false,
+ save_response_to_file = false,
tmp_file_name, tmp_file_fd,
response_format}).
@@ -338,7 +338,7 @@ accumulate_response(Data, #state{reply_buffer = RepBuf,
State#state{reply_buffer = RepBuf_1};
_ when Caller_controls_socket == true ->
do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
- State#state{reply_buffer = <<>>,
+ State#state{reply_buffer = <<>>,
streamed_size = Streamed_size + size(RepBuf_1)};
_ when New_data_size >= Stream_chunk_size ->
{Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
@@ -514,7 +514,7 @@ send_req_1(From,
{Caller, once} when is_pid(Caller) or
is_atom(Caller) ->
Async_pid_rec = {{req_id_pid, ReqId}, self()},
- true = ets:insert(ibrowse_stream, Async_pid_rec),
+ true = ets:insert(ibrowse_stream, Async_pid_rec),
{Caller, true};
undefined ->
{undefined, false};
@@ -869,7 +869,7 @@ is_connection_closing(_, _) -> false.
%% This clause determines the chunk size when given data from the beginning of the chunk
parse_11_response(DataRecvd,
- #state{transfer_encoding=chunked,
+ #state{transfer_encoding=chunked,
chunk_size=chunk_start,
chunk_size_buffer = Chunk_sz_buf
} = State) ->
@@ -899,7 +899,7 @@ parse_11_response(DataRecvd,
%% This clause is to remove the CRLF between two chunks
%%
parse_11_response(DataRecvd,
- #state{transfer_encoding = chunked,
+ #state{transfer_encoding = chunked,
chunk_size = tbd,
chunk_size_buffer = Buf}=State) ->
case scan_crlf(Buf, DataRecvd) of
@@ -916,7 +916,7 @@ parse_11_response(DataRecvd,
%% This clause deals with the end of a chunked transfer
parse_11_response(DataRecvd,
- #state{transfer_encoding = chunked, chunk_size = 0,
+ #state{transfer_encoding = chunked, chunk_size = 0,
cur_req = CurReq,
deleted_crlf = DelCrlf,
reply_buffer = Trailer, reqs = Reqs}=State) ->
@@ -1449,7 +1449,7 @@ get_stream_chunk_size(Options) ->
?DEFAULT_STREAM_CHUNK_SIZE
end.
-get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->
+get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->
get_value(inactivity_timeout, Opts, infinity);
get_inac_timeout(#state{cur_req = undefined}) ->
infinity.
diff --git a/src/ibrowse/ibrowse_lb.erl b/src/ibrowse/ibrowse_lb.erl
index 9212ccd7..b0654b72 100644
--- a/src/ibrowse/ibrowse_lb.erl
+++ b/src/ibrowse/ibrowse_lb.erl
@@ -1,7 +1,7 @@
%%%-------------------------------------------------------------------
%%% File : ibrowse_lb.erl
%%% Author : chandru <chandrashekhar.mullaparthi@t-mobile.co.uk>
-%%% Description :
+%%% Description :
%%%
%%% Created : 6 Mar 2008 by chandru <chandrashekhar.mullaparthi@t-mobile.co.uk>
%%%-------------------------------------------------------------------
@@ -101,7 +101,7 @@ spawn_connection(Lb_pid, Url,
% #state{max_sessions = Max_sess,
% ets_tid = Tid,
% max_pipeline_size = Max_pipe_sz,
-% num_cur_sessions = Num} = State)
+% num_cur_sessions = Num} = State)
% when Num >= Max ->
% Reply = find_best_connection(Tid),
% {reply, sorry_dude_reuse, State};
@@ -109,7 +109,7 @@ spawn_connection(Lb_pid, Url,
%% Update max_sessions in #state with supplied value
handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
#state{ets_tid = Tid,
- num_cur_sessions = Num} = State)
+ num_cur_sessions = Num} = State)
when Num >= Max_sess ->
Reply = find_best_connection(Tid, Max_pipe),
{reply, Reply, State#state{max_sessions = Max_sess}};
diff --git a/src/ibrowse/ibrowse_lib.erl b/src/ibrowse/ibrowse_lib.erl
index 67c5eee2..7567a6a6 100644
--- a/src/ibrowse/ibrowse_lib.erl
+++ b/src/ibrowse/ibrowse_lib.erl
@@ -1,6 +1,6 @@
%%% File : ibrowse_lib.erl
%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
-%%% Description :
+%%% Description :
%%% Created : 27 Feb 2004 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
%% @doc Module with a few useful functions
@@ -110,7 +110,7 @@ month_int("Oct") -> 10;
month_int("Nov") -> 11;
month_int("Dec") -> 12.
-%% @doc Given a status code, returns an atom describing the status code.
+%% @doc Given a status code, returns an atom describing the status code.
%% @spec status_code(StatusCode::status_code()) -> StatusDescription
%% status_code() = string() | integer()
%% StatusDescription = atom()
@@ -271,7 +271,7 @@ parse_url([$:, $/, $/ | _], get_protocol, Url, []) ->
{invalid_uri_1, Url};
parse_url([$:, $/, $/ | T], get_protocol, Url, TmpAcc) ->
Prot = list_to_atom(lists:reverse(TmpAcc)),
- parse_url(T, get_username,
+ parse_url(T, get_username,
Url#url{protocol = Prot},
[]);
parse_url([$/ | T], get_username, Url, TmpAcc) ->
@@ -285,16 +285,16 @@ parse_url([$: | T], get_username, Url, TmpAcc) ->
%% a username/password. If we encounter a '@' later on, there is a
%% username/password indeed. If we encounter a '/', it was
%% actually the hostname
- parse_url(T, get_password,
+ parse_url(T, get_password,
Url#url{username = lists:reverse(TmpAcc)},
[]);
parse_url([$@ | T], get_username, Url, TmpAcc) ->
- parse_url(T, get_host,
+ parse_url(T, get_host,
Url#url{username = lists:reverse(TmpAcc),
password = ""},
[]);
parse_url([$@ | T], get_password, Url, TmpAcc) ->
- parse_url(T, get_host,
+ parse_url(T, get_host,
Url#url{password = lists:reverse(TmpAcc)},
[]);
parse_url([$/ | T], get_password, Url, TmpAcc) ->
@@ -308,7 +308,7 @@ parse_url([$/ | T], get_password, Url, TmpAcc) ->
password = undefined,
path = [$/ | T]};
parse_url([$: | T], get_host, #url{} = Url, TmpAcc) ->
- parse_url(T, get_port,
+ parse_url(T, get_port,
Url#url{host = lists:reverse(TmpAcc)},
[]);
parse_url([$/ | T], get_host, #url{protocol=Prot} = Url, TmpAcc) ->
@@ -340,7 +340,7 @@ parse_url([], get_port, #url{protocol=Prot} = Url, TmpAcc) ->
_ ->
list_to_integer(lists:reverse(TmpAcc))
end,
- Url#url{port = Port,
+ Url#url{port = Port,
path = "/"};
parse_url([], get_password, Url, TmpAcc) ->
%% Ok, what we thought was the username/password was the hostname
@@ -387,12 +387,12 @@ do_trace(Fmt, Args) ->
-ifdef(DEBUG).
do_trace(_, Fmt, Args) ->
io:format("~s -- (~s) - "++Fmt,
- [printable_date(),
+ [printable_date(),
get(ibrowse_trace_token) | Args]).
-else.
do_trace(true, Fmt, Args) ->
io:format("~s -- (~s) - "++Fmt,
- [printable_date(),
+ [printable_date(),
get(ibrowse_trace_token) | Args]);
do_trace(_, _, _) ->
ok.
diff --git a/src/ibrowse/ibrowse_sup.erl b/src/ibrowse/ibrowse_sup.erl
index 300435d4..1b9b863a 100644
--- a/src/ibrowse/ibrowse_sup.erl
+++ b/src/ibrowse/ibrowse_sup.erl
@@ -1,7 +1,7 @@
%%%-------------------------------------------------------------------
%%% File : ibrowse_sup.erl
%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
-%%% Description :
+%%% Description :
%%%
%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
%%%-------------------------------------------------------------------
@@ -53,7 +53,7 @@ start_link() ->
%% Func: init/1
%% Returns: {ok, {SupFlags, [ChildSpec]}} |
%% ignore |
-%% {error, Reason}
+%% {error, Reason}
%%--------------------------------------------------------------------
init([]) ->
AChild = {ibrowse,{ibrowse,start_link,[]},
diff --git a/src/ibrowse/ibrowse_test.erl b/src/ibrowse/ibrowse_test.erl
index ad3e8126..cab1f882 100644
--- a/src/ibrowse/ibrowse_test.erl
+++ b/src/ibrowse/ibrowse_test.erl
@@ -225,7 +225,7 @@ unit_tests() ->
unit_tests(Options) ->
Options_1 = Options ++ [{connect_timeout, 5000}],
{Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]),
- receive
+ receive
{done, Pid} ->
ok;
{'DOWN', Ref, _, _, Info} ->
@@ -292,7 +292,7 @@ compare_responses(R1, R2, R3) ->
do_async_req_list(Url, Method, Options) ->
{Pid,_} = erlang:spawn_monitor(?MODULE, i_do_async_req_list,
- [self(), Url, Method,
+ [self(), Url, Method,
Options ++ [{stream_chunk_size, 1000}]]),
io:format("Spawned process ~p~n", [Pid]),
wait_for_resp(Pid).
diff --git a/src/mochiweb/mochijson.erl b/src/mochiweb/mochijson.erl
index 0e887a91..029642ac 100644
--- a/src/mochiweb/mochijson.erl
+++ b/src/mochiweb/mochijson.erl
@@ -145,7 +145,7 @@ json_encode_proplist([], _State) ->
"{}";
json_encode_proplist(Props, State) ->
F = fun ({K, V}, Acc) ->
- KS = case K of
+ KS = case K of
K when is_atom(K) ->
json_encode_string_utf8(atom_to_list(K));
K when is_integer(K) ->
@@ -320,12 +320,12 @@ tokenize_string([$\\, $u, C3, C2, C1, C0 | Rest], S, Acc) ->
% coalesce UTF-16 surrogate pair?
C = dehex(C0) bor
(dehex(C1) bsl 4) bor
- (dehex(C2) bsl 8) bor
+ (dehex(C2) bsl 8) bor
(dehex(C3) bsl 12),
tokenize_string(Rest, ?ADV_COL(S, 6), [C | Acc]);
tokenize_string([C | Rest], S, Acc) when C >= $\s; C < 16#10FFFF ->
tokenize_string(Rest, ?ADV_COL(S, 1), [C | Acc]).
-
+
tokenize_number(IoList=[C | _], Mode, S=#decoder{input_encoding=utf8}, Acc)
when is_list(C); is_binary(C); C >= 16#7f ->
List = xmerl_ucs:from_utf8(iolist_to_binary(IoList)),
@@ -418,7 +418,7 @@ is_obj({struct, Props}) ->
true;
(_) ->
false
- end,
+ end,
lists:all(F, Props).
obj_from_list(Props) ->
diff --git a/src/mochiweb/mochijson2.erl b/src/mochiweb/mochijson2.erl
index 7d7a8aff..8b6adb1f 100644
--- a/src/mochiweb/mochijson2.erl
+++ b/src/mochiweb/mochijson2.erl
@@ -500,7 +500,7 @@ is_obj({Props}) ->
true;
(_) ->
false
- end,
+ end,
lists:all(F, Props).
obj_from_list(Props) ->
@@ -586,21 +586,21 @@ e2j_test_vec(utf8) ->
{[], "[]"},
{[[]], "[[]]"},
{[1, <<"foo">>], "[1,\"foo\"]"},
-
+
%% json array in a json object
{obj_from_list([{<<"foo">>, [123]}]),
"{\"foo\":[123]}"},
-
+
%% json object in a json object
{obj_from_list([{<<"foo">>, obj_from_list([{<<"bar">>, true}])}]),
"{\"foo\":{\"bar\":true}}"},
-
+
%% fold evaluation order
{obj_from_list([{<<"foo">>, []},
{<<"bar">>, obj_from_list([{<<"baz">>, true}])},
{<<"alice">>, <<"bob">>}]),
"{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
-
+
%% json object in a json array
{[-123, <<"foo">>, obj_from_list([{<<"bar">>, []}]), null],
"[-123,\"foo\",{\"bar\":[]},null]"}
diff --git a/src/mochiweb/mochinum.erl b/src/mochiweb/mochinum.erl
index 4f88f9a5..6a866042 100644
--- a/src/mochiweb/mochinum.erl
+++ b/src/mochiweb/mochinum.erl
@@ -40,7 +40,7 @@ digits(Float) ->
_ ->
R
end.
-
+
%% @spec frexp(F::float()) -> {Frac::float(), Exp::float()}
%% @doc Return the fractional and exponent part of an IEEE 754 double,
%% equivalent to the libc function of the same name.
@@ -205,7 +205,7 @@ generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
end
end.
-unpack(Float) ->
+unpack(Float) ->
<<Sign:1, Exp:11, Frac:52>> = <<Float:64/float>>,
{Sign, Exp, Frac}.
@@ -243,7 +243,7 @@ test_int_ceil() ->
-1 = int_ceil(-1.5),
-2 = int_ceil(-2.0),
ok.
-
+
test_int_pow() ->
1 = int_pow(1, 1),
1 = int_pow(1, 0),
@@ -252,7 +252,7 @@ test_int_pow() ->
100 = int_pow(10, 2),
1000 = int_pow(10, 3),
ok.
-
+
test_digits() ->
"0" = digits(0),
"0.0" = digits(0.0),
diff --git a/src/mochiweb/mochiweb_charref.erl b/src/mochiweb/mochiweb_charref.erl
index 59fd4a47..d037d2f8 100644
--- a/src/mochiweb/mochiweb_charref.erl
+++ b/src/mochiweb/mochiweb_charref.erl
@@ -26,7 +26,7 @@ charref([$# | L]) ->
end;
charref(L) ->
entity(L).
-
+
%% @spec test() -> ok
%% @doc Run tests for mochiweb_charref.
test() ->
diff --git a/src/mochiweb/mochiweb_cookies.erl b/src/mochiweb/mochiweb_cookies.erl
index acea12ae..b9da37b4 100644
--- a/src/mochiweb/mochiweb_cookies.erl
+++ b/src/mochiweb/mochiweb_cookies.erl
@@ -29,8 +29,8 @@
cookie(Key, Value) ->
cookie(Key, Value, []).
-%% @spec cookie(Key::string(), Value::string(), Options::[Option]) -> header()
-%% where Option = {max_age, integer()} | {local_time, {date(), time()}}
+%% @spec cookie(Key::string(), Value::string(), Options::[Option]) -> header()
+%% where Option = {max_age, integer()} | {local_time, {date(), time()}}
%% | {domain, string()} | {path, string()}
%% | {secure, true | false}
%%
@@ -118,9 +118,9 @@ age_to_cookie_date(Age, LocalTime) ->
%% @spec parse_cookie(string()) -> [{K::string(), V::string()}]
%% @doc Parse the contents of a Cookie header field, ignoring cookie
%% attributes, and return a simple property list.
-parse_cookie("") ->
+parse_cookie("") ->
[];
-parse_cookie(Cookie) ->
+parse_cookie(Cookie) ->
parse_cookie(Cookie, []).
%% @spec test() -> ok
@@ -133,8 +133,8 @@ test() ->
%% Internal API
parse_cookie([], Acc) ->
- lists:reverse(Acc);
-parse_cookie(String, Acc) ->
+ lists:reverse(Acc);
+parse_cookie(String, Acc) ->
{{Token, Value}, Rest} = read_pair(String),
Acc1 = case Token of
"" ->
@@ -173,7 +173,7 @@ read_quoted([$\\, Any | Rest], Acc) ->
read_quoted(Rest, [Any | Acc]);
read_quoted([C | Rest], Acc) ->
read_quoted(Rest, [C | Acc]).
-
+
skip_whitespace(String) ->
F = fun (C) -> ?IS_WHITESPACE(C) end,
lists:dropwhile(F, String).
@@ -182,7 +182,7 @@ read_token(String) ->
F = fun (C) -> not ?IS_SEPARATOR(C) end,
lists:splitwith(F, String).
-skip_past_separator([]) ->
+skip_past_separator([]) ->
[];
skip_past_separator([$; | Rest]) ->
Rest;
@@ -193,7 +193,7 @@ skip_past_separator([_ | Rest]) ->
parse_cookie_test() ->
%% RFC example
- C1 = "$Version=\"1\"; Customer=\"WILE_E_COYOTE\"; $Path=\"/acme\";
+ C1 = "$Version=\"1\"; Customer=\"WILE_E_COYOTE\"; $Path=\"/acme\";
Part_Number=\"Rocket_Launcher_0001\"; $Path=\"/acme\";
Shipping=\"FedEx\"; $Path=\"/acme\"",
[
@@ -231,8 +231,8 @@ cookie_test() ->
C1 = cookie(<<"Customer">>, <<"WILE_E_COYOTE">>, [{path, <<"/acme">>}]),
{"Set-Cookie","=NoKey; Version=1"} = cookie("", "NoKey", []),
-
- LocalTime = calendar:universal_time_to_local_time({{2007, 5, 15}, {13, 45, 33}}),
+
+ LocalTime = calendar:universal_time_to_local_time({{2007, 5, 15}, {13, 45, 33}}),
C2 = {"Set-Cookie",
"Customer=WILE_E_COYOTE; "
"Version=1; "
diff --git a/src/mochiweb/mochiweb_echo.erl b/src/mochiweb/mochiweb_echo.erl
index f0c455a5..f32d6803 100644
--- a/src/mochiweb/mochiweb_echo.erl
+++ b/src/mochiweb/mochiweb_echo.erl
@@ -9,7 +9,7 @@
stop() ->
mochiweb_socket_server:stop(?MODULE).
-
+
start() ->
mochiweb_socket_server:start([{name, ?MODULE},
{port, 6789},
diff --git a/src/mochiweb/mochiweb_headers.erl b/src/mochiweb/mochiweb_headers.erl
index 4c0a2d75..b29ff311 100644
--- a/src/mochiweb/mochiweb_headers.erl
+++ b/src/mochiweb/mochiweb_headers.erl
@@ -19,7 +19,7 @@
%% @doc Run tests for this module.
test() ->
H = ?MODULE:make([{hdr, foo}, {"Hdr", "bar"}, {'Hdr', 2}]),
- [{hdr, "foo, bar, 2"}] = ?MODULE:to_list(H),
+ [{hdr, "foo, bar, 2"}] = ?MODULE:to_list(H),
H1 = ?MODULE:insert(taco, grande, H),
[{hdr, "foo, bar, 2"}, {taco, "grande"}] = ?MODULE:to_list(H1),
H2 = ?MODULE:make([{"Set-Cookie", "foo"}]),
@@ -69,7 +69,7 @@ default_from_list(List, T) ->
%% @spec to_list(headers()) -> [{key(), string()}]
%% @doc Return the contents of the headers. The keys will be the exact key
-%% that was first inserted (e.g. may be an atom or binary, case is
+%% that was first inserted (e.g. may be an atom or binary, case is
%% preserved).
to_list(T) ->
F = fun ({K, {array, L}}, Acc) ->
diff --git a/src/mochiweb/mochiweb_html.erl b/src/mochiweb/mochiweb_html.erl
index d9a3cf2e..05ea6382 100644
--- a/src/mochiweb/mochiweb_html.erl
+++ b/src/mochiweb/mochiweb_html.erl
@@ -540,7 +540,7 @@ tree([T={comment, _Comment} | Rest], S) ->
tree(Rest, append_stack_child(T, S));
tree(L=[{data, _Data, _Whitespace} | _], S) ->
case tree_data(L, true, []) of
- {_, true, Rest} ->
+ {_, true, Rest} ->
tree(Rest, S);
{Data, false, Rest} ->
tree(Rest, append_stack_child(Data, S))
diff --git a/src/mochiweb/mochiweb_request.erl b/src/mochiweb/mochiweb_request.erl
index 1a2764e0..64c4f58d 100644
--- a/src/mochiweb/mochiweb_request.erl
+++ b/src/mochiweb/mochiweb_request.erl
@@ -173,13 +173,13 @@ recv_body() ->
recv_body(MaxBody) ->
% we could use a sane constant for max chunk size
Body = stream_body(?MAX_RECV_BODY, fun
- ({0, _ChunkedFooter}, {_LengthAcc, BinAcc}) ->
+ ({0, _ChunkedFooter}, {_LengthAcc, BinAcc}) ->
iolist_to_binary(lists:reverse(BinAcc));
({Length, Bin}, {LengthAcc, BinAcc}) ->
NewLength = Length + LengthAcc,
if NewLength > MaxBody ->
exit({body_too_large, chunked});
- true ->
+ true ->
{NewLength, [Bin | BinAcc]}
end
end, {0, []}, MaxBody),
@@ -188,7 +188,7 @@ recv_body(MaxBody) ->
stream_body(MaxChunkSize, ChunkFun, FunState) ->
stream_body(MaxChunkSize, ChunkFun, FunState, undefined).
-
+
stream_body(MaxChunkSize, ChunkFun, FunState, MaxBodyLength) ->
case get_header_value("expect") of
@@ -215,7 +215,7 @@ stream_body(MaxChunkSize, ChunkFun, FunState, MaxBodyLength) ->
exit({body_too_large, content_length});
_ ->
stream_unchunked_body(Length, MaxChunkSize, ChunkFun, FunState)
- end;
+ end;
Length ->
exit({length_not_integer, Length})
end.
@@ -510,7 +510,7 @@ read_sub_chunks(Length, MaxChunkSize, Fun, FunState) when Length > MaxChunkSize
read_sub_chunks(Length, _MaxChunkSize, Fun, FunState) ->
Fun({Length, read_chunk(Length)}, FunState).
-
+
%% @spec serve_file(Path, DocRoot) -> Response
%% @doc Serve a file relative to DocRoot.
serve_file(Path, DocRoot) ->
@@ -723,19 +723,19 @@ test_range() ->
%% valid, multiple range
io:format("Testing parse_range_request with valid multiple ranges~n"),
io:format("1"),
- [{20, 30}, {50, 100}, {110, 200}] =
+ [{20, 30}, {50, 100}, {110, 200}] =
parse_range_request("bytes=20-30,50-100,110-200"),
io:format("2"),
- [{20, none}, {50, 100}, {none, 200}] =
+ [{20, none}, {50, 100}, {none, 200}] =
parse_range_request("bytes=20-,50-100,-200"),
io:format(".. ok~n"),
-
+
%% no ranges
io:format("Testing out parse_range_request with no ranges~n"),
io:format("1"),
[] = parse_range_request("bytes="),
io:format(".. ok~n"),
-
+
Body = <<"012345678901234567890123456789012345678901234567890123456789">>,
BodySize = size(Body), %% 60
BodySize = 60,
@@ -751,7 +751,7 @@ test_range() ->
io:format("4"),
{30, 30} = range_skip_length({30, none}, BodySize), %% 30-
io:format(".. ok ~n"),
-
+
%% valid edge cases for range_skip_length
io:format("Testing out range_skip_length on valid edge case ranges~n"),
io:format("1"),
@@ -787,4 +787,4 @@ test_range() ->
invalid_range = range_skip_length({BodySize, none}, BodySize),
io:format(".. ok ~n"),
ok.
-
+
diff --git a/src/mochiweb/mochiweb_skel.erl b/src/mochiweb/mochiweb_skel.erl
index a1c37f98..098951be 100644
--- a/src/mochiweb/mochiweb_skel.erl
+++ b/src/mochiweb/mochiweb_skel.erl
@@ -7,7 +7,7 @@
skelcopy(DestDir, Name) ->
ok = ensuredir(DestDir),
- LDst = case length(filename:dirname(DestDir)) of
+ LDst = case length(filename:dirname(DestDir)) of
1 -> %% handle case when dirname returns "/"
0;
N ->
@@ -17,7 +17,7 @@ skelcopy(DestDir, Name) ->
ok = file:make_symlink(
filename:join(filename:dirname(code:which(?MODULE)), ".."),
filename:join([DestDir, Name, "deps", "mochiweb-src"])).
-
+
%% Internal API
@@ -40,7 +40,7 @@ skelcopy(Src, DestDir, Name, LDst) ->
io:format("~s/~n", [EDst]),
lists:foreach(fun ("." ++ _) -> ok;
(F) ->
- skelcopy(filename:join(Src, F),
+ skelcopy(filename:join(Src, F),
Dir,
Name,
LDst)
diff --git a/src/mochiweb/mochiweb_socket_server.erl b/src/mochiweb/mochiweb_socket_server.erl
index d4853dad..a483c3d0 100644
--- a/src/mochiweb/mochiweb_socket_server.erl
+++ b/src/mochiweb/mochiweb_socket_server.erl
@@ -106,7 +106,7 @@ ipv6_supported() ->
init(State=#mochiweb_socket_server{ip=Ip, port=Port, backlog=Backlog}) ->
process_flag(trap_exit, true),
- BaseOpts = [binary,
+ BaseOpts = [binary,
{reuseaddr, true},
{packet, 0},
{backlog, Backlog},
@@ -126,7 +126,7 @@ init(State=#mochiweb_socket_server{ip=Ip, port=Port, backlog=Backlog}) ->
end,
case gen_tcp_listen(Port, Opts, State) of
{stop, eacces} ->
- case Port < 1024 of
+ case Port < 1024 of
true ->
case fdsrv:start() of
{ok, _} ->
@@ -150,7 +150,7 @@ gen_tcp_listen(Port, Opts, State) ->
case gen_tcp:listen(Port, Opts) of
{ok, Listen} ->
{ok, ListenPort} = inet:port(Listen),
- {ok, new_acceptor(State#mochiweb_socket_server{listen=Listen,
+ {ok, new_acceptor(State#mochiweb_socket_server{listen=Listen,
port=ListenPort})};
{error, Reason} ->
{stop, Reason}
@@ -183,11 +183,11 @@ acceptor_loop({Server, Listen, Loop}) ->
lists:flatten(io_lib:format("~p", [Other]))]),
exit({error, accept_failed})
end.
-
+
do_get(port, #mochiweb_socket_server{port=Port}) ->
Port.
-
+
handle_call({get, Property}, _From, State) ->
Res = do_get(Property, State),
{reply, Res, State};
@@ -205,7 +205,7 @@ handle_cast(stop, State) ->
terminate(_Reason, #mochiweb_socket_server{listen=Listen, port=Port}) ->
gen_tcp:close(Listen),
- case Port < 1024 of
+ case Port < 1024 of
true ->
catch fdsrv:stop(),
ok;
@@ -228,7 +228,7 @@ handle_info({'EXIT', Pid, Reason},
{noreply, new_acceptor(State)};
handle_info({'EXIT', _LoopPid, Reason},
State=#mochiweb_socket_server{acceptor=Pid, max=Max}) ->
- case Reason of
+ case Reason of
normal ->
ok;
_ ->
diff --git a/src/mochiweb/mochiweb_util.erl b/src/mochiweb/mochiweb_util.erl
index 9a0675f5..93c03431 100644
--- a/src/mochiweb/mochiweb_util.erl
+++ b/src/mochiweb/mochiweb_util.erl
@@ -418,7 +418,7 @@ record_to_proplist(Record, Fields) ->
record_to_proplist(Record, Fields, '__record').
%% @spec record_to_proplist(Record, Fields, TypeKey) -> proplist()
-%% @doc Return a proplist of the given Record with each field in the
+%% @doc Return a proplist of the given Record with each field in the
%% Fields list set as a key with the corresponding value in the Record.
%% TypeKey is the key that is used to store the record type
%% Fields should be obtained by calling record_info(fields, record_type)
diff --git a/test/couch_config_test.erl b/test/couch_config_test.erl
index faacd5fc..a02affc1 100644
--- a/test/couch_config_test.erl
+++ b/test/couch_config_test.erl
@@ -1,7 +1,7 @@
% couch_config module test suite
% Set up test suite
-% ?MODULE_test() returns a list of functions
+% ?MODULE_test() returns a list of functions
% that run the actual tests.
couch_config_test() ->
[
@@ -10,7 +10,7 @@ couch_config_test() ->
% test functions
-
+
store_strings() ->
Filename = "test.ini",
file:write_file(Filename, ""),
@@ -26,7 +26,7 @@ store_strings() ->
exit(Proc, kill),
receive {'EXIT', Proc, _} -> ok end,
-
+
% clean up
file:delete(Filename),
diff --git a/test/couch_config_writer_test.erl b/test/couch_config_writer_test.erl
index aa88abeb..6fa87078 100644
--- a/test/couch_config_writer_test.erl
+++ b/test/couch_config_writer_test.erl
@@ -1,7 +1,7 @@
% couch_config_writer module test suite
% Set up test suite
-% ?MODULE_test() returns a list of functions
+% ?MODULE_test() returns a list of functions
% that run the actual tests.
% todo, fix replace_existing_variable2 (i.e. reordering)
couch_config_writer_test() ->
@@ -186,12 +186,12 @@ run_operation_and_compare_results(Contents, Expect, Config) ->
% call replace function
[couch_config_writer:save_to_file(ConfigVar, Filename) || ConfigVar <- Config],
-
+
% compare new file with expected file
{ok, Result_} = file:read_file(Filename),
Result = binary_to_list(Result_),
% clean up
% file:delete(Filename),
-
+
Result = Expect.
diff --git a/test/etap/010-file-basics.t b/test/etap/010-file-basics.t
index 9033317f..055c9780 100755
--- a/test/etap/010-file-basics.t
+++ b/test/etap/010-file-basics.t
@@ -15,7 +15,7 @@ main(_) ->
etap:bail()
end,
ok.
-
+
test() ->
etap:is({error, enoent}, couch_file:open("not a real file"),
"Opening a non-existant file should return an enoent error."),
@@ -29,7 +29,7 @@ test() ->
{ok, Fd} = couch_file:open(filename() ++ ".0", [create, overwrite]),
etap:ok(is_pid(Fd),
"Returned file descriptor is a Pid"),
-
+
etap:is({ok, 0}, couch_file:bytes(Fd),
"Newly created files have 0 bytes."),
@@ -39,16 +39,16 @@ test() ->
{ok, Size} = couch_file:bytes(Fd),
etap:is_greater(Size, 0,
"Writing a term increased the file size."),
-
+
etap:is({ok, Size}, couch_file:append_binary(Fd, <<"fancy!">>),
"Appending a binary returns the current file size."),
-
+
etap:is({ok, foo}, couch_file:pread_term(Fd, 0),
"Reading the first term returns what we wrote: foo"),
-
+
etap:is({ok, <<"fancy!">>}, couch_file:pread_binary(Fd, Size),
"Reading back the binary returns what we wrote: <<\"fancy\">>."),
-
+
etap:is({ok, <<131, 100, 0, 3, 102, 111, 111>>},
couch_file:pread_binary(Fd, 0),
"Reading a binary at a term position returns the term as binary."
@@ -75,7 +75,7 @@ test() ->
% "Reading data that was truncated fails.")
etap:skip(fun() -> ok end,
"No idea how to test reading beyond EOF"),
-
+
etap:is({ok, foo}, couch_file:pread_term(Fd, 0),
"Truncating does not affect data located before the truncation mark."),
diff --git a/test/etap/011-file-headers.t b/test/etap/011-file-headers.t
index 83478d34..4ad3d21f 100755
--- a/test/etap/011-file-headers.t
+++ b/test/etap/011-file-headers.t
@@ -19,39 +19,39 @@ main(_) ->
etap:bail()
end,
ok.
-
+
test() ->
{ok, Fd} = couch_file:open(filename(), [create,overwrite]),
-
+
etap:is({ok, 0}, couch_file:bytes(Fd),
"File should be initialized to contain zero bytes."),
-
+
etap:is(ok, couch_file:write_header(Fd, {<<"some_data">>, 32}),
"Writing a header succeeds."),
-
+
{ok, Size1} = couch_file:bytes(Fd),
etap:is_greater(Size1, 0,
"Writing a header allocates space in the file."),
-
+
etap:is({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd),
"Reading the header returns what we wrote."),
-
+
etap:is(ok, couch_file:write_header(Fd, [foo, <<"more">>]),
"Writing a second header succeeds."),
-
+
{ok, Size2} = couch_file:bytes(Fd),
etap:is_greater(Size2, Size1,
"Writing a second header allocates more space."),
-
+
etap:is({ok, [foo, <<"more">>]}, couch_file:read_header(Fd),
"Reading the second header does not return the first header."),
-
+
% Delete the second header.
ok = couch_file:truncate(Fd, Size1),
-
+
etap:is({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd),
"Reading the header after a truncation returns a previous header."),
-
+
couch_file:write_header(Fd, [foo, <<"more">>]),
etap:is({ok, Size2}, couch_file:bytes(Fd),
"Rewriting the same second header returns the same second size."),
@@ -60,16 +60,16 @@ test() ->
% Now for the fun stuff. Try corrupting the second header and see
% if we recover properly.
-
+
% Destroy the 0x1 byte that marks a header
check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
etap:isnt(Expect, couch_file:read_header(CouchFd),
"Should return a different header before corruption."),
file:pwrite(RawFd, HeaderPos, <<0>>),
etap:is(Expect, couch_file:read_header(CouchFd),
- "Corrupting the byte marker should read the previous header.")
+ "Corrupting the byte marker should read the previous header.")
end),
-
+
% Corrupt the size.
check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
etap:isnt(Expect, couch_file:read_header(CouchFd),
@@ -79,7 +79,7 @@ test() ->
etap:is(Expect, couch_file:read_header(CouchFd),
"Corrupting the size should read the previous header.")
end),
-
+
% Corrupt the MD5 signature
check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
etap:isnt(Expect, couch_file:read_header(CouchFd),
@@ -89,7 +89,7 @@ test() ->
etap:is(Expect, couch_file:read_header(CouchFd),
"Corrupting the MD5 signature should read the previous header.")
end),
-
+
% Corrupt the data
check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
etap:isnt(Expect, couch_file:read_header(CouchFd),
@@ -99,7 +99,7 @@ test() ->
etap:is(Expect, couch_file:read_header(CouchFd),
"Corrupting the header data should read the previous header.")
end),
-
+
ok.
check_header_recovery(CheckFun) ->
@@ -112,9 +112,9 @@ check_header_recovery(CheckFun) ->
{ok, HeaderPos} = write_random_data(Fd),
ok = couch_file:write_header(Fd, {2342, <<"corruption! greed!">>}),
-
+
CheckFun(Fd, RawFd, {ok, ExpectHeader}, HeaderPos),
-
+
ok = file:close(RawFd),
ok = couch_file:close(Fd),
ok.
@@ -131,4 +131,3 @@ write_random_data(Fd, N) ->
{ok, _} = couch_file:append_term(Fd, Term),
write_random_data(Fd, N-1).
- \ No newline at end of file
diff --git a/test/etap/020-btree-basics.t b/test/etap/020-btree-basics.t
index fbc895c1..1b985a4a 100755
--- a/test/etap/020-btree-basics.t
+++ b/test/etap/020-btree-basics.t
@@ -29,7 +29,7 @@ test()->
etap:ok(test_kvs(shuffle(Sorted)), "Testing shuffled keys."),
ok.
-test_kvs(KeyValues) ->
+test_kvs(KeyValues) ->
ReduceFun = fun
(reduce, KVs) ->
length(KVs);
@@ -53,7 +53,7 @@ test_kvs(KeyValues) ->
{ok, Btree2} = couch_btree:add_remove(Btree1, KeyValues, []),
etap:ok(test_btree(Btree2, KeyValues),
"Adding all keys at once returns a complete btree."),
-
+
etap:fun_is(
fun
({ok, {kp_node, _}}) -> true;
@@ -98,7 +98,7 @@ test_kvs(KeyValues) ->
etap:ok(test_add_remove(Btree6, Rem2Keys0, Rem2Keys1),
"Add/Remove every other key."),
-
+
etap:ok(test_add_remove(Btree6, Rem2Keys1, Rem2Keys0),
"Add/Remove opposite every other key."),
@@ -189,5 +189,5 @@ randomize(List) ->
D = lists:map(fun(A) ->
{random:uniform(), A}
end, List),
- {_, D1} = lists:unzip(lists:keysort(1, D)),
+ {_, D1} = lists:unzip(lists:keysort(1, D)),
D1.
diff --git a/test/etap/021-btree-reductions.t b/test/etap/021-btree-reductions.t
index 9ae7dbbf..d2de4c92 100755
--- a/test/etap/021-btree-reductions.t
+++ b/test/etap/021-btree-reductions.t
@@ -22,10 +22,10 @@ test()->
(reduce, KVs) -> length(KVs);
(rereduce, Reds) -> lists:sum(Reds)
end,
-
+
{ok, Fd} = couch_file:open(filename(), [create,overwrite]),
{ok, Btree} = couch_btree:open(nil, Fd, [{reduce, ReduceFun}]),
-
+
% Create a list of {"even", Value} or {"odd", Value} pairs.
{_, EvenOddKVs} = lists:foldl(fun(Idx, {Key, Acc}) ->
case Key of
@@ -50,7 +50,7 @@ test()->
true;
(_) ->
false
- end,
+ end,
couch_btree:fold_reduce(Btree2, nil, nil, GroupFun, FoldFun, []),
"Reduction works with no specified direction, startkey, or endkey."
),
@@ -76,7 +76,7 @@ test()->
couch_btree:fold_reduce(Btree2, rev, nil, nil, GroupFun, FoldFun, []),
"Reducing backwards works with no startkey or endkey."
),
-
+
etap:fun_is(
fun
({ok, [{{"odd", _}, 500}, {{"even", _}, 500}]}) ->
@@ -87,7 +87,7 @@ test()->
couch_btree:fold_reduce(Btree2, fwd, SK1, EK2, GroupFun, FoldFun, []),
"Reducing works over the entire range with startkey and endkey set."
),
-
+
etap:fun_is(
fun
({ok, [{{"even", _}, 500}]}) -> true;
@@ -114,7 +114,7 @@ test()->
couch_btree:fold_reduce(Btree2, rev, EK2, SK2, GroupFun, FoldFun, []),
"Reducing in reverse works after swapping the startkey and endkey."
),
-
+
etap:fun_is(
fun
({ok, [{{"even", _}, 500}, {{"odd", _}, 500}]}) ->
diff --git a/test/etap/030-doc-from-json.t b/test/etap/030-doc-from-json.t
index 242591ed..431aa7ed 100755
--- a/test/etap/030-doc-from-json.t
+++ b/test/etap/030-doc-from-json.t
@@ -18,7 +18,7 @@ main(_) ->
etap:bail()
end,
ok.
-
+
test() ->
ok = test_from_json_success(),
ok = test_from_json_errors(),
@@ -49,7 +49,7 @@ test_from_json_success() ->
{
{[{<<"_rev">>, <<"4-230234">>}]},
#doc{revs={4, [<<"230234">>]}},
- "_rev stored in revs."
+ "_rev stored in revs."
},
{
{[{<<"soap">>, 35}]},
@@ -105,7 +105,7 @@ test_from_json_success() ->
{
{[{<<"_local_seq">>, dropping}]},
#doc{},
- "Drops _local_seq."
+ "Drops _local_seq."
},
{
{[{<<"_conflicts">>, dropping}]},
@@ -118,7 +118,7 @@ test_from_json_success() ->
"Drops _deleted_conflicts."
}
],
-
+
lists:foreach(fun({EJson, Expect, Mesg}) ->
etap:is(couch_doc:from_json_obj(EJson), Expect, Mesg)
end, Cases),
@@ -193,7 +193,7 @@ test_from_json_errors() ->
"Underscore prefix fields are reserved."
}
],
-
+
lists:foreach(fun
({EJson, Expect, Mesg}) ->
Error = (catch couch_doc:from_json_obj(EJson)),
diff --git a/test/etap/031-doc-to-json.t b/test/etap/031-doc-to-json.t
index 37ee9946..9bb5747e 100755
--- a/test/etap/031-doc-to-json.t
+++ b/test/etap/031-doc-to-json.t
@@ -22,7 +22,7 @@ main(_) ->
test() ->
ok = test_to_json_success(),
ok.
-
+
test_to_json_success() ->
Cases = [
{
@@ -92,7 +92,7 @@ test_to_json_success() ->
{
#doc{meta=[{deleted_conflicts, [{10923, <<"big_cowboy_hat">>}]}]},
{[
- {<<"_id">>, <<>>},
+ {<<"_id">>, <<>>},
{<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]}
]},
"_deleted_conflicsts is added as an array of strings."
diff --git a/test/etap/040-util.t b/test/etap/040-util.t
index a6b7df33..94db4534 100755
--- a/test/etap/040-util.t
+++ b/test/etap/040-util.t
@@ -19,13 +19,13 @@ test() ->
% to_existing_atom
etap:is(true, couch_util:to_existing_atom(true), "An atom is an atom."),
etap:is(foo, couch_util:to_existing_atom(<<"foo">>),
- "A binary foo is the atom foo."),
+ "A binary foo is the atom foo."),
etap:is(foobarbaz, couch_util:to_existing_atom("foobarbaz"),
"A list of atoms is one munged atom."),
% terminate_linked
Self = self(),
- spawn(fun() ->
+ spawn(fun() ->
ChildPid = spawn_link(fun() -> receive shutdown -> ok end end),
couch_util:terminate_linked(normal),
Self ! {pid, ChildPid}
@@ -33,7 +33,7 @@ test() ->
receive
{pid, Pid} ->
etap:ok(not is_process_alive(Pid), "why wont this work?")
- end,
+ end,
% new_uuid
etap:isnt(couch_util:new_uuid(), couch_util:new_uuid(),
@@ -68,5 +68,5 @@ test() ->
etap:ok(not couch_util:should_flush(),
"Checking to flush invokes GC."),
-
+
ok.
diff --git a/test/etap/050-stream.t b/test/etap/050-stream.t
index dab2d50c..bb28a96b 100755
--- a/test/etap/050-stream.t
+++ b/test/etap/050-stream.t
@@ -20,7 +20,7 @@ read_all(Fd, PosList) ->
test() ->
{ok, Fd} = couch_file:open("test/etap/temp.050", [create,overwrite]),
{ok, Stream} = couch_stream:open(Fd),
-
+
etap:is(ok, couch_stream:write(Stream, <<"food">>),
"Writing to streams works."),
@@ -43,16 +43,16 @@ test() ->
"Successfully wrote 80 1 bits."),
ZeroBits = <<0:(8*10)>>,
- etap:is(ok, couch_stream:write(Stream2, ZeroBits),
+ etap:is(ok, couch_stream:write(Stream2, ZeroBits),
"Successfully wrote 80 0 bits."),
-
+
{Ptrs2, Length2} = couch_stream:close(Stream2),
etap:is(Ptrs2, [ExpPtr], "Closing stream returns the file pointers."),
etap:is(Length2, 20, "Length written is 160 bytes."),
AllBits = iolist_to_binary([OneBits,ZeroBits]),
etap:is(AllBits, read_all(Fd, Ptrs2), "Returned pointers are valid."),
-
+
% Stream more than the 4K chunk size.
{ok, ExpPtr2} = couch_file:bytes(Fd),
{ok, Stream3} = couch_stream:open(Fd),
diff --git a/test/etap/060-kt-merging.t b/test/etap/060-kt-merging.t
index 004e004f..f5d9fbe7 100755
--- a/test/etap/060-kt-merging.t
+++ b/test/etap/060-kt-merging.t
@@ -28,7 +28,7 @@ test() ->
Stemmed1a = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
Stemmed1aa = [{2, {"1aa", "bar", []}}],
Stemmed1bb = [{2, {"1bb", "boo", []}}],
-
+
etap:is(
{EmptyTree, no_conflicts},
couch_key_tree:merge(EmptyTree, EmptyTree),
@@ -69,7 +69,7 @@ test() ->
{TwoChildSibs, no_conflicts},
couch_key_tree:merge(TwoChildSibs, TwoChildSibs),
"Merging a tree to itself is itself."),
-
+
etap:is(
{TwoChildSibs, no_conflicts},
couch_key_tree:merge(TwoChildSibs, Stemmed1b),
@@ -87,7 +87,7 @@ test() ->
couch_key_tree:merge(TwoChildSibs2, Stemmed1bb),
"Merging a stem at a deeper level."
),
-
+
etap:is(
{TwoChildSibs2, no_conflicts},
couch_key_tree:merge(Stemmed1bb, TwoChildSibs2),
@@ -118,11 +118,11 @@ test() ->
couch_key_tree:merge(OneChild, Stemmed1aa),
"Merging should create conflicts."
),
-
+
etap:is(
{TwoChild, no_conflicts},
couch_key_tree:merge(Expect1, TwoChild),
"Merge should have no conflicts."
),
-
+
ok.
diff --git a/test/etap/061-kt-missing-leaves.t b/test/etap/061-kt-missing-leaves.t
index c5ee1222..85fac0b6 100755
--- a/test/etap/061-kt-missing-leaves.t
+++ b/test/etap/061-kt-missing-leaves.t
@@ -17,7 +17,7 @@ test() ->
TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
Stemmed2 = [{2, {"1aa", "bar", []}}],
-
+
etap:is(
[],
couch_key_tree:find_missing(TwoChildSibs, [{0,"1"}, {1,"1a"}]),
diff --git a/test/etap/062-kt-remove-leaves.t b/test/etap/062-kt-remove-leaves.t
index 566efa06..e560c6a9 100755
--- a/test/etap/062-kt-remove-leaves.t
+++ b/test/etap/062-kt-remove-leaves.t
@@ -23,31 +23,31 @@ test() ->
couch_key_tree:remove_leafs(TwoChildSibs, []),
"Removing no leaves has no effect on the tree."
),
-
+
etap:is(
{TwoChildSibs, []},
couch_key_tree:remove_leafs(TwoChildSibs, [{0, "1"}]),
"Removing a non-existant branch has no effect."
),
-
+
etap:is(
{OneChild, [{1, "1b"}]},
couch_key_tree:remove_leafs(TwoChildSibs, [{1, "1b"}]),
"Removing a leaf removes the leaf."
),
-
+
etap:is(
{[], [{1, "1b"},{1, "1a"}]},
couch_key_tree:remove_leafs(TwoChildSibs, [{1, "1a"}, {1, "1b"}]),
"Removing all leaves returns an empty tree."
),
-
+
etap:is(
{Stemmed, []},
couch_key_tree:remove_leafs(Stemmed, [{1, "1a"}]),
"Removing a non-existant node has no effect."
),
-
+
etap:is(
{[], [{2, "1aa"}]},
couch_key_tree:remove_leafs(Stemmed, [{2, "1aa"}]),
diff --git a/test/etap/063-kt-get-leaves.t b/test/etap/063-kt-get-leaves.t
index 9e366ff8..342657bf 100755
--- a/test/etap/063-kt-get-leaves.t
+++ b/test/etap/063-kt-get-leaves.t
@@ -22,7 +22,7 @@ test() ->
couch_key_tree:get(TwoChildSibs, [{0, "1"}]),
"extract a subtree."
),
-
+
etap:is(
{[{"bar", {1, ["1a", "1"]}}],[]},
couch_key_tree:get(TwoChildSibs, [{1, "1a"}]),
@@ -34,13 +34,13 @@ test() ->
couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}]),
"gather up the leaves."
),
-
+
etap:is(
{[{"bar", {1, ["1a","1"]}}],[]},
couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}]),
"gather up the leaves."
),
-
+
etap:is(
{[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]},
couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}]),
@@ -52,7 +52,7 @@ test() ->
couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}]),
"retrieve full key paths."
),
-
+
etap:is(
{[{1,[{"1a", "bar"},{"1", "foo"}]}],[]},
couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}]),
@@ -64,19 +64,19 @@ test() ->
couch_key_tree:get_all_leafs_full(Stemmed),
"retrieve all leaves."
),
-
+
etap:is(
[{1, [{"1a", "bar"},{"1", "foo"}]}, {1, [{"1b", "bar"},{"1", "foo"}]}],
couch_key_tree:get_all_leafs_full(TwoChildSibs),
"retrieve all the leaves."
),
-
+
etap:is(
[{"bar", {2, ["1aa","1a"]}}],
couch_key_tree:get_all_leafs(Stemmed),
"retrieve all leaves."
),
-
+
etap:is(
[{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b","1"]}}],
couch_key_tree:get_all_leafs(TwoChildSibs),
diff --git a/test/etap/064-kt-counting.t b/test/etap/064-kt-counting.t
index dd00cedd..aa42fec6 100755
--- a/test/etap/064-kt-counting.t
+++ b/test/etap/064-kt-counting.t
@@ -18,7 +18,7 @@ test() ->
One = [{0, {"1","foo",[]}}],
TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
Stemmed = [{2, {"1bb", "boo", []}}],
-
+
etap:is(0, couch_key_tree:count_leafs(EmptyTree),
"Empty trees have no leaves."),
@@ -27,8 +27,8 @@ test() ->
etap:is(2, couch_key_tree:count_leafs(TwoChildSibs),
"Two children siblings counted as two leaves."),
-
+
etap:is(1, couch_key_tree:count_leafs(Stemmed),
"Stemming does not affect leaf counting."),
-
+
ok.
diff --git a/test/etap/070-couch-db.t b/test/etap/070-couch-db.t
index 76519a55..d0ae0a52 100755
--- a/test/etap/070-couch-db.t
+++ b/test/etap/070-couch-db.t
@@ -4,7 +4,7 @@
main(_) ->
code:add_pathz("src/couchdb"),
code:add_pathz("src/mochiweb"),
-
+
etap:plan(4),
case (catch test()) of
ok ->
@@ -16,7 +16,7 @@ main(_) ->
ok.
test() ->
-
+
couch_server:start(
["etc/couchdb/default_dev.ini", "etc/couchdb/local_dev.ini"]
),
@@ -48,7 +48,7 @@ test() ->
Acc+1
end, 0, lists:seq(1, 200)),
etap:is(200, NumCreated, "Created all databases."),
-
+
lists:foreach(fun(Int) ->
ok = couch_server:delete(MkDbName(Int), [])
end, lists:seq(1, 200)),
@@ -59,5 +59,5 @@ test() ->
Acc+1
end, 0, lists:seq(1, 200)),
etap:is(200, NumDeleted, "Deleted all databases."),
-
+
ok.
diff --git a/test/etap/080-config-get-set.t b/test/etap/080-config-get-set.t
index 8cd6a9e6..7abd8a0a 100755
--- a/test/etap/080-config-get-set.t
+++ b/test/etap/080-config-get-set.t
@@ -19,23 +19,23 @@ main(_) ->
test() ->
% start couch_config with default
couch_config:start_link([default_config()]),
-
-
+
+
% Check that we can get values
-
-
+
+
etap:fun_is(
fun(List) -> length(List) > 0 end,
couch_config:all(),
"Data was loaded from the INI file."
),
-
+
etap:fun_is(
fun(List) -> length(List) > 0 end,
couch_config:get("daemons"),
"There are settings in the [daemons] section of the INI file."
),
-
+
etap:is(
couch_config:get("httpd_design_handlers", "_view"),
"{couch_httpd_view, handle_view_req}",
@@ -47,13 +47,13 @@ test() ->
"bar",
"Returns the default when key doesn't exist in config."
),
-
+
etap:is(
couch_config:get("httpd", "foo"),
undefined,
"The default default is the atom 'undefined'."
),
-
+
etap:is(
couch_config:get("httpd", "port", "bar"),
"5984",
@@ -63,43 +63,43 @@ test() ->
% Check that setting values works.
-
+
ok = couch_config:set("log", "level", "severe", false),
-
+
etap:is(
couch_config:get("log", "level"),
"severe",
"Non persisted changes take effect."
),
-
+
etap:is(
couch_config:get("new_section", "bizzle"),
undefined,
"Section 'new_section' does not exist."
),
-
+
ok = couch_config:set("new_section", "bizzle", "bang", false),
-
+
etap:is(
couch_config:get("new_section", "bizzle"),
"bang",
"New section 'new_section' was created for a new key/value pair."
),
-
-
+
+
% Check that deleting works
-
-
+
+
ok = couch_config:delete("new_section", "bizzle", false),
etap:is(
couch_config:get("new_section", "bizzle"),
"",
"Deleting sets the value to \"\""
),
-
-
+
+
% Check get/set/delete binary strings
-
+
ok = couch_config:set(<<"foo">>, <<"bar">>, <<"baz">>, false),
etap:is(
couch_config:get(<<"foo">>, <<"bar">>),
@@ -112,5 +112,5 @@ test() ->
"",
"Deleting with binary section/key pairs sets the value to \"\""
),
-
+
ok.
diff --git a/test/etap/081-config-override.t b/test/etap/081-config-override.t
index bf9655f1..abb88854 100755
--- a/test/etap/081-config-override.t
+++ b/test/etap/081-config-override.t
@@ -29,7 +29,7 @@ run_tests(IniFiles, Tests) ->
main(_) ->
code:add_pathz("src/couchdb"),
etap:plan(17),
-
+
case (catch test()) of
ok ->
etap:end_tests();
@@ -50,42 +50,42 @@ test() ->
"100",
"{couchdb, max_dbs_open} is 100 by defualt."
),
-
+
etap:is(
couch_config:get("httpd","port"),
"5984",
"{httpd, port} is 5984 by default"
),
-
+
etap:is(
couch_config:get("fizbang", "unicode"),
undefined,
"{fizbang, unicode} is undefined by default"
)
end,
-
+
run_tests([default_config()], CheckDefaults),
-
-
+
+
% Check that subsequent files override values appropriately
-
+
CheckOverride = fun() ->
etap:is(
couch_config:get("couchdb", "max_dbs_open"),
"10",
"{couchdb, max_dbs_open} was overriden with the value 10"
),
-
+
etap:is(
couch_config:get("httpd", "port"),
"4895",
"{httpd, port} was overriden with the value 4895"
)
end,
-
+
run_tests([default_config(), local_config_1()], CheckOverride),
-
-
+
+
% Check that overrides can create new sections
CheckOverride2 = fun() ->
@@ -94,19 +94,19 @@ test() ->
"80",
"{httpd, port} is overriden with the value 80"
),
-
+
etap:is(
couch_config:get("fizbang", "unicode"),
"normalized",
"{fizbang, unicode} was created by override INI file"
)
end,
-
+
run_tests([default_config(), local_config_2()], CheckOverride2),
-
-
+
+
% Check that values can be overridden multiple times
-
+
CheckOverride3 = fun() ->
etap:is(
couch_config:get("httpd", "port"),
@@ -114,19 +114,19 @@ test() ->
"{httpd, port} value was taken from the last specified INI file."
)
end,
-
+
run_tests(
[default_config(), local_config_1(), local_config_2()],
CheckOverride3
),
% Check persistence to last file.
-
+
% Empty the file in case it exists.
{ok, Fd} = file:open(local_config_write(), write),
ok = file:truncate(Fd),
ok = file:close(Fd),
-
+
% Open and write a value
CheckCanWrite = fun() ->
etap:is(
@@ -134,34 +134,34 @@ test() ->
"5984",
"{httpd, port} is still 5984 by default"
),
-
+
etap:is(
couch_config:set("httpd", "port", "8080"),
ok,
"Writing {httpd, port} is kosher."
),
-
+
etap:is(
couch_config:get("httpd", "port"),
"8080",
"{httpd, port} was updated to 8080 successfully."
),
-
+
etap:is(
couch_config:delete("httpd", "bind_address"),
ok,
"Deleting {httpd, bind_address} succeeds"
),
-
+
etap:is(
couch_config:get("httpd", "bind_address"),
"",
"{httpd, bind_address} was actually deleted."
)
end,
-
+
run_tests([default_config(), local_config_write()], CheckCanWrite),
-
+
% Open and check where we don't expect persistence.
CheckDidntWrite = fun() ->
@@ -170,16 +170,16 @@ test() ->
"5984",
"{httpd, port} was not persisted to the primary INI file."
),
-
+
etap:is(
couch_config:get("httpd", "bind_address"),
"127.0.0.1",
"{httpd, bind_address} was not deleted form the primary INI file."
)
end,
-
+
run_tests([default_config()], CheckDidntWrite),
-
+
% Open and check we have only the persistence we expect.
CheckDidWrite = fun() ->
etap:is(
@@ -187,14 +187,14 @@ test() ->
"8080",
"{httpd, port} is still 8080 after reopening the config."
),
-
+
etap:is(
couch_config:get("httpd", "bind_address"),
"",
"{httpd, bind_address} is still \"\" after reopening."
)
end,
-
+
run_tests([local_config_write()], CheckDidWrite),
-
+
ok.
diff --git a/test/etap/082-config-register.t b/test/etap/082-config-register.t
index b64e2d9c..09972c9e 100755
--- a/test/etap/082-config-register.t
+++ b/test/etap/082-config-register.t
@@ -24,9 +24,9 @@ test() ->
"5984",
"{httpd, port} is 5984 by default."
),
-
+
ok = couch_config:set("httpd", "port", "4895", false),
-
+
etap:is(
couch_config:get("httpd", "port"),
"4895",
@@ -46,9 +46,9 @@ test() ->
end,
SentinelPid
),
-
+
ok = couch_config:set("httpd", "port", "8080", false),
-
+
% Implicitly checking that we *don't* call the function
etap:is(
couch_config:get("httpd", "bind_address"),
@@ -56,7 +56,7 @@ test() ->
"{httpd, bind_address} is not '0.0.0.0'"
),
ok = couch_config:set("httpd", "bind_address", "0.0.0.0", false),
-
+
% Ping-Pong kill process
SentinelPid ! {ping, self()},
receive
@@ -71,5 +71,5 @@ test() ->
"80",
"Implicitly test that the function got de-registered"
),
-
+
ok.
diff --git a/test/etap/083-config-no-files.t b/test/etap/083-config-no-files.t
index 28931d85..61ad9517 100755
--- a/test/etap/083-config-no-files.t
+++ b/test/etap/083-config-no-files.t
@@ -24,9 +24,9 @@ test() ->
couch_config:all(),
"No INI files specified returns 0 key/value pairs."
),
-
+
ok = couch_config:set("httpd", "port", "80", false),
-
+
etap:is(
couch_config:get("httpd", "port"),
"80",
diff --git a/test/etap/090-task-status.t b/test/etap/090-task-status.t
index 45d38dff..bc4c20ca 100755
--- a/test/etap/090-task-status.t
+++ b/test/etap/090-task-status.t
@@ -59,11 +59,11 @@ wait(Pid) ->
{ok, Pid, Msg} -> Msg
after 1000 ->
throw(timeout_error)
- end.
+ end.
-test() ->
+test() ->
{ok, TaskStatusPid} = couch_task_status:start_link(),
-
+
TaskUpdater = fun() -> loop() end,
% create three updaters
Pid1 = spawn(TaskUpdater),
@@ -103,13 +103,13 @@ test() ->
2,
"Started a second task."
),
-
+
etap:is(
check_status(Pid2, couch_task_status:all()),
<<"init">>,
"Second tasks's status was set to 'init'."
),
-
+
call(Pid2, update, "running"),
etap:is(
check_status(Pid2, couch_task_status:all()),
@@ -124,13 +124,13 @@ test() ->
3,
"Registered a third task."
),
-
+
etap:is(
check_status(Pid3, couch_task_status:all()),
<<"init">>,
"Third tasks's status was set to 'init'."
),
-
+
call(Pid3, update, "running"),
etap:is(
check_status(Pid3, couch_task_status:all()),
@@ -169,14 +169,14 @@ test() ->
2,
"First task finished."
),
-
+
call(Pid2, done),
etap:is(
length(couch_task_status:all()),
1,
"Second task finished."
),
-
+
call(Pid3, done),
etap:is(
length(couch_task_status:all()),
diff --git a/test/etap/100-ref-counter.t b/test/etap/100-ref-counter.t
index 746b70a9..144a95b8 100755
--- a/test/etap/100-ref-counter.t
+++ b/test/etap/100-ref-counter.t
@@ -26,9 +26,9 @@ wait() ->
after
1000 ->
throw(timeout_error)
- end.
+ end.
-test() ->
+test() ->
{ok, RefCtr} = couch_ref_counter:start([]),
etap:is(
@@ -67,7 +67,7 @@ test() ->
2,
"Droping the doubly added Pid only removes a ref, not a referer."
),
-
+
couch_ref_counter:drop(RefCtr, ChildPid1),
etap:is(
couch_ref_counter:count(RefCtr),
@@ -81,7 +81,7 @@ test() ->
2,
"Sanity checking that the Pid was re-added."
),
-
+
ChildPid1 ! {ping, self()},
wait(),
etap:is(
diff --git a/test/query_server_spec.rb b/test/query_server_spec.rb
index f4b124ca..4605ca1b 100644
--- a/test/query_server_spec.rb
+++ b/test/query_server_spec.rb
@@ -94,7 +94,7 @@ class OSProcessRunner
end
class QueryServerRunner < OSProcessRunner
-
+
COMMANDS = {"js" => "#{COUCH_ROOT}/src/couchdb/couchjs #{COUCH_ROOT}/share/server/main.js" }
def self.run_command
@@ -159,7 +159,7 @@ functions = {
var row;
log("about to getRow " + typeof(getRow));
while(row = getRow()) {
- send(row.key);
+ send(row.key);
};
return "tail";
};
@@ -172,8 +172,8 @@ functions = {
var row;
log("about to getRow " + typeof(getRow));
while(row = getRow()) {
- send(row.key);
- send("eggs");
+ send(row.key);
+ send("eggs");
};
return "tail";
};
@@ -186,7 +186,7 @@ functions = {
send(req.q);
var row;
while(row = getRow()) {
- send(row.key);
+ send(row.key);
};
return "early";
};
@@ -199,11 +199,11 @@ functions = {
send(req.q);
var row, i=0;
while(row = getRow()) {
- send(row.key);
+ send(row.key);
i += 1;
if (i > 2) {
return('early tail');
- }
+ }
};
};
JS
@@ -221,7 +221,7 @@ functions = {
send("bacon")
var row, i = 0;
while(row = getRow()) {
- send(row.key);
+ send(row.key);
i += 1;
if (i > 2) {
return('early');
@@ -237,7 +237,7 @@ functions = {
send(req.q);
var row;
while(row = getRow()) {
- send(row.key);
+ send(row.key);
};
return "tail";
};
@@ -254,7 +254,7 @@ describe "query server normal case" do
@qs.close
end
it "should reset" do
- @qs.run(["reset"]).should == true
+ @qs.run(["reset"]).should == true
end
it "should run map funs" do
@qs.reset!
@@ -285,7 +285,7 @@ describe "query server normal case" do
@qs.run(["rereduce", [@fun], vs]).should == [true, [45]]
end
end
-
+
# it "should validate"
describe "validation" do
before(:all) do
@@ -299,35 +299,35 @@ describe "query server normal case" do
@qs.run(["validate", @fun, {"bad" => true}, {}, {}]).should == {"forbidden"=>"bad doc"}
end
end
-
+
describe "show" do
before(:all) do
@fun = functions["show-simple"][LANGUAGE]
@qs.reset!
end
it "should show" do
- @qs.rrun(["show", @fun,
+ @qs.rrun(["show", @fun,
{:title => "Best ever", :body => "Doc body"}])
@qs.jsgets.should == ["resp", {"body" => "Best ever - Doc body"}]
end
end
-
+
describe "show with headers" do
before(:all) do
@fun = functions["show-headers"][LANGUAGE]
@qs.reset!
end
it "should show headers" do
- @qs.rrun(["show", @fun,
+ @qs.rrun(["show", @fun,
{:title => "Best ever", :body => "Doc body"}])
@qs.jsgets.should == ["resp", {"code"=>200,"headers" => {"X-Plankton"=>"Rusty"}, "body" => "Best ever - Doc body"}]
end
end
-
+
# end
# LIST TESTS
# __END__
-
+
describe "raw list with headers" do
before(:each) do
@fun = functions["show-sends"][LANGUAGE]
@@ -341,11 +341,11 @@ describe "query server normal case" do
@qs.jsgets.should == ["end", ["tail"]]
end
end
-
+
describe "list with rows" do
before(:each) do
@fun = functions["show-while-get-rows"][LANGUAGE]
- @qs.run(["reset"]).should == true
+ @qs.run(["reset"]).should == true
@qs.add_fun(@fun).should == true
end
it "should list em" do
@@ -365,7 +365,7 @@ describe "query server normal case" do
@qs.jsgets.should == ["end", ["tail"]]
end
end
-
+
describe "should buffer multiple chunks sent for a single row." do
before(:all) do
@fun = functions["show-while-get-rows-multi-send"][LANGUAGE]
@@ -400,7 +400,7 @@ describe "query server normal case" do
@qs.run(["list_end"]).should == ["end" , ["early"]]
end
end
-
+
describe "only goes to 2 list" do
before(:all) do
@fun = functions["list-chunky"][LANGUAGE]
@@ -443,7 +443,7 @@ describe "query server that exits" do
after(:each) do
@qs.close
end
-
+
describe "old style list" do
before(:each) do
@fun = functions["list-old-style"][LANGUAGE]
@@ -456,7 +456,7 @@ describe "query server that exits" do
resp["reason"].should include("the list API has changed")
end
end
-
+
describe "only goes to 2 list" do
before(:each) do
@fun = functions["list-capped"][LANGUAGE]
@@ -473,7 +473,7 @@ describe "query server that exits" do
should_have_exited @qs
end
end
-
+
describe "raw list" do
before(:each) do
@fun = functions["list-raw"][LANGUAGE]
@@ -486,5 +486,5 @@ describe "query server that exits" do
@qs.run(["reset"])["error"].should == "query_server_error"
should_have_exited @qs
end
- end
+ end
end
diff --git a/test/test.js b/test/test.js
index f3990966..07988085 100644
--- a/test/test.js
+++ b/test/test.js
@@ -56,7 +56,7 @@ var HTTP = (function() {
var body = parts.pop();
var header = parts.pop();
var headers = header.split(/\n/);
-
+
var status = /HTTP\/1.\d (\d*)/.exec(header)[1];
return {
responseText: body,
@@ -83,7 +83,7 @@ var HTTP = (function() {
HEAD : function(url, body, headers) {
var st, urx = url, hx = (headers || null);
st = headhttp(urx, hx);
- return parseCurl(st);
+ return parseCurl(st);
},
DELETE : function(url, body, headers) {
var st, urx = url, hx = (headers || null);
@@ -202,7 +202,7 @@ function runAllTestsConsole() {
} else {
numTests += 1;
var testFun = couchTests[t];
- runTestConsole(testFun, debug);
+ runTestConsole(testFun, debug);
}
}
p("Results: "+numFailures.toString() + " failures in "+numTests+" tests.")