author     Robert Newson <rnewson@apache.org>  2011-05-17 11:15:14 +0000
committer  Robert Newson <rnewson@apache.org>  2011-05-17 11:15:14 +0000
commit     e8e4b0d293021fe90326a85828f3cfb087bf18b7 (patch)
tree       986f544eac623ec23b769b36828894f93a173aa3
parent     da6a5322b0b8084f434752060caa8be214c6f4fa (diff)
tagging 1.1.0
git-svn-id: https://svn.apache.org/repos/asf/couchdb/tags/1.1.0@1104149 13f79535-47bb-0310-9956-ffa450edef68
-rw-r--r--  1.1.x/.gitignore  100
-rw-r--r--  1.1.x/AUTHORS  20
-rw-r--r--  1.1.x/BUGS  6
-rw-r--r--  1.1.x/CHANGES  632
-rw-r--r--  1.1.x/DEVELOPERS  95
-rw-r--r--  1.1.x/INSTALL.Unix  231
-rw-r--r--  1.1.x/INSTALL.Windows  153
-rw-r--r--  1.1.x/LICENSE  400
-rw-r--r--  1.1.x/Makefile.am  149
-rw-r--r--  1.1.x/NEWS  301
-rw-r--r--  1.1.x/NOTICE  55
-rw-r--r--  1.1.x/README  81
-rw-r--r--  1.1.x/THANKS  85
-rw-r--r--  1.1.x/acinclude.m4.in  30
-rw-r--r--  1.1.x/bin/Makefile.am  98
-rw-r--r--  1.1.x/bin/couchdb.bat.tpl.in  26
-rw-r--r--  1.1.x/bin/couchdb.tpl.in  330
-rw-r--r--  1.1.x/bin/couchjs.tpl.in  92
-rwxr-xr-x  1.1.x/bootstrap  68
-rw-r--r--  1.1.x/configure.ac  440
-rw-r--r--  1.1.x/etc/Makefile.am  117
-rw-r--r--  1.1.x/etc/couchdb/Makefile.am  86
-rw-r--r--  1.1.x/etc/couchdb/default.ini.tpl.in  148
-rw-r--r--  1.1.x/etc/couchdb/local.ini  74
-rw-r--r--  1.1.x/etc/default/Makefile.am  13
-rw-r--r--  1.1.x/etc/default/couchdb  7
-rw-r--r--  1.1.x/etc/init/Makefile.am  13
-rw-r--r--  1.1.x/etc/init/couchdb.tpl.in  168
-rw-r--r--  1.1.x/etc/launchd/Makefile.am  13
-rw-r--r--  1.1.x/etc/launchd/org.apache.couchdb.plist.tpl.in  30
-rw-r--r--  1.1.x/etc/logrotate.d/Makefile.am  13
-rw-r--r--  1.1.x/etc/logrotate.d/couchdb.tpl.in  9
-rw-r--r--  1.1.x/etc/windows/Makefile.am  13
-rw-r--r--  1.1.x/etc/windows/README.txt.tpl  29
-rw-r--r--  1.1.x/etc/windows/couchdb.iss.tpl  87
-rw-r--r--  1.1.x/license.skip  111
-rw-r--r--  1.1.x/m4/ac_check_curl.m4.gz  bin  0 -> 944 bytes
-rw-r--r--  1.1.x/m4/ac_check_icu.m4.gz  bin  0 -> 956 bytes
-rw-r--r--  1.1.x/share/Makefile.am  201
-rw-r--r--  1.1.x/share/server/filter.js  23
-rw-r--r--  1.1.x/share/server/json2.js  482
-rw-r--r--  1.1.x/share/server/loop.js  142
-rw-r--r--  1.1.x/share/server/mimeparse.js  158
-rw-r--r--  1.1.x/share/server/render.js  352
-rw-r--r--  1.1.x/share/server/state.js  32
-rw-r--r--  1.1.x/share/server/util.js  146
-rw-r--r--  1.1.x/share/server/validate.js  22
-rw-r--r--  1.1.x/share/server/views.js  126
-rw-r--r--  1.1.x/share/www/_sidebar.html  59
-rw-r--r--  1.1.x/share/www/config.html  135
-rw-r--r--  1.1.x/share/www/couch_tests.html  98
-rw-r--r--  1.1.x/share/www/custom_test.html  112
-rw-r--r--  1.1.x/share/www/database.html  267
-rw-r--r--  1.1.x/share/www/dialog/_admin_party.html  33
-rw-r--r--  1.1.x/share/www/dialog/_change_password.html  31
-rw-r--r--  1.1.x/share/www/dialog/_compact_cleanup.html  51
-rw-r--r--  1.1.x/share/www/dialog/_create_admin.html  50
-rw-r--r--  1.1.x/share/www/dialog/_create_config.html  42
-rw-r--r--  1.1.x/share/www/dialog/_create_database.html  33
-rw-r--r--  1.1.x/share/www/dialog/_database_security.html  50
-rw-r--r--  1.1.x/share/www/dialog/_delete_database.html  27
-rw-r--r--  1.1.x/share/www/dialog/_delete_document.html  26
-rw-r--r--  1.1.x/share/www/dialog/_login.html  34
-rw-r--r--  1.1.x/share/www/dialog/_save_view_as.html  35
-rw-r--r--  1.1.x/share/www/dialog/_share_test_reports.html  42
-rw-r--r--  1.1.x/share/www/dialog/_signup.html  35
-rw-r--r--  1.1.x/share/www/dialog/_upload_attachment.html  36
-rw-r--r--  1.1.x/share/www/document.html  114
-rw-r--r--  1.1.x/share/www/favicon.ico  bin  0 -> 9326 bytes
-rw-r--r--  1.1.x/share/www/image/add.png  bin  0 -> 709 bytes
-rw-r--r--  1.1.x/share/www/image/apply.gif  bin  0 -> 652 bytes
-rw-r--r--  1.1.x/share/www/image/bg.png  bin  0 -> 372 bytes
-rw-r--r--  1.1.x/share/www/image/cancel.gif  bin  0 -> 659 bytes
-rw-r--r--  1.1.x/share/www/image/compact.png  bin  0 -> 28735 bytes
-rw-r--r--  1.1.x/share/www/image/delete-mini.png  bin  0 -> 418 bytes
-rw-r--r--  1.1.x/share/www/image/delete.png  bin  0 -> 718 bytes
-rw-r--r--  1.1.x/share/www/image/grippie.gif  bin  0 -> 75 bytes
-rw-r--r--  1.1.x/share/www/image/hgrad.gif  bin  0 -> 118 bytes
-rw-r--r--  1.1.x/share/www/image/key.png  bin  0 -> 859 bytes
-rw-r--r--  1.1.x/share/www/image/load.png  bin  0 -> 780 bytes
-rw-r--r--  1.1.x/share/www/image/logo.png  bin  0 -> 3010 bytes
-rw-r--r--  1.1.x/share/www/image/order-asc.gif  bin  0 -> 195 bytes
-rw-r--r--  1.1.x/share/www/image/order-desc.gif  bin  0 -> 187 bytes
-rw-r--r--  1.1.x/share/www/image/path.gif  bin  0 -> 104 bytes
-rw-r--r--  1.1.x/share/www/image/progress.gif  bin  0 -> 10819 bytes
-rw-r--r--  1.1.x/share/www/image/rarrow.png  bin  0 -> 27721 bytes
-rw-r--r--  1.1.x/share/www/image/run-mini.png  bin  0 -> 478 bytes
-rw-r--r--  1.1.x/share/www/image/run.png  bin  0 -> 718 bytes
-rw-r--r--  1.1.x/share/www/image/running.png  bin  0 -> 284 bytes
-rw-r--r--  1.1.x/share/www/image/save.png  bin  0 -> 843 bytes
-rw-r--r--  1.1.x/share/www/image/sidebar-toggle.png  bin  0 -> 512 bytes
-rw-r--r--  1.1.x/share/www/image/spinner.gif  bin  0 -> 3008 bytes
-rw-r--r--  1.1.x/share/www/image/spinner_33.gif  bin  0 -> 2987 bytes
-rw-r--r--  1.1.x/share/www/image/spinner_6b.gif  bin  0 -> 2969 bytes
-rw-r--r--  1.1.x/share/www/image/test_failure.gif  bin  0 -> 114 bytes
-rw-r--r--  1.1.x/share/www/image/test_success.gif  bin  0 -> 185 bytes
-rw-r--r--  1.1.x/share/www/image/thead-key.gif  bin  0 -> 77 bytes
-rw-r--r--  1.1.x/share/www/image/thead.gif  bin  0 -> 51 bytes
-rw-r--r--  1.1.x/share/www/image/toggle-collapse.gif  bin  0 -> 176 bytes
-rw-r--r--  1.1.x/share/www/image/toggle-expand.gif  bin  0 -> 181 bytes
-rw-r--r--  1.1.x/share/www/image/twisty.gif  bin  0 -> 160 bytes
-rw-r--r--  1.1.x/share/www/index.html  94
-rw-r--r--  1.1.x/share/www/replicator.html  184
-rw-r--r--  1.1.x/share/www/script/base64.js  124
-rw-r--r--  1.1.x/share/www/script/couch.js  473
-rw-r--r--  1.1.x/share/www/script/couch_test_runner.js  437
-rw-r--r--  1.1.x/share/www/script/couch_tests.js  105
-rw-r--r--  1.1.x/share/www/script/futon.browse.js  1290
-rw-r--r--  1.1.x/share/www/script/futon.format.js  146
-rw-r--r--  1.1.x/share/www/script/futon.js  535
-rw-r--r--  1.1.x/share/www/script/jquery-ui-1.8.11.custom.min.js  81
-rw-r--r--  1.1.x/share/www/script/jquery.couch.js  699
-rw-r--r--  1.1.x/share/www/script/jquery.dialog.js  96
-rw-r--r--  1.1.x/share/www/script/jquery.editinline.js  114
-rw-r--r--  1.1.x/share/www/script/jquery.form.js  660
-rw-r--r--  1.1.x/share/www/script/jquery.js  6240
-rw-r--r--  1.1.x/share/www/script/jquery.resizer.js  84
-rw-r--r--  1.1.x/share/www/script/jquery.suggest.js  163
-rw-r--r--  1.1.x/share/www/script/json2.js  482
-rw-r--r--  1.1.x/share/www/script/jspec/jspec.css  149
-rw-r--r--  1.1.x/share/www/script/jspec/jspec.jquery.js  72
-rw-r--r--  1.1.x/share/www/script/jspec/jspec.js  1756
-rw-r--r--  1.1.x/share/www/script/jspec/jspec.xhr.js  195
-rw-r--r--  1.1.x/share/www/script/oauth.js  511
-rw-r--r--  1.1.x/share/www/script/sha1.js  202
-rw-r--r--  1.1.x/share/www/script/test/all_docs.js  136
-rw-r--r--  1.1.x/share/www/script/test/attachment_conflicts.js  56
-rw-r--r--  1.1.x/share/www/script/test/attachment_names.js  98
-rw-r--r--  1.1.x/share/www/script/test/attachment_paths.js  153
-rw-r--r--  1.1.x/share/www/script/test/attachment_ranges.js  134
-rw-r--r--  1.1.x/share/www/script/test/attachment_views.js  98
-rw-r--r--  1.1.x/share/www/script/test/attachments.js  275
-rw-r--r--  1.1.x/share/www/script/test/attachments_multipart.js  408
-rw-r--r--  1.1.x/share/www/script/test/auth_cache.js  280
-rw-r--r--  1.1.x/share/www/script/test/basics.js  249
-rw-r--r--  1.1.x/share/www/script/test/batch_save.js  48
-rw-r--r--  1.1.x/share/www/script/test/bulk_docs.js  100
-rw-r--r--  1.1.x/share/www/script/test/changes.js  509
-rw-r--r--  1.1.x/share/www/script/test/compact.js  59
-rw-r--r--  1.1.x/share/www/script/test/config.js  163
-rw-r--r--  1.1.x/share/www/script/test/conflicts.js  64
-rw-r--r--  1.1.x/share/www/script/test/content_negotiation.js  39
-rw-r--r--  1.1.x/share/www/script/test/cookie_auth.js  256
-rw-r--r--  1.1.x/share/www/script/test/copy_doc.js  51
-rw-r--r--  1.1.x/share/www/script/test/delayed_commits.js  154
-rw-r--r--  1.1.x/share/www/script/test/design_docs.js  427
-rw-r--r--  1.1.x/share/www/script/test/design_options.js  74
-rw-r--r--  1.1.x/share/www/script/test/design_paths.js  72
-rw-r--r--  1.1.x/share/www/script/test/erlang_views.js  133
-rw-r--r--  1.1.x/share/www/script/test/etags_head.js  78
-rw-r--r--  1.1.x/share/www/script/test/etags_views.js  212
-rw-r--r--  1.1.x/share/www/script/test/form_submit.js  26
-rw-r--r--  1.1.x/share/www/script/test/http.js  54
-rw-r--r--  1.1.x/share/www/script/test/invalid_docids.js  77
-rw-r--r--  1.1.x/share/www/script/test/jsonp.js  82
-rw-r--r--  1.1.x/share/www/script/test/large_docs.js  33
-rw-r--r--  1.1.x/share/www/script/test/list_views.js  475
-rw-r--r--  1.1.x/share/www/script/test/lorem.txt  103
-rw-r--r--  1.1.x/share/www/script/test/lorem_b64.txt  1
-rw-r--r--  1.1.x/share/www/script/test/lots_of_docs.js  55
-rw-r--r--  1.1.x/share/www/script/test/method_override.js  40
-rw-r--r--  1.1.x/share/www/script/test/multiple_rows.js  80
-rw-r--r--  1.1.x/share/www/script/test/oauth.js  267
-rw-r--r--  1.1.x/share/www/script/test/proxyauth.js  130
-rw-r--r--  1.1.x/share/www/script/test/purge.js  145
-rw-r--r--  1.1.x/share/www/script/test/reader_acl.js  198
-rw-r--r--  1.1.x/share/www/script/test/recreate_doc.js  80
-rw-r--r--  1.1.x/share/www/script/test/reduce.js  185
-rw-r--r--  1.1.x/share/www/script/test/reduce_builtin.js  179
-rw-r--r--  1.1.x/share/www/script/test/reduce_false.js  44
-rw-r--r--  1.1.x/share/www/script/test/reduce_false_temp.js  37
-rw-r--r--  1.1.x/share/www/script/test/replication.js  792
-rw-r--r--  1.1.x/share/www/script/test/replicator_db.js  1155
-rw-r--r--  1.1.x/share/www/script/test/rev_stemming.js  99
-rw-r--r--  1.1.x/share/www/script/test/rewrite.js  410
-rw-r--r--  1.1.x/share/www/script/test/security_validation.js  336
-rw-r--r--  1.1.x/share/www/script/test/show_documents.js  436
-rw-r--r--  1.1.x/share/www/script/test/stats.js  330
-rw-r--r--  1.1.x/share/www/script/test/update_documents.js  168
-rw-r--r--  1.1.x/share/www/script/test/users_db.js  124
-rw-r--r--  1.1.x/share/www/script/test/utf8.js  41
-rw-r--r--  1.1.x/share/www/script/test/uuids.js  120
-rw-r--r--  1.1.x/share/www/script/test/view_collation.js  116
-rw-r--r--  1.1.x/share/www/script/test/view_collation_raw.js  123
-rw-r--r--  1.1.x/share/www/script/test/view_compaction.js  104
-rw-r--r--  1.1.x/share/www/script/test/view_conflicts.js  49
-rw-r--r--  1.1.x/share/www/script/test/view_errors.js  189
-rw-r--r--  1.1.x/share/www/script/test/view_include_docs.js  192
-rw-r--r--  1.1.x/share/www/script/test/view_multi_key_all_docs.js  91
-rw-r--r--  1.1.x/share/www/script/test/view_multi_key_design.js  216
-rw-r--r--  1.1.x/share/www/script/test/view_multi_key_temp.js  37
-rw-r--r--  1.1.x/share/www/script/test/view_offsets.js  108
-rw-r--r--  1.1.x/share/www/script/test/view_pagination.js  147
-rw-r--r--  1.1.x/share/www/script/test/view_sandboxing.js  140
-rw-r--r--  1.1.x/share/www/script/test/view_update_seq.js  106
-rw-r--r--  1.1.x/share/www/script/test/view_xml.js  39
-rw-r--r--  1.1.x/share/www/session.html  96
-rw-r--r--  1.1.x/share/www/spec/couch_js_class_methods_spec.js  401
-rw-r--r--  1.1.x/share/www/spec/couch_js_instance_methods_1_spec.js  311
-rw-r--r--  1.1.x/share/www/spec/couch_js_instance_methods_2_spec.js  246
-rw-r--r--  1.1.x/share/www/spec/couch_js_instance_methods_3_spec.js  215
-rw-r--r--  1.1.x/share/www/spec/custom_helpers.js  51
-rw-r--r--  1.1.x/share/www/spec/jquery_couch_js_class_methods_spec.js  523
-rw-r--r--  1.1.x/share/www/spec/jquery_couch_js_instance_methods_1_spec.js  202
-rw-r--r--  1.1.x/share/www/spec/jquery_couch_js_instance_methods_2_spec.js  433
-rw-r--r--  1.1.x/share/www/spec/jquery_couch_js_instance_methods_3_spec.js  540
-rw-r--r--  1.1.x/share/www/spec/run.html  46
-rw-r--r--  1.1.x/share/www/status.html  109
-rw-r--r--  1.1.x/share/www/style/jquery-ui-1.8.11.custom.css  347
-rw-r--r--  1.1.x/share/www/style/layout.css  618
-rw-r--r--  1.1.x/src/Makefile.am  13
-rw-r--r--  1.1.x/src/couchdb/Makefile.am  209
-rw-r--r--  1.1.x/src/couchdb/couch.app.tpl.in  29
-rw-r--r--  1.1.x/src/couchdb/couch.erl  39
-rw-r--r--  1.1.x/src/couchdb/couch_app.erl  56
-rw-r--r--  1.1.x/src/couchdb/couch_auth_cache.erl  419
-rw-r--r--  1.1.x/src/couchdb/couch_btree.erl  679
-rw-r--r--  1.1.x/src/couchdb/couch_changes.erl  339
-rw-r--r--  1.1.x/src/couchdb/couch_config.erl  254
-rw-r--r--  1.1.x/src/couchdb/couch_config_writer.erl  86
-rw-r--r--  1.1.x/src/couchdb/couch_db.erl  1210
-rw-r--r--  1.1.x/src/couchdb/couch_db.hrl  278
-rw-r--r--  1.1.x/src/couchdb/couch_db_update_notifier.erl  73
-rw-r--r--  1.1.x/src/couchdb/couch_db_update_notifier_sup.erl  63
-rw-r--r--  1.1.x/src/couchdb/couch_db_updater.erl  896
-rw-r--r--  1.1.x/src/couchdb/couch_doc.erl  527
-rw-r--r--  1.1.x/src/couchdb/couch_event_sup.erl  73
-rw-r--r--  1.1.x/src/couchdb/couch_external_manager.erl  101
-rw-r--r--  1.1.x/src/couchdb/couch_external_server.erl  69
-rw-r--r--  1.1.x/src/couchdb/couch_file.erl  614
-rw-r--r--  1.1.x/src/couchdb/couch_httpd.erl  997
-rw-r--r--  1.1.x/src/couchdb/couch_httpd_auth.erl  359
-rw-r--r--  1.1.x/src/couchdb/couch_httpd_db.erl  1283
-rw-r--r--  1.1.x/src/couchdb/couch_httpd_external.erl  169
-rw-r--r--  1.1.x/src/couchdb/couch_httpd_misc_handlers.erl  284
-rw-r--r--  1.1.x/src/couchdb/couch_httpd_oauth.erl  176
-rw-r--r--  1.1.x/src/couchdb/couch_httpd_proxy.erl  431
-rw-r--r--  1.1.x/src/couchdb/couch_httpd_rewrite.erl  434
-rw-r--r--  1.1.x/src/couchdb/couch_httpd_show.erl  404
-rw-r--r--  1.1.x/src/couchdb/couch_httpd_stats_handlers.erl  56
-rw-r--r--  1.1.x/src/couchdb/couch_httpd_vhost.erl  403
-rw-r--r--  1.1.x/src/couchdb/couch_httpd_view.erl  755
-rw-r--r--  1.1.x/src/couchdb/couch_js_functions.hrl  226
-rw-r--r--  1.1.x/src/couchdb/couch_key_tree.erl  332
-rw-r--r--  1.1.x/src/couchdb/couch_log.erl  193
-rw-r--r--  1.1.x/src/couchdb/couch_native_process.erl  402
-rw-r--r--  1.1.x/src/couchdb/couch_os_daemons.erl  364
-rw-r--r--  1.1.x/src/couchdb/couch_os_process.erl  185
-rw-r--r--  1.1.x/src/couchdb/couch_query_servers.erl  589
-rw-r--r--  1.1.x/src/couchdb/couch_ref_counter.erl  111
-rw-r--r--  1.1.x/src/couchdb/couch_rep.erl  972
-rw-r--r--  1.1.x/src/couchdb/couch_rep_att.erl  119
-rw-r--r--  1.1.x/src/couchdb/couch_rep_changes_feed.erl  503
-rw-r--r--  1.1.x/src/couchdb/couch_rep_httpc.erl  317
-rw-r--r--  1.1.x/src/couchdb/couch_rep_missing_revs.erl  198
-rw-r--r--  1.1.x/src/couchdb/couch_rep_reader.erl  283
-rw-r--r--  1.1.x/src/couchdb/couch_rep_sup.erl  31
-rw-r--r--  1.1.x/src/couchdb/couch_rep_writer.erl  165
-rw-r--r--  1.1.x/src/couchdb/couch_replication_manager.erl  383
-rw-r--r--  1.1.x/src/couchdb/couch_server.erl  405
-rw-r--r--  1.1.x/src/couchdb/couch_server_sup.erl  220
-rw-r--r--  1.1.x/src/couchdb/couch_stats_aggregator.erl  297
-rw-r--r--  1.1.x/src/couchdb/couch_stats_collector.erl  136
-rw-r--r--  1.1.x/src/couchdb/couch_stream.erl  357
-rw-r--r--  1.1.x/src/couchdb/couch_task_status.erl  124
-rw-r--r--  1.1.x/src/couchdb/couch_util.erl  478
-rw-r--r--  1.1.x/src/couchdb/couch_uuids.erl  95
-rw-r--r--  1.1.x/src/couchdb/couch_view.erl  460
-rw-r--r--  1.1.x/src/couchdb/couch_view_compactor.erl  102
-rw-r--r--  1.1.x/src/couchdb/couch_view_group.erl  642
-rw-r--r--  1.1.x/src/couchdb/couch_view_updater.erl  265
-rw-r--r--  1.1.x/src/couchdb/couch_work_queue.erl  155
-rw-r--r--  1.1.x/src/couchdb/priv/Makefile.am  93
-rw-r--r--  1.1.x/src/couchdb/priv/couch_js/http.c  675
-rw-r--r--  1.1.x/src/couchdb/priv/couch_js/http.h  18
-rw-r--r--  1.1.x/src/couchdb/priv/couch_js/main.c  338
-rw-r--r--  1.1.x/src/couchdb/priv/couch_js/utf8.c  286
-rw-r--r--  1.1.x/src/couchdb/priv/couch_js/utf8.h  19
-rw-r--r--  1.1.x/src/couchdb/priv/icu_driver/couch_icu_driver.c  177
-rw-r--r--  1.1.x/src/couchdb/priv/spawnkillable/couchspawnkillable.sh  20
-rw-r--r--  1.1.x/src/couchdb/priv/spawnkillable/couchspawnkillable_win.c  145
-rw-r--r--  1.1.x/src/couchdb/priv/stat_descriptions.cfg.in  50
-rw-r--r--  1.1.x/src/erlang-oauth/Makefile.am  50
-rw-r--r--  1.1.x/src/erlang-oauth/oauth.app.in  20
-rw-r--r--  1.1.x/src/erlang-oauth/oauth.erl  107
-rw-r--r--  1.1.x/src/erlang-oauth/oauth_hmac_sha1.erl  11
-rw-r--r--  1.1.x/src/erlang-oauth/oauth_http.erl  22
-rw-r--r--  1.1.x/src/erlang-oauth/oauth_plaintext.erl  10
-rw-r--r--  1.1.x/src/erlang-oauth/oauth_rsa_sha1.erl  30
-rw-r--r--  1.1.x/src/erlang-oauth/oauth_unix.erl  16
-rw-r--r--  1.1.x/src/erlang-oauth/oauth_uri.erl  88
-rw-r--r--  1.1.x/src/etap/Makefile.am  44
-rw-r--r--  1.1.x/src/etap/etap.erl  416
-rw-r--r--  1.1.x/src/etap/etap_application.erl  72
-rw-r--r--  1.1.x/src/etap/etap_can.erl  79
-rw-r--r--  1.1.x/src/etap/etap_exception.erl  66
-rw-r--r--  1.1.x/src/etap/etap_process.erl  42
-rw-r--r--  1.1.x/src/etap/etap_report.erl  343
-rw-r--r--  1.1.x/src/etap/etap_request.erl  89
-rw-r--r--  1.1.x/src/etap/etap_string.erl  47
-rw-r--r--  1.1.x/src/etap/etap_web.erl  65
-rw-r--r--  1.1.x/src/ibrowse/Makefile.am  49
-rw-r--r--  1.1.x/src/ibrowse/ibrowse.app.in  13
-rw-r--r--  1.1.x/src/ibrowse/ibrowse.erl  863
-rw-r--r--  1.1.x/src/ibrowse/ibrowse.hrl  21
-rw-r--r--  1.1.x/src/ibrowse/ibrowse_app.erl  63
-rw-r--r--  1.1.x/src/ibrowse/ibrowse_http_client.erl  1855
-rw-r--r--  1.1.x/src/ibrowse/ibrowse_lb.erl  235
-rw-r--r--  1.1.x/src/ibrowse/ibrowse_lib.erl  391
-rw-r--r--  1.1.x/src/ibrowse/ibrowse_sup.erl  63
-rw-r--r--  1.1.x/src/ibrowse/ibrowse_test.erl  513
-rw-r--r--  1.1.x/src/mochiweb/Makefile.am  102
-rw-r--r--  1.1.x/src/mochiweb/internal.hrl  3
-rw-r--r--  1.1.x/src/mochiweb/mochifmt.erl  425
-rw-r--r--  1.1.x/src/mochiweb/mochifmt_records.erl  38
-rw-r--r--  1.1.x/src/mochiweb/mochifmt_std.erl  30
-rw-r--r--  1.1.x/src/mochiweb/mochiglobal.erl  107
-rw-r--r--  1.1.x/src/mochiweb/mochihex.erl  91
-rw-r--r--  1.1.x/src/mochiweb/mochijson.erl  531
-rw-r--r--  1.1.x/src/mochiweb/mochijson2.erl  802
-rw-r--r--  1.1.x/src/mochiweb/mochilists.erl  104
-rw-r--r--  1.1.x/src/mochiweb/mochilogfile2.erl  140
-rw-r--r--  1.1.x/src/mochiweb/mochinum.erl  331
-rw-r--r--  1.1.x/src/mochiweb/mochitemp.erl  310
-rw-r--r--  1.1.x/src/mochiweb/mochiutf8.erl  316
-rw-r--r--  1.1.x/src/mochiweb/mochiweb.app.in  32
-rw-r--r--  1.1.x/src/mochiweb/mochiweb.app.src  9
-rw-r--r--  1.1.x/src/mochiweb/mochiweb.erl  289
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_acceptor.erl  48
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_app.erl  27
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_charref.erl  308
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_cookies.erl  309
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_cover.erl  75
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_echo.erl  38
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_headers.erl  299
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_html.erl  1061
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_http.erl  273
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_io.erl  46
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_mime.erl  94
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_multipart.erl  824
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_request.erl  768
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_response.erl  64
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_skel.erl  86
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_socket.erl  84
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_socket_server.erl  272
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_sup.erl  41
-rw-r--r--  1.1.x/src/mochiweb/mochiweb_util.erl  973
-rw-r--r--  1.1.x/src/mochiweb/reloader.erl  161
-rw-r--r--  1.1.x/test/Makefile.am  14
-rw-r--r--  1.1.x/test/bench/Makefile.am  22
-rw-r--r--  1.1.x/test/bench/bench_marks.js  103
-rwxr-xr-x  1.1.x/test/bench/benchbulk.sh  69
-rwxr-xr-x  1.1.x/test/bench/run.tpl  28
-rwxr-xr-x  1.1.x/test/etap/001-load.t  68
-rw-r--r--  1.1.x/test/etap/002-icu-driver.t  33
-rwxr-xr-x  1.1.x/test/etap/010-file-basics.t  108
-rwxr-xr-x  1.1.x/test/etap/011-file-headers.t  145
-rwxr-xr-x  1.1.x/test/etap/020-btree-basics.t  205
-rwxr-xr-x  1.1.x/test/etap/021-btree-reductions.t  141
-rwxr-xr-x  1.1.x/test/etap/030-doc-from-json.t  236
-rwxr-xr-x  1.1.x/test/etap/031-doc-to-json.t  197
-rwxr-xr-x  1.1.x/test/etap/040-util.t  80
-rw-r--r--  1.1.x/test/etap/041-uuid-gen-seq.ini  19
-rw-r--r--  1.1.x/test/etap/041-uuid-gen-utc.ini  19
-rwxr-xr-x  1.1.x/test/etap/041-uuid-gen.t  118
-rwxr-xr-x  1.1.x/test/etap/050-stream.t  87
-rwxr-xr-x  1.1.x/test/etap/060-kt-merging.t  115
-rwxr-xr-x  1.1.x/test/etap/061-kt-missing-leaves.t  65
-rwxr-xr-x  1.1.x/test/etap/062-kt-remove-leaves.t  69
-rwxr-xr-x  1.1.x/test/etap/063-kt-get-leaves.t  98
-rwxr-xr-x  1.1.x/test/etap/064-kt-counting.t  46
-rwxr-xr-x  1.1.x/test/etap/065-kt-stemming.t  42
-rwxr-xr-x  1.1.x/test/etap/070-couch-db.t  73
-rwxr-xr-x  1.1.x/test/etap/080-config-get-set.t  128
-rw-r--r--  1.1.x/test/etap/081-config-override.1.ini  22
-rw-r--r--  1.1.x/test/etap/081-config-override.2.ini  22
-rwxr-xr-x  1.1.x/test/etap/081-config-override.t  212
-rwxr-xr-x  1.1.x/test/etap/082-config-register.t  94
-rwxr-xr-x  1.1.x/test/etap/083-config-no-files.t  55
-rwxr-xr-x  1.1.x/test/etap/090-task-status.t  209
-rwxr-xr-x  1.1.x/test/etap/100-ref-counter.t  114
-rwxr-xr-x  1.1.x/test/etap/110-replication-httpc.t  132
-rwxr-xr-x  1.1.x/test/etap/111-replication-changes-feed.t  253
-rwxr-xr-x  1.1.x/test/etap/112-replication-missing-revs.t  207
-rwxr-xr-x  1.1.x/test/etap/113-replication-attachment-comp.t  314
-rwxr-xr-x  1.1.x/test/etap/120-stats-collect.t  150
-rw-r--r--  1.1.x/test/etap/121-stats-aggregates.cfg  19
-rw-r--r--  1.1.x/test/etap/121-stats-aggregates.ini  20
-rwxr-xr-x  1.1.x/test/etap/121-stats-aggregates.t  171
-rwxr-xr-x  1.1.x/test/etap/130-attachments-md5.t  248
-rwxr-xr-x  1.1.x/test/etap/140-attachment-comp.t  762
-rwxr-xr-x  1.1.x/test/etap/150-invalid-view-seq.t  190
-rwxr-xr-x  1.1.x/test/etap/160-vhosts.t  291
-rwxr-xr-x  1.1.x/test/etap/170-os-daemons.es  26
-rwxr-xr-x  1.1.x/test/etap/170-os-daemons.t  114
-rwxr-xr-x  1.1.x/test/etap/171-os-daemons-config.es  83
-rwxr-xr-x  1.1.x/test/etap/171-os-daemons-config.t  74
-rw-r--r--  1.1.x/test/etap/172-os-daemon-errors.1.es  22
-rwxr-xr-x  1.1.x/test/etap/172-os-daemon-errors.2.es  16
-rwxr-xr-x  1.1.x/test/etap/172-os-daemon-errors.3.es  17
-rwxr-xr-x  1.1.x/test/etap/172-os-daemon-errors.4.es  17
-rwxr-xr-x  1.1.x/test/etap/172-os-daemon-errors.t  126
-rwxr-xr-x  1.1.x/test/etap/173-os-daemon-cfg-register.t  93
-rw-r--r--  1.1.x/test/etap/180-http-proxy.ini  20
-rwxr-xr-x  1.1.x/test/etap/180-http-proxy.t  378
-rwxr-xr-x  1.1.x/test/etap/200-view-group-no-db-leaks.t  262
-rw-r--r--  1.1.x/test/etap/Makefile.am  88
-rw-r--r--  1.1.x/test/etap/random_port.ini  19
-rw-r--r--  1.1.x/test/etap/run.tpl  27
-rw-r--r--  1.1.x/test/etap/test_cfg_register.c  30
-rw-r--r--  1.1.x/test/etap/test_util.erl.in  42
-rw-r--r--  1.1.x/test/etap/test_web.erl  99
-rw-r--r--  1.1.x/test/javascript/Makefile.am  25
-rw-r--r--  1.1.x/test/javascript/cli_runner.js  52
-rw-r--r--  1.1.x/test/javascript/couch_http.js  62
-rw-r--r--  1.1.x/test/javascript/run.tpl  30
-rw-r--r--  1.1.x/test/view_server/Makefile.am  15
-rw-r--r--  1.1.x/test/view_server/query_server_spec.rb  824
-rwxr-xr-x  1.1.x/test/view_server/run_native_process.es  59
-rw-r--r--  1.1.x/utils/Makefile.am  42
-rw-r--r--  1.1.x/var/Makefile.am  23
421 files changed, 85032 insertions, 0 deletions
diff --git a/1.1.x/.gitignore b/1.1.x/.gitignore
new file mode 100644
index 00000000..33028e7e
--- /dev/null
+++ b/1.1.x/.gitignore
@@ -0,0 +1,100 @@
+*.beam
+*.gz
+*.tpl
+*.o
+*.lo
+*.la
+*.m4
+*.in
+*~
+*.orig
+*.rej
+erl_crash.dump
+configure
+autom4te.cache
+build-aux
+*.diff
+
+# ./configure
+
+Makefile
+bin/Makefile
+config.h
+config.log
+config.status
+etc/Makefile
+etc/couchdb/Makefile
+etc/default/Makefile
+etc/init/Makefile
+etc/launchd/Makefile
+etc/logrotate.d/Makefile
+libtool
+share/Makefile
+src/couchdb/.deps/*
+src/couchdb/Makefile
+src/couchdb/priv/Makefile
+src/mochiweb/Makefile
+stamp-h1
+test/.deps/
+test/Makefile
+test/javascript/run_js_tests.sh
+var/Makefile
+
+# for make
+
+bin/couchdb
+bin/couchdb.1
+bin/couchjs
+bin/couchjs.1
+etc/couchdb/default.ini
+etc/launchd/org.apache.couchdb.plist
+etc/logrotate.d/couchdb
+src/couchdb/.libs/*
+src/couchdb/couch.app
+src/couchdb/couchjs
+src/couchdb/edoc-info
+src/couchdb/erlang.png
+src/couchdb/stylesheet.css
+src/couchdb/priv/.deps/
+src/couchdb/priv/.libs/
+src/couchdb/priv/couch_icu_driver.la
+src/couchdb/priv/couchjs
+src/couchdb/priv/couchspawnkillable
+src/couchdb/priv/stat_descriptions.cfg
+src/erlang-oauth/oauth.app
+src/ibrowse/ibrowse.app
+src/mochiweb/mochiweb.app
+test/local.ini
+test/etap/.deps/
+test/etap/run
+test/etap/test_cfg_register
+test/etap/test_util.erl
+test/javascript/run
+share/server/main.js
+
+# for make dev
+
+bin/.deps/
+bin/couchjs_dev
+bin/couchpw
+etc/couchdb/default_dev.ini
+etc/couchdb/local_dev.ini
+utils/run
+tmp
+src/couchdb/priv/stat_descriptions.cfg
+src/erlang-oauth/oauth.app
+src/ibrowse/ibrowse.app
+src/mochiweb/mochiweb.app
+test/etap/run
+
+# for make check
+
+test/etap/temp.*
+test/bench/run
+couchdb.stderr
+couchdb.stdout
+
+# for make cover
+
+cover/*
+INSTALL
diff --git a/1.1.x/AUTHORS b/1.1.x/AUTHORS
new file mode 100644
index 00000000..e0181c1d
--- /dev/null
+++ b/1.1.x/AUTHORS
@@ -0,0 +1,20 @@
+Apache CouchDB AUTHORS
+======================
+
+A number of people have contributed directly to Apache CouchDB by writing
+documentation or developing software. Some of these people are:
+
+ * Damien Katz <damien@apache.org>
+ * Jan Lehnardt <jan@apache.org>
+ * Noah Slater <nslater@apache.org>
+ * Christopher Lenz <cmlenz@apache.org>
+ * J. Chris Anderson <jchris@apache.org>
+ * Paul Joseph Davis <davisp@apache.org>
+ * Adam Kocoloski <kocolosk@apache.org>
+ * Jason Davies <jasondavies@apache.org>
+ * Mark Hammond <mhammond@skippinet.com.au>
+ * Benoît Chesneau <benoitc@apache.org>
+ * Filipe Manana <fdmanana@apache.org>
+ * Robert Newson <rnewson@apache.org>
+
+For a list of other credits see the `THANKS` file.
diff --git a/1.1.x/BUGS b/1.1.x/BUGS
new file mode 100644
index 00000000..8cd1d161
--- /dev/null
+++ b/1.1.x/BUGS
@@ -0,0 +1,6 @@
+Apache CouchDB BUGS
+===================
+
+Please see the [documentation][1] on how to report bugs with Apache CouchDB.
+
+[1] http://couchdb.apache.org/community/issues.html
diff --git a/1.1.x/CHANGES b/1.1.x/CHANGES
new file mode 100644
index 00000000..a32797f5
--- /dev/null
+++ b/1.1.x/CHANGES
@@ -0,0 +1,632 @@
+Apache CouchDB CHANGES
+======================
+
+Version 1.1.0
+-------------
+
+All CHANGES for 1.0.2 and 1.0.3 also apply to 1.1.0.
+
+HTTP Interface:
+
+ * Native SSL support.
+ * Added support for HTTP range requests for attachments.
+ * Added built-in filters for `_changes`: `_doc_ids` and `_design`.
+ * Added a configuration option for `TCP_NODELAY`, which controls Nagle's
+ algorithm.
+ * Allow POSTing arguments to `_changes`.
+ * Allow `keys` parameter for GET requests to views.
+ * Allow wildcards in vhosts definitions.
+ * More granular ETag support for views.
+ * More flexible URL rewriter.
+ * Added support for recognizing "Q values" and media parameters in
+ HTTP Accept headers.
+ * Validate doc ids that come from a PUT to a URL.
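+
+As a sketch of the new interface (the database name `db`, the design document
+`app`, the view `by_tag`, and the document and attachment names below are all
+placeholders), the built-in `_changes` filters, POSTed `_changes` arguments,
+the `keys` parameter, and range requests can be exercised with `curl`:
+
+    curl 'http://127.0.0.1:5984/db/_changes?filter=_design'
+    curl -X POST 'http://127.0.0.1:5984/db/_changes?filter=_doc_ids' \
+         -H 'Content-Type: application/json' -d '{"doc_ids": ["doc1", "doc2"]}'
+    curl 'http://127.0.0.1:5984/db/_design/app/_view/by_tag?keys=["a","b"]'
+    curl -H 'Range: bytes=0-99' 'http://127.0.0.1:5984/db/doc1/file.txt'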
+
+Externals:
+
+ * Added OS Process module to manage daemons outside of CouchDB.
+ * Added HTTP Proxy handler for more scalable externals.
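+
+Both features are driven from the ini configuration. A hedged sketch (the
+daemon path, the `_fti` handler name, and the proxy destination are
+placeholders):
+
+    [os_daemons]
+    my_daemon = /usr/local/bin/my_daemon.sh
+
+    [httpd_global_handlers]
+    _fti = {couch_httpd_proxy, handle_proxy_req, <<"http://127.0.0.1:5985">>}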
+
+Replicator:
+
+ * Added `_replicator` database to manage replications.
+ * Fixed issues when an endpoint is a remote database accessible via SSL.
+ * Added support for continuous by-doc-IDs replication.
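+
+For illustration, a replication managed through the new `_replicator` database
+might be created like this (the server URL, database names, and document IDs
+are placeholders; `doc_ids` makes it a by-doc-IDs replication):
+
+    curl -X PUT 'http://127.0.0.1:5984/_replicator/my_rep' \
+         -H 'Content-Type: application/json' \
+         -d '{"source": "http://example.org:5984/foo", "target": "bar",
+              "continuous": true, "doc_ids": ["doc1", "doc2"]}'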
+
+Storage System:
+
+ * Multiple micro-optimizations when reading data.
+
+View Server:
+
+ * Added CommonJS support to map functions.
+ * Added `stale=update_after` query option that triggers a view update after
+ returning a `stale=ok` response.
+ * Warn about empty result caused by `startkey` and `endkey` limiting.
+ * Built-in reduce function `_sum` now accepts lists of integers as input.
+ * Added view query aliases start_key, end_key, start_key_doc_id and
+ end_key_doc_id.
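+
+As a hedged sketch (the design document, view, and module names are made up),
+a map function can now require a CommonJS module shipped under `views/lib` in
+the design document, and a query can opt in to `stale=update_after`:
+
+    "views": {
+      "lib": {
+        "mylib": "exports.norm = function(s) { return s.toLowerCase(); };"
+      },
+      "by_name": {
+        "map": "function(doc) { var lib = require('views/lib/mylib'); emit(lib.norm(doc.name), null); }"
+      }
+    }
+
+    curl 'http://127.0.0.1:5984/db/_design/app/_view/by_name?stale=update_after'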
+
+Futon:
+
+ * Added a "change password"-feature to Futon.
+
+
+Version 1.0.1
+-------------
+
+Storage System:
+
+ * Fix data corruption bug COUCHDB-844. Please see
+ http://couchdb.apache.org/notice/1.0.1.html for details.
+
+Replicator:
+
+ * Added support for replication via an HTTP/HTTPS proxy.
+ * Fix pull replication of attachments from 0.11 to 1.0.x.
+ * Make the _changes feed work with non-integer seqnums.
+
+HTTP Interface:
+
+ * Expose `committed_update_seq` for monitoring purposes.
+ * Show fields saved along with _deleted=true. Allows for auditing of deletes.
+ * More robust Accept-header detection.
+
+Authentication:
+
+ * Enable basic-auth popup when required to access the server, to prevent
+ people from getting locked out.
+
+Futon:
+
+ * User interface element for querying stale (cached) views.
+
+Build and System Integration:
+
+ * Included additional source files for distribution.
+
+Version 1.0.0
+-------------
+
+Security:
+
+ * Added authentication caching, to avoid repeated opening and closing of the
+ users database for each request requiring authentication.
+
+Storage System:
+
+ * Small optimization for reordering result lists.
+ * More efficient header commits.
+ * Use O_APPEND to save lseeks.
+ * Faster implementation of pread_iolist(). Further improves performance on
+ concurrent reads.
+
+View Server:
+
+ * Faster default view collation.
+ * Added option to include update_seq in view responses.
+
+Version 0.11.2
+--------------
+
+Replicator:
+
+ * Fix bug when pushing design docs by non-admins, which was hanging the
+ replicator for no good reason.
+ * Fix bug when pulling design documents from a source that requires
+ basic-auth.
+
+HTTP Interface:
+
+ * Better error messages on invalid URL requests.
+
+Authentication:
+
+ * User documents can now be deleted by admins or the user.
+
+Security:
+
+ * Avoid a potential DoS attack by guarding all creation of atoms.
+
+Futon:
+
+ * Add some Futon files that were missing from the Makefile.
+
+Version 0.11.1
+--------------
+
+HTTP Interface:
+
+ * Mask passwords in active tasks and logging.
+ * Update mochijson2 to allow output of BigNums not in float form.
+ * Added support for X-HTTP-METHOD-OVERRIDE.
+ * Better error message for database names.
+ * Disable jsonp by default.
+ * Accept gzip encoded standalone attachments.
+ * Made max_concurrent_connections configurable.
+ * Made changes API more robust.
+ * Send newly generated document rev to callers of an update function.
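+
+For example (the document and revision below are placeholders), a client
+limited to GET and POST can tunnel a DELETE with the new override header:
+
+    curl -X POST -H 'X-HTTP-Method-Override: DELETE' \
+         'http://127.0.0.1:5984/db/doc1?rev=1-abc123'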
+
+Futon:
+
+ * Use "expando links" for over-long document values in Futon.
+ * Added continuous replication option.
+ * Added option to replicate test results anonymously to a community
+ CouchDB instance.
+ * Allow creation and deletion of config entries.
+ * Fixed display issues with doc ids that have escaped characters.
+ * Fixed various UI issues.
+
+Build and System Integration:
+
+ * Output of `couchdb --help` has been improved.
+ * Fixed compatibility with the Erlang R14 series.
+ * Fixed warnings on Linux builds.
+ * Fixed build error when aclocal needs to be called during the build.
+ * Require ICU 4.3.1.
+ * Fixed compatibility with Solaris.
+
+Security:
+
+ * Added authentication redirect URL to log in clients.
+ * Fixed query parameter encoding issue in oauth.js.
+ * Made authentication timeout configurable.
+ * Temporary views are now admin-only resources.
+
+Storage System:
+
+ * Don't require a revpos for attachment stubs.
+ * Added checking to ensure when a revpos is sent with an attachment stub,
+ it's correct.
+ * Make file deletions async to avoid pauses during compaction and db
+ deletion.
+ * Fixed a wrong offset when writing headers and converting them to blocks;
+ only triggered when the header is larger than 4k.
+ * Preserve _revs_limit and instance_start_time after compaction.
+
+Configuration System:
+
+ * Fixed timeout with large .ini files.
+
+JavaScript Clients:
+
+ * Added tests for couch.js and jquery.couch.js
+ * Added changes handler to jquery.couch.js.
+ * Added cache busting to jquery.couch.js if the user agent is msie.
+ * Added support for multi-document-fetch (via _all_docs) to jquery.couch.js.
+ * Added attachment versioning to jquery.couch.js.
+ * Added option to control ensure_full_commit to jquery.couch.js.
+ * Added list functionality to jquery.couch.js.
+ * Fixed issues where bulkSave() wasn't sending a POST body.
+
+View Server:
+
+ * Provide a UUID to update functions (and all other functions) that they can
+ use to create new docs.
+ * Upgrade CommonJS modules support to 1.1.1.
+ * Fixed Erlang filter funs and normalized the filter fun API.
+ * Fixed hang in view shutdown.
+
+Log System:
+
+ * Log HEAD requests as HEAD, not GET.
+ * Keep massive JSON blobs out of the error log.
+ * Fixed a timeout issue.
+
+Replication System:
+
+ * Refactored various internal APIs related to attachment streaming.
+ * Fixed hanging replication.
+ * Fixed keepalive issue.
+
+URL Rewriter & Vhosts:
+
+ * Allow more complex keys in rewriter.
+ * Allow global rewrites so system defaults are available in vhosts.
+ * Allow isolation of databases with vhosts.
+ * Fix issue with passing variables to query parameters.
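+
+A minimal sketch of the two pieces working together (the host and design
+document names are placeholders): a vhost entry in the ini file routes a whole
+domain into a rewriter, and the rewrite rules live in the design document:
+
+    [vhosts]
+    example.com = /db/_design/app/_rewrite
+
+    "rewrites": [
+      {"from": "/", "to": "index.html"},
+      {"from": "/doc/:id", "to": "../../:id"}
+    ]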
+
+Test Suite:
+
+ * Made the test suite overall more reliable.
+
+Version 0.11.0
+--------------
+
+Security:
+
+ * Fixed CVE-2010-0009: Apache CouchDB Timing Attack Vulnerability.
+ * Added default cookie-authentication and users database.
+ * Added Futon user interface for user signup and login.
+ * Added per-database reader access control lists.
+ * Added per-database security object for configuration data in validation
+ functions.
+ * Added proxy authentication handler.
+
+HTTP Interface:
+
+ * Provide Content-MD5 header support for attachments.
+ * Added URL Rewriter handler.
+ * Added virtual host handling.
+
+View Server:
+
+ * Added optional 'raw' binary collation for faster view builds where Unicode
+ collation is not important.
+ * Improved view index build time by reducing ICU collation callouts.
+ * Improved view information objects.
+ * Bug fix for partial updates during view builds.
+ * Move query server to a design-doc based protocol.
+ * Use json2.js for JSON serialization for compatibility with native JSON.
+ * Major refactoring of couchjs to lay the groundwork for disabling cURL
+ support. The new HTTP interaction acts like a synchronous XHR. Example usage
+ of the new system is in the JavaScript CLI test runner.
+
+Replication:
+
+ * Added option to implicitly create replication target databases.
+ * Avoid leaking file descriptors on automatic replication restarts.
+ * Added option to replicate a list of documents by id.
+ * Allow continuous replication to be cancelled.
+
+Storage System:
+
+ * Added batching of multiple updating requests, to improve throughput with
+ many writers. Removed the now redundant couch_batch_save module.
+ * Added configurable compression of attachments.
+
+Runtime Statistics:
+
+ * Statistics are now calculated for a moving window instead of non-overlapping
+ timeframes.
+ * Fixed a problem with statistics timers and system sleep.
+ * Moved statistic names to a term file in the priv directory.
+
+Futon:
+
+ * Added a button for view compaction.
+ * JSON strings are now displayed as-is in the document view, without the escaping of
+ new-lines and quotes. That dramatically improves readability of multi-line
+ strings.
+ * Same goes for editing of JSON string values. When a change to a field value
+ is submitted and the value is not valid JSON, it is assumed to be a string.
+ This greatly improves editing of multi-line strings.
+ * Hitting tab in textareas no longer moves focus to the next form field, but simply
+ inserts a tab character at the current caret position.
+ * Fixed some font declarations.
+
+Build and System Integration:
+
+ * Updated and improved source documentation.
+ * Fixed distribution preparation for building on Mac OS X.
+ * Added support for building a Windows installer as part of 'make dist'.
+ * Bug fix for building couch.app's module list.
+ * ETap tests are now run during make distcheck. This included a number of
+ updates to the build system to properly support VPATH builds.
+ * Gavin McDonald set up a build-bot instance. More info can be found at
+ http://ci.apache.org/buildbot.html
+
+Version 0.10.1
+--------------
+
+Replicator:
+
+ * Stability enhancements regarding redirects, timeouts, OAuth.
+
+Query Server:
+
+ * Avoid process leaks.
+ * Allow list and view to span languages.
+
+Stats:
+
+ * Eliminate new process flood on system wake.
+
+Build and System Integration:
+
+ * Test suite now works with the distcheck target.
+
+Version 0.10.0
+--------------
+
+Storage Format:
+
+ * Moved headers, now protected by checksums, to the end of database files
+ for more robust and faster storage.
+
+View Server:
+
+ * Added native Erlang views for high-performance applications.
+
+HTTP Interface:
+
+ * Added optional cookie-based authentication handler.
+ * Added optional two-legged OAuth authentication handler.
+
+Build and System Integration:
+
+ * Changed `couchdb` script configuration options.
+ * Added default.d and local.d configuration directories to load sequence.
+
+
+Version 0.9.2
+-------------
+
+Replication:
+
+ * Fix replication with 0.10 servers initiated by an 0.9 server (COUCHDB-559).
+
+Build and System Integration:
+
+ * Remove branch callbacks to allow building couchjs against newer versions of
+ SpiderMonkey.
+
+Version 0.9.1
+-------------
+
+Build and System Integration:
+
+ * PID file directory is now created by the SysV/BSD daemon scripts.
+ * Fixed the environment variables shown by the configure script.
+ * Fixed the build instructions shown by the configure script.
+ * Updated ownership and permission advice in `README` for better security.
+
+Configuration and stats system:
+
+ * Corrected missing configuration file error message.
+ * Fixed incorrect recording of request time.
+
+Database Core:
+
+ * Document validation for underscore prefixed variables.
+ * Made attachment storage less sparse.
+ * Fixed problems when a database with delayed commits pending is considered
+ idle and is subject to losing changes when shut down. (COUCHDB-334)
+
+External Handlers:
+
+ * Fix POST requests.
+
+Futon:
+
+ * Redirect when loading a deleted view URI from the cookie.
+
+HTTP Interface:
+
+ * Attachment requests respect the "rev" query-string parameter.
+
+JavaScript View Server:
+
+ * Useful JavaScript Error messages.
+
+Replication:
+
+ * Added support for Unicode characters transmitted as UTF-16 surrogate pairs.
+ * URL-encode attachment names when necessary.
+ * Pull specific revisions of an attachment, instead of just the latest one.
+ * Work around a rare chunk-merging problem in ibrowse.
+ * Work with documents containing Unicode characters outside the Basic
+ Multilingual Plane.
+
+Version 0.9.0
+-------------
+
+Futon Utility Client:
+
+ * Added pagination to the database listing page.
+ * Implemented attachment uploading from the document page.
+ * Added page that shows the current configuration, and allows modification of
+ option values.
+ * Added a JSON "source view" for document display.
+ * JSON data in view rows is now syntax highlighted.
+ * Removed the use of an iframe for better integration with browser history and
+ bookmarking.
+ * Full database listing in the sidebar has been replaced by a short list of
+ recent databases.
+ * The view editor now allows selection of the view language if there is more
+ than one configured.
+ * Added links to go to the raw view or document URI.
+ * Added status page to display currently running tasks in CouchDB.
+ * JavaScript test suite split into multiple files.
+ * Pagination for reduce views.
+
+Design Document Resource Paths:
+
+ * Added httpd_design_handlers config section.
+ * Moved _view to httpd_design_handlers.
+ * Added ability to render documents as non-JSON content-types with _show and
+ _list functions, which are also httpd_design_handlers.
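+
+A hedged sketch of what such a function looks like (the design document,
+function, and document names are placeholders): a `_show` member of a design
+document renders a document as plain text, addressed by a request along the
+lines of the one shown after it:
+
+    "shows": {
+      "plain": "function(doc, req) { return { headers: {'Content-Type': 'text/plain'}, body: doc.title }; }"
+    }
+
+    GET /db/_design/app/_show/plain/some_doc_id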
+
+HTTP Interface:
+
+ * Added client-side UUIDs for idempotent document creation.
+ * HTTP COPY for documents.
+ * Streaming of chunked attachment PUTs to disk.
+ * Remove negative count feature.
+ * Add include_docs option for view queries.
+ * Add multi-key view POST for views.
+ * Query parameter validation.
+ * Use stale=ok to request potentially cached view index.
+ * External query handler module for full-text or other indexers.
+ * Etags for attachments, views, shows and lists
+ * Show and list functions for rendering documents and views as developer
+ controlled content-types.
+ * Attachment names may use slashes to allow uploading of nested directories
+ (useful for static web hosting).
+ * Option for a view to run over design documents.
+ * Added newline to JSON responses. Closes bike-shed.
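+
+For instance (the database, design document, view, and keys below are
+placeholders), the multi-key POST and `include_docs` can be combined:
+
+    curl -X POST 'http://127.0.0.1:5984/db/_design/app/_view/by_name?include_docs=true' \
+         -H 'Content-Type: application/json' -d '{"keys": ["apple", "pear"]}'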
+
+Replication:
+
+ * Using ibrowse.
+ * Checkpoint replications so failures are less expensive.
+ * Automatic retry of failed replications.
+ * Stream attachments in pull-replication.
+
+Database Core:
+
+ * Faster B-tree implementation.
+ * Changed internal JSON term format.
+ * Improvements to Erlang VM interactions under heavy load.
+ * User context and administrator role.
+ * Update validations with design document validation functions.
+ * Document purge functionality.
+ * Ref-counting for database file handles.
+
+Build and System Integration:
+
+ * The `couchdb` script now supports system chainable configuration files.
+ * The Mac OS X daemon script now redirects STDOUT and STDERR like SysV/BSD.
+ * The build and system integration have been improved for portability.
+ * Added COUCHDB_OPTIONS to etc/default/couchdb file.
+ * Remove COUCHDB_INI_FILE and COUCHDB_PID_FILE from etc/default/couchdb file.
+ * Updated `configure.ac` to manually link `libm` for portability.
+ * Updated `configure.ac` to extend default library paths.
+ * Removed inets configuration files.
+ * Added command line test runner.
+ * Created dev target for make.
+
+Configuration and stats system:
+
+ * Separate default and local configuration files.
+ * HTTP interface for configuration changes.
+ * Statistics framework with HTTP query API.
+
+Version 0.8.1-incubating
+------------------------
+
+Database Core:
+
+ * Fix for replication problems where the write queues can get backed up if the
+ writes aren't happening fast enough to keep up with the reads. For a large
+ replication, this can exhaust memory and crash, or slow down the machine
+ dramatically. The fix keeps only one document in the write queue at a time.
+ * Fix for databases sometimes incorrectly reporting that they contain 0
+ documents after compaction.
+ * CouchDB now uses ibrowse instead of inets for its internal HTTP client
+ implementation. This means better replication stability.
+
+HTTP Interface:
+
+ * Fix for chunked responses where chunks were always being split into multiple
+ TCP packets, which caused problems with the test suite under Safari, and in
+ some other cases.
+ * Fix for an invalid JSON response body being returned for some kinds of
+ views. (COUCHDB-84)
+ * Fix for connections not getting closed after rejecting a chunked request.
+ (COUCHDB-55)
+ * CouchDB can now be bound to IPv6 addresses.
+ * The HTTP `Server` header now contains the versions of CouchDB and Erlang.
+
+JavaScript View Server:
+
+ * Sealing of documents has been disabled due to an incompatibility with
+ SpiderMonkey 1.9.
+ * Improve error handling for undefined values emitted by map functions.
+ (COUCHDB-83)
+
+Build and System Integration:
+
+ * The `couchdb` script no longer uses `awk` for configuration checks as this
+ was causing portability problems.
+ * Updated `sudo` example in `README` to use the `-i` option, this fixes
+ problems when invoking from a directory the `couchdb` user cannot access.
+
+Futon:
+
+ * The view selector dropdown should now work in Opera and Internet Explorer
+ even when it includes optgroups for design documents. (COUCHDB-81)
+
+Version 0.8.0-incubating
+------------------------
+
+Database Core:
+
+ * The view engine has been completely decoupled from the storage engine. Index
+ data is now stored in separate files, and the format of the main database
+ file has changed.
+ * Databases can now be compacted to reclaim space used for deleted documents
+ and old document revisions.
+ * Support for incremental map/reduce views has been added.
+ * To support map/reduce, the structure of design documents has changed. View
+ values are now JSON objects containing at least a `map` member, and
+ optionally a `reduce` member.
+ * View servers are now identified by name (for example `javascript`) instead of
+ by media type.
+ * Automatically generated document IDs are now based on proper UUID generation
+ using the crypto module.
+ * The field `content-type` in the JSON representation of attachments has been
+ renamed to `content_type` (underscore).
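+
+A minimal design document in the new structure might look like this (the
+document and view names are placeholders; only the `map` member is required):
+
+    {
+      "_id": "_design/example",
+      "language": "javascript",
+      "views": {
+        "tally": {
+          "map": "function(doc) { emit(doc.type, 1); }",
+          "reduce": "function(keys, values) { return sum(values); }"
+        }
+      }
+    }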
+
+HTTP Interface:
+
+ * CouchDB now uses MochiWeb instead of inets for the HTTP server
+ implementation. Among other things, this means that the extra configuration
+ files needed for inets (such as `couch_httpd.conf`) are no longer used.
+ * The HTTP interface now completely supports the `HEAD` method. (COUCHDB-3)
+ * Improved compliance of `Etag` handling with the HTTP specification.
+ (COUCHDB-13)
+ * Etags are no longer included in responses to document `GET` requests that
+ include query string parameters causing the JSON response to change without
+ the revision or the URI having changed.
+ * The bulk document update API has changed slightly on both the request and the
+ response side. In addition, bulk updates are now atomic.
+ * CouchDB now uses `TCP_NODELAY` to fix performance problems with persistent
+ connections on some platforms due to nagling.
+ * Including a `?descending=false` query string parameter in requests to views
+ no longer raises an error.
+ * Requests to unknown top-level reserved URLs (anything with a leading
+ underscore) now return an `unknown_private_path` error instead of the
+ confusing `illegal_database_name`.
+ * The Temporary view handling now expects a JSON request body, where the JSON
+ is an object with at least a `map` member, and optional `reduce` and
+ `language` members.
+ * Temporary views no longer determine the view server based on the Content-Type
+ header of the `POST` request, but rather by looking for a `language` member
+ in the JSON body of the request.
+ * The status code of responses to `DELETE` requests is now 200 to reflect
+ that the deletion is performed synchronously.
+
+JavaScript View Server:
+
+ * SpiderMonkey is no longer included with CouchDB, but rather treated as a
+ normal external dependency. A simple C program (`_couchjs`) is provided that
+ links against an existing SpiderMonkey installation and uses the interpreter
+ embedding API.
+ * View functions using the default JavaScript view server can now do logging
+ using the global `log(message)` function. Log messages are directed into the
+ CouchDB log at `INFO` level. (COUCHDB-59)
+ * The global `map(key, value)` function made available to view code has been
+ renamed to `emit(key, value)`.
+ * Fixed handling of exceptions raised by view functions.
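+
+Under the renamed API, a trivial map function with logging looks like this
+(a sketch):
+
+    function(doc) {
+      // log() writes to the CouchDB log at INFO level
+      log('indexing ' + doc._id);
+      emit(doc._id, null);
+    }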
+
+Build and System Integration:
+
+ * CouchDB can automatically respawn following a server crash.
+ * Database server no longer refuses to start with a stale PID file.
+ * System logrotate configuration provided.
+ * Improved handling of ICU shared libraries.
+ * The `couchdb` script now automatically enables SMP support in Erlang.
+ * The `couchdb` and `couchjs` scripts have been improved for portability.
+ * The build and system integration have been improved for portability.
+
+Futon:
+
+ * When adding a field to a document, Futon now just adds a field with an
+ autogenerated name instead of prompting for the name with a dialog. The name
+ is automatically put into edit mode so that it can be changed immediately.
+ * Fields are now sorted alphabetically by name when a document is displayed.
+ * Futon can be used to create and update permanent views.
+ * The maximum number of rows to display per page on the database page can now
+ be adjusted.
+ * Futon now uses the XMLHTTPRequest API asynchronously to communicate with the
+ CouchDB HTTP server, so that most operations no longer block the browser.
+ * View results sorting can now be switched between ascending and descending by
+ clicking on the `Key` column header.
+ * Fixed a bug where documents that contained a `@` character could not be
+ viewed. (COUCHDB-12)
+ * The database page now provides a `Compact` button to trigger database
+ compaction. (COUCHDB-38)
+ * Fixed potential double encoding of document IDs and other URI segments in
+ many instances. (COUCHDB-39)
+ * Improved display of attachments.
+ * The JavaScript Shell has been removed due to unresolved licensing issues.
diff --git a/1.1.x/DEVELOPERS b/1.1.x/DEVELOPERS
new file mode 100644
index 00000000..a7a6926e
--- /dev/null
+++ b/1.1.x/DEVELOPERS
@@ -0,0 +1,95 @@
+Apache CouchDB DEVELOPERS
+=========================
+
+Only follow these instructions if you are building from a source checkout.
+
+If you're unsure what this means, ignore this document.
+
+Dependencies
+------------
+
+You will need the following installed:
+
+ * GNU Automake (>=1.6.3) (http://www.gnu.org/software/automake/)
+ * GNU Autoconf (>=2.59) (http://www.gnu.org/software/autoconf/)
+ * GNU Libtool (http://www.gnu.org/software/libtool/)
+ * GNU help2man (http://www.gnu.org/software/help2man/)
+
+The `help2man` tool is optional, but will generate `man` pages for you.
+
+Debian-based (inc. Ubuntu) Systems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can install the dependencies by running:
+
+ apt-get install automake autoconf libtool help2man
+
+Be sure to update the version numbers to match your system's available packages.
+
+Mac OS X
+~~~~~~~~
+
+You can install the dependencies by running:
+
+ port install automake autoconf libtool help2man
+
+You will need MacPorts installed to use the `port` command.
+
+Bootstrapping
+-------------
+
+Bootstrap the pristine source by running:
+
+ ./bootstrap
+
+You must repeat this step every time you update your source checkout.
+
+Testing
+-------
+
+Check the test suite by running:
+
+ make check
+
+Generate a coverage report by running:
+
+ make cover
+
+Please report any problems to the developers' mailing list.
+
+Releasing
+---------
+
+Unix-like Systems
+~~~~~~~~~~~~~~~~~
+
+Configure the source by running:
+
+ ./configure
+
+Prepare the release artefacts by running:
+
+ make distcheck
+
+You can prepare signed release artefacts by running:
+
+ make distsign
+
+The release artefacts can be found in the root source directory.
+
+Microsoft Windows
+~~~~~~~~~~~~~~~~~
+
+Configure the source by running:
+
+ ./configure
+
+Prepare the release artefacts by running:
+
+ make dist
+
+The release artefacts can be found in the `etc/windows` directory.
+
+Until the build system has been improved, you must make sure that you run this
+command from a clean source checkout. If you do not, your test database and log
+files will be bundled up in the release artefact.
diff --git a/1.1.x/INSTALL.Unix b/1.1.x/INSTALL.Unix
new file mode 100644
index 00000000..768e3846
--- /dev/null
+++ b/1.1.x/INSTALL.Unix
@@ -0,0 +1,231 @@
+Apache CouchDB INSTALL.Unix
+===========================
+
+A high-level guide to Unix-like systems, inc. Mac OS X and Ubuntu.
+
+Dependencies
+------------
+
+You will need the following installed:
+
+ * Erlang OTP (>=R12B-5) (http://erlang.org/)
+ * ICU (http://icu.sourceforge.net/)
+ * OpenSSL (http://www.openssl.org/)
+ * Mozilla SpiderMonkey (1.8) (http://www.mozilla.org/js/spidermonkey/)
+ * libcurl (http://curl.haxx.se/libcurl/)
+ * GNU Make (http://www.gnu.org/software/make/)
+ * GNU Compiler Collection (http://gcc.gnu.org/)
+
+It is recommended that you install Erlang OTP R12B-5 or above where possible.
+
+Ubuntu
+~~~~~~
+
+See
+
+ http://wiki.apache.org/couchdb/Installing_on_Ubuntu
+
+for updated instructions on how to install on Ubuntu.
+
+Debian-based Systems
+~~~~~~~~~~~~~~~~~~~~
+
+You can install the build tools by running:
+
+ sudo apt-get install build-essential
+
+You can install the other dependencies by running:
+
+ sudo apt-get install erlang libicu-dev libmozjs-dev libcurl4-openssl-dev
+
+Be sure to update the version numbers to match your system's available packages.
+
+Mac OS X
+~~~~~~~~
+
+You can install the build tools by running:
+
+ open /Applications/Installers/Xcode\ Tools/XcodeTools.mpkg
+
+You can install the other dependencies by running:
+
+ sudo port install icu erlang spidermonkey curl
+
+You will need MacPorts installed to use the `port` command.
+
+Installing
+----------
+
+Once you have satisfied the dependencies you should run:
+
+ ./configure
+
+This script will configure CouchDB to be installed into `/usr/local` by default.
+
+If you wish to customise the installation, pass `--help` to this script.
+
+If everything was successful you should see the following message:
+
+ You have configured Apache CouchDB, time to relax.
+
+Relax.
+
+To install CouchDB you should run:
+
+ make && sudo make install
+
+You only need to use `sudo` if you're installing into a system directory.
+
+Try `gmake` if `make` is giving you any problems.
+
+If everything was successful you should see the following message:
+
+ You have installed Apache CouchDB, time to relax.
+
+Relax.
+
+First Run
+---------
+
+You can start the CouchDB server by running:
+
+ sudo -i -u couchdb couchdb
+
+This uses the `sudo` command to run the `couchdb` command as the `couchdb` user.
+
+When CouchDB starts it should eventually display the following message:
+
+ Apache CouchDB has started, time to relax.
+
+Relax.
+
+To check that everything has worked, point your web browser to:
+
+ http://127.0.0.1:5984/_utils/index.html
+
+From here you should run the test suite.
+
+Security Considerations
+-----------------------
+
+You should create a special `couchdb` user for CouchDB.
+
+On many Unix-like systems you can run:
+
+ adduser --system \
+ --home /usr/local/var/lib/couchdb \
+ --no-create-home \
+ --shell /bin/bash \
+ --group --gecos \
+ "CouchDB Administrator" couchdb
+
+On Mac OS X you can use the Workgroup Manager to create users:
+
+ http://www.apple.com/support/downloads/serveradmintools1047.html
+
+You must make sure that:
+
+ * The user has a working POSIX shell
+
+ * The user's home directory is `/usr/local/var/lib/couchdb`
+
+You can test this by:
+
+ * Trying to log in as the `couchdb` user
+
+ * Running `pwd` and checking the present working directory
+
+Change the ownership of the CouchDB directories by running:
+
+ chown -R couchdb:couchdb /usr/local/etc/couchdb
+ chown -R couchdb:couchdb /usr/local/var/lib/couchdb
+ chown -R couchdb:couchdb /usr/local/var/log/couchdb
+ chown -R couchdb:couchdb /usr/local/var/run/couchdb
+
+Change the permission of the CouchDB directories by running:
+
+ chmod 0770 /usr/local/etc/couchdb
+ chmod 0770 /usr/local/var/lib/couchdb
+ chmod 0770 /usr/local/var/log/couchdb
+ chmod 0770 /usr/local/var/run/couchdb
+
+Running as a Daemon
+-------------------
+
+SysV/BSD-style Systems
+~~~~~~~~~~~~~~~~~~~~~~
+
+You can use the `couchdb` init script to control the CouchDB daemon.
+
+On SysV-style systems, the init script will be installed into:
+
+ /usr/local/etc/init.d
+
+On BSD-style systems, the init script will be installed into:
+
+ /usr/local/etc/rc.d
+
+We use the `[init.d|rc.d]` notation to refer to both of these directories.
+
+You can control the CouchDB daemon by running:
+
+ /usr/local/etc/[init.d|rc.d]/couchdb [start|stop|restart|status]
+
+If you wish to configure how the init script works, you can edit:
+
+ /usr/local/etc/default/couchdb
+
+Comment out the `COUCHDB_USER` setting if you're running as a non-superuser.
+
+To start the daemon on boot, copy the init script to:
+
+ /etc/[init.d|rc.d]
+
+You should then configure your system to run the init script automatically.
+
+You may be able to run:
+
+ sudo update-rc.d couchdb defaults
+
+If this fails, consult your system documentation for more information.
+
+A `logrotate` configuration is installed into:
+
+ /usr/local/etc/logrotate.d/couchdb
+
+Consult your `logrotate` documentation for more information.
+
+It is critical that the CouchDB logs are rotated so as not to fill your disk.
+
+Mac OS X
+~~~~~~~~
+
+You can use the `launchctl` command to control the CouchDB daemon.
+
+You can load the configuration by running:
+
+ sudo launchctl load \
+ /usr/local/Library/LaunchDaemons/org.apache.couchdb.plist
+
+You can stop the CouchDB daemon by running:
+
+ sudo launchctl unload \
+ /usr/local/Library/LaunchDaemons/org.apache.couchdb.plist
+
+You can start CouchDB by running:
+
+ sudo launchctl start org.apache.couchdb
+
+Because launchd supervises the daemon and starts it again automatically, you
+can restart CouchDB by running:
+
+ sudo launchctl stop org.apache.couchdb
+
+You can edit the launchd configuration by running:
+
+ open /usr/local/Library/LaunchDaemons/org.apache.couchdb.plist
+
+To start the daemon on boot, copy the configuration file to:
+
+ /Library/LaunchDaemons
+
+Consult your system documentation for more information.
diff --git a/1.1.x/INSTALL.Windows b/1.1.x/INSTALL.Windows
new file mode 100644
index 00000000..d2082734
--- /dev/null
+++ b/1.1.x/INSTALL.Windows
@@ -0,0 +1,153 @@
+Apache CouchDB INSTALL.Windows
+==============================
+
+A high-level guide to Microsoft Windows.
+
+Dependencies
+------------
+
+You will need the following installed:
+
+ * Erlang OTP (=R14B01) (http://erlang.org/)
+ * ICU (http://icu.sourceforge.net/)
+ * OpenSSL (http://www.openssl.org/)
+ * Mozilla SpiderMonkey (1.8) (http://www.mozilla.org/js/spidermonkey/)
+ * libcurl (http://curl.haxx.se/libcurl/)
+ * Cygwin (http://www.cygwin.com/)
+ * Visual Studio 2008 (http://msdn.microsoft.com/en-gb/vstudio/default.aspx)
+
+General Notes
+-------------
+
+ * When installing Erlang, you must build it from source.
+
+ The CouchDB build makes use of a number of the Erlang build scripts.
+
+ * When installing ICU, select the binaries built with Visual Studio 2008.
+
+ * When installing Cygwin, be sure to select all the `development` tools.
+
+ * When installing libcurl, be sure to install by hand.
+
+ The Cygwin binaries are incompatible and will not work with Erlang.
+
+Setting Up Cygwin
+-----------------
+
+Before starting any Cygwin terminals, run:
+
+ set CYGWIN=nontsec
+
+To set up your environment, run:
+
+ [VS_BIN]/vcvars32.bat
+
+Replace [VS_BIN] with the path to your Visual Studio `bin` directory.
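+
+For a default Visual Studio 2008 installation this is typically (a
+hypothetical path; adjust to your system):
+
+ "C:\Program Files\Microsoft Visual Studio 9.0\VC\bin\vcvars32.bat"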
+
+You must check that:
+
+ * The `which link` command points to the Microsoft linker.
+
+ * The `which cl` command points to the Microsoft compiler.
+
+ * The `which mc` command points to the Microsoft message compiler.
+
+ * The `which mt` command points to the Microsoft manifest tool.
+
+If you do not do this, the build may fail because the Cygwin versions of
+these tools, found in `/usr/bin`, are used instead.
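+
+A quick way to verify all four (a minimal sketch):
+
+ for tool in link cl mc mt; do which $tool; done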
+
+Building Erlang
+---------------
+
+You must include Win32 OpenSSL.
+
+However, you can skip the GUI tools by running:
+
+ echo "skipping gs" > lib/gs/SKIP
+
+ echo "skipping ic" > lib/ic/SKIP
+
+Follow the rest of the Erlang instructions as described.
+
+After running:
+
+ ./otp_build release -a
+
+You should run:
+
+ ./release/win32/Install.exe
+
+This will set up the release/win32/bin directory correctly.
+
+To set up your environment for building CouchDB, run:
+
+ eval `./otp_build env_win32`
+
+To set up the `ERL_TOP` environment variable, run:
+
+ export ERL_TOP=[ERL_TOP]
+
+Replace `[ERL_TOP]` with the Erlang source directory name.
+
+Remember to use `/cygdrive/c/` instead of `c:/` as the directory prefix.
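+
+For example (a hypothetical source directory):
+
+ export ERL_TOP=/cygdrive/c/src/otp_src_R14B01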
+
+To set up your path, run:
+
+ export PATH=$ERL_TOP/release/win32/erts-5.8.2/bin:$PATH
+
+If everything was successful, you should be ready to build CouchDB.
+
+Relax.
+
+Building CouchDB
+----------------
+
+Once you have satisfied the dependencies, you should run:
+
+ ./configure \
+ --with-js-include=/cygdrive/c/path_to_spidermonkey_include \
+ --with-js-lib=/cygdrive/c/path_to_spidermonkey_lib \
+ --with-win32-icu-binaries=/cygdrive/c/path_to_icu_binaries_root \
+ --with-erlang=$ERL_TOP/release/win32/usr/include \
+ --with-win32-curl=/cygdrive/c/path/to/curl/root/directory \
+ --with-openssl-bin-dir=/cygdrive/c/openssl/bin \
+ --with-msvc-redist-dir=/cygdrive/c/dir/with/vcredist_platform_executable \
+ --prefix=$ERL_TOP/release/win32
+
+This command could take a while to complete.
+
+If everything was successful you should see the following message:
+
+ You have configured Apache CouchDB, time to relax.
+
+Relax.
+
+To install CouchDB you should run:
+
+ make install
+
+If everything was successful you should see the following message:
+
+ You have installed Apache CouchDB, time to relax.
+
+Relax.
+
+First Run
+---------
+
+You can start the CouchDB server by running:
+
+ $ERL_TOP/release/win32/bin/couchdb.bat
+
+When CouchDB starts it should eventually display the following message:
+
+ Apache CouchDB has started, time to relax.
+
+Relax.
+
+To check that everything has worked, point your web browser to:
+
+ http://127.0.0.1:5984/_utils/index.html
+
+From here you should run the test suite in either Firefox 3.6+ or Safari 4+.
diff --git a/1.1.x/LICENSE b/1.1.x/LICENSE
new file mode 100644
index 00000000..063c9981
--- /dev/null
+++ b/1.1.x/LICENSE
@@ -0,0 +1,400 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+Apache CouchDB Subcomponents
+
+The Apache CouchDB project includes a number of subcomponents with separate
+copyright notices and license terms. Your use of the code for these
+subcomponents is subject to the terms and conditions of the following licenses.
+
+For the m4/ac_check_icu.m4 component:
+
+ Copyright (c) 2005 Akos Maroy <darkeye@tyrell.hu>
+
+ Copying and distribution of this file, with or without modification, are
+ permitted in any medium without royalty provided the copyright notice
+ and this notice are preserved.
+
+For the share/www/script/jquery.js component:
+
+ Copyright (c) 2009 John Resig, http://jquery.com/
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+For the share/www/script/jquery-ui-1.8.11.custom.min.js and
+ share/www/style/jquery-ui-1.8.11.custom.css components:
+
+ Copyright (c) 2011 Paul Bakaus, http://jqueryui.com/
+
+ This software consists of voluntary contributions made by many
+ individuals (AUTHORS.txt, http://jqueryui.com/about) For exact
+ contribution history, see the revision history and logs, available
+ at http://jquery-ui.googlecode.com/svn/
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+For the share/www/script/jquery.form.js component:
+
+ http://malsup.com/jquery/form/
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+For the share/www/script/json2.js component:
+
+ Public Domain
+
+ No warranty expressed or implied. Use at your own risk.
+
+For the src/mochiweb component:
+
+ Copyright (c) 2007 Mochi Media, Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+For the src/ibrowse component:
+
+ Copyright (c) 2006, Chandrashekhar Mullaparthi
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of the T-Mobile nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+For the src/erlang-oauth component:
+
+ Copyright (c) 2008-2009 Tim Fletcher <http://tfletcher.com/>
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+For the src/etap component:
+
+ Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/1.1.x/Makefile.am b/1.1.x/Makefile.am
new file mode 100644
index 00000000..2d0bbbe6
--- /dev/null
+++ b/1.1.x/Makefile.am
@@ -0,0 +1,149 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+SUBDIRS = bin etc src share test var utils
+
+ACLOCAL_AMFLAGS = -I m4
+
+localdoc_DATA = \
+ AUTHORS.gz \
+ BUGS.gz \
+ CHANGES.gz \
+ DEVELOPERS.gz \
+ INSTALL.gz \
+ INSTALL.Unix.gz \
+ INSTALL.Windows.gz \
+ LICENSE.gz \
+ NEWS.gz \
+ NOTICE.gz \
+ README.gz \
+ THANKS.gz
+
+DISTCLEANFILES = $(localdoc_DATA)
+
+EXTRA_DIST = \
+ AUTHORS \
+ BUGS \
+ CHANGES \
+ DEVELOPERS \
+ INSTALL \
+ INSTALL.Unix \
+ INSTALL.Windows \
+ LICENSE \
+ NEWS \
+ NOTICE \
+ README \
+ THANKS \
+ license.skip
+
+AUTHORS.gz: $(top_srcdir)/AUTHORS
+ -gzip -9 < $< > $@
+
+BUGS.gz: $(top_srcdir)/BUGS
+ -gzip -9 < $< > $@
+
+CHANGES.gz: $(top_srcdir)/CHANGES
+ -gzip -9 < $< > $@
+
+DEVELOPERS.gz: $(top_srcdir)/DEVELOPERS
+ -gzip -9 < $< > $@
+
+INSTALL.gz: $(top_srcdir)/INSTALL
+ -gzip -9 < $< > $@
+
+INSTALL.Unix.gz: $(top_srcdir)/INSTALL.Unix
+ -gzip -9 < $< > $@
+
+INSTALL.Windows.gz: $(top_srcdir)/INSTALL.Windows
+ -gzip -9 < $< > $@
+
+LICENSE.gz: $(top_srcdir)/LICENSE
+ -gzip -9 < $< > $@
+
+NEWS.gz: $(top_srcdir)/NEWS
+ -gzip -9 < $< > $@
+
+NOTICE.gz: $(top_srcdir)/NOTICE
+ -gzip -9 < $< > $@
+
+README.gz: $(top_srcdir)/README
+ -gzip -9 < $< > $@
+
+THANKS.gz: $(top_srcdir)/THANKS
+ -gzip -9 < $< > $@
+
+check: dev
+ $(top_builddir)/test/etap/run
+
+cover: dev
+ rm -f cover/*.coverdata
+ COVER=1 COVER_BIN=./src/couchdb/ $(top_builddir)/test/etap/run
+ SRC=./src/couchdb/ \
+ $(ERL) -noshell \
+ -pa src/etap \
+ -eval 'etap_report:create()' \
+ -s init stop > /dev/null 2>&1
+
+dev: all
+ @echo "This command is intended for developers to use;"
+ @echo "it creates development ini files as well as a"
+ @echo "$(top_builddir)/tmp structure for development runtime files."
+ @echo "Use ./utils/run to launch CouchDB from the source tree."
+ mkdir -p $(top_builddir)/etc/couchdb/default.d
+ mkdir -p $(top_builddir)/etc/couchdb/local.d
+ mkdir -p $(top_builddir)/tmp/lib
+ mkdir -p $(top_builddir)/tmp/log
+ mkdir -p $(top_builddir)/tmp/run/couchdb
+
+install-data-hook:
+ @echo
+ @echo "You have installed Apache CouchDB, time to relax."
+
+distclean-local:
+ rm -fr $(top_builddir)/etc/couchdb/default.d
+ rm -fr $(top_builddir)/etc/couchdb/local.d
+ rm -fr $(top_builddir)/tmp
+
+.PHONY: local-clean
+local-clean: maintainer-clean
+ @echo "This command is intended for maintainers to use;"
+ @echo "it deletes files that may need special tools to rebuild."
+ rm -f $(top_srcdir)/INSTALL
+ rm -f $(top_srcdir)/acinclude.m4
+ rm -f $(top_srcdir)/aclocal.m4
+ rm -f $(top_srcdir)/config.h.in
+ rm -f $(top_srcdir)/configure
+ rm -f $(top_srcdir)/test/etap/temp.*
+ rm -f $(top_srcdir)/*.tar.gz
+ rm -f $(top_srcdir)/*.tar.gz.*
+ find $(top_srcdir) -name Makefile.in -exec rm -f {} \;
+
+dist-hook:
+ find $(top_srcdir) -type f -name "._*" -exec rm -f {} \;
+ find $(top_builddir) -type f -name "._*" -exec rm -f {} \;
+
+distcheck-hook:
+ grep -rL 'http://www.apache.org/licenses/LICENSE-2.0' * \
+ | grep -vEf license.skip; \
+ test "$$?" -eq 1
+
+.PHONY: distsign
+distsign: distcheck check
+ @# @@ unpack archive and run diff -r to double check missing files
+ @# @@ does automake have anything that does this?
+ gpg --armor --detach-sig --default-key DF3CEBA3 \
+ < $(top_srcdir)/$(distdir).tar.gz \
+ > $(top_srcdir)/$(distdir).tar.gz.asc
+ md5sum $(top_srcdir)/$(distdir).tar.gz \
+ > $(top_srcdir)/$(distdir).tar.gz.md5
+ sha1sum $(top_srcdir)/$(distdir).tar.gz \
+ > $(top_srcdir)/$(distdir).tar.gz.sha
diff --git a/1.1.x/NEWS b/1.1.x/NEWS
new file mode 100644
index 00000000..52e5f87b
--- /dev/null
+++ b/1.1.x/NEWS
@@ -0,0 +1,301 @@
+Apache CouchDB NEWS
+===================
+
+For details about backwards incompatible changes, see:
+
+ http://wiki.apache.org/couchdb/Breaking_changes
+
+Each release section notes when backwards incompatible changes have been made.
+
+Version 1.1.0
+-------------
+
+All NEWS items for 1.0.2 also apply to 1.1.0.
+
+ * Native SSL support.
+ * Added support for HTTP range requests for attachments.
+ * Added built-in filters for `_changes`: `_doc_ids` and `_design`.
+ * Added configuration option for TCP_NODELAY aka "Nagle".
+ * Allow wildcards in vhosts definitions.
+ * More granular ETag support for views.
+ * More flexible URL rewriter.
+ * Added OS Process module to manage daemons outside of CouchDB.
+ * Added HTTP Proxy handler for more scalable externals.
+ * Added `_replicator` database to manage replications.
+ * Multiple micro-optimizations when reading data.
+ * Added CommonJS support to map functions.
+ * Added `stale=update_after` query option that triggers a view update after
+ returning a `stale=ok` response.
+ * More explicit error messages when it's not possible to access a file due
+ to lack of permissions.
+ * Added a "change password"-feature to Futon.
+
+
+Version 1.0.1
+-------------
+
+ * Fix data corruption bug COUCHDB-844. Please see
+ http://couchdb.apache.org/notice/1.0.1.html for details.
+ * Added support for replication via an HTTP/HTTPS proxy.
+ * Fixed various replicator bugs for interop with older CouchDB versions.
+ * Show fields saved along with _deleted=true. Allows for auditing of deletes.
+ * Enable basic-auth popup when required to access the server, to prevent
+ people from getting locked out.
+ * User interface element for querying stale (cached) views.
+
+Version 1.0.0
+-------------
+
+ * More efficient header commits.
+ * Use O_APPEND to save lseeks.
+ * Faster implementation of pread_iolist(). Further improves performance on
+ concurrent reads.
+ * Added authentication caching.
+ * Faster default view collation.
+ * Added option to include update_seq in view responses.
+
+Version 0.11.2
+--------------
+
+ * Replicator bugfixes for replicating design documents from secured databases.
+ * Better error messages on invalid URL requests.
+ * User documents can now be deleted by admins or the user.
+ * Avoid potential DoS attack by guarding all creation of atoms.
+ * Some Futon and JavaScript library bugfixes.
+ * Fixed CVE-2010-2234: Apache CouchDB Cross Site Request Forgery Attack
+
+Version 0.11.1
+--------------
+
+ * Mask passwords in active tasks and logging.
+ * Update mochijson2 to allow output of BigNums not in float form.
+ * Added support for X-HTTP-METHOD-OVERRIDE.
+ * Disable jsonp by default.
+ * Accept gzip encoded standalone attachments.
+ * Made max_concurrent_connections configurable.
+ * Added continuous replication option to Futon.
+ * Added option to replicating test results anonymously to a community
+ CouchDB instance.
+ * Allow creation and deletion of config entries in Futon.
+ * Fixed various UI issues in Futon.
+ * Fixed compatibility with the Erlang R14 series.
+ * Fixed warnings on Linux builds.
+ * Fixed build error when aclocal needs to be called during the build.
+ * Require ICU 4.3.1.
+ * Fixed compatibility with Solaris.
+ * Added authentication redirect URL to log in clients.
+ * Added authentication caching, to avoid repeated opening and closing of the
+ users database for each request requiring authentication.
+ * Made authentication timeout configurable.
+ * Temporary views are now admin-only resources.
+ * Don't require a revpos for attachment stubs.
+ * Make file deletions async to avoid pauses during compaction and db
+ deletion.
+ * Fixed a wrong offset when writing headers and converting them to blocks;
+ this was only triggered when the header was larger than 4k.
+ * Preserve _revs_limit and instance_start_time after compaction.
+ * Fixed timeout with large .ini files.
+ * Added tests for couch.js and jquery.couch.js.
+ * Added various API features to jquery.couch.js.
+ * Faster default view collation.
+ * Upgrade CommonJS modules support to 1.1.1.
+ * Added option to include update_seq in view responses.
+ * Fixed Erlang filter funs and normalized the filter fun API.
+ * Fixed hang in view shutdown.
+ * Refactored various internal APIs related to attachment streaming.
+ * Fixed hanging replication.
+ * Fixed keepalive issue.
+ * Allow global rewrites so system defaults are available in vhosts.
+ * Allow isolation of databases with vhosts.
+ * Made the test suite overall more reliable.
+
+Version 0.11.0
+--------------
+
+This version is a feature-freeze release candidate for Apache CouchDB 1.0.
+
+ * Fixed CVE-2010-0009: Apache CouchDB Timing Attack Vulnerability.
+ * Added support for building a Windows installer as part of 'make dist'.
+ * Added optional 'raw' binary collation for faster view builds where Unicode
+ collation is not important.
+ * Improved view index build time by reducing ICU collation callouts.
+ * Added option to implicitly create replication target databases.
+ * Improved view information objects.
+ * Bug fix for partial updates during view builds.
+ * Bug fix for building couch.app's module list.
+ * Fixed a problem with statistics timers and system sleep.
+ * Improved the statistics calculations to use an online moving window
+ algorithm.
+ * Added batching of multiple update requests, to improve throughput with many
+ writers.
+ * Removed the now redundant couch_batch_save module.
+ * Bug fix for premature termination of chunked responses.
+ * Improved speed and concurrency of config lookups.
+ * Fixed an edge case for HTTP redirects during replication.
+ * Fixed HTTP timeout handling for replication.
+ * Fixed query parameter handling in OAuth'd replication.
+ * Fixed a bug preventing mixing languages with lists and views.
+ * Avoid OS process leaks in lists.
+ * Avoid leaking file descriptors on automatic replication restarts.
+ * Various improvements to the Futon UI.
+ * Provide Content-MD5 header support for attachments.
+ * Added default cookie-authentication and users db.
+ * Added per-db reader access control lists.
+ * Added per-db security object for configuration data in validation functions.
+ * Added URL Rewriter handler.
+ * Added proxy authentication handler.
+ * Added ability to replicate documents by id.
+ * Added virtual host handling.
+ * Uses json2.js for JSON serialization compatibility with native JSON.
+
+Version 0.10.2
+--------------
+
+ * Fixed CVE-2010-0009: Apache CouchDB Timing Attack Vulnerability.
+
+Version 0.10.1
+--------------
+
+ * Fixed test suite to work with build system.
+ * Fixed a problem with statistics timers and system sleep.
+ * Fixed an edge case for HTTP redirects during replication.
+ * Fixed HTTP timeout handling for replication.
+ * Fixed query parameter handling in OAuth'd replication.
+ * Fixed a bug preventing mixing languages with lists and views.
+ * Avoid OS process leaks in lists.
+
+Version 0.10.0
+--------------
+
+This release contains backwards incompatible changes; please see above for help.
+
+ * General performance improvements.
+ * View index generation speedups.
+ * Even more robust storage format.
+ * Native Erlang Views for high-performance applications.
+ * More robust push and pull replication.
+ * Two-legged OAuth support for applications and replication (three-legged in
+ preparation).
+ * Cookie authentication.
+ * API detail improvements.
+ * Better RFC 2616 (HTTP 1.1) compliance.
+ * Added modular configuration file directories.
+ * Miscellaneous improvements to build, system integration, and portability.
+
+Version 0.9.2
+-------------
+
+ * Remove branch callbacks to allow building couchjs against newer versions of
+ Spidermonkey.
+ * Fix replication with 0.10 servers initiated by a 0.9 server.
+
+Version 0.9.1
+-------------
+
+ * Various bug fixes for the build system, configuration, statistics reporting,
+ database core, external handlers, Futon interface, HTTP interface,
+ JavaScript View Server and replicator.
+
+Version 0.9.0
+-------------
+
+This release contains backwards incompatible changes; please see above for help.
+
+ * Modular configuration.
+ * Performance enhancements for document and view access.
+ * More resilient replication process.
+ * Replication streams binary attachments.
+ * Administrator role and basic authentication.
+ * Document validation functions in design documents.
+ * Show and list functions for rendering documents and views as developer
+ controlled content-types.
+ * External process server module.
+ * Attachment uploading from Futon.
+ * Etags for views, lists, shows, document and attachment requests.
+ * Miscellaneous improvements to build, system integration, and portability.
+
+Version 0.8.1-incubating
+------------------------
+
+ * Various bug fixes for replication, compaction, the HTTP interface and the
+ JavaScript View Server.
+
+Version 0.8.0-incubating
+------------------------
+
+This release contains backwards incompatible changes; please see above for help.
+
+ * Changed core licensing to the Apache Software License 2.0.
+ * Refactoring of the core view and storage engines.
+ * Added support for incremental map/reduce views.
+ * Changed database file format.
+ * Many improvements to Futon, the web administration interface.
+ * Miscellaneous improvements to build, system integration, and portability.
+ * Swapped out Erlang's inets HTTP server for the Mochiweb HTTP server.
+ * SpiderMonkey is no longer included with CouchDB, but rather treated as an
+ external dependency.
+ * Added bits of awesome.
+
+Version 0.7.2
+-------------
+
+ * Small changes to build process and `couchdb` command.
+ * Database server official port is now 5984 TCP/UDP instead of 8888.
+
+Version 0.7.1
+-------------
+
+ * Small compatibility issue with Firefox 3 fixed.
+
+Version 0.7.0
+-------------
+
+ * Infrastructure rewritten to use the GNU build system for portability.
+ * The built-in database browsing tool has been rewritten to provide a much
+ nicer interface for interacting directly with CouchDB from your web browser.
+ * XML and Fabric have been replaced with JSON and JavaScript for data
+ transport and View definitions.
+
+Version 0.6.0
+-------------
+
+ * A replication facility is now available.
+ * CouchPeek can now create, delete and view documents.
+ * Building from source is easier and less error prone.
+
+Version 0.5.0
+-------------
+
+ * A built-in CouchPeek utility.
+ * A full install kit buildable from a single command.
+ * A new GNU/Linux version is available. An OS X version is coming soon.
+
+Version 0.4.0
+-------------
+
+ * Non-existent variables are now nil lists.
+ * Couch error codes and messages are no longer sent in the HTTP fields,
+ instead they are exclusively returned in the XML body. This is to avoid HTTP
+ header parsing problems with oddly formed error messages.
+ * Returned error messages are now logged at the server at the `info` level to
+ make general debugging easier.
+ * Fixed a problem where big table builds caused timeout errors.
+ * Lots of changes in the low level machinery. Most formulas will continue to
+ function the same.
+ * Added full compiler support for extended characters in formula source.
+ * Support for Perl/Ruby like regular expressions.
+ * Added `total_rows` and `result_start` attributes to tables.
+
+Version 0.3.0
+-------------
+
+ * CouchDB now fully supports Unicode and locale specific collation via the ICU
+ library, both in the Fabric engine and computed tables.
+ * The `in` operator has been added to Fabric.
+ * The `startdoc` query string variable specifies the starting document to use
+ if there are multiple rows with identical startkeys.
+ * The `skip` query string variable specifies the number of rows to skip before
+ returning results. The `skip` value must be a positive integer. If used with
+ a `count` variable the skipped rows aren't counted as output.
+ * Various changes to the output XML format.
diff --git a/1.1.x/NOTICE b/1.1.x/NOTICE
new file mode 100644
index 00000000..4daa496f
--- /dev/null
+++ b/1.1.x/NOTICE
@@ -0,0 +1,55 @@
+Apache CouchDB
+Copyright 2009 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+This product also includes the following third-party components:
+
+ * ac_check_icu.m4 (http://autoconf-archive.cryp.to/ac_check_icu.html)
+
+ Copyright 2008, Akos Maroy <darkeye@tyrell.hu>
+
+ * ac_check_curl.m4 (http://autoconf-archive.cryp.to/ac_check_curl.html)
+
+ Copyright 2008, Akos Maroy <darkeye@tyrell.hu>
+
+ * jQuery (http://jquery.com/)
+
+ Copyright 2010, John Resig
+
+ * jQuery UI (http://jqueryui.com)
+
+ Copyright 2011, Paul Bakaus
+
+ * json2.js (http://www.json.org/)
+
+ In the public domain
+
+ * MochiWeb (http://code.google.com/p/mochiweb/)
+
+ Copyright 2007, Mochi Media Corporation
+
+ * ibrowse (http://github.com/cmullaparthi/ibrowse/tree/master)
+
+ Copyright 2009, Chandrashekhar Mullaparthi
+
+ * Erlang OAuth (http://github.com/tim/erlang-oauth/tree/master)
+
+ Copyright 2009, Tim Fletcher <http://tfletcher.com/>
+
+ * ETap (http://github.com/ngerakines/etap/)
+
+ Copyright 2009, Nick Gerakines <nick@gerakines.net>
+
+ * mimeparse.js (http://code.google.com/p/mimeparse/)
+
+ Copyright 2009, Chris Anderson <jchris@apache.org>
+
+ * base64.js
+
+ Copyright 1999, Masanao Izumo <iz@onicos.co.jp>
+
+ * jspec.js (http://visionmedia.github.com/jspec/)
+
+ Copyright 2010 TJ Holowaychuk <tj@vision-media.ca>
diff --git a/1.1.x/README b/1.1.x/README
new file mode 100644
index 00000000..540226d3
--- /dev/null
+++ b/1.1.x/README
@@ -0,0 +1,81 @@
+Apache CouchDB README
+=====================
+
+Installation
+------------
+
+For a low-level guide, see:
+
+ INSTALL
+
+For a high-level guide to Unix-like systems, including Mac OS X and Ubuntu, see:
+
+ INSTALL.Unix
+
+For a high-level guide to Microsoft Windows, see:
+
+ INSTALL.Windows
+
+Follow the proper instructions to get CouchDB installed on your system.
+
+If you're having problems, skip to the next section.
+
+Troubleshooting
+----------------
+
+For troubleshooting, see:
+
+ http://wiki.apache.org/couchdb/Troubleshooting
+
+If you're getting a cryptic error message, see:
+
+ http://wiki.apache.org/couchdb/Error_messages
+
+For general help, see:
+
+ http://couchdb.apache.org/community/lists.html
+
+The mailing lists provide a wealth of support and knowledge for you to tap into.
+Feel free to drop by with your questions or discussion. See the official CouchDB
+website for more information about our community resources.
+
+
+Running the Testsuite
+---------------------
+
+Run the test suite for couch.js and jquery.couch.js by browsing to:
+
+ http://127.0.0.1:5984/_utils/spec/run.html
+
+It should work in at least Firefox >= 3.6 and Safari >= 4.0.4.
+
+Read more about JSpec at:
+
+ http://jspec.info/
+
+Troubleshooting
+~~~~~~~~~~~~~~~
+
+ * If you change the specs but your changes have no effect, manually reload the changed spec file in the browser.
+
+ * If the spec that tests Erlang views fails, make sure you have enabled Erlang views as described here: <http://wiki.apache.org/couchdb/EnableErlangViews>
+
+
+Cryptographic Software Notice
+-----------------------------
+
+This distribution includes cryptographic software. The country in which you
+currently reside may have restrictions on the import, possession, use, and/or
+re-export to another country, of encryption software. BEFORE using any
+encryption software, please check your country's laws, regulations and policies
+concerning the import, possession, or use, and re-export of encryption software,
+to see if this is permitted. See <http://www.wassenaar.org/> for more
+information.
+
+The U.S. Government Department of Commerce, Bureau of Industry and Security
+(BIS), has classified this software as Export Commodity Control Number (ECCN)
+5D002.C.1, which includes information security software using or performing
+cryptographic functions with asymmetric algorithms. The form and manner of this
+Apache Software Foundation distribution makes it eligible for export under the
+License Exception ENC Technology Software Unrestricted (TSU) exception (see the
+BIS Export Administration Regulations, Section 740.13) for both object code and
+source code.
+
+The following provides more details on the included cryptographic software:
+
+CouchDB includes an HTTP client (ibrowse) with SSL functionality.
diff --git a/1.1.x/THANKS b/1.1.x/THANKS
new file mode 100644
index 00000000..aae7991c
--- /dev/null
+++ b/1.1.x/THANKS
@@ -0,0 +1,85 @@
+Apache CouchDB THANKS
+=====================
+
+A number of people have contributed to Apache CouchDB by reporting problems,
+suggesting improvements or submitting changes. Some of these people are:
+
+ * William Beh <willbeh@gmail.com>
+ * Dirk Schalge <dirk@epd-me.net>
+ * Roger Leigh <rleigh@debian.org>
+ * Sam Ruby <rubys@intertwingly.net>
+ * Carlos Valiente <superdupont@gmail.com>
+ * Till Klampaeckel <till@klampaeckel.de>
+ * Jim Lindley <web@jimlindley.com>
+ * Yoan Blanc <yoan.blanc@gmail.com>
+ * Michael Gottesman <gottesmm@reed.edu>
+ * Mark Baran <mebaran@gmail.com>
+ * Michael Hendricks <michael@ndrix.org>
+ * Antony Blakey <antony.blakey@gmail.com>
+ * Paul Carey <paul.p.carey@gmail.com>
+ * Hunter Morris <huntermorris@gmail.com>
+ * Brian Palmer <jira@brian.codekitchen.net>
+ * Maximillian Dornseif <md@hudora.de>
+ * Eric Casteleijn <eric.casteleijn@canonical.com>
+ * Maarten Thibaut <mthibaut@cisco.com>
+ * Florian Ebeling <florian.ebeling@gmail.com>
+ * Volker Mische <volker.mische@gmail.com>
+ * Brian Candler <B.Candler@pobox.com>
+ * Brad Anderson <brad@sankatygroup.com>
+ * Nick Gerakines <nick@gerakines.net>
+ * Bob Dionne <dionne@member.fsf.org>
+ * Kevin Ilchmann Jørgensen <kijmail@gmail.com>
+ * Dirkjan Ochtman <dirkjan@ochtman.nl>
+ * Sebastian Cohnen <sebastian.cohnen@gmx.net>
+ * Sven Helmberger <sven.helmberger@gmx.de>
+ * Dan Walters <dan@danwalters.net>
+ * Curt Arnold <carnold@apache.org>
+ * Gustavo Niemeyer
+ * Joshua Bronson <jabronson@gmail.com>
+ * Kostis Sagonas <kostis@cs.ntua.gr>
+ * Matthew Hooker <mwhooker@gmail.com>
+ * Ilia Cheishvili <ilia.cheishvili@gmail.com>
+ * Lena Herrmann <lena@zeromail.org>
+ * Jack Moffit <metajack@gmail.com>
+ * Damjan Georgievski <gdamjan@gmail.com>
+ * Jan Kassens <jan@kassens.net>
+ * James Marca <jmarca@translab.its.uci.edu>
+ * Matt Goodall <matt.goodall@gmail.com>
+ * Joel Clark <unsigned_char@yahoo.com>
+ * Matt Lyon <matt@flowerpowered.com>
+ * mikeal <mikeal.rogers@gmail.com>
+ * Randall Leeds <randall.leeds@gmail.com>
+ * Joscha Feth <joscha@feth.com>
+ * Jarrod Roberson <jarrod@vertigrated.com>
+ * Jae Kwon <jkwon.work@gmail.com>
+ * Gavin Sherry <swm@alcove.com.au>
+ * Timothy Smith <tim@couch.io>
+ * Martin Haaß <MartinHaass@gmx.net>
+ * Hans Ulrich Niedermann <hun@n-dimensional.de>
+ * Jason Smith <jhs@proven-corporation.com>
+ * Dmitry Unkovsky <oil.crayons@gmail.com>
+ * Zachary Zolton <zachary.zolton@gmail.com>
+ * Brian Jenkins <bonkydog@bonkydog.com>
+ * Paul Bonser <pib@paulbonser.com>
+ * Caleb Land <caleb.land@gmail.com>
+ * Juhani Ränkimies <juhani@juranki.com>
+ * Kev Jackson <foamdino@gmail.com>
+ * Jonathan D. Knezek <jdknezek@gmail.com>
+ * David Rose <doppler@gmail.com>
+ * Lim Yue Chuan <shasderias@gmail.com>
+ * David Davis <xantus@xantus.org>
+ * Klaus Trainer <klaus.trainer@web.de>
+ * Dale Harvey <dale@arandomurl.com>
+ * Juuso Väänänen <juuso@vaananen.org>
+ * Jeff Zellner <jeff.zellner@gmail.com>
+ * Benjamin Young <byoung@bigbluehat.com>
+ * Gabriel Farrell <gsf747@gmail.com>
+ * Mike Leddy <mike@loop.com.br>
+ * Felix Hummel <apache@felixhummel.de>
+ * Tim Smith <tim@couchbase.com>
+ * Sam Bisbee <sam@sbisbee.com>
+ * Nathan Vander Wilt <natevw@yahoo.com>
+ * Caolan McMahon <caolan.mcmahon@googlemail.com>
+
+
+For a list of authors see the `AUTHORS` file.
diff --git a/1.1.x/acinclude.m4.in b/1.1.x/acinclude.m4.in
new file mode 100644
index 00000000..e1efe10c
--- /dev/null
+++ b/1.1.x/acinclude.m4.in
@@ -0,0 +1,30 @@
+dnl Licensed under the Apache License, Version 2.0 (the "License"); you may not
+dnl use this file except in compliance with the License. You may obtain a
+dnl copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+dnl WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+dnl License for the specific language governing permissions and limitations
+dnl under the License.
+
+m4_define([LOCAL_PACKAGE_AUTHOR_NAME], [The Apache Software Foundation])
+m4_define([LOCAL_PACKAGE_AUTHOR_ADDRESS], [dev@couchdb.apache.org])
+m4_define([LOCAL_PACKAGE_IDENTIFIER], [couchdb])
+m4_define([LOCAL_PACKAGE_TARNAME], [apache-couchdb])
+m4_define([LOCAL_PACKAGE_NAME], [Apache CouchDB])
+m4_define([LOCAL_BUG_URI], [https://issues.apache.org/jira/browse/COUCHDB])
+m4_define([LOCAL_VERSION_MAJOR], [1])
+m4_define([LOCAL_VERSION_MINOR], [1])
+m4_define([LOCAL_VERSION_REVISION], [0])
+m4_define([LOCAL_VERSION_STAGE], [])
+m4_define([LOCAL_VERSION_RELEASE], [%release%])
+m4_define([LOCAL_VERSION_PRIMARY],
+ [LOCAL_VERSION_MAJOR.LOCAL_VERSION_MINOR.LOCAL_VERSION_REVISION])
+m4_define([LOCAL_VERSION_SECONDARY],
+ [LOCAL_VERSION_STAGE[]LOCAL_VERSION_RELEASE])
+m4_define([LOCAL_VERSION],
+ [LOCAL_VERSION_PRIMARY[]LOCAL_VERSION_SECONDARY])
+
diff --git a/1.1.x/bin/Makefile.am b/1.1.x/bin/Makefile.am
new file mode 100644
index 00000000..b8b818cb
--- /dev/null
+++ b/1.1.x/bin/Makefile.am
@@ -0,0 +1,98 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+if WINDOWS
+bin_SCRIPTS = couchdb.bat
+else
+bin_SCRIPTS = couchdb couchjs
+endif
+
+noinst_SCRIPTS = couchjs_dev
+
+if HELP2MAN
+dist_man1_MANS = couchdb.1 couchjs.1
+endif
+
+CLEANFILES = $(bin_SCRIPTS) $(dist_man1_MANS) $(noinst_SCRIPTS)
+
+transform = @program_transform_name@
+couchdb_command_name = `echo couchdb | sed '$(transform)'`
+couchjs_command_name = `echo couchjs | sed '$(transform)'`
+
+couchdb: couchdb.tpl
+ sed -e "s|%ERL%|$(ERL)|g" \
+ -e "s|%ICU_CONFIG%|$(ICU_CONFIG)|g" \
+ -e "s|%bindir%|@bindir@|g" \
+ -e "s|%localerlanglibdir%|@localerlanglibdir@|g" \
+ -e "s|%defaultini%|default.ini|g" \
+ -e "s|%localini%|local.ini|g" \
+ -e "s|%localconfdir%|@localconfdir@|g" \
+ -e "s|%localstatelogdir%|@localstatelogdir@|g" \
+ -e "s|%localstatelibdir%|@localstatelibdir@|g" \
+ -e "s|%localstatedir%|@localstatedir@|g" \
+ -e "s|%bug_uri%|@bug_uri@|g" \
+ -e "s|%package_author_address%|@package_author_address@|g" \
+ -e "s|%package_author_name%|@package_author_name@|g" \
+ -e "s|%package_name%|@package_name@|g" \
+ -e "s|%version%|@version@|g" \
+ -e "s|%couchdb_command_name%|$(couchdb_command_name)|g" > \
+ $@ < $<
+ chmod +x $@
+
+couchjs: couchjs.tpl
+ sed -e "s|%locallibbindir%|@locallibbindir@|g" \
+ -e "s|%bug_uri%|@bug_uri@|g" \
+ -e "s|%package_author_address%|@package_author_address@|g" \
+ -e "s|%package_author_name%|@package_author_name@|g" \
+ -e "s|%package_name%|@package_name@|g" \
+ -e "s|%version%|@version@|g" \
+ -e "s|%couchjs_command_name%|$(couchjs_command_name)|g" > \
+ $@ < $<
+ chmod +x $@
+
+couchjs_dev: couchjs.tpl
+ sed -e "s|%locallibbindir%|$(abs_top_builddir)/src/couchdb/priv|g" \
+ -e "s|%bug_uri%|@bug_uri@|g" \
+ -e "s|%package_author_address%|@package_author_address@|g" \
+ -e "s|%package_author_name%|@package_author_name@|g" \
+ -e "s|%package_name%|@package_name@|g" \
+ -e "s|%version%|@version@|g" \
+ -e "s|%couchjs_command_name%|$(couchjs_command_name)|g" > \
+ $@ < $<
+ chmod +x $@
+
+couchdb.bat: couchdb.bat.tpl
+ sed -e "s|%ICU_CONFIG%|$(ICU_CONFIG)|g" \
+ -e "s|%version%|@version@|g" \
+ $< > $@
+
+HELP2MAN_OPTION=--no-info --help-option="-h" --version-option="-V"
+
+# XXX: Because the scripts are made at build time for the user we need to
+# XXX: depend on the original templates so as not to cause the rebuilding of
+# XXX: the man pages.
+
+couchdb.1: couchdb.tpl.in
+ touch $@
+ if test -x "$(HELP2MAN_EXECUTABLE)"; then \
+ $(MAKE) -f Makefile couchdb; \
+ $(HELP2MAN_EXECUTABLE) $(HELP2MAN_OPTION) \
+ --name="Apache CouchDB database server" ./couchdb --output $@; \
+ fi
+
+couchjs.1: couchjs.tpl.in
+ touch $@
+ if test -x "$(HELP2MAN_EXECUTABLE)"; then \
+ $(MAKE) -f Makefile couchjs; \
+ $(HELP2MAN_EXECUTABLE) $(HELP2MAN_OPTION) \
+ --name="Apache CouchDB JavaScript interpreter" ./couchjs --output $@; \
+ fi
diff --git a/1.1.x/bin/couchdb.bat.tpl.in b/1.1.x/bin/couchdb.bat.tpl.in
new file mode 100644
index 00000000..48d78513
--- /dev/null
+++ b/1.1.x/bin/couchdb.bat.tpl.in
@@ -0,0 +1,26 @@
+@echo off
+rem Licensed under the Apache License, Version 2.0 (the "License"); you may not
+rem use this file except in compliance with the License. You may obtain a copy
+rem of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+rem WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+rem License for the specific language governing permissions and limitations
+rem under the License.
+
+setlocal
+rem First change to the drive with the erlang bin directory
+%~d0
+rem then change to the erlang bin directory
+cd %~dp0
+
+rem Allow a different erlang executable (eg, erl) to be used.
+rem When using erl instead of werl, server restarts during test runs can fail
+rem intermittently. But using erl should be fine for production use.
+if "%ERL%x" == "x" set ERL=werl.exe
+
+echo CouchDB %version% - prepare to relax...
+%ERL% -sasl errlog_type error -s couch
diff --git a/1.1.x/bin/couchdb.tpl.in b/1.1.x/bin/couchdb.tpl.in
new file mode 100644
index 00000000..94d47439
--- /dev/null
+++ b/1.1.x/bin/couchdb.tpl.in
@@ -0,0 +1,330 @@
+#! /bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+BACKGROUND=false
+DEFAULT_CONFIG_DIR=%localconfdir%/default.d
+DEFAULT_CONFIG_FILE=%localconfdir%/%defaultini%
+ERL_START_OPTIONS="-sasl errlog_type error +K true +A 4"
+HEART_BEAT_TIMEOUT=11
+HEART_COMMAND="%bindir%/%couchdb_command_name% -k"
+INTERACTIVE=false
+KILL=false
+LOCAL_CONFIG_DIR=%localconfdir%/local.d
+LOCAL_CONFIG_FILE=%localconfdir%/%localini%
+PID_FILE=%localstatedir%/run/couchdb/couchdb.pid
+RECURSED=false
+RESET_CONFIG=true
+RESPAWN_TIMEOUT=0
+SCRIPT_ERROR=1
+SCRIPT_OK=0
+SHUTDOWN=false
+STDERR_FILE=couchdb.stderr
+STDOUT_FILE=couchdb.stdout
+
+print_arguments=""
+start_arguments=""
+background_start_arguments=""
+
+basename=`basename $0`
+
+display_version () {
+ cat << EOF
+$basename - %package_name% %version%
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+EOF
+}
+
+display_help () {
+ cat << EOF
+Usage: $basename [OPTION]
+
+The $basename command runs the %package_name% server.
+
+Erlang is called with:
+
+ $ERL_START_OPTIONS
+
+Erlang inherits the environment of this command.
+
+You can override these options using the environment:
+
+ ERL_AFLAGS, ERL_FLAGS, ERL_ZFLAGS
+
+See erl(1) for more information about the environment variables.
+
+The exit status is 0 for success or 1 for failure.
+
+Options:
+
+ -h display a short help message and exit
+ -V display version information and exit
+ -a FILE add configuration FILE to chain
+ -A DIR add configuration DIR to chain
+ -n reset configuration file chain (including system default)
+ -c print configuration file chain and exit
+ -i use the interactive Erlang shell
+ -b spawn as a background process
+ -p FILE set the background PID FILE (overrides system default)
+ -r SECONDS respawn background process after SECONDS (defaults to no respawn)
+ -o FILE redirect background stdout to FILE (defaults to $STDOUT_FILE)
+ -e FILE redirect background stderr to FILE (defaults to $STDERR_FILE)
+ -s display the status of the background process
+  -k          kill the background process; it will be respawned if configured
+ -d shutdown the background process
+
+Report bugs at <%bug_uri%>.
+EOF
+}
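+
+# Illustrative invocations (added examples, not part of the generated script):
+#   couchdb                           # run in the foreground
+#   couchdb -b -r 5                   # background, respawn 5 seconds after a crash
+#   couchdb -a /path/to/extra.ini -c  # append a config file and print the chain
+#   couchdb -d                        # shut the background process down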
+
+display_error () {
+ if test -n "$1"; then
+ echo $1 >&2
+ fi
+ echo >&2
+ echo "Try \`"$basename" -h' for more information." >&2
+ false
+}
+
+_get_pid () {
+ if test -f $PID_FILE; then
+ PID=`cat $PID_FILE`
+ fi
+ echo $PID
+}
+
+_add_config_file () {
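+    # Added note: each file is accumulated three ways - newline-separated for
+    # "-c" printing, space-separated for the erl -couch_ini flag, and as
+    # repeated "-a" flags for the recursive background invocation below.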
+ if test -z "$print_arguments"; then
+ print_arguments="$1"
+ else
+ print_arguments="`cat <<EOF
+$print_arguments
+$1
+EOF
+`"
+ fi
+ start_arguments="$start_arguments $1"
+ background_start_arguments="$background_start_arguments -a $1"
+}
+
+_add_config_dir () {
+ for file in "$1"/*.ini; do
+ if [ -r "$file" ]; then
+ _add_config_file "$file"
+ fi
+ done
+}
+
+_load_config () {
+ _add_config_file "$DEFAULT_CONFIG_FILE"
+ _add_config_dir "$DEFAULT_CONFIG_DIR"
+ _add_config_file "$LOCAL_CONFIG_FILE"
+ _add_config_dir "$LOCAL_CONFIG_DIR"
+}
+
+_reset_config () {
+ print_arguments=""
+ start_arguments=""
+ background_start_arguments="-n"
+}
+
+_print_config () {
+ cat <<EOF
+$print_arguments
+EOF
+}
+
+check_status () {
+ PID=`_get_pid`
+ if test -n "$PID"; then
+ if kill -0 $PID 2> /dev/null; then
+ echo "Apache CouchDB is running as process $PID, time to relax."
+ return $SCRIPT_OK
+ else
+        cat << EOF >&2
+Apache CouchDB is not running but a stale PID file exists: $PID_FILE
+EOF
+ fi
+ else
+ echo "Apache CouchDB is not running." >&2
+ fi
+ return $SCRIPT_ERROR
+}
+
+check_environment () {
+ if test "$BACKGROUND" != "true"; then
+ return
+ fi
+ touch $PID_FILE 2> /dev/null || true
+ touch $STDOUT_FILE 2> /dev/null || true
+ touch $STDERR_FILE 2> /dev/null || true
+ message_prefix="Apache CouchDB needs write permission on the"
+ if test ! -w $PID_FILE; then
+ echo "$message_prefix PID file: $PID_FILE" >&2
+ false
+ fi
+ if test ! -w $STDOUT_FILE; then
+ echo "$message_prefix STDOUT file: $STDOUT_FILE" >&2
+ false
+ fi
+ if test ! -w $STDERR_FILE; then
+ echo "$message_prefix STDERR file: $STDERR_FILE" >&2
+ false
+ fi
+ message_prefix="Apache CouchDB needs a regular"
+ if test `echo 2> /dev/null >> $PID_FILE; echo $?` -gt 0; then
+ echo "$message_prefix PID file: $PID_FILE" >&2
+ false
+ fi
+ if test `echo 2> /dev/null >> $STDOUT_FILE; echo $?` -gt 0; then
+ echo "$message_prefix STDOUT file: $STDOUT_FILE" >&2
+ false
+ fi
+ if test `echo 2> /dev/null >> $STDERR_FILE; echo $?` -gt 0; then
+ echo "$message_prefix STDERR file: $STDERR_FILE" >&2
+ false
+ fi
+}
+
+start_couchdb () {
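+    # Added summary: three modes - plain foreground exec (the default); "-b"
+    # re-invokes this script in the background with -R; the recursed copy runs
+    # the VM under heart, respawning it after $RESPAWN_TIMEOUT seconds on crashes.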
+ if test ! "$RECURSED" = "true"; then
+ if check_status 2> /dev/null; then
+ exit
+ fi
+ check_environment
+ fi
+ interactive_option="+Bd -noinput"
+ if test "$INTERACTIVE" = "true"; then
+ interactive_option=""
+ fi
+ if test "$BACKGROUND" = "true"; then
+ touch $PID_FILE
+ interactive_option="+Bd -noinput"
+ fi
+ command="%ERL% $interactive_option $ERL_START_OPTIONS \
+ -env ERL_LIBS %localerlanglibdir% -couch_ini $start_arguments -s couch"
+ if test "$BACKGROUND" = "true" -a "$RECURSED" = "false"; then
+ $0 $background_start_arguments -b -r $RESPAWN_TIMEOUT -p $PID_FILE \
+ -o $STDOUT_FILE -e $STDERR_FILE -R &
+ echo "Apache CouchDB has started, time to relax."
+ else
+ if test "$RECURSED" = "true"; then
+ while true; do
+ export HEART_COMMAND
+ export HEART_BEAT_TIMEOUT
+ `eval $command -pidfile $PID_FILE -heart \
+ >> $STDOUT_FILE 2>> $STDERR_FILE` || true
+ PID=`_get_pid`
+ if test -n "$PID"; then
+ if kill -0 $PID 2> /dev/null; then
+ return $SCRIPT_ERROR
+ fi
+ else
+ return $SCRIPT_OK
+ fi
+ if test "$RESPAWN_TIMEOUT" = "0"; then
+ return $SCRIPT_OK
+ fi
+ if test "$RESPAWN_TIMEOUT" != "1"; then
+ plural_ending="s"
+ fi
+ cat << EOF
+Apache CouchDB crashed, restarting in $RESPAWN_TIMEOUT second$plural_ending.
+EOF
+ sleep $RESPAWN_TIMEOUT
+ done
+ else
+ eval exec $command
+ fi
+ fi
+}
+
+stop_couchdb () {
+ PID=`_get_pid`
+ if test -n "$PID"; then
+ if test "$1" = "false"; then
+ echo > $PID_FILE
+ fi
+ if kill -0 $PID 2> /dev/null; then
+ if kill -1 $PID 2> /dev/null; then
+ if test "$1" = "false"; then
+ echo "Apache CouchDB has been shutdown."
+ else
+ echo "Apache CouchDB has been killed."
+ fi
+ return $SCRIPT_OK
+ else
+ echo "Apache CouchDB could not be killed." >&2
+ return $SCRIPT_ERROR
+            fi
+        else
+            if test "$1" = "false"; then
+                echo "Stale PID file exists but Apache CouchDB is not running."
+            else
+                echo "Stale PID file existed but Apache CouchDB is not running."
+            fi
+        fi
+ else
+ echo "Apache CouchDB is not running."
+ fi
+}
+
+parse_script_option_list () {
+ _load_config
+ set +e
+ options=`getopt hVa:A:ncibp:r:Ro:e:skd $@`
+ if test ! $? -eq 0; then
+ display_error
+ fi
+ set -e
+ eval set -- $options
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ -h) shift; display_help; exit;;
+ -V) shift; display_version; exit;;
+ -a) shift; _add_config_file "$1"; shift;;
+ -A) shift; _add_config_dir "$1"; shift;;
+ -n) shift; _reset_config;;
+ -c) shift; _print_config; exit;;
+ -i) shift; INTERACTIVE=true;;
+ -b) shift; BACKGROUND=true;;
+ -r) shift; RESPAWN_TIMEOUT=$1; shift;;
+ -R) shift; RECURSED=true;;
+ -p) shift; PID_FILE=$1; shift;;
+ -o) shift; STDOUT_FILE=$1; shift;;
+ -e) shift; STDERR_FILE=$1; shift;;
+ -s) shift; check_status; exit;;
+ -k) shift; KILL=true;;
+ -d) shift; SHUTDOWN=true;;
+ --) shift; break;;
+ *) display_error "Unknown option: $1" >&2;;
+ esac
+ done
+ if test "$KILL" = "true" -o "$SHUTDOWN" = "true"; then
+ stop_couchdb $KILL
+ else
+ start_couchdb
+ fi
+}
+
+parse_script_option_list $@
diff --git a/1.1.x/bin/couchjs.tpl.in b/1.1.x/bin/couchjs.tpl.in
new file mode 100644
index 00000000..6927a0d4
--- /dev/null
+++ b/1.1.x/bin/couchjs.tpl.in
@@ -0,0 +1,92 @@
+#! /bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+SCRIPT_OK=0
+SCRIPT_ERROR=1
+
+DEFAULT_VERSION=170
+
+basename=`basename $0`
+
+display_version () {
+ cat << EOF
+$basename - %package_name% %version%
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+EOF
+}
+
+display_help () {
+ cat << EOF
+Usage: $basename [FILE]
+
+The $basename command runs the %package_name% JavaScript interpreter.
+
+The exit status is 0 for success or 1 for failure.
+
+Options:
+
+ -h display a short help message and exit
+ -V display version information and exit
+
+Report bugs at <%bug_uri%>.
+EOF
+}
+
+display_error () {
+ if test -n "$1"; then
+ echo $1 >&2
+ fi
+ echo >&2
+ echo "Try \`"$basename" -h' for more information." >&2
+ exit $SCRIPT_ERROR
+}
+
+run_couchjs () {
+ exec %locallibbindir%/%couchjs_command_name% $@
+}
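+
+# Example (illustrative path): `couchjs /usr/local/share/couchdb/server/main.js`
+# starts the JavaScript view-server loop that CouchDB drives over stdin/stdout.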
+
+parse_script_option_list () {
+ set +e
+ options=`getopt hV $@`
+ if test ! $? -eq 0; then
+ display_error
+ fi
+ set -e
+ eval set -- $options
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ -h) shift; display_help; exit $SCRIPT_OK;;
+ -V) shift; display_version; exit $SCRIPT_OK;;
+ --) shift; break;;
+ *) break;;
+ esac
+ done
+ option_list=`echo $@ | sed 's/--//'`
+ if test -z "$option_list"; then
+ display_error "You must specify a FILE."
+ fi
+ run_couchjs $option_list
+}
+
+parse_script_option_list $@
diff --git a/1.1.x/bootstrap b/1.1.x/bootstrap
new file mode 100755
index 00000000..0c576c40
--- /dev/null
+++ b/1.1.x/bootstrap
@@ -0,0 +1,68 @@
+#!/bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+get () {
+ variable_name=$1
+ echo "changequote(\`[', \`]')" > acinclude.m4.tmp
+ sed -e "s/m4_//" < acinclude.m4.in >> acinclude.m4.tmp
+ echo $variable_name >> acinclude.m4.tmp
+ if test -x "`which gm4 2> /dev/null || true`"; then
+ gm4 acinclude.m4.tmp | grep -v "^$" || true
+ else
+ if test -x "`which m4 2> /dev/null || true`"; then
+ m4 acinclude.m4.tmp | grep -v "^$" || true
+ else
+ echo unknown
+ fi
+ fi
+ rm -f acinclude.m4.tmp
+}
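+
+# Example (added for clarity): `get LOCAL_VERSION_STAGE` expands the
+# m4_define'd LOCAL_VERSION_STAGE macro from acinclude.m4.in and prints its
+# value, or "unknown" when neither gm4 nor m4 is available.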
+
+mkdir -p build-aux
+
+if test -z "$REVISION"; then
+ if test -d .git; then
+ REVISION=`git log --pretty="format:%h" | head -1`-git
+ else
+ # default to svn
+ REVISION=`\`which svn\` info . 2> /dev/null | awk "/Revision:/{print \\$2}"`
+ fi
+fi
+if test -z "`get LOCAL_VERSION_STAGE`" -o -z "$REVISION"; then
+ sed "s/%release%//" < acinclude.m4.in > acinclude.m4
+else
+ sed "s/%release%/$REVISION/" < acinclude.m4.in > acinclude.m4
+fi
+
+gunzip -c m4/ac_check_icu.m4.gz > m4/ac_check_icu.m4
+gunzip -c m4/ac_check_curl.m4.gz > m4/ac_check_curl.m4
+
+if test -x "`which glibtoolize 2> /dev/null || true`"; then
+ glibtoolize -f -c --automake
+else
+ libtoolize -f -c --automake
+fi
+
+aclocal -I m4
+autoheader -f
+automake -f -a 2>&1 | sed -e "/install/d"
+autoconf -f
+
+ln -f -s "`dirname \`readlink build-aux/missing\``/INSTALL"
+
+cat << EOF
+You have bootstrapped Apache CouchDB, time to relax.
+
+Run \`./configure' to configure the source before you install.
+EOF
diff --git a/1.1.x/configure.ac b/1.1.x/configure.ac
new file mode 100644
index 00000000..b776a258
--- /dev/null
+++ b/1.1.x/configure.ac
@@ -0,0 +1,440 @@
+dnl Licensed under the Apache License, Version 2.0 (the "License"); you may not
+dnl use this file except in compliance with the License. You may obtain a
+dnl copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+dnl WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+dnl License for the specific language governing permissions and limitations
+dnl under the License.
+
+AC_INIT([LOCAL_PACKAGE_NAME], [LOCAL_VERSION], [], [LOCAL_PACKAGE_TARNAME])
+
+AC_PREREQ([2.59])
+
+AC_CONFIG_SRCDIR([CHANGES])
+AC_CONFIG_AUX_DIR([build-aux])
+AC_CONFIG_MACRO_DIR([m4])
+
+AM_CONFIG_HEADER([config.h])
+
+AM_INIT_AUTOMAKE([1.6.3 foreign])
+
+AC_GNU_SOURCE
+AC_ENABLE_SHARED
+AC_DISABLE_STATIC
+
+AC_PROG_CC
+AC_PROG_LIBTOOL
+AC_PROG_LN_S
+
+AC_MSG_CHECKING([for pthread_create in -lpthread])
+
+original_LIBS="$LIBS"
+LIBS="-lpthread $original_LIBS"
+
+AC_TRY_LINK([#include<pthread.h>],
+ [pthread_create((void *)0, (void *)0, (void *)0, (void *)0)],
+ [pthread=yes], [pthread=no])
+
+if test x${pthread} = xyes; then
+ AC_MSG_RESULT([yes])
+else
+ LIBS="$original_LIBS"
+ AC_MSG_RESULT([no])
+fi
+
+AC_ARG_WITH([erlang], [AC_HELP_STRING([--with-erlang=PATH],
+ [set PATH to the Erlang include directory])], [
+ ERLANG_FLAGS="-I$withval"
+], [
+ ERLANG_FLAGS="-I${libdir}/erlang/usr/include"
+ ERLANG_FLAGS="$ERLANG_FLAGS -I/usr/lib/erlang/usr/include"
+ ERLANG_FLAGS="$ERLANG_FLAGS -I/usr/local/lib/erlang/usr/include"
+ ERLANG_FLAGS="$ERLANG_FLAGS -I/opt/local/lib/erlang/usr/include"
+])
+
+AC_ARG_WITH([js-include], [AC_HELP_STRING([--with-js-include=PATH],
+ [set PATH to the SpiderMonkey include directory])], [
+ JS_INCLUDE="$withval"
+ JS_FLAGS="-I$JS_INCLUDE"
+], [
+ JS_FLAGS="-I/usr/include"
+ JS_FLAGS="$JS_FLAGS -I/usr/include/js"
+ JS_FLAGS="$JS_FLAGS -I/usr/include/mozjs"
+ JS_FLAGS="$JS_FLAGS -I/usr/local/include"
+ JS_FLAGS="$JS_FLAGS -I/opt/local/include"
+ JS_FLAGS="$JS_FLAGS -I/usr/local/include/js"
+ JS_FLAGS="$JS_FLAGS -I/opt/local/include/js"
+])
+
+AC_ARG_WITH([js-lib], [AC_HELP_STRING([--with-js-lib=PATH],
+ [set PATH to the SpiderMonkey library directory])],
+ [
+ JS_LIB_DIR=$withval
+ JS_LIB_FLAGS="-L$withval"
+], [
+ JS_LIB_DIR=
+])
+AC_SUBST(JS_LIB_DIR)
+
+AC_ARG_VAR([ERLC_FLAGS], [general flags to prepend to the erlc command line])
+AC_ARG_VAR([FLAGS], [general flags to prepend to LDFLAGS and CPPFLAGS])
+
+LIB_FLAGS="$JS_LIB_FLAGS -L/usr/local/lib -L/opt/local/lib"
+LIBS="$LIB_FLAGS $LIBS"
+
+case "$(uname -s)" in
+ CYGWIN*)
+ FLAGS="$LIB_FLAGS $ERLANG_FLAGS $JS_FLAGS -DXP_WIN $FLAGS"
+ CPPFLAGS="$FLAGS $CPPFLAGS"
+ LDFLAGS="$FLAGS $LDFLAGS"
+ IS_WINDOWS="TRUE"
+ # The erlang cc.sh/ld.sh scripts will convert a -O option
+ # into the same optimization flags erlang itself uses.
+ CFLAGS="-O2"
+ LTCFLAGS="$CFLAGS"
+ ;;
+ *)
+    # XP_UNIX is required for jsapi.h and has been tested to work on Linux and Darwin.
+ FLAGS="$LIB_FLAGS $ERLANG_FLAGS $JS_FLAGS -DXP_UNIX $FLAGS"
+ CPPFLAGS="$FLAGS $CPPFLAGS"
+    # manually linking libm is required for FreeBSD 7.0
+ LDFLAGS="$FLAGS -lm $LDFLAGS"
+ ;;
+esac
+
+AM_CONDITIONAL([WINDOWS], [test x$IS_WINDOWS = xTRUE])
+
+AC_CHECK_LIB([mozjs], [JS_NewContext], [JS_LIB_BASE=mozjs], [
+ AC_CHECK_LIB([js], [JS_NewContext], [JS_LIB_BASE=js], [
+ AC_CHECK_LIB([js3250], [JS_NewContext], [JS_LIB_BASE=js3250], [
+ AC_CHECK_LIB([js32], [JS_NewContext], [JS_LIB_BASE=js32], [
+ AC_MSG_ERROR([Could not find the js library.
+
+Is the Mozilla SpiderMonkey library installed?])])])])])
+
+AC_SUBST(JS_LIB_BASE)
+
+if test x${IS_WINDOWS} = xTRUE; then
+ if test -f "$JS_LIB_DIR/$JS_LIB_BASE.dll"; then
+ # seamonkey 1.7- build layout on Windows
+ JS_LIB_BINARY="$JS_LIB_DIR/$JS_LIB_BASE.dll"
+ else
+ # seamonkey 1.8+ build layout on Windows
+ if test -f "$JS_LIB_DIR/../bin/$JS_LIB_BASE.dll"; then
+ JS_LIB_BINARY="$JS_LIB_DIR/../bin/$JS_LIB_BASE.dll"
+ else
+ AC_MSG_ERROR([Could not find $JS_LIB_BASE.dll.])
+ fi
+ fi
+ AC_SUBST(JS_LIB_BINARY)
+
+ # On windows we need to know the path to the openssl binaries.
+ AC_ARG_WITH([openssl-bin-dir], [AC_HELP_STRING([--with-openssl-bin-dir=PATH],
+ [path to the open ssl binaries for distribution on Windows])], [
+ openssl_bin_dir=`cygpath -m "$withval"`
+ AC_SUBST(openssl_bin_dir)
+ ], [])
+
+ # Windows uses Inno setup - look for its compiler.
+ AC_PATH_PROG([INNO_COMPILER_EXECUTABLE], [iscc])
+ if test x${INNO_COMPILER_EXECUTABLE} = x; then
+ AC_MSG_WARN([You will be unable to build the Windows installer.])
+ fi
+
+ # We need the msvc redistributables for this platform too
+ # (in theory we could just install the assembly locally - but
+ # there are at least 4 directories with binaries, meaning 4 copies;
+ # so using the redist .exe means it ends up installed globally...)
+ AC_ARG_WITH([msvc-redist-dir], [AC_HELP_STRING([--with-msvc-redist-dir=PATH],
+ [path to the msvc redistributables for the Windows platform])], [
+ msvc_redist_dir=`cygpath -m "$withval"`
+ msvc_redist_name="vcredist_x86.exe"
+ AC_SUBST(msvc_redist_dir)
+ AC_SUBST(msvc_redist_name)
+ ], [])
+ if test ! -f ${msvc_redist_dir}/${msvc_redist_name}; then
+ AC_MSG_WARN([The MSVC redistributable seems to be missing; expect the installer to fail.])
+ fi
+fi
+
+JSLIB=-l$JS_LIB_BASE
+
+AC_CHECK_HEADER([jsapi.h], [], [
+ AC_CHECK_HEADER([js/jsapi.h],
+ [
+ CPPFLAGS="$CPPFLAGS -I$JS_INCLUDE/js"
+ ],
+ [
+ AC_MSG_ERROR([Could not find the jsapi header.
+
+Are the Mozilla SpiderMonkey headers installed?])
+ ])])
+
+AC_SUBST(JSLIB)
+
+AC_LANG_PUSH(C)
+OLD_CFLAGS="$CFLAGS"
+CFLAGS="-Werror-implicit-function-declaration"
+AC_COMPILE_IFELSE(
+ [AC_LANG_PROGRAM(
+ [[#include <jsapi.h>]],
+ [[JS_SetOperationCallback(0, 0);]]
+ )],
+ AC_DEFINE([USE_JS_SETOPCB], [], [Use new JS_SetOperationCallback])
+)
+CFLAGS="$OLD_CFLAGS"
+AC_LANG_POP(C)
+
+AC_ARG_WITH([win32-icu-binaries], [AC_HELP_STRING([--with-win32-icu-binaries=PATH],
+ [set PATH to the Win32 native ICU binaries directory])], [
+ ICU_CONFIG="" # supposed to be a command to query options...
+ ICU_LOCAL_CFLAGS="-I$withval/include"
+ ICU_LOCAL_LDFLAGS="-L$withval/lib"
+ ICU_LOCAL_BIN=$withval/bin
+], [
+ AC_CHECK_ICU([3.4.1])
+ ICU_LOCAL_CFLAGS=`$ICU_CONFIG --cppflags-searchpath`
+ ICU_LOCAL_LDFLAGS=`$ICU_CONFIG --ldflags-searchpath`
+ ICU_LOCAL_BIN=
+])
+
+AC_SUBST(ICU_CONFIG)
+AC_SUBST(ICU_LOCAL_CFLAGS)
+AC_SUBST(ICU_LOCAL_LDFLAGS)
+AC_SUBST(ICU_LOCAL_BIN)
+
+AC_ARG_WITH([win32-curl], [AC_HELP_STRING([--with-win32-curl=PATH],
+ [set PATH to the Win32 native curl directory])], [
+ # default build on windows is a static lib, and that's what we want too
+ CURL_CFLAGS="-I$withval/include -DCURL_STATICLIB"
+ CURL_LIBS="$withval/lib/libcurl"
+ CURL_LDFLAGS="-l$CURL_LIBS -lWs2_32 -lkernel32 -luser32 -ladvapi32 -lWldap32"
+], [
+ AC_CHECK_CURL([7.18.0])
+ CURL_LDFLAGS=-lcurl
+])
+
+AC_SUBST(CURL_CFLAGS)
+AC_SUBST(CURL_LIBS)
+AC_SUBST(CURL_LDFLAGS)
+
+case "$(uname -s)" in
+ Linux)
+ LIBS="$LIBS -lcrypt"
+ CPPFLAGS="-D_XOPEN_SOURCE $CPPFLAGS"
+ ;;
+ FreeBSD)
+ LIBS="$LIBS -lcrypt"
+ ;;
+ OpenBSD)
+ LIBS="$LIBS -lcrypto"
+ ;;
+esac
+
+AC_PATH_PROG([ERL], [erl])
+
+if test x${ERL} = x; then
+ AC_MSG_ERROR([Could not find the `erl' executable. Is Erlang installed?])
+fi
+
+erlang_version_error="The installed Erlang version is less than 5.6.5 (R12B05)."
+
+version="`${ERL} -version 2>&1 | ${SED} "s/[[^0-9]]/ /g"`"
+
+if test `echo $version | ${AWK} "{print \\$1}"` -lt 5; then
+ AC_MSG_ERROR([$erlang_version_error])
+fi
+
+if test `echo $version | ${AWK} "{print \\$2}"` -lt 6; then
+ AC_MSG_ERROR([$erlang_version_error])
+fi
+
+if test `echo $version | ${AWK} "{print \\$2}"` -eq 6; then
+ if test `echo $version | ${AWK} "{print \\$3}"` -lt 5; then
+ AC_MSG_ERROR([$erlang_version_error])
+ fi
+fi
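+
+dnl For example (added note), `erl -version` printing "... version 5.8.4" is
+dnl reduced by the sed above to "5 8 4", and the three awk'd fields satisfy
+dnl the 5.6.5 (R12B05) minimum checked here.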
+
+has_crypto=`${ERL} -eval "case application:load(crypto) of ok -> ok; _ -> exit(no_crypto) end." -noshell -s init stop`
+
+if test -n "$has_crypto"; then
+ AC_MSG_ERROR([Could not find the Erlang crypto library. Has Erlang been compiled with OpenSSL support?])
+fi
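+
+dnl Added note: the eval above prints nothing when crypto loads successfully,
+dnl so any captured output signals a missing or broken crypto application.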
+
+AC_PATH_PROG([ERLC], [erlc])
+
+if test x${ERLC} = x; then
+ AC_MSG_ERROR([Could not find the `erlc' executable. Is Erlang installed?])
+fi
+
+AC_CHECK_HEADER([erl_driver.h], [], [
+ AC_MSG_ERROR([Could not find the `erl_driver.h' header.
+
+Are the Erlang headers installed? Use the `--with-erlang' option to specify the
+path to the Erlang include directory.])])
+
+AC_PATH_PROG([HELP2MAN_EXECUTABLE], [help2man])
+if test x${HELP2MAN_EXECUTABLE} = x; then
+ AC_MSG_WARN([You will be unable to regenerate any man pages.])
+fi
+
+use_init=yes
+use_launchd=yes
+
+AC_ARG_ENABLE([init], [AC_HELP_STRING([--disable-init],
+ [don't install init script where applicable])], [
+ use_init=$enableval
+], [])
+
+AC_ARG_ENABLE([launchd], [AC_HELP_STRING([--disable-launchd],
+ [don't install launchd configuration where applicable])], [
+ use_launchd=$enableval
+], [])
+
+init_enabled=false
+launchd_enabled=false
+
+if test "$use_init" = "yes"; then
+ AC_MSG_CHECKING(location of init directory)
+ if test -d /etc/rc.d; then
+ init_enabled=true
+ AC_SUBST([initdir], ['${sysconfdir}/rc.d'])
+ AC_MSG_RESULT(${initdir})
+ else
+ if test -d /etc/init.d; then
+ init_enabled=true
+ AC_SUBST([initdir], ['${sysconfdir}/init.d'])
+ AC_MSG_RESULT(${initdir})
+ else
+ AC_MSG_RESULT(not found)
+ fi
+ fi
+fi
+
+if test "$use_launchd" = "yes"; then
+ AC_MSG_CHECKING(location of launchd directory)
+ if test -d /Library/LaunchDaemons; then
+ init_enabled=false
+ launchd_enabled=true
+ AC_SUBST([launchddir], ['${prefix}/Library/LaunchDaemons'])
+ AC_MSG_RESULT(${launchddir})
+ else
+ AC_MSG_RESULT(not found)
+ fi
+fi
+
+AC_ARG_VAR([ERL], [path to the `erl' executable])
+AC_ARG_VAR([ERLC], [path to the `erlc' executable])
+AC_ARG_VAR([HELP2MAN_EXECUTABLE], [path to the `help2man' program])
+
+if test -n "$HELP2MAN_EXECUTABLE"; then
+ help2man_enabled=true
+else
+ if test -f "$srcdir/bin/couchdb.1" -a -f "$srcdir/bin/couchjs.1"; then
+ help2man_enabled=true
+ else
+ help2man_enabled=false
+ fi
+fi
+
+AM_CONDITIONAL([INIT], [test x${init_enabled} = xtrue])
+AM_CONDITIONAL([LAUNCHD], [test x${launchd_enabled} = xtrue])
+AM_CONDITIONAL([HELP2MAN], [test x${help2man_enabled} = xtrue])
+
+AC_SUBST([package_author_name], ["LOCAL_PACKAGE_AUTHOR_NAME"])
+AC_SUBST([package_author_address], ["LOCAL_PACKAGE_AUTHOR_ADDRESS"])
+AC_SUBST([package_identifier], ["LOCAL_PACKAGE_IDENTIFIER"])
+AC_SUBST([package_tarname], ["LOCAL_PACKAGE_TARNAME"])
+AC_SUBST([package_name], ["LOCAL_PACKAGE_NAME"])
+
+AC_SUBST([version], ["LOCAL_VERSION"])
+AC_SUBST([version_major], ["LOCAL_VERSION_MAJOR"])
+AC_SUBST([version_minor], ["LOCAL_VERSION_MINOR"])
+AC_SUBST([version_revision], ["LOCAL_VERSION_REVISION"])
+AC_SUBST([version_stage], ["LOCAL_VERSION_STAGE"])
+AC_SUBST([version_release], ["LOCAL_VERSION_RELEASE"])
+
+AC_SUBST([bug_uri], ["LOCAL_BUG_URI"])
+
+AC_SUBST([localconfdir], [${sysconfdir}/${package_identifier}])
+AC_SUBST([localdatadir], [${datadir}/${package_identifier}])
+AC_SUBST([localdocdir], [${datadir}/doc/${package_identifier}])
+AC_SUBST([locallibdir], [${libdir}/${package_identifier}])
+AC_SUBST([localstatelibdir], [${localstatedir}/lib/${package_identifier}])
+AC_SUBST([localstatelogdir], [${localstatedir}/log/${package_identifier}])
+AC_SUBST([localstaterundir], [${localstatedir}/run/${package_identifier}])
+
+# On Windows we install directly into our erlang distribution.
+if test x${IS_WINDOWS} = xTRUE; then
+ AC_SUBST([locallibbindir], [${prefix}/bin])
+ AC_SUBST([localerlanglibdir], [${libdir}])
+else
+ AC_SUBST([locallibbindir], [${locallibdir}/bin])
+ AC_SUBST([localerlanglibdir], [${locallibdir}/erlang/lib])
+fi
+
+# fix for older autotools that don't define "abs_top_YYY" by default
+AC_SUBST(abs_top_srcdir)
+AC_SUBST(abs_top_builddir)
+
+AC_REVISION([LOCAL_VERSION])
+
+AC_CONFIG_FILES([Makefile])
+AC_CONFIG_FILES([bin/couchjs.tpl])
+AC_CONFIG_FILES([bin/couchdb.tpl])
+AC_CONFIG_FILES([bin/couchdb.bat.tpl])
+AC_CONFIG_FILES([bin/Makefile])
+AC_CONFIG_FILES([etc/couchdb/Makefile])
+AC_CONFIG_FILES([etc/couchdb/default.ini.tpl])
+AC_CONFIG_FILES([etc/default/Makefile])
+AC_CONFIG_FILES([etc/init/couchdb.tpl])
+AC_CONFIG_FILES([etc/init/Makefile])
+AC_CONFIG_FILES([etc/launchd/org.apache.couchdb.plist.tpl])
+AC_CONFIG_FILES([etc/launchd/Makefile])
+AC_CONFIG_FILES([etc/logrotate.d/couchdb.tpl])
+AC_CONFIG_FILES([etc/logrotate.d/Makefile])
+AC_CONFIG_FILES([etc/windows/Makefile])
+AC_CONFIG_FILES([etc/Makefile])
+AC_CONFIG_FILES([share/Makefile])
+AC_CONFIG_FILES([src/Makefile])
+AC_CONFIG_FILES([src/couchdb/couch.app.tpl])
+AC_CONFIG_FILES([src/couchdb/Makefile])
+AC_CONFIG_FILES([src/couchdb/priv/Makefile])
+AC_CONFIG_FILES([src/erlang-oauth/Makefile])
+AC_CONFIG_FILES([src/etap/Makefile])
+AC_CONFIG_FILES([src/ibrowse/Makefile])
+AC_CONFIG_FILES([src/mochiweb/Makefile])
+AC_CONFIG_FILES([test/Makefile])
+AC_CONFIG_FILES([test/bench/Makefile])
+AC_CONFIG_FILES([test/etap/Makefile])
+AC_CONFIG_FILES([test/etap/test_util.erl])
+AC_CONFIG_FILES([test/javascript/Makefile])
+AC_CONFIG_FILES([test/view_server/Makefile])
+AC_CONFIG_FILES([utils/Makefile])
+AC_CONFIG_FILES([var/Makefile])
+
+AC_OUTPUT
+
+# *sob* - on Windows libtool fails as 'libname_spec' isn't correct (it
+# expects GNU style lib names). I can't work out how to configure this
+# option sanely, so we pass the script through sed to modify it.
+# Also, the erlang cc.sh script doesn't cope well with the '-link' command
+# line option libtool provides.
+# PLEASE, someone help put this out of its misery!!
+# This hackery is being tracked via COUCHDB-440.
+if test x${IS_WINDOWS} = xTRUE; then
+ sed -e 's,libname_spec="lib\\$name",libname_spec="\\\$name",' \
+ -e 's,-link,,' \
+ < libtool > libtool.tmp
+ mv libtool.tmp libtool
+ # probably would chmod +x if we weren't on windows...
+fi
+
+echo
+echo "You have configured Apache CouchDB, time to relax."
+echo
+echo "Run \`make && sudo make install' to install."
diff --git a/1.1.x/etc/Makefile.am b/1.1.x/etc/Makefile.am
new file mode 100644
index 00000000..148f7015
--- /dev/null
+++ b/1.1.x/etc/Makefile.am
@@ -0,0 +1,117 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+SUBDIRS = couchdb default init launchd logrotate.d windows
+
+default_sysconf_data = logrotate.d/couchdb
+
+if INIT
+init_sysconf_data = default/couchdb
+init_DATA = init/couchdb
+endif
+
+if LAUNCHD
+launchd_DATA = launchd/org.apache.couchdb.plist
+endif
+
+nobase_sysconf_DATA = $(default_sysconf_data) $(init_sysconf_data)
+
+EXTRA_DIST = \
+ default/couchdb \
+ windows/couchdb.iss.tpl \
+ windows/README.txt.tpl
+
+if WINDOWS
+EXTRA_DIST += \
+ windows/setup-couchdb-@version@.exe.md5 \
+ windows/setup-couchdb-@version@.exe.sha
+endif
+
+CLEANFILES = $(init_DATA) $(default_sysconf_data) $(launchd_DATA)
+
+transform = @program_transform_name@
+couchdb_command_name = `echo couchdb | sed '$(transform)'`
+
+init/couchdb: init/couchdb.tpl
+ if test "$(mkdir_p)"; then \
+ $(mkdir_p) init; \
+ else \
+ if test ! -d init; then \
+ mkdir init; \
+ fi \
+ fi
+ sed -e "s|%configure_input%|$@. Generated from $< by configure.|" \
+ -e "s|%bindir%|$(bindir)|" \
+ -e "s|%sysconfdir%|$(sysconfdir)|" \
+ -e "s|%localstaterundir%|$(localstaterundir)|" \
+ -e "s|%couchdb_command_name%|$(couchdb_command_name)|" \
+ < $< > $@
+
+logrotate.d/couchdb: logrotate.d/couchdb.tpl
+ sed -e "s|%localstatelogdir%|@localstatelogdir@|g" < $< > $@
+
+launchd/org.apache.couchdb.plist: launchd/org.apache.couchdb.plist.tpl
+ if test "$(mkdir_p)"; then \
+ $(mkdir_p) launchd; \
+ else \
+ if test ! -d launchd; then \
+ mkdir launchd; \
+ fi \
+ fi
+ sed -e "s|%configure_input%|$@. Generated from $< by configure.|" \
+ -e "s|%bindir%|$(bindir)|" \
+ -e "s|%couchdb_command_name%|$(couchdb_command_name)|" \
+ < $< > $@
+
+if WINDOWS
+
+# README.txt has \n line endings in the repo and must have \r\n
+# when installed, because Notepad is used to view it.
+# Also: the targets below don't seem to update after an svn-up (which
+# changes the version string in the generated files) so we trick make
+# into always building it with the FORCE pattern...
+windows/README.txt: windows/README.txt.tpl FORCE
+ sed -e "s|%package_name%|@package_name@|g" \
+ -e "s|%version%|@version@|g" \
+ < $< | unix2dos > $@
+
+windows/couchdb.iss: windows/couchdb.iss.tpl FORCE
+ sed -e "s|%configure_input%|$@. Generated from $< by configure.|" \
+ -e "s|%package_name%|@package_name@|g" \
+ -e "s|%locallibbindir%|`cygpath -m @locallibbindir@`|g" \
+ -e "s|%version%|@version@|g" \
+ -e "s|%erts_version%|`$(ERL) -version 2>&1 | $(SED) 's/[^0-9\.]//g'`|g" \
+ -e "s|%openssl_bin_dir%|@openssl_bin_dir@|g" \
+ -e "s|%msvc_redist_dir%|@msvc_redist_dir@|g" \
+ -e "s|%msvc_redist_name%|@msvc_redist_name@|g" \
+ < $< > $@
+
+# The installer depends on all files, not just the source .iss file,
+# so we trick make into always building it with the FORCE pattern...
+windows/setup-couchdb-@version@.exe: windows/couchdb.iss windows/README.txt FORCE
+ "$(INNO_COMPILER_EXECUTABLE)" /q $<
+ @echo Windows Installer is at: `cygpath -a -w windows/setup-couchdb-@version@.exe`
+
+windows/setup-couchdb-@version@.exe.md5: windows/setup-couchdb-@version@.exe
+ cd windows && md5sum ./$(<F) > $(@F)
+
+windows/setup-couchdb-@version@.exe.sha: windows/setup-couchdb-@version@.exe
+ cd windows && sha1sum ./$(<F) > $(@F)
+
+FORCE:
+
+endif
+
+install-data-hook:
+ if test -n "$(init_DATA)"; then \
+ chmod +x "$(DESTDIR)$(initdir)/couchdb"; \
+ fi
diff --git a/1.1.x/etc/couchdb/Makefile.am b/1.1.x/etc/couchdb/Makefile.am
new file mode 100644
index 00000000..9367ceac
--- /dev/null
+++ b/1.1.x/etc/couchdb/Makefile.am
@@ -0,0 +1,86 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+couchprivlibdir = $(localerlanglibdir)/couch-$(version)/priv/lib
+devcouchprivlibdir = $(abs_top_builddir)/src/couchdb/priv/.libs
+
+localconf_DATA = default.ini
+noinst_DATA = default_dev.ini local_dev.ini
+
+EXTRA_DIST = local.ini
+
+CLEANFILES = $(localconf_DATA) $(noinst_DATA)
+
+transform = @program_transform_name@
+couchjs_command_name = `echo couchjs | sed '$(transform)'`
+couchjs_dev_command_name = `echo couchjs_dev | sed '$(transform)'`
+
+if WINDOWS
+default.ini: default.ini.tpl
+ sed -e "s|%bindir%|.|g" \
+ -e "s|%localconfdir%|$(localconfdir)|g" \
+ -e "s|%localdatadir%|../share/couchdb|g" \
+ -e "s|%localbuilddatadir%|../share/couchdb|g" \
+ -e "s|%localstatelibdir%|../var/lib/couchdb|g" \
+ -e "s|%localstatelogdir%|../var/log/couchdb|g" \
+ -e "s|%localstaterundir%|../var/run/couchdb|g" \
+ -e "s|%couchprivlibdir%|../lib/couch-$(version)/priv/lib|g" \
+ -e "s|%couchjs_command_name%|couchjs.exe|g" \
+ < $< > $@
+else
+default.ini: default.ini.tpl
+ sed -e "s|%bindir%|$(bindir)|g" \
+ -e "s|%localconfdir%|$(localconfdir)|g" \
+ -e "s|%localdatadir%|$(localdatadir)|g" \
+ -e "s|%localbuilddatadir%|$(localdatadir)|g" \
+ -e "s|%localstatelibdir%|$(localstatelibdir)|g" \
+ -e "s|%localstatelogdir%|$(localstatelogdir)|g" \
+ -e "s|%localstaterundir%|$(localstaterundir)|g" \
+ -e "s|%couchprivlibdir%|$(couchprivlibdir)|g" \
+ -e "s|%couchjs_command_name%|$(couchjs_command_name)|g" \
+ < $< > $@
+endif
+
+default_dev.ini: default.ini.tpl
+ sed -e "s|%bindir%|$(abs_top_builddir)/bin|g" \
+ -e "s|%localconfdir%|$(abs_top_builddir)/etc/couchdb|g" \
+ -e "s|%localdatadir%|$(abs_top_srcdir)/share|g" \
+ -e "s|%localbuilddatadir%|$(abs_top_builddir)/share|g" \
+ -e "s|%localstatelibdir%|$(abs_top_builddir)/tmp/lib|g" \
+ -e "s|%localstatelogdir%|$(abs_top_builddir)/tmp/log|g" \
+ -e "s|%localstaterundir%|$(abs_top_builddir)/tmp/run|g" \
+ -e "s|%couchprivlibdir%|$(devcouchprivlibdir)|g" \
+ -e "s|%couchjs_command_name%|$(couchjs_dev_command_name)|g" \
+ < $< > $@
+
+# Noah said to not specify local.ini but it borks
+# VPATH builds that make distcheck uses.
+local_dev.ini: local.ini
+ if test ! -f "$@"; then \
+ cp $< $@; \
+ fi
+
+install-data-hook:
+ if test ! -f "$(DESTDIR)$(localconfdir)/local.ini"; then \
+ cp $(srcdir)/local.ini "$(DESTDIR)$(localconfdir)/local.ini"; \
+ fi
+ if test ! "$(mkdir_p)" = ""; then \
+ $(mkdir_p) "$(DESTDIR)$(localconfdir)/default.d"; \
+ $(mkdir_p) "$(DESTDIR)$(localconfdir)/local.d"; \
+ else \
+ echo "WARNING: You may have to create these directories by hand."; \
+ mkdir -p "$(DESTDIR)$(localconfdir)/default.d"; \
+ mkdir -p "$(DESTDIR)$(localconfdir)/local.d"; \
+ fi
+
+uninstall-local:
+ rm -f "$(DESTDIR)/$(localconfdir)/local.ini"
diff --git a/1.1.x/etc/couchdb/default.ini.tpl.in b/1.1.x/etc/couchdb/default.ini.tpl.in
new file mode 100644
index 00000000..f5dc24af
--- /dev/null
+++ b/1.1.x/etc/couchdb/default.ini.tpl.in
@@ -0,0 +1,148 @@
+; @configure_input@
+
+; Upgrading CouchDB will overwrite this file.
+
+[couchdb]
+database_dir = %localstatelibdir%
+view_index_dir = %localstatelibdir%
+util_driver_dir = %couchprivlibdir%
+max_document_size = 4294967296 ; 4 GB
+os_process_timeout = 5000 ; 5 seconds. for view and external servers.
+max_dbs_open = 100
+delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
+uri_file = %localstaterundir%/couch.uri
+
+[httpd]
+port = 5984
+bind_address = 127.0.0.1
+authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+default_handler = {couch_httpd_db, handle_request}
+secure_rewrites = true
+vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
+allow_jsonp = false
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+
+[ssl]
+port = 6984
+
+[log]
+file = %localstatelogdir%/couch.log
+level = info
+include_sasl = true
+
+[couch_httpd_auth]
+authentication_db = _users
+authentication_redirect = /_utils/session.html
+require_valid_user = false
+timeout = 43200 ; number of seconds before automatic logout (12 hours)
+auth_cache_size = 50 ; size is number of cache entries
+
+[query_servers]
+javascript = %bindir%/%couchjs_command_name% %localbuilddatadir%/server/main.js
+
+; Changing reduce_limit to false will disable reduce_limit.
+; If you think you're hitting reduce_limit with a "good" reduce function,
+; please let us know on the mailing list so we can fine tune the heuristic.
+[query_server_config]
+reduce_limit = true
+os_process_limit = 25
+
+[daemons]
+view_manager={couch_view, start_link, []}
+external_manager={couch_external_manager, start_link, []}
+db_update_notifier={couch_db_update_notifier_sup, start_link, []}
+query_servers={couch_query_servers, start_link, []}
+httpd={couch_httpd, start_link, []}
+stats_aggregator={couch_stats_aggregator, start, []}
+stats_collector={couch_stats_collector, start, []}
+uuids={couch_uuids, start, []}
+auth_cache={couch_auth_cache, start_link, []}
+replication_manager={couch_replication_manager, start_link, []}
+vhosts={couch_httpd_vhost, start_link, []}
+os_daemons={couch_os_daemons, start_link, []}
+
+[httpd_global_handlers]
+/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
+favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "%localdatadir%/www"}
+
+_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "%localdatadir%/www"}
+_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
+_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
+_config = {couch_httpd_misc_handlers, handle_config_req}
+_replicate = {couch_httpd_misc_handlers, handle_replicate_req}
+_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
+_restart = {couch_httpd_misc_handlers, handle_restart_req}
+_stats = {couch_httpd_stats_handlers, handle_stats_req}
+_log = {couch_httpd_misc_handlers, handle_log_req}
+_session = {couch_httpd_auth, handle_session_req}
+_oauth = {couch_httpd_oauth, handle_oauth_req}
+
+[httpd_db_handlers]
+_view_cleanup = {couch_httpd_db, handle_view_cleanup_req}
+_compact = {couch_httpd_db, handle_compact_req}
+_design = {couch_httpd_db, handle_design_req}
+_temp_view = {couch_httpd_view, handle_temp_view_req}
+_changes = {couch_httpd_db, handle_changes_req}
+
+; The external module takes an optional argument allowing you to narrow it to a
+; single script. Otherwise the script name is inferred from the first path section
+; after _external's own path.
+; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
+; _external = {couch_httpd_external, handle_external_req}
+
+[httpd_design_handlers]
+_view = {couch_httpd_view, handle_view_req}
+_show = {couch_httpd_show, handle_doc_show_req}
+_list = {couch_httpd_show, handle_view_list_req}
+_info = {couch_httpd_db, handle_design_info_req}
+_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
+_update = {couch_httpd_show, handle_doc_update_req}
+
+; enable external as an httpd handler, then link it with commands here.
+; note, this api is still under consideration.
+; [external]
+; mykey = /path/to/mycommand
+
+; Here you can setup commands for CouchDB to manage
+; while it is alive. It will attempt to keep each command
+; alive if it exits.
+; [os_daemons]
+; some_daemon_name = /path/to/script -with args
+
+
+[uuids]
+; Known algorithms:
+; random - 128 bits of random awesome
+; All awesome, all the time.
+; sequential - monotonically increasing ids with random increments
+; First 26 hex characters are random. Last 6 increment in
+; random amounts until an overflow occurs. On overflow, the
+; random prefix is regenerated and the process starts over.
+; utc_random - Time since Jan 1, 1970 UTC with microseconds
+; First 14 characters are the time in hex. Last 18 are random.
+algorithm = sequential
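+; Illustrative sequential pair (hypothetical values, added example) showing
+; the shared 26-character random prefix and the incrementing 6-char suffix:
+;   9a1b2c3d4e5f60718293a4b5c60007d1
+;   9a1b2c3d4e5f60718293a4b5c6000a34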
+
+[stats]
+; rate is in milliseconds
+rate = 1000
+; sample intervals are in seconds
+samples = [0, 60, 300, 900]
+
+[attachments]
+compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+compressible_types = text/*, application/javascript, application/json, application/xml
+
+[replicator]
+db = _replicator
+max_replication_retry_count = 10
+max_http_sessions = 20
+max_http_pipeline_size = 50
+; set to true to validate peer certificates
+verify_ssl_certificates = false
+; file containing a list of peer trusted certificates (PEM format)
+; ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; maximum peer certificate depth (must be set even if certificate validation is off)
+ssl_certificate_max_depth = 3
diff --git a/1.1.x/etc/couchdb/local.ini b/1.1.x/etc/couchdb/local.ini
new file mode 100644
index 00000000..33380a32
--- /dev/null
+++ b/1.1.x/etc/couchdb/local.ini
@@ -0,0 +1,74 @@
+; CouchDB Configuration Settings
+
+; Custom settings should be made in this file. They will override settings
+; in default.ini, but unlike changes made to default.ini, this file won't be
+; overwritten on server upgrade.
+
+[couchdb]
+;max_document_size = 4294967296 ; bytes
+
+[httpd]
+;port = 5984
+;bind_address = 127.0.0.1
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+
+; Uncomment next line to trigger basic-auth popup on unauthorized requests.
+;WWW-Authenticate = Basic realm="administrator"
+
+; Uncomment next line to set the configuration modification whitelist. Only
+; whitelisted values may be changed via the /_config URLs. To allow the admin
+; to change this value over HTTP, remember to include {httpd,config_whitelist}
+; itself. Excluding it from the list would require editing this file to update
+; the whitelist.
+;config_whitelist = [{httpd,config_whitelist}, {log,level}, {etc,etc}]
+
+[httpd_global_handlers]
+;_google = {couch_httpd_proxy, handle_proxy_req, <<"http://www.google.com">>}
+
+[couch_httpd_auth]
+; If you set this to true, you should also uncomment the WWW-Authenticate line
+; above. If you don't configure a WWW-Authenticate header, CouchDB will send
+; Basic realm="server" in order to prevent you getting logged out.
+; require_valid_user = false
+
+[log]
+;level = debug
+
+[os_daemons]
+; For any commands listed here, CouchDB will attempt to ensure that
+; the process remains alive while CouchDB runs as well as shut them
+; down when CouchDB exits.
+;foo = /path/to/command -with args
+
+[daemons]
+; enable SSL support by uncommenting the following line and supply the PEM's below.
+; the default ssl port CouchDB listens on is 6984
+; httpsd = {couch_httpd, start_link, [https]}
+
+[ssl]
+;cert_file = /full/path/to/server_cert.pem
+;key_file = /full/path/to/server_key.pem
+
+; To enable Virtual Hosts in CouchDB, add a vhost = path directive. All requests to
+; the Virtual Host will be redirected to the path. In the example below all requests
+; to http://example.com/ are redirected to /database.
+; If you run CouchDB on a specific port, include the port number in the vhost:
+; example.com:5984 = /database
+
+[vhosts]
+;example.com = /database/
+
+[update_notification]
+;unique notifier name=/full/path/to/exe -with "cmd line arg"
+
+; To create an admin account uncomment the '[admins]' section below and add a
+; line in the format 'username = password'. When you next start CouchDB, it
+; will change the password to a hash (so that your passwords don't linger
+; around in plain-text files). You can add more admin accounts with more
+; 'username = password' lines. Don't forget to restart CouchDB after
+; changing this.
+[admins]
+;admin = mysecretpassword
diff --git a/1.1.x/etc/default/Makefile.am b/1.1.x/etc/default/Makefile.am
new file mode 100644
index 00000000..5b4faae0
--- /dev/null
+++ b/1.1.x/etc/default/Makefile.am
@@ -0,0 +1,13 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+## This file intentionally left blank.
diff --git a/1.1.x/etc/default/couchdb b/1.1.x/etc/default/couchdb
new file mode 100644
index 00000000..c2a3f2ae
--- /dev/null
+++ b/1.1.x/etc/default/couchdb
@@ -0,0 +1,7 @@
+# Sourced by init script for configuration.
+
+COUCHDB_USER=couchdb
+COUCHDB_STDOUT_FILE=/dev/null
+COUCHDB_STDERR_FILE=/dev/null
+COUCHDB_RESPAWN_TIMEOUT=5
+COUCHDB_OPTIONS=
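+
+# Example (illustrative): COUCHDB_OPTIONS="-p /var/run/couchdb/couchdb.pid"
+# passes extra couchdb flags through the init script, here overriding the
+# PID file location.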
diff --git a/1.1.x/etc/init/Makefile.am b/1.1.x/etc/init/Makefile.am
new file mode 100644
index 00000000..5b4faae0
--- /dev/null
+++ b/1.1.x/etc/init/Makefile.am
@@ -0,0 +1,13 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+## This file intentionally left blank.
diff --git a/1.1.x/etc/init/couchdb.tpl.in b/1.1.x/etc/init/couchdb.tpl.in
new file mode 100644
index 00000000..3b8d17ea
--- /dev/null
+++ b/1.1.x/etc/init/couchdb.tpl.in
@@ -0,0 +1,168 @@
+#!/bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+### BEGIN INIT INFO
+# Provides: couchdb
+# Required-Start: $local_fs $remote_fs
+# Required-Stop: $local_fs $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Apache CouchDB init script
+# Description: Apache CouchDB init script for the database server.
+### END INIT INFO
+
+SCRIPT_OK=0
+SCRIPT_ERROR=1
+
+DESCRIPTION="database server"
+NAME=couchdb
+SCRIPT_NAME=`basename $0`
+COUCHDB=%bindir%/%couchdb_command_name%
+CONFIGURATION_FILE=%sysconfdir%/default/couchdb
+RUN_DIR=%localstaterundir%
+LSB_LIBRARY=/lib/lsb/init-functions
+
+if test ! -x $COUCHDB; then
+ exit $SCRIPT_ERROR
+fi
+
+if test -r $CONFIGURATION_FILE; then
+ . $CONFIGURATION_FILE
+fi
+
+log_daemon_msg () {
+ # Dummy function to be replaced by LSB library.
+
+ echo $@
+}
+
+log_end_msg () {
+ # Dummy function to be replaced by LSB library.
+
+ if test "$1" != "0"; then
+ echo "Error with $DESCRIPTION: $NAME"
+ fi
+ return $1
+}
+
+if test -r $LSB_LIBRARY; then
+ . $LSB_LIBRARY
+fi
+
+start_couchdb () {
+ # Start Apache CouchDB as a background process.
+
+ command="$COUCHDB -b"
+ if test -n "$COUCHDB_STDOUT_FILE"; then
+ command="$command -o $COUCHDB_STDOUT_FILE"
+ fi
+ if test -n "$COUCHDB_STDERR_FILE"; then
+ command="$command -e $COUCHDB_STDERR_FILE"
+ fi
+ if test -n "$COUCHDB_RESPAWN_TIMEOUT"; then
+ command="$command -r $COUCHDB_RESPAWN_TIMEOUT"
+ fi
+ if test -n "$COUCHDB_OPTIONS"; then
+ command="$command $COUCHDB_OPTIONS"
+ fi
+ mkdir -p "$RUN_DIR"
+ if test -n "$COUCHDB_USER"; then
+ chown $COUCHDB_USER "$RUN_DIR"
+ if su $COUCHDB_USER -c "$command" > /dev/null; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ else
+ if $command > /dev/null; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ fi
+}
+
+stop_couchdb () {
+ # Stop the running Apache CouchDB process.
+
+ command="$COUCHDB -d"
+ if test -n "$COUCHDB_OPTIONS"; then
+ command="$command $COUCHDB_OPTIONS"
+ fi
+ if test -n "$COUCHDB_USER"; then
+ if su $COUCHDB_USER -c "$command" > /dev/null; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ else
+ if $command > /dev/null; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ fi
+}
+
+display_status () {
+ # Display the status of the running Apache CouchDB process.
+
+ $COUCHDB -s
+}
+
+parse_script_option_list () {
+ # Parse arguments passed to the script and take appropriate action.
+
+ case "$1" in
+ start)
+ log_daemon_msg "Starting $DESCRIPTION" $NAME
+ if start_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESCRIPTION" $NAME
+ if stop_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ restart)
+ log_daemon_msg "Restarting $DESCRIPTION" $NAME
+ if stop_couchdb; then
+ if start_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ status)
+ display_status
+ ;;
+ *)
+ cat << EOF >&2
+Usage: $SCRIPT_NAME {start|stop|restart|status}
+EOF
+ exit $SCRIPT_ERROR
+ ;;
+ esac
+}
+
+parse_script_option_list $@
diff --git a/1.1.x/etc/launchd/Makefile.am b/1.1.x/etc/launchd/Makefile.am
new file mode 100644
index 00000000..5b4faae0
--- /dev/null
+++ b/1.1.x/etc/launchd/Makefile.am
@@ -0,0 +1,13 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+## This file intentionally left blank.
diff --git a/1.1.x/etc/launchd/org.apache.couchdb.plist.tpl.in b/1.1.x/etc/launchd/org.apache.couchdb.plist.tpl.in
new file mode 100644
index 00000000..c72f3480
--- /dev/null
+++ b/1.1.x/etc/launchd/org.apache.couchdb.plist.tpl.in
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN"
+ "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>Label</key>
+ <string>org.apache.couchdb</string>
+ <key>EnvironmentVariables</key>
+ <dict>
+ <key>HOME</key>
+ <string>~</string>
+ <key>DYLD_LIBRARY_PATH</key>
+ <string>/opt/local/lib:$DYLD_LIBRARY_PATH</string>
+ </dict>
+ <key>ProgramArguments</key>
+ <array>
+ <string>%bindir%/%couchdb_command_name%</string>
+ </array>
+ <key>UserName</key>
+ <string>couchdb</string>
+ <key>StandardOutPath</key>
+ <string>/dev/null</string>
+ <key>StandardErrorPath</key>
+ <string>/dev/null</string>
+ <key>RunAtLoad</key>
+ <true/>
+ <key>KeepAlive</key>
+ <true/>
+ </dict>
+</plist>
diff --git a/1.1.x/etc/logrotate.d/Makefile.am b/1.1.x/etc/logrotate.d/Makefile.am
new file mode 100644
index 00000000..5b4faae0
--- /dev/null
+++ b/1.1.x/etc/logrotate.d/Makefile.am
@@ -0,0 +1,13 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+## This file intentionally left blank.
diff --git a/1.1.x/etc/logrotate.d/couchdb.tpl.in b/1.1.x/etc/logrotate.d/couchdb.tpl.in
new file mode 100644
index 00000000..0bb07e13
--- /dev/null
+++ b/1.1.x/etc/logrotate.d/couchdb.tpl.in
@@ -0,0 +1,9 @@
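+# Added note: rotate every CouchDB log weekly, keeping ten compressed
+# generations; copytruncate lets the server keep its open log handle.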
+%localstatelogdir%/*.log {
+ weekly
+ rotate 10
+ copytruncate
+ delaycompress
+ compress
+ notifempty
+ missingok
+}
diff --git a/1.1.x/etc/windows/Makefile.am b/1.1.x/etc/windows/Makefile.am
new file mode 100644
index 00000000..5b4faae0
--- /dev/null
+++ b/1.1.x/etc/windows/Makefile.am
@@ -0,0 +1,13 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+## This file intentionally left blank.
diff --git a/1.1.x/etc/windows/README.txt.tpl b/1.1.x/etc/windows/README.txt.tpl
new file mode 100644
index 00000000..791bcc85
--- /dev/null
+++ b/1.1.x/etc/windows/README.txt.tpl
@@ -0,0 +1,29 @@
+This is the README for the %package_name% binary distribution for
+Windows, version %version%.
+
+* Although CouchDB defaults to installing into your "Program Files" directory,
+ the permissions on the 'var' and 'etc' sub-directories have been adjusted
+ to allow modification by any authorized user so the couchdb databases, logs
+ and .ini files can be written. You may like to further restrict these
+ permissions to only the user who will be running couchdb.
+
+* The installer offers to install CouchDB as a Windows Service. If you select
+ this option, the service will run as the "LocalSystem" user and be
+ configured to start automatically. You can configure the service properties
+ via the Windows 'Services' applet.
+
+* To start CouchDB in a "console" environment, execute couchdb.bat in the
+ 'bin' directory. A shortcut to this batch file should have been installed.
+  Don't try to start couchdb this way if the service is running - it will
+ fail.
+
+* The Futon application which comes with CouchDB does not work with
+ Internet Explorer - Mozilla Firefox is generally recommended.
+
+* The test suite is known to fail on Windows with what appear to be
+  permissions errors; this happens because couch cannot delete a
+  file on Windows while it is still in use.
+ See also https://issues.apache.org/jira/browse/COUCHDB-326
+
+* Additional help with the Windows support is needed - please contact the
+ couchdb-dev list if you can help.
diff --git a/1.1.x/etc/windows/couchdb.iss.tpl b/1.1.x/etc/windows/couchdb.iss.tpl
new file mode 100644
index 00000000..8a32561d
--- /dev/null
+++ b/1.1.x/etc/windows/couchdb.iss.tpl
@@ -0,0 +1,87 @@
+; Licensed under the Apache License, Version 2.0 (the "License"); you may not
+; use this file except in compliance with the License. You may obtain a copy of
+; the License at
+;
+; http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+; WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+; License for the specific language governing permissions and limitations under
+; the License.
+
+; CouchDB inno installer script
+; %configure_input%
+
+[Setup]
+AppID=ApacheCouchDB
+AppName=%package_name%
+AppVerName=%package_name% %version%
+AppPublisher=Apache Software Foundation
+AppPublisherURL=http://couchdb.apache.org/
+LicenseFile=../../LICENSE
+DefaultDirName={pf}\Apache Software Foundation\CouchDB
+DefaultGroupName=%package_name%
+OutputBaseFilename=setup-couchdb-%version%
+OutputDir=.
+
+[Files]
+Source: "%locallibbindir%\..\*.*"; DestDir: "{app}"; Flags: ignoreversion uninsrestartdelete restartreplace
+; bin dir
+Source: "%locallibbindir%\*.*"; DestDir: "{app}\bin"; Flags: ignoreversion uninsrestartdelete restartreplace recursesubdirs
+; other dirs copied '*.*'
+Source: "%locallibbindir%\..\erts-%erts_version%\*.*"; DestDir: "{app}\erts-%erts_version%"; Flags: ignoreversion uninsrestartdelete restartreplace recursesubdirs
+Source: "%locallibbindir%\..\lib\*.*"; DestDir: "{app}\lib"; Flags: ignoreversion uninsrestartdelete restartreplace recursesubdirs
+Source: "%locallibbindir%\..\share\*.*"; DestDir: "{app}\share"; Flags: ignoreversion uninsrestartdelete restartreplace recursesubdirs
+Source: "%locallibbindir%\..\releases\*.*"; DestDir: "{app}\releases"; Flags: ignoreversion uninsrestartdelete restartreplace recursesubdirs
+; skip ./usr, ./var
+
+; custom stuff...
+; ./etc/default.ini is unconditional
+Source: "%locallibbindir%\..\etc\couchdb\default.ini"; DestDir: "{app}\etc\couchdb"; Flags: ignoreversion uninsrestartdelete restartreplace
+; ./etc/local.ini is preserved and should not be updated if it exists
+Source: "%locallibbindir%\..\etc\couchdb\local.ini"; DestDir: "{app}\etc\couchdb"; Flags: onlyifdoesntexist uninsneveruninstall
+; readme
+Source: "README.txt"; DestDir: "{app}"; Flags: isreadme
+
+; msvc redists - see comments in configure.ac for notes about these...
+; (deleteafterinstall may be redundant - {tmp} is cleaned up automatically)
+Source: "%msvc_redist_dir%\%msvc_redist_name%"; DestDir: "{tmp}"; Flags: deleteafterinstall
+
+; These are erlang requirements and not copied by our makefiles.
+Source: "%openssl_bin_dir%\ssleay32.dll"; DestDir: "{app}\bin"; Flags: ignoreversion uninsrestartdelete restartreplace
+Source: "%openssl_bin_dir%\libeay32.dll"; DestDir: "{app}\bin"; Flags: ignoreversion uninsrestartdelete restartreplace
+
+[Dirs]
+Name: "{app}\var\lib\couchdb"; Permissions: authusers-modify
+Name: "{app}\var\log\couchdb"; Permissions: authusers-modify
+Name: "{app}\etc\couchdb"; Permissions: authusers-modify
+
+[Icons]
+Name: "{group}\Start CouchDB"; Filename: "{app}\bin\couchdb.bat"
+Name: "{group}\Futon (CouchDB web interface)"; Filename: "http://127.0.0.1:5984/_utils"
+Name: "{group}\CouchDB Web Site"; Filename: "http://couchdb.apache.org/"
+
+[Tasks]
+Name: service; Description: "Install couchdb as a Windows service"
+Name: service\start; Description: "Start the service after installation"
+
+[Run]
+Filename: "{tmp}\%msvc_redist_name%"; Parameters: "/q"
+; This is erlang's Install.exe which updates erl.ini correctly.
+Filename: "{app}\Install.exe"; Parameters: "-s"; Flags: runhidden
+
+; Commands for a service
+; First attempt to nuke an existing service of this name, in case they are
+; reinstalling without uninstalling
+Filename: "{app}\erts-%erts_version%\bin\erlsrv.exe"; Parameters: "remove ""%package_name%"""; Flags: runhidden; Tasks: service
+; add a new service, including automatic restart by default on failure
+Filename: "{app}\erts-%erts_version%\bin\erlsrv.exe"; Parameters: "add ""%package_name%"" -workdir ""{app}\bin"" -onfail restart_always -args ""-sasl errlog_type error -s couch +A 4 +W w"" -comment ""%package_name% %version%"""; Flags: runhidden; Tasks: service
+; and start it if requested
+Filename: "{app}\erts-%erts_version%\bin\erlsrv.exe"; Parameters: "start ""%package_name%"""; Flags: runhidden; Tasks: service\start
+
+[UninstallRun]
+; erlsrv stops services prior to removing them
+Filename: "{app}\erts-%erts_version%\bin\erlsrv.exe"; Parameters: "remove ""%package_name%"""; Flags: runhidden; Tasks: service
+; kill epmd.exe if it is running, so the uninstaller is not prevented from removing all binaries
+Filename: "{app}\erts-%erts_version%\bin\epmd.exe"; Parameters: "-kill"; Flags: runhidden
diff --git a/1.1.x/license.skip b/1.1.x/license.skip
new file mode 100644
index 00000000..151ccc08
--- /dev/null
+++ b/1.1.x/license.skip
@@ -0,0 +1,111 @@
+\.svn
+^AUTHORS
+^BUGS
+^CHANGES
+^DEVELOPERS
+^DEVELOPERS.gz
+^INSTALL
+^INSTALL.Unix
+^INSTALL.Unix.gz
+^INSTALL.Windows
+^INSTALL.Windows.gz
+^INSTALL.gz
+^LICENSE.gz
+^Makefile
+^Makefile.in
+^NEWS
+^NOTICE
+^README
+^THANKS
+^aclocal.m4
+^apache-couchdb-*
+^autom4te.cache/*
+^bin/Makefile
+^bin/Makefile.in
+^bin/couchdb.1
+^bin/couchjs.1
+^build-aux/*
+^config.*
+^configure
+^couchdb.stderr
+^couchdb.stdout
+^cover/.*\.coverdata
+^cover/.*\.html
+^erl_crash.dump
+^etc/Makefile
+^etc/Makefile.in
+^etc/couchdb/Makefile
+^etc/couchdb/Makefile.in
+^etc/couchdb/default*
+^etc/couchdb/local*
+^etc/default/Makefile
+^etc/default/Makefile.in
+^etc/default/couchdb
+^etc/init/Makefile
+^etc/init/Makefile.in
+^etc/launchd/Makefile
+^etc/launchd/Makefile.in
+^etc/launchd/org.apache.couchdb.plist.*
+^etc/logrotate.d/Makefile
+^etc/logrotate.d/Makefile.in
+^etc/logrotate.d/couchdb*
+^etc/windows/Makefile
+^etc/windows/README.txt.tpl
+^libtool
+^license.skip
+^m4/*
+^share/Makefile
+^share/Makefile.in
+^share/server/json2.js
+^share/server/mimeparse.js
+^share/www/favicon.ico
+^share/www/image/*
+^share/www/script/jquery.*
+^share/www/script/json2.js
+^share/www/script/jspec/*
+^share/www/script/sha1.js
+^share/www/script/base64.js
+^share/www/script/test/lorem*
+^share/www/style/jquery-ui-1.8.11.custom.css
+^src/Makefile
+^src/Makefile.in
+^src/couchdb/.*beam
+^src/couchdb/.deps/*
+^src/couchdb/Makefile
+^src/couchdb/Makefile.in
+^src/couchdb/couch.app*
+^src/couchdb/couch.app.tpl.in
+^src/couchdb/priv/.*o
+^src/couchdb/priv/.deps/*
+^src/couchdb/priv/Makefile
+^src/couchdb/priv/Makefile.in
+^src/couchdb/priv/couch_icu_driver.la
+^src/couchdb/priv/couchjs
+^src/couchdb/priv/couchspawnkillable
+^src/couchdb/priv/stat_descriptions.cfg
+^src/erlang-oauth/*
+^src/etap/*
+^src/ibrowse/*
+^src/mochiweb/*
+^stamp-h1
+^test/Makefile
+^test/Makefile.in
+^test/bench/Makefile
+^test/bench/Makefile.in
+^test/etap/.*beam
+^test/etap/.*\.o
+^test/etap/.deps/*
+^test/etap/test_cfg_register
+^test/etap/Makefile
+^test/etap/Makefile.in
+^test/etap/temp.*
+^test/javascript/Makefile
+^test/javascript/Makefile.in
+^test/local.ini
+^test/view_server/Makefile
+^test/view_server/Makefile.in
+^tmp/*
+^utils/Makefile
+^utils/Makefile.in
+^var/Makefile
+^var/Makefile.in
diff --git a/1.1.x/m4/ac_check_curl.m4.gz b/1.1.x/m4/ac_check_curl.m4.gz
new file mode 100644
index 00000000..020e646f
--- /dev/null
+++ b/1.1.x/m4/ac_check_curl.m4.gz
Binary files differ
diff --git a/1.1.x/m4/ac_check_icu.m4.gz b/1.1.x/m4/ac_check_icu.m4.gz
new file mode 100644
index 00000000..8af50ff9
--- /dev/null
+++ b/1.1.x/m4/ac_check_icu.m4.gz
Binary files differ
diff --git a/1.1.x/share/Makefile.am b/1.1.x/share/Makefile.am
new file mode 100644
index 00000000..968660ff
--- /dev/null
+++ b/1.1.x/share/Makefile.am
@@ -0,0 +1,201 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+JS_FILE = server/main.js
+
+JS_FILE_COMPONENTS = \
+ server/json2.js \
+ server/filter.js \
+ server/mimeparse.js \
+ server/render.js \
+ server/state.js \
+ server/util.js \
+ server/validate.js \
+ server/views.js
+
+JS_FILE_COMPONENTS_LAST = server/loop.js
+
+$(JS_FILE): $(JS_FILE_COMPONENTS) $(JS_FILE_COMPONENTS_LAST)
+ mkdir -p `dirname $(JS_FILE)`
+ echo "// DO NOT EDIT THIS FILE BY HAND" > $@
+ echo >> $@
+ cat $^ >> $@
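+## Sketch of the result: $^ expands to all prerequisites in the order listed,
+## so server/main.js is the "DO NOT EDIT" banner, a blank line, then json2.js
+## through views.js, with loop.js last.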
+
+CLEANFILES = $(JS_FILE)
+
+EXTRA_DIST = $(JS_FILE_COMPONENTS) $(JS_FILE_COMPONENTS_LAST)
+
+nobase_localdata_SCRIPTS = \
+ $(JS_FILE)
+
+nobase_dist_localdata_DATA = \
+ www/config.html \
+ www/couch_tests.html \
+ www/custom_test.html \
+ www/database.html \
+ www/session.html \
+ www/dialog/_admin_party.html \
+ www/dialog/_compact_cleanup.html \
+ www/dialog/_create_admin.html \
+ www/dialog/_login.html \
+ www/dialog/_signup.html \
+ www/dialog/_create_database.html \
+ www/dialog/_create_config.html \
+ www/dialog/_delete_database.html \
+ www/dialog/_delete_document.html \
+ www/dialog/_database_security.html \
+ www/dialog/_share_test_reports.html \
+ www/dialog/_save_view_as.html \
+ www/dialog/_upload_attachment.html \
+ www/document.html \
+ www/favicon.ico \
+ www/image/add.png \
+ www/image/apply.gif \
+ www/image/bg.png \
+ www/image/cancel.gif \
+ www/image/compact.png \
+ www/image/delete-mini.png \
+ www/image/delete.png \
+ www/image/grippie.gif \
+ www/image/hgrad.gif \
+ www/image/key.png \
+ www/image/load.png \
+ www/image/logo.png \
+ www/image/order-asc.gif \
+ www/image/order-desc.gif \
+ www/image/path.gif \
+ www/image/progress.gif \
+ www/image/rarrow.png \
+ www/image/run-mini.png \
+ www/image/run.png \
+ www/image/running.png \
+ www/image/save.png \
+ www/image/sidebar-toggle.png \
+ www/image/spinner.gif \
+ www/image/spinner_33.gif \
+ www/image/spinner_6b.gif \
+ www/image/test_failure.gif \
+ www/image/test_success.gif \
+ www/image/thead-key.gif \
+ www/image/thead.gif \
+ www/image/toggle-collapse.gif \
+ www/image/toggle-expand.gif \
+ www/image/twisty.gif \
+ www/index.html \
+ www/replicator.html \
+ www/script/couch.js \
+ www/script/couch_tests.js \
+ www/script/couch_test_runner.js \
+ www/script/futon.browse.js \
+ www/script/futon.format.js \
+ www/script/futon.js \
+ www/script/jquery.couch.js \
+ www/script/jquery.dialog.js \
+ www/script/jquery.editinline.js \
+ www/script/jquery.form.js \
+ www/script/jquery.js \
+ www/script/jquery-ui-1.8.11.custom.min.js \
+ www/script/jquery.resizer.js \
+ www/script/jquery.suggest.js \
+ www/script/json2.js \
+ www/script/jspec/jspec.css \
+ www/script/jspec/jspec.jquery.js \
+ www/script/jspec/jspec.js \
+ www/script/jspec/jspec.xhr.js \
+ www/script/oauth.js \
+ www/script/sha1.js \
+ www/script/base64.js \
+ www/script/test/all_docs.js \
+ www/script/test/attachments.js \
+ www/script/test/attachments_multipart.js \
+ www/script/test/attachment_conflicts.js \
+ www/script/test/attachment_names.js \
+ www/script/test/attachment_paths.js \
+ www/script/test/attachment_ranges.js \
+ www/script/test/attachment_views.js \
+ www/script/test/auth_cache.js \
+ www/script/test/basics.js \
+ www/script/test/batch_save.js \
+ www/script/test/bulk_docs.js \
+ www/script/test/changes.js \
+ www/script/test/compact.js \
+ www/script/test/config.js \
+ www/script/test/conflicts.js \
+ www/script/test/content_negotiation.js \
+ www/script/test/cookie_auth.js \
+ www/script/test/copy_doc.js \
+ www/script/test/delayed_commits.js \
+ www/script/test/design_docs.js \
+ www/script/test/design_options.js \
+ www/script/test/design_paths.js \
+ www/script/test/erlang_views.js \
+ www/script/test/etags_head.js \
+ www/script/test/etags_views.js \
+ www/script/test/form_submit.js \
+ www/script/test/http.js \
+ www/script/test/invalid_docids.js \
+ www/script/test/jsonp.js \
+ www/script/test/large_docs.js \
+ www/script/test/list_views.js \
+ www/script/test/lorem.txt \
+ www/script/test/lorem_b64.txt \
+ www/script/test/lots_of_docs.js \
+ www/script/test/method_override.js \
+ www/script/test/multiple_rows.js \
+ www/script/test/oauth.js \
+ www/script/test/proxyauth.js \
+ www/script/test/purge.js \
+ www/script/test/reader_acl.js \
+ www/script/test/recreate_doc.js \
+ www/script/test/reduce.js \
+ www/script/test/reduce_builtin.js \
+ www/script/test/reduce_false.js \
+ www/script/test/reduce_false_temp.js \
+ www/script/test/replication.js \
+ www/script/test/replicator_db.js \
+ www/script/test/rev_stemming.js \
+ www/script/test/rewrite.js \
+ www/script/test/security_validation.js \
+ www/script/test/show_documents.js \
+ www/script/test/stats.js \
+ www/script/test/update_documents.js \
+ www/script/test/users_db.js \
+ www/script/test/utf8.js \
+ www/script/test/uuids.js \
+ www/script/test/view_collation.js \
+ www/script/test/view_collation_raw.js \
+ www/script/test/view_conflicts.js \
+ www/script/test/view_compaction.js \
+ www/script/test/view_errors.js \
+ www/script/test/view_include_docs.js \
+ www/script/test/view_multi_key_all_docs.js \
+ www/script/test/view_multi_key_design.js \
+ www/script/test/view_multi_key_temp.js \
+ www/script/test/view_offsets.js \
+ www/script/test/view_update_seq.js \
+ www/script/test/view_pagination.js \
+ www/script/test/view_sandboxing.js \
+ www/script/test/view_xml.js \
+ www/spec/couch_js_class_methods_spec.js \
+ www/spec/couch_js_instance_methods_1_spec.js \
+ www/spec/couch_js_instance_methods_2_spec.js \
+ www/spec/couch_js_instance_methods_3_spec.js \
+ www/spec/custom_helpers.js \
+ www/spec/jquery_couch_js_class_methods_spec.js \
+ www/spec/jquery_couch_js_instance_methods_1_spec.js \
+ www/spec/jquery_couch_js_instance_methods_2_spec.js \
+ www/spec/jquery_couch_js_instance_methods_3_spec.js \
+ www/spec/run.html \
+ www/status.html \
+ www/style/jquery-ui-1.8.11.custom.css \
+ www/style/layout.css \
+ www/_sidebar.html
diff --git a/1.1.x/share/server/filter.js b/1.1.x/share/server/filter.js
new file mode 100644
index 00000000..1e8556a4
--- /dev/null
+++ b/1.1.x/share/server/filter.js
@@ -0,0 +1,23 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var Filter = {
+ filter : function(fun, ddoc, args) {
+ var results = [];
+ var docs = args[0];
+ var req = args[1];
+ for (var i=0; i < docs.length; i++) {
+ results.push((fun.apply(ddoc, [docs[i], req]) && true) || false);
+ };
+ respond([true, results]);
+ }
+};
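+
+// Illustrative (hypothetical values): for docs = [{"_id": "a", "type": "post"}]
+// and a filter function returning doc.type == "post", this responds with
+// [true, [true]] -- one boolean per input document.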
diff --git a/1.1.x/share/server/json2.js b/1.1.x/share/server/json2.js
new file mode 100644
index 00000000..a1a3b170
--- /dev/null
+++ b/1.1.x/share/server/json2.js
@@ -0,0 +1,482 @@
+/*
+ http://www.JSON.org/json2.js
+ 2010-03-20
+
+ Public Domain.
+
+ NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+ See http://www.JSON.org/js.html
+
+
+ This code should be minified before deployment.
+ See http://javascript.crockford.com/jsmin.html
+
+ USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
+ NOT CONTROL.
+
+
+ This file creates a global JSON object containing two methods: stringify
+ and parse.
+
+ JSON.stringify(value, replacer, space)
+ value any JavaScript value, usually an object or array.
+
+ replacer an optional parameter that determines how object
+ values are stringified for objects. It can be a
+ function or an array of strings.
+
+ space an optional parameter that specifies the indentation
+ of nested structures. If it is omitted, the text will
+ be packed without extra whitespace. If it is a number,
+ it will specify the number of spaces to indent at each
+ level. If it is a string (such as '\t' or '&nbsp;'),
+ it contains the characters used to indent at each level.
+
+ This method produces a JSON text from a JavaScript value.
+
+ When an object value is found, if the object contains a toJSON
+ method, its toJSON method will be called and the result will be
+ stringified. A toJSON method does not serialize: it returns the
+ value represented by the name/value pair that should be serialized,
+ or undefined if nothing should be serialized. The toJSON method
+ will be passed the key associated with the value, and this will be
+    bound to the value.
+
+ For example, this would serialize Dates as ISO strings.
+
+ Date.prototype.toJSON = function (key) {
+ function f(n) {
+ // Format integers to have at least two digits.
+ return n < 10 ? '0' + n : n;
+ }
+
+ return this.getUTCFullYear() + '-' +
+ f(this.getUTCMonth() + 1) + '-' +
+ f(this.getUTCDate()) + 'T' +
+ f(this.getUTCHours()) + ':' +
+ f(this.getUTCMinutes()) + ':' +
+ f(this.getUTCSeconds()) + 'Z';
+ };
+
+ You can provide an optional replacer method. It will be passed the
+ key and value of each member, with this bound to the containing
+ object. The value that is returned from your method will be
+ serialized. If your method returns undefined, then the member will
+ be excluded from the serialization.
+
+ If the replacer parameter is an array of strings, then it will be
+ used to select the members to be serialized. It filters the results
+ such that only members with keys listed in the replacer array are
+ stringified.
+
+ Values that do not have JSON representations, such as undefined or
+ functions, will not be serialized. Such values in objects will be
+ dropped; in arrays they will be replaced with null. You can use
+ a replacer function to replace those with JSON values.
+ JSON.stringify(undefined) returns undefined.
+
+ The optional space parameter produces a stringification of the
+ value that is filled with line breaks and indentation to make it
+ easier to read.
+
+ If the space parameter is a non-empty string, then that string will
+ be used for indentation. If the space parameter is a number, then
+ the indentation will be that many spaces.
+
+ Example:
+
+ text = JSON.stringify(['e', {pluribus: 'unum'}]);
+ // text is '["e",{"pluribus":"unum"}]'
+
+
+ text = JSON.stringify(['e', {pluribus: 'unum'}], null, '\t');
+ // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'
+
+ text = JSON.stringify([new Date()], function (key, value) {
+ return this[key] instanceof Date ?
+ 'Date(' + this[key] + ')' : value;
+ });
+ // text is '["Date(---current time---)"]'
+
+
+ JSON.parse(text, reviver)
+ This method parses a JSON text to produce an object or array.
+ It can throw a SyntaxError exception.
+
+ The optional reviver parameter is a function that can filter and
+ transform the results. It receives each of the keys and values,
+ and its return value is used instead of the original value.
+ If it returns what it received, then the structure is not modified.
+ If it returns undefined then the member is deleted.
+
+ Example:
+
+ // Parse the text. Values that look like ISO date strings will
+ // be converted to Date objects.
+
+ myData = JSON.parse(text, function (key, value) {
+ var a;
+ if (typeof value === 'string') {
+ a =
+/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
+ if (a) {
+ return new Date(Date.UTC(+a[1], +a[2] - 1, +a[3], +a[4],
+ +a[5], +a[6]));
+ }
+ }
+ return value;
+ });
+
+ myData = JSON.parse('["Date(09/09/2001)"]', function (key, value) {
+ var d;
+ if (typeof value === 'string' &&
+ value.slice(0, 5) === 'Date(' &&
+ value.slice(-1) === ')') {
+ d = new Date(value.slice(5, -1));
+ if (d) {
+ return d;
+ }
+ }
+ return value;
+ });
+
+
+ This is a reference implementation. You are free to copy, modify, or
+ redistribute.
+*/
+
+/*jslint evil: true, strict: false */
+
+/*members "", "\b", "\t", "\n", "\f", "\r", "\"", JSON, "\\", apply,
+ call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
+ getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
+ lastIndex, length, parse, prototype, push, replace, slice, stringify,
+ test, toJSON, toString, valueOf
+*/
+
+
+// Create a JSON object only if one does not already exist. We create the
+// methods in a closure to avoid creating global variables.
+
+if (!this.JSON) {
+ this.JSON = {};
+}
+
+(function () {
+
+ function f(n) {
+ // Format integers to have at least two digits.
+ return n < 10 ? '0' + n : n;
+ }
+
+ if (typeof Date.prototype.toJSON !== 'function') {
+
+ Date.prototype.toJSON = function (key) {
+
+ return isFinite(this.valueOf()) ?
+ this.getUTCFullYear() + '-' +
+ f(this.getUTCMonth() + 1) + '-' +
+ f(this.getUTCDate()) + 'T' +
+ f(this.getUTCHours()) + ':' +
+ f(this.getUTCMinutes()) + ':' +
+ f(this.getUTCSeconds()) + 'Z' : null;
+ };
+
+ String.prototype.toJSON =
+ Number.prototype.toJSON =
+ Boolean.prototype.toJSON = function (key) {
+ return this.valueOf();
+ };
+ }
+
+ var cx = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
+ escapable = /[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
+ gap,
+ indent,
+ meta = { // table of character substitutions
+ '\b': '\\b',
+ '\t': '\\t',
+ '\n': '\\n',
+ '\f': '\\f',
+ '\r': '\\r',
+ '"' : '\\"',
+ '\\': '\\\\'
+ },
+ rep;
+
+
+ function quote(string) {
+
+// If the string contains no control characters, no quote characters, and no
+// backslash characters, then we can safely slap some quotes around it.
+// Otherwise we must also replace the offending characters with safe escape
+// sequences.
+
+ escapable.lastIndex = 0;
+ return escapable.test(string) ?
+ '"' + string.replace(escapable, function (a) {
+ var c = meta[a];
+ return typeof c === 'string' ? c :
+ '\\u' + ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
+ }) + '"' :
+ '"' + string + '"';
+ }
+
+
+ function str(key, holder) {
+
+// Produce a string from holder[key].
+
+ var i, // The loop counter.
+ k, // The member key.
+ v, // The member value.
+ length,
+ mind = gap,
+ partial,
+ value = holder[key];
+
+// If the value has a toJSON method, call it to obtain a replacement value.
+
+ if (value && typeof value === 'object' &&
+ typeof value.toJSON === 'function') {
+ value = value.toJSON(key);
+ }
+
+// If we were called with a replacer function, then call the replacer to
+// obtain a replacement value.
+
+ if (typeof rep === 'function') {
+ value = rep.call(holder, key, value);
+ }
+
+// What happens next depends on the value's type.
+
+ switch (typeof value) {
+ case 'string':
+ return quote(value);
+
+ case 'number':
+
+// JSON numbers must be finite. Encode non-finite numbers as null.
+
+ return isFinite(value) ? String(value) : 'null';
+
+ case 'boolean':
+ case 'null':
+
+// If the value is a boolean or null, convert it to a string. Note:
+// typeof null does not produce 'null'. The case is included here in
+// the remote chance that this gets fixed someday.
+
+ return String(value);
+
+// If the type is 'object', we might be dealing with an object or an array or
+// null.
+
+ case 'object':
+
+// Due to a specification blunder in ECMAScript, typeof null is 'object',
+// so watch out for that case.
+
+ if (!value) {
+ return 'null';
+ }
+
+// Make an array to hold the partial results of stringifying this object value.
+
+ gap += indent;
+ partial = [];
+
+// Is the value an array?
+
+ if (Object.prototype.toString.apply(value) === '[object Array]') {
+
+// The value is an array. Stringify every element. Use null as a placeholder
+// for non-JSON values.
+
+ length = value.length;
+ for (i = 0; i < length; i += 1) {
+ partial[i] = str(i, value) || 'null';
+ }
+
+// Join all of the elements together, separated with commas, and wrap them in
+// brackets.
+
+ v = partial.length === 0 ? '[]' :
+ gap ? '[\n' + gap +
+ partial.join(',\n' + gap) + '\n' +
+ mind + ']' :
+ '[' + partial.join(',') + ']';
+ gap = mind;
+ return v;
+ }
+
+// If the replacer is an array, use it to select the members to be stringified.
+
+ if (rep && typeof rep === 'object') {
+ length = rep.length;
+ for (i = 0; i < length; i += 1) {
+ k = rep[i];
+ if (typeof k === 'string') {
+ v = str(k, value);
+ if (v) {
+ partial.push(quote(k) + (gap ? ': ' : ':') + v);
+ }
+ }
+ }
+ } else {
+
+// Otherwise, iterate through all of the keys in the object.
+
+ for (k in value) {
+ if (Object.hasOwnProperty.call(value, k)) {
+ v = str(k, value);
+ if (v) {
+ partial.push(quote(k) + (gap ? ': ' : ':') + v);
+ }
+ }
+ }
+ }
+
+// Join all of the member texts together, separated with commas,
+// and wrap them in braces.
+
+ v = partial.length === 0 ? '{}' :
+ gap ? '{\n' + gap + partial.join(',\n' + gap) + '\n' +
+ mind + '}' : '{' + partial.join(',') + '}';
+ gap = mind;
+ return v;
+ }
+ }
+
+// If the JSON object does not yet have a stringify method, give it one.
+
+ if (typeof JSON.stringify !== 'function') {
+ JSON.stringify = function (value, replacer, space) {
+
+// The stringify method takes a value and an optional replacer, and an optional
+// space parameter, and returns a JSON text. The replacer can be a function
+// that can replace values, or an array of strings that will select the keys.
+// A default replacer method can be provided. Use of the space parameter can
+// produce text that is more easily readable.
+
+ var i;
+ gap = '';
+ indent = '';
+
+// If the space parameter is a number, make an indent string containing that
+// many spaces.
+
+ if (typeof space === 'number') {
+ for (i = 0; i < space; i += 1) {
+ indent += ' ';
+ }
+
+// If the space parameter is a string, it will be used as the indent string.
+
+ } else if (typeof space === 'string') {
+ indent = space;
+ }
+
+// If there is a replacer, it must be a function or an array.
+// Otherwise, throw an error.
+
+ rep = replacer;
+ if (replacer && typeof replacer !== 'function' &&
+ (typeof replacer !== 'object' ||
+ typeof replacer.length !== 'number')) {
+ throw new Error('JSON.stringify');
+ }
+
+// Make a fake root object containing our value under the key of ''.
+// Return the result of stringifying the value.
+
+ return str('', {'': value});
+ };
+ }
+
+
+// If the JSON object does not yet have a parse method, give it one.
+
+ if (typeof JSON.parse !== 'function') {
+ JSON.parse = function (text, reviver) {
+
+// The parse method takes a text and an optional reviver function, and returns
+// a JavaScript value if the text is a valid JSON text.
+
+ var j;
+
+ function walk(holder, key) {
+
+// The walk method is used to recursively walk the resulting structure so
+// that modifications can be made.
+
+ var k, v, value = holder[key];
+ if (value && typeof value === 'object') {
+ for (k in value) {
+ if (Object.hasOwnProperty.call(value, k)) {
+ v = walk(value, k);
+ if (v !== undefined) {
+ value[k] = v;
+ } else {
+ delete value[k];
+ }
+ }
+ }
+ }
+ return reviver.call(holder, key, value);
+ }
+
+
+// Parsing happens in four stages. In the first stage, we replace certain
+// Unicode characters with escape sequences. JavaScript handles many characters
+// incorrectly, either silently deleting them, or treating them as line endings.
+
+ text = String(text);
+ cx.lastIndex = 0;
+ if (cx.test(text)) {
+ text = text.replace(cx, function (a) {
+ return '\\u' +
+ ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
+ });
+ }
+
+// In the second stage, we run the text against regular expressions that look
+// for non-JSON patterns. We are especially concerned with '()' and 'new'
+// because they can cause invocation, and '=' because it can cause mutation.
+// But just to be safe, we want to reject all unexpected forms.
+
+// We split the second stage into 4 regexp operations in order to work around
+// crippling inefficiencies in IE's and Safari's regexp engines. First we
+// replace the JSON backslash pairs with '@' (a non-JSON character). Second, we
+// replace all simple value tokens with ']' characters. Third, we delete all
+// open brackets that follow a colon or comma or that begin the text. Finally,
+// we look to see that the remaining characters are only whitespace or ']' or
+// ',' or ':' or '{' or '}'. If that is so, then the text is safe for eval.
+
+ if (/^[\],:{}\s]*$/.
+test(text.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, '@').
+replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, ']').
+replace(/(?:^|:|,)(?:\s*\[)+/g, ''))) {
+
+// In the third stage we use the eval function to compile the text into a
+// JavaScript structure. The '{' operator is subject to a syntactic ambiguity
+// in JavaScript: it can begin a block or an object literal. We wrap the text
+// in parens to eliminate the ambiguity.
+
+ j = eval('(' + text + ')');
+
+// In the optional fourth stage, we recursively walk the new structure, passing
+// each name/value pair to a reviver function for possible transformation.
+
+ return typeof reviver === 'function' ?
+ walk({'': j}, '') : j;
+ }
+
+// If the text is not JSON parseable, then a SyntaxError is thrown.
+
+ throw new SyntaxError('JSON.parse');
+ };
+ }
+}());
diff --git a/1.1.x/share/server/loop.js b/1.1.x/share/server/loop.js
new file mode 100644
index 00000000..d2a07f61
--- /dev/null
+++ b/1.1.x/share/server/loop.js
@@ -0,0 +1,142 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var sandbox = null;
+
+function init_sandbox() {
+ try {
+ // if possible, use evalcx (not always available)
+ sandbox = evalcx('');
+ sandbox.emit = Views.emit;
+ sandbox.sum = Views.sum;
+ sandbox.log = log;
+ sandbox.toJSON = Couch.toJSON;
+ sandbox.JSON = JSON;
+ sandbox.provides = Mime.provides;
+ sandbox.registerType = Mime.registerType;
+ sandbox.start = Render.start;
+ sandbox.send = Render.send;
+ sandbox.getRow = Render.getRow;
+ sandbox.isArray = isArray;
+ } catch (e) {
+ log(e.toSource());
+ }
+};
+init_sandbox();
+
+// Commands are in the form of json arrays:
+// ["commandname",..optional args...]\n
+//
+// Responses are json values followed by a new line ("\n")
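+//
+// An illustrative session (values are hypothetical):
+//   received: ["reset", {"reduce_limit": true}]
+//   sent:     true
+//   received: ["add_fun", "function(doc) { emit(doc._id, null); }"]
+//   sent:     true
+//   received: ["map_doc", {"_id": "foo"}]
+//   sent:     [[["foo",null]]]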
+
+var DDoc = (function() {
+ var ddoc_dispatch = {
+ "lists" : Render.list,
+ "shows" : Render.show,
+ "filters" : Filter.filter,
+ "updates" : Render.update,
+ "validate_doc_update" : Validate.validate
+ };
+ var ddocs = {};
+ return {
+ ddoc : function() {
+ var args = [];
+ for (var i=0; i < arguments.length; i++) {
+ args.push(arguments[i]);
+ };
+ var ddocId = args.shift();
+ if (ddocId == "new") {
+ // get the real ddocId.
+ ddocId = args.shift();
+ // store the ddoc, functions are lazily compiled.
+ ddocs[ddocId] = args.shift();
+ print("true");
+ } else {
+ // Couch makes sure we know this ddoc already.
+ var ddoc = ddocs[ddocId];
+ if (!ddoc) throw(["fatal", "query_protocol_error", "uncached design doc: "+ddocId]);
+ var funPath = args.shift();
+ var cmd = funPath[0];
+ // the first member of the fun path determines the type of operation
+ var funArgs = args.shift();
+ if (ddoc_dispatch[cmd]) {
+ // get the function, call the command with it
+          var fun, point = ddoc;
+          for (var i=0; i < funPath.length; i++) {
+            if (i+1 == funPath.length) {
+              fun = point[funPath[i]];
+              if (typeof fun != "function") {
+                fun = Couch.compileFunction(fun, ddoc);
+                // cache the compiled fun on the ddoc
+                point[funPath[i]] = fun;
+              };
+            } else {
+              point = point[funPath[i]];
+            }
+          };
+
+ // run the correct responder with the cmd body
+ ddoc_dispatch[cmd].apply(null, [fun, ddoc, funArgs]);
+ } else {
+ // unknown command, quit and hope the restarted version is better
+ throw(["fatal", "unknown_command", "unknown ddoc command '" + cmd + "'"]);
+ }
+ }
+ }
+ };
+})();
+
+var Loop = function() {
+ var line, cmd, cmdkey, dispatch = {
+ "ddoc" : DDoc.ddoc,
+ // "view" : Views.handler,
+ "reset" : State.reset,
+ "add_fun" : State.addFun,
+ "add_lib" : State.addLib,
+ "map_doc" : Views.mapDoc,
+ "reduce" : Views.reduce,
+ "rereduce" : Views.rereduce
+ };
+ function handleError(e) {
+ var type = e[0];
+ if (type == "fatal") {
+ e[0] = "error"; // we tell the client it was a fatal error by dying
+ respond(e);
+ quit(-1);
+ } else if (type == "error") {
+ respond(e);
+ } else if (e.error && e.reason) {
+ // compatibility with old error format
+ respond(["error", e.error, e.reason]);
+ } else {
+ respond(["error","unnamed_error",e.toSource()]);
+ }
+ };
+ while (line = readline()) {
+ cmd = eval('('+line+')');
+ State.line_length = line.length;
+ try {
+ cmdkey = cmd.shift();
+ if (dispatch[cmdkey]) {
+ // run the correct responder with the cmd body
+ dispatch[cmdkey].apply(null, cmd);
+ } else {
+ // unknown command, quit and hope the restarted version is better
+ throw(["fatal", "unknown_command", "unknown command '" + cmdkey + "'"]);
+ }
+ } catch(e) {
+ handleError(e);
+ }
+ };
+};
+
+Loop();
diff --git a/1.1.x/share/server/mimeparse.js b/1.1.x/share/server/mimeparse.js
new file mode 100644
index 00000000..3642a194
--- /dev/null
+++ b/1.1.x/share/server/mimeparse.js
@@ -0,0 +1,158 @@
+// mimeparse.js
+//
+// This module provides basic functions for handling mime-types. It can
+// handle matching mime-types against a list of media-ranges. See section
+// 14.1 of the HTTP specification [RFC 2616] for a complete explanation.
+//
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
+//
+// A port to JavaScript of Joe Gregorio's MIME-Type Parser:
+//
+// http://code.google.com/p/mimeparse/
+//
+// Ported by J. Chris Anderson <jchris@apache.org>, targeting the Spidermonkey runtime.
+//
+// To run the tests, open mimeparse-js-test.html in a browser.
+// Ported from version 0.1.2
+// Comments are mostly excerpted from the original.
+
+var Mimeparse = (function() {
+ // private helpers
+ function strip(string) {
+ return string.replace(/^\s+/, '').replace(/\s+$/, '')
+ };
+
+ function parseRanges(ranges) {
+ var parsedRanges = [], rangeParts = ranges.split(",");
+ for (var i=0; i < rangeParts.length; i++) {
+ parsedRanges.push(publicMethods.parseMediaRange(rangeParts[i]))
+ };
+ return parsedRanges;
+ };
+
+ var publicMethods = {
+ // Carves up a mime-type and returns an Array of the
+ // [type, subtype, params] where "params" is a Hash of all
+ // the parameters for the media range.
+ //
+ // For example, the media range "application/xhtml;q=0.5" would
+ // get parsed into:
+ //
+ // ["application", "xhtml", { "q" : "0.5" }]
+ parseMimeType : function(mimeType) {
+ var fullType, typeParts, params = {}, parts = mimeType.split(';');
+ for (var i=0; i < parts.length; i++) {
+ var p = parts[i].split('=');
+ if (p.length == 2) {
+ params[strip(p[0])] = strip(p[1]);
+ }
+ };
+ fullType = parts[0].replace(/^\s+/, '').replace(/\s+$/, '');
+ if (fullType == '*') fullType = '*/*';
+ typeParts = fullType.split('/');
+ return [typeParts[0], typeParts[1], params];
+ },
+
+ // Carves up a media range and returns an Array of the
+ // [type, subtype, params] where "params" is a Object with
+ // all the parameters for the media range.
+ //
+ // For example, the media range "application/*;q=0.5" would
+ // get parsed into:
+ //
+ // ["application", "*", { "q" : "0.5" }]
+ //
+ // In addition this function also guarantees that there
+ // is a value for "q" in the params dictionary, filling it
+ // in with a proper default if necessary.
+ parseMediaRange : function(range) {
+ var q, parsedType = this.parseMimeType(range);
+ if (!parsedType[2]['q']) {
+ parsedType[2]['q'] = '1';
+ } else {
+ q = parseFloat(parsedType[2]['q']);
+ if (isNaN(q)) {
+ parsedType[2]['q'] = '1';
+ } else if (q > 1 || q < 0) {
+ parsedType[2]['q'] = '1';
+ }
+ }
+ return parsedType;
+ },
+
+ // Find the best match for a given mime-type against
+ // a list of media_ranges that have already been
+ // parsed by parseMediaRange(). Returns an array of
+ // the fitness value and the value of the 'q' quality
+ // parameter of the best match, or (-1, 0) if no match
+ // was found. Just as for qualityParsed(), 'parsed_ranges'
+ // must be a list of parsed media ranges.
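+    // For example (illustrative), matching 'text/html' against the parsed
+    // ranges of 'text/*;q=0.3, text/html;q=0.7' returns [110, 0.7]:
+    // 100 for the type match plus 10 for the subtype match, at quality 0.7.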
+ fitnessAndQualityParsed : function(mimeType, parsedRanges) {
+ var bestFitness = -1, bestFitQ = 0, target = this.parseMediaRange(mimeType);
+ var targetType = target[0], targetSubtype = target[1], targetParams = target[2];
+
+ for (var i=0; i < parsedRanges.length; i++) {
+ var parsed = parsedRanges[i];
+ var type = parsed[0], subtype = parsed[1], params = parsed[2];
+ if ((type == targetType || type == "*" || targetType == "*") &&
+ (subtype == targetSubtype || subtype == "*" || targetSubtype == "*")) {
+ var matchCount = 0;
+          for (var param in targetParams) {
+ if (param != 'q' && params[param] && params[param] == targetParams[param]) {
+ matchCount += 1;
+ }
+ }
+
+ var fitness = (type == targetType) ? 100 : 0;
+ fitness += (subtype == targetSubtype) ? 10 : 0;
+ fitness += matchCount;
+
+ if (fitness > bestFitness) {
+ bestFitness = fitness;
+ bestFitQ = params["q"];
+ }
+ }
+ };
+ return [bestFitness, parseFloat(bestFitQ)];
+ },
+
+ // Find the best match for a given mime-type against
+ // a list of media_ranges that have already been
+ // parsed by parseMediaRange(). Returns the
+ // 'q' quality parameter of the best match, 0 if no
+    // match was found. This function behaves the same as quality()
+ // except that 'parsedRanges' must be a list of
+ // parsed media ranges.
+ qualityParsed : function(mimeType, parsedRanges) {
+ return this.fitnessAndQualityParsed(mimeType, parsedRanges)[1];
+ },
+
+ // Returns the quality 'q' of a mime-type when compared
+ // against the media-ranges in ranges. For example:
+ //
+ // >>> Mimeparse.quality('text/html','text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
+ // 0.7
+ quality : function(mimeType, ranges) {
+ return this.qualityParsed(mimeType, parseRanges(ranges));
+ },
+
+ // Takes a list of supported mime-types and finds the best
+ // match for all the media-ranges listed in header. The value of
+ // header must be a string that conforms to the format of the
+ // HTTP Accept: header. The value of 'supported' is a list of
+ // mime-types.
+ //
+ // >>> bestMatch(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
+ // 'text/xml'
+ bestMatch : function(supported, header) {
+ var parsedHeader = parseRanges(header);
+ var weighted = [];
+ for (var i=0; i < supported.length; i++) {
+ weighted.push([publicMethods.fitnessAndQualityParsed(supported[i], parsedHeader), i, supported[i]])
+ };
+ weighted.sort();
+ return weighted[weighted.length-1][0][1] ? weighted[weighted.length-1][2] : '';
+ }
+ }
+ return publicMethods;
+})();
diff --git a/1.1.x/share/server/render.js b/1.1.x/share/server/render.js
new file mode 100644
index 00000000..d207db41
--- /dev/null
+++ b/1.1.x/share/server/render.js
@@ -0,0 +1,352 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+
+var Mime = (function() {
+ // registerType(name, mime-type, mime-type, ...)
+ //
+ // Available in query server sandbox. TODO: The list is cleared on reset.
+ // This registers a particular name with the set of mimetypes it can handle.
+ // Whoever registers last wins.
+ //
+ // Example:
+ // registerType("html", "text/html; charset=utf-8");
+
+ var mimesByKey = {};
+ var keysByMime = {};
+ function registerType() {
+ var mimes = [], key = arguments[0];
+ for (var i=1; i < arguments.length; i++) {
+ mimes.push(arguments[i]);
+ };
+ mimesByKey[key] = mimes;
+ for (var i=0; i < mimes.length; i++) {
+ keysByMime[mimes[i]] = key;
+ };
+ }
+
+ // Some default types
+ // Ported from Ruby on Rails
+ // Build list of Mime types for HTTP responses
+ // http://www.iana.org/assignments/media-types/
+ // http://dev.rubyonrails.org/svn/rails/trunk/actionpack/lib/action_controller/mime_types.rb
+
+ registerType("all", "*/*");
+ registerType("text", "text/plain; charset=utf-8", "txt");
+ registerType("html", "text/html; charset=utf-8");
+ registerType("xhtml", "application/xhtml+xml", "xhtml");
+ registerType("xml", "application/xml", "text/xml", "application/x-xml");
+ registerType("js", "text/javascript", "application/javascript", "application/x-javascript");
+ registerType("css", "text/css");
+ registerType("ics", "text/calendar");
+ registerType("csv", "text/csv");
+ registerType("rss", "application/rss+xml");
+ registerType("atom", "application/atom+xml");
+ registerType("yaml", "application/x-yaml", "text/yaml");
+ // just like Rails
+ registerType("multipart_form", "multipart/form-data");
+ registerType("url_encoded_form", "application/x-www-form-urlencoded");
+ // http://www.ietf.org/rfc/rfc4627.txt
+ registerType("json", "application/json", "text/x-json");
+
+
+ var mimeFuns = [];
+ function provides(type, fun) {
+ Mime.providesUsed = true;
+ mimeFuns.push([type, fun]);
+ };
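+
+  // A minimal sketch of usage from a design-doc function (hypothetical):
+  //   provides("html", function() { return "<h1>hello</h1>"; });
+  // The callback matching the negotiated type is invoked by runProvides()
+  // below, and its return value becomes the response body.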
+
+ function resetProvides() {
+ // set globals
+ Mime.providesUsed = false;
+ mimeFuns = [];
+ Mime.responseContentType = null;
+ };
+
+ function runProvides(req, ddoc) {
+ var supportedMimes = [], bestFun, bestKey = null, accept = req.headers["Accept"];
+ if (req.query && req.query.format) {
+ bestKey = req.query.format;
+ Mime.responseContentType = mimesByKey[bestKey][0];
+ } else if (accept) {
+ // log("using accept header: "+accept);
+ mimeFuns.reverse().forEach(function(mimeFun) {
+ var mimeKey = mimeFun[0];
+ if (mimesByKey[mimeKey]) {
+ supportedMimes = supportedMimes.concat(mimesByKey[mimeKey]);
+ }
+ });
+ Mime.responseContentType = Mimeparse.bestMatch(supportedMimes, accept);
+ bestKey = keysByMime[Mime.responseContentType];
+ } else {
+ // just do the first one
+ bestKey = mimeFuns[0][0];
+ Mime.responseContentType = mimesByKey[bestKey][0];
+ }
+
+ if (bestKey) {
+ for (var i=0; i < mimeFuns.length; i++) {
+ if (mimeFuns[i][0] == bestKey) {
+ bestFun = mimeFuns[i][1];
+ break;
+ }
+ };
+ };
+
+ if (bestFun) {
+ return bestFun.call(ddoc);
+ } else {
+ var supportedTypes = mimeFuns.map(function(mf) {return mimesByKey[mf[0]].join(', ') || mf[0]});
+ throw(["error","not_acceptable",
+ "Content-Type "+(accept||bestKey)+" not supported, try one of: "+supportedTypes.join(', ')]);
+ }
+ };
+
+
+ return {
+ registerType : registerType,
+ provides : provides,
+ resetProvides : resetProvides,
+ runProvides : runProvides
+ }
+})();
+
+
+
+
+////
+//// Render dispatcher
+////
+////
+////
+////
+
+var Render = (function() {
+ var chunks = [];
+
+
+ // Start chunks
+ var startResp = {};
+ function start(resp) {
+ startResp = resp || {};
+ };
+
+ function sendStart() {
+ startResp = applyContentType((startResp || {}), Mime.responseContentType);
+ respond(["start", chunks, startResp]);
+ chunks = [];
+ startResp = {};
+ }
+
+ function applyContentType(resp, responseContentType) {
+ resp["headers"] = resp["headers"] || {};
+ if (responseContentType) {
+ resp["headers"]["Content-Type"] = resp["headers"]["Content-Type"] || responseContentType;
+ }
+ return resp;
+ }
+
+ function send(chunk) {
+ chunks.push(chunk.toString());
+ };
+
+ function blowChunks(label) {
+ respond([label||"chunks", chunks]);
+ chunks = [];
+ };
+
+ var gotRow = false, lastRow = false;
+ function getRow() {
+ if (lastRow) return null;
+ if (!gotRow) {
+ gotRow = true;
+ sendStart();
+ } else {
+ blowChunks();
+ }
+ var line = readline();
+ var json = eval('('+line+')');
+ if (json[0] == "list_end") {
+ lastRow = true;
+ return null;
+ }
+ if (json[0] != "list_row") {
+ throw(["fatal", "list_error", "not a row '" + json[0] + "'"]);
+ }
+ return json[1];
+ };
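+
+  // Illustrative list-function exchange (row values are hypothetical):
+  //   sent:     ["start", ["<ul>"], {"headers": {"Content-Type": "..."}}]
+  //   received: ["list_row", {"key": "a", "value": 1}]
+  //   sent:     ["chunks", ["<li>a</li>"]]
+  //   received: ["list_end"]
+  //   sent:     ["end", ["</ul>"]]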
+
+
+ function maybeWrapResponse(resp) {
+ var type = typeof resp;
+ if ((type == "string") || (type == "xml")) {
+ return {body:resp};
+ } else {
+ return resp;
+ }
+ };
+
+ // from http://javascript.crockford.com/remedial.html
+ function typeOf(value) {
+ var s = typeof value;
+ if (s === 'object') {
+ if (value) {
+ if (value instanceof Array) {
+ s = 'array';
+ }
+ } else {
+ s = 'null';
+ }
+ }
+ return s;
+ };
+
+ function isDocRequestPath(info) {
+ var path = info.path;
+ return path.length > 5;
+ };
+
+ function runShow(fun, ddoc, args) {
+ try {
+ resetList();
+ Mime.resetProvides();
+ var resp = fun.apply(ddoc, args) || {};
+
+ // handle list() style API
+ if (chunks.length && chunks.length > 0) {
+ resp = maybeWrapResponse(resp);
+ resp.headers = resp.headers || {};
+ for(var header in startResp) {
+ resp.headers[header] = startResp[header]
+ }
+ resp.body = chunks.join("") + (resp.body || "");
+ resetList();
+ }
+
+ if (Mime.providesUsed) {
+ resp = Mime.runProvides(args[1], ddoc);
+ resp = applyContentType(maybeWrapResponse(resp), Mime.responseContentType);
+ }
+
+ var type = typeOf(resp);
+ if (type == 'object' || type == 'string') {
+ respond(["resp", maybeWrapResponse(resp)]);
+ } else {
+ throw(["error", "render_error", "undefined response from show function"]);
+ }
+ } catch(e) {
+ if (args[0] === null && isDocRequestPath(args[1])) {
+ throw(["error", "not_found", "document not found"]);
+ } else {
+ renderError(e, fun.toSource());
+ }
+ }
+ };
+
+ function runUpdate(fun, ddoc, args) {
+ try {
+ var method = args[1].method;
+ // for analytics logging applications you might want to remove the next line
+ if (method == "GET") throw(["error","method_not_allowed","Update functions do not allow GET"]);
+ var result = fun.apply(ddoc, args);
+ var doc = result[0];
+ var resp = result[1];
+ var type = typeOf(resp);
+ if (type == 'object' || type == 'string') {
+ respond(["up", doc, maybeWrapResponse(resp)]);
+ } else {
+ throw(["error", "render_error", "undefined response from update function"]);
+ }
+ } catch(e) {
+ renderError(e, fun.toSource());
+ }
+ };
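+
+  // Illustrative (hypothetical): an update function returning
+  //   [{"_id": "a", "count": 1}, "bumped"]
+  // makes this respond with ["up", {"_id": "a", "count": 1}, {"body": "bumped"}].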
+
+ function resetList() {
+ gotRow = false;
+ lastRow = false;
+ chunks = [];
+ startResp = {};
+ };
+
+ function runList(listFun, ddoc, args) {
+ try {
+ Mime.resetProvides();
+ resetList();
+      var head = args[0];
+      var req = args[1];
+ var tail = listFun.apply(ddoc, args);
+
+ if (Mime.providesUsed) {
+ tail = Mime.runProvides(req, ddoc);
+ }
+ if (!gotRow) getRow();
+ if (typeof tail != "undefined") {
+ chunks.push(tail);
+ }
+ blowChunks("end");
+ } catch(e) {
+ renderError(e, listFun.toSource());
+ }
+ };
+
+ function renderError(e, funSrc) {
+ if (e.error && e.reason || e[0] == "error" || e[0] == "fatal") {
+ throw(e);
+ } else {
+ var logMessage = "function raised error: "+e.toSource()+" \nstacktrace: "+e.stack;
+ log(logMessage);
+ throw(["error", "render_error", logMessage]);
+ }
+ };
+
+ function escapeHTML(string) {
+ return string && string.replace(/&/g, "&amp;")
+ .replace(/</g, "&lt;")
+ .replace(/>/g, "&gt;");
+ };
+
+
+ return {
+ start : start,
+ send : send,
+ getRow : getRow,
+ show : function(fun, ddoc, args) {
+ // var showFun = Couch.compileFunction(funSrc);
+ runShow(fun, ddoc, args);
+ },
+ update : function(fun, ddoc, args) {
+ // var upFun = Couch.compileFunction(funSrc);
+ runUpdate(fun, ddoc, args);
+ },
+ list : function(fun, ddoc, args) {
+ runList(fun, ddoc, args);
+ }
+ };
+})();
+
+// send = Render.send;
+// getRow = Render.getRow;
+// start = Render.start;
+
+// Unused: this will be handled on the Erlang side of things.
+// function htmlRenderError(e, funSrc) {
+// var msg = ["<html><body><h1>Render Error</h1>",
+// "<p>JavaScript function raised error: ",
+// e.toString(),
+// "</p><h2>Stacktrace:</h2><code><pre>",
+// escapeHTML(e.stack),
+// "</pre></code><h2>Function source:</h2><code><pre>",
+// escapeHTML(funSrc),
+// "</pre></code></body></html>"].join('');
+// return {body:msg};
+// };
diff --git a/1.1.x/share/server/state.js b/1.1.x/share/server/state.js
new file mode 100644
index 00000000..e6416382
--- /dev/null
+++ b/1.1.x/share/server/state.js
@@ -0,0 +1,32 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var State = {
+ reset : function(config) {
+ // clear the globals and run gc
+ State.funs = [];
+ State.lib = null;
+ State.query_config = config || {};
+ init_sandbox();
+ gc();
+ print("true"); // indicates success
+ },
+ addFun : function(newFun) {
+ // Compile to a function and add it to funs array
+ State.funs.push(Couch.compileFunction(newFun, {views : {lib : State.lib}}));
+ print("true");
+ },
+ addLib : function(lib) {
+ State.lib = lib;
+ print("true");
+ }
+}
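+
+// Illustrative wire usage (values are hypothetical):
+//   received: ["add_lib", {"utils": "exports.one = 1;"}]
+//   sent:     true
+// The stored lib is then reachable from map functions compiled by addFun
+// via require("views/lib/utils").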
diff --git a/1.1.x/share/server/util.js b/1.1.x/share/server/util.js
new file mode 100644
index 00000000..e4386701
--- /dev/null
+++ b/1.1.x/share/server/util.js
@@ -0,0 +1,146 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var resolveModule = function(names, mod, root) {
+ if (names.length == 0) {
+ if (typeof mod.current != "string") {
+ throw ["error","invalid_require_path",
+ 'Must require a JavaScript string, not: '+(typeof mod.current)];
+ }
+ return {
+ current : mod.current,
+ parent : mod.parent,
+ id : mod.id,
+ exports : {}
+ }
+ }
+ // we need to traverse the path
+ var n = names.shift();
+ if (n == '..') {
+ if (!(mod.parent && mod.parent.parent)) {
+ throw ["error", "invalid_require_path", 'Object has no parent '+JSON.stringify(mod.current)];
+ }
+ return resolveModule(names, {
+ id : mod.id.slice(0, mod.id.lastIndexOf('/')),
+ parent : mod.parent.parent,
+ current : mod.parent.current
+ });
+ } else if (n == '.') {
+ if (!mod.parent) {
+ throw ["error", "invalid_require_path", 'Object has no parent '+JSON.stringify(mod.current)];
+ }
+ return resolveModule(names, {
+ parent : mod.parent,
+ current : mod.current,
+ id : mod.id
+ });
+ } else if (root) {
+ mod = {current : root};
+ }
+ if (!mod.current[n]) {
+ throw ["error", "invalid_require_path", 'Object has no property "'+n+'". '+JSON.stringify(mod.current)];
+ }
+ return resolveModule(names, {
+ current : mod.current[n],
+ parent : mod,
+ id : mod.id ? mod.id + '/' + n : n
+ });
+};
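+
+// Illustrative (hypothetical ddoc layout): given a design doc containing
+//   {"views": {"lib": {"utils": "exports.one = 1;"}}}
+// require("views/lib/utils") in Couch.compileFunction below walks the path
+// with resolveModule, evaluates the source once, and returns {one: 1}.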
+
+var Couch = {
+ // moving this away from global so we can move to json2.js later
+ toJSON : function (val) {
+ return JSON.stringify(val);
+ },
+ compileFunction : function(source, ddoc) {
+ if (!source) throw(["error","not_found","missing function"]);
+ try {
+ if (sandbox) {
+ if (ddoc) {
+ if (!ddoc._module_cache) {
+ ddoc._module_cache = {};
+ }
+ var require = function(name, module) {
+ module = module || {};
+ var newModule = resolveModule(name.split('/'), module.parent, ddoc);
+ if (!ddoc._module_cache.hasOwnProperty(newModule.id)) {
+ // create empty exports object before executing the module,
+ // stops circular requires from filling the stack
+ ddoc._module_cache[newModule.id] = {};
+ var s = "function (module, exports, require) { " + newModule.current + " }";
+ try {
+ var func = sandbox ? evalcx(s, sandbox) : eval(s);
+ func.apply(sandbox, [newModule, newModule.exports, function(name) {
+ return require(name, newModule);
+ }]);
+ } catch(e) {
+ throw ["error","compilation_error","Module require('"+name+"') raised error "+e.toSource()];
+ }
+ ddoc._module_cache[newModule.id] = newModule.exports;
+ }
+ return ddoc._module_cache[newModule.id];
+ }
+ sandbox.require = require;
+ }
+ var functionObject = evalcx(source, sandbox);
+ } else {
+ var functionObject = eval(source);
+ }
+ } catch (err) {
+ throw(["error", "compilation_error", err.toSource() + " (" + source + ")"]);
+ };
+ if (typeof(functionObject) == "function") {
+ return functionObject;
+ } else {
+ throw(["error","compilation_error",
+ "Expression does not eval to a function. (" + source.toSource() + ")"]);
+ };
+ },
+ recursivelySeal : function(obj) {
+ // seal() is broken in current Spidermonkey
+ try {
+ seal(obj);
+ } catch (x) {
+ // Sealing of arrays broken in some SpiderMonkey versions.
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=449657
+ }
+ for (var propname in obj) {
+ if (typeof obj[propname] == "object") {
+ arguments.callee(obj[propname]);
+ }
+ }
+ }
+}
+
+// prints the object as JSON, and rescues and logs any toJSON() related errors
+function respond(obj) {
+ try {
+ print(Couch.toJSON(obj));
+ } catch(e) {
+ log("Error converting object to JSON: " + e.toString());
+ log("error on obj: "+ obj.toSource());
+ }
+};
+
+function log(message) {
+ // idea: query_server_config option for log level
+ if (typeof message == "xml") {
+ message = message.toXMLString();
+ } else if (typeof message != "string") {
+ message = Couch.toJSON(message);
+ }
+ respond(["log", String(message)]);
+};
+
+function isArray(obj) {
+ return toString.call(obj) === "[object Array]";
+}
diff --git a/1.1.x/share/server/validate.js b/1.1.x/share/server/validate.js
new file mode 100644
index 00000000..76a14129
--- /dev/null
+++ b/1.1.x/share/server/validate.js
@@ -0,0 +1,22 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var Validate = {
+ validate : function(fun, ddoc, args) {
+ try {
+ fun.apply(ddoc, args);
+ print("1");
+ } catch (error) {
+ respond(error);
+ }
+ }
+};
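+
+// Illustrative (hypothetical): a validate_doc_update function that throws
+// {forbidden: "read only"} causes respond({forbidden: "read only"}) to be
+// sent instead of "1"; returning normally prints "1" to signal acceptance.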
diff --git a/1.1.x/share/server/views.js b/1.1.x/share/server/views.js
new file mode 100644
index 00000000..2a15ee56
--- /dev/null
+++ b/1.1.x/share/server/views.js
@@ -0,0 +1,126 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+
+
+var Views = (function() {
+
+ var map_results = []; // holds temporary emitted values during doc map
+
+ function runReduce(reduceFuns, keys, values, rereduce) {
+ for (var i in reduceFuns) {
+ reduceFuns[i] = Couch.compileFunction(reduceFuns[i]);
+ };
+ var reductions = new Array(reduceFuns.length);
+ for(var i = 0; i < reduceFuns.length; i++) {
+ try {
+ reductions[i] = reduceFuns[i](keys, values, rereduce);
+ } catch (err) {
+ handleViewError(err);
+ // if the error is not fatal, ignore the results and continue
+ reductions[i] = null;
+ }
+ };
+ var reduce_line = Couch.toJSON(reductions);
+ var reduce_length = reduce_line.length;
+ // TODO make reduce_limit config into a number
+ if (State.query_config && State.query_config.reduce_limit &&
+ reduce_length > 200 && ((reduce_length * 2) > State.line_length)) {
+ var reduce_preview = "Current output: '"+(reduce_line.substring(0,100) + "'... (first 100 of "+reduce_length+" bytes)");
+ throw(["error",
+ "reduce_overflow_error",
+ "Reduce output must shrink more rapidly: "+reduce_preview]);
+ } else {
+ print("[true," + reduce_line + "]");
+ }
+ };
+
+ function handleViewError(err, doc) {
+ if (err == "fatal_error") {
+ // Only if it's a "fatal_error" do we exit. What's a fatal error?
+ // That's for the query to decide.
+ //
+ // This will make it possible for queries to completely error out,
+ // by catching their own local exception and rethrowing a
+ // fatal_error. But by default if they don't do error handling we
+ // just eat the exception and carry on.
+ //
+ // In this case we abort map processing but don't destroy the
+ // JavaScript process. If you need to destroy the JavaScript
+ // process, throw the error form matched by the block below.
+ throw(["error", "map_runtime_error", "function raised 'fatal_error'"]);
+ } else if (err[0] == "fatal") {
+ // Throwing errors of the form ["fatal","error_key","reason"]
+ // will kill the OS process. This is not normally what you want.
+ throw(err);
+ }
+ var message = "function raised exception " + err.toSource();
+ if (doc) message += " with doc._id " + doc._id;
+ log(message);
+ };
+
+ return {
+ // view helper functions
+ emit : function(key, value) {
+ map_results.push([key, value]);
+ },
+ sum : function(values) {
+ var rv = 0;
+ for (var i in values) {
+ rv += values[i];
+ }
+ return rv;
+ },
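+    // Illustrative (hypothetical rows): calling
+    //   Views.reduce(["function(keys, values) { return sum(values); }"],
+    //                [["a", 1], ["b", 2]])
+    // prints [true,[3]].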
+ reduce : function(reduceFuns, kvs) {
+ var keys = new Array(kvs.length);
+ var values = new Array(kvs.length);
+ for(var i = 0; i < kvs.length; i++) {
+ keys[i] = kvs[i][0];
+ values[i] = kvs[i][1];
+ }
+ runReduce(reduceFuns, keys, values, false);
+ },
+ rereduce : function(reduceFuns, values) {
+ runReduce(reduceFuns, null, values, true);
+ },
+ mapDoc : function(doc) {
+ // Compute all the map functions against the document.
+ //
+ // Each function can output multiple key/value pairs for each document.
+ //
+ // Example output of map_doc after three functions set by add_fun cmds:
+ // [
+ // [["Key","Value"]], <- fun 1 returned 1 key value
+ // [], <- fun 2 returned 0 key values
+ // [["Key1","Value1"],["Key2","Value2"]] <- fun 3 returned 2 key values
+ // ]
+ //
+
+ Couch.recursivelySeal(doc);
+
+ var buf = [];
+ for (var i = 0; i < State.funs.length; i++) {
+ map_results = [];
+ try {
+ State.funs[i](doc);
+ buf.push(Couch.toJSON(map_results));
+ } catch (err) {
+ handleViewError(err, doc);
+ // If the error is not fatal, we treat the doc as if it
+ // did not emit anything, by buffering an empty array.
+ buf.push("[]");
+ }
+ }
+ print("[" + buf.join(", ") + "]");
+ }
+ }
+})();
diff --git a/1.1.x/share/www/_sidebar.html b/1.1.x/share/www/_sidebar.html
new file mode 100644
index 00000000..563a85c8
--- /dev/null
+++ b/1.1.x/share/www/_sidebar.html
@@ -0,0 +1,59 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<div id="sidebar">
+ <a id="sidebar-toggle" href="#" title="Hide Sidebar"></a>
+ <a href="index.html">
+ <img id="logo" src="image/logo.png" width="175" height="150" alt="Apache CouchDB: Relax">
+ </a>
+ <ul id="nav">
+ <li><span>Tools</span><ul>
+ <li><a href="index.html">Overview</a></li>
+ <li><a href="config.html">Configuration</a></li>
+ <li><a href="replicator.html">Replicator</a></li>
+ <li><a href="status.html">Status</a></li>
+ <li><a href="couch_tests.html?script/couch_tests.js">Test Suite</a></li>
+ </ul></li>
+ <li><span>Recent Databases</span>
+ <ul id="dbs"></ul>
+ </li>
+ </ul>
+ <div id="footer">
+ <span id="userCtx">
+ <span class="loggedout">
+ <a href="#" class="signup">Signup</a> or <a href="#" class="login">Login</a>
+ </span>
+ <span class="loggedin">
+ Welcome <a class="name">?</a>!
+ <br/>
+ <span class="loggedinadmin">
+ <a href="#" class="createadmin">Setup more admins</a> or
+ <br/>
+ </span>
+ <a href="#" class="changepass">Change password</a> or
+ <a href="#" class="logout">Logout</a>
+ </span>
+ <span class="adminparty">
+ Welcome to Admin Party!
+ <br/>
+ Everyone is admin. <a href="#" class="createadmin">Fix this</a>
+ </span>
+ </span>
+ <hr/>
+ <span class="couch">
+ Futon on <a href="http://couchdb.apache.org/">Apache CouchDB</a>
+ <span id="version">?</span>
+ </span>
+ </div>
+</div>
diff --git a/1.1.x/share/www/config.html b/1.1.x/share/www/config.html
new file mode 100644
index 00000000..9863c8b5
--- /dev/null
+++ b/1.1.x/share/www/config.html
@@ -0,0 +1,135 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Configuration</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css?0.11.0" type="text/css">
+ <script src="script/json2.js"></script>
+ <script src="script/sha1.js"></script>
+ <script src="script/jquery.js?1.4.2"></script>
+ <script src="script/jquery.couch.js?0.11.0"></script>
+ <script src="script/jquery.dialog.js?0.11.0"></script>
+ <script src="script/futon.js?0.11.0"></script>
+ <script src="script/jquery.editinline.js?0.11.0"></script>
+ <script>
+ $(function() {
+ $.couch.config({
+ success: function(resp) {
+ var sections = [];
+ for (var sectionName in resp) {
+ sections.push(sectionName);
+ }
+ sections.sort();
+ $.each(sections, function(idx, sectionName) {
+ var row = $("<tr><th></th></tr>")
+ .find("th").text(sectionName).end()
+ .appendTo("#config tbody.content");
+            var section = resp[sectionName];
+ var options = [];
+ for (var option in section) {
+ options.push(option);
+ }
+ options = options.sort();
+ var prev = null;
+ $.each(options, function(idx, optionName) {
+ var cur = idx == 0 ? row : $("<tr></tr>");
+ $("<td class='name' section="+sectionName+"><b></b></td>")
+ .find("b").text(optionName).end().appendTo(cur);
+ $("<td class='value'><code></code></td>")
+ .find("code").text(section[optionName]).end().appendTo(cur);
+ cur.data("section", sectionName).data("option", optionName);
+ if (cur !== row) cur.insertAfter(prev);
+ prev = cur;
+ });
+ row.find("th").attr("rowspan", options.length);
+ });
+ $("#config tbody tr").removeClass("odd").filter(":odd").addClass("odd");
+ $("#config tbody td.value code").makeEditable({
+ accept: function(newValue) {
+ var row = $(this).parents("tr").eq(0);
+ $.couch.config({
+ success: function(resp) {
+ row.find("td.value code").text(newValue);
+ }}, row.data("section"), row.data("option"), newValue);
+ }
+ }).parent().parent()
+          .append($('<td><div style="text-align:center;"><a class="remove" href="#remove">x</a></div></td>'))
+ .click(function (ev) {
+            // A listener further down stops all events below this element,
+            // which is why the remove link has to be handled here.
+ var n = $(ev.target).parent().parent().parent();
+ if ($(ev.target).attr('href') === "#remove" ) {
+ $.couch.config({ success: function () {location = location.href.split('#')[0];} }
+ , n.find('td.name').attr("section"), n.find('td.name').text(), null);
+ }
+ })
+ var add = $('<a href="#add">Add a new section</a>').click(function () {
+ $.showDialog("dialog/_create_config.html", {
+ submit: function(data, callback) {
+ var fail = false;
+ if (!data.section || data.section.length == 0) {
+ callback({section: "Please enter a section."});
+ fail = true;
+ }
+ if (!data.option || data.option.length == 0) {
+              callback({option: "Please enter an option."});
+ fail = true;
+ }
+ if (!data.val || data.val.length == 0) {
+ callback({val: "Please enter a value."});
+ fail = true;
+ }
+ if (fail) {return}
+ $.couch.config({ success: function () {callback();location = location.href.split('#')[0]} }
+ , data.section, data.option, data.val);
+ }
+ });
+        });
+        $("div#content").append(add);
+ }
+ });
+
+ });
+ </script>
+ </head>
+ <body><div id="wrap">
+ <h1>
+ <a href="index.html">Overview</a>
+ <strong>Configuration</strong>
+ </h1>
+ <div id="content">
+ <p class="help">
+        <strong>Note:</strong> Changes to some configuration options only
+        take effect after the server has been restarted.
+ </p>
+ <p class="help">
+ For the strongest consistency guarantees, <tt>delayed_commits</tt> should be set to <tt>false</tt>. The default value of <tt>true</tt> is designed for single-user performance. For more details see <a href="http://wiki.apache.org/couchdb/Durability_Matrix">a discussion of durability on the CouchDB wiki</a>.
+ </p>
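+      <!-- A minimal sketch of changing the option from client-side code,
+      using the same $.couch.config(options, section, option, value) call
+      this page's inline editor uses (jquery.couch.js JSON-encodes the
+      value before sending it):
+
+        $.couch.config({
+          success: function() { alert("delayed_commits updated"); }
+        }, "couchdb", "delayed_commits", "false");
+      -->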
+ <table id="config" class="listing" cellspacing="0">
+ <caption>Configuration</caption>
+ <thead><tr>
+ <th>Section</th>
+ <th>Option</th>
+ <th>Value</th>
+ <th>Delete</th>
+ </tr></thead>
+ <tbody class="content"></tbody>
+ </table>
+
+ </div>
+ </div></body>
+</html>
diff --git a/1.1.x/share/www/couch_tests.html b/1.1.x/share/www/couch_tests.html
new file mode 100644
index 00000000..f10bad23
--- /dev/null
+++ b/1.1.x/share/www/couch_tests.html
@@ -0,0 +1,98 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Test Suite</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css?0.11.0" type="text/css">
+ <script src="script/json2.js"></script>
+ <script src="script/sha1.js"></script>
+ <script src="script/jquery.js?1.4.2"></script>
+ <script src="script/jquery.couch.js?0.11.0"></script>
+ <script src="script/jquery.dialog.js?0.11.0"></script>
+ <script src="script/futon.js?0.11.0"></script>
+ <script src="script/couch.js?0.11.0"></script>
+ <script src="script/couch_test_runner.js?0.11.0"></script>
+ <script>
+ $(function() {
+ updateTestsListing();
+ $("#toolbar button.run").click(function() {
+ setupAdminParty(runAllTests) ;
+ });
+ $("#toolbar button.load").click(function() {
+ location.reload(true);
+ });
+ $("#toolbar button.share").click(function() {
+ $.showDialog("dialog/_share_test_reports.html", {
+ submit: function(data, callback) {
+ $.couch.replicate("test_suite_reports", "http://couchdb.couchdb.org/test_suite_reports");
+ callback();
+ }
+ });
+ });
+ $("#toolbar button.add").click(function() {
+ location = "custom_test.html";
+ });
+ });
+ var testsPath = document.location.toString().split('?')[1];
+ loadScript(testsPath||"script/couch_tests.js");
+ </script>
+ </head>
+ <body><div id="wrap">
+ <h1>
+ <a href="index.html">Overview</a>
+ <strong>Test Suite</strong>
+ </h1>
+ <div id="content">
+ <ul id="toolbar">
+ <li><button class="run">Run All</button></li>
+ <li><button class="load">Reload</button></li>
+ <li><button class="share">Share Test Reports</button></li>
+ <li><button class="add">Custom Test</button></li>
+ <li class="current"></li>
+ </ul>
+ <p class="help">
+        <strong>Note:</strong> Each test blocks the browser. If the
+        connection to your CouchDB server is slow, the tests will take some
+        time to run, and you won't be able to do much with your browser while
+        a test is executing. <strong>Also:</strong> The test suite is designed
+        to work with Firefox (with Firebug disabled). Patches that improve
+        compatibility with other browsers are welcome, but official support is
+        for Firefox (latest stable version) only.
+ </p>
+
+ <table class="listing" id="tests" cellspacing="0">
+ <caption>Tests</caption>
+ <thead>
+ <tr>
+ <th class="name">Name</th>
+ <th class="status">Status</th>
+ <th class="duration">Elapsed Time</th>
+ <th class="details">Details</th>
+ </tr>
+ </thead>
+ <tbody class="content">
+ </tbody>
+ <tbody class="footer">
+ <tr>
+ <td colspan="4"></td>
+ </tr>
+ </tbody>
+ </table>
+
+ </div>
+ </div></body>
+</html>
diff --git a/1.1.x/share/www/custom_test.html b/1.1.x/share/www/custom_test.html
new file mode 100644
index 00000000..2566a000
--- /dev/null
+++ b/1.1.x/share/www/custom_test.html
@@ -0,0 +1,112 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Custom Test</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css?0.11.0" type="text/css">
+ <script src="script/json2.js"></script>
+ <script src="script/sha1.js"></script>
+ <script src="script/jquery.js?1.4.2"></script>
+ <script src="script/jquery.couch.js?0.11.0"></script>
+ <script src="script/jquery.dialog.js?0.11.0"></script>
+ <script src="script/futon.js?0.11.0"></script>
+ <script src="script/jquery.resizer.js?0.11.0"></script>
+ <script src="script/couch.js?0.11.0"></script>
+ <script src="script/couch_test_runner.js?0.11.0"></script>
+ <script src="script/couch_tests.js"></script>
+ <script>
+ function T(arg, desc) {
+ if(!arg) {
+ mesg = "Assertion failed" + (desc ? ": " + desc : "");
+ throw new Error(mesg);
+ }
+ }
+
+ function TEquals(expect, found, descr) {
+ var mesg = "expected '" + expect + "', got '" + found + "' " + descr;
+ T(expect === found, mesg);
+ }
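+      // Example usage in a custom test, assuming the CouchDB.request()
+      // helper from couch.js (values illustrative):
+      //
+      //   var xhr = CouchDB.request("GET", "/");
+      //   TEquals(200, xhr.status, "the root URL should respond");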
+
+ $.futon.navigation.ready(function() {
+ this.updateSelection(
+ location.pathname.replace(/custom_test\.html/, "couch_tests.html"),
+ "?script/couch_tests.js");
+ });
+
+ $(function() {
+ $("#status").removeClass("failure").removeClass("success");
+ $("#viewcode textarea").enableTabInsertion().makeResizable({
+ always: true,
+ grippie: $("#viewcode .bottom"),
+ vertical: true
+ });
+ $("#viewcode button.run").click(function() {
+ $("#status").removeClass("failure").removeClass("success");
+ var code = $("#code").val();
+ try {
+ var couchTests = {};
+ var debug = false;
+ code = eval(code);
+ $.each(couchTests, function(elm) {
+ couchTests[elm](debug);
+ });
+ } catch(e) {
+ alert("" + e);
+ $("#status").text("failure").addClass("failure");
+ return false;
+ }
+ $("#status").text("success").addClass("success");
+ return false;
+ });
+ });
+ </script>
+ </head>
+ <body><div id="wrap">
+ <h1>
+ <a href="index.html">Overview</a>
+ <a class="dbname" href="couch_tests.html">Test Suite</a>
+ <strong>Custom Test</strong>
+ </h1>
+
+ <div id="content">
+ <div id="viewcode">
+ <div class="top">
+ <span>Test Function</span>
+ </div>
+ <table summary="Custom Test Function" cellspacing="0"><tr>
+ <td class="code">
+ <textarea name="code" id="code" rows="18" cols="120">
+couchTests.custom_test = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+
+ if (debug) debugger;
+
+ alert("You can start writing your test now.");
+};
+</textarea>
+ </td>
+ </tr></table>
+ <div class="bottom">
+ <button class="run" type="button">Run</button>
+ <span id="status">&nbsp;&nbsp;</span>
+ </div>
+ </div>
+ </div>
+ </div></body>
+</html>
diff --git a/1.1.x/share/www/database.html b/1.1.x/share/www/database.html
new file mode 100644
index 00000000..9a9f121e
--- /dev/null
+++ b/1.1.x/share/www/database.html
@@ -0,0 +1,267 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Browse Database</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css?0.11.0" type="text/css">
+ <script src="script/json2.js"></script>
+ <script src="script/sha1.js"></script>
+ <script src="script/jquery.js?1.4.2"></script>
+ <script src="script/jquery.couch.js?0.11.0"></script>
+ <script src="script/jquery.dialog.js?0.11.0"></script>
+ <script src="script/futon.js?0.11.0"></script>
+ <script src="script/jquery.resizer.js?0.11.0"></script>
+ <script src="script/jquery.suggest.js?0.11.0"></script>
+ <script src="script/futon.browse.js?0.11.0"></script>
+ <script src="script/futon.format.js?0.11.0"></script>
+ <script>
+ var page = new $.futon.CouchDatabasePage();
+ $.futon.navigation.ready(function() {
+ this.addDatabase(page.db.name);
+ this.updateSelection(location.pathname, "?" + page.db.name);
+ });
+
+ $(function() {
+ if (page.redirecting) return;
+ $("h1 strong").text(page.db.name);
+ var viewPath = page.viewName || "_all_docs";
+ if (viewPath != "_temp_view" && viewPath != "_design_docs") {
+ viewPath = $.map(viewPath.split("/"), function (part) {
+ return encodeURIComponent(part);
+ }).join("/");
+
+ $("h1 a.raw").attr("href", "/" + encodeURIComponent(page.db.name) +
+ "/" + viewPath);
+ }
+
+ $("#viewcode span").click(function() {
+ $("#viewcode").toggleClass("collapsed");
+ });
+ $("#viewcode button.run").click(function() {
+ page.updateDocumentListing();
+ });
+ $("#viewcode button.revert").click(function() {
+ page.revertViewChanges();
+ });
+ $("#viewcode button.save").click(function() {
+ page.saveViewChanges();
+ });
+ $("#viewcode button.saveas").click(function() {
+ page.saveViewAs();
+ });
+ $("#viewcode textarea").makeResizable({
+ always: true,
+ grippie: $("#viewcode .bottom"),
+ vertical: true
+ });
+ $("#viewcode td.map").makeResizable({
+ always: true,
+ grippie: $("#viewcode td.splitter"),
+ horizontal: true
+ });
+
+ // Restore preferences/state
+ $("#documents thead th.key").toggleClass("desc", !!$.futon.storage.get("desc"));
+ var reduce = !!$.futon.storage.get("reduce");
+ $("#reduce :checkbox")[0].checked = reduce;
+ $("#grouplevel select").val(parseInt($.futon.storage.get("group_level")));
+ $("#grouplevel").toggleClass("disabled", !reduce).find("select").each(function() {
+ this.disabled = !reduce;
+ });
+
+ $("#perpage").val(parseInt($.futon.storage.get("per_page")));
+
+ var staleViews = !!$.futon.storage.get("stale");
+ $("#staleviews :checkbox")[0].checked = staleViews;
+
+ page.populateViewsMenu();
+ page.populateViewEditor();
+ if (page.isTempView) {
+ $("#viewcode").show().removeClass("collapsed").find("textarea")[0].focus();
+ $("#documents").hide();
+ }
+
+ $("#switch select").change(function() {
+ var viewName = $(this).val();
+ if (!viewName) $.futon.storage.del("view");
+ location.href = "?" + encodeURIComponent(page.db.name) +
+ (viewName ? "/" + viewName : "");
+ });
+ $("#staleviews :checkbox").click(function() {
+ $.futon.storage.set("stale", this.checked);
+ });
+ $("#documents thead th.key span").click(function() {
+ $(this).closest("th").toggleClass("desc");
+ page.updateDocumentListing();
+ });
+ $("#grouplevel select").change(function() {
+ page.updateDocumentListing();
+ $.futon.storage.set("group_level", this.value);
+ });
+ $("#reduce :checkbox").click(function() {
+ page.updateDocumentListing();
+ var cb = this;
+ $("#grouplevel").toggleClass("disabled", !cb.checked).find("select").each(function() {
+ this.disabled = !cb.checked;
+ });
+ $.futon.storage.set("reduce", this.checked);
+ });
+ $("#perpage").change(function() {
+ page.updateDocumentListing();
+ $.futon.storage.set("per_page", this.value);
+ });
+ $("#toolbar button.add").click(page.newDocument);
+ $("#toolbar button.compact").click(page.compactAndCleanup);
+ $("#toolbar button.delete").click(page.deleteDatabase);
+ $("#toolbar button.security").click(page.databaseSecurity);
+
+ $('#jumpto input').suggest(function(text, callback) {
+ page.db.allDocs({
+ limit: 10, startkey: text, endkey: text + 'zzz',
+ success: function(docs) {
+ var matches = [];
+ for (var i = 0; i < docs.rows.length; i++) {
+ if (docs.rows[i].id.indexOf(text) == 0) {
+                  matches.push(docs.rows[i].id);
+ }
+ }
+ callback(matches);
+ }
+ });
+ }).keypress(function(e) {
+ if (e.keyCode == 13) {
+ page.jumpToDocument($(this).val());
+ }
+ });
+ });
+ </script>
+ </head>
+
+ <body><div id="wrap">
+ <h1>
+ <a href="index.html">Overview</a>
+ <strong>?</strong>
+ <a class="raw" title="Raw view"></a>
+ </h1>
+ <div id="content">
+ <div id="staleviews">
+ <label>Stale views
+ <input name="staleviews" type="checkbox" />
+ </label>
+ </div>
+ <div id="switch">
+        <label>View: <select autocomplete="off">
+ <option value="_all_docs">All documents</option>
+ <option value="_design_docs">Design documents</option>
+ <option value="_temp_view">Temporary view…</option>
+ </select></label>
+ </div>
+ <div id="jumpto">
+ <label>Jump to:
+ <input type="text" name="docid" placeholder="Document ID" autocomplete="off" />
+ </label>
+ </div>
+ <ul id="toolbar">
+ <li><button class="add">New Document</button></li>
+ <li><button class="security">Security…</button></li>
+ <li><button class="compact">Compact &amp; Cleanup…</button></li>
+ <li><button class="delete">Delete Database…</button></li>
+ </ul>
+
+ <div id="viewcode" class="collapsed" style="display: none">
+ <div class="top">
+ <a id="designdoc-link"></a>
+ <span id="view-toggle">View Code</span>
+ </div>
+ <table summary="View functions" cellspacing="0"><tr>
+ <td class="code map">
+ <label for="viewcode_map">Map Function:</label>
+ <textarea id="viewcode_map" class="map" rows="5" cols="20" spellcheck="false" wrap="off"></textarea>
+ </td>
+ <td class="splitter"></td>
+ <td class="code reduce">
+ <label for="viewcode_reduce">Reduce Function (optional):</label>
+ <textarea id="viewcode_reduce" class="reduce" rows="5" cols="20" spellcheck="false" wrap="off"></textarea>
+ </td>
+ </tr></table>
+ <div class="bottom">
+ <button class="save" type="button" disabled>Save</button>
+ <button class="saveas" type="button">Save As…</button>
+ <button class="revert" type="button" disabled>Revert</button>
+ <button class="run" type="button">Run</button>
+ <label>Language: <select id="language"></select></label>
+ </div>
+ </div>
+ <p id="tempwarn">
+        <strong>Warning</strong>: Temporary views are not suitable for
+        production use, as they are very slow on any database with more than
+        a few dozen documents. You can use a temporary view to experiment
+        with view functions, but switch to a permanent view before using it
+        in an application.
+ </p>
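+      <!-- The permanent equivalent of a temporary view is a map (and
+      optional reduce) function stored in a design document; a minimal
+      sketch with made-up names:
+
+        {
+          "_id": "_design/examples",
+          "views": {
+            "by_key": { "map": "function(doc) { emit(doc.key, null); }" }
+          }
+        }
+
+      which is then queried at /dbname/_design/examples/_view/by_key. -->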
+
+ <table id="documents" class="listing" cellspacing="0">
+ <caption>Documents</caption>
+ <thead>
+ <tr>
+ <th class="key">
+ <label id="grouplevel">
+ Grouping: <select>
+ <option value="0">none</option>
+ <option value="1">level 1</option>
+ <option value="2">level 2</option>
+ <option value="3">level 3</option>
+ <option value="4">level 4</option>
+ <option value="5">level 5</option>
+ <option value="6">level 6</option>
+ <option value="7">level 7</option>
+ <option value="8">level 8</option>
+ <option value="9">level 9</option>
+ <option value="100" selected>exact</option>
+ </select>
+ </label>
+ <span>Key</span>
+ </th>
+ <th class="value">
+ <label id="reduce"><input type="checkbox" autocomplete="off" checked> Reduce</label>
+ Value
+ </th>
+ </tr>
+ </thead>
+ <tbody class="content">
+ </tbody>
+ <tbody class="footer">
+ <tr>
+ <td colspan="4">
+ <div id="paging">
+ <a class="prev">← Previous Page</a> |
+ <label>Rows per page: <select id="perpage">
+ <option selected>10</option>
+ <option>25</option>
+ <option>50</option>
+ <option>100</option>
+ </select></label> |
+ <a class="next">Next Page →</a>
+ </div>
+ <span></span>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+ </div></body>
+</html>
diff --git a/1.1.x/share/www/dialog/_admin_party.html b/1.1.x/share/www/dialog/_admin_party.html
new file mode 100644
index 00000000..ea9fb15a
--- /dev/null
+++ b/1.1.x/share/www/dialog/_admin_party.html
@@ -0,0 +1,33 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Admin Party!</h2>
+ <fieldset>
+ <p class="help">
+      The test suite requires CouchDB to be in <em>Admin Party</em> mode. This
+      mode gives all users admin capabilities and is the least secure mode of
+      operation. Do not run the tests on production servers, as you'll impact
+ both performance and security.
+ </p>
+ <p class="help">
+ Clicking “Remove Admins” will remove all admins from the configuration. You will
+ have to recreate any admins by hand after the tests have finished.
+ </p>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Remove Admins</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_change_password.html b/1.1.x/share/www/dialog/_change_password.html
new file mode 100644
index 00000000..40601d9a
--- /dev/null
+++ b/1.1.x/share/www/dialog/_change_password.html
@@ -0,0 +1,31 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Change Password</h2>
+ <fieldset>
+ <table summary=""><tbody><tr>
+ <th><label>New Password:</label></th>
+ <td><input type="password" name="password" size="24" /></td>
+ </tr><tr>
+ <th><label>Verify New Password:</label></th>
+ <td><input type="password" name="verify_password" size="24" /></td>
+ </tr>
+ </tbody></table>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Login</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_compact_cleanup.html b/1.1.x/share/www/dialog/_compact_cleanup.html
new file mode 100644
index 00000000..506417f4
--- /dev/null
+++ b/1.1.x/share/www/dialog/_compact_cleanup.html
@@ -0,0 +1,51 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Compact &amp; Cleanup</h2>
+ <fieldset class="radiogroup">
+ <label>
+ <input type="radio" name="action" value="compact_database" checked>
+ Compact Database
+ </label>
+ <p class="help">
+ Compacting a database removes deleted documents and previous revisions.
+ It is an <strong>irreversible operation</strong> and may take
+ a while to complete for large databases.
+ </p>
+ <hr>
+ <label>
+ <input type="radio" name="action" value="compact_views">
+ Compact Views
+ </label>
+ <p class="help">
+ View compaction will affect all views in this design document. This
+ operation may take some time to complete. Your views will still operate
+ normally during compaction.
+ </p>
+ <hr>
+ <label>
+ <input type="radio" name="action" value="view_cleanup">
+ Cleanup Views
+ </label>
+ <p class="help">
+ Cleaning up views in a database removes old view files still stored
+ on the filesystem. It is an <strong>irreversible operation</strong>.
+ </p>
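+    <!-- For reference, these three choices correspond to the HTTP API
+    (sketched for a database named "dbname"; the design document segment
+    is a placeholder and omits the _design/ prefix):
+
+      POST /dbname/_compact
+      POST /dbname/_compact/designdocname
+      POST /dbname/_view_cleanup
+
+    typically sent with a Content-Type: application/json header. -->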
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Run</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_create_admin.html b/1.1.x/share/www/dialog/_create_admin.html
new file mode 100644
index 00000000..d4aec95a
--- /dev/null
+++ b/1.1.x/share/www/dialog/_create_admin.html
@@ -0,0 +1,50 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Create Server Admin</h2>
+ <fieldset>
+ <p class="help">
+ Before a server admin is configured, all clients have admin privileges.
+ This is fine when HTTP access is restricted
+ to trusted users. <strong>If end-users will be accessing this CouchDB, you must
+ create an admin account to prevent accidental (or malicious) data loss.</strong>
+ </p>
+ <p class="help">Server admins can create and destroy databases, install
+ and update _design documents, run the test suite, and edit all aspects of CouchDB
+ configuration.
+ </p>
+ <table summary=""><tbody><tr>
+ <th><label>Username:</label></th>
+ <td><input type="text" name="name" size="24"></td>
+ </tr><tr>
+ <th><label>Password:</label></th>
+ <td><input type="password" name="password" size="24"></td>
+ </tr>
+ </tbody></table>
+ <p class="help">Non-admin users have read and write access to all databases, which
+ are controlled by validation functions. CouchDB can be configured to block all
+ access to anonymous users.
+ </p>
+ <h3>About Authentication</h3>
+ <p class="help">
+      Couch has a pluggable authentication mechanism. Futon exposes a user-friendly cookie auth which handles login and logout, so app developers can relax. Just use <tt>$.couch.session()</tt> to load the current user's info.
+ </p>
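+    <!-- A minimal sketch of reading the session; field names follow the
+    _session API:
+
+      $.couch.session({
+        success: function(resp) {
+          var name = resp.userCtx.name;   // null when not logged in
+          var roles = resp.userCtx.roles; // e.g. ["_admin"]
+        }
+      });
+    -->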
+
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Create</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_create_config.html b/1.1.x/share/www/dialog/_create_config.html
new file mode 100644
index 00000000..79e08b08
--- /dev/null
+++ b/1.1.x/share/www/dialog/_create_config.html
@@ -0,0 +1,42 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Create New Config Option</h2>
+ <fieldset>
+ <p class="help">
+ Please enter the section, option, and value.
+ </p>
+ <table summary="">
+ <tbody>
+ <tr>
+ <th><label>section:</label></th>
+ <td><input type="text" name="section" size="24"></td>
+ </tr>
+ <tr>
+ <th><label>option:</label></th>
+ <td><input type="text" name="option" size="24"></td>
+ </tr>
+ <tr>
+ <th><label>value:</label></th>
+ <td><input type="text" name="val" size="24"></td>
+ </tr>
+ </tbody>
+ </table>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Create</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_create_database.html b/1.1.x/share/www/dialog/_create_database.html
new file mode 100644
index 00000000..74e7ea61
--- /dev/null
+++ b/1.1.x/share/www/dialog/_create_database.html
@@ -0,0 +1,33 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Create New Database</h2>
+ <fieldset>
+ <p class="help">
+      Please enter the name of the database. Note that only lowercase
+      letters (<tt>a-z</tt>), digits (<tt>0-9</tt>), and the
+      characters <tt>_</tt>, <tt>$</tt>, <tt>(</tt>, <tt>)</tt>, <tt>+</tt>,
+      <tt>-</tt>, and <tt>/</tt> are allowed.
+ </p>
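+    <!-- As a sketch, the rule above (together with the additional
+    constraint that a name must begin with a lowercase letter)
+    corresponds to a pattern like /^[a-z][a-z0-9_$()+\/-]*$/ -->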
+ <table summary=""><tbody><tr>
+ <th><label>Database Name:</label></th>
+ <td><input type="text" name="name" size="24"></td>
+    </tr></tbody></table>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Create</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_database_security.html b/1.1.x/share/www/dialog/_database_security.html
new file mode 100644
index 00000000..d63fa787
--- /dev/null
+++ b/1.1.x/share/www/dialog/_database_security.html
@@ -0,0 +1,50 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Security</h2>
+ <fieldset>
+ <p class="help">
+ Each database contains lists of admins and readers.
+ Admins and readers are each defined by <tt>names</tt> and <tt>roles</tt>, which are lists of strings.
+ </p>
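+    <!-- The fields below edit the database's _security object, which in
+    this release has the shape (names and roles illustrative):
+
+      {
+        "admins": { "names": ["alice"], "roles": ["boss"] },
+        "readers": { "names": [], "roles": [] }
+      }
+    -->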
+
+ <h3>Admins</h3>
+ <p class="help">Database admins can update design documents and edit the readers list.</p>
+ <table summary=""><tbody><tr>
+ <th><label>Names:</label></th>
+ <td><input type="text" name="admin_names" size="40"></td>
+ </tr><tr>
+ <th><label>Roles:</label></th>
+ <td><input type="text" name="admin_roles" size="40"></td>
+ </tr>
+ </tbody></table>
+
+ <h3>Readers</h3>
+ <p class="help">Database readers can access the database. If no readers are defined, the database is public.</p>
+ <table summary=""><tbody><tr>
+ <th><label>Names:</label></th>
+ <td><input type="text" name="reader_names" size="40"></td>
+ </tr><tr>
+ <th><label>Roles:</label></th>
+ <td><input type="text" name="reader_roles" size="40"></td>
+ </tr>
+ </tbody></table>
+
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Update</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_delete_database.html b/1.1.x/share/www/dialog/_delete_database.html
new file mode 100644
index 00000000..039ba39b
--- /dev/null
+++ b/1.1.x/share/www/dialog/_delete_database.html
@@ -0,0 +1,27 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Delete Database</h2>
+ <fieldset>
+ <p class="help">
+ Are you sure you want to delete this database? Note that this is an
+ <strong>irreversible operation</strong>!
+ </p>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Delete</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_delete_document.html b/1.1.x/share/www/dialog/_delete_document.html
new file mode 100644
index 00000000..8ae89710
--- /dev/null
+++ b/1.1.x/share/www/dialog/_delete_document.html
@@ -0,0 +1,26 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Delete Document</h2>
+ <fieldset>
+ <p class="help">
+ Are you sure you want to delete this document?
+ </p>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Delete</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_login.html b/1.1.x/share/www/dialog/_login.html
new file mode 100644
index 00000000..f05a5fdc
--- /dev/null
+++ b/1.1.x/share/www/dialog/_login.html
@@ -0,0 +1,34 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Login</h2>
+ <fieldset>
+ <p class="help">
+      Log in to CouchDB with your username and password.
+ </p>
+ <table summary=""><tbody><tr>
+ <th><label>Username:</label></th>
+ <td><input type="text" name="name" size="24"></td>
+ </tr><tr>
+ <th><label>Password:</label></th>
+ <td><input type="password" name="password" size="24"></td>
+ </tr>
+ </tbody></table>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Login</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_save_view_as.html b/1.1.x/share/www/dialog/_save_view_as.html
new file mode 100644
index 00000000..d59122bf
--- /dev/null
+++ b/1.1.x/share/www/dialog/_save_view_as.html
@@ -0,0 +1,35 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post" id="view-save-as" onload="initForm(this)">
+ <h2>Save View As&hellip;</h2>
+ <fieldset>
+ <p class="help">
+ You can save this function code as a permanent view in the database. Just
+ enter or select the design document and the name of the view below. Note
+ that if you choose an existing view, it will be overwritten!
+ </p>
+ <table summary=""><tbody><tr>
+ <th><label for="input_docid">Design Document:</label></th>
+ <td><tt>_design/</tt><input type="text" id="input_docid" name="docid" size="20"></td>
+ </tr><tr>
+ <th><label for="input_name">View Name:<label></th>
+ <td><input type="text" id="input_name" name="name" size="30"></td>
+    </tr></tbody></table>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Save</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_share_test_reports.html b/1.1.x/share/www/dialog/_share_test_reports.html
new file mode 100644
index 00000000..82b49a74
--- /dev/null
+++ b/1.1.x/share/www/dialog/_share_test_reports.html
@@ -0,0 +1,42 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Share Test Reports</h2>
+ <fieldset>
+ <p class="help">
+ After each test run, a results summary document is stored in
+ <a href="/_utils/database.html?test_suite_reports">your local
+ <tt>test_suite_reports</tt> database.</a> The data has no personally
+ identifying information, just details about the test run and your CouchDB
+ and browser versions. (Click the red link above to see what's stored.)
+ The data remains private until you click the "share" button below.
+ </p>
+ <p class="help">
+ Test reports are very valuable to the CouchDB community, and are easy to share.
+ Clicking the "share" button below triggers replication from
+ your local <tt>test_suite_reports</tt> database, to a database hosted by the
+ project.
+ </p>
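+    <!-- Sharing is a one-shot replication; the equivalent raw request is
+    roughly:
+
+      POST /_replicate
+      {"source": "test_suite_reports",
+       "target": "http://couchdb.couchdb.org/test_suite_reports"}
+    -->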
+ <p class="help">
+ <a href="http://couchdb.couchdb.org/_utils/database.html?test_suite_reports">
+ Browse test reports shared by other users.</a>
+ Thank you for sharing!
+ </p>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Share</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_signup.html b/1.1.x/share/www/dialog/_signup.html
new file mode 100644
index 00000000..7ba3448a
--- /dev/null
+++ b/1.1.x/share/www/dialog/_signup.html
@@ -0,0 +1,35 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Create User Account</h2>
+ <fieldset>
+ <p class="help">
+ Create a user document on this CouchDB. You will be logged in as this
+ user after the document is created.
+ </p>
+ <table summary=""><tbody><tr>
+ <th><label>Username:</label></th>
+ <td><input type="text" name="name" size="24"></td>
+ </tr><tr>
+ <th><label>Password:</label></th>
+ <td><input type="password" name="password" size="24"></td>
+ </tr>
+ </tbody></table>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Create</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/dialog/_upload_attachment.html b/1.1.x/share/www/dialog/_upload_attachment.html
new file mode 100644
index 00000000..50b7e1fa
--- /dev/null
+++ b/1.1.x/share/www/dialog/_upload_attachment.html
@@ -0,0 +1,36 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post" id="upload-form">
+ <h2>Upload Attachment</h2>
+ <fieldset>
+ <p class="help">
+      Please select the file you want to upload as an attachment to this
+      document. Note that this immediately creates a new revision of the
+      document, so you don't need to save the document after the upload.
+ </p>
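+    <!-- The same effect is available without this form via a standalone
+    attachment PUT (names and revision illustrative):
+
+      PUT /dbname/docid/photo.png?rev=1-abc123
+      Content-Type: image/png
+      <binary body>
+
+    which, like the upload here, creates a new document revision. -->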
+ <table summary=""><tbody><tr>
+ <th><label>File:</label></th>
+ <td><input type="file" name="_attachments"></td>
+ </tr><tr>
+ <td id="progress" colspan="2">&nbsp;</td>
+    </tr></tbody></table>
+ </fieldset>
+ <div class="buttons">
+ <input type="hidden" name="_rev" value="">
+ <button type="submit">Upload</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/1.1.x/share/www/document.html b/1.1.x/share/www/document.html
new file mode 100644
index 00000000..b6f42018
--- /dev/null
+++ b/1.1.x/share/www/document.html
@@ -0,0 +1,114 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>View Document</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css?0.11.0" type="text/css">
+ <script src="script/json2.js"></script>
+ <script src="script/sha1.js"></script>
+ <script src="script/base64.js"></script>
+ <script src="script/jquery.js?1.4.2"></script>
+ <script src="script/jquery.couch.js?0.11.0"></script>
+ <script src="script/jquery.dialog.js?0.11.0"></script>
+ <script src="script/futon.js?0.11.0"></script>
+ <script src="script/jquery.resizer.js?0.11.0"></script>
+ <script src="script/futon.browse.js?0.11.0"></script>
+ <script src="script/futon.format.js?0.11.0"></script>
+ <script src="script/jquery.editinline.js?0.11.0"></script>
+ <script src="script/jquery.form.js?2.36"></script>
+ <script>
+ var page = new $.futon.CouchDocumentPage();
+
+ $.futon.navigation.ready(function() {
+ this.addDatabase(page.db.name);
+ this.updateSelection(
+ location.pathname.replace(/document\.html/, "database.html"),
+ "?" + page.db.name
+ );
+ });
+
+ $(function() {
+ $("h1 a.dbname").text(page.dbName)
+ .attr("href", "database.html?" + encodeURIComponent(page.db.name));
+ $("h1 strong").text(page.docId);
+ $("h1 a.raw").attr("href", "/" + encodeURIComponent(page.db.name) +
+ "/" + encodeURIComponent(page.docId));
+ page.updateFieldListing();
+
+ $("#tabs li.tabular a").click(page.activateTabularView);
+ $("#tabs li.source a").click(page.activateSourceView);
+
+ $("#toolbar button.save").click(page.saveDocument);
+ $("#toolbar button.add").click(page.addField);
+ $("#toolbar button.load").click(page.uploadAttachment);
+ if (page.isNew) {
+ $("#toolbar button.delete").hide();
+ } else {
+ $("#toolbar button.delete").click(page.deleteDocument);
+ }
+ });
+ </script>
+ </head>
+
+ <body><div id="wrap">
+ <h1>
+ <a href="index.html">Overview</a>
+ <a class="dbname" href="#">?</a>
+ <strong>?</strong>
+ <a class="raw" title="Raw document"></a>
+ </h1>
+ <div id="content">
+ <ul id="toolbar">
+ <li><button class="save">Save Document</button></li>
+ <li><button class="add">Add Field</button></li>
+ <li><button class="load">Upload Attachment…</button></li>
+ <li><button class="delete">Delete Document…</button></li>
+ </ul>
+
+ <ul id="tabs">
+ <li class="active tabular"><a href="#tabular">Fields</a></li>
+ <li class="source"><a href="#source">Source</a></li>
+ </ul>
+ <table id="fields" class="listing" cellspacing="0">
+ <col class="field"><col class="value">
+ <caption>Fields</caption>
+ <thead>
+ <tr>
+ <th>Field</th>
+ <th>Value</th>
+ </tr>
+ </thead>
+ <tbody class="content">
+ </tbody>
+ <tbody class="source" style="display: none">
+ <tr><td colspan="2"></td></tr>
+ </tbody>
+ <tbody class="footer">
+ <tr>
+ <td colspan="2">
+ <div id="paging">
+ <a class="prev">← Previous Version</a> | <a class="next">Next Version →</a>
+ </div>
+ <span></span>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+
+ </div>
+ </div></body>
+</html>
diff --git a/1.1.x/share/www/favicon.ico b/1.1.x/share/www/favicon.ico
new file mode 100644
index 00000000..34bfaa86
--- /dev/null
+++ b/1.1.x/share/www/favicon.ico
Binary files differ
diff --git a/1.1.x/share/www/image/add.png b/1.1.x/share/www/image/add.png
new file mode 100644
index 00000000..34e8c7d7
--- /dev/null
+++ b/1.1.x/share/www/image/add.png
Binary files differ
diff --git a/1.1.x/share/www/image/apply.gif b/1.1.x/share/www/image/apply.gif
new file mode 100644
index 00000000..63de0d53
--- /dev/null
+++ b/1.1.x/share/www/image/apply.gif
Binary files differ
diff --git a/1.1.x/share/www/image/bg.png b/1.1.x/share/www/image/bg.png
new file mode 100644
index 00000000..ec815244
--- /dev/null
+++ b/1.1.x/share/www/image/bg.png
Binary files differ
diff --git a/1.1.x/share/www/image/cancel.gif b/1.1.x/share/www/image/cancel.gif
new file mode 100644
index 00000000..4329076e
--- /dev/null
+++ b/1.1.x/share/www/image/cancel.gif
Binary files differ
diff --git a/1.1.x/share/www/image/compact.png b/1.1.x/share/www/image/compact.png
new file mode 100644
index 00000000..ea8985dc
--- /dev/null
+++ b/1.1.x/share/www/image/compact.png
Binary files differ
diff --git a/1.1.x/share/www/image/delete-mini.png b/1.1.x/share/www/image/delete-mini.png
new file mode 100644
index 00000000..ad5588d9
--- /dev/null
+++ b/1.1.x/share/www/image/delete-mini.png
Binary files differ
diff --git a/1.1.x/share/www/image/delete.png b/1.1.x/share/www/image/delete.png
new file mode 100644
index 00000000..e8384017
--- /dev/null
+++ b/1.1.x/share/www/image/delete.png
Binary files differ
diff --git a/1.1.x/share/www/image/grippie.gif b/1.1.x/share/www/image/grippie.gif
new file mode 100644
index 00000000..a8807896
--- /dev/null
+++ b/1.1.x/share/www/image/grippie.gif
Binary files differ
diff --git a/1.1.x/share/www/image/hgrad.gif b/1.1.x/share/www/image/hgrad.gif
new file mode 100644
index 00000000..08aa80ca
--- /dev/null
+++ b/1.1.x/share/www/image/hgrad.gif
Binary files differ
diff --git a/1.1.x/share/www/image/key.png b/1.1.x/share/www/image/key.png
new file mode 100644
index 00000000..e04ed108
--- /dev/null
+++ b/1.1.x/share/www/image/key.png
Binary files differ
diff --git a/1.1.x/share/www/image/load.png b/1.1.x/share/www/image/load.png
new file mode 100644
index 00000000..07b4f791
--- /dev/null
+++ b/1.1.x/share/www/image/load.png
Binary files differ
diff --git a/1.1.x/share/www/image/logo.png b/1.1.x/share/www/image/logo.png
new file mode 100644
index 00000000..d21ac025
--- /dev/null
+++ b/1.1.x/share/www/image/logo.png
Binary files differ
diff --git a/1.1.x/share/www/image/order-asc.gif b/1.1.x/share/www/image/order-asc.gif
new file mode 100644
index 00000000..d2a237ae
--- /dev/null
+++ b/1.1.x/share/www/image/order-asc.gif
Binary files differ
diff --git a/1.1.x/share/www/image/order-desc.gif b/1.1.x/share/www/image/order-desc.gif
new file mode 100644
index 00000000..1043b499
--- /dev/null
+++ b/1.1.x/share/www/image/order-desc.gif
Binary files differ
diff --git a/1.1.x/share/www/image/path.gif b/1.1.x/share/www/image/path.gif
new file mode 100644
index 00000000..01ec717e
--- /dev/null
+++ b/1.1.x/share/www/image/path.gif
Binary files differ
diff --git a/1.1.x/share/www/image/progress.gif b/1.1.x/share/www/image/progress.gif
new file mode 100644
index 00000000..d84f6537
--- /dev/null
+++ b/1.1.x/share/www/image/progress.gif
Binary files differ
diff --git a/1.1.x/share/www/image/rarrow.png b/1.1.x/share/www/image/rarrow.png
new file mode 100644
index 00000000..507e87e7
--- /dev/null
+++ b/1.1.x/share/www/image/rarrow.png
Binary files differ
diff --git a/1.1.x/share/www/image/run-mini.png b/1.1.x/share/www/image/run-mini.png
new file mode 100644
index 00000000..b2fcbd82
--- /dev/null
+++ b/1.1.x/share/www/image/run-mini.png
Binary files differ
diff --git a/1.1.x/share/www/image/run.png b/1.1.x/share/www/image/run.png
new file mode 100644
index 00000000..a1d79f65
--- /dev/null
+++ b/1.1.x/share/www/image/run.png
Binary files differ
diff --git a/1.1.x/share/www/image/running.png b/1.1.x/share/www/image/running.png
new file mode 100644
index 00000000..9b50cd67
--- /dev/null
+++ b/1.1.x/share/www/image/running.png
Binary files differ
diff --git a/1.1.x/share/www/image/save.png b/1.1.x/share/www/image/save.png
new file mode 100644
index 00000000..a04e4bcc
--- /dev/null
+++ b/1.1.x/share/www/image/save.png
Binary files differ
diff --git a/1.1.x/share/www/image/sidebar-toggle.png b/1.1.x/share/www/image/sidebar-toggle.png
new file mode 100644
index 00000000..3ea32ffe
--- /dev/null
+++ b/1.1.x/share/www/image/sidebar-toggle.png
Binary files differ
diff --git a/1.1.x/share/www/image/spinner.gif b/1.1.x/share/www/image/spinner.gif
new file mode 100644
index 00000000..6239655e
--- /dev/null
+++ b/1.1.x/share/www/image/spinner.gif
Binary files differ
diff --git a/1.1.x/share/www/image/spinner_33.gif b/1.1.x/share/www/image/spinner_33.gif
new file mode 100644
index 00000000..5ad51927
--- /dev/null
+++ b/1.1.x/share/www/image/spinner_33.gif
Binary files differ
diff --git a/1.1.x/share/www/image/spinner_6b.gif b/1.1.x/share/www/image/spinner_6b.gif
new file mode 100644
index 00000000..4e3d9725
--- /dev/null
+++ b/1.1.x/share/www/image/spinner_6b.gif
Binary files differ
diff --git a/1.1.x/share/www/image/test_failure.gif b/1.1.x/share/www/image/test_failure.gif
new file mode 100644
index 00000000..2a873b24
--- /dev/null
+++ b/1.1.x/share/www/image/test_failure.gif
Binary files differ
diff --git a/1.1.x/share/www/image/test_success.gif b/1.1.x/share/www/image/test_success.gif
new file mode 100644
index 00000000..6df8bae2
--- /dev/null
+++ b/1.1.x/share/www/image/test_success.gif
Binary files differ
diff --git a/1.1.x/share/www/image/thead-key.gif b/1.1.x/share/www/image/thead-key.gif
new file mode 100644
index 00000000..42a43b58
--- /dev/null
+++ b/1.1.x/share/www/image/thead-key.gif
Binary files differ
diff --git a/1.1.x/share/www/image/thead.gif b/1.1.x/share/www/image/thead.gif
new file mode 100644
index 00000000..1587b1f2
--- /dev/null
+++ b/1.1.x/share/www/image/thead.gif
Binary files differ
diff --git a/1.1.x/share/www/image/toggle-collapse.gif b/1.1.x/share/www/image/toggle-collapse.gif
new file mode 100644
index 00000000..f0979304
--- /dev/null
+++ b/1.1.x/share/www/image/toggle-collapse.gif
Binary files differ
diff --git a/1.1.x/share/www/image/toggle-expand.gif b/1.1.x/share/www/image/toggle-expand.gif
new file mode 100644
index 00000000..03fa8360
--- /dev/null
+++ b/1.1.x/share/www/image/toggle-expand.gif
Binary files differ
diff --git a/1.1.x/share/www/image/twisty.gif b/1.1.x/share/www/image/twisty.gif
new file mode 100644
index 00000000..5ba57a1a
--- /dev/null
+++ b/1.1.x/share/www/image/twisty.gif
Binary files differ
diff --git a/1.1.x/share/www/index.html b/1.1.x/share/www/index.html
new file mode 100644
index 00000000..975f5986
--- /dev/null
+++ b/1.1.x/share/www/index.html
@@ -0,0 +1,94 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Overview</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css?0.11.0" type="text/css">
+ <script src="script/json2.js"></script>
+ <script src="script/sha1.js"></script>
+ <script src="script/jquery.js?1.4.2"></script>
+ <script src="script/jquery.couch.js?0.11.0"></script>
+ <script src="script/jquery.dialog.js?0.11.0"></script>
+ <script src="script/futon.js?0.11.0"></script>
+ <script src="script/futon.browse.js?0.11.0"></script>
+ <script src="script/futon.format.js?0.11.0"></script>
+ <script>
+ var page = new $.futon.CouchIndexPage();
+ $(document).ready(function() {
+ if (!/index\.html$/.test(location.pathname)) {
+ $.futon.navigation.ready(function() {
+ this.updateSelection(location.pathname + "index.html");
+ });
+ }
+ var dbsPerPage = parseInt($.futon.storage.get("per_page"));
+ if (dbsPerPage) $("#perpage").val(dbsPerPage);
+ $("#perpage").change(function() {
+ page.updateDatabaseListing();
+ $.futon.storage.set("per_page", this.value);
+ });
+
+ page.updateDatabaseListing();
+
+ $("#toolbar button.add").click(function() {
+ page.addDatabase();
+ });
+ });
+ </script>
+ </head>
+ <body>
+ <div id="wrap">
+ <h1><strong>Overview</strong></h1>
+ <div id="content">
+ <ul id="toolbar">
+ <li><button class="add">Create Database …</button></li>
+ </ul>
+
+ <table class="listing" id="databases" cellspacing="0">
+ <caption>Databases</caption>
+ <thead>
+ <tr>
+ <th>Name</th>
+ <th class="size">Size</th>
+ <th class="count">Number of Documents</th>
+ <th class="seq">Update Seq</th>
+ </tr>
+ </thead>
+ <tbody class="content">
+ </tbody>
+ <tbody class="footer">
+ <tr>
+ <td colspan="5">
+ <div id="paging">
+ <a class="prev">← Previous Page</a> |
+ <label>Rows per page: <select id="perpage">
+ <option selected>10</option>
+ <option>25</option>
+ <option>50</option>
+ <option>100</option>
+ </select></label> |
+ <a class="next">Next Page →</a>
+ </div>
+ <span></span>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+
+ </div>
+ </body>
+</html>
diff --git a/1.1.x/share/www/replicator.html b/1.1.x/share/www/replicator.html
new file mode 100644
index 00000000..16c0940b
--- /dev/null
+++ b/1.1.x/share/www/replicator.html
@@ -0,0 +1,184 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Replicator</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css?0.11.0" type="text/css">
+ <link rel="stylesheet" href="style/jquery-ui-1.8.11.custom.css" type="text/css">
+ <script src="script/json2.js"></script>
+ <script src="script/sha1.js"></script>
+ <script src="script/jquery.js?1.4.2"></script>
+ <script src="script/jquery.couch.js?0.11.0"></script>
+ <script src="script/jquery.dialog.js?0.11.0"></script>
+ <script src="script/futon.js?0.11.0"></script>
+ <script src="script/jquery-ui-1.8.11.custom.min.js"></script>
+ <script>
+ $(document).ready(function() {
+ var allDatabases;
+
+ $("fieldset input[type=radio]").click(function() {
+ var radio = this;
+ var fieldset = $(this).parents("fieldset").get(0);
+ $("input[type=text]", fieldset).each(function() {
+ this.disabled = radio.value == "local";
+ if (!this.disabled) this.focus();
+ });
+ $('.local', fieldset).each(function() {
+ this.disabled = radio.value == "remote";
+ if (!this.disabled) this.focus();
+ });
+ });
+
+ var getDatabases = function() {
+ $.couch.allDbs({
+ success: function(dbs) {
+ allDatabases = dbs.sort();
+
+ $("fieldset select").each(function() {
+ var select = this;
+ $.each(dbs, function(idx, dbName) {
+ $("<option></option>").text(dbName).appendTo(select);
+ });
+ select.selectedIndex = 0;
+ });
+
+ $('#to_name').autocomplete({ source: dbs });
+ }
+ });
+ };
+ getDatabases();
+
+ $("button#swap").click(function() {
+ var fromName = $("#source select").val();
+ $("#source select").val($("#target select").val());
+ $("#target select").val(fromName);
+
+ var fromUrl = $("#source input[type=text]").val();
+ $("#source input[type=text]").val($("#target input[type=text]").val());
+ $("#target input[type=text]").val(fromUrl);
+
+ var fromType = $("#source input[type=radio]").filter(function() {
+ return this.checked;
+ }).val();
+ var toType = $("#target input[type=radio]").filter(function() {
+ return this.checked;
+ }).val();
+ $("#source input[value=" + toType + "]").click();
+ $("#target input[value=" + fromType + "]").click();
+
+ $("#replicate").get(0).focus();
+ return false;
+ });
+
+ $("button#replicate").click(function() {
+ $("#records tbody.content").empty();
+ var targetIsLocal = $('#to_local:checked').length > 0;
+ var source = $("#from_local")[0].checked ? $("#from_name").val() : $("#from_url").val();
+ var target = targetIsLocal ? $("#to_name").val() : $("#to_url").val();
+ var repOpts = {};
+
+ if (targetIsLocal && $.inArray(target, allDatabases) < 0) {
+ if (!confirm('This will create a database named ' + target + '. Ok?')) {
+ return;
+ }
+ repOpts.create_target = true;
+ }
+
+ if ($("#continuous")[0].checked) {
+ repOpts.continuous = true;
+ }
+ $.couch.replicate(source, target, {
+ success: function(resp) {
+ if (resp._local_id) {
+ $("<tr><th></th></tr>")
+ .find("th").text(JSON.stringify(resp)).end()
+ .appendTo("#records tbody.content");
+ $("#records tbody tr").removeClass("odd").filter(":odd").addClass("odd");
+ } else {
+ $.each(resp.history, function(idx, record) {
+ $("<tr><th></th></tr>")
+ .find("th").text(JSON.stringify(record)).end()
+ .appendTo("#records tbody.content");
+ });
+ $("#records tbody tr").removeClass("odd").filter(":odd").addClass("odd");
+ $("#records tbody.footer td").text("Replication session " + resp.session_id);
+
+ if (repOpts.create_target) {
+ getDatabases();
+ }
+ }
+ }
+ }, repOpts);
+ });
+ });
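+ // For reference, a hedged sketch of the HTTP request that
+ // $.couch.replicate issues under the hood (database names are
+ // illustrative):
+ //
+ //   POST /_replicate
+ //   {"source": "db_a", "target": "http://example.com/db_b",
+ //    "continuous": true, "create_target": true}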
+ </script>
+ </head>
+ <body><div id="wrap">
+ <h1>
+ <a href="index.html">Overview</a>
+ <strong>Replicator</strong>
+ </h1>
+ <div id="content">
+
+ <form id="replicator">
+ <fieldset id="source">
+ <legend>Replicate changes from:</legend>
+ <p>
+ <input type="radio" id="from_local" name="from_type" value="local" checked>
+ <label for="from_local">Local Database: </label>
+ <select id="from_name" name="from_name" class="local"></select>
+ </p><p>
+ <input type="radio" id="from_to_remote" name="from_type" value="remote">
+ <label for="from_to_remote">Remote database: </label>
+ <input type="text" id="from_url" name="from_url" size="30" value="http://" disabled>
+ </p>
+ </fieldset>
+ <p class="swap"><button id="swap" tabindex="99">⇄</button></p>
+ <fieldset id="target">
+ <legend>to:</legend>
+ <p>
+ <input type="radio" id="to_local" name="to_type" value="local" checked>
+ <label for="to_local">Local database: </label>
+ <input type="text" id="to_name" name="to_name" class="local"></select>
+ </p><p>
+ <input type="radio" id="to_remote" name="to_type" value="remote">
+ <label for="to_remote">Remote database: </label>
+ <input type="text" id="to_url" name="to_url" size="30" value="http://" disabled>
+ </p>
+ </fieldset>
+ <p class="actions">
+ <label><input type="checkbox" name="continuous" value="continuous" id="continuous"> Continuous</label>
+ <button id="replicate" type="button">Replicate</button>
+ </p>
+ </form>
+
+ <table id="records" class="listing" cellspacing="0">
+ <caption>Replication History</caption>
+ <thead><tr>
+ <th>Event</th>
+ </tr></thead>
+ <tbody class="content"></tbody>
+ <tbody class="footer"><tr>
+ <td colspan="4">No replication</td>
+ </tr></tbody>
+ </table>
+
+ </div>
+ </div></body>
+</html>
diff --git a/1.1.x/share/www/script/base64.js b/1.1.x/share/www/script/base64.js
new file mode 100644
index 00000000..e0aab303
--- /dev/null
+++ b/1.1.x/share/www/script/base64.js
@@ -0,0 +1,124 @@
+/* Copyright (C) 1999 Masanao Izumo <iz@onicos.co.jp>
+ * Version: 1.0
+ * LastModified: Dec 25 1999
+ * This library is free. You can redistribute it and/or modify it.
+ */
+ /* Modified by Chris Anderson to not use CommonJS */
+ /* Modified by Dan Webb not to require Narwhal's binary library */
+
+var Base64 = {};
+(function(exports) {
+
+ var encodeChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ var decodeChars = [
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1,
+ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1,
+ -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1
+ ];
+
+ exports.encode = function (str) {
+ var out, i, length;
+ var c1, c2, c3;
+
+ length = len(str);
+ i = 0;
+ out = [];
+ while(i < length) {
+ c1 = str.charCodeAt(i++) & 0xff;
+ if(i == length)
+ {
+ out.push(encodeChars.charCodeAt(c1 >> 2));
+ out.push(encodeChars.charCodeAt((c1 & 0x3) << 4));
+ out.push("=".charCodeAt(0));
+ out.push("=".charCodeAt(0));
+ break;
+ }
+ c2 = str.charCodeAt(i++);
+ if(i == length)
+ {
+ out.push(encodeChars.charCodeAt(c1 >> 2));
+ out.push(encodeChars.charCodeAt(((c1 & 0x3)<< 4) | ((c2 & 0xF0) >> 4)));
+ out.push(encodeChars.charCodeAt((c2 & 0xF) << 2));
+ out.push("=".charCodeAt(0));
+ break;
+ }
+ c3 = str.charCodeAt(i++);
+ out.push(encodeChars.charCodeAt(c1 >> 2));
+ out.push(encodeChars.charCodeAt(((c1 & 0x3)<< 4) | ((c2 & 0xF0) >> 4)));
+ out.push(encodeChars.charCodeAt(((c2 & 0xF) << 2) | ((c3 & 0xC0) >>6)));
+ out.push(encodeChars.charCodeAt(c3 & 0x3F));
+ }
+
+ var str = "";
+ out.forEach(function(chr) { str += String.fromCharCode(chr) });
+ return str;
+ };
+
+ exports.decode = function (str) {
+ var c1, c2, c3, c4;
+ var i, length, out;
+
+ length = len(str);
+ i = 0;
+ out = [];
+ while(i < length) {
+ /* c1 */
+ do {
+ c1 = decodeChars[str.charCodeAt(i++) & 0xff];
+ } while(i < length && c1 == -1);
+ if(c1 == -1)
+ break;
+
+ /* c2 */
+ do {
+ c2 = decodeChars[str.charCodeAt(i++) & 0xff];
+ } while(i < length && c2 == -1);
+ if(c2 == -1)
+ break;
+
+ out.push(String.fromCharCode((c1 << 2) | ((c2 & 0x30) >> 4)));
+
+ /* c3 */
+ do {
+ c3 = str.charCodeAt(i++) & 0xff;
+ if(c3 == 61)
+ return out.join('');
+ c3 = decodeChars[c3];
+ } while(i < length && c3 == -1);
+ if(c3 == -1)
+ break;
+
+ out.push(String.fromCharCode(((c2 & 0xF) << 4) | ((c3 & 0x3C) >> 2)));
+
+ /* c4 */
+ do {
+ c4 = str.charCodeAt(i++) & 0xff;
+ if(c4 == 61)
+ return out.join('');
+ c4 = decodeChars[c4];
+ } while(i < length && c4 == -1);
+
+ if(c4 == -1)
+ break;
+
+ out.push(String.fromCharCode(((c3 & 0x03) << 6) | c4));
+ }
+
+ return out.join('');
+ };
+
+ var len = function (object) {
+ if (object.length !== undefined) {
+ return object.length;
+ } else if (object.getLength !== undefined) {
+ return object.getLength();
+ } else {
+ return undefined;
+ }
+ };
+})(Base64);
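+
+// A hedged usage sketch of this library:
+//   Base64.encode("hello")    // -> "aGVsbG8="
+//   Base64.decode("aGVsbG8=") // -> "hello"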
diff --git a/1.1.x/share/www/script/couch.js b/1.1.x/share/www/script/couch.js
new file mode 100644
index 00000000..bcc19652
--- /dev/null
+++ b/1.1.x/share/www/script/couch.js
@@ -0,0 +1,473 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// A simple class to represent a database. Uses XMLHttpRequest to interface with
+// the CouchDB server.
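+//
+// A minimal usage sketch (the database name is illustrative):
+//
+//   var db = new CouchDB("test_suite_db");
+//   db.createDb();
+//   db.save({_id: "doc1", value: 1});  // PUT /test_suite_db/doc1
+//   var doc = db.open("doc1");         // GET /test_suite_db/doc1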
+
+function CouchDB(name, httpHeaders) {
+ this.name = name;
+ this.uri = "/" + encodeURIComponent(name) + "/";
+
+ // The XMLHttpRequest object from the most recent request. Callers can
+ // use this to check result http status and headers.
+ this.last_req = null;
+
+ this.request = function(method, uri, requestOptions) {
+ requestOptions = requestOptions || {};
+ requestOptions.headers = combine(requestOptions.headers, httpHeaders);
+ return CouchDB.request(method, uri, requestOptions);
+ };
+
+ // Creates the database on the server
+ this.createDb = function() {
+ this.last_req = this.request("PUT", this.uri);
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ // Deletes the database on the server
+ this.deleteDb = function() {
+ this.last_req = this.request("DELETE", this.uri);
+ if (this.last_req.status == 404) {
+ return false;
+ }
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ // Save a document to the database
+ this.save = function(doc, options) {
+ if (doc._id == undefined) {
+ doc._id = CouchDB.newUuids(1)[0];
+ }
+
+ this.last_req = this.request("PUT", this.uri +
+ encodeURIComponent(doc._id) + encodeOptions(options),
+ {body: JSON.stringify(doc)});
+ CouchDB.maybeThrowError(this.last_req);
+ var result = JSON.parse(this.last_req.responseText);
+ doc._rev = result.rev;
+ return result;
+ };
+
+ // Open a document from the database
+ this.open = function(docId, options) {
+ this.last_req = this.request("GET", this.uri + encodeURIComponent(docId)
+ + encodeOptions(options));
+ if (this.last_req.status == 404) {
+ return null;
+ }
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ // Deletes a document from the database
+ this.deleteDoc = function(doc) {
+ this.last_req = this.request("DELETE", this.uri + encodeURIComponent(doc._id)
+ + "?rev=" + doc._rev);
+ CouchDB.maybeThrowError(this.last_req);
+ var result = JSON.parse(this.last_req.responseText);
+ doc._rev = result.rev; //record rev in input document
+ doc._deleted = true;
+ return result;
+ };
+
+ // Deletes an attachment from a document
+ this.deleteDocAttachment = function(doc, attachment_name) {
+ this.last_req = this.request("DELETE", this.uri + encodeURIComponent(doc._id)
+ + "/" + attachment_name + "?rev=" + doc._rev);
+ CouchDB.maybeThrowError(this.last_req);
+ var result = JSON.parse(this.last_req.responseText);
+ doc._rev = result.rev; //record rev in input document
+ return result;
+ };
+
+ this.bulkSave = function(docs, options) {
+ // first prepopulate the UUIDs for new documents
+ var newCount = 0;
+ for (var i=0; i<docs.length; i++) {
+ if (docs[i]._id == undefined) {
+ newCount++;
+ }
+ }
+ var newUuids = CouchDB.newUuids(newCount);
+ for (var i=0; i<docs.length; i++) {
+ if (docs[i]._id == undefined) {
+ docs[i]._id = newUuids.pop();
+ }
+ }
+ var json = {"docs": docs};
+ // put any options in the json
+ for (var option in options) {
+ json[option] = options[option];
+ }
+ this.last_req = this.request("POST", this.uri + "_bulk_docs", {
+ body: JSON.stringify(json)
+ });
+ if (this.last_req.status == 417) {
+ return {errors: JSON.parse(this.last_req.responseText)};
+ }
+ else {
+ CouchDB.maybeThrowError(this.last_req);
+ var results = JSON.parse(this.last_req.responseText);
+ for (var i = 0; i < docs.length; i++) {
+ if(results[i] && results[i].rev) {
+ docs[i]._rev = results[i].rev;
+ }
+ }
+ return results;
+ }
+ };
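+
+ // A hedged usage sketch of bulkSave; all_or_nothing is the CouchDB 1.x
+ // bulk-docs option, and documents without an _id get UUIDs assigned first:
+ //   db.bulkSave([{a: 1}, {b: 2}], {all_or_nothing: true});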
+
+ this.ensureFullCommit = function() {
+ this.last_req = this.request("POST", this.uri + "_ensure_full_commit");
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ // Applies the map function to the contents of database and returns the results.
+ this.query = function(mapFun, reduceFun, options, keys, language) {
+ var body = {language: language || "javascript"};
+ if(keys) {
+ body.keys = keys ;
+ }
+ if (typeof(mapFun) != "string") {
+ mapFun = mapFun.toSource ? mapFun.toSource() : "(" + mapFun.toString() + ")";
+ }
+ body.map = mapFun;
+ if (reduceFun != null) {
+ if (typeof(reduceFun) != "string") {
+ reduceFun = reduceFun.toSource ?
+ reduceFun.toSource() : "(" + reduceFun.toString() + ")";
+ }
+ body.reduce = reduceFun;
+ }
+ if (options && options.options != undefined) {
+ body.options = options.options;
+ delete options.options;
+ }
+ this.last_req = this.request("POST", this.uri + "_temp_view"
+ + encodeOptions(options), {
+ headers: {"Content-Type": "application/json"},
+ body: JSON.stringify(body)
+ });
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
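+
+ // e.g., a sketch of an ad-hoc query against _temp_view:
+ //   db.query(function(doc) { emit(doc._id, null); });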
+
+ this.view = function(viewname, options, keys) {
+ var viewParts = viewname.split('/');
+ var viewPath = this.uri + "_design/" + viewParts[0] + "/_view/"
+ + viewParts[1] + encodeOptions(options);
+ if(!keys) {
+ this.last_req = this.request("GET", viewPath);
+ } else {
+ this.last_req = this.request("POST", viewPath, {
+ headers: {"Content-Type": "application/json"},
+ body: JSON.stringify({keys:keys})
+ });
+ }
+ if (this.last_req.status == 404) {
+ return null;
+ }
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ // gets information about the database
+ this.info = function() {
+ this.last_req = this.request("GET", this.uri);
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ // gets information about a design doc
+ this.designInfo = function(docid) {
+ this.last_req = this.request("GET", this.uri + docid + "/_info");
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ this.allDocs = function(options,keys) {
+ if(!keys) {
+ this.last_req = this.request("GET", this.uri + "_all_docs"
+ + encodeOptions(options));
+ } else {
+ this.last_req = this.request("POST", this.uri + "_all_docs"
+ + encodeOptions(options), {
+ headers: {"Content-Type": "application/json"},
+ body: JSON.stringify({keys:keys})
+ });
+ }
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ this.designDocs = function() {
+ return this.allDocs({startkey:"_design", endkey:"_design0"});
+ };
+
+ this.changes = function(options) {
+ this.last_req = this.request("GET", this.uri + "_changes"
+ + encodeOptions(options));
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ this.compact = function() {
+ this.last_req = this.request("POST", this.uri + "_compact");
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ this.viewCleanup = function() {
+ this.last_req = this.request("POST", this.uri + "_view_cleanup");
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ this.setDbProperty = function(propId, propValue) {
+ this.last_req = this.request("PUT", this.uri + propId,{
+ body:JSON.stringify(propValue)
+ });
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ this.getDbProperty = function(propId) {
+ this.last_req = this.request("GET", this.uri + propId);
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ this.setSecObj = function(secObj) {
+ this.last_req = this.request("PUT", this.uri + "_security",{
+ body:JSON.stringify(secObj)
+ });
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ this.getSecObj = function() {
+ this.last_req = this.request("GET", this.uri + "_security");
+ CouchDB.maybeThrowError(this.last_req);
+ return JSON.parse(this.last_req.responseText);
+ };
+
+ // Convert an options object to a URL query string.
+ // ex: {key:'value', limit:10} becomes '?key="value"&limit=10'
+ // (key, keys, startkey and endkey values are JSON-encoded)
+ function encodeOptions(options) {
+ var buf = [];
+ if (typeof(options) == "object" && options !== null) {
+ for (var name in options) {
+ if (!options.hasOwnProperty(name)) { continue; };
+ var value = options[name];
+ if (name == "key" || name == "keys" || name == "startkey" || name == "endkey") {
+ value = toJSON(value);
+ }
+ buf.push(encodeURIComponent(name) + "=" + encodeURIComponent(value));
+ }
+ }
+ if (!buf.length) {
+ return "";
+ }
+ return "?" + buf.join("&");
+ }
+
+ function toJSON(obj) {
+ return obj !== null ? JSON.stringify(obj) : null;
+ }
+
+ function combine(object1, object2) {
+ if (!object2) {
+ return object1;
+ }
+ if (!object1) {
+ return object2;
+ }
+
+ for (var name in object2) {
+ object1[name] = object2[name];
+ }
+ return object1;
+ }
+
+}
+
+// this is the XMLHttpRequest object from the last request made by the
+// following CouchDB.* functions (except for calls to request itself).
+// Callers can use it to check the HTTP status or header values of requests.
+CouchDB.last_req = null;
+CouchDB.urlPrefix = '';
+
+CouchDB.login = function(name, password) {
+ CouchDB.last_req = CouchDB.request("POST", "/_session", {
+ headers: {"Content-Type": "application/x-www-form-urlencoded",
+ "X-CouchDB-WWW-Authenticate": "Cookie"},
+ body: "name=" + encodeURIComponent(name) + "&password="
+ + encodeURIComponent(password)
+ });
+ return JSON.parse(CouchDB.last_req.responseText);
+};
+
+CouchDB.logout = function() {
+ CouchDB.last_req = CouchDB.request("DELETE", "/_session", {
+ headers: {"Content-Type": "application/x-www-form-urlencoded",
+ "X-CouchDB-WWW-Authenticate": "Cookie"}
+ });
+ return JSON.parse(CouchDB.last_req.responseText);
+};
+
+CouchDB.session = function(options) {
+ options = options || {};
+ CouchDB.last_req = CouchDB.request("GET", "/_session", options);
+ CouchDB.maybeThrowError(CouchDB.last_req);
+ return JSON.parse(CouchDB.last_req.responseText);
+};
+
+CouchDB.user_prefix = "org.couchdb.user:";
+
+CouchDB.prepareUserDoc = function(user_doc, new_password) {
+ user_doc._id = user_doc._id || CouchDB.user_prefix + user_doc.name;
+ if (new_password) {
+ // handle the password crypto
+ user_doc.salt = CouchDB.newUuids(1)[0];
+ user_doc.password_sha = hex_sha1(new_password + user_doc.salt);
+ }
+ user_doc.type = "user";
+ if (!user_doc.roles) {
+ user_doc.roles = [];
+ }
+ return user_doc;
+};
+
+CouchDB.allDbs = function() {
+ CouchDB.last_req = CouchDB.request("GET", "/_all_dbs");
+ CouchDB.maybeThrowError(CouchDB.last_req);
+ return JSON.parse(CouchDB.last_req.responseText);
+};
+
+CouchDB.allDesignDocs = function() {
+ var ddocs = {}, dbs = CouchDB.allDbs();
+ for (var i=0; i < dbs.length; i++) {
+ var db = new CouchDB(dbs[i]);
+ ddocs[dbs[i]] = db.designDocs();
+ };
+ return ddocs;
+};
+
+CouchDB.getVersion = function() {
+ CouchDB.last_req = CouchDB.request("GET", "/");
+ CouchDB.maybeThrowError(CouchDB.last_req);
+ return JSON.parse(CouchDB.last_req.responseText).version;
+};
+
+CouchDB.replicate = function(source, target, rep_options) {
+ rep_options = rep_options || {};
+ var headers = rep_options.headers || {};
+ var body = rep_options.body || {};
+ body.source = source;
+ body.target = target;
+ CouchDB.last_req = CouchDB.request("POST", "/_replicate", {
+ headers: headers,
+ body: JSON.stringify(body)
+ });
+ CouchDB.maybeThrowError(CouchDB.last_req);
+ return JSON.parse(CouchDB.last_req.responseText);
+};
+
+CouchDB.newXhr = function() {
+ if (typeof(XMLHttpRequest) != "undefined") {
+ return new XMLHttpRequest();
+ } else if (typeof(ActiveXObject) != "undefined") {
+ return new ActiveXObject("Microsoft.XMLHTTP");
+ } else {
+ throw new Error("No XMLHTTPRequest support detected");
+ }
+};
+
+CouchDB.request = function(method, uri, options) {
+ options = typeof(options) == 'object' ? options : {};
+ options.headers = typeof(options.headers) == 'object' ? options.headers : {};
+ options.headers["Content-Type"] = options.headers["Content-Type"] || options.headers["content-type"] || "application/json";
+ options.headers["Accept"] = options.headers["Accept"] || options.headers["accept"] || "application/json";
+ var req = CouchDB.newXhr();
+ if(uri.substr(0, CouchDB.protocol.length) != CouchDB.protocol) {
+ uri = CouchDB.urlPrefix + uri;
+ }
+ req.open(method, uri, false);
+ if (options.headers) {
+ var headers = options.headers;
+ for (var headerName in headers) {
+ if (!headers.hasOwnProperty(headerName)) { continue; }
+ req.setRequestHeader(headerName, headers[headerName]);
+ }
+ }
+ req.send(options.body || "");
+ return req;
+};
+
+CouchDB.requestStats = function(module, key, test) {
+ var query_arg = "";
+ if(test !== null) {
+ query_arg = "?flush=true";
+ }
+
+ var url = "/_stats/" + module + "/" + key + query_arg;
+ var stat = CouchDB.request("GET", url).responseText;
+ return JSON.parse(stat)[module][key];
+};
+
+CouchDB.uuids_cache = [];
+
+CouchDB.newUuids = function(n, buf) {
+ buf = buf || 100;
+ if (CouchDB.uuids_cache.length >= n) {
+ var uuids = CouchDB.uuids_cache.slice(CouchDB.uuids_cache.length - n);
+ if(CouchDB.uuids_cache.length - n == 0) {
+ CouchDB.uuids_cache = [];
+ } else {
+ CouchDB.uuids_cache =
+ CouchDB.uuids_cache.slice(0, CouchDB.uuids_cache.length - n);
+ }
+ return uuids;
+ } else {
+ CouchDB.last_req = CouchDB.request("GET", "/_uuids?count=" + (buf + n));
+ CouchDB.maybeThrowError(CouchDB.last_req);
+ var result = JSON.parse(CouchDB.last_req.responseText);
+ CouchDB.uuids_cache =
+ CouchDB.uuids_cache.concat(result.uuids.slice(0, buf));
+ return result.uuids.slice(buf);
+ }
+};
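+
+// A usage sketch: UUIDs come from /_uuids, with extras cached for later calls.
+//   var ids = CouchDB.newUuids(2); // e.g. ["fe3a...", "fe3b..."]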
+
+CouchDB.maybeThrowError = function(req) {
+ if (req.status >= 400) {
+ var result;
+ try {
+ result = JSON.parse(req.responseText);
+ } catch (parseError) {
+ result = {error: "unknown", reason: req.responseText};
+ }
+ throw result;
+ }
+};
+
+CouchDB.params = function(options) {
+ options = options || {};
+ var returnArray = [];
+ for(var key in options) {
+ var value = options[key];
+ returnArray.push(key + "=" + value);
+ }
+ return returnArray.join("&");
+};
diff --git a/1.1.x/share/www/script/couch_test_runner.js b/1.1.x/share/www/script/couch_test_runner.js
new file mode 100644
index 00000000..55a6533f
--- /dev/null
+++ b/1.1.x/share/www/script/couch_test_runner.js
@@ -0,0 +1,437 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// *********************** Test Framework of Sorts ************************* //
+
+
+function loadScript(url) {
+ // disallow loading remote URLs
+ if((url.substr(0, 7) == "http://")
+ || (url.substr(0, 2) == "//")
+ || (url.substr(0, 5) == "data:")
+ || (url.substr(0, 11) == "javascript:")) {
+ throw "Not loading remote test scripts";
+ }
+ if (typeof document != "undefined") document.write('<script src="'+url+'"></script>');
+};
+
+function patchTest(fun) {
+ var source = fun.toString();
+ var output = "";
+ var i = 0;
+ var testMarker = "T(";
+ while (i < source.length) {
+ var testStart = source.indexOf(testMarker, i);
+ if (testStart == -1) {
+ output = output + source.substring(i, source.length);
+ break;
+ }
+ var testEnd = source.indexOf(");", testStart);
+ var testCode = source.substring(testStart + testMarker.length, testEnd);
+ output += source.substring(i, testStart) + "T(" + testCode + "," + JSON.stringify(testCode);
+ i = testEnd;
+ }
+ try {
+ return eval("(" + output + ")");
+ } catch (e) {
+ return null;
+ }
+}
+
+function runAllTests() {
+ var rows = $("#tests tbody.content tr");
+ $("td", rows).text("");
+ $("td.status", rows).removeClass("error").removeClass("failure").removeClass("success").text("not run");
+ var offset = 0;
+ function runNext() {
+ if (offset < rows.length) {
+ var row = rows.get(offset);
+ runTest($("th button", row).get(0), function() {
+ offset += 1;
+ setTimeout(runNext, 100);
+ }, false, true);
+ } else {
+ saveTestReport();
+ }
+ }
+ runNext();
+}
+
+var numFailures = 0;
+var currentRow = null;
+
+function runTest(button, callback, debug, noSave) {
+
+ // offer to save admins
+ if (currentRow != null) {
+ alert("Can not run multiple tests simultaneously.");
+ return;
+ }
+ var row = currentRow = $(button).parents("tr").get(0);
+ $("td.status", row).removeClass("error").removeClass("failure").removeClass("success");
+ $("td", row).text("");
+ $("#toolbar li.current").text("Running: "+row.id);
+ var testFun = couchTests[row.id];
+ function run() {
+ numFailures = 0;
+ var start = new Date().getTime();
+ try {
+ if (debug == undefined || !debug) {
+ testFun = patchTest(testFun) || testFun;
+ }
+ testFun(debug);
+ var status = numFailures > 0 ? "failure" : "success";
+ } catch (e) {
+ var status = "error";
+ if ($("td.details ol", row).length == 0) {
+ $("<ol></ol>").appendTo($("td.details", row));
+ }
+ $("<li><b>Exception raised:</b> <code class='error'></code></li>")
+ .find("code").text(JSON.stringify(e)).end()
+ .appendTo($("td.details ol", row));
+ if (debug) {
+ currentRow = null;
+ throw e;
+ }
+ }
+ if ($("td.details ol", row).length) {
+ $("<a href='#'>Run with debugger</a>").click(function() {
+ runTest(this, undefined, true);
+ }).prependTo($("td.details ol", row));
+ }
+ var duration = new Date().getTime() - start;
+ $("td.status", row).removeClass("running").addClass(status).text(status);
+ $("td.duration", row).text(duration + "ms");
+ $("#toolbar li.current").text("Finished: "+row.id);
+ updateTestsFooter();
+ currentRow = null;
+ if (callback) callback();
+ if (!noSave) saveTestReport();
+ }
+ $("td.status", row).addClass("running").text("running…");
+ setTimeout(run, 100);
+}
+
+function showSource(cell) {
+ var name = $(cell).text();
+ var win = window.open("", name, "width=700,height=500,resizable=yes,scrollbars=yes");
+ win.document.location = "script/test/" + name + ".js";
+}
+
+var readyToRun;
+function setupAdminParty(fun) {
+ if (readyToRun) {
+ fun();
+ } else {
+ function removeAdmins(confs, doneFun) {
+ // iterate through the config and remove current user last
+ // current user is at front of list
+ var remove = confs.pop();
+ if (remove) {
+ $.couch.config({
+ success : function() {
+ removeAdmins(confs, doneFun);
+ }
+ }, "admins", remove[0], null);
+ } else {
+ doneFun();
+ }
+ };
+ $.couch.session({
+ success : function(resp) {
+ var userCtx = resp.userCtx;
+ if (userCtx.name && userCtx.roles.indexOf("_admin") != -1) {
+ // admin but not admin party. dialog offering to make admin party
+ $.showDialog("dialog/_admin_party.html", {
+ submit: function(data, callback) {
+ $.couch.config({
+ success : function(conf) {
+ var meAdmin, adminConfs = [];
+ for (var name in conf) {
+ if (name == userCtx.name) {
+ meAdmin = [name, conf[name]];
+ } else {
+ adminConfs.push([name, conf[name]]);
+ }
+ }
+ adminConfs.unshift(meAdmin);
+ removeAdmins(adminConfs, function() {
+ callback();
+ $.futon.session.sidebar();
+ readyToRun = true;
+ setTimeout(fun, 500);
+ });
+ }
+ }, "admins");
+ }
+ });
+ } else if (userCtx.roles.indexOf("_admin") != -1) {
+ // admin party!
+ readyToRun = true;
+ fun();
+ } else {
+ // not an admin
+ alert("Error: You need to be an admin to run the tests.");
+ };
+ }
+ });
+ }
+};
+
+function updateTestsListing() {
+ for (var name in couchTests) {
+ var testFunction = couchTests[name];
+ var row = $("<tr><th></th><td></td><td></td><td></td></tr>")
+ .find("th").text(name).attr("title", "Show source").click(function() {
+ showSource(this);
+ }).end()
+ .find("td:nth(0)").addClass("status").text("not run").end()
+ .find("td:nth(1)").addClass("duration").end()
+ .find("td:nth(2)").addClass("details").end();
+ $("<button type='button' class='run' title='Run test'></button>").click(function() {
+ this.blur();
+ var self = this;
+ // check for admin party
+ setupAdminParty(function() {
+ runTest(self);
+ });
+ return false;
+ }).prependTo(row.find("th"));
+ row.attr("id", name).appendTo("#tests tbody.content");
+ }
+ $("#tests tr").removeClass("odd").filter(":odd").addClass("odd");
+ updateTestsFooter();
+}
+
+function updateTestsFooter() {
+ var tests = $("#tests tbody.content tr td.status");
+ var testsRun = tests.filter(".success, .error, .failure");
+ var testsFailed = testsRun.not(".success");
+ var totalDuration = 0;
+ $("#tests tbody.content tr td.duration:contains('ms')").each(function() {
+ var text = $(this).text();
+ totalDuration += parseInt(text.substr(0, text.length - 2), 10);
+ });
+ $("#tests tbody.footer td").html("<span>"+testsRun.length + " of " + tests.length +
+ " test(s) run, " + testsFailed.length + " failures (" +
+ totalDuration + " ms)</span> ");
+}
+
+// make report and save to local db
+// display how many reports need replicating to the mothership
+// have button to replicate them
+
+function saveTestReport() {
+ var report = makeTestReport();
+ if (report) {
+ var db = $.couch.db("test_suite_reports");
+ var saveReport = function(db_info) {
+ report.db = db_info;
+ $.couch.info({success : function(node_info) {
+ report.node = node_info;
+ db.saveDoc(report);
+ }});
+ };
+ var createDb = function() {
+ db.create({success: function() {
+ db.info({success:saveReport});
+ }});
+ };
+ db.info({error: createDb, success:saveReport});
+ }
+};
+
+function makeTestReport() {
+ var report = {};
+ report.summary = $("#tests tbody.footer td").text();
+ report.platform = testPlatform();
+ var date = new Date();
+ report.timestamp = date.getTime();
+ report.timezone = date.getTimezoneOffset();
+ report.tests = [];
+ $("#tests tbody.content tr").each(function() {
+ var status = $("td.status", this).text();
+ if (status != "not run") {
+ var test = {};
+ test.name = this.id;
+ test.status = status;
+ test.duration = parseInt($("td.duration", this).text(), 10);
+ test.details = [];
+ $("td.details li", this).each(function() {
+ test.details.push($(this).text());
+ });
+ if (test.details.length == 0) {
+ delete test.details;
+ }
+ report.tests.push(test);
+ }
+ });
+ if (report.tests.length > 0) return report;
+};
+
+function testPlatform() {
+ var b = $.browser;
+ var bs = ["mozilla", "msie", "opera", "safari"];
+ for (var i=0; i < bs.length; i++) {
+ if (b[bs[i]]) {
+ return {"browser" : bs[i], "version" : b.version};
+ }
+ };
+ return {"browser" : "undetected"};
+}
+
+
+function reportTests() {
+ // replicate the database to couchdb.couchdb.org
+}
+
+// Use T to perform a test that returns false on failure; when a test fails,
+// the offending assertion is displayed in the test details.
+// Example:
+// T(MyValue==1);
+function T(arg1, arg2, testName) {
+ if (!arg1) {
+ if (currentRow) {
+ if ($("td.details ol", currentRow).length == 0) {
+ $("<ol></ol>").appendTo($("td.details", currentRow));
+ }
+ var message = (arg2 != null ? arg2 : arg1).toString();
+ $("<li><b>Assertion " + (testName ? "'" + testName + "'" : "") + " failed:</b> <code class='failure'></code></li>")
+ .find("code").text(message).end()
+ .appendTo($("td.details ol", currentRow));
+ }
+ numFailures += 1;
+ }
+}
+
+function TEquals(expected, actual, testName) {
+ T(equals(expected, actual), "expected '" + repr(expected) +
+ "', got '" + repr(actual) + "'", testName);
+}
+
+function TEqualsIgnoreCase(expected, actual, testName) {
+ T(equals(expected.toUpperCase(), actual.toUpperCase()), "expected '" + repr(expected) +
+ "', got '" + repr(actual) + "'", testName);
+}
+
+function equals(a,b) {
+ if (a === b) return true;
+ try {
+ return repr(a) === repr(b);
+ } catch (e) {
+ return false;
+ }
+}
+
+function repr(val) {
+ if (val === undefined) {
+ return null;
+ } else if (val === null) {
+ return "null";
+ } else {
+ return JSON.stringify(val);
+ }
+}
+
+function makeDocs(start, end, templateDoc) {
+ var templateDocSrc = templateDoc ? JSON.stringify(templateDoc) : "{}";
+ if (end === undefined) {
+ end = start;
+ start = 0;
+ }
+ var docs = [];
+ for (var i = start; i < end; i++) {
+ var newDoc = eval("(" + templateDocSrc + ")");
+ newDoc._id = (i).toString();
+ newDoc.integer = i;
+ newDoc.string = (i).toString();
+ docs.push(newDoc);
+ }
+ return docs;
+}
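+
+// A hedged usage sketch ("type" is an illustrative template field):
+//   makeDocs(5);                    // docs with _id "0" through "4"
+//   makeDocs(0, 5, {type: "demo"}); // same ids, copied from the template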
+
+function run_on_modified_server(settings, fun) {
+ try {
+ // set the settings
+ for(var i=0; i < settings.length; i++) {
+ var s = settings[i];
+ var xhr = CouchDB.request("PUT", "/_config/" + s.section + "/" + s.key, {
+ body: JSON.stringify(s.value),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ CouchDB.maybeThrowError(xhr);
+ s.oldValue = xhr.responseText;
+ }
+ // run the thing
+ fun();
+ } finally {
+ // unset the settings
+ for(var j=0; j < i; j++) {
+ var s = settings[j];
+ if(s.oldValue == "\"\"\n") { // unset value
+ CouchDB.request("DELETE", "/_config/" + s.section + "/" + s.key, {
+ headers: {"X-Couch-Persist": "false"}
+ });
+ } else {
+ CouchDB.request("PUT", "/_config/" + s.section + "/" + s.key, {
+ body: s.oldValue,
+ headers: {"X-Couch-Persist": "false"}
+ });
+ }
+ }
+ }
+}
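+
+// A hedged usage sketch (the config section/key shown is illustrative):
+//   run_on_modified_server(
+//     [{section: "couchdb", key: "max_dbs_open", value: "500"}],
+//     function() { /* test body runs with the setting applied */ });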
+
+function stringFun(fun) {
+ var string = fun.toSource ? fun.toSource() : "(" + fun.toString() + ")";
+ return string;
+}
+
+function waitForSuccess(fun, tag) {
+ var start = new Date();
+ while(true) {
+ if (new Date() - start > 5000) {
+ throw("timeout: "+tag);
+ } else {
+ try {
+ fun();
+ break;
+ } catch (e) {}
+ // a synchronous request gives pending async requests a chance to complete
+ CouchDB.request("GET", "/test_suite_db/?tag="+encodeURIComponent(tag));
+ }
+ }
+}
+
+function waitForRestart() {
+ var waiting = true;
+ while (waiting) {
+ try {
+ CouchDB.request("GET", "/");
+ CouchDB.request("GET", "/");
+ waiting = false;
+ } catch(e) {
+ // the request will fail until restart completes
+ }
+ }
+};
+
+function restartServer() {
+ var xhr;
+ try {
+ CouchDB.request("POST", "/_restart");
+ } catch(e) {
+ // this request may sometimes fail
+ }
+ waitForRestart();
+}
+
diff --git a/1.1.x/share/www/script/couch_tests.js b/1.1.x/share/www/script/couch_tests.js
new file mode 100644
index 00000000..eb573526
--- /dev/null
+++ b/1.1.x/share/www/script/couch_tests.js
@@ -0,0 +1,105 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Used by replication test
+if (typeof window == 'undefined' || !window) {
+ CouchDB.host = "127.0.0.1:5984";
+ CouchDB.protocol = "http://";
+ CouchDB.inBrowser = false;
+} else {
+ CouchDB.host = window.location.host;
+ CouchDB.inBrowser = true;
+ CouchDB.protocol = window.location.protocol + "//";
+}
+
+CouchDB.urlPrefix = "..";
+var couchTests = {};
+
+function loadTest(file) {
+ loadScript("script/test/"+file);
+};
+// keep first
+loadTest("basics.js");
+
+// keep sorted
+loadTest("all_docs.js");
+loadTest("attachments.js");
+loadTest("attachments_multipart.js");
+loadTest("attachment_conflicts.js");
+loadTest("attachment_names.js");
+loadTest("attachment_paths.js");
+loadTest("attachment_ranges.js");
+loadTest("attachment_views.js");
+loadTest("auth_cache.js");
+loadTest("batch_save.js");
+loadTest("bulk_docs.js");
+loadTest("changes.js");
+loadTest("compact.js");
+loadTest("config.js");
+loadTest("conflicts.js");
+loadTest("content_negotiation.js");
+loadTest("cookie_auth.js");
+loadTest("copy_doc.js");
+loadTest("delayed_commits.js");
+loadTest("design_docs.js");
+loadTest("design_options.js");
+loadTest("design_paths.js");
+loadTest("erlang_views.js");
+loadTest("etags_head.js");
+loadTest("etags_views.js");
+loadTest("form_submit.js");
+loadTest("http.js");
+loadTest("invalid_docids.js");
+loadTest("jsonp.js");
+loadTest("large_docs.js");
+loadTest("list_views.js");
+loadTest("lots_of_docs.js");
+loadTest("method_override.js");
+loadTest("multiple_rows.js");
+loadScript("script/oauth.js");
+loadScript("script/sha1.js");
+loadTest("oauth.js");
+loadTest("proxyauth.js");
+loadTest("purge.js");
+loadTest("reader_acl.js");
+loadTest("recreate_doc.js");
+loadTest("reduce.js");
+loadTest("reduce_builtin.js");
+loadTest("reduce_false.js");
+loadTest("reduce_false_temp.js");
+loadTest("replication.js");
+loadTest("replicator_db.js");
+loadTest("rev_stemming.js");
+loadTest("rewrite.js");
+loadTest("security_validation.js");
+loadTest("show_documents.js");
+loadTest("stats.js");
+loadTest("update_documents.js");
+loadTest("users_db.js");
+loadTest("utf8.js");
+loadTest("uuids.js");
+loadTest("view_collation.js");
+loadTest("view_collation_raw.js");
+loadTest("view_conflicts.js");
+loadTest("view_compaction.js");
+loadTest("view_errors.js");
+loadTest("view_include_docs.js");
+loadTest("view_multi_key_all_docs.js");
+loadTest("view_multi_key_design.js");
+loadTest("view_multi_key_temp.js");
+loadTest("view_offsets.js");
+loadTest("view_pagination.js");
+loadTest("view_sandboxing.js");
+loadTest("view_update_seq.js");
+loadTest("view_xml.js");
+// keep sorted
+
diff --git a/1.1.x/share/www/script/futon.browse.js b/1.1.x/share/www/script/futon.browse.js
new file mode 100644
index 00000000..56435ae4
--- /dev/null
+++ b/1.1.x/share/www/script/futon.browse.js
@@ -0,0 +1,1290 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+(function($) {
+ $.futon = $.futon || {};
+ $.extend($.futon, {
+
+ // Page class for browse/index.html
+ CouchIndexPage: function() {
+ page = this;
+
+ $.futon.storage.declare("per_page", {defaultValue: 10});
+
+ this.addDatabase = function() {
+ $.showDialog("dialog/_create_database.html", {
+ submit: function(data, callback) {
+ if (!data.name || data.name.length == 0) {
+ callback({name: "Please enter a name."});
+ return;
+ }
+ $.couch.db(data.name).create({
+ error: function(status, id, reason) { callback({name: reason}) },
+ success: function(resp) {
+ location.href = "database.html?" + encodeURIComponent(data.name);
+ callback();
+ }
+ });
+ }
+ });
+ return false;
+ }
+
+ this.updateDatabaseListing = function(offset) {
+ offset |= 0;
+ var maxPerPage = parseInt($("#perpage").val(), 10);
+
+ $.couch.allDbs({
+ success: function(dbs) {
+ $("#paging a").unbind();
+ $("#databases tbody.content").empty();
+
+ var dbsOnPage = dbs.slice(offset, offset + maxPerPage);
+
+ $.each(dbsOnPage, function(idx, dbName) {
+ $("#databases tbody.content").append("<tr>" +
+ "<th><a href='database.html?" + encodeURIComponent(dbName) + "'>" +
+ dbName + "</a></th>" +
+ "<td class='size'></td><td class='count'></td>" +
+ "<td class='seq'></td></tr>");
+ $.couch.db(dbName).info({
+ success: function(info) {
+ $("#databases tbody.content tr:eq(" + idx + ")")
+ .find("td.size").text($.futon.formatSize(info.disk_size)).end()
+ .find("td.count").text(info.doc_count).end()
+ .find("td.seq").text(info.update_seq);
+ },
+ error : function() {}
+ });
+ });
+ $("#databases tbody tr:odd").addClass("odd");
+
+ if (offset > 0) {
+ $("#paging a.prev").attr("href", "#" + (offset - maxPerPage)).click(function() {
+ page.updateDatabaseListing(offset - maxPerPage);
+ });
+ } else {
+ $("#paging a.prev").removeAttr("href");
+ }
+ if (offset + maxPerPage < dbs.length) {
+ $("#paging a.next").attr("href", "#" + (offset + maxPerPage)).click(function() {
+ page.updateDatabaseListing(offset + maxPerPage);
+ });
+ } else {
+ $("#paging a.next").removeAttr("href");
+ }
+
+ var firstNum = offset + 1;
+ var lastNum = firstNum + dbsOnPage.length - 1;
+ $("#databases tbody.footer tr td span").text(
+ "Showing " + firstNum + "-" + lastNum + " of " + dbs.length +
+ " databases");
+ }
+ });
+ }
+
+ },
+
+ // Page class for browse/database.html
+ CouchDatabasePage: function() {
+ var urlParts = location.search.substr(1).split("/");
+ var dbName = decodeURIComponent(urlParts.shift());
+
+ var dbNameRegExp = new RegExp("[^a-z0-9_$()+/-]", "g");
+ dbName = dbName.replace(dbNameRegExp, "");
+
+ $.futon.storage.declareWithPrefix(dbName + ".", {
+ desc: {},
+ language: {defaultValue: "javascript"},
+ map_fun: {defaultValue: ""},
+ reduce_fun: {defaultValue: ""},
+ reduce: {},
+ group_level: {defaultValue: 100},
+ per_page: {defaultValue: 10},
+ view: {defaultValue: ""},
+ stale: {defaultValue: false}
+ });
+
+ var viewName = (urlParts.length > 0) ? urlParts.join("/") : null;
+ if (viewName) {
+ $.futon.storage.set("view", decodeURIComponent(viewName));
+ } else {
+ viewName = $.futon.storage.get("view");
+ if (viewName) {
+ this.redirecting = true;
+ location.href = "database.html?" + encodeURIComponent(dbName) +
+ "/" + encodeURIComponent(viewName);
+ }
+ }
+ var db = $.couch.db(dbName);
+
+ this.dbName = dbName;
+ viewName = decodeURIComponent(viewName);
+ this.viewName = viewName;
+ this.viewLanguage = "javascript";
+ this.db = db;
+ this.isDirty = false;
+ this.isTempView = viewName == "_temp_view";
+ page = this;
+
+ var templates = {
+ javascript: "function(doc) {\n emit(null, doc);\n}",
+ python: "def fun(doc):\n yield None, doc",
+ ruby: "lambda {|doc|\n emit(nil, doc);\n}"
+ }
+
+ this.newDocument = function() {
+ location.href = "document.html?" + encodeURIComponent(db.name);
+ }
+
+ this.compactAndCleanup = function() {
+ $.showDialog("dialog/_compact_cleanup.html", {
+ submit: function(data, callback) {
+ switch (data.action) {
+ case "compact_database":
+ db.compact({success: function(resp) { callback() }});
+ break;
+ case "compact_views":
+ var idx = page.viewName.indexOf("/_view");
+ if (idx == -1) {
+ alert("Compact Views requires focus on a view!");
+ } else {
+ var groupname = page.viewName.substring(8, idx);
+ db.compactView(groupname, {success: function(resp) { callback() }});
+ }
+ break;
+ case "view_cleanup":
+ db.viewCleanup({success: function(resp) { callback() }});
+ break;
+ }
+ }
+ });
+ }
+
+ this.deleteDatabase = function() {
+ $.showDialog("dialog/_delete_database.html", {
+ submit: function(data, callback) {
+ db.drop({
+ success: function(resp) {
+ callback();
+ location.href = "index.html";
+ if (window !== null) {
+ $("#dbs li").filter(function(index) {
+ return $("a", this).text() == dbName;
+ }).remove();
+ $.futon.navigation.removeDatabase(dbName);
+ }
+ }
+ });
+ }
+ });
+ }
+
+ this.databaseSecurity = function() {
+ $.showDialog("dialog/_database_security.html", {
+ load : function(d) {
+ db.getDbProperty("_security", {
+ success: function(r) {
+ ["admin", "reader"].forEach(function(key) {
+ var names = [];
+ var roles = [];
+
+ if (r && typeof r[key + "s"] === "object") {
+ if ($.isArray(r[key + "s"]["names"])) {
+ names = r[key + "s"]["names"];
+ }
+ if ($.isArray(r[key + "s"]["roles"])) {
+ roles = r[key + "s"]["roles"];
+ }
+ }
+
+ $("input[name=" + key + "_names]", d).val(JSON.stringify(names));
+ $("input[name=" + key + "_roles]", d).val(JSON.stringify(roles));
+ });
+ }
+ });
+ },
+ // maybe this should be 2 forms
+ submit: function(data, callback) {
+ var errors = {};
+ var secObj = {
+ admins: {
+ names: [],
+ roles: []
+ },
+ readers: {
+ names: [],
+ roles: []
+ }
+ };
+
+ ["admin", "reader"].forEach(function(key) {
+ var names, roles;
+
+ try {
+ names = JSON.parse(data[key + "_names"]);
+ } catch(e) { }
+ try {
+ roles = JSON.parse(data[key + "_roles"]);
+ } catch(e) { }
+
+ if ($.isArray(names)) {
+ secObj[key + "s"]["names"] = names;
+ } else {
+ errors[key + "_names"] = "The " + key +
+ " names must be an array of strings";
+ }
+ if ($.isArray(roles)) {
+ secObj[key + "s"]["roles"] = roles;
+ } else {
+ errors[key + "_roles"] = "The " + key +
+ " roles must be an array of strings";
+ }
+ });
+
+ if ($.isEmptyObject(errors)) {
+ db.setDbProperty("_security", secObj);
+ }
+ callback(errors);
+ }
+ });
+ }
+
+ this.populateViewEditor = function() {
+ if (viewName.match(/^_design\//)) {
+ page.revertViewChanges(function() {
+ var dirtyTimeout = null;
+ function updateDirtyState() {
+ clearTimeout(dirtyTimeout);
+ dirtyTimeout = setTimeout(function() {
+ var buttons = $("#viewcode button.save, #viewcode button.revert");
+ var viewCode = {
+ map: $("#viewcode_map").val(),
+ reduce: $("#viewcode_reduce").val()
+ };
+ $("#reduce, #grouplevel").toggle(!!viewCode.reduce);
+ page.isDirty = (viewCode.map != page.storedViewCode.map)
+ || (viewCode.reduce != (page.storedViewCode.reduce || ""))
+ || page.viewLanguage != page.storedViewLanguage;
+ if (page.isDirty) {
+ buttons.removeAttr("disabled");
+ } else {
+ buttons.attr("disabled", "disabled");
+ }
+ }, 100);
+ }
+ $("#viewcode textarea").enableTabInsertion()
+ .bind("input", updateDirtyState);
+ if ($.browser.msie || $.browser.safari) {
+ $("#viewcode textarea").bind("paste", updateDirtyState)
+ .bind("change", updateDirtyState)
+ .bind("keydown", updateDirtyState)
+ .bind("keypress", updateDirtyState)
+ .bind("keyup", updateDirtyState)
+ .bind("textInput", updateDirtyState);
+ }
+ $("#language").change(updateDirtyState);
+ page.updateDocumentListing();
+ });
+ } else if (viewName == "_temp_view") {
+ $("#viewcode textarea").enableTabInsertion();
+ page.viewLanguage = $.futon.storage.get("language");
+ page.updateViewEditor(
+ $.futon.storage.get("map_fun", templates[page.viewLanguage]),
+ $.futon.storage.get("reduce_fun")
+ );
+ } else {
+ $("#grouplevel, #reduce").hide();
+ page.updateDocumentListing();
+ }
+ page.populateLanguagesMenu();
+ if (this.isTempView) {
+ $("#tempwarn").show();
+ }
+ }
+
+ // Populate the languages dropdown, and listen to selection changes
+ this.populateLanguagesMenu = function() {
+ var all_langs = {};
+ var fill_language = function() {
+ var select = $("#language");
+ for (var language in all_langs) {
+ var option = $(document.createElement("option"))
+ .attr("value", language).text(language)
+ .appendTo(select);
+ }
+ if (select[0].options.length == 1) {
+ select[0].disabled = true;
+ } else {
+ select[0].disabled = false;
+ select.val(page.viewLanguage);
+ select.change(function() {
+ var language = $("#language").val();
+ if (language != page.viewLanguage) {
+ var mapFun = $("#viewcode_map").val();
+ if (mapFun == "" || mapFun == templates[page.viewLanguage]) {
+ // no edits made, so change to the new default
+ $("#viewcode_map").val(templates[language]);
+ }
+ page.viewLanguage = language;
+ $("#viewcode_map")[0].focus();
+ }
+ return false;
+ });
+ }
+ }
+ $.couch.config({
+ success: function(resp) {
+ for (var language in resp) {
+ all_langs[language] = resp[language];
+ }
+
+ $.couch.config({
+ success: function(resp) {
+ for (var language in resp) {
+ all_langs[language] = resp[language];
+ }
+ fill_language();
+ }
+ }, "native_query_servers");
+ },
+ error : function() {}
+ }, "query_servers");
+ }
+
+ this.populateViewsMenu = function() {
+ var select = $("#switch select");
+ db.allDocs({startkey: "_design/", endkey: "_design0",
+ include_docs: true,
+ success: function(resp) {
+ select[0].options.length = 3;
+ for (var i = 0; i < resp.rows.length; i++) {
+ var doc = resp.rows[i].doc;
+ var optGroup = $(document.createElement("optgroup"))
+ .attr("label", doc._id.substr(8)).appendTo(select);
+ var viewNames = [];
+ for (var name in doc.views) {
+ viewNames.push(name);
+ }
+ viewNames.sort();
+ for (var j = 0; j < viewNames.length; j++) {
+ var path = $.couch.encodeDocId(doc._id) + "/_view/" +
+ encodeURIComponent(viewNames[j]);
+ var option = $(document.createElement("option"))
+ .attr("value", path).text(encodeURIComponent(viewNames[j]))
+ .appendTo(optGroup);
+ if (path == viewName) {
+ option[0].selected = true;
+ }
+ }
+ }
+ }
+ });
+ if (!viewName.match(/^_design\//)) {
+ $.each(["_all_docs", "_design_docs", "_temp_view"], function(idx, name) {
+ if (viewName == name) {
+ select[0].options[idx].selected = true;
+ }
+ });
+ }
+ }
+
+ this.revertViewChanges = function(callback) {
+ if (!page.storedViewCode) {
+ var viewNameParts = viewName.split("/");
+ var designDocId = decodeURIComponent(viewNameParts[1]);
+ var localViewName = decodeURIComponent(viewNameParts[3]);
+ db.openDoc("_design/" + designDocId, {
+ error: function(status, error, reason) {
+ if (status == 404) {
+ $.futon.storage.del("view");
+ location.href = "database.html?" + encodeURIComponent(db.name);
+ }
+ },
+ success: function(resp) {
+ if(!resp.views || !resp.views[localViewName]) {
+ $.futon.storage.del("view");
+ location.href = "database.html?" + encodeURIComponent(db.name);
+ }
+ var viewCode = resp.views[localViewName];
+ page.viewLanguage = resp.language || "javascript";
+ $("#language").val(encodeURIComponent(page.viewLanguage));
+ page.updateViewEditor(viewCode.map, viewCode.reduce || "");
+ $("#viewcode button.revert, #viewcode button.save").attr("disabled", "disabled");
+ page.storedViewCode = viewCode;
+ page.storedViewLanguage = page.viewLanguage;
+ if (callback) callback();
+ }
+ }, {async: false});
+ } else {
+ page.updateViewEditor(page.storedViewCode.map,
+ page.storedViewCode.reduce || "");
+ page.viewLanguage = page.storedViewLanguage;
+ $("#language").val(encodeURIComponent(page.viewLanguage));
+ $("#viewcode button.revert, #viewcode button.save").attr("disabled", "disabled");
+ page.isDirty = false;
+ if (callback) callback();
+ }
+ }
+
+ this.updateViewEditor = function(mapFun, reduceFun) {
+ if (!mapFun) return;
+ $("#viewcode_map").val(mapFun);
+ $("#viewcode_reduce").val(reduceFun);
+ var lines = Math.max(
+ mapFun.split("\n").length,
+ reduceFun.split("\n").length
+ );
+ $("#reduce, #grouplevel").toggle(!!reduceFun);
+ $("#viewcode textarea").attr("rows", Math.min(15, Math.max(3, lines)));
+ }
+
+ this.saveViewAs = function() {
+ if (viewName && /^_design/.test(viewName)) {
+ var viewNameParts = viewName.split("/");
+ var designDocId = decodeURIComponent(viewNameParts[1]);
+ var localViewName = decodeURIComponent(viewNameParts[3]);
+ } else {
+ var designDocId = "", localViewName = "";
+ }
+ $.showDialog("dialog/_save_view_as.html", {
+ load: function(elem) {
+ $("#input_docid", elem).val(designDocId).suggest(function(text, callback) {
+ db.allDocs({
+ limit: 10, startkey: "_design/" + text, endkey: "_design0",
+ success: function(docs) {
+ var matches = [];
+ for (var i = 0; i < docs.rows.length; i++) {
+ var docName = docs.rows[i].id.substr(8);
+ if (docName.indexOf(text) == 0) {
+ matches.push(docName);
+ }
+ }
+ callback(matches);
+ }
+ });
+ });
+ $("#input_name", elem).val(localViewName).suggest(function(text, callback) {
+ db.openDoc("_design/" + $("#input_docid").val(), {
+ error: function() {}, // ignore
+ success: function(doc) {
+ var matches = [];
+ if (!doc.views) return;
+ for (var viewName in doc.views) {
+ if (viewName.indexOf(text) == 0) {
+ matches.push(viewName);
+ }
+ }
+ callback(matches);
+ }
+ });
+ });
+ },
+ submit: function(data, callback) {
+ if (!data.docid || !data.name) {
+ var errors = {};
+ if (!data.docid) errors.docid = "Please enter a document ID";
+ if (!data.name) errors.name = "Please enter a view name";
+ callback(errors);
+ } else {
+ var viewCode = {
+ map: $("#viewcode_map").val(),
+ reduce: $("#viewcode_reduce").val() || undefined
+ };
+ var docId = ["_design", data.docid].join("/");
+ function save(doc) {
+ if (!doc) {
+ doc = {_id: docId, language: page.viewLanguage};
+ } else {
+ var numViews = 0;
+ for (var viewName in (doc.views || {})) {
+ if (viewName != data.name) numViews++;
+ }
+ if (numViews > 0 && page.viewLanguage != doc.language) {
+ callback({
+ docid: "Cannot save to " + data.docid +
+ " because its language is \"" + doc.language +
+ "\", not \"" +
+ encodeURIComponent(page.viewLanguage) + "\"."
+ });
+ return;
+ }
+ doc.language = page.viewLanguage;
+ }
+ if (doc.views === undefined) doc.views = {};
+ doc.views[data.name] = viewCode;
+ db.saveDoc(doc, {
+ success: function(resp) {
+ callback();
+ page.isDirty = false;
+ location.href = "database.html?" + encodeURIComponent(dbName) +
+ "/" + $.couch.encodeDocId(doc._id) +
+ "/_view/" + encodeURIComponent(data.name);
+ }
+ });
+ }
+ db.openDoc(docId, {
+ error: function(status, error, reason) {
+ if (status == 404) save(null);
+ else alert(reason);
+ },
+ success: function(doc) {
+ save(doc);
+ }
+ });
+ }
+ }
+ });
+ }
+
+ this.saveViewChanges = function() {
+ var viewNameParts = viewName.split("/");
+ var designDocId = decodeURIComponent(viewNameParts[1]);
+ var localViewName = decodeURIComponent(viewNameParts[3]);
+ db.openDoc("_design/" + designDocId, {
+ success: function(doc) {
+ var numViews = 0;
+ for (var viewName in (doc.views || {})) {
+ if (viewName != localViewName) numViews++;
+ }
+ if (numViews > 0 && page.viewLanguage != doc.language) {
+ alert("Cannot save view because the design document language " +
+ "is \"" + doc.language + "\", not \"" +
+ page.viewLanguage + "\".");
+ return;
+ }
+ doc.language = page.viewLanguage;
+ var viewDef = doc.views[localViewName];
+ viewDef.map = $("#viewcode_map").val();
+ viewDef.reduce = $("#viewcode_reduce").val() || undefined;
+ db.saveDoc(doc, {
+ success: function(resp) {
+ page.isDirty = false;
+ $("#viewcode button.revert, #viewcode button.save")
+ .attr("disabled", "disabled");
+ }
+ });
+ }
+ });
+ }
+
+ this.updateDesignDocLink = function() {
+ if (viewName && /^_design/.test(viewName)) {
+ var docId = "_design/" + encodeURIComponent(decodeURIComponent(viewName).split("/")[1]);
+ $("#designdoc-link").attr("href", "document.html?" +
+ encodeURIComponent(dbName) + "/" + $.couch.encodeDocId(docId)).text(docId);
+ } else {
+ $("#designdoc-link").removeAttr("href").text("");
+ }
+ }
+
+ this.jumpToDocument = function(docId) {
+ if (docId != "") {
+ location.href = 'document.html?' + encodeURIComponent(db.name)
+ + "/" + $.couch.encodeDocId(docId);
+ }
+ }
+
+ this.updateDocumentListing = function(options) {
+ if (options === undefined) options = {};
+ if (options.limit === undefined) {
+ var perPage = parseInt($("#perpage").val(), 10);
+ // Fetch an extra row so we know when we're on the last page for
+ // reduce views
+ options.limit = perPage + 1;
+ } else {
+ perPage = options.limit - 1;
+ }
+ if ($("#documents thead th.key").is(".desc")) {
+ if (typeof options.descending == 'undefined') options.descending = true;
+ var descend = true;
+ $.futon.storage.set("desc", "1");
+ } else {
+ var descend = false;
+ $.futon.storage.del("desc");
+ }
+ $("#paging a").unbind();
+ $("#documents").find("tbody.content").empty().end().show();
+ page.updateDesignDocLink();
+
+ options.success = function(resp) {
+ if (resp.offset === undefined) {
+ resp.offset = 0;
+ }
+ var descending_reverse = ((options.descending && !descend) || (descend && (options.descending === false)));
+ var has_reduce_prev = resp.total_rows === undefined && (descending_reverse ? resp.rows.length > perPage : options.startkey !== undefined);
+ if (descending_reverse && resp.rows) {
+ resp.rows = resp.rows.reverse();
+ if (resp.rows.length > perPage) {
+ resp.rows.push(resp.rows.shift());
+ }
+ }
+ if (resp.rows !== null && (has_reduce_prev || (descending_reverse ?
+ (resp.total_rows - resp.offset > perPage) :
+ (resp.offset > 0)))) {
+ $("#paging a.prev").attr("href", "#" + (resp.offset - perPage)).click(function() {
+ var opt = {
+ descending: !descend,
+ limit: options.limit
+ };
+ if (resp.rows.length > 0) {
+ var firstDoc = resp.rows[0];
+ opt.startkey = firstDoc.key !== undefined ? firstDoc.key : null;
+ if (firstDoc.id !== undefined) {
+ opt.startkey_docid = firstDoc.id;
+ }
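+ // Skip the anchor row itself; it is already shown on the current page.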
+ opt.skip = 1;
+ }
+ page.updateDocumentListing(opt);
+ return false;
+ });
+ } else {
+ $("#paging a.prev").removeAttr("href");
+ }
+ var has_reduce_next = resp.total_rows === undefined && (descending_reverse ? options.startkey !== undefined : resp.rows.length > perPage);
+ if (resp.rows !== null && (has_reduce_next || (descending_reverse ?
+ (resp.offset - resp.total_rows < perPage) :
+ (resp.total_rows - resp.offset > perPage)))) {
+ $("#paging a.next").attr("href", "#" + (resp.offset + perPage)).click(function() {
+ var opt = {
+ descending: descend,
+ limit: options.limit
+ };
+ if (resp.rows.length > 0) {
+ var lastDoc = resp.rows[Math.min(perPage, resp.rows.length) - 1];
+ opt.startkey = lastDoc.key !== undefined ? lastDoc.key : null;
+ if (lastDoc.id !== undefined) {
+ opt.startkey_docid = lastDoc.id;
+ }
+ opt.skip = 1;
+ }
+ page.updateDocumentListing(opt);
+ return false;
+ });
+ } else {
+ $("#paging a.next").removeAttr("href");
+ }
+
+ for (var i = 0; i < Math.min(perPage, resp.rows.length); i++) {
+ var row = resp.rows[i];
+ var tr = $("<tr></tr>");
+ var key = "null";
+ if (row.key !== null) {
+ key = $.futon.formatJSON(row.key, {indent: 0, linesep: ""});
+ }
+ if (row.id) {
+ key = key.replace(/\\"/g, '"');
+ var rowlink = encodeURIComponent(db.name) +
+ "/" + $.couch.encodeDocId(row.id);
+ $("<td class='key'><a href=\"document.html?" + rowlink + "\"><strong>"
+ + $.futon.escape(key) + "</strong><br>"
+ + "<span class='docid'>ID:&nbsp;" + $.futon.escape(row.id) + "</span></a></td>")
+ .appendTo(tr);
+ } else {
+ $("<td class='key'><strong></strong></td>")
+ .find("strong").text(key).end()
+ .appendTo(tr);
+ }
+ var value = "null";
+ if (row.value !== null) {
+ value = $.futon.formatJSON(row.value, {
+ html: true, indent: 0, linesep: "", quoteKeys: false
+ });
+ }
+ $("<td class='value'><div></div></td>").find("div").html(value).end()
+ .appendTo(tr).dblclick(function() {
+ location.href = this.previousSibling.firstChild.href;
+ });
+ tr.appendTo("#documents tbody.content");
+ }
+ var firstNum = 1;
+ var totalNum = Math.min(perPage, resp.rows.length), lastNum = totalNum;
+ if (resp.total_rows != null) {
+ if (descending_reverse) {
+ lastNum = Math.min(resp.total_rows, resp.total_rows - resp.offset);
+ firstNum = lastNum - totalNum + 1;
+ } else {
+ firstNum = Math.min(resp.total_rows, resp.offset + 1);
+ lastNum = firstNum + totalNum - 1;
+ }
+ totalNum = resp.total_rows;
+ } else {
+ totalNum = "unknown";
+ }
+ $("#paging").show();
+
+ $("#documents tbody.footer td span").text(
+ "Showing " + firstNum + "-" + lastNum + " of " + totalNum +
+ " row" + (firstNum != lastNum || totalNum == "unknown" ? "s" : ""));
+ $("#documents tbody tr:odd").addClass("odd");
+ }
+ options.error = function(status, error, reason) {
+ alert("Error: " + error + "\n\n" + reason);
+ }
+
+ if (!viewName || viewName == "_all_docs") {
+ $("#switch select")[0].selectedIndex = 0;
+ db.allDocs(options);
+ } else {
+ if (viewName == "_temp_view") {
+ $("#viewcode").show().removeClass("collapsed");
+ var mapFun = $("#viewcode_map").val();
+ $.futon.storage.set("map_fun", mapFun);
+ var reduceFun = $.trim($("#viewcode_reduce").val()) || null;
+ if (reduceFun) {
+ $.futon.storage.set("reduce_fun", reduceFun);
+ if ($("#reduce :checked").length) {
+ var level = parseInt($("#grouplevel select").val(), 10);
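+ // Level 0 turns grouping off; at 100 ("exact") no group_level is
+ // sent, so results are grouped by the full key.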
+ options.group = level > 0;
+ if (options.group && level < 100) {
+ options.group_level = level;
+ }
+ } else {
+ options.reduce = false;
+ }
+ }
+ $.futon.storage.set("language", page.viewLanguage);
+ db.query(mapFun, reduceFun, page.viewLanguage, options);
+ } else if (viewName == "_design_docs") {
+ options.startkey = options.descending ? "_design0" : "_design";
+ options.endkey = options.descending ? "_design" : "_design0";
+ db.allDocs(options);
+ } else {
+ $("button.compactview").show();
+ $("#viewcode").show();
+ var currentMapCode = $("#viewcode_map").val();
+ var currentReduceCode = $.trim($("#viewcode_reduce").val()) || null;
+ if (currentReduceCode) {
+ if ($("#reduce :checked").length) {
+ var level = parseInt($("#grouplevel select").val(), 10);
+ options.group = level > 0;
+ if (options.group && level < 100) {
+ options.group_level = level;
+ }
+ } else {
+ options.reduce = false;
+ }
+ }
+ if (page.isDirty) {
+ db.query(currentMapCode, currentReduceCode, page.viewLanguage, options);
+ } else {
+ var viewParts = decodeURIComponent(viewName).split('/');
+ if ($.futon.storage.get("stale")) {
+ options.stale = "ok";
+ }
+
+ db.view(viewParts[1] + "/" + viewParts[3], options);
+ }
+ }
+ }
+ }
+
+ window.onbeforeunload = function() {
+ $("#switch select").val(viewName);
+ if (page.isDirty) {
+ return "You've made changes to the view code that have not been " +
+ "saved yet.";
+ }
+ }
+
+ },
+
+ // Page class for browse/document.html
+ CouchDocumentPage: function() {
+ var urlParts = location.search.substr(1).split("/");
+ var dbName = decodeURIComponent(urlParts.shift());
+ if (urlParts.length) {
+ var idParts = urlParts.join("/").split("@", 2);
+ var docId = decodeURIComponent(idParts[0]);
+ var docRev = (idParts.length > 1) ? idParts[1] : null;
+ this.isNew = false;
+ } else {
+ var docId = $.couch.newUUID();
+ var docRev = null;
+ this.isNew = true;
+ }
+ var db = $.couch.db(dbName);
+
+ $.futon.storage.declare("tab", {defaultValue: "tabular", scope: "cookie"});
+
+ this.dbName = dbName;
+ this.db = db;
+ this.docId = docId;
+ this.doc = null;
+ this.isDirty = this.isNew;
+ page = this;
+
+ this.activateTabularView = function() {
+ if ($("#fields tbody.source textarea").length > 0)
+ return;
+
+ $.futon.storage.set("tab", "tabular");
+ $("#tabs li").removeClass("active").filter(".tabular").addClass("active");
+ $("#fields thead th:first").text("Field").attr("colspan", 1).next().show();
+ $("#fields tbody.content").show();
+ $("#fields tbody.source").hide();
+ return false;
+ }
+
+ this.activateSourceView = function() {
+ $.futon.storage.set("tab", "source");
+ $("#tabs li").removeClass("active").filter(".source").addClass("active");
+ $("#fields thead th:first").text("Source").attr("colspan", 2).next().hide();
+ $("#fields tbody.content").hide();
+ $("#fields tbody.source").find("td").each(function() {
+ $(this).html($("<pre></pre>").html($.futon.formatJSON(page.doc, {html: true})))
+ .makeEditable({allowEmpty: false,
+ createInput: function(value) {
+ var rows = value.split("\n").length;
+ return $("<textarea rows='" + rows + "' cols='80' spellcheck='false'></textarea>").enableTabInsertion();
+ },
+ prepareInput: function(input) {
+ $(input).makeResizable({vertical: true});
+ },
+ end: function() {
+ $(this).html($("<pre></pre>").html($.futon.formatJSON(page.doc, {html: true})));
+ },
+ accept: function(newValue) {
+ page.doc = JSON.parse(newValue);
+ page.isDirty = true;
+ page.updateFieldListing(true);
+ },
+ populate: function(value) {
+ return $.futon.formatJSON(page.doc);
+ },
+ validate: function(value) {
+ try {
+ var doc = JSON.parse(value);
+ if (typeof doc != "object")
+ throw new SyntaxError("Please enter a valid JSON document (for example, {}).");
+ return true;
+ } catch (err) {
+ var msg = err.message;
+ if (msg == "parseJSON" || msg == "JSON.parse") {
+ msg = "There is a syntax error in the document.";
+ }
+ $("<div class='error'></div>").text(msg).appendTo(this);
+ return false;
+ }
+ }
+ });
+ }).end().show();
+ return false;
+ }
+
+ this.addField = function() {
+ if (!$("#fields tbody.content:visible").length) {
+ location.hash = "#tabular";
+ page.activateTabularView();
+ }
+ var fieldName = "unnamed";
+ var fieldIdx = 1;
+ while (page.doc.hasOwnProperty(fieldName)) {
+ fieldName = "unnamed " + fieldIdx++;
+ }
+ page.doc[fieldName] = null;
+ var row = _addRowForField(page.doc, fieldName);
+ page.isDirty = true;
+ row.find("th b").dblclick();
+ }
+
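+ // Comparator for document fields: internal (underscore-prefixed) fields
+ // sort first, with _attachments last among them; everything else sorts
+ // alphabetically.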
+ var _sortFields = function(a, b) {
+ var a0 = a.charAt(0), b0 = b.charAt(0);
+ if (a0 == "_" && b0 != "_") {
+ return -1;
+ } else if (a0 != "_" && b0 == "_") {
+ return 1;
+ } else if (a == "_attachments" || b == "_attachments") {
+ return a == "_attachments" ? 1 : -1;
+ } else {
+ return a < b ? -1 : a != b ? 1 : 0;
+ }
+ }
+
+ this.updateFieldListing = function(noReload) {
+ $("#fields tbody.content").empty();
+
+ function handleResult(doc, revs) {
+ page.doc = doc;
+ var propNames = [];
+ for (var prop in doc) {
+ propNames.push(prop);
+ }
+ // Order properties alphabetically, but put internal fields first
+ propNames.sort(_sortFields);
+ for (var pi = 0; pi < propNames.length; pi++) {
+ _addRowForField(doc, propNames[pi]);
+ }
+ if (revs.length > 1) {
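+ // _revs_info lists revisions newest-first, so the older revision
+ // ("prev") sits at the next index and the newer one at the previous.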
+ var currentIndex = 0;
+ for (var i = 0; i < revs.length; i++) {
+ if (revs[i].rev == doc._rev) {
+ currentIndex = i;
+ break;
+ }
+ }
+ if (currentIndex < revs.length - 1) {
+ var prevRev = revs[currentIndex + 1].rev;
+ $("#paging a.prev").attr("href", "?" + encodeURIComponent(dbName) +
+ "/" + $.couch.encodeDocId(docId) + "@" + prevRev);
+ }
+ if (currentIndex > 0) {
+ var nextRev = revs[currentIndex - 1].rev;
+ $("#paging a.next").attr("href", "?" + encodeURIComponent(dbName) +
+ "/" + $.couch.encodeDocId(docId) + "@" + nextRev);
+ }
+ $("#fields tbody.footer td span").text("Showing revision " +
+ (revs.length - currentIndex) + " of " + revs.length);
+ }
+ if ($.futon.storage.get("tab") == "source") {
+ page.activateSourceView();
+ }
+ }
+
+ if (noReload) {
+ handleResult(page.doc, []);
+ return;
+ }
+
+ if (!page.isNew) {
+ db.openDoc(docId, {revs_info: true,
+ success: function(doc) {
+ var revs = doc._revs_info || [];
+ delete doc._revs_info;
+ if (docRev != null) {
+ db.openDoc(docId, {rev: docRev,
+ error: function(status, error, reason) {
+ alert("The requested revision was not found. You will " +
+ "be redirected back to the latest revision.");
+ location.href = "?" + encodeURIComponent(dbName) +
+ "/" + $.couch.encodeDocId(docId);
+ },
+ success: function(doc) {
+ handleResult(doc, revs);
+ }
+ });
+ } else {
+ handleResult(doc, revs);
+ }
+ }
+ });
+ } else {
+ handleResult({_id: docId}, []);
+ $("#fields tbody td").dblclick();
+ }
+ }
+
+ this.deleteDocument = function() {
+ $.showDialog("dialog/_delete_document.html", {
+ submit: function(data, callback) {
+ db.removeDoc(page.doc, {
+ success: function(resp) {
+ callback();
+ location.href = "database.html?" + encodeURIComponent(dbName);
+ }
+ });
+ }
+ });
+ }
+
+ this.saveDocument = function() {
+ db.saveDoc(page.doc, {
+ error: function(status, error, reason) {
+ alert("Error: " + error + "\n\n" + reason);
+ },
+ success: function(resp) {
+ page.isDirty = false;
+ location.href = "?" + encodeURIComponent(dbName) +
+ "/" + $.couch.encodeDocId(page.docId);
+ }
+ });
+ }
+
+ this.uploadAttachment = function() {
+ if (page.isDirty) {
+ alert("You need to save or revert any changes you have made to the " +
+ "document before you can attach a new file.");
+ return false;
+ }
+ $.showDialog("dialog/_upload_attachment.html", {
+ load: function(elem) {
+ $("input[name='_rev']", elem).val(page.doc._rev);
+ },
+ submit: function(data, callback) {
+ if (!data._attachments || data._attachments.length == 0) {
+ callback({_attachments: "Please select a file to upload."});
+ return;
+ }
+ var form = $("#upload-form");
+ form.find("#progress").css("visibility", "visible");
+ form.ajaxSubmit({
+ url: db.uri + $.couch.encodeDocId(page.docId),
+ success: function(resp) {
+ form.find("#progress").css("visibility", "hidden");
+ page.isDirty = false;
+ location.href = "?" + encodeURIComponent(dbName) +
+ "/" + $.couch.encodeDocId(page.docId);
+ }
+ });
+ }
+ });
+ }
+
+ window.onbeforeunload = function() {
+ if (page.isDirty) {
+ return "You've made changes to this document that have not been " +
+ "saved yet.";
+ }
+ }
+
+ function _addRowForField(doc, fieldName) {
+ var row = $("<tr><th></th><td></td></tr>")
+ .find("th").append($("<b></b>").text(fieldName)).end()
+ .appendTo("#fields tbody.content");
+ if (fieldName == "_attachments") {
+ row.find("td").append(_renderAttachmentList(doc[fieldName]));
+ } else {
+ row.find("td").append(_renderValue(doc[fieldName]));
+ _initKey(doc, row, fieldName);
+ _initValue(doc, row, fieldName);
+ }
+ $("#fields tbody.content tr").removeClass("odd").filter(":odd").addClass("odd");
+ row.data("name", fieldName);
+ return row;
+ }
+
+ function _initKey(doc, row, fieldName) {
+ if (fieldName == "_id" || fieldName == "_rev") {
+ return;
+ }
+
+ var cell = row.find("th");
+
+ $("<button type='button' class='delete' title='Delete field'></button>").click(function() {
+ delete doc[fieldName];
+ row.remove();
+ page.isDirty = true;
+ $("#fields tbody.content tr").removeClass("odd").filter(":odd").addClass("odd");
+ }).prependTo(cell);
+
+ cell.find("b").makeEditable({allowEmpty: false,
+ accept: function(newName, oldName) {
+ doc[newName] = doc[oldName];
+ delete doc[oldName];
+ row.data("name", newName);
+ $(this).text(newName);
+ page.isDirty = true;
+ },
+ begin: function() {
+ row.find("th button.delete").hide();
+ return true;
+ },
+ end: function(keyCode) {
+ row.find("th button.delete").show();
+ if (keyCode == 9) { // tab, move to editing the value
+ row.find("td").dblclick();
+ }
+ },
+ validate: function(newName, oldName) {
+ $("div.error", this).remove();
+ if (newName != oldName && doc[newName] !== undefined) {
+ $("<div class='error'>Already have field with that name.</div>")
+ .appendTo(this);
+ return false;
+ }
+ return true;
+ }
+ });
+ }
+
+ function _initValue(doc, row, fieldName) {
+ if ((fieldName == "_id" && !page.isNew) || fieldName == "_rev") {
+ return;
+ }
+
+ row.find("td").makeEditable({acceptOnBlur: false, allowEmpty: true,
+ createInput: function(value) {
+ value = doc[row.data("name")];
+ var elem = $(this);
+ if (elem.find("dl").length > 0 ||
+ elem.find("code").is(".array, .object") ||
+ typeof(value) == "string" && (value.length > 60 || value.match(/\n/))) {
+ return $("<textarea rows='1' cols='40' spellcheck='false'></textarea>");
+ }
+ return $("<input type='text' spellcheck='false'>");
+ },
+ end: function() {
+ $(this).children().remove();
+ $(this).append(_renderValue(doc[row.data("name")]));
+ },
+ prepareInput: function(input) {
+ if ($(input).is("textarea")) {
+ var height = Math.min(input.scrollHeight, document.body.clientHeight - 100);
+ $(input).height(height).makeResizable({vertical: true}).enableTabInsertion();
+ }
+ },
+ accept: function(newValue) {
+ var fieldName = row.data("name");
+ try {
+ doc[fieldName] = JSON.parse(newValue);
+ } catch (err) {
+ doc[fieldName] = newValue;
+ }
+ page.isDirty = true;
+ if (fieldName == "_id") {
+ page.docId = page.doc._id = doc[fieldName];
+ $("h1 strong").text(page.docId);
+ }
+ },
+ populate: function(value) {
+ value = doc[row.data("name")];
+ if (typeof(value) == "string") {
+ return value;
+ }
+ return $.futon.formatJSON(value);
+ },
+ validate: function(value) {
+ $("div.error", this).remove();
+ try {
+ var parsed = JSON.parse(value);
+ if (row.data("name") == "_id" && typeof(parsed) != "string") {
+ $("<div class='error'>The document ID must be a string.</div>")
+ .appendTo(this);
+ return false;
+ }
+ return true;
+ } catch (err) {
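+ // Input that isn't valid JSON is accepted and stored as a plain string.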
+ return true;
+ }
+ }
+ });
+ }
+
+ function _renderValue(value) {
+ function isNullOrEmpty(val) {
+ if (val == null) return true;
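+ // Any enumerable property means the object or array is non-empty.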
+ for (var i in val) return false;
+ return true;
+ }
+ function render(val) {
+ var type = typeof(val);
+ if (type == "object" && !isNullOrEmpty(val)) {
+ var list = $("<dl></dl>");
+ for (var i in val) {
+ $("<dt></dt>").text(i).appendTo(list);
+ $("<dd></dd>").append(render(val[i])).appendTo(list);
+ }
+ return list;
+ } else {
+ var html = $.futon.formatJSON(val, {
+ html: true,
+ escapeStrings: false
+ });
+ var n = $(html);
+ if (n.text().length > 140) {
+ // This code reduces a long string into a summarized string with a link to expand it.
+ // Someone, somewhere, is doing something nasty with the event after it leaves these handlers.
+ // At this time I can't track down the offender; it might actually be a jQuery propagation issue.
+ var fulltext = n.text();
+ var mintext = n.text().slice(0, 140);
+ var e = $('<a href="#expand">...</a>');
+ var m = $('<a href="#min">X</a>');
+ var expand = function (evt) {
+ n.empty();
+ n.text(fulltext);
+ n.append(m);
+ evt.stopPropagation();
+ evt.stopImmediatePropagation();
+ evt.preventDefault();
+ }
+ var minimize = function (evt) {
+ n.empty();
+ n.text(mintext);
+ // For some reason the old element's handler won't fire after it is removed and re-added.
+ e = $('<a href="#expand">...</a>');
+ e.click(expand);
+ n.append(e);
+ evt.stopPropagation();
+ evt.stopImmediatePropagation();
+ evt.preventDefault();
+ }
+ e.click(expand);
+ n.click(minimize);
+ n.text(mintext);
+ n.append(e);
+ }
+ return n;
+ }
+ }
+ var elem = render(value);
+
+ elem.find("dd:has(dl)").hide().prev("dt").addClass("collapsed");
+ elem.find("dd:not(:has(dl))").addClass("inline").prev().addClass("inline");
+ elem.find("dt.collapsed").click(function() {
+ $(this).toggleClass("collapsed").next().toggle();
+ });
+
+ return elem;
+ }
+
+ function _renderAttachmentList(attachments) {
+ var ul = $("<ul></ul>").addClass("attachments");
+ $.each(attachments, function(idx, attachment) {
+ _renderAttachmentItem(idx, attachment).appendTo(ul);
+ });
+ return ul;
+ }
+
+ function _renderAttachmentItem(name, attachment) {
+ var attachmentHref = db.uri + $.couch.encodeDocId(page.docId)
+ + "/" + encodeAttachment(name);
+ var li = $("<li></li>");
+ $("<a href='' title='Download file' target='_top'></a>").text(name)
+ .attr("href", attachmentHref)
+ .wrapInner("<tt></tt>").appendTo(li);
+ $("<span>()</span>").text("" + $.futon.formatSize(attachment.length) +
+ ", " + attachment.content_type).addClass("info").appendTo(li);
+ if (name == "tests.js") {
+ li.find('span.info').append(', <a href="/_utils/couch_tests.html?'
+ + attachmentHref + '">open in test runner</a>');
+ }
+ _initAttachmentItem(name, attachment, li);
+ return li;
+ }
+
+ function _initAttachmentItem(name, attachment, li) {
+ $("<button type='button' class='delete' title='Delete attachment'></button>").click(function() {
+ if (!li.siblings("li").length) {
+ delete page.doc._attachments;
+ li.parents("tr").remove();
+ $("#fields tbody.content tr").removeClass("odd").filter(":odd").addClass("odd");
+ } else {
+ delete page.doc._attachments[name];
+ li.remove();
+ }
+ page.isDirty = true;
+ return false;
+ }).prependTo($("a", li));
+ }
+ },
+
+ });
+
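+ // Percent-encode each path segment of an attachment name, joining with
+ // "%2f" so slashes in the name are not treated as path separators.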
+ function encodeAttachment(name) {
+ var encoded = [], parts = name.split('/');
+ for (var i=0; i < parts.length; i++) {
+ encoded.push(encodeURIComponent(parts[i]));
+ }
+ return encoded.join('%2f');
+ }
+
+})(jQuery);
diff --git a/1.1.x/share/www/script/futon.format.js b/1.1.x/share/www/script/futon.format.js
new file mode 100644
index 00000000..0eb9b104
--- /dev/null
+++ b/1.1.x/share/www/script/futon.format.js
@@ -0,0 +1,146 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+(function($) {
+ $.futon = $.futon || {};
+ $.extend($.futon, {
+ escape: function(string) {
+ return string.replace(/&/g, "&amp;")
+ .replace(/</g, "&lt;")
+ .replace(/>/g, "&gt;")
+ .replace(/"/g, "&quot;")
+ .replace(/'/g, "&#39;")
+ ;
+ },
+
+ // JSON pretty printing
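+ // e.g. formatJSON({a: 1}, {indent: 0, linesep: ""}) -> '{"a": 1}'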
+ formatJSON: function(val, options) {
+ options = $.extend({
+ escapeStrings: true,
+ indent: 4,
+ linesep: "\n",
+ quoteKeys: true
+ }, options || {});
+ var itemsep = options.linesep.length ? "," + options.linesep : ", ";
+
+ function format(val, depth) {
+ var tab = [];
+ for (var i = 0; i < options.indent * depth; i++) tab.push("");
+ tab = tab.join(" ");
+
+ var type = typeof val;
+ switch (type) {
+ case "boolean":
+ case "number":
+ case "string":
+ var retval = val;
+ if (type == "string" && !options.escapeStrings) {
+ retval = indentLines(retval.replace(/\r\n/g, "\n"), tab.substr(options.indent));
+ } else {
+ if (options.html) {
+ retval = $.futon.escape(JSON.stringify(val));
+ } else {
+ retval = JSON.stringify(val);
+ }
+ }
+ if (options.html) {
+ retval = "<code class='" + type + "'>" + retval + "</code>";
+ }
+ return retval;
+
+ case "object": {
+ if (val === null) {
+ if (options.html) {
+ return "<code class='null'>null</code>";
+ }
+ return "null";
+ }
+ if (val.constructor == Date) {
+ return JSON.stringify(val);
+ }
+
+ var buf = [];
+
+ if (val.constructor == Array) {
+ buf.push("[");
+ for (var index = 0; index < val.length; index++) {
+ buf.push(index > 0 ? itemsep : options.linesep);
+ buf.push(tab, format(val[index], depth + 1));
+ }
+ if (index >= 0) {
+ buf.push(options.linesep, tab.substr(options.indent));
+ }
+ buf.push("]");
+ if (options.html) {
+ return "<code class='array'>" + buf.join("") + "</code>";
+ }
+
+ } else {
+ buf.push("{");
+ var index = 0;
+ for (var key in val) {
+ buf.push(index > 0 ? itemsep : options.linesep);
+ var keyDisplay = options.quoteKeys ? JSON.stringify(key) : key;
+ if (options.html) {
+ if (options.quoteKeys) {
+ keyDisplay = keyDisplay.substr(1, keyDisplay.length - 2);
+ }
+ keyDisplay = "<code class='key'>" + $.futon.escape(keyDisplay) + "</code>";
+ if (options.quoteKeys) {
+ keyDisplay = '"' + keyDisplay + '"';
+ }
+ }
+ buf.push(tab, keyDisplay,
+ ": ", format(val[key], depth + 1));
+ index++;
+ }
+ if (index >= 0) {
+ buf.push(options.linesep, tab.substr(options.indent));
+ }
+ buf.push("}");
+ if (options.html) {
+ return "<code class='object'>" + buf.join("") + "</code>";
+ }
+ }
+
+ return buf.join("");
+ }
+ }
+ }
+
+ function indentLines(text, tab) {
+ var lines = text.split("\n");
+ for (var i = 0; i < lines.length; i++) {
+ lines[i] = (i > 0 ? tab : "") + $.futon.escape(lines[i]);
+ }
+ return lines.join("<br>");
+ }
+
+ return format(val, 1);
+ },
+
+ // File size pretty printing
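+ // e.g. formatSize(300) -> "300 bytes", formatSize(1048576) -> "1.0 MB"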
+ formatSize: function(size) {
+ var jump = 512;
+ if (size < jump) return size + " bytes";
+ var units = ["KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"];
+ var i = 0;
+ while (size >= jump && i < units.length) {
+ i += 1;
+ size /= 1024;
+ }
+ return size.toFixed(1) + ' ' + units[i - 1];
+ }
+
+ });
+
+})(jQuery);
diff --git a/1.1.x/share/www/script/futon.js b/1.1.x/share/www/script/futon.js
new file mode 100644
index 00000000..fb73e3c9
--- /dev/null
+++ b/1.1.x/share/www/script/futon.js
@@ -0,0 +1,535 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// $$ inspired by @wycats: http://yehudakatz.com/2009/04/20/evented-programming-with-jquery/
+function $$(node) {
+ var data = $(node).data("$$");
+ if (data) {
+ return data;
+ } else {
+ data = {};
+ $(node).data("$$", data);
+ return data;
+ }
+};
+
+(function($) {
+
+ function Session() {
+
+ function doLogin(name, password, callback) {
+ $.couch.login({
+ name : name,
+ password : password,
+ success : function() {
+ $.futon.session.sidebar();
+ callback();
+ },
+ error : function(code, error, reason) {
+ $.futon.session.sidebar();
+ callback({name : "Error logging in: "+reason});
+ }
+ });
+ };
+
+ function doSignup(name, password, callback, runLogin) {
+ $.couch.signup({
+ name : name
+ }, password, {
+ success : function() {
+ if (runLogin) {
+ doLogin(name, password, callback);
+ } else {
+ callback();
+ }
+ },
+ error : function(status, error, reason) {
+ $.futon.session.sidebar();
+ if (error == "conflict") {
+ callback({name : "Name '"+name+"' is taken"});
+ } else {
+ callback({name : "Signup error: "+reason});
+ }
+ }
+ });
+ };
+
+ function validateUsernameAndPassword(data, callback) {
+ if (!data.name || data.name.length == 0) {
+ callback({name: "Please enter a name."});
+ return false;
+ };
+ return validatePassword(data, callback);
+ };
+
+ function validatePassword(data, callback) {
+ if (!data.password || data.password.length == 0) {
+ callback({password: "Please enter a password."});
+ return false;
+ };
+ return true;
+ };
+
+ function createAdmin() {
+ $.showDialog("dialog/_create_admin.html", {
+ submit: function(data, callback) {
+ if (!validateUsernameAndPassword(data, callback)) return;
+ $.couch.config({
+ success : function() {
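+ // The admin now exists in the server config; log in as it, then
+ // create a matching user doc, ignoring "name taken" conflicts.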
+ doLogin(data.name, data.password, function(errors) {
+ if(!$.isEmptyObject(errors)) {
+ callback(errors);
+ return;
+ }
+ doSignup(data.name, null, function(errors) {
+ if (errors && errors.name && errors.name.indexOf && errors.name.indexOf("taken") == -1) {
+ callback(errors);
+ } else {
+ callback();
+ }
+ }, false);
+ });
+ }
+ }, "admins", data.name, data.password);
+ }
+ });
+ return false;
+ };
+
+ function login() {
+ $.showDialog("dialog/_login.html", {
+ submit: function(data, callback) {
+ if (!validateUsernameAndPassword(data, callback)) return;
+ doLogin(data.name, data.password, callback);
+ }
+ });
+ return false;
+ };
+
+ function logout() {
+ $.couch.logout({
+ success : function(resp) {
+ $.futon.session.sidebar();
+ }
+ })
+ };
+
+ function signup() {
+ $.showDialog("dialog/_signup.html", {
+ submit: function(data, callback) {
+ if (!validateUsernameAndPassword(data, callback)) return;
+ doSignup(data.name, data.password, callback, true);
+ }
+ });
+ return false;
+ };
+
+ function changePassword () {
+ $.showDialog("dialog/_change_password.html", {
+ submit: function(data, callback) {
+ if (validatePassword(data, callback)) {
+ if (data.password != data.verify_password) {
+ callback({verify_password: "Passwords don't match."});
+ return false;
+ }
+ } else {
+ return false;
+ }
+ $.couch.session({success: function (resp) {
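+ // Admins live in the server config; regular users get their user
+ // doc updated in the authentication database instead.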
+ if (resp.userCtx.roles.indexOf("_admin") > -1) {
+ $.couch.config({
+ success : function () {
+ doLogin(resp.userCtx.name, data.password, function(errors) {
+ if(!$.isEmptyObject(errors)) {
+ callback(errors);
+ return;
+ } else {
+ location.reload();
+ }
+ });
+ }
+ }, "admins", resp.userCtx.name, data.password);
+ } else {
+ $.couch.db(resp.info.authentication_db).openDoc("org.couchdb.user:"+resp.userCtx.name, {
+ success: function (user) {
+ $.couch.db(resp.info.authentication_db).saveDoc($.couch.prepareUserDoc(user, data.password), {
+ success: function() {
+ doLogin(user.name, data.password, function(errors) {
+ if(!$.isEmptyObject(errors)) {
+ callback(errors);
+ return;
+ } else {
+ location.reload();
+ }
+ });
+ }
+ });
+ }
+ });
+ }
+ }});
+ }
+ });
+ return false;
+ };
+
+ this.setupSidebar = function() {
+ $("#userCtx .login").click(login);
+ $("#userCtx .logout").click(logout);
+ $("#userCtx .signup").click(signup);
+ $("#userCtx .createadmin").click(createAdmin);
+ $("#userCtx .changepass").click(changePassword);
+ };
+
+ this.sidebar = function() {
+ // get users db info?
+ $("#userCtx span").hide();
+ $.couch.session({
+ success : function(r) {
+ var userCtx = r.userCtx;
+ $$("#userCtx").userCtx = userCtx;
+ if (userCtx.name) {
+ $("#userCtx .name").text(userCtx.name).attr({href : $.couch.urlPrefix + "/_utils/document.html?"+encodeURIComponent(r.info.authentication_db)+"/org.couchdb.user%3A"+encodeURIComponent(userCtx.name)});
+ if (userCtx.roles.indexOf("_admin") != -1) {
+ $("#userCtx .loggedin").show();
+ $("#userCtx .loggedinadmin").show();
+ } else {
+ $("#userCtx .loggedin").show();
+ }
+ } else if (userCtx.roles.indexOf("_admin") != -1) {
+ $("#userCtx .adminparty").show();
+ } else {
+ $("#userCtx .loggedout").show();
+ };
+ }
+ })
+ };
+ };
+
+ function Navigation() {
+ var nav = this;
+ this.loaded = false;
+ this.eventHandlers = {
+ load: []
+ };
+
+ this.ready = function(callback) {
+ if (callback) {
+ if (this.loaded) {
+ callback.apply(this);
+ }
+ this.eventHandlers["load"].push(callback);
+ } else {
+ this.loaded = true;
+ var callbacks = this.eventHandlers["load"];
+ for (var i = 0; i < callbacks.length; i++) {
+ callbacks[i].apply(this);
+ }
+ }
+ }
+
+ this.addDatabase = function(name) {
+ var current = $.futon.storage.get("recent", "");
+ var recentDbs = current ? current.split(",") : [];
+ if ($.inArray(name, recentDbs) == -1) {
+ recentDbs.unshift(name);
+ if (recentDbs.length > 10) recentDbs.length = 10;
+ $.futon.storage.set("recent", recentDbs.join(","));
+ this.updateDatabases();
+ }
+ }
+
+ this.removeDatabase = function(name) {
+ // remove database from recent databases list
+ var current = $.futon.storage.get("recent", "");
+ var recentDbs = current ? current.split(",") : [];
+ var recentIdx = $.inArray(name, recentDbs);
+ if (recentIdx >= 0) {
+ recentDbs.splice(recentIdx, 1);
+ $.futon.storage.set("recent", recentDbs.join(","));
+ this.updateDatabases();
+ }
+ }
+
+ this.updateDatabases = function() {
+ var selection = null;
+ $("#dbs .selected a").each(function() {
+ selection = [this.pathname, this.search];
+ });
+ $("#dbs").empty();
+ var recentDbs = $.futon.storage.get("recent").split(",");
+ recentDbs.sort();
+ $.each(recentDbs, function(idx, name) {
+ if (name) {
+ name = encodeURIComponent(name);
+ $("#dbs").append("<li>" +
+ "<button class='remove' title='Remove from list' value='" + name + "'></button>" +
+ "<a href='database.html?" + name + "' title='" + name + "'>" + name +
+ "</a></li>");
+ }
+ });
+ if (selection) {
+ this.updateSelection(selection[0], selection[1]);
+ }
+ $("#dbs button.remove").click(function() {
+ nav.removeDatabase(this.value);
+ return false;
+ });
+ }
+
+ this.updateSelection = function(path, queryString) {
+ function fixupPath(path) { // hack for IE/Win
+ return (path.charAt(0) != "/") ? ("/" + path) : path;
+ }
+ if (!path) {
+ path = location.pathname;
+ if (!queryString) {
+ queryString = location.search;
+ }
+ } else if (!queryString) {
+ queryString = "";
+ }
+ var href = fixupPath(path + queryString);
+ $("#nav li").removeClass("selected");
+ $("#nav li a").each(function() {
+ if (fixupPath(this.pathname) + this.search != href) return;
+ $(this).parent("li").addClass("selected").parents("li").addClass("selected");
+ });
+ }
+
+ this.toggle = function(speed) {
+ if (speed === undefined) {
+ speed = 500;
+ }
+ var sidebar = $("#sidebar").stop(true, true);
+ var hidden = !$(sidebar).is(".hidden");
+
+ $("#wrap").animate({
+ marginRight: hidden ? 0 : 210
+ }, speed, function() {
+ $(document.body).toggleClass("fullwidth", hidden);
+ });
+ sidebar.toggleClass("hidden").animate({
+ width: hidden ? 26 : 210,
+ height: hidden ? $("h1").outerHeight() - 1 : "100%",
+ right: hidden ? 0 : -210
+ }, speed).children(":not(#sidebar-toggle)").animate({
+ opacity: "toggle"
+ }, speed);
+ $("h1").animate({marginRight: hidden ? 26 : 0}, speed);
+
+ $("#sidebar-toggle")
+ .attr("title", hidden ? "Show Sidebar" : "Hide Sidebar");
+ $.futon.storage.set("sidebar", hidden ? "hidden" : "show");
+ };
+ }
+
+ function Storage() {
+ var storage = this;
+ this.decls = {};
+
+ this.declare = function(name, options) {
+ this.decls[name] = $.extend({}, {
+ scope: "window",
+ defaultValue: null,
+ prefix: ""
+ }, options || {});
+ }
+
+ this.declareWithPrefix = function(prefix, decls) {
+ for (var name in decls) {
+ var options = decls[name];
+ options.prefix = prefix;
+ storage.declare(name, options);
+ }
+ }
+
+ this.del = function(name) {
+ lookup(name, function(decl) {
+ handlers[decl.scope].del(decl.prefix + name);
+ });
+ }
+
+ this.get = function(name, defaultValue) {
+ return lookup(name, function(decl) {
+ var value = handlers[decl.scope].get(decl.prefix + name);
+ if (value !== undefined) {
+ return value;
+ }
+ if (defaultValue !== undefined) {
+ return defaultValue;
+ }
+ return decl.defaultValue;
+ });
+ }
+
+ this.set = function(name, value) {
+ lookup(name, function(decl) {
+ if (value == decl.defaultValue) {
+ handlers[decl.scope].del(decl.prefix + name);
+ } else {
+ handlers[decl.scope].set(decl.prefix + name, value);
+ }
+ });
+ }
+
+ function lookup(name, callback) {
+ var decl = storage.decls[name];
+ if (decl === undefined) {
+ return decl;
+ }
+ return callback(decl);
+ }
+
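+ // window.name persists across page navigation within the same window,
+ // which makes it usable as per-window (scope: "window") storage.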
+ function windowName() {
+ try {
+ return JSON.parse(window.name || "{}");
+ } catch (e) {
+ return {};
+ }
+ }
+
+ // prefix cookie names with the port so cookies from different ports stay separate
+ var cookiePrefix = location.port + "_";
+
+ var handlers = {
+
+ "cookie": {
+ get: function(name) {
+ var nameEq = cookiePrefix + name + "=";
+ var parts = document.cookie.split(';');
+ for (var i = 0; i < parts.length; i++) {
+ var part = parts[i].replace(/^\s+/, "");
+ if (part.indexOf(nameEq) == 0) {
+ return unescape(part.substring(nameEq.length, part.length));
+ }
+ }
+ },
+ set: function(name, value) {
+ var date = new Date();
+ date.setTime(date.getTime() + 14*24*60*60*1000); // two weeks
+ document.cookie = cookiePrefix + name + "=" + escape(value) +
+ "; expires=" + date.toGMTString();
+ },
+ del: function(name) {
+ var date = new Date();
+ date.setTime(date.getTime() - 24*60*60*1000); // yesterday
+ document.cookie = cookiePrefix + name + "=" +
+ "; expires=" + date.toGMTString();
+ }
+ },
+
+ "window": {
+ get: function(name) {
+ return windowName()[name];
+ },
+ set: function(name, value) {
+ var obj = windowName();
+ obj[name] = value || null;
+ window.name = JSON.stringify(obj);
+ },
+ del: function(name) {
+ var obj = windowName();
+ delete obj[name];
+ window.name = JSON.stringify(obj);
+ }
+ }
+
+ };
+
+ }
+
+ $.couch.urlPrefix = "..";
+ $.futon = $.futon || {};
+ $.extend($.futon, {
+ navigation: new Navigation(),
+ session : new Session(),
+ storage: new Storage()
+ });
+
+ $.fn.addPlaceholder = function() {
+ if (this[0] && "placeholder" in document.createElement("input")) {
+ return; // found native placeholder support
+ }
+ return this.live('focusin', function() {
+ var input = $(this);
+ if (input.val() === input.attr("placeholder")) {
+ input.removeClass("placeholder").val("");
+ }
+ }).live("focusout", function() {
+ var input = $(this);
+ if (input.val() === "") {
+ input.val(input.attr("placeholder")).addClass("placeholder");
+ }
+ }).trigger("focusout");
+ }
+
+ $.fn.enableTabInsertion = function(chars) {
+ chars = chars || "\t";
+ var width = chars.length;
+ return this.keydown(function(evt) {
+ if (evt.keyCode == 9) {
+ var v = this.value;
+ var start = this.selectionStart;
+ var scrollTop = this.scrollTop;
+ if (start !== undefined) {
+ this.value = v.slice(0, start) + chars + v.slice(start);
+ this.selectionStart = this.selectionEnd = start + width;
+ } else {
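+ // Fallback for older IE, which lacks selectionStart on text inputs.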
+ document.selection.createRange().text = chars;
+ this.caretPos += width;
+ }
+ return false;
+ }
+ });
+ }
+
+ $(document)
+ .ajaxStart(function() { $(this.body).addClass("loading"); })
+ .ajaxStop(function() { $(this.body).removeClass("loading"); });
+
+ $.futon.storage.declare("sidebar", {scope: "cookie", defaultValue: "show"});
+ $.futon.storage.declare("recent", {scope: "cookie", defaultValue: ""});
+
+ $(function() {
+ document.title = "Apache CouchDB - Futon: " + document.title;
+ if ($.futon.storage.get("sidebar") == "hidden") {
+ // doing this as early as possible prevents flickering
+ $(document.body).addClass("fullwidth");
+ }
+ $("input[placeholder]").addPlaceholder();
+
+ $.get("_sidebar.html", function(resp) {
+ $("#wrap").append(resp)
+ .find("#sidebar-toggle").click(function(e) {
+ $.futon.navigation.toggle(e.shiftKey ? 2500 : 500);
+ return false;
+ });
+ if ($.futon.storage.get("sidebar") == "hidden") {
+ $.futon.navigation.toggle(0);
+ }
+
+ $.futon.navigation.updateDatabases();
+ $.futon.navigation.updateSelection();
+ $.futon.navigation.ready();
+ $.futon.session.setupSidebar();
+ $.futon.session.sidebar();
+
+ $.couch.info({
+ success: function(info, status) {
+ $("#version").text(info.version);
+ }
+ });
+ });
+ });
+
+})(jQuery);
diff --git a/1.1.x/share/www/script/jquery-ui-1.8.11.custom.min.js b/1.1.x/share/www/script/jquery-ui-1.8.11.custom.min.js
new file mode 100644
index 00000000..45b927e0
--- /dev/null
+++ b/1.1.x/share/www/script/jquery-ui-1.8.11.custom.min.js
@@ -0,0 +1,81 @@
+/*!
+ * jQuery UI 1.8.11
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI
+ */
+(function(c,j){function k(a){return!c(a).parents().andSelf().filter(function(){return c.curCSS(this,"visibility")==="hidden"||c.expr.filters.hidden(this)}).length}c.ui=c.ui||{};if(!c.ui.version){c.extend(c.ui,{version:"1.8.11",keyCode:{ALT:18,BACKSPACE:8,CAPS_LOCK:20,COMMA:188,COMMAND:91,COMMAND_LEFT:91,COMMAND_RIGHT:93,CONTROL:17,DELETE:46,DOWN:40,END:35,ENTER:13,ESCAPE:27,HOME:36,INSERT:45,LEFT:37,MENU:93,NUMPAD_ADD:107,NUMPAD_DECIMAL:110,NUMPAD_DIVIDE:111,NUMPAD_ENTER:108,NUMPAD_MULTIPLY:106,
+NUMPAD_SUBTRACT:109,PAGE_DOWN:34,PAGE_UP:33,PERIOD:190,RIGHT:39,SHIFT:16,SPACE:32,TAB:9,UP:38,WINDOWS:91}});c.fn.extend({_focus:c.fn.focus,focus:function(a,b){return typeof a==="number"?this.each(function(){var d=this;setTimeout(function(){c(d).focus();b&&b.call(d)},a)}):this._focus.apply(this,arguments)},scrollParent:function(){var a;a=c.browser.msie&&/(static|relative)/.test(this.css("position"))||/absolute/.test(this.css("position"))?this.parents().filter(function(){return/(relative|absolute|fixed)/.test(c.curCSS(this,
+"position",1))&&/(auto|scroll)/.test(c.curCSS(this,"overflow",1)+c.curCSS(this,"overflow-y",1)+c.curCSS(this,"overflow-x",1))}).eq(0):this.parents().filter(function(){return/(auto|scroll)/.test(c.curCSS(this,"overflow",1)+c.curCSS(this,"overflow-y",1)+c.curCSS(this,"overflow-x",1))}).eq(0);return/fixed/.test(this.css("position"))||!a.length?c(document):a},zIndex:function(a){if(a!==j)return this.css("zIndex",a);if(this.length){a=c(this[0]);for(var b;a.length&&a[0]!==document;){b=a.css("position");
+if(b==="absolute"||b==="relative"||b==="fixed"){b=parseInt(a.css("zIndex"),10);if(!isNaN(b)&&b!==0)return b}a=a.parent()}}return 0},disableSelection:function(){return this.bind((c.support.selectstart?"selectstart":"mousedown")+".ui-disableSelection",function(a){a.preventDefault()})},enableSelection:function(){return this.unbind(".ui-disableSelection")}});c.each(["Width","Height"],function(a,b){function d(f,g,l,m){c.each(e,function(){g-=parseFloat(c.curCSS(f,"padding"+this,true))||0;if(l)g-=parseFloat(c.curCSS(f,
+"border"+this+"Width",true))||0;if(m)g-=parseFloat(c.curCSS(f,"margin"+this,true))||0});return g}var e=b==="Width"?["Left","Right"]:["Top","Bottom"],h=b.toLowerCase(),i={innerWidth:c.fn.innerWidth,innerHeight:c.fn.innerHeight,outerWidth:c.fn.outerWidth,outerHeight:c.fn.outerHeight};c.fn["inner"+b]=function(f){if(f===j)return i["inner"+b].call(this);return this.each(function(){c(this).css(h,d(this,f)+"px")})};c.fn["outer"+b]=function(f,g){if(typeof f!=="number")return i["outer"+b].call(this,f);return this.each(function(){c(this).css(h,
+d(this,f,true,g)+"px")})}});c.extend(c.expr[":"],{data:function(a,b,d){return!!c.data(a,d[3])},focusable:function(a){var b=a.nodeName.toLowerCase(),d=c.attr(a,"tabindex");if("area"===b){b=a.parentNode;d=b.name;if(!a.href||!d||b.nodeName.toLowerCase()!=="map")return false;a=c("img[usemap=#"+d+"]")[0];return!!a&&k(a)}return(/input|select|textarea|button|object/.test(b)?!a.disabled:"a"==b?a.href||!isNaN(d):!isNaN(d))&&k(a)},tabbable:function(a){var b=c.attr(a,"tabindex");return(isNaN(b)||b>=0)&&c(a).is(":focusable")}});
+c(function(){var a=document.body,b=a.appendChild(b=document.createElement("div"));c.extend(b.style,{minHeight:"100px",height:"auto",padding:0,borderWidth:0});c.support.minHeight=b.offsetHeight===100;c.support.selectstart="onselectstart"in b;a.removeChild(b).style.display="none"});c.extend(c.ui,{plugin:{add:function(a,b,d){a=c.ui[a].prototype;for(var e in d){a.plugins[e]=a.plugins[e]||[];a.plugins[e].push([b,d[e]])}},call:function(a,b,d){if((b=a.plugins[b])&&a.element[0].parentNode)for(var e=0;e<b.length;e++)a.options[b[e][0]]&&
+b[e][1].apply(a.element,d)}},contains:function(a,b){return document.compareDocumentPosition?a.compareDocumentPosition(b)&16:a!==b&&a.contains(b)},hasScroll:function(a,b){if(c(a).css("overflow")==="hidden")return false;b=b&&b==="left"?"scrollLeft":"scrollTop";var d=false;if(a[b]>0)return true;a[b]=1;d=a[b]>0;a[b]=0;return d},isOverAxis:function(a,b,d){return a>b&&a<b+d},isOver:function(a,b,d,e,h,i){return c.ui.isOverAxis(a,d,h)&&c.ui.isOverAxis(b,e,i)}})}})(jQuery);
+;/*!
+ * jQuery UI Widget 1.8.11
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Widget
+ */
+(function(b,j){if(b.cleanData){var k=b.cleanData;b.cleanData=function(a){for(var c=0,d;(d=a[c])!=null;c++)b(d).triggerHandler("remove");k(a)}}else{var l=b.fn.remove;b.fn.remove=function(a,c){return this.each(function(){if(!c)if(!a||b.filter(a,[this]).length)b("*",this).add([this]).each(function(){b(this).triggerHandler("remove")});return l.call(b(this),a,c)})}}b.widget=function(a,c,d){var e=a.split(".")[0],f;a=a.split(".")[1];f=e+"-"+a;if(!d){d=c;c=b.Widget}b.expr[":"][f]=function(h){return!!b.data(h,
+a)};b[e]=b[e]||{};b[e][a]=function(h,g){arguments.length&&this._createWidget(h,g)};c=new c;c.options=b.extend(true,{},c.options);b[e][a].prototype=b.extend(true,c,{namespace:e,widgetName:a,widgetEventPrefix:b[e][a].prototype.widgetEventPrefix||a,widgetBaseClass:f},d);b.widget.bridge(a,b[e][a])};b.widget.bridge=function(a,c){b.fn[a]=function(d){var e=typeof d==="string",f=Array.prototype.slice.call(arguments,1),h=this;d=!e&&f.length?b.extend.apply(null,[true,d].concat(f)):d;if(e&&d.charAt(0)==="_")return h;
+e?this.each(function(){var g=b.data(this,a),i=g&&b.isFunction(g[d])?g[d].apply(g,f):g;if(i!==g&&i!==j){h=i;return false}}):this.each(function(){var g=b.data(this,a);g?g.option(d||{})._init():b.data(this,a,new c(d,this))});return h}};b.Widget=function(a,c){arguments.length&&this._createWidget(a,c)};b.Widget.prototype={widgetName:"widget",widgetEventPrefix:"",options:{disabled:false},_createWidget:function(a,c){b.data(c,this.widgetName,this);this.element=b(c);this.options=b.extend(true,{},this.options,
+this._getCreateOptions(),a);var d=this;this.element.bind("remove."+this.widgetName,function(){d.destroy()});this._create();this._trigger("create");this._init()},_getCreateOptions:function(){return b.metadata&&b.metadata.get(this.element[0])[this.widgetName]},_create:function(){},_init:function(){},destroy:function(){this.element.unbind("."+this.widgetName).removeData(this.widgetName);this.widget().unbind("."+this.widgetName).removeAttr("aria-disabled").removeClass(this.widgetBaseClass+"-disabled ui-state-disabled")},
+widget:function(){return this.element},option:function(a,c){var d=a;if(arguments.length===0)return b.extend({},this.options);if(typeof a==="string"){if(c===j)return this.options[a];d={};d[a]=c}this._setOptions(d);return this},_setOptions:function(a){var c=this;b.each(a,function(d,e){c._setOption(d,e)});return this},_setOption:function(a,c){this.options[a]=c;if(a==="disabled")this.widget()[c?"addClass":"removeClass"](this.widgetBaseClass+"-disabled ui-state-disabled").attr("aria-disabled",c);return this},
+enable:function(){return this._setOption("disabled",false)},disable:function(){return this._setOption("disabled",true)},_trigger:function(a,c,d){var e=this.options[a];c=b.Event(c);c.type=(a===this.widgetEventPrefix?a:this.widgetEventPrefix+a).toLowerCase();d=d||{};if(c.originalEvent){a=b.event.props.length;for(var f;a;){f=b.event.props[--a];c[f]=c.originalEvent[f]}}this.element.trigger(c,d);return!(b.isFunction(e)&&e.call(this.element[0],c,d)===false||c.isDefaultPrevented())}}})(jQuery);
+;/*
+ * jQuery UI Position 1.8.11
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Position
+ */
+(function(c){c.ui=c.ui||{};var n=/left|center|right/,o=/top|center|bottom/,t=c.fn.position,u=c.fn.offset;c.fn.position=function(b){if(!b||!b.of)return t.apply(this,arguments);b=c.extend({},b);var a=c(b.of),d=a[0],g=(b.collision||"flip").split(" "),e=b.offset?b.offset.split(" "):[0,0],h,k,j;if(d.nodeType===9){h=a.width();k=a.height();j={top:0,left:0}}else if(d.setTimeout){h=a.width();k=a.height();j={top:a.scrollTop(),left:a.scrollLeft()}}else if(d.preventDefault){b.at="left top";h=k=0;j={top:b.of.pageY,
+left:b.of.pageX}}else{h=a.outerWidth();k=a.outerHeight();j=a.offset()}c.each(["my","at"],function(){var f=(b[this]||"").split(" ");if(f.length===1)f=n.test(f[0])?f.concat(["center"]):o.test(f[0])?["center"].concat(f):["center","center"];f[0]=n.test(f[0])?f[0]:"center";f[1]=o.test(f[1])?f[1]:"center";b[this]=f});if(g.length===1)g[1]=g[0];e[0]=parseInt(e[0],10)||0;if(e.length===1)e[1]=e[0];e[1]=parseInt(e[1],10)||0;if(b.at[0]==="right")j.left+=h;else if(b.at[0]==="center")j.left+=h/2;if(b.at[1]==="bottom")j.top+=
+k;else if(b.at[1]==="center")j.top+=k/2;j.left+=e[0];j.top+=e[1];return this.each(function(){var f=c(this),l=f.outerWidth(),m=f.outerHeight(),p=parseInt(c.curCSS(this,"marginLeft",true))||0,q=parseInt(c.curCSS(this,"marginTop",true))||0,v=l+p+(parseInt(c.curCSS(this,"marginRight",true))||0),w=m+q+(parseInt(c.curCSS(this,"marginBottom",true))||0),i=c.extend({},j),r;if(b.my[0]==="right")i.left-=l;else if(b.my[0]==="center")i.left-=l/2;if(b.my[1]==="bottom")i.top-=m;else if(b.my[1]==="center")i.top-=
+m/2;i.left=Math.round(i.left);i.top=Math.round(i.top);r={left:i.left-p,top:i.top-q};c.each(["left","top"],function(s,x){c.ui.position[g[s]]&&c.ui.position[g[s]][x](i,{targetWidth:h,targetHeight:k,elemWidth:l,elemHeight:m,collisionPosition:r,collisionWidth:v,collisionHeight:w,offset:e,my:b.my,at:b.at})});c.fn.bgiframe&&f.bgiframe();f.offset(c.extend(i,{using:b.using}))})};c.ui.position={fit:{left:function(b,a){var d=c(window);d=a.collisionPosition.left+a.collisionWidth-d.width()-d.scrollLeft();b.left=
+d>0?b.left-d:Math.max(b.left-a.collisionPosition.left,b.left)},top:function(b,a){var d=c(window);d=a.collisionPosition.top+a.collisionHeight-d.height()-d.scrollTop();b.top=d>0?b.top-d:Math.max(b.top-a.collisionPosition.top,b.top)}},flip:{left:function(b,a){if(a.at[0]!=="center"){var d=c(window);d=a.collisionPosition.left+a.collisionWidth-d.width()-d.scrollLeft();var g=a.my[0]==="left"?-a.elemWidth:a.my[0]==="right"?a.elemWidth:0,e=a.at[0]==="left"?a.targetWidth:-a.targetWidth,h=-2*a.offset[0];b.left+=
+a.collisionPosition.left<0?g+e+h:d>0?g+e+h:0}},top:function(b,a){if(a.at[1]!=="center"){var d=c(window);d=a.collisionPosition.top+a.collisionHeight-d.height()-d.scrollTop();var g=a.my[1]==="top"?-a.elemHeight:a.my[1]==="bottom"?a.elemHeight:0,e=a.at[1]==="top"?a.targetHeight:-a.targetHeight,h=-2*a.offset[1];b.top+=a.collisionPosition.top<0?g+e+h:d>0?g+e+h:0}}}};if(!c.offset.setOffset){c.offset.setOffset=function(b,a){if(/static/.test(c.curCSS(b,"position")))b.style.position="relative";var d=c(b),
+g=d.offset(),e=parseInt(c.curCSS(b,"top",true),10)||0,h=parseInt(c.curCSS(b,"left",true),10)||0;g={top:a.top-g.top+e,left:a.left-g.left+h};"using"in a?a.using.call(b,g):d.css(g)};c.fn.offset=function(b){var a=this[0];if(!a||!a.ownerDocument)return null;if(b)return this.each(function(){c.offset.setOffset(this,b)});return u.call(this)}}})(jQuery);
+;/*
+ * jQuery UI Autocomplete 1.8.11
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Autocomplete
+ *
+ * Depends:
+ * jquery.ui.core.js
+ * jquery.ui.widget.js
+ * jquery.ui.position.js
+ */
+(function(d){var e=0;d.widget("ui.autocomplete",{options:{appendTo:"body",autoFocus:false,delay:300,minLength:1,position:{my:"left top",at:"left bottom",collision:"none"},source:null},pending:0,_create:function(){var a=this,b=this.element[0].ownerDocument,g;this.element.addClass("ui-autocomplete-input").attr("autocomplete","off").attr({role:"textbox","aria-autocomplete":"list","aria-haspopup":"true"}).bind("keydown.autocomplete",function(c){if(!(a.options.disabled||a.element.attr("readonly"))){g=
+false;var f=d.ui.keyCode;switch(c.keyCode){case f.PAGE_UP:a._move("previousPage",c);break;case f.PAGE_DOWN:a._move("nextPage",c);break;case f.UP:a._move("previous",c);c.preventDefault();break;case f.DOWN:a._move("next",c);c.preventDefault();break;case f.ENTER:case f.NUMPAD_ENTER:if(a.menu.active){g=true;c.preventDefault()}case f.TAB:if(!a.menu.active)return;a.menu.select(c);break;case f.ESCAPE:a.element.val(a.term);a.close(c);break;default:clearTimeout(a.searching);a.searching=setTimeout(function(){if(a.term!=
+a.element.val()){a.selectedItem=null;a.search(null,c)}},a.options.delay);break}}}).bind("keypress.autocomplete",function(c){if(g){g=false;c.preventDefault()}}).bind("focus.autocomplete",function(){if(!a.options.disabled){a.selectedItem=null;a.previous=a.element.val()}}).bind("blur.autocomplete",function(c){if(!a.options.disabled){clearTimeout(a.searching);a.closing=setTimeout(function(){a.close(c);a._change(c)},150)}});this._initSource();this.response=function(){return a._response.apply(a,arguments)};
+this.menu=d("<ul></ul>").addClass("ui-autocomplete").appendTo(d(this.options.appendTo||"body",b)[0]).mousedown(function(c){var f=a.menu.element[0];d(c.target).closest(".ui-menu-item").length||setTimeout(function(){d(document).one("mousedown",function(h){h.target!==a.element[0]&&h.target!==f&&!d.ui.contains(f,h.target)&&a.close()})},1);setTimeout(function(){clearTimeout(a.closing)},13)}).menu({focus:function(c,f){f=f.item.data("item.autocomplete");false!==a._trigger("focus",c,{item:f})&&/^key/.test(c.originalEvent.type)&&
+a.element.val(f.value)},selected:function(c,f){var h=f.item.data("item.autocomplete"),i=a.previous;if(a.element[0]!==b.activeElement){a.element.focus();a.previous=i;setTimeout(function(){a.previous=i;a.selectedItem=h},1)}false!==a._trigger("select",c,{item:h})&&a.element.val(h.value);a.term=a.element.val();a.close(c);a.selectedItem=h},blur:function(){a.menu.element.is(":visible")&&a.element.val()!==a.term&&a.element.val(a.term)}}).zIndex(this.element.zIndex()+1).css({top:0,left:0}).hide().data("menu");
+d.fn.bgiframe&&this.menu.element.bgiframe()},destroy:function(){this.element.removeClass("ui-autocomplete-input").removeAttr("autocomplete").removeAttr("role").removeAttr("aria-autocomplete").removeAttr("aria-haspopup");this.menu.element.remove();d.Widget.prototype.destroy.call(this)},_setOption:function(a,b){d.Widget.prototype._setOption.apply(this,arguments);a==="source"&&this._initSource();if(a==="appendTo")this.menu.element.appendTo(d(b||"body",this.element[0].ownerDocument)[0]);a==="disabled"&&
+b&&this.xhr&&this.xhr.abort()},_initSource:function(){var a=this,b,g;if(d.isArray(this.options.source)){b=this.options.source;this.source=function(c,f){f(d.ui.autocomplete.filter(b,c.term))}}else if(typeof this.options.source==="string"){g=this.options.source;this.source=function(c,f){a.xhr&&a.xhr.abort();a.xhr=d.ajax({url:g,data:c,dataType:"json",autocompleteRequest:++e,success:function(h){this.autocompleteRequest===e&&f(h)},error:function(){this.autocompleteRequest===e&&f([])}})}}else this.source=
+this.options.source},search:function(a,b){a=a!=null?a:this.element.val();this.term=this.element.val();if(a.length<this.options.minLength)return this.close(b);clearTimeout(this.closing);if(this._trigger("search",b)!==false)return this._search(a)},_search:function(a){this.pending++;this.element.addClass("ui-autocomplete-loading");this.source({term:a},this.response)},_response:function(a){if(!this.options.disabled&&a&&a.length){a=this._normalize(a);this._suggest(a);this._trigger("open")}else this.close();
+this.pending--;this.pending||this.element.removeClass("ui-autocomplete-loading")},close:function(a){clearTimeout(this.closing);if(this.menu.element.is(":visible")){this.menu.element.hide();this.menu.deactivate();this._trigger("close",a)}},_change:function(a){this.previous!==this.element.val()&&this._trigger("change",a,{item:this.selectedItem})},_normalize:function(a){if(a.length&&a[0].label&&a[0].value)return a;return d.map(a,function(b){if(typeof b==="string")return{label:b,value:b};return d.extend({label:b.label||
+b.value,value:b.value||b.label},b)})},_suggest:function(a){var b=this.menu.element.empty().zIndex(this.element.zIndex()+1);this._renderMenu(b,a);this.menu.deactivate();this.menu.refresh();b.show();this._resizeMenu();b.position(d.extend({of:this.element},this.options.position));this.options.autoFocus&&this.menu.next(new d.Event("mouseover"))},_resizeMenu:function(){var a=this.menu.element;a.outerWidth(Math.max(a.width("").outerWidth(),this.element.outerWidth()))},_renderMenu:function(a,b){var g=this;
+d.each(b,function(c,f){g._renderItem(a,f)})},_renderItem:function(a,b){return d("<li></li>").data("item.autocomplete",b).append(d("<a></a>").text(b.label)).appendTo(a)},_move:function(a,b){if(this.menu.element.is(":visible"))if(this.menu.first()&&/^previous/.test(a)||this.menu.last()&&/^next/.test(a)){this.element.val(this.term);this.menu.deactivate()}else this.menu[a](b);else this.search(null,b)},widget:function(){return this.menu.element}});d.extend(d.ui.autocomplete,{escapeRegex:function(a){return a.replace(/[-[\]{}()*+?.,\\^$|#\s]/g,
+"\\$&")},filter:function(a,b){var g=new RegExp(d.ui.autocomplete.escapeRegex(b),"i");return d.grep(a,function(c){return g.test(c.label||c.value||c)})}})})(jQuery);
+(function(d){d.widget("ui.menu",{_create:function(){var e=this;this.element.addClass("ui-menu ui-widget ui-widget-content ui-corner-all").attr({role:"listbox","aria-activedescendant":"ui-active-menuitem"}).click(function(a){if(d(a.target).closest(".ui-menu-item a").length){a.preventDefault();e.select(a)}});this.refresh()},refresh:function(){var e=this;this.element.children("li:not(.ui-menu-item):has(a)").addClass("ui-menu-item").attr("role","menuitem").children("a").addClass("ui-corner-all").attr("tabindex",
+-1).mouseenter(function(a){e.activate(a,d(this).parent())}).mouseleave(function(){e.deactivate()})},activate:function(e,a){this.deactivate();if(this.hasScroll()){var b=a.offset().top-this.element.offset().top,g=this.element.attr("scrollTop"),c=this.element.height();if(b<0)this.element.attr("scrollTop",g+b);else b>=c&&this.element.attr("scrollTop",g+b-c+a.height())}this.active=a.eq(0).children("a").addClass("ui-state-hover").attr("id","ui-active-menuitem").end();this._trigger("focus",e,{item:a})},
+deactivate:function(){if(this.active){this.active.children("a").removeClass("ui-state-hover").removeAttr("id");this._trigger("blur");this.active=null}},next:function(e){this.move("next",".ui-menu-item:first",e)},previous:function(e){this.move("prev",".ui-menu-item:last",e)},first:function(){return this.active&&!this.active.prevAll(".ui-menu-item").length},last:function(){return this.active&&!this.active.nextAll(".ui-menu-item").length},move:function(e,a,b){if(this.active){e=this.active[e+"All"](".ui-menu-item").eq(0);
+e.length?this.activate(b,e):this.activate(b,this.element.children(a))}else this.activate(b,this.element.children(a))},nextPage:function(e){if(this.hasScroll())if(!this.active||this.last())this.activate(e,this.element.children(".ui-menu-item:first"));else{var a=this.active.offset().top,b=this.element.height(),g=this.element.children(".ui-menu-item").filter(function(){var c=d(this).offset().top-a-b+d(this).height();return c<10&&c>-10});g.length||(g=this.element.children(".ui-menu-item:last"));this.activate(e,
+g)}else this.activate(e,this.element.children(".ui-menu-item").filter(!this.active||this.last()?":first":":last"))},previousPage:function(e){if(this.hasScroll())if(!this.active||this.first())this.activate(e,this.element.children(".ui-menu-item:last"));else{var a=this.active.offset().top,b=this.element.height();result=this.element.children(".ui-menu-item").filter(function(){var g=d(this).offset().top-a+b-d(this).height();return g<10&&g>-10});result.length||(result=this.element.children(".ui-menu-item:first"));
+this.activate(e,result)}else this.activate(e,this.element.children(".ui-menu-item").filter(!this.active||this.first()?":last":":first"))},hasScroll:function(){return this.element.height()<this.element.attr("scrollHeight")},select:function(e){this._trigger("selected",e,{item:this.active})}})})(jQuery);
+;
\ No newline at end of file
diff --git a/1.1.x/share/www/script/jquery.couch.js b/1.1.x/share/www/script/jquery.couch.js
new file mode 100644
index 00000000..edae18fc
--- /dev/null
+++ b/1.1.x/share/www/script/jquery.couch.js
@@ -0,0 +1,699 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+(function($) {
+ $.couch = $.couch || {};
+
+ function encodeDocId(docID) {
+ var parts = docID.split("/");
+ if (parts[0] == "_design") {
+ parts.shift();
+ return "_design/" + encodeURIComponent(parts.join('/'));
+ }
+ return encodeURIComponent(docID);
+ }
+
+ var uuidCache = [];
+
+ $.extend($.couch, {
+ urlPrefix: '',
+ activeTasks: function(options) {
+ ajax(
+ {url: this.urlPrefix + "/_active_tasks"},
+ options,
+ "Active task status could not be retrieved"
+ );
+ },
+
+ allDbs: function(options) {
+ ajax(
+ {url: this.urlPrefix + "/_all_dbs"},
+ options,
+ "An error occurred retrieving the list of all databases"
+ );
+ },
+
+ config: function(options, section, option, value) {
+ var req = {url: this.urlPrefix + "/_config/"};
+ if (section) {
+ req.url += encodeURIComponent(section) + "/";
+ if (option) {
+ req.url += encodeURIComponent(option);
+ }
+ }
+ if (value === null) {
+ req.type = "DELETE";
+ } else if (value !== undefined) {
+ req.type = "PUT";
+ req.data = toJSON(value);
+ req.contentType = "application/json";
+ req.processData = false;
+ }
+
+ ajax(req, options,
+ "An error occurred retrieving/updating the server configuration"
+ );
+ },
+
+ session: function(options) {
+ options = options || {};
+ $.ajax({
+ type: "GET", url: this.urlPrefix + "/_session",
+ beforeSend: function(xhr) {
+ xhr.setRequestHeader('Accept', 'application/json');
+ },
+ complete: function(req) {
+ var resp = httpData(req, "json");
+ if (req.status == 200) {
+ if (options.success) options.success(resp);
+ } else if (options.error) {
+ options.error(req.status, resp.error, resp.reason);
+ } else {
+ alert("An error occurred getting session info: " + resp.reason);
+ }
+ }
+ });
+ },
+
+ userDb : function(callback) {
+ $.couch.session({
+ success : function(resp) {
+ var userDb = $.couch.db(resp.info.authentication_db);
+ callback(userDb);
+ }
+ });
+ },
+
+ signup: function(user_doc, password, options) {
+ options = options || {};
+ // prepare user doc based on name and password
+ user_doc = this.prepareUserDoc(user_doc, password);
+ $.couch.userDb(function(db) {
+ db.saveDoc(user_doc, options);
+ });
+ },
+
+ prepareUserDoc: function(user_doc, new_password) {
+ if (typeof hex_sha1 == "undefined") {
+ alert("creating a user doc requires sha1.js to be loaded in the page");
+ return;
+ }
+ var user_prefix = "org.couchdb.user:";
+ user_doc._id = user_doc._id || user_prefix + user_doc.name;
+ if (new_password) {
+ // handle the password crypto
+ user_doc.salt = $.couch.newUUID();
+ user_doc.password_sha = hex_sha1(new_password + user_doc.salt);
+ }
+ user_doc.type = "user";
+ if (!user_doc.roles) {
+ user_doc.roles = [];
+ }
+ return user_doc;
+ },
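+ // Usage sketch (illustrative only; the name and password are made-up
+ // values, and sha1.js must already be loaded in the page):
+ //
+ //   $.couch.signup({name: "myname"}, "mypass", {
+ //     success: function(resp) { /* user doc was saved */ }
+ //   });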
+
+ login: function(options) {
+ options = options || {};
+ $.ajax({
+ type: "POST", url: this.urlPrefix + "/_session", dataType: "json",
+ data: {name: options.name, password: options.password},
+ beforeSend: function(xhr) {
+ xhr.setRequestHeader('Accept', 'application/json');
+ },
+ complete: function(req) {
+ var resp = httpData(req, "json");
+ if (req.status == 200) {
+ if (options.success) options.success(resp);
+ } else if (options.error) {
+ options.error(req.status, resp.error, resp.reason);
+ } else {
+ alert("An error occurred logging in: " + resp.reason);
+ }
+ }
+ });
+ },
+ logout: function(options) {
+ options = options || {};
+ $.ajax({
+ type: "DELETE", url: this.urlPrefix + "/_session", dataType: "json",
+ username : "_", password : "_",
+ beforeSend: function(xhr) {
+ xhr.setRequestHeader('Accept', 'application/json');
+ },
+ complete: function(req) {
+ var resp = httpData(req, "json");
+ if (req.status == 200) {
+ if (options.success) options.success(resp);
+ } else if (options.error) {
+ options.error(req.status, resp.error, resp.reason);
+ } else {
+ alert("An error occurred logging out: " + resp.reason);
+ }
+ }
+ });
+ },
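+ // Usage sketch (credentials and callbacks are illustrative):
+ //
+ //   $.couch.login({name: "admin", password: "secret",
+ //     success: function(resp) { /* resp.name, resp.roles */ }});
+ //   $.couch.session({success: function(s) { /* s.userCtx */ }});
+ //   $.couch.logout();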
+
+ db: function(name, db_opts) {
+ db_opts = db_opts || {};
+ var rawDocs = {};
+ function maybeApplyVersion(doc) {
+ if (doc._id && doc._rev && rawDocs[doc._id] && rawDocs[doc._id].rev == doc._rev) {
+ // todo: can we use commonjs require here?
+ if (typeof Base64 == "undefined") {
+ alert("please include /_utils/script/base64.js in the page for base64 support");
+ return false;
+ } else {
+ doc._attachments = doc._attachments || {};
+ doc._attachments["rev-"+doc._rev.split("-")[0]] = {
+ content_type :"application/json",
+ data : Base64.encode(rawDocs[doc._id].raw)
+ };
+ return true;
+ }
+ }
+ }
+ return {
+ name: name,
+ uri: this.urlPrefix + "/" + encodeURIComponent(name) + "/",
+
+ compact: function(options) {
+ options = $.extend(options || {}, {successStatus: 202});
+ ajax({
+ type: "POST", url: this.uri + "_compact",
+ data: "", processData: false
+ },
+ options,
+ "The database could not be compacted"
+ );
+ },
+ viewCleanup: function(options) {
+ options = $.extend(options || {}, {successStatus: 202});
+ ajax({
+ type: "POST", url: this.uri + "_view_cleanup",
+ data: "", processData: false
+ },
+ options,
+ "The views could not be cleaned up"
+ );
+ },
+ compactView: function(groupname, options) {
+ options = $.extend(options || {}, {successStatus: 202});
+ ajax({
+ type: "POST", url: this.uri + "_compact/" + groupname,
+ data: "", processData: false
+ },
+ options,
+ "The view could not be compacted"
+ );
+ },
+ create: function(options) {
+ options = $.extend(options || {}, {successStatus: 201});
+ ajax({
+ type: "PUT", url: this.uri, contentType: "application/json",
+ data: "", processData: false
+ },
+ options,
+ "The database could not be created"
+ );
+ },
+ drop: function(options) {
+ ajax(
+ {type: "DELETE", url: this.uri},
+ options,
+ "The database could not be deleted"
+ );
+ },
+ info: function(options) {
+ ajax(
+ {url: this.uri},
+ options,
+ "Database information could not be retrieved"
+ );
+ },
+ changes: function(since, options) {
+ options = options || {};
+ // set up the promise object within a closure for this handler
+ var timeout = 100, db = this, active = true,
+ listeners = [],
+ promise = {
+ onChange : function(fun) {
+ listeners.push(fun);
+ },
+ stop : function() {
+ active = false;
+ }
+ };
+ // call each listener when there is a change
+ function triggerListeners(resp) {
+ $.each(listeners, function() {
+ this(resp);
+ });
+ }
+ // when there is a change, call any listeners, then check for another change
+ options.success = function(resp) {
+ timeout = 100;
+ if (active) {
+ since = resp.last_seq;
+ triggerListeners(resp);
+ getChangesSince();
+ }
+ };
+ options.error = function() {
+ if (active) {
+ setTimeout(getChangesSince, timeout);
+ timeout = timeout * 2;
+ }
+ };
+ // actually make the changes request
+ function getChangesSince() {
+ var opts = $.extend({heartbeat : 10 * 1000}, options, {
+ feed : "longpoll",
+ since : since
+ });
+ ajax(
+ {url: db.uri + "_changes"+encodeOptions(opts)},
+ options,
+ "Error connecting to "+db.uri+"/_changes."
+ );
+ }
+ // start the first request
+ if (since) {
+ getChangesSince();
+ } else {
+ db.info({
+ success : function(info) {
+ since = info.update_seq;
+ getChangesSince();
+ }
+ });
+ }
+ return promise;
+ },
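+ // Usage sketch for the changes feed ("mydb" and the handler are
+ // illustrative):
+ //
+ //   var feed = $.couch.db("mydb").changes();
+ //   feed.onChange(function(resp) { /* resp.results, resp.last_seq */ });
+ //   // later, to stop the long-poll loop:
+ //   feed.stop();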
+ allDocs: function(options) {
+ var type = "GET";
+ var data = null;
+ if (options["keys"]) {
+ type = "POST";
+ var keys = options["keys"];
+ delete options["keys"];
+ data = toJSON({ "keys": keys });
+ }
+ ajax({
+ type: type,
+ data: data,
+ url: this.uri + "_all_docs" + encodeOptions(options)
+ },
+ options,
+ "An error occurred retrieving a list of all documents"
+ );
+ },
+ allDesignDocs: function(options) {
+ this.allDocs($.extend({startkey:"_design", endkey:"_design0"}, options));
+ },
+ allApps: function(options) {
+ options = options || {};
+ var self = this;
+ if (options.eachApp) {
+ this.allDesignDocs({
+ success: function(resp) {
+ $.each(resp.rows, function() {
+ self.openDoc(this.id, {
+ success: function(ddoc) {
+ var index, appPath, appName = ddoc._id.split('/');
+ appName.shift();
+ appName = appName.join('/');
+ index = ddoc.couchapp && ddoc.couchapp.index;
+ if (index) {
+ appPath = ['', name, ddoc._id, index].join('/');
+ } else if (ddoc._attachments && ddoc._attachments["index.html"]) {
+ appPath = ['', name, ddoc._id, "index.html"].join('/');
+ }
+ if (appPath) options.eachApp(appName, appPath, ddoc);
+ }
+ });
+ });
+ }
+ });
+ } else {
+ alert("Please provide an eachApp function for allApps()");
+ }
+ },
+ openDoc: function(docId, options, ajaxOptions) {
+ options = options || {};
+ if (db_opts.attachPrevRev || options.attachPrevRev) {
+ $.extend(options, {
+ beforeSuccess : function(req, doc) {
+ rawDocs[doc._id] = {
+ rev : doc._rev,
+ raw : req.responseText
+ };
+ }
+ });
+ } else {
+ $.extend(options, {
+ beforeSuccess : function(req, doc) {
+ if (doc["jquery.couch.attachPrevRev"]) {
+ rawDocs[doc._id] = {
+ rev : doc._rev,
+ raw : req.responseText
+ };
+ }
+ }
+ });
+ }
+ ajax({url: this.uri + encodeDocId(docId) + encodeOptions(options)},
+ options,
+ "The document could not be retrieved",
+ ajaxOptions
+ );
+ },
+ saveDoc: function(doc, options) {
+ options = options || {};
+ var db = this;
+ var beforeSend = fullCommit(options);
+ if (doc._id === undefined) {
+ var method = "POST";
+ var uri = this.uri;
+ } else {
+ var method = "PUT";
+ var uri = this.uri + encodeDocId(doc._id);
+ }
+ var versioned = maybeApplyVersion(doc);
+ $.ajax({
+ type: method, url: uri + encodeOptions(options),
+ contentType: "application/json",
+ dataType: "json", data: toJSON(doc),
+ beforeSend : beforeSend,
+ complete: function(req) {
+ var resp = httpData(req, "json");
+ if (req.status == 200 || req.status == 201 || req.status == 202) {
+ doc._id = resp.id;
+ doc._rev = resp.rev;
+ if (versioned) {
+ db.openDoc(doc._id, {
+ attachPrevRev : true,
+ success : function(d) {
+ doc._attachments = d._attachments;
+ if (options.success) options.success(resp);
+ }
+ });
+ } else {
+ if (options.success) options.success(resp);
+ }
+ } else if (options.error) {
+ options.error(req.status, resp.error, resp.reason);
+ } else {
+ alert("The document could not be saved: " + resp.reason);
+ }
+ }
+ });
+ },
+ bulkSave: function(docs, options) {
+ var beforeSend = fullCommit(options);
+ options = $.extend(options || {}, {successStatus: 201, beforeSend : beforeSend});
+ ajax({
+ type: "POST",
+ url: this.uri + "_bulk_docs" + encodeOptions(options),
+ contentType: "application/json", data: toJSON(docs)
+ },
+ options,
+ "The documents could not be saved"
+ );
+ },
+ removeDoc: function(doc, options) {
+ ajax({
+ type: "DELETE",
+ url: this.uri +
+ encodeDocId(doc._id) +
+ encodeOptions({rev: doc._rev})
+ },
+ options,
+ "The document could not be deleted"
+ );
+ },
+ bulkRemove: function(docs, options){
+ docs.docs = $.each(
+ docs.docs, function(i, doc){
+ doc._deleted = true;
+ }
+ );
+ options = $.extend(options || {}, {successStatus: 201});
+ ajax({
+ type: "POST",
+ url: this.uri + "_bulk_docs" + encodeOptions(options),
+ data: toJSON(docs)
+ },
+ options,
+ "The documents could not be deleted"
+ );
+ },
+ copyDoc: function(docId, options, ajaxOptions) {
+ options = options || {};
+ ajaxOptions = $.extend(ajaxOptions, {
+ complete: function(req) {
+ var resp = httpData(req, "json");
+ if (req.status == 201) {
+ if (options.success) options.success(resp);
+ } else if (options.error) {
+ options.error(req.status, resp.error, resp.reason);
+ } else {
+ alert("The document could not be copied: " + resp.reason);
+ }
+ }
+ });
+ ajax({
+ type: "COPY",
+ url: this.uri + encodeDocId(docId)
+ },
+ options,
+ "The document could not be copied",
+ ajaxOptions
+ );
+ },
+ query: function(mapFun, reduceFun, language, options) {
+ language = language || "javascript";
+ if (typeof(mapFun) !== "string") {
+ mapFun = mapFun.toSource ? mapFun.toSource() : "(" + mapFun.toString() + ")";
+ }
+ var body = {language: language, map: mapFun};
+ if (reduceFun != null) {
+ if (typeof(reduceFun) !== "string")
+ reduceFun = reduceFun.toSource ? reduceFun.toSource() : "(" + reduceFun.toString() + ")";
+ body.reduce = reduceFun;
+ }
+ ajax({
+ type: "POST",
+ url: this.uri + "_temp_view" + encodeOptions(options),
+ contentType: "application/json", data: toJSON(body)
+ },
+ options,
+ "An error occurred querying the database"
+ );
+ },
+ list: function(list, view, options) {
+ list = list.split('/');
+ options = options || {};
+ var type = 'GET';
+ var data = null;
+ if (options['keys']) {
+ type = 'POST';
+ var keys = options['keys'];
+ delete options['keys'];
+ data = toJSON({'keys': keys });
+ }
+ ajax({
+ type: type,
+ data: data,
+ url: this.uri + '_design/' + list[0] +
+ '/_list/' + list[1] + '/' + view + encodeOptions(options)
+ },
+ options, 'An error occurred accessing the list'
+ );
+ },
+ view: function(name, options) {
+ name = name.split('/');
+ options = options || {};
+ var type = "GET";
+ var data = null;
+ if (options["keys"]) {
+ type = "POST";
+ var keys = options["keys"];
+ delete options["keys"];
+ data = toJSON({ "keys": keys });
+ }
+ ajax({
+ type: type,
+ data: data,
+ url: this.uri + "_design/" + name[0] +
+ "/_view/" + name[1] + encodeOptions(options)
+ },
+ options, "An error occurred accessing the view"
+ );
+ },
+ getDbProperty: function(propName, options, ajaxOptions) {
+ ajax({url: this.uri + propName + encodeOptions(options)},
+ options,
+ "The property could not be retrieved",
+ ajaxOptions
+ );
+ },
+
+ setDbProperty: function(propName, propValue, options, ajaxOptions) {
+ ajax({
+ type: "PUT",
+ url: this.uri + propName + encodeOptions(options),
+ data : JSON.stringify(propValue)
+ },
+ options,
+ "The property could not be updated",
+ ajaxOptions
+ );
+ }
+ };
+ },
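+ // Usage sketch for a db() handle (database, document and view names are
+ // illustrative):
+ //
+ //   var db = $.couch.db("mydb");
+ //   db.create();
+ //   db.saveDoc({_id: "mydoc", value: 1}, {success: function(resp) {}});
+ //   db.view("myddoc/by_value", {key: 1, success: function(resp) {}});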
+
+ encodeDocId: encodeDocId,
+
+ info: function(options) {
+ ajax(
+ {url: this.urlPrefix + "/"},
+ options,
+ "Server information could not be retrieved"
+ );
+ },
+
+ replicate: function(source, target, ajaxOptions, repOpts) {
+ ajaxOptions = ajaxOptions || {};
+ repOpts = $.extend({source: source, target: target}, repOpts);
+ if (repOpts.continuous && !repOpts.cancel) {
+ ajaxOptions.successStatus = 202;
+ }
+ ajax({
+ type: "POST", url: this.urlPrefix + "/_replicate",
+ data: JSON.stringify(repOpts),
+ contentType: "application/json"
+ },
+ ajaxOptions,
+ "Replication failed"
+ );
+ },
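+ // Usage sketch (source and target are illustrative; a continuous
+ // replication request is answered with 202 Accepted):
+ //
+ //   $.couch.replicate("mydb", "http://example.org/mydb",
+ //     {success: function(resp) {}}, {continuous: true});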
+
+ newUUID: function(cacheNum) {
+ if (cacheNum === undefined) {
+ cacheNum = 1;
+ }
+ if (!uuidCache.length) {
+ ajax({url: this.urlPrefix + "/_uuids", data: {count: cacheNum}, async: false}, {
+ success: function(resp) {
+ uuidCache = resp.uuids;
+ }
+ },
+ "Failed to retrieve UUID batch."
+ );
+ }
+ return uuidCache.shift();
+ }
+ });
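+ // Usage sketch: newUUID() fetches synchronously (async: false) when the
+ // cache is empty, so asking for a batch up front amortizes the round-trip:
+ //
+ //   var id = $.couch.newUUID(10); // caches 10 UUIDs, returns one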
+
+ var httpData = $.httpData || function( xhr, type, s ) { // lifted from jq1.4.4
+ var ct = xhr.getResponseHeader("content-type") || "",
+ xml = type === "xml" || !type && ct.indexOf("xml") >= 0,
+ data = xml ? xhr.responseXML : xhr.responseText;
+
+ if ( xml && data.documentElement.nodeName === "parsererror" ) {
+ $.error( "parsererror" );
+ }
+ if ( s && s.dataFilter ) {
+ data = s.dataFilter( data, type );
+ }
+ if ( typeof data === "string" ) {
+ if ( type === "json" || !type && ct.indexOf("json") >= 0 ) {
+ data = $.parseJSON( data );
+ } else if ( type === "script" || !type && ct.indexOf("javascript") >= 0 ) {
+ $.globalEval( data );
+ }
+ }
+ return data;
+ };
+
+ function ajax(obj, options, errorMessage, ajaxOptions) {
+ options = $.extend({successStatus: 200}, options);
+ ajaxOptions = $.extend({contentType: "application/json"}, ajaxOptions);
+ errorMessage = errorMessage || "Unknown error";
+ $.ajax($.extend($.extend({
+ type: "GET", dataType: "json", cache : !$.browser.msie,
+ beforeSend: function(xhr){
+ if(ajaxOptions && ajaxOptions.headers){
+ for (var header in ajaxOptions.headers){
+ xhr.setRequestHeader(header, ajaxOptions.headers[header]);
+ }
+ }
+ },
+ complete: function(req) {
+ try {
+ var resp = httpData(req, "json");
+ } catch(e) {
+ if (options.error) {
+ options.error(req.status, req, e);
+ } else {
+ alert(errorMessage + ": " + e);
+ }
+ return;
+ }
+ if (options.ajaxStart) {
+ options.ajaxStart(resp);
+ }
+ if (req.status == options.successStatus) {
+ if (options.beforeSuccess) options.beforeSuccess(req, resp);
+ if (options.success) options.success(resp);
+ } else if (options.error) {
+ options.error(req.status, resp && resp.error || errorMessage, resp && resp.reason || "no response");
+ } else {
+ alert(errorMessage + ": " + resp.reason);
+ }
+ }
+ }, obj), ajaxOptions));
+ }
+
+ function fullCommit(options) {
+ options = options || {};
+ if (typeof options.ensure_full_commit !== "undefined") {
+ var commit = options.ensure_full_commit;
+ delete options.ensure_full_commit;
+ return function(xhr) {
+ xhr.setRequestHeader('Accept', 'application/json');
+ xhr.setRequestHeader("X-Couch-Full-Commit", commit.toString());
+ };
+ }
+ }
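+ // Usage sketch: passing ensure_full_commit through a document call sets
+ // the X-Couch-Full-Commit request header ("doc" is illustrative):
+ //
+ //   db.saveDoc(doc, {ensure_full_commit: true});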
+
+ // Convert an options object to a URL query string.
+ // ex: {key:'value',key2:'value2'} becomes '?key="value"&key2=value2'
+ function encodeOptions(options) {
+ var buf = [];
+ if (typeof(options) === "object" && options !== null) {
+ for (var name in options) {
+ if ($.inArray(name, ["error", "success", "beforeSuccess", "ajaxStart"]) >= 0)
+ continue;
+ var value = options[name];
+ if ($.inArray(name, ["key", "startkey", "endkey"]) >= 0) {
+ value = toJSON(value);
+ }
+ buf.push(encodeURIComponent(name) + "=" + encodeURIComponent(value));
+ }
+ }
+ return buf.length ? "?" + buf.join("&") : "";
+ }
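+ // Example: keys named key/startkey/endkey are JSON-encoded, other values
+ // pass through, and callback options are skipped:
+ //
+ //   encodeOptions({key: "a", limit: 10, success: fn})
+ //   // => '?key=%22a%22&limit=10'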
+
+ function toJSON(obj) {
+ return obj !== null ? JSON.stringify(obj) : null;
+ }
+
+})(jQuery);
diff --git a/1.1.x/share/www/script/jquery.dialog.js b/1.1.x/share/www/script/jquery.dialog.js
new file mode 100644
index 00000000..02c0c497
--- /dev/null
+++ b/1.1.x/share/www/script/jquery.dialog.js
@@ -0,0 +1,96 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+(function($) {
+
+ $.fn.centerBox = function() {
+ return this.each(function() {
+ var s = this.style;
+ s.left = (($(window).width() - $(this).width()) / 2) + "px";
+ s.top = (($(window).height() - $(this).height()) / 2) + "px";
+ });
+ };
+
+ $.showDialog = function(url, options) {
+ options = options || {};
+ options.load = options.load || function() {};
+ options.cancel = options.cancel || function() {};
+ options.validate = options.validate || function() { return true };
+ options.submit = options.submit || function() {};
+
+ var overlay = $('<div id="overlay" style="z-index:1001"></div>')
+ .css("opacity", "0");
+ var dialog = $('<div id="dialog" style="z-index:1002;position:fixed;display:none;"></div>');
+ if ($.browser.msie) {
+ var frame = $('<iframe id="overlay-frame" style="z-index:1000;border:none;margin:0;padding:0;position:absolute;width:100%;height:100%;top:0;left:0" src="javascript:false"></iframe>')
+ .css("opacity", "0").appendTo(document.body);
+ if (parseInt($.browser.version, 10) < 7) {
+ dialog.css("position", "absolute");
+ overlay.css("position", "absolute");
+ $("html,body").css({width: "100%", height: "100%"});
+ }
+ }
+ overlay.appendTo(document.body).fadeTo(100, 0.6);
+ dialog.appendTo(document.body).addClass("loading").centerBox().fadeIn(400);
+
+ $(document).keydown(function(e) {
+ if (e.keyCode == 27) dismiss(); // dismiss on escape key
+ });
+ function dismiss() {
+ dialog.fadeOut("fast", function() {
+ $("#dialog, #overlay, #overlay-frame").remove();
+ });
+ $(document).unbind("keydown");
+ }
+ overlay.click(function() { dismiss(); });
+
+ function showError(name, message) {
+ var input = dialog.find(":input[name=" + name + "]");
+ input.addClass("error").next("div.error").remove();
+ $('<div class="error"></div>').text(message).insertAfter(input);
+ }
+
+ $.get(url, function(html) {
+ $(html).appendTo(dialog);
+ dialog.removeClass("loading").addClass("loaded").centerBox().each(function() {
+ options.load(dialog.children()[0]);
+ $(":input:first", dialog).each(function() { this.focus() });
+ $("button.cancel", dialog).click(function() { // dismiss on cancel
+ dismiss();
+ options.cancel();
+ });
+ $("form", dialog).submit(function(e) { // invoke callback on submit
+ e.preventDefault();
+ dialog.find("div.error").remove().end().find(".error").removeClass("error");
+ var data = {};
+ $.each($("form :input", dialog).serializeArray(), function(i, field) {
+ data[field.name] = field.value;
+ });
+ $("form :file", dialog).each(function() {
+ data[this.name] = this.value; // file inputs need special handling
+ });
+ options.submit(data, function callback(errors) {
+ if ($.isEmptyObject(errors)) {
+ dismiss();
+ } else {
+ for (var name in errors) {
+ showError(name, errors[name]);
+ }
+ }
+ });
+ return false;
+ });
+ });
+ });
+ };
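+ // Usage sketch (the dialog URL matches Futon's dialog pages; the submit
+ // callback receives the form data and a function that dismisses the
+ // dialog on {} or shows per-field errors otherwise):
+ //
+ //   $.showDialog("dialog/_create_database.html", {
+ //     submit: function(data, callback) {
+ //       if (!data.name) return callback({name: "Please enter a name."});
+ //       callback({});
+ //     }
+ //   });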
+
+})(jQuery);
diff --git a/1.1.x/share/www/script/jquery.editinline.js b/1.1.x/share/www/script/jquery.editinline.js
new file mode 100644
index 00000000..b48607d4
--- /dev/null
+++ b/1.1.x/share/www/script/jquery.editinline.js
@@ -0,0 +1,114 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+(function($) {
+
+ function startEditing(elem, options) {
+ var editable = $(elem);
+ var origHtml = editable.html();
+ var origText = options.populate($.trim(editable.text()));
+
+ if (!options.begin.apply(elem, [origText])) {
+ return;
+ }
+
+ var input = options.createInput.apply(elem, [origText])
+ .addClass("editinline").val(origText)
+ .dblclick(function() { return false; })
+ .keydown(function(evt) {
+ switch (evt.keyCode) {
+ case 13: { // return
+ if (!input.is("textarea")) applyChange(evt.keyCode);
+ break;
+ }
+ case 27: { // escape
+ cancelChange(evt.keyCode);
+ break;
+ }
+ case 9: { // tab
+ if (!input.is("textarea")) {
+ applyChange(evt.keyCode);
+ return false;
+ }
+ }
+ }
+ });
+ if (options.acceptOnBlur) {
+ input.blur(function() {
+ return applyChange();
+ });
+ }
+
+ function applyChange(keyCode) {
+ var newText = input.val();
+ if (newText == origText) {
+ cancelChange(keyCode);
+ return true;
+ }
+ if ((!options.allowEmpty && !newText.length) ||
+ !options.validate.apply(elem, [newText, origText])) {
+ input.addClass("invalid");
+ return false;
+ }
+ input.remove();
+ tools.remove();
+ options.accept.apply(elem, [newText, origText]);
+ editable.removeClass("editinline-container");
+ options.end.apply(elem, [keyCode]);
+ return true;
+ }
+
+ function cancelChange(keyCode) {
+ options.cancel.apply(elem, [origText]);
+ editable.html(origHtml).removeClass("editinline-container");
+ options.end.apply(elem, [keyCode]);
+ }
+
+ var tools = $("<span class='editinline-tools'></span>");
+ $("<button type='button' class='apply'></button>")
+ .text(options.acceptLabel).click(applyChange).appendTo(tools);
+ $("<button type='button' class='cancel'></button>")
+ .text(options.cancelLabel).click(cancelChange).appendTo(tools);
+
+ editable.html("").append(tools).append(input)
+ .addClass("editinline-container");
+ options.prepareInput.apply(elem, [input[0]]);
+ input.each(function() { this.focus(); this.select(); });
+ }
+
+ $.fn.makeEditable = function(options) {
+ options = $.extend({
+ allowEmpty: true,
+ acceptLabel: "",
+ cancelLabel: "",
+ toolTip: "Double click to edit",
+ acceptOnBlur: true,
+
+ // callbacks
+ begin: function() { return true },
+ accept: function(newValue, oldValue) {},
+ cancel: function(oldValue) {},
+ createInput: function(value) { return $("<input type='text'>") },
+ prepareInput: function(input) {},
+ end: function(keyCode) {},
+ populate: function(value) { return value },
+ validate: function() { return true }
+ }, options || {});
+
+ return this.each(function() {
+ $(this).attr("title", options.toolTip).dblclick(function() {
+ startEditing(this, options);
+ });
+ });
+ };
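+ // Usage sketch (selector and callbacks are illustrative):
+ //
+ //   $("td.value").makeEditable({
+ //     accept: function(newValue, oldValue) { /* persist newValue */ }
+ //   });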
+
+})(jQuery);
diff --git a/1.1.x/share/www/script/jquery.form.js b/1.1.x/share/www/script/jquery.form.js
new file mode 100644
index 00000000..dde39427
--- /dev/null
+++ b/1.1.x/share/www/script/jquery.form.js
@@ -0,0 +1,660 @@
+/*
+ * jQuery Form Plugin
+ * version: 2.36 (07-NOV-2009)
+ * @requires jQuery v1.2.6 or later
+ *
+ * Examples and documentation at: http://malsup.com/jquery/form/
+ * Dual licensed under the MIT and GPL licenses:
+ * http://www.opensource.org/licenses/mit-license.php
+ * http://www.gnu.org/licenses/gpl.html
+ */
+;(function($) {
+
+/*
+ Usage Note:
+ -----------
+ Do not use both ajaxSubmit and ajaxForm on the same form. These
+ functions are intended to be exclusive. Use ajaxSubmit if you want
+ to bind your own submit handler to the form. For example,
+
+ $(document).ready(function() {
+ $('#myForm').bind('submit', function() {
+ $(this).ajaxSubmit({
+ target: '#output'
+ });
+ return false; // <-- important!
+ });
+ });
+
+ Use ajaxForm when you want the plugin to manage all the event binding
+ for you. For example,
+
+ $(document).ready(function() {
+ $('#myForm').ajaxForm({
+ target: '#output'
+ });
+ });
+
+ When using ajaxForm, the ajaxSubmit function will be invoked for you
+ at the appropriate time.
+*/
+
+/**
+ * ajaxSubmit() provides a mechanism for immediately submitting
+ * an HTML form using AJAX.
+ */
+$.fn.ajaxSubmit = function(options) {
+ // fast fail if nothing selected (http://dev.jquery.com/ticket/2752)
+ if (!this.length) {
+ log('ajaxSubmit: skipping submit process - no element selected');
+ return this;
+ }
+
+ if (typeof options == 'function')
+ options = { success: options };
+
+ var url = $.trim(this.attr('action'));
+ if (url) {
+ // clean url (don't include hash value)
+ url = (url.match(/^([^#]+)/)||[])[1];
+ }
+ url = url || window.location.href || '';
+
+ options = $.extend({
+ url: url,
+ type: this.attr('method') || 'GET',
+ iframeSrc: /^https/i.test(window.location.href || '') ? 'javascript:false' : 'about:blank'
+ }, options || {});
+
+ // hook for manipulating the form data before it is extracted;
+ // convenient for use with rich editors like tinyMCE or FCKEditor
+ var veto = {};
+ this.trigger('form-pre-serialize', [this, options, veto]);
+ if (veto.veto) {
+ log('ajaxSubmit: submit vetoed via form-pre-serialize trigger');
+ return this;
+ }
+
+ // provide opportunity to alter form data before it is serialized
+ if (options.beforeSerialize && options.beforeSerialize(this, options) === false) {
+ log('ajaxSubmit: submit aborted via beforeSerialize callback');
+ return this;
+ }
+
+ var a = this.formToArray(options.semantic);
+ if (options.data) {
+ options.extraData = options.data;
+ for (var n in options.data) {
+ if(options.data[n] instanceof Array) {
+ for (var k in options.data[n])
+ a.push( { name: n, value: options.data[n][k] } );
+ }
+ else
+ a.push( { name: n, value: options.data[n] } );
+ }
+ }
+
+ // give pre-submit callback an opportunity to abort the submit
+ if (options.beforeSubmit && options.beforeSubmit(a, this, options) === false) {
+ log('ajaxSubmit: submit aborted via beforeSubmit callback');
+ return this;
+ }
+
+ // fire vetoable 'validate' event
+ this.trigger('form-submit-validate', [a, this, options, veto]);
+ if (veto.veto) {
+ log('ajaxSubmit: submit vetoed via form-submit-validate trigger');
+ return this;
+ }
+
+ var q = $.param(a);
+
+ if (options.type.toUpperCase() == 'GET') {
+ options.url += (options.url.indexOf('?') >= 0 ? '&' : '?') + q;
+ options.data = null; // data is null for 'get'
+ }
+ else
+ options.data = q; // data is the query string for 'post'
+
+ var $form = this, callbacks = [];
+ if (options.resetForm) callbacks.push(function() { $form.resetForm(); });
+ if (options.clearForm) callbacks.push(function() { $form.clearForm(); });
+
+ // perform a load on the target only if dataType is not provided
+ if (!options.dataType && options.target) {
+ var oldSuccess = options.success || function(){};
+ callbacks.push(function(data) {
+ $(options.target).html(data).each(oldSuccess, arguments);
+ });
+ }
+ else if (options.success)
+ callbacks.push(options.success);
+
+ options.success = function(data, status) {
+ for (var i=0, max=callbacks.length; i < max; i++)
+ callbacks[i].apply(options, [data, status, $form]);
+ };
+
+ // are there files to upload?
+ var files = $('input:file', this).fieldValue();
+ var found = false;
+ for (var j=0; j < files.length; j++)
+ if (files[j])
+ found = true;
+
+ var multipart = false;
+// var mp = 'multipart/form-data';
+// multipart = ($form.attr('enctype') == mp || $form.attr('encoding') == mp);
+
+ // options.iframe allows user to force iframe mode
+ // 06-NOV-09: now defaulting to iframe mode if file input is detected
+ if ((files.length && options.iframe !== false) || options.iframe || found || multipart) {
+ // hack to fix Safari hang (thanks to Tim Molendijk for this)
+ // see: http://groups.google.com/group/jquery-dev/browse_thread/thread/36395b7ab510dd5d
+ if (options.closeKeepAlive)
+ $.get(options.closeKeepAlive, fileUpload);
+ else
+ fileUpload();
+ }
+ else
+ $.ajax(options);
+
+ // fire 'notify' event
+ this.trigger('form-submit-notify', [this, options]);
+ return this;
+
+
+ // private function for handling file uploads (hat tip to YAHOO!)
+ function fileUpload() {
+ var form = $form[0];
+
+ if ($(':input[name=submit]', form).length) {
+ alert('Error: Form elements must not be named "submit".');
+ return;
+ }
+
+ var opts = $.extend({}, $.ajaxSettings, options);
+ var s = $.extend(true, {}, $.extend(true, {}, $.ajaxSettings), opts);
+
+ var id = 'jqFormIO' + (new Date().getTime());
+ var $io = $('<iframe id="' + id + '" name="' + id + '" src="'+ opts.iframeSrc +'" />');
+ var io = $io[0];
+
+ $io.css({ position: 'absolute', top: '-1000px', left: '-1000px' });
+
+ var xhr = { // mock object
+ aborted: 0,
+ responseText: null,
+ responseXML: null,
+ status: 0,
+ statusText: 'n/a',
+ getAllResponseHeaders: function() {},
+ getResponseHeader: function() {},
+ setRequestHeader: function() {},
+ abort: function() {
+ this.aborted = 1;
+ $io.attr('src', opts.iframeSrc); // abort op in progress
+ }
+ };
+
+ var g = opts.global;
+ // trigger ajax global events so that activity/block indicators work like normal
+ if (g && ! $.active++) $.event.trigger("ajaxStart");
+ if (g) $.event.trigger("ajaxSend", [xhr, opts]);
+
+ if (s.beforeSend && s.beforeSend(xhr, s) === false) {
+ s.global && $.active--;
+ return;
+ }
+ if (xhr.aborted)
+ return;
+
+ var cbInvoked = 0;
+ var timedOut = 0;
+
+ // add submitting element to data if we know it
+ var sub = form.clk;
+ if (sub) {
+ var n = sub.name;
+ if (n && !sub.disabled) {
+ options.extraData = options.extraData || {};
+ options.extraData[n] = sub.value;
+ if (sub.type == "image") {
+ options.extraData[n+'.x'] = form.clk_x;
+ options.extraData[n+'.y'] = form.clk_y;
+ }
+ }
+ }
+
+ // take a breath so that pending repaints get some cpu time before the upload starts
+ setTimeout(function() {
+ // make sure form attrs are set
+ var t = $form.attr('target'), a = $form.attr('action');
+
+ // update form attrs in IE friendly way
+ form.setAttribute('target',id);
+ if (form.getAttribute('method') != 'POST')
+ form.setAttribute('method', 'POST');
+ if (form.getAttribute('action') != opts.url)
+ form.setAttribute('action', opts.url);
+
+ // ie borks in some cases when setting encoding
+ if (! options.skipEncodingOverride) {
+ $form.attr({
+ encoding: 'multipart/form-data',
+ enctype: 'multipart/form-data'
+ });
+ }
+
+ // support timeout
+ if (opts.timeout)
+ setTimeout(function() { timedOut = true; cb(); }, opts.timeout);
+
+ // add "extra" data to form if provided in options
+ var extraInputs = [];
+ try {
+ if (options.extraData)
+ for (var n in options.extraData)
+ extraInputs.push(
+ $('<input type="hidden" name="'+n+'" value="'+options.extraData[n]+'" />')
+ .appendTo(form)[0]);
+
+ // add iframe to doc and submit the form
+ $io.appendTo('body');
+ io.attachEvent ? io.attachEvent('onload', cb) : io.addEventListener('load', cb, false);
+ form.submit();
+ }
+ finally {
+ // reset attrs and remove "extra" input elements
+ form.setAttribute('action',a);
+ t ? form.setAttribute('target', t) : $form.removeAttr('target');
+ $(extraInputs).remove();
+ }
+ }, 10);
+
+ var domCheckCount = 50;
+
+ function cb() {
+ if (cbInvoked++) return;
+
+ io.detachEvent ? io.detachEvent('onload', cb) : io.removeEventListener('load', cb, false);
+
+ var ok = true;
+ try {
+ if (timedOut) throw 'timeout';
+ // extract the server response from the iframe
+ var data, doc;
+
+ doc = io.contentWindow ? io.contentWindow.document : io.contentDocument ? io.contentDocument : io.document;
+
+ var isXml = opts.dataType == 'xml' || doc.XMLDocument || $.isXMLDoc(doc);
+ log('isXml='+isXml);
+ if (!isXml && (doc.body == null || doc.body.innerHTML == '')) {
+ if (--domCheckCount) {
+ // in some browsers (Opera) the iframe DOM is not always traversable when
+ // the onload callback fires, so we loop a bit to accommodate
+ cbInvoked = 0;
+ setTimeout(cb, 100);
+ return;
+ }
+ log('Could not access iframe DOM after 50 tries.');
+ return;
+ }
+
+ xhr.responseText = doc.body ? doc.body.innerHTML : null;
+ xhr.responseXML = doc.XMLDocument ? doc.XMLDocument : doc;
+ xhr.getResponseHeader = function(header){
+ var headers = {'content-type': opts.dataType};
+ return headers[header];
+ };
+
+ if (opts.dataType == 'json' || opts.dataType == 'script') {
+ // see if user embedded response in textarea
+ var ta = doc.getElementsByTagName('textarea')[0];
+ if (ta)
+ xhr.responseText = ta.value;
+ else {
+ // account for browsers injecting pre around json response
+ var pre = doc.getElementsByTagName('pre')[0];
+ if (pre)
+ xhr.responseText = pre.innerHTML;
+ }
+ }
+ else if (opts.dataType == 'xml' && !xhr.responseXML && xhr.responseText != null) {
+ xhr.responseXML = toXml(xhr.responseText);
+ }
+ data = $.httpData(xhr, opts.dataType);
+ }
+ catch(e){
+ ok = false;
+ $.handleError(opts, xhr, 'error', e);
+ }
+
+ // ordering of these callbacks/triggers is odd, but that's how $.ajax does it
+ if (ok) {
+ opts.success(data, 'success');
+ if (g) $.event.trigger("ajaxSuccess", [xhr, opts]);
+ }
+ if (g) $.event.trigger("ajaxComplete", [xhr, opts]);
+ if (g && ! --$.active) $.event.trigger("ajaxStop");
+ if (opts.complete) opts.complete(xhr, ok ? 'success' : 'error');
+
+ // clean up
+ setTimeout(function() {
+ $io.remove();
+ xhr.responseXML = null;
+ }, 100);
+ };
+
+ function toXml(s, doc) {
+ if (window.ActiveXObject) {
+ doc = new ActiveXObject('Microsoft.XMLDOM');
+ doc.async = 'false';
+ doc.loadXML(s);
+ }
+ else
+ doc = (new DOMParser()).parseFromString(s, 'text/xml');
+ return (doc && doc.documentElement && doc.documentElement.tagName != 'parsererror') ? doc : null;
+ };
+ };
+};
+
+/**
+ * ajaxForm() provides a mechanism for fully automating form submission.
+ *
+ * The advantages of using this method instead of ajaxSubmit() are:
+ *
+ * 1. This method will include coordinates for <input type="image" /> elements (if the element
+ * is used to submit the form).
+ * 2. This method will include the submit element's name/value data (for the element that was
+ * used to submit the form).
+ * 3. This method binds the submit() method to the form for you.
+ *
+ * The options argument for ajaxForm works exactly as it does for ajaxSubmit. ajaxForm merely
+ * passes the options argument along after properly binding events for submit elements and
+ * the form itself.
+ */
+$.fn.ajaxForm = function(options) {
+ return this.ajaxFormUnbind().bind('submit.form-plugin', function() {
+ $(this).ajaxSubmit(options);
+ return false;
+ }).bind('click.form-plugin', function(e) {
+ var target = e.target;
+ var $el = $(target);
+ if (!($el.is(":submit,input:image"))) {
+ // is this a child element of the submit el? (ex: a span within a button)
+ var t = $el.closest(':submit');
+ if (t.length == 0)
+ return;
+ target = t[0];
+ }
+ var form = this;
+ form.clk = target;
+ if (target.type == 'image') {
+ if (e.offsetX != undefined) {
+ form.clk_x = e.offsetX;
+ form.clk_y = e.offsetY;
+ } else if (typeof $.fn.offset == 'function') { // try to use dimensions plugin
+ var offset = $el.offset();
+ form.clk_x = e.pageX - offset.left;
+ form.clk_y = e.pageY - offset.top;
+ } else {
+ form.clk_x = e.pageX - target.offsetLeft;
+ form.clk_y = e.pageY - target.offsetTop;
+ }
+ }
+ // clear form vars
+ setTimeout(function() { form.clk = form.clk_x = form.clk_y = null; }, 100);
+ });
+};
+
+// ajaxFormUnbind unbinds the event handlers that were bound by ajaxForm
+$.fn.ajaxFormUnbind = function() {
+ return this.unbind('submit.form-plugin click.form-plugin');
+};
+
+/**
+ * formToArray() gathers form element data into an array of objects that can
+ * be passed to any of the following ajax functions: $.get, $.post, or load.
+ * Each object in the array has both a 'name' and 'value' property. An example of
+ * an array for a simple login form might be:
+ *
+ * [ { name: 'username', value: 'jresig' }, { name: 'password', value: 'secret' } ]
+ *
+ * It is this array that is passed to pre-submit callback functions provided to the
+ * ajaxSubmit() and ajaxForm() methods.
+ */
+$.fn.formToArray = function(semantic) {
+ var a = [];
+ if (this.length == 0) return a;
+
+ var form = this[0];
+ var els = semantic ? form.getElementsByTagName('*') : form.elements;
+ if (!els) return a;
+ for(var i=0, max=els.length; i < max; i++) {
+ var el = els[i];
+ var n = el.name;
+ if (!n) continue;
+
+ if (semantic && form.clk && el.type == "image") {
+ // handle image inputs on the fly when semantic == true
+ if(!el.disabled && form.clk == el) {
+ a.push({name: n, value: $(el).val()});
+ a.push({name: n+'.x', value: form.clk_x}, {name: n+'.y', value: form.clk_y});
+ }
+ continue;
+ }
+
+ var v = $.fieldValue(el, true);
+ if (v && v.constructor == Array) {
+ for(var j=0, jmax=v.length; j < jmax; j++)
+ a.push({name: n, value: v[j]});
+ }
+ else if (v !== null && typeof v != 'undefined')
+ a.push({name: n, value: v});
+ }
+
+ if (!semantic && form.clk) {
+ // input type=='image' are not found in elements array! handle it here
+ var $input = $(form.clk), input = $input[0], n = input.name;
+ if (n && !input.disabled && input.type == 'image') {
+ a.push({name: n, value: $input.val()});
+ a.push({name: n+'.x', value: form.clk_x}, {name: n+'.y', value: form.clk_y});
+ }
+ }
+ return a;
+};
+
+/**
+ * Serializes form data into a 'submittable' string. This method will return a string
+ * in the format: name1=value1&amp;name2=value2
+ */
+$.fn.formSerialize = function(semantic) {
+ //hand off to jQuery.param for proper encoding
+ return $.param(this.formToArray(semantic));
+};
+
+/**
+ * Serializes all field elements in the jQuery object into a query string.
+ * This method will return a string in the format: name1=value1&amp;name2=value2
+ */
+$.fn.fieldSerialize = function(successful) {
+ var a = [];
+ this.each(function() {
+ var n = this.name;
+ if (!n) return;
+ var v = $.fieldValue(this, successful);
+ if (v && v.constructor == Array) {
+ for (var i=0,max=v.length; i < max; i++)
+ a.push({name: n, value: v[i]});
+ }
+ else if (v !== null && typeof v != 'undefined')
+ a.push({name: this.name, value: v});
+ });
+ //hand off to jQuery.param for proper encoding
+ return $.param(a);
+};
+
+/**
+ * Returns the value(s) of the element in the matched set. For example, consider the following form:
+ *
+ * <form><fieldset>
+ * <input name="A" type="text" />
+ * <input name="A" type="text" />
+ * <input name="B" type="checkbox" value="B1" />
+ * <input name="B" type="checkbox" value="B2"/>
+ * <input name="C" type="radio" value="C1" />
+ * <input name="C" type="radio" value="C2" />
+ * </fieldset></form>
+ *
+ * var v = $(':text').fieldValue();
+ * // if no values are entered into the text inputs
+ * v == ['','']
+ * // if values entered into the text inputs are 'foo' and 'bar'
+ * v == ['foo','bar']
+ *
+ * var v = $(':checkbox').fieldValue();
+ * // if neither checkbox is checked
+ * v === undefined
+ * // if both checkboxes are checked
+ * v == ['B1', 'B2']
+ *
+ * var v = $(':radio').fieldValue();
+ * // if neither radio is checked
+ * v === undefined
+ * // if first radio is checked
+ * v == ['C1']
+ *
+ * The successful argument controls whether or not the field element must be 'successful'
+ * (per http://www.w3.org/TR/html4/interact/forms.html#successful-controls).
+ * The default value of the successful argument is true. If this value is false the value(s)
+ * for each element is returned.
+ *
+ * Note: This method *always* returns an array. If no valid value can be determined the
+ * array will be empty, otherwise it will contain one or more values.
+ */
+$.fn.fieldValue = function(successful) {
+ for (var val=[], i=0, max=this.length; i < max; i++) {
+ var el = this[i];
+ var v = $.fieldValue(el, successful);
+ if (v === null || typeof v == 'undefined' || (v.constructor == Array && !v.length))
+ continue;
+ v.constructor == Array ? $.merge(val, v) : val.push(v);
+ }
+ return val;
+};
+
+/**
+ * Returns the value of the field element.
+ */
+$.fieldValue = function(el, successful) {
+ var n = el.name, t = el.type, tag = el.tagName.toLowerCase();
+ if (typeof successful == 'undefined') successful = true;
+
+ if (successful && (!n || el.disabled || t == 'reset' || t == 'button' ||
+ (t == 'checkbox' || t == 'radio') && !el.checked ||
+ (t == 'submit' || t == 'image') && el.form && el.form.clk != el ||
+ tag == 'select' && el.selectedIndex == -1))
+ return null;
+
+ if (tag == 'select') {
+ var index = el.selectedIndex;
+ if (index < 0) return null;
+ var a = [], ops = el.options;
+ var one = (t == 'select-one');
+ var max = (one ? index+1 : ops.length);
+ for(var i=(one ? index : 0); i < max; i++) {
+ var op = ops[i];
+ if (op.selected) {
+ var v = op.value;
+ if (!v) // extra pain for IE...
+ v = (op.attributes && op.attributes['value'] && !(op.attributes['value'].specified)) ? op.text : op.value;
+ if (one) return v;
+ a.push(v);
+ }
+ }
+ return a;
+ }
+ return el.value;
+};
+
+/**
+ * Clears the form data. Takes the following actions on the form's input fields:
+ * - input text fields will have their 'value' property set to the empty string
+ * - select elements will have their 'selectedIndex' property set to -1
+ * - checkbox and radio inputs will have their 'checked' property set to false
+ * - inputs of type submit, button, reset, and hidden will *not* be affected
+ * - button elements will *not* be affected
+ */
+$.fn.clearForm = function() {
+ return this.each(function() {
+ $('input,select,textarea', this).clearFields();
+ });
+};
+
+/**
+ * Clears the selected form elements.
+ */
+$.fn.clearFields = $.fn.clearInputs = function() {
+ return this.each(function() {
+ var t = this.type, tag = this.tagName.toLowerCase();
+ if (t == 'text' || t == 'password' || tag == 'textarea')
+ this.value = '';
+ else if (t == 'checkbox' || t == 'radio')
+ this.checked = false;
+ else if (tag == 'select')
+ this.selectedIndex = -1;
+ });
+};
+
+/**
+ * Resets the form data. Causes all form elements to be reset to their original value.
+ */
+$.fn.resetForm = function() {
+ return this.each(function() {
+ // guard against an input with the name of 'reset'
+ // note that IE reports the reset function as an 'object'
+ if (typeof this.reset == 'function' || (typeof this.reset == 'object' && !this.reset.nodeType))
+ this.reset();
+ });
+};
+
+/**
+ * Enables or disables any matching elements.
+ */
+$.fn.enable = function(b) {
+ if (b == undefined) b = true;
+ return this.each(function() {
+ this.disabled = !b;
+ });
+};
+
+/**
+ * Checks/unchecks any matching checkboxes or radio buttons and
+ * selects/deselects any matching option elements.
+ */
+$.fn.selected = function(select) {
+ if (select == undefined) select = true;
+ return this.each(function() {
+ var t = this.type;
+ if (t == 'checkbox' || t == 'radio')
+ this.checked = select;
+ else if (this.tagName.toLowerCase() == 'option') {
+ var $sel = $(this).parent('select');
+ if (select && $sel[0] && $sel[0].type == 'select-one') {
+ // deselect all other options
+ $sel.find('option').selected(false);
+ }
+ this.selected = select;
+ }
+ });
+};
+
+// helper fn for console logging
+// set $.fn.ajaxSubmit.debug to true to enable debug logging
+function log() {
+ if ($.fn.ajaxSubmit.debug && window.console && window.console.log)
+ window.console.log('[jquery.form] ' + Array.prototype.join.call(arguments,''));
+};
+
+})(jQuery);
diff --git a/1.1.x/share/www/script/jquery.js b/1.1.x/share/www/script/jquery.js
new file mode 100644
index 00000000..fff67764
--- /dev/null
+++ b/1.1.x/share/www/script/jquery.js
@@ -0,0 +1,6240 @@
+/*!
+ * jQuery JavaScript Library v1.4.2
+ * http://jquery.com/
+ *
+ * Copyright 2010, John Resig
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * Includes Sizzle.js
+ * http://sizzlejs.com/
+ * Copyright 2010, The Dojo Foundation
+ * Released under the MIT, BSD, and GPL Licenses.
+ *
+ * Date: Sat Feb 13 22:33:48 2010 -0500
+ */
+(function( window, undefined ) {
+
+// Define a local copy of jQuery
+var jQuery = function( selector, context ) {
+ // The jQuery object is actually just the init constructor 'enhanced'
+ return new jQuery.fn.init( selector, context );
+ },
+
+ // Map over jQuery in case of overwrite
+ _jQuery = window.jQuery,
+
+ // Map over the $ in case of overwrite
+ _$ = window.$,
+
+ // Use the correct document accordingly with window argument (sandbox)
+ document = window.document,
+
+ // A central reference to the root jQuery(document)
+ rootjQuery,
+
+ // A simple way to check for HTML strings or ID strings
+ // (both of which we optimize for)
+ quickExpr = /^[^<]*(<[\w\W]+>)[^>]*$|^#([\w-]+)$/,
+
+ // Is it a simple selector
+ isSimple = /^.[^:#\[\.,]*$/,
+
+ // Check if a string has a non-whitespace character in it
+ rnotwhite = /\S/,
+
+ // Used for trimming whitespace
+ rtrim = /^(\s|\u00A0)+|(\s|\u00A0)+$/g,
+
+ // Match a standalone tag
+ rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>)?$/,
+
+ // Keep a UserAgent string for use with jQuery.browser
+ userAgent = navigator.userAgent,
+
+ // For matching the engine and version of the browser
+ browserMatch,
+
+ // Has the ready events already been bound?
+ readyBound = false,
+
+ // The functions to execute on DOM ready
+ readyList = [],
+
+ // The ready event handler
+ DOMContentLoaded,
+
+ // Save a reference to some core methods
+ toString = Object.prototype.toString,
+ hasOwnProperty = Object.prototype.hasOwnProperty,
+ push = Array.prototype.push,
+ slice = Array.prototype.slice,
+ indexOf = Array.prototype.indexOf;
+
+jQuery.fn = jQuery.prototype = {
+ init: function( selector, context ) {
+ var match, elem, ret, doc;
+
+ // Handle $(""), $(null), or $(undefined)
+ if ( !selector ) {
+ return this;
+ }
+
+ // Handle $(DOMElement)
+ if ( selector.nodeType ) {
+ this.context = this[0] = selector;
+ this.length = 1;
+ return this;
+ }
+
+ // The body element only exists once, optimize finding it
+ if ( selector === "body" && !context ) {
+ this.context = document;
+ this[0] = document.body;
+ this.selector = "body";
+ this.length = 1;
+ return this;
+ }
+
+ // Handle HTML strings
+ if ( typeof selector === "string" ) {
+ // Are we dealing with HTML string or an ID?
+ match = quickExpr.exec( selector );
+
+ // Verify a match, and that no context was specified for #id
+ if ( match && (match[1] || !context) ) {
+
+ // HANDLE: $(html) -> $(array)
+ if ( match[1] ) {
+ doc = (context ? context.ownerDocument || context : document);
+
+ // If a single string is passed in and it's a single tag
+ // just do a createElement and skip the rest
+ ret = rsingleTag.exec( selector );
+
+ if ( ret ) {
+ if ( jQuery.isPlainObject( context ) ) {
+ selector = [ document.createElement( ret[1] ) ];
+ jQuery.fn.attr.call( selector, context, true );
+
+ } else {
+ selector = [ doc.createElement( ret[1] ) ];
+ }
+
+ } else {
+ ret = buildFragment( [ match[1] ], [ doc ] );
+ selector = (ret.cacheable ? ret.fragment.cloneNode(true) : ret.fragment).childNodes;
+ }
+
+ return jQuery.merge( this, selector );
+
+ // HANDLE: $("#id")
+ } else {
+ elem = document.getElementById( match[2] );
+
+ if ( elem ) {
+ // Handle the case where IE and Opera return items
+ // by name instead of ID
+ if ( elem.id !== match[2] ) {
+ return rootjQuery.find( selector );
+ }
+
+ // Otherwise, we inject the element directly into the jQuery object
+ this.length = 1;
+ this[0] = elem;
+ }
+
+ this.context = document;
+ this.selector = selector;
+ return this;
+ }
+
+ // HANDLE: $("TAG")
+ } else if ( !context && /^\w+$/.test( selector ) ) {
+ this.selector = selector;
+ this.context = document;
+ selector = document.getElementsByTagName( selector );
+ return jQuery.merge( this, selector );
+
+ // HANDLE: $(expr, $(...))
+ } else if ( !context || context.jquery ) {
+ return (context || rootjQuery).find( selector );
+
+ // HANDLE: $(expr, context)
+ // (which is just equivalent to: $(context).find(expr))
+ } else {
+ return jQuery( context ).find( selector );
+ }
+
+ // HANDLE: $(function)
+ // Shortcut for document ready
+ } else if ( jQuery.isFunction( selector ) ) {
+ return rootjQuery.ready( selector );
+ }
+
+ if (selector.selector !== undefined) {
+ this.selector = selector.selector;
+ this.context = selector.context;
+ }
+
+ return jQuery.makeArray( selector, this );
+ },
+
+ // Start with an empty selector
+ selector: "",
+
+ // The current version of jQuery being used
+ jquery: "1.4.2",
+
+ // The default length of a jQuery object is 0
+ length: 0,
+
+ // The number of elements contained in the matched element set
+ size: function() {
+ return this.length;
+ },
+
+ toArray: function() {
+ return slice.call( this, 0 );
+ },
+
+ // Get the Nth element in the matched element set OR
+ // Get the whole matched element set as a clean array
+ get: function( num ) {
+ return num == null ?
+
+ // Return a 'clean' array
+ this.toArray() :
+
+ // Return just the object
+ ( num < 0 ? this.slice(num)[ 0 ] : this[ num ] );
+ },
+
+ // Take an array of elements and push it onto the stack
+ // (returning the new matched element set)
+ pushStack: function( elems, name, selector ) {
+ // Build a new jQuery matched element set
+ var ret = jQuery();
+
+ if ( jQuery.isArray( elems ) ) {
+ push.apply( ret, elems );
+
+ } else {
+ jQuery.merge( ret, elems );
+ }
+
+ // Add the old object onto the stack (as a reference)
+ ret.prevObject = this;
+
+ ret.context = this.context;
+
+ if ( name === "find" ) {
+ ret.selector = this.selector + (this.selector ? " " : "") + selector;
+ } else if ( name ) {
+ ret.selector = this.selector + "." + name + "(" + selector + ")";
+ }
+
+ // Return the newly-formed element set
+ return ret;
+ },
+
+ // Execute a callback for every element in the matched set.
+ // (You can seed the arguments with an array of args, but this is
+ // only used internally.)
+ each: function( callback, args ) {
+ return jQuery.each( this, callback, args );
+ },
+
+ ready: function( fn ) {
+ // Attach the listeners
+ jQuery.bindReady();
+
+ // If the DOM is already ready
+ if ( jQuery.isReady ) {
+ // Execute the function immediately
+ fn.call( document, jQuery );
+
+ // Otherwise, remember the function for later
+ } else if ( readyList ) {
+ // Add the function to the wait list
+ readyList.push( fn );
+ }
+
+ return this;
+ },
+
+ eq: function( i ) {
+ return i === -1 ?
+ this.slice( i ) :
+ this.slice( i, +i + 1 );
+ },
+
+ first: function() {
+ return this.eq( 0 );
+ },
+
+ last: function() {
+ return this.eq( -1 );
+ },
+
+ slice: function() {
+ return this.pushStack( slice.apply( this, arguments ),
+ "slice", slice.call(arguments).join(",") );
+ },
+
+ map: function( callback ) {
+ return this.pushStack( jQuery.map(this, function( elem, i ) {
+ return callback.call( elem, i, elem );
+ }));
+ },
+
+ end: function() {
+ return this.prevObject || jQuery(null);
+ },
+
+ // For internal use only.
+ // Behaves like an Array's method, not like a jQuery method.
+ push: push,
+ sort: [].sort,
+ splice: [].splice
+};
+
+// Give the init function the jQuery prototype for later instantiation
+jQuery.fn.init.prototype = jQuery.fn;
+
+jQuery.extend = jQuery.fn.extend = function() {
+ // copy reference to target object
+ var target = arguments[0] || {}, i = 1, length = arguments.length, deep = false, options, name, src, copy;
+
+ // Handle a deep copy situation
+ if ( typeof target === "boolean" ) {
+ deep = target;
+ target = arguments[1] || {};
+ // skip the boolean and the target
+ i = 2;
+ }
+
+ // Handle case when target is a string or something (possible in deep copy)
+ if ( typeof target !== "object" && !jQuery.isFunction(target) ) {
+ target = {};
+ }
+
+ // extend jQuery itself if only one argument is passed
+ if ( length === i ) {
+ target = this;
+ --i;
+ }
+
+ for ( ; i < length; i++ ) {
+ // Only deal with non-null/undefined values
+ if ( (options = arguments[ i ]) != null ) {
+ // Extend the base object
+ for ( name in options ) {
+ src = target[ name ];
+ copy = options[ name ];
+
+ // Prevent never-ending loop
+ if ( target === copy ) {
+ continue;
+ }
+
+ // Recurse if we're merging object literal values or arrays
+ if ( deep && copy && ( jQuery.isPlainObject(copy) || jQuery.isArray(copy) ) ) {
+ var clone = src && ( jQuery.isPlainObject(src) || jQuery.isArray(src) ) ? src
+ : jQuery.isArray(copy) ? [] : {};
+
+ // Never move original objects, clone them
+ target[ name ] = jQuery.extend( deep, clone, copy );
+
+ // Don't bring in undefined values
+ } else if ( copy !== undefined ) {
+ target[ name ] = copy;
+ }
+ }
+ }
+ }
+
+ // Return the modified object
+ return target;
+};
+
+jQuery.extend({
+ noConflict: function( deep ) {
+ window.$ = _$;
+
+ if ( deep ) {
+ window.jQuery = _jQuery;
+ }
+
+ return jQuery;
+ },
+
+ // Is the DOM ready to be used? Set to true once it occurs.
+ isReady: false,
+
+ // Handle when the DOM is ready
+ ready: function() {
+ // Make sure that the DOM is not already loaded
+ if ( !jQuery.isReady ) {
+ // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443).
+ if ( !document.body ) {
+ return setTimeout( jQuery.ready, 13 );
+ }
+
+ // Remember that the DOM is ready
+ jQuery.isReady = true;
+
+ // If there are functions bound to execute
+ if ( readyList ) {
+ // Execute all of them
+ var fn, i = 0;
+ while ( (fn = readyList[ i++ ]) ) {
+ fn.call( document, jQuery );
+ }
+
+ // Reset the list of functions
+ readyList = null;
+ }
+
+ // Trigger any bound ready events
+ if ( jQuery.fn.triggerHandler ) {
+ jQuery( document ).triggerHandler( "ready" );
+ }
+ }
+ },
+
+ bindReady: function() {
+ if ( readyBound ) {
+ return;
+ }
+
+ readyBound = true;
+
+ // Catch cases where $(document).ready() is called after the
+ // browser event has already occurred.
+ if ( document.readyState === "complete" ) {
+ return jQuery.ready();
+ }
+
+ // Mozilla, Opera and webkit nightlies currently support this event
+ if ( document.addEventListener ) {
+ // Use the handy event callback
+ document.addEventListener( "DOMContentLoaded", DOMContentLoaded, false );
+
+ // A fallback to window.onload, that will always work
+ window.addEventListener( "load", jQuery.ready, false );
+
+ // If IE event model is used
+ } else if ( document.attachEvent ) {
+ // ensure firing before onload,
+ // maybe late but safe also for iframes
+ document.attachEvent("onreadystatechange", DOMContentLoaded);
+
+ // A fallback to window.onload, that will always work
+ window.attachEvent( "onload", jQuery.ready );
+
+ // If IE and not a frame
+ // continually check to see if the document is ready
+ var toplevel = false;
+
+ try {
+ toplevel = window.frameElement == null;
+ } catch(e) {}
+
+ if ( document.documentElement.doScroll && toplevel ) {
+ doScrollCheck();
+ }
+ }
+ },
+
+ // See test/unit/core.js for details concerning isFunction.
+ // Since version 1.3, DOM methods and functions like alert
+ // aren't supported. They return false on IE (#2968).
+ isFunction: function( obj ) {
+ return toString.call(obj) === "[object Function]";
+ },
+
+ isArray: function( obj ) {
+ return toString.call(obj) === "[object Array]";
+ },
+
+ isPlainObject: function( obj ) {
+ // Must be an Object.
+ // Because of IE, we also have to check the presence of the constructor property.
+ // Make sure that DOM nodes and window objects don't pass through, as well
+ if ( !obj || toString.call(obj) !== "[object Object]" || obj.nodeType || obj.setInterval ) {
+ return false;
+ }
+
+ // If the constructor is not an own property, the object must have been created by Object
+ if ( obj.constructor
+ && !hasOwnProperty.call(obj, "constructor")
+ && !hasOwnProperty.call(obj.constructor.prototype, "isPrototypeOf") ) {
+ return false;
+ }
+
+ // Own properties are enumerated first, so to speed things up,
+ // if the last one is own, then all properties are own.
+
+ var key;
+ for ( key in obj ) {}
+
+ return key === undefined || hasOwnProperty.call( obj, key );
+ },
+
+ isEmptyObject: function( obj ) {
+ for ( var name in obj ) {
+ return false;
+ }
+ return true;
+ },
+
+ error: function( msg ) {
+ throw msg;
+ },
+
+ parseJSON: function( data ) {
+ if ( typeof data !== "string" || !data ) {
+ return null;
+ }
+
+ // Make sure leading/trailing whitespace is removed (IE can't handle it)
+ data = jQuery.trim( data );
+
+ // Make sure the incoming data is actual JSON
+ // Logic borrowed from http://json.org/json2.js
+ if ( /^[\],:{}\s]*$/.test(data.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, "@")
+ .replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, "]")
+ .replace(/(?:^|:|,)(?:\s*\[)+/g, "")) ) {
+
+ // Try to use the native JSON parser first
+ return window.JSON && window.JSON.parse ?
+ window.JSON.parse( data ) :
+ (new Function("return " + data))();
+
+ } else {
+ jQuery.error( "Invalid JSON: " + data );
+ }
+ },
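+ // Editor's usage sketch (illustrative, not part of the library): valid input
+ // yields a value, malformed input is routed through jQuery.error and throws:
+ //   jQuery.parseJSON( '{"a": 1}' ).a // => 1
+ //   jQuery.parseJSON( "{a: 1}" )     // throws "Invalid JSON: {a: 1}"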
+
+ noop: function() {},
+
+ // Evaluates a script in a global context
+ globalEval: function( data ) {
+ if ( data && rnotwhite.test(data) ) {
+ // Inspired by code by Andrea Giammarchi
+ // http://webreflection.blogspot.com/2007/08/global-scope-evaluation-and-dom.html
+ var head = document.getElementsByTagName("head")[0] || document.documentElement,
+ script = document.createElement("script");
+
+ script.type = "text/javascript";
+
+ if ( jQuery.support.scriptEval ) {
+ script.appendChild( document.createTextNode( data ) );
+ } else {
+ script.text = data;
+ }
+
+ // Use insertBefore instead of appendChild to circumvent an IE6 bug.
+ // This arises when a base node is used (#2709).
+ head.insertBefore( script, head.firstChild );
+ head.removeChild( script );
+ }
+ },
+
+ nodeName: function( elem, name ) {
+ return elem.nodeName && elem.nodeName.toUpperCase() === name.toUpperCase();
+ },
+
+ // args is for internal usage only
+ each: function( object, callback, args ) {
+ var name, i = 0,
+ length = object.length,
+ isObj = length === undefined || jQuery.isFunction(object);
+
+ if ( args ) {
+ if ( isObj ) {
+ for ( name in object ) {
+ if ( callback.apply( object[ name ], args ) === false ) {
+ break;
+ }
+ }
+ } else {
+ for ( ; i < length; ) {
+ if ( callback.apply( object[ i++ ], args ) === false ) {
+ break;
+ }
+ }
+ }
+
+ // A special, fast case for the most common use of each
+ } else {
+ if ( isObj ) {
+ for ( name in object ) {
+ if ( callback.call( object[ name ], name, object[ name ] ) === false ) {
+ break;
+ }
+ }
+ } else {
+ for ( var value = object[0];
+ i < length && callback.call( value, i, value ) !== false; value = object[++i] ) {}
+ }
+ }
+
+ return object;
+ },
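+ // Editor's usage sketch (illustrative, not part of the library): the callback
+ // gets index/key and value with `this` bound to the value; returning false
+ // stops the iteration early:
+ //   jQuery.each( [ "a", "b", "c" ], function( i, v ) {
+ //     if ( v === "b" ) { return false; } // loop ends here
+ //   });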
+
+ trim: function( text ) {
+ return (text || "").replace( rtrim, "" );
+ },
+
+ // results is for internal usage only
+ makeArray: function( array, results ) {
+ var ret = results || [];
+
+ if ( array != null ) {
+ // The window, strings (and functions) also have 'length'
+ // The extra typeof function check is to prevent crashes
+ // in Safari 2 (See: #3039)
+ if ( array.length == null || typeof array === "string" || jQuery.isFunction(array) || (typeof array !== "function" && array.setInterval) ) {
+ push.call( ret, array );
+ } else {
+ jQuery.merge( ret, array );
+ }
+ }
+
+ return ret;
+ },
+
+ inArray: function( elem, array ) {
+ if ( array.indexOf ) {
+ return array.indexOf( elem );
+ }
+
+ for ( var i = 0, length = array.length; i < length; i++ ) {
+ if ( array[ i ] === elem ) {
+ return i;
+ }
+ }
+
+ return -1;
+ },
+
+ merge: function( first, second ) {
+ var i = first.length, j = 0;
+
+ if ( typeof second.length === "number" ) {
+ for ( var l = second.length; j < l; j++ ) {
+ first[ i++ ] = second[ j ];
+ }
+
+ } else {
+ while ( second[j] !== undefined ) {
+ first[ i++ ] = second[ j++ ];
+ }
+ }
+
+ first.length = i;
+
+ return first;
+ },
+
+ grep: function( elems, callback, inv ) {
+ var ret = [];
+
+ // Go through the array, only saving the items
+ // that pass the validator function
+ for ( var i = 0, length = elems.length; i < length; i++ ) {
+ if ( !inv !== !callback( elems[ i ], i ) ) {
+ ret.push( elems[ i ] );
+ }
+ }
+
+ return ret;
+ },
+
+ // arg is for internal usage only
+ map: function( elems, callback, arg ) {
+ var ret = [], value;
+
+ // Go through the array, translating each of the items to their
+ // new value (or values).
+ for ( var i = 0, length = elems.length; i < length; i++ ) {
+ value = callback( elems[ i ], i, arg );
+
+ if ( value != null ) {
+ ret[ ret.length ] = value;
+ }
+ }
+
+ return ret.concat.apply( [], ret );
+ },
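+ // Editor's usage sketch (illustrative, not part of the library): unlike a
+ // plain array map, null/undefined results are dropped and returned arrays
+ // are flattened one level:
+ //   jQuery.map( [ 1, 2, 3 ], function( n ) { return n % 2 ? [ n, n * 10 ] : null; } )
+ //   // => [ 1, 10, 3, 30 ]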
+
+ // A global GUID counter for objects
+ guid: 1,
+
+ proxy: function( fn, proxy, thisObject ) {
+ if ( arguments.length === 2 ) {
+ if ( typeof proxy === "string" ) {
+ thisObject = fn;
+ fn = thisObject[ proxy ];
+ proxy = undefined;
+
+ } else if ( proxy && !jQuery.isFunction( proxy ) ) {
+ thisObject = proxy;
+ proxy = undefined;
+ }
+ }
+
+ if ( !proxy && fn ) {
+ proxy = function() {
+ return fn.apply( thisObject || this, arguments );
+ };
+ }
+
+ // Set the guid of the unique handler to that of the original handler, so it can be removed
+ if ( fn ) {
+ proxy.guid = fn.guid = fn.guid || proxy.guid || jQuery.guid++;
+ }
+
+ // So proxy can be declared as an argument
+ return proxy;
+ },
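+ // Editor's usage sketch (illustrative; "#btn" is an assumed element): since
+ // the proxy shares the original function's guid, the original can be used
+ // to unbind it later:
+ //   var obj = { label: "saved", report: function() { alert( this.label ); } };
+ //   jQuery( "#btn" ).click( jQuery.proxy( obj, "report" ) );
+ //   jQuery( "#btn" ).unbind( "click", obj.report ); // removes the proxied handler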
+
+ // Use of jQuery.browser is frowned upon.
+ // More details: http://docs.jquery.com/Utilities/jQuery.browser
+ uaMatch: function( ua ) {
+ ua = ua.toLowerCase();
+
+ var match = /(webkit)[ \/]([\w.]+)/.exec( ua ) ||
+ /(opera)(?:.*version)?[ \/]([\w.]+)/.exec( ua ) ||
+ /(msie) ([\w.]+)/.exec( ua ) ||
+ !/compatible/.test( ua ) && /(mozilla)(?:.*? rv:([\w.]+))?/.exec( ua ) ||
+ [];
+
+ return { browser: match[1] || "", version: match[2] || "0" };
+ },
+
+ browser: {}
+});
+
+browserMatch = jQuery.uaMatch( userAgent );
+if ( browserMatch.browser ) {
+ jQuery.browser[ browserMatch.browser ] = true;
+ jQuery.browser.version = browserMatch.version;
+}
+
+// Deprecated, use jQuery.browser.webkit instead
+if ( jQuery.browser.webkit ) {
+ jQuery.browser.safari = true;
+}
+
+if ( indexOf ) {
+ jQuery.inArray = function( elem, array ) {
+ return indexOf.call( array, elem );
+ };
+}
+
+// All jQuery objects should point back to these
+rootjQuery = jQuery(document);
+
+// Cleanup functions for the document ready method
+if ( document.addEventListener ) {
+ DOMContentLoaded = function() {
+ document.removeEventListener( "DOMContentLoaded", DOMContentLoaded, false );
+ jQuery.ready();
+ };
+
+} else if ( document.attachEvent ) {
+ DOMContentLoaded = function() {
+ // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443).
+ if ( document.readyState === "complete" ) {
+ document.detachEvent( "onreadystatechange", DOMContentLoaded );
+ jQuery.ready();
+ }
+ };
+}
+
+// The DOM ready check for Internet Explorer
+function doScrollCheck() {
+ if ( jQuery.isReady ) {
+ return;
+ }
+
+ try {
+ // If IE is used, use the trick by Diego Perini
+ // http://javascript.nwbox.com/IEContentLoaded/
+ document.documentElement.doScroll("left");
+ } catch( error ) {
+ setTimeout( doScrollCheck, 1 );
+ return;
+ }
+
+ // and execute any waiting functions
+ jQuery.ready();
+}
+
+function evalScript( i, elem ) {
+ if ( elem.src ) {
+ jQuery.ajax({
+ url: elem.src,
+ async: false,
+ dataType: "script"
+ });
+ } else {
+ jQuery.globalEval( elem.text || elem.textContent || elem.innerHTML || "" );
+ }
+
+ if ( elem.parentNode ) {
+ elem.parentNode.removeChild( elem );
+ }
+}
+
+// Multifunctional method to get and set values on a collection.
+// The value(s) can optionally be executed if they are functions.
+function access( elems, key, value, exec, fn, pass ) {
+ var length = elems.length;
+
+ // Setting many attributes
+ if ( typeof key === "object" ) {
+ for ( var k in key ) {
+ access( elems, k, key[k], exec, fn, value );
+ }
+ return elems;
+ }
+
+ // Setting one attribute
+ if ( value !== undefined ) {
+ // Optionally, function values get executed if exec is true
+ exec = !pass && exec && jQuery.isFunction(value);
+
+ for ( var i = 0; i < length; i++ ) {
+ fn( elems[i], key, exec ? value.call( elems[i], i, fn( elems[i], key ) ) : value, pass );
+ }
+
+ return elems;
+ }
+
+ // Getting an attribute
+ return length ? fn( elems[0], key ) : undefined;
+}
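+// Editor's usage sketch (illustrative, not part of the library): access() is
+// the shared get/set dispatcher behind methods such as .attr():
+//   jQuery( "img" ).attr( "alt" );            // get: reads the first element
+//   jQuery( "img" ).attr( { alt: "photo" } ); // set: key/value map, all elements
+//   jQuery( "img" ).attr( "alt", function( i ) { return "photo " + i; } ); // executed per element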
+
+function now() {
+ return (new Date).getTime();
+}
+(function() {
+
+ jQuery.support = {};
+
+ var root = document.documentElement,
+ script = document.createElement("script"),
+ div = document.createElement("div"),
+ id = "script" + now();
+
+ div.style.display = "none";
+ div.innerHTML = " <link/><table></table><a href='/a' style='color:red;float:left;opacity:.55;'>a</a><input type='checkbox'/>";
+
+ var all = div.getElementsByTagName("*"),
+ a = div.getElementsByTagName("a")[0];
+
+ // Can't get basic test support
+ if ( !all || !all.length || !a ) {
+ return;
+ }
+
+ jQuery.support = {
+ // IE strips leading whitespace when .innerHTML is used
+ leadingWhitespace: div.firstChild.nodeType === 3,
+
+ // Make sure that tbody elements aren't automatically inserted
+ // IE will insert them into empty tables
+ tbody: !div.getElementsByTagName("tbody").length,
+
+ // Make sure that link elements get serialized correctly by innerHTML
+ // This requires a wrapper element in IE
+ htmlSerialize: !!div.getElementsByTagName("link").length,
+
+ // Get the style information from getAttribute
+ // (IE uses .cssText instead)
+ style: /red/.test( a.getAttribute("style") ),
+
+ // Make sure that URLs aren't manipulated
+ // (IE normalizes it by default)
+ hrefNormalized: a.getAttribute("href") === "/a",
+
+ // Make sure that element opacity exists
+ // (IE uses filter instead)
+ // Use a regex to work around a WebKit issue. See #5145
+ opacity: /^0\.55$/.test( a.style.opacity ),
+
+ // Verify style float existence
+ // (IE uses styleFloat instead of cssFloat)
+ cssFloat: !!a.style.cssFloat,
+
+ // Make sure that if no value is specified for a checkbox
+ // that it defaults to "on".
+ // (WebKit defaults to "" instead)
+ checkOn: div.getElementsByTagName("input")[0].value === "on",
+
+ // Make sure that a selected-by-default option has a working selected property.
+ // (WebKit defaults to false instead of true, IE too, if it's in an optgroup)
+ optSelected: document.createElement("select").appendChild( document.createElement("option") ).selected,
+
+ parentNode: div.removeChild( div.appendChild( document.createElement("div") ) ).parentNode === null,
+
+ // Will be defined later
+ deleteExpando: true,
+ checkClone: false,
+ scriptEval: false,
+ noCloneEvent: true,
+ boxModel: null
+ };
+
+ script.type = "text/javascript";
+ try {
+ script.appendChild( document.createTextNode( "window." + id + "=1;" ) );
+ } catch(e) {}
+
+ root.insertBefore( script, root.firstChild );
+
+ // Make sure that the execution of code works by injecting a script
+ // tag with appendChild/createTextNode
+ // (IE doesn't support this, fails, and uses .text instead)
+ if ( window[ id ] ) {
+ jQuery.support.scriptEval = true;
+ delete window[ id ];
+ }
+
+ // Test to see if it's possible to delete an expando from an element
+ // Fails in Internet Explorer
+ try {
+ delete script.test;
+
+ } catch(e) {
+ jQuery.support.deleteExpando = false;
+ }
+
+ root.removeChild( script );
+
+ if ( div.attachEvent && div.fireEvent ) {
+ div.attachEvent("onclick", function click() {
+ // Cloning a node shouldn't copy over any
+ // bound event handlers (IE does this)
+ jQuery.support.noCloneEvent = false;
+ div.detachEvent("onclick", click);
+ });
+ div.cloneNode(true).fireEvent("onclick");
+ }
+
+ div = document.createElement("div");
+ div.innerHTML = "<input type='radio' name='radiotest' checked='checked'/>";
+
+ var fragment = document.createDocumentFragment();
+ fragment.appendChild( div.firstChild );
+
+ // WebKit doesn't clone checked state correctly in fragments
+ jQuery.support.checkClone = fragment.cloneNode(true).cloneNode(true).lastChild.checked;
+
+ // Figure out if the W3C box model works as expected
+ // document.body must exist before we can do this
+ jQuery(function() {
+ var div = document.createElement("div");
+ div.style.width = div.style.paddingLeft = "1px";
+
+ document.body.appendChild( div );
+ jQuery.boxModel = jQuery.support.boxModel = div.offsetWidth === 2;
+ document.body.removeChild( div ).style.display = 'none';
+
+ div = null;
+ });
+
+ // Technique from Juriy Zaytsev
+ // http://thinkweb2.com/projects/prototype/detecting-event-support-without-browser-sniffing/
+ var eventSupported = function( eventName ) {
+ var el = document.createElement("div");
+ eventName = "on" + eventName;
+
+ var isSupported = (eventName in el);
+ if ( !isSupported ) {
+ el.setAttribute(eventName, "return;");
+ isSupported = typeof el[eventName] === "function";
+ }
+ el = null;
+
+ return isSupported;
+ };
+
+ jQuery.support.submitBubbles = eventSupported("submit");
+ jQuery.support.changeBubbles = eventSupported("change");
+
+ // release memory in IE
+ root = script = div = all = a = null;
+})();
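+// Editor's usage sketch (illustrative, not part of the library): code branches
+// on these feature flags rather than sniffing user agents, e.g.
+//   if ( !jQuery.support.opacity ) {
+//     // fall back to IE's filter-based opacity handling
+//   }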
+
+jQuery.props = {
+ "for": "htmlFor",
+ "class": "className",
+ readonly: "readOnly",
+ maxlength: "maxLength",
+ cellspacing: "cellSpacing",
+ rowspan: "rowSpan",
+ colspan: "colSpan",
+ tabindex: "tabIndex",
+ usemap: "useMap",
+ frameborder: "frameBorder"
+};
+var expando = "jQuery" + now(), uuid = 0, windowData = {};
+
+jQuery.extend({
+ cache: {},
+
+ expando:expando,
+
+ // The following elements throw uncatchable exceptions if you
+ // attempt to add expando properties to them.
+ noData: {
+ "embed": true,
+ "object": true,
+ "applet": true
+ },
+
+ data: function( elem, name, data ) {
+ if ( elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()] ) {
+ return;
+ }
+
+ elem = elem == window ?
+ windowData :
+ elem;
+
+ var id = elem[ expando ], cache = jQuery.cache, thisCache;
+
+ if ( !id && typeof name === "string" && data === undefined ) {
+ return null;
+ }
+
+ // Compute a unique ID for the element
+ if ( !id ) {
+ id = ++uuid;
+ }
+
+ // Avoid generating a new cache unless none exists and we
+ // want to manipulate it.
+ if ( typeof name === "object" ) {
+ elem[ expando ] = id;
+ thisCache = cache[ id ] = jQuery.extend(true, {}, name);
+
+ } else if ( !cache[ id ] ) {
+ elem[ expando ] = id;
+ cache[ id ] = {};
+ }
+
+ thisCache = cache[ id ];
+
+ // Prevent overriding the named cache with undefined values
+ if ( data !== undefined ) {
+ thisCache[ name ] = data;
+ }
+
+ return typeof name === "string" ? thisCache[ name ] : thisCache;
+ },
+
+ removeData: function( elem, name ) {
+ if ( elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()] ) {
+ return;
+ }
+
+ elem = elem == window ?
+ windowData :
+ elem;
+
+ var id = elem[ expando ], cache = jQuery.cache, thisCache = cache[ id ];
+
+ // If we want to remove a specific section of the element's data
+ if ( name ) {
+ if ( thisCache ) {
+ // Remove the section of cache data
+ delete thisCache[ name ];
+
+ // If we've removed all the data, remove the element's cache
+ if ( jQuery.isEmptyObject(thisCache) ) {
+ jQuery.removeData( elem );
+ }
+ }
+
+ // Otherwise, we want to remove all of the element's data
+ } else {
+ if ( jQuery.support.deleteExpando ) {
+ delete elem[ jQuery.expando ];
+
+ } else if ( elem.removeAttribute ) {
+ elem.removeAttribute( jQuery.expando );
+ }
+
+ // Completely remove the data cache
+ delete cache[ id ];
+ }
+ }
+});
+
+jQuery.fn.extend({
+ data: function( key, value ) {
+ if ( typeof key === "undefined" && this.length ) {
+ return jQuery.data( this[0] );
+
+ } else if ( typeof key === "object" ) {
+ return this.each(function() {
+ jQuery.data( this, key );
+ });
+ }
+
+ var parts = key.split(".");
+ parts[1] = parts[1] ? "." + parts[1] : "";
+
+ if ( value === undefined ) {
+ var data = this.triggerHandler("getData" + parts[1] + "!", [parts[0]]);
+
+ if ( data === undefined && this.length ) {
+ data = jQuery.data( this[0], key );
+ }
+ return data === undefined && parts[1] ?
+ this.data( parts[0] ) :
+ data;
+ } else {
+ return this.trigger("setData" + parts[1] + "!", [parts[0], value]).each(function() {
+ jQuery.data( this, key, value );
+ });
+ }
+ },
+
+ removeData: function( key ) {
+ return this.each(function() {
+ jQuery.removeData( this, key );
+ });
+ }
+});
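+// Editor's usage sketch (illustrative; "#box" is an assumed element):
+//   jQuery( "#box" ).data( "views", 1 );    // stores the value in jQuery.cache
+//   jQuery( "#box" ).data( "views" );       // => 1
+//   jQuery( "#box" ).removeData( "views" ); // drops the key, and the cache if empty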
+jQuery.extend({
+ queue: function( elem, type, data ) {
+ if ( !elem ) {
+ return;
+ }
+
+ type = (type || "fx") + "queue";
+ var q = jQuery.data( elem, type );
+
+ // Speed up dequeue by getting out quickly if this is just a lookup
+ if ( !data ) {
+ return q || [];
+ }
+
+ if ( !q || jQuery.isArray(data) ) {
+ q = jQuery.data( elem, type, jQuery.makeArray(data) );
+
+ } else {
+ q.push( data );
+ }
+
+ return q;
+ },
+
+ dequeue: function( elem, type ) {
+ type = type || "fx";
+
+ var queue = jQuery.queue( elem, type ), fn = queue.shift();
+
+ // If the fx queue is dequeued, always remove the progress sentinel
+ if ( fn === "inprogress" ) {
+ fn = queue.shift();
+ }
+
+ if ( fn ) {
+ // Add a progress sentinel to prevent the fx queue from being
+ // automatically dequeued
+ if ( type === "fx" ) {
+ queue.unshift("inprogress");
+ }
+
+ fn.call(elem, function() {
+ jQuery.dequeue(elem, type);
+ });
+ }
+ }
+});
+
+jQuery.fn.extend({
+ queue: function( type, data ) {
+ if ( typeof type !== "string" ) {
+ data = type;
+ type = "fx";
+ }
+
+ if ( data === undefined ) {
+ return jQuery.queue( this[0], type );
+ }
+ return this.each(function( i, elem ) {
+ var queue = jQuery.queue( this, type, data );
+
+ if ( type === "fx" && queue[0] !== "inprogress" ) {
+ jQuery.dequeue( this, type );
+ }
+ });
+ },
+ dequeue: function( type ) {
+ return this.each(function() {
+ jQuery.dequeue( this, type );
+ });
+ },
+
+ // Based off of the plugin by Clint Helfers, with permission.
+ // http://blindsignals.com/index.php/2009/07/jquery-delay/
+ delay: function( time, type ) {
+ time = jQuery.fx ? jQuery.fx.speeds[time] || time : time;
+ type = type || "fx";
+
+ return this.queue( type, function() {
+ var elem = this;
+ setTimeout(function() {
+ jQuery.dequeue( elem, type );
+ }, time );
+ });
+ },
+
+ clearQueue: function( type ) {
+ return this.queue( type || "fx", [] );
+ }
+});
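+// Editor's usage sketch (illustrative; "#box" is an assumed element): a queued
+// function receives a continuation that dequeues the next item:
+//   jQuery( "#box" ).delay( 200 ).queue( "fx", function( next ) {
+//     jQuery( this ).addClass( "done" );
+//     next(); // same as jQuery.dequeue( this, "fx" )
+//   });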
+var rclass = /[\n\t]/g,
+ rspace = /\s+/,
+ rreturn = /\r/g,
+ rspecialurl = /href|src|style/,
+ rtype = /(button|input)/i,
+ rfocusable = /(button|input|object|select|textarea)/i,
+ rclickable = /^(a|area)$/i,
+ rradiocheck = /radio|checkbox/;
+
+jQuery.fn.extend({
+ attr: function( name, value ) {
+ return access( this, name, value, true, jQuery.attr );
+ },
+
+ removeAttr: function( name, fn ) {
+ return this.each(function(){
+ jQuery.attr( this, name, "" );
+ if ( this.nodeType === 1 ) {
+ this.removeAttribute( name );
+ }
+ });
+ },
+
+ addClass: function( value ) {
+ if ( jQuery.isFunction(value) ) {
+ return this.each(function(i) {
+ var self = jQuery(this);
+ self.addClass( value.call(this, i, self.attr("class")) );
+ });
+ }
+
+ if ( value && typeof value === "string" ) {
+ var classNames = (value || "").split( rspace );
+
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ var elem = this[i];
+
+ if ( elem.nodeType === 1 ) {
+ if ( !elem.className ) {
+ elem.className = value;
+
+ } else {
+ var className = " " + elem.className + " ", setClass = elem.className;
+ for ( var c = 0, cl = classNames.length; c < cl; c++ ) {
+ if ( className.indexOf( " " + classNames[c] + " " ) < 0 ) {
+ setClass += " " + classNames[c];
+ }
+ }
+ elem.className = jQuery.trim( setClass );
+ }
+ }
+ }
+ }
+
+ return this;
+ },
+
+ removeClass: function( value ) {
+ if ( jQuery.isFunction(value) ) {
+ return this.each(function(i) {
+ var self = jQuery(this);
+ self.removeClass( value.call(this, i, self.attr("class")) );
+ });
+ }
+
+ if ( (value && typeof value === "string") || value === undefined ) {
+ var classNames = (value || "").split(rspace);
+
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ var elem = this[i];
+
+ if ( elem.nodeType === 1 && elem.className ) {
+ if ( value ) {
+ var className = (" " + elem.className + " ").replace(rclass, " ");
+ for ( var c = 0, cl = classNames.length; c < cl; c++ ) {
+ className = className.replace(" " + classNames[c] + " ", " ");
+ }
+ elem.className = jQuery.trim( className );
+
+ } else {
+ elem.className = "";
+ }
+ }
+ }
+ }
+
+ return this;
+ },
+
+ toggleClass: function( value, stateVal ) {
+ var type = typeof value, isBool = typeof stateVal === "boolean";
+
+ if ( jQuery.isFunction( value ) ) {
+ return this.each(function(i) {
+ var self = jQuery(this);
+ self.toggleClass( value.call(this, i, self.attr("class"), stateVal), stateVal );
+ });
+ }
+
+ return this.each(function() {
+ if ( type === "string" ) {
+ // toggle individual class names
+ var className, i = 0, self = jQuery(this),
+ state = stateVal,
+ classNames = value.split( rspace );
+
+ while ( (className = classNames[ i++ ]) ) {
+ // check each className given, space-separated list
+ state = isBool ? state : !self.hasClass( className );
+ self[ state ? "addClass" : "removeClass" ]( className );
+ }
+
+ } else if ( type === "undefined" || type === "boolean" ) {
+ if ( this.className ) {
+ // store className if set
+ jQuery.data( this, "__className__", this.className );
+ }
+
+ // toggle whole className
+ this.className = this.className || value === false ? "" : jQuery.data( this, "__className__" ) || "";
+ }
+ });
+ },
+
+ hasClass: function( selector ) {
+ var className = " " + selector + " ";
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ if ( (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) > -1 ) {
+ return true;
+ }
+ }
+
+ return false;
+ },
+
+ val: function( value ) {
+ if ( value === undefined ) {
+ var elem = this[0];
+
+ if ( elem ) {
+ if ( jQuery.nodeName( elem, "option" ) ) {
+ return (elem.attributes.value || {}).specified ? elem.value : elem.text;
+ }
+
+ // We need to handle select boxes special
+ if ( jQuery.nodeName( elem, "select" ) ) {
+ var index = elem.selectedIndex,
+ values = [],
+ options = elem.options,
+ one = elem.type === "select-one";
+
+ // Nothing was selected
+ if ( index < 0 ) {
+ return null;
+ }
+
+ // Loop through all the selected options
+ for ( var i = one ? index : 0, max = one ? index + 1 : options.length; i < max; i++ ) {
+ var option = options[ i ];
+
+ if ( option.selected ) {
+ // Get the specific value for the option
+ value = jQuery(option).val();
+
+ // We don't need an array for one selects
+ if ( one ) {
+ return value;
+ }
+
+ // Multi-Selects return an array
+ values.push( value );
+ }
+ }
+
+ return values;
+ }
+
+ // Handle the case where WebKit returns "" instead of "on" if a value isn't specified
+ if ( rradiocheck.test( elem.type ) && !jQuery.support.checkOn ) {
+ return elem.getAttribute("value") === null ? "on" : elem.value;
+ }
+
+ // Everything else, we just grab the value
+ return (elem.value || "").replace(rreturn, "");
+
+ }
+
+ return undefined;
+ }
+
+ var isFunction = jQuery.isFunction(value);
+
+ return this.each(function(i) {
+ var self = jQuery(this), val = value;
+
+ if ( this.nodeType !== 1 ) {
+ return;
+ }
+
+ if ( isFunction ) {
+ val = value.call(this, i, self.val());
+ }
+
+ // Typecast each time, since a function value may produce a
+ // different result for each element.
+ if ( typeof val === "number" ) {
+ val += "";
+ }
+
+ if ( jQuery.isArray(val) && rradiocheck.test( this.type ) ) {
+ this.checked = jQuery.inArray( self.val(), val ) >= 0;
+
+ } else if ( jQuery.nodeName( this, "select" ) ) {
+ var values = jQuery.makeArray(val);
+
+ jQuery( "option", this ).each(function() {
+ this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0;
+ });
+
+ if ( !values.length ) {
+ this.selectedIndex = -1;
+ }
+
+ } else {
+ this.value = val;
+ }
+ });
+ }
+});
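+// Editor's usage sketch (illustrative; "#opts" is an assumed multi-select,
+// isOn an assumed boolean):
+//   jQuery( "#box" ).toggleClass( "active", isOn ); // add when true, remove when false
+//   jQuery( "#opts" ).val( [ "a", "b" ] );          // selects the matching options
+//   jQuery( "#opts" ).val();                        // => array of selected values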
+
+jQuery.extend({
+ attrFn: {
+ val: true,
+ css: true,
+ html: true,
+ text: true,
+ data: true,
+ width: true,
+ height: true,
+ offset: true
+ },
+
+ attr: function( elem, name, value, pass ) {
+ // don't set attributes on text and comment nodes
+ if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 ) {
+ return undefined;
+ }
+
+ if ( pass && name in jQuery.attrFn ) {
+ return jQuery(elem)[name](value);
+ }
+
+ var notxml = elem.nodeType !== 1 || !jQuery.isXMLDoc( elem ),
+ // Whether we are setting (or getting)
+ set = value !== undefined;
+
+ // Try to normalize/fix the name
+ name = notxml && jQuery.props[ name ] || name;
+
+ // Only do all the following if this is a node (faster for style)
+ if ( elem.nodeType === 1 ) {
+ // These attributes require special treatment
+ var special = rspecialurl.test( name );
+
+ // Safari mis-reports the default selected property of an option
+ // Accessing the parent's selectedIndex property fixes it
+ if ( name === "selected" && !jQuery.support.optSelected ) {
+ var parent = elem.parentNode;
+ if ( parent ) {
+ parent.selectedIndex;
+
+ // Make sure that it also works with optgroups, see #5701
+ if ( parent.parentNode ) {
+ parent.parentNode.selectedIndex;
+ }
+ }
+ }
+
+ // If applicable, access the attribute via the DOM 0 way
+ if ( name in elem && notxml && !special ) {
+ if ( set ) {
+ // We can't allow the type property to be changed (since it causes problems in IE)
+ if ( name === "type" && rtype.test( elem.nodeName ) && elem.parentNode ) {
+ jQuery.error( "type property can't be changed" );
+ }
+
+ elem[ name ] = value;
+ }
+
+ // Browsers index elements by id/name on forms; give priority to attributes.
+ if ( jQuery.nodeName( elem, "form" ) && elem.getAttributeNode(name) ) {
+ return elem.getAttributeNode( name ).nodeValue;
+ }
+
+ // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set
+ // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/
+ if ( name === "tabIndex" ) {
+ var attributeNode = elem.getAttributeNode( "tabIndex" );
+
+ return attributeNode && attributeNode.specified ?
+ attributeNode.value :
+ rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ?
+ 0 :
+ undefined;
+ }
+
+ return elem[ name ];
+ }
+
+ if ( !jQuery.support.style && notxml && name === "style" ) {
+ if ( set ) {
+ elem.style.cssText = "" + value;
+ }
+
+ return elem.style.cssText;
+ }
+
+ if ( set ) {
+ // Convert the value to a string (all browsers except IE do this automatically); see #1070
+ elem.setAttribute( name, "" + value );
+ }
+
+ var attr = !jQuery.support.hrefNormalized && notxml && special ?
+ // Some attributes require a special call on IE
+ elem.getAttribute( name, 2 ) :
+ elem.getAttribute( name );
+
+ // Non-existent attributes return null, we normalize to undefined
+ return attr === null ? undefined : attr;
+ }
+
+ // elem is actually elem.style ... set the style
+ // Using attr for specific style information is now deprecated. Use style instead.
+ return jQuery.style( elem, name, value );
+ }
+});
+var rnamespaces = /\.(.*)$/,
+ fcleanup = function( nm ) {
+ return nm.replace(/[^\w\s\.\|`]/g, function( ch ) {
+ return "\\" + ch;
+ });
+ };
+
+/*
+ * A number of helper functions used for managing events.
+ * Many of the ideas behind this code originated from
+ * Dean Edwards' addEvent library.
+ */
+jQuery.event = {
+
+ // Bind an event to an element
+ // Original by Dean Edwards
+ add: function( elem, types, handler, data ) {
+ if ( elem.nodeType === 3 || elem.nodeType === 8 ) {
+ return;
+ }
+
+ // For whatever reason, IE has trouble passing the window object
+ // around, causing it to be cloned in the process
+ if ( elem.setInterval && ( elem !== window && !elem.frameElement ) ) {
+ elem = window;
+ }
+
+ var handleObjIn, handleObj;
+
+ if ( handler.handler ) {
+ handleObjIn = handler;
+ handler = handleObjIn.handler;
+ }
+
+ // Make sure that the function being executed has a unique ID
+ if ( !handler.guid ) {
+ handler.guid = jQuery.guid++;
+ }
+
+ // Init the element's event structure
+ var elemData = jQuery.data( elem );
+
+ // If no elemData is found then we must be trying to bind to one of the
+ // banned noData elements
+ if ( !elemData ) {
+ return;
+ }
+
+ var events = elemData.events = elemData.events || {},
+ eventHandle = elemData.handle;
+
+ if ( !eventHandle ) {
+ elemData.handle = eventHandle = function() {
+ // Handle the second event of a trigger and when
+ // an event is called after a page has unloaded
+ return typeof jQuery !== "undefined" && !jQuery.event.triggered ?
+ jQuery.event.handle.apply( eventHandle.elem, arguments ) :
+ undefined;
+ };
+ }
+
+ // Add elem as a property of the handle function
+ // This is to prevent a memory leak with non-native events in IE.
+ eventHandle.elem = elem;
+
+ // Handle multiple events separated by a space
+ // jQuery(...).bind("mouseover mouseout", fn);
+ types = types.split(" ");
+
+ var type, i = 0, namespaces;
+
+ while ( (type = types[ i++ ]) ) {
+ handleObj = handleObjIn ?
+ jQuery.extend({}, handleObjIn) :
+ { handler: handler, data: data };
+
+ // Namespaced event handlers
+ if ( type.indexOf(".") > -1 ) {
+ namespaces = type.split(".");
+ type = namespaces.shift();
+ handleObj.namespace = namespaces.slice(0).sort().join(".");
+
+ } else {
+ namespaces = [];
+ handleObj.namespace = "";
+ }
+
+ handleObj.type = type;
+ handleObj.guid = handler.guid;
+
+ // Get the current list of functions bound to this event
+ var handlers = events[ type ],
+ special = jQuery.event.special[ type ] || {};
+
+ // Init the event handler queue
+ if ( !handlers ) {
+ handlers = events[ type ] = [];
+
+ // Check for a special event handler
+ // Only use addEventListener/attachEvent if the special
+ // events handler returns false
+ if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) {
+ // Bind the global event handler to the element
+ if ( elem.addEventListener ) {
+ elem.addEventListener( type, eventHandle, false );
+
+ } else if ( elem.attachEvent ) {
+ elem.attachEvent( "on" + type, eventHandle );
+ }
+ }
+ }
+
+ if ( special.add ) {
+ special.add.call( elem, handleObj );
+
+ if ( !handleObj.handler.guid ) {
+ handleObj.handler.guid = handler.guid;
+ }
+ }
+
+ // Add the function to the element's handler list
+ handlers.push( handleObj );
+
+ // Keep track of which events have been used, for global triggering
+ jQuery.event.global[ type ] = true;
+ }
+
+ // Nullify elem to prevent memory leaks in IE
+ elem = null;
+ },
+
+ global: {},
+
+ // Detach an event or set of events from an element
+ remove: function( elem, types, handler, pos ) {
+ // don't do events on text and comment nodes
+ if ( elem.nodeType === 3 || elem.nodeType === 8 ) {
+ return;
+ }
+
+ var ret, type, fn, i = 0, all, namespaces, namespace, special, eventType, handleObj, origType,
+ elemData = jQuery.data( elem ),
+ events = elemData && elemData.events;
+
+ if ( !elemData || !events ) {
+ return;
+ }
+
+ // types is actually an event object here
+ if ( types && types.type ) {
+ handler = types.handler;
+ types = types.type;
+ }
+
+ // Unbind all events for the element
+ if ( !types || typeof types === "string" && types.charAt(0) === "." ) {
+ types = types || "";
+
+ for ( type in events ) {
+ jQuery.event.remove( elem, type + types );
+ }
+
+ return;
+ }
+
+ // Handle multiple events separated by a space
+ // jQuery(...).unbind("mouseover mouseout", fn);
+ types = types.split(" ");
+
+ while ( (type = types[ i++ ]) ) {
+ origType = type;
+ handleObj = null;
+ all = type.indexOf(".") < 0;
+ namespaces = [];
+
+ if ( !all ) {
+ // Namespaced event handlers
+ namespaces = type.split(".");
+ type = namespaces.shift();
+
+ namespace = new RegExp("(^|\\.)" +
+ jQuery.map( namespaces.slice(0).sort(), fcleanup ).join("\\.(?:.*\\.)?") + "(\\.|$)");
+ }
+
+ eventType = events[ type ];
+
+ if ( !eventType ) {
+ continue;
+ }
+
+ if ( !handler ) {
+ for ( var j = 0; j < eventType.length; j++ ) {
+ handleObj = eventType[ j ];
+
+ if ( all || namespace.test( handleObj.namespace ) ) {
+ jQuery.event.remove( elem, origType, handleObj.handler, j );
+ eventType.splice( j--, 1 );
+ }
+ }
+
+ continue;
+ }
+
+ special = jQuery.event.special[ type ] || {};
+
+ for ( var j = pos || 0; j < eventType.length; j++ ) {
+ handleObj = eventType[ j ];
+
+ if ( handler.guid === handleObj.guid ) {
+ // remove the given handler for the given type
+ if ( all || namespace.test( handleObj.namespace ) ) {
+ if ( pos == null ) {
+ eventType.splice( j--, 1 );
+ }
+
+ if ( special.remove ) {
+ special.remove.call( elem, handleObj );
+ }
+ }
+
+ if ( pos != null ) {
+ break;
+ }
+ }
+ }
+
+ // remove generic event handler if no more handlers exist
+ if ( eventType.length === 0 || pos != null && eventType.length === 1 ) {
+ if ( !special.teardown || special.teardown.call( elem, namespaces ) === false ) {
+ removeEvent( elem, type, elemData.handle );
+ }
+
+ ret = null;
+ delete events[ type ];
+ }
+ }
+
+ // Remove the expando if it's no longer used
+ if ( jQuery.isEmptyObject( events ) ) {
+ var handle = elemData.handle;
+ if ( handle ) {
+ handle.elem = null;
+ }
+
+ delete elemData.events;
+ delete elemData.handle;
+
+ if ( jQuery.isEmptyObject( elemData ) ) {
+ jQuery.removeData( elem );
+ }
+ }
+ },
+
+ // bubbling is internal
+ trigger: function( event, data, elem /*, bubbling */ ) {
+ // Event object or event type
+ var type = event.type || event,
+ bubbling = arguments[3];
+
+ if ( !bubbling ) {
+ event = typeof event === "object" ?
+ // jQuery.Event object
+ event[expando] ? event :
+ // Object literal
+ jQuery.extend( jQuery.Event(type), event ) :
+ // Just the event type (string)
+ jQuery.Event(type);
+
+ if ( type.indexOf("!") >= 0 ) {
+ event.type = type = type.slice(0, -1);
+ event.exclusive = true;
+ }
+
+ // Handle a global trigger
+ if ( !elem ) {
+ // Don't bubble custom events when global (to avoid too much overhead)
+ event.stopPropagation();
+
+ // Only trigger if we've ever bound an event for it
+ if ( jQuery.event.global[ type ] ) {
+ jQuery.each( jQuery.cache, function() {
+ if ( this.events && this.events[type] ) {
+ jQuery.event.trigger( event, data, this.handle.elem );
+ }
+ });
+ }
+ }
+
+ // Handle triggering a single element
+
+ // don't do events on text and comment nodes
+ if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 ) {
+ return undefined;
+ }
+
+ // Clean up in case it is reused
+ event.result = undefined;
+ event.target = elem;
+
+ // Clone the incoming data, if any
+ data = jQuery.makeArray( data );
+ data.unshift( event );
+ }
+
+ event.currentTarget = elem;
+
+ // Trigger the event, it is assumed that "handle" is a function
+ var handle = jQuery.data( elem, "handle" );
+ if ( handle ) {
+ handle.apply( elem, data );
+ }
+
+ var parent = elem.parentNode || elem.ownerDocument;
+
+ // Trigger an inline bound script
+ try {
+ if ( !(elem && elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()]) ) {
+ if ( elem[ "on" + type ] && elem[ "on" + type ].apply( elem, data ) === false ) {
+ event.result = false;
+ }
+ }
+
+ // prevent IE from throwing an error for some elements with some event types, see #3533
+ } catch (e) {}
+
+ if ( !event.isPropagationStopped() && parent ) {
+ jQuery.event.trigger( event, data, parent, true );
+
+ } else if ( !event.isDefaultPrevented() ) {
+ var target = event.target, old,
+ isClick = jQuery.nodeName(target, "a") && type === "click",
+ special = jQuery.event.special[ type ] || {};
+
+ if ( (!special._default || special._default.call( elem, event ) === false) &&
+ !isClick && !(target && target.nodeName && jQuery.noData[target.nodeName.toLowerCase()]) ) {
+
+ try {
+ if ( target[ type ] ) {
+ // Make sure that we don't accidentally re-trigger the onFOO events
+ old = target[ "on" + type ];
+
+ if ( old ) {
+ target[ "on" + type ] = null;
+ }
+
+ jQuery.event.triggered = true;
+ target[ type ]();
+ }
+
+ // prevent IE from throwing an error for some elements with some event types, see #3533
+ } catch (e) {}
+
+ if ( old ) {
+ target[ "on" + type ] = old;
+ }
+
+ jQuery.event.triggered = false;
+ }
+ }
+ },
+
+ handle: function( event ) {
+ var all, handlers, namespaces, namespace, events;
+
+ event = arguments[0] = jQuery.event.fix( event || window.event );
+ event.currentTarget = this;
+
+ // Namespaced event handlers
+ all = event.type.indexOf(".") < 0 && !event.exclusive;
+
+ if ( !all ) {
+ namespaces = event.type.split(".");
+ event.type = namespaces.shift();
+ namespace = new RegExp("(^|\\.)" + namespaces.slice(0).sort().join("\\.(?:.*\\.)?") + "(\\.|$)");
+ }
+
+ events = jQuery.data( this, "events" );
+ handlers = events && events[ event.type ];
+
+ if ( events && handlers ) {
+ // Clone the handlers to prevent manipulation
+ handlers = handlers.slice(0);
+
+ for ( var j = 0, l = handlers.length; j < l; j++ ) {
+ var handleObj = handlers[ j ];
+
+ // Filter the functions by class
+ if ( all || namespace.test( handleObj.namespace ) ) {
+ // Pass in a reference to the handler function itself
+ // So that we can later remove it
+ event.handler = handleObj.handler;
+ event.data = handleObj.data;
+ event.handleObj = handleObj;
+
+ var ret = handleObj.handler.apply( this, arguments );
+
+ if ( ret !== undefined ) {
+ event.result = ret;
+ if ( ret === false ) {
+ event.preventDefault();
+ event.stopPropagation();
+ }
+ }
+
+ if ( event.isImmediatePropagationStopped() ) {
+ break;
+ }
+ }
+ }
+ }
+
+ return event.result;
+ },
+
+ props: "altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),
+
+ fix: function( event ) {
+ if ( event[ expando ] ) {
+ return event;
+ }
+
+ // store a copy of the original event object
+ // and "clone" to set read-only properties
+ var originalEvent = event;
+ event = jQuery.Event( originalEvent );
+
+ for ( var i = this.props.length, prop; i; ) {
+ prop = this.props[ --i ];
+ event[ prop ] = originalEvent[ prop ];
+ }
+
+ // Fix target property, if necessary
+ if ( !event.target ) {
+ event.target = event.srcElement || document; // Fixes #1925 where srcElement might not be defined either
+ }
+
+ // check if target is a textnode (safari)
+ if ( event.target.nodeType === 3 ) {
+ event.target = event.target.parentNode;
+ }
+
+ // Add relatedTarget, if necessary
+ if ( !event.relatedTarget && event.fromElement ) {
+ event.relatedTarget = event.fromElement === event.target ? event.toElement : event.fromElement;
+ }
+
+ // Calculate pageX/Y if missing and clientX/Y available
+ if ( event.pageX == null && event.clientX != null ) {
+ var doc = document.documentElement, body = document.body;
+ event.pageX = event.clientX + (doc && doc.scrollLeft || body && body.scrollLeft || 0) - (doc && doc.clientLeft || body && body.clientLeft || 0);
+ event.pageY = event.clientY + (doc && doc.scrollTop || body && body.scrollTop || 0) - (doc && doc.clientTop || body && body.clientTop || 0);
+ }
+
+ // Add which for key events
+ if ( !event.which && ((event.charCode || event.charCode === 0) ? event.charCode : event.keyCode) ) {
+ event.which = event.charCode || event.keyCode;
+ }
+
+ // Add metaKey to non-Mac browsers (use ctrl for PC's and Meta for Macs)
+ if ( !event.metaKey && event.ctrlKey ) {
+ event.metaKey = event.ctrlKey;
+ }
+
+ // Add which for click: 1 === left; 2 === middle; 3 === right
+ // Note: button is not normalized, so don't use it
+ if ( !event.which && event.button !== undefined ) {
+ event.which = (event.button & 1 ? 1 : ( event.button & 2 ? 3 : ( event.button & 4 ? 2 : 0 ) ));
+ }
+
+ return event;
+ },
+
+ // Deprecated, use jQuery.guid instead
+ guid: 1E8,
+
+ // Deprecated, use jQuery.proxy instead
+ proxy: jQuery.proxy,
+
+ special: {
+ ready: {
+ // Make sure the ready event is setup
+ setup: jQuery.bindReady,
+ teardown: jQuery.noop
+ },
+
+ live: {
+ add: function( handleObj ) {
+ jQuery.event.add( this, handleObj.origType, jQuery.extend({}, handleObj, {handler: liveHandler}) );
+ },
+
+ remove: function( handleObj ) {
+ var remove = true,
+ type = handleObj.origType.replace(rnamespaces, "");
+
+ jQuery.each( jQuery.data(this, "events").live || [], function() {
+ if ( type === this.origType.replace(rnamespaces, "") ) {
+ remove = false;
+ return false;
+ }
+ });
+
+ if ( remove ) {
+ jQuery.event.remove( this, handleObj.origType, liveHandler );
+ }
+ }
+
+ },
+
+ beforeunload: {
+ setup: function( data, namespaces, eventHandle ) {
+ // We only want to do this special case on window objects
+ if ( this.setInterval ) {
+ this.onbeforeunload = eventHandle;
+ }
+
+ return false;
+ },
+ teardown: function( namespaces, eventHandle ) {
+ if ( this.onbeforeunload === eventHandle ) {
+ this.onbeforeunload = null;
+ }
+ }
+ }
+ }
+};
+
+var removeEvent = document.removeEventListener ?
+ function( elem, type, handle ) {
+ elem.removeEventListener( type, handle, false );
+ } :
+ function( elem, type, handle ) {
+ elem.detachEvent( "on" + type, handle );
+ };
+
+jQuery.Event = function( src ) {
+ // Allow instantiation without the 'new' keyword
+ if ( !this.preventDefault ) {
+ return new jQuery.Event( src );
+ }
+
+ // Event object
+ if ( src && src.type ) {
+ this.originalEvent = src;
+ this.type = src.type;
+ // Event type
+ } else {
+ this.type = src;
+ }
+
+ // timeStamp is buggy for some events on Firefox (#3843)
+ // So we won't rely on the native value
+ this.timeStamp = now();
+
+ // Mark it as fixed
+ this[ expando ] = true;
+};
+
+function returnFalse() {
+ return false;
+}
+function returnTrue() {
+ return true;
+}
+
+// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding
+// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html
+jQuery.Event.prototype = {
+ preventDefault: function() {
+ this.isDefaultPrevented = returnTrue;
+
+ var e = this.originalEvent;
+ if ( !e ) {
+ return;
+ }
+
+ // if preventDefault exists run it on the original event
+ if ( e.preventDefault ) {
+ e.preventDefault();
+ }
+ // otherwise set the returnValue property of the original event to false (IE)
+ e.returnValue = false;
+ },
+ stopPropagation: function() {
+ this.isPropagationStopped = returnTrue;
+
+ var e = this.originalEvent;
+ if ( !e ) {
+ return;
+ }
+ // if stopPropagation exists run it on the original event
+ if ( e.stopPropagation ) {
+ e.stopPropagation();
+ }
+ // otherwise set the cancelBubble property of the original event to true (IE)
+ e.cancelBubble = true;
+ },
+ stopImmediatePropagation: function() {
+ this.isImmediatePropagationStopped = returnTrue;
+ this.stopPropagation();
+ },
+ isDefaultPrevented: returnFalse,
+ isPropagationStopped: returnFalse,
+ isImmediatePropagationStopped: returnFalse
+};
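+// Editor's usage sketch (illustrative; "#btn" is an assumed element): an event
+// can be built without `new`, cancelled up front, and handed to trigger:
+//   var e = jQuery.Event( "click" );
+//   e.preventDefault();            // isDefaultPrevented() now returns true
+//   jQuery( "#btn" ).trigger( e ); // handlers run, the default action is skipped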
+
+// Checks if an event happened on an element within another element
+// Used in jQuery.event.special.mouseenter and mouseleave handlers
+var withinElement = function( event ) {
+ // Check if mouse(over|out) are still within the same parent element
+ var parent = event.relatedTarget;
+
+ // Firefox sometimes assigns relatedTarget a XUL element
+ // which we cannot access the parentNode property of
+ try {
+ // Traverse up the tree
+ while ( parent && parent !== this ) {
+ parent = parent.parentNode;
+ }
+
+ if ( parent !== this ) {
+ // set the correct event type
+ event.type = event.data;
+
+ // handle the event if we actually just moused onto a non-sub-element
+ jQuery.event.handle.apply( this, arguments );
+ }
+
+ // assume we've left the element, since we most likely moused over a XUL element
+ } catch(e) { }
+},
+
+// In case of event delegation, we only need to rename the event.type,
+// liveHandler will take care of the rest.
+delegate = function( event ) {
+ event.type = event.data;
+ jQuery.event.handle.apply( this, arguments );
+};
+
+// Create mouseenter and mouseleave events
+jQuery.each({
+ mouseenter: "mouseover",
+ mouseleave: "mouseout"
+}, function( orig, fix ) {
+ jQuery.event.special[ orig ] = {
+ setup: function( data ) {
+ jQuery.event.add( this, fix, data && data.selector ? delegate : withinElement, orig );
+ },
+ teardown: function( data ) {
+ jQuery.event.remove( this, fix, data && data.selector ? delegate : withinElement );
+ }
+ };
+});
+
+// submit delegation
+if ( !jQuery.support.submitBubbles ) {
+
+ jQuery.event.special.submit = {
+ setup: function( data, namespaces ) {
+ if ( this.nodeName.toLowerCase() !== "form" ) {
+ jQuery.event.add(this, "click.specialSubmit", function( e ) {
+ var elem = e.target, type = elem.type;
+
+ if ( (type === "submit" || type === "image") && jQuery( elem ).closest("form").length ) {
+ return trigger( "submit", this, arguments );
+ }
+ });
+
+ jQuery.event.add(this, "keypress.specialSubmit", function( e ) {
+ var elem = e.target, type = elem.type;
+
+ if ( (type === "text" || type === "password") && jQuery( elem ).closest("form").length && e.keyCode === 13 ) {
+ return trigger( "submit", this, arguments );
+ }
+ });
+
+ } else {
+ return false;
+ }
+ },
+
+ teardown: function( namespaces ) {
+ jQuery.event.remove( this, ".specialSubmit" );
+ }
+ };
+
+}
+
+// Change delegation; defined here so that bind is available.
+if ( !jQuery.support.changeBubbles ) {
+
+ var formElems = /textarea|input|select/i,
+
+ changeFilters,
+
+ getVal = function( elem ) {
+ var type = elem.type, val = elem.value;
+
+ if ( type === "radio" || type === "checkbox" ) {
+ val = elem.checked;
+
+ } else if ( type === "select-multiple" ) {
+ val = elem.selectedIndex > -1 ?
+ jQuery.map( elem.options, function( elem ) {
+ return elem.selected;
+ }).join("-") :
+ "";
+
+ } else if ( elem.nodeName.toLowerCase() === "select" ) {
+ val = elem.selectedIndex;
+ }
+
+ return val;
+ },
+
+ testChange = function testChange( e ) {
+ var elem = e.target, data, val;
+
+ if ( !formElems.test( elem.nodeName ) || elem.readOnly ) {
+ return;
+ }
+
+ data = jQuery.data( elem, "_change_data" );
+ val = getVal(elem);
+
+ // the current data will also be retrieved by beforeactivate
+ if ( e.type !== "focusout" || elem.type !== "radio" ) {
+ jQuery.data( elem, "_change_data", val );
+ }
+
+ if ( data === undefined || val === data ) {
+ return;
+ }
+
+ if ( data != null || val ) {
+ e.type = "change";
+ return jQuery.event.trigger( e, arguments[1], elem );
+ }
+ };
+
+ jQuery.event.special.change = {
+ filters: {
+ focusout: testChange,
+
+ click: function( e ) {
+ var elem = e.target, type = elem.type;
+
+ if ( type === "radio" || type === "checkbox" || elem.nodeName.toLowerCase() === "select" ) {
+ return testChange.call( this, e );
+ }
+ },
+
+ // Change has to be called before submit
+ // Keydown will be called before keypress, which is used in submit-event delegation
+ keydown: function( e ) {
+ var elem = e.target, type = elem.type;
+
+ if ( (e.keyCode === 13 && elem.nodeName.toLowerCase() !== "textarea") ||
+ (e.keyCode === 32 && (type === "checkbox" || type === "radio")) ||
+ type === "select-multiple" ) {
+ return testChange.call( this, e );
+ }
+ },
+
+ // beforeactivate also fires before the previous element is blurred;
+ // a change event can't be triggered from it, but it is a good place
+ // to store the current value, so focus[in] is no longer needed
+ beforeactivate: function( e ) {
+ var elem = e.target;
+ jQuery.data( elem, "_change_data", getVal(elem) );
+ }
+ },
+
+ setup: function( data, namespaces ) {
+ if ( this.type === "file" ) {
+ return false;
+ }
+
+ for ( var type in changeFilters ) {
+ jQuery.event.add( this, type + ".specialChange", changeFilters[type] );
+ }
+
+ return formElems.test( this.nodeName );
+ },
+
+ teardown: function( namespaces ) {
+ jQuery.event.remove( this, ".specialChange" );
+
+ return formElems.test( this.nodeName );
+ }
+ };
+
+ changeFilters = jQuery.event.special.change.filters;
+}
+
+function trigger( type, elem, args ) {
+ args[0].type = type;
+ return jQuery.event.handle.apply( elem, args );
+}
+
+// Create "bubbling" focus and blur events
+if ( document.addEventListener ) {
+ jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) {
+ jQuery.event.special[ fix ] = {
+ setup: function() {
+ this.addEventListener( orig, handler, true );
+ },
+ teardown: function() {
+ this.removeEventListener( orig, handler, true );
+ }
+ };
+
+ function handler( e ) {
+ e = jQuery.event.fix( e );
+ e.type = fix;
+ return jQuery.event.handle.call( this, e );
+ }
+ });
+}
+
+jQuery.each(["bind", "one"], function( i, name ) {
+ jQuery.fn[ name ] = function( type, data, fn ) {
+ // Handle object literals
+ if ( typeof type === "object" ) {
+ for ( var key in type ) {
+ this[ name ](key, data, type[key], fn);
+ }
+ return this;
+ }
+
+ if ( jQuery.isFunction( data ) ) {
+ fn = data;
+ data = undefined;
+ }
+
+ var handler = name === "one" ? jQuery.proxy( fn, function( event ) {
+ jQuery( this ).unbind( event, handler );
+ return fn.apply( this, arguments );
+ }) : fn;
+
+ if ( type === "unload" && name !== "one" ) {
+ this.one( type, data, fn );
+
+ } else {
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ jQuery.event.add( this[i], type, handler, data );
+ }
+ }
+
+ return this;
+ };
+});
+
+jQuery.fn.extend({
+ unbind: function( type, fn ) {
+ // Handle object literals
+ if ( typeof type === "object" && !type.preventDefault ) {
+ for ( var key in type ) {
+ this.unbind(key, type[key]);
+ }
+
+ } else {
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ jQuery.event.remove( this[i], type, fn );
+ }
+ }
+
+ return this;
+ },
+
+ delegate: function( selector, types, data, fn ) {
+ return this.live( types, data, fn, selector );
+ },
+
+ undelegate: function( selector, types, fn ) {
+ if ( arguments.length === 0 ) {
+ return this.unbind( "live" );
+
+ } else {
+ return this.die( types, null, fn, selector );
+ }
+ },
+
+ trigger: function( type, data ) {
+ return this.each(function() {
+ jQuery.event.trigger( type, data, this );
+ });
+ },
+
+ triggerHandler: function( type, data ) {
+ if ( this[0] ) {
+ var event = jQuery.Event( type );
+ event.preventDefault();
+ event.stopPropagation();
+ jQuery.event.trigger( event, data, this[0] );
+ return event.result;
+ }
+ },
+
+ toggle: function( fn ) {
+ // Save reference to arguments for access in closure
+ var args = arguments, i = 1;
+
+ // link all the functions, so any of them can unbind this click handler
+ while ( i < args.length ) {
+ jQuery.proxy( fn, args[ i++ ] );
+ }
+
+ return this.click( jQuery.proxy( fn, function( event ) {
+ // Figure out which function to execute
+ var lastToggle = ( jQuery.data( this, "lastToggle" + fn.guid ) || 0 ) % i;
+ jQuery.data( this, "lastToggle" + fn.guid, lastToggle + 1 );
+
+ // Make sure that clicks stop
+ event.preventDefault();
+
+ // and execute the function
+ return args[ lastToggle ].apply( this, arguments ) || false;
+ }));
+ },
+
+ hover: function( fnOver, fnOut ) {
+ return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver );
+ }
+});
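+// Editor's usage sketch (illustrative; "#menu" is an assumed element): hover()
+// simply pairs the mouseenter/mouseleave handlers:
+//   jQuery( "#menu" ).hover(
+//     function() { jQuery( this ).addClass( "open" ); },
+//     function() { jQuery( this ).removeClass( "open" ); }
+//   );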
+
+var liveMap = {
+ focus: "focusin",
+ blur: "focusout",
+ mouseenter: "mouseover",
+ mouseleave: "mouseout"
+};
+
+jQuery.each(["live", "die"], function( i, name ) {
+ jQuery.fn[ name ] = function( types, data, fn, origSelector /* Internal Use Only */ ) {
+ var type, i = 0, match, namespaces, preType,
+ selector = origSelector || this.selector,
+ context = origSelector ? this : jQuery( this.context );
+
+ if ( jQuery.isFunction( data ) ) {
+ fn = data;
+ data = undefined;
+ }
+
+ types = (types || "").split(" ");
+
+ while ( (type = types[ i++ ]) != null ) {
+ match = rnamespaces.exec( type );
+ namespaces = "";
+
+ if ( match ) {
+ namespaces = match[0];
+ type = type.replace( rnamespaces, "" );
+ }
+
+ if ( type === "hover" ) {
+ types.push( "mouseenter" + namespaces, "mouseleave" + namespaces );
+ continue;
+ }
+
+ preType = type;
+
+ if ( type === "focus" || type === "blur" ) {
+ types.push( liveMap[ type ] + namespaces );
+ type = type + namespaces;
+
+ } else {
+ type = (liveMap[ type ] || type) + namespaces;
+ }
+
+ if ( name === "live" ) {
+ // bind live handler
+ context.each(function(){
+ jQuery.event.add( this, liveConvert( type, selector ),
+ { data: data, selector: selector, handler: fn, origType: type, origHandler: fn, preType: preType } );
+ });
+
+ } else {
+ // unbind live handler
+ context.unbind( liveConvert( type, selector ), fn );
+ }
+ }
+
+ return this;
+ }
+});
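+// Editor's usage sketch (illustrative, not part of the library): live() binds
+// a delegated handler on the context (the document by default), so it also
+// matches elements added later; die() removes it:
+//   jQuery( "a.note" ).live( "click", function() { return false; } );
+//   jQuery( "a.note" ).die( "click" );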
+
+function liveHandler( event ) {
+ var stop, elems = [], selectors = [], args = arguments,
+ related, match, handleObj, elem, j, i, l, data,
+ events = jQuery.data( this, "events" );
+
+ // Make sure we avoid non-left-click bubbling in Firefox (#3861)
+ if ( event.liveFired === this || !events || !events.live || event.button && event.type === "click" ) {
+ return;
+ }
+
+ event.liveFired = this;
+
+ var live = events.live.slice(0);
+
+ for ( j = 0; j < live.length; j++ ) {
+ handleObj = live[j];
+
+ if ( handleObj.origType.replace( rnamespaces, "" ) === event.type ) {
+ selectors.push( handleObj.selector );
+
+ } else {
+ live.splice( j--, 1 );
+ }
+ }
+
+ match = jQuery( event.target ).closest( selectors, event.currentTarget );
+
+ for ( i = 0, l = match.length; i < l; i++ ) {
+ for ( j = 0; j < live.length; j++ ) {
+ handleObj = live[j];
+
+ if ( match[i].selector === handleObj.selector ) {
+ elem = match[i].elem;
+ related = null;
+
+ // Those two events require additional checking
+ if ( handleObj.preType === "mouseenter" || handleObj.preType === "mouseleave" ) {
+ related = jQuery( event.relatedTarget ).closest( handleObj.selector )[0];
+ }
+
+ if ( !related || related !== elem ) {
+ elems.push({ elem: elem, handleObj: handleObj });
+ }
+ }
+ }
+ }
+
+ for ( i = 0, l = elems.length; i < l; i++ ) {
+ match = elems[i];
+ event.currentTarget = match.elem;
+ event.data = match.handleObj.data;
+ event.handleObj = match.handleObj;
+
+ if ( match.handleObj.origHandler.apply( match.elem, args ) === false ) {
+ stop = false;
+ break;
+ }
+ }
+
+ return stop;
+}
+
+function liveConvert( type, selector ) {
+ return "live." + (type && type !== "*" ? type + "." : "") + selector.replace(/\./g, "`").replace(/ /g, "&");
+}
+
+jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " +
+ "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " +
+ "change select submit keydown keypress keyup error").split(" "), function( i, name ) {
+
+ // Handle event binding
+ jQuery.fn[ name ] = function( fn ) {
+ return fn ? this.bind( name, fn ) : this.trigger( name );
+ };
+
+ if ( jQuery.attrFn ) {
+ jQuery.attrFn[ name ] = true;
+ }
+});
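+// Editor's usage sketch (illustrative; "#btn" is an assumed element): each
+// generated shortcut binds when passed a handler and triggers when called
+// without one:
+//   jQuery( "#btn" ).click(function() { jQuery( this ).toggleClass( "on" ); });
+//   jQuery( "#btn" ).click(); // equivalent to .trigger( "click" )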
+
+// Prevent memory leaks in IE
+// Window isn't included so as not to unbind existing unload events
+// More info:
+// - http://isaacschlueter.com/2006/10/msie-memory-leaks/
+if ( window.attachEvent && !window.addEventListener ) {
+ window.attachEvent("onunload", function() {
+ for ( var id in jQuery.cache ) {
+ if ( jQuery.cache[ id ].handle ) {
+ // Try/Catch is to handle iframes being unloaded, see #4280
+ try {
+ jQuery.event.remove( jQuery.cache[ id ].handle.elem );
+ } catch(e) {}
+ }
+ }
+ });
+}
+/*!
+ * Sizzle CSS Selector Engine - v1.0
+ * Copyright 2009, The Dojo Foundation
+ * Released under the MIT, BSD, and GPL Licenses.
+ * More information: http://sizzlejs.com/
+ */
+(function(){
+
+var chunker = /((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^[\]]*\]|['"][^'"]*['"]|[^[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,
+ done = 0,
+ toString = Object.prototype.toString,
+ hasDuplicate = false,
+ baseHasDuplicate = true;
+
+// Here we check if the JavaScript engine is using some sort of
+// optimization where it does not always call our comparison
+// function. If that is the case, discard the hasDuplicate value.
+// Thus far that includes Google Chrome.
+[0, 0].sort(function(){
+ baseHasDuplicate = false;
+ return 0;
+});
+
+var Sizzle = function(selector, context, results, seed) {
+ results = results || [];
+ var origContext = context = context || document;
+
+ if ( context.nodeType !== 1 && context.nodeType !== 9 ) {
+ return [];
+ }
+
+ if ( !selector || typeof selector !== "string" ) {
+ return results;
+ }
+
+ var parts = [], m, set, checkSet, extra, prune = true, contextXML = isXML(context),
+ soFar = selector;
+
+	// Reset the position of the chunker regexp (start from the beginning)
+ while ( (chunker.exec(""), m = chunker.exec(soFar)) !== null ) {
+ soFar = m[3];
+
+ parts.push( m[1] );
+
+ if ( m[2] ) {
+ extra = m[3];
+ break;
+ }
+ }
+
+ if ( parts.length > 1 && origPOS.exec( selector ) ) {
+ if ( parts.length === 2 && Expr.relative[ parts[0] ] ) {
+ set = posProcess( parts[0] + parts[1], context );
+ } else {
+ set = Expr.relative[ parts[0] ] ?
+ [ context ] :
+ Sizzle( parts.shift(), context );
+
+ while ( parts.length ) {
+ selector = parts.shift();
+
+ if ( Expr.relative[ selector ] ) {
+ selector += parts.shift();
+ }
+
+ set = posProcess( selector, set );
+ }
+ }
+ } else {
+		// Take a shortcut and set the context if the root selector is an ID
+		// (but not when the innermost selector is also an ID, since that
+		// lookup would be faster on its own)
+ if ( !seed && parts.length > 1 && context.nodeType === 9 && !contextXML &&
+ Expr.match.ID.test(parts[0]) && !Expr.match.ID.test(parts[parts.length - 1]) ) {
+ var ret = Sizzle.find( parts.shift(), context, contextXML );
+ context = ret.expr ? Sizzle.filter( ret.expr, ret.set )[0] : ret.set[0];
+ }
+
+ if ( context ) {
+ var ret = seed ?
+ { expr: parts.pop(), set: makeArray(seed) } :
+ Sizzle.find( parts.pop(), parts.length === 1 && (parts[0] === "~" || parts[0] === "+") && context.parentNode ? context.parentNode : context, contextXML );
+ set = ret.expr ? Sizzle.filter( ret.expr, ret.set ) : ret.set;
+
+ if ( parts.length > 0 ) {
+ checkSet = makeArray(set);
+ } else {
+ prune = false;
+ }
+
+ while ( parts.length ) {
+ var cur = parts.pop(), pop = cur;
+
+ if ( !Expr.relative[ cur ] ) {
+ cur = "";
+ } else {
+ pop = parts.pop();
+ }
+
+ if ( pop == null ) {
+ pop = context;
+ }
+
+ Expr.relative[ cur ]( checkSet, pop, contextXML );
+ }
+ } else {
+ checkSet = parts = [];
+ }
+ }
+
+ if ( !checkSet ) {
+ checkSet = set;
+ }
+
+ if ( !checkSet ) {
+ Sizzle.error( cur || selector );
+ }
+
+ if ( toString.call(checkSet) === "[object Array]" ) {
+ if ( !prune ) {
+ results.push.apply( results, checkSet );
+ } else if ( context && context.nodeType === 1 ) {
+ for ( var i = 0; checkSet[i] != null; i++ ) {
+ if ( checkSet[i] && (checkSet[i] === true || checkSet[i].nodeType === 1 && contains(context, checkSet[i])) ) {
+ results.push( set[i] );
+ }
+ }
+ } else {
+ for ( var i = 0; checkSet[i] != null; i++ ) {
+ if ( checkSet[i] && checkSet[i].nodeType === 1 ) {
+ results.push( set[i] );
+ }
+ }
+ }
+ } else {
+ makeArray( checkSet, results );
+ }
+
+ if ( extra ) {
+ Sizzle( extra, origContext, results, seed );
+ Sizzle.uniqueSort( results );
+ }
+
+ return results;
+};
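+// Editorial usage sketch, assuming a loaded HTML document: Sizzle takes a
+// selector, an optional context node, an optional results array to append
+// to, and an optional seed set to filter instead of querying the DOM:
+//   var cells = Sizzle( "table.data td", document );
+//   var first = Sizzle( "td:first", null, null, cells );  // filters seed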
+
+Sizzle.uniqueSort = function(results){
+ if ( sortOrder ) {
+ hasDuplicate = baseHasDuplicate;
+ results.sort(sortOrder);
+
+ if ( hasDuplicate ) {
+ for ( var i = 1; i < results.length; i++ ) {
+ if ( results[i] === results[i-1] ) {
+ results.splice(i--, 1);
+ }
+ }
+ }
+ }
+
+ return results;
+};
+
+Sizzle.matches = function(expr, set){
+ return Sizzle(expr, null, null, set);
+};
+
+Sizzle.find = function(expr, context, isXML){
+ var set, match;
+
+ if ( !expr ) {
+ return [];
+ }
+
+ for ( var i = 0, l = Expr.order.length; i < l; i++ ) {
+ var type = Expr.order[i], match;
+
+ if ( (match = Expr.leftMatch[ type ].exec( expr )) ) {
+ var left = match[1];
+ match.splice(1,1);
+
+ if ( left.substr( left.length - 1 ) !== "\\" ) {
+ match[1] = (match[1] || "").replace(/\\/g, "");
+ set = Expr.find[ type ]( match, context, isXML );
+ if ( set != null ) {
+ expr = expr.replace( Expr.match[ type ], "" );
+ break;
+ }
+ }
+ }
+ }
+
+ if ( !set ) {
+ set = context.getElementsByTagName("*");
+ }
+
+ return {set: set, expr: expr};
+};
+
+Sizzle.filter = function(expr, set, inplace, not){
+ var old = expr, result = [], curLoop = set, match, anyFound,
+ isXMLFilter = set && set[0] && isXML(set[0]);
+
+ while ( expr && set.length ) {
+ for ( var type in Expr.filter ) {
+ if ( (match = Expr.leftMatch[ type ].exec( expr )) != null && match[2] ) {
+ var filter = Expr.filter[ type ], found, item, left = match[1];
+ anyFound = false;
+
+ match.splice(1,1);
+
+ if ( left.substr( left.length - 1 ) === "\\" ) {
+ continue;
+ }
+
+ if ( curLoop === result ) {
+ result = [];
+ }
+
+ if ( Expr.preFilter[ type ] ) {
+ match = Expr.preFilter[ type ]( match, curLoop, inplace, result, not, isXMLFilter );
+
+ if ( !match ) {
+ anyFound = found = true;
+ } else if ( match === true ) {
+ continue;
+ }
+ }
+
+ if ( match ) {
+ for ( var i = 0; (item = curLoop[i]) != null; i++ ) {
+ if ( item ) {
+ found = filter( item, match, i, curLoop );
+ var pass = not ^ !!found;
+
+ if ( inplace && found != null ) {
+ if ( pass ) {
+ anyFound = true;
+ } else {
+ curLoop[i] = false;
+ }
+ } else if ( pass ) {
+ result.push( item );
+ anyFound = true;
+ }
+ }
+ }
+ }
+
+ if ( found !== undefined ) {
+ if ( !inplace ) {
+ curLoop = result;
+ }
+
+ expr = expr.replace( Expr.match[ type ], "" );
+
+ if ( !anyFound ) {
+ return [];
+ }
+
+ break;
+ }
+ }
+ }
+
+ // Improper expression
+ if ( expr === old ) {
+ if ( anyFound == null ) {
+ Sizzle.error( expr );
+ } else {
+ break;
+ }
+ }
+
+ old = expr;
+ }
+
+ return curLoop;
+};
+
+Sizzle.error = function( msg ) {
+ throw "Syntax error, unrecognized expression: " + msg;
+};
+
+var Expr = Sizzle.selectors = {
+ order: [ "ID", "NAME", "TAG" ],
+ match: {
+ ID: /#((?:[\w\u00c0-\uFFFF-]|\\.)+)/,
+ CLASS: /\.((?:[\w\u00c0-\uFFFF-]|\\.)+)/,
+ NAME: /\[name=['"]*((?:[\w\u00c0-\uFFFF-]|\\.)+)['"]*\]/,
+ ATTR: /\[\s*((?:[\w\u00c0-\uFFFF-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,
+ TAG: /^((?:[\w\u00c0-\uFFFF\*-]|\\.)+)/,
+ CHILD: /:(only|nth|last|first)-child(?:\((even|odd|[\dn+-]*)\))?/,
+ POS: /:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^-]|$)/,
+ PSEUDO: /:((?:[\w\u00c0-\uFFFF-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/
+ },
+ leftMatch: {},
+ attrMap: {
+ "class": "className",
+ "for": "htmlFor"
+ },
+ attrHandle: {
+ href: function(elem){
+ return elem.getAttribute("href");
+ }
+ },
+ relative: {
+ "+": function(checkSet, part){
+ var isPartStr = typeof part === "string",
+ isTag = isPartStr && !/\W/.test(part),
+ isPartStrNotTag = isPartStr && !isTag;
+
+ if ( isTag ) {
+ part = part.toLowerCase();
+ }
+
+ for ( var i = 0, l = checkSet.length, elem; i < l; i++ ) {
+ if ( (elem = checkSet[i]) ) {
+ while ( (elem = elem.previousSibling) && elem.nodeType !== 1 ) {}
+
+ checkSet[i] = isPartStrNotTag || elem && elem.nodeName.toLowerCase() === part ?
+ elem || false :
+ elem === part;
+ }
+ }
+
+ if ( isPartStrNotTag ) {
+ Sizzle.filter( part, checkSet, true );
+ }
+ },
+ ">": function(checkSet, part){
+ var isPartStr = typeof part === "string";
+
+ if ( isPartStr && !/\W/.test(part) ) {
+ part = part.toLowerCase();
+
+ for ( var i = 0, l = checkSet.length; i < l; i++ ) {
+ var elem = checkSet[i];
+ if ( elem ) {
+ var parent = elem.parentNode;
+ checkSet[i] = parent.nodeName.toLowerCase() === part ? parent : false;
+ }
+ }
+ } else {
+ for ( var i = 0, l = checkSet.length; i < l; i++ ) {
+ var elem = checkSet[i];
+ if ( elem ) {
+ checkSet[i] = isPartStr ?
+ elem.parentNode :
+ elem.parentNode === part;
+ }
+ }
+
+ if ( isPartStr ) {
+ Sizzle.filter( part, checkSet, true );
+ }
+ }
+ },
+ "": function(checkSet, part, isXML){
+ var doneName = done++, checkFn = dirCheck;
+
+ if ( typeof part === "string" && !/\W/.test(part) ) {
+ var nodeCheck = part = part.toLowerCase();
+ checkFn = dirNodeCheck;
+ }
+
+ checkFn("parentNode", part, doneName, checkSet, nodeCheck, isXML);
+ },
+ "~": function(checkSet, part, isXML){
+ var doneName = done++, checkFn = dirCheck;
+
+ if ( typeof part === "string" && !/\W/.test(part) ) {
+ var nodeCheck = part = part.toLowerCase();
+ checkFn = dirNodeCheck;
+ }
+
+ checkFn("previousSibling", part, doneName, checkSet, nodeCheck, isXML);
+ }
+ },
+ find: {
+ ID: function(match, context, isXML){
+ if ( typeof context.getElementById !== "undefined" && !isXML ) {
+ var m = context.getElementById(match[1]);
+ return m ? [m] : [];
+ }
+ },
+ NAME: function(match, context){
+ if ( typeof context.getElementsByName !== "undefined" ) {
+ var ret = [], results = context.getElementsByName(match[1]);
+
+ for ( var i = 0, l = results.length; i < l; i++ ) {
+ if ( results[i].getAttribute("name") === match[1] ) {
+ ret.push( results[i] );
+ }
+ }
+
+ return ret.length === 0 ? null : ret;
+ }
+ },
+ TAG: function(match, context){
+ return context.getElementsByTagName(match[1]);
+ }
+ },
+ preFilter: {
+ CLASS: function(match, curLoop, inplace, result, not, isXML){
+ match = " " + match[1].replace(/\\/g, "") + " ";
+
+ if ( isXML ) {
+ return match;
+ }
+
+ for ( var i = 0, elem; (elem = curLoop[i]) != null; i++ ) {
+ if ( elem ) {
+ if ( not ^ (elem.className && (" " + elem.className + " ").replace(/[\t\n]/g, " ").indexOf(match) >= 0) ) {
+ if ( !inplace ) {
+ result.push( elem );
+ }
+ } else if ( inplace ) {
+ curLoop[i] = false;
+ }
+ }
+ }
+
+ return false;
+ },
+ ID: function(match){
+ return match[1].replace(/\\/g, "");
+ },
+ TAG: function(match, curLoop){
+ return match[1].toLowerCase();
+ },
+ CHILD: function(match){
+ if ( match[1] === "nth" ) {
+ // parse equations like 'even', 'odd', '5', '2n', '3n+2', '4n-1', '-n+6'
+ var test = /(-?)(\d*)n((?:\+|-)?\d*)/.exec(
+ match[2] === "even" && "2n" || match[2] === "odd" && "2n+1" ||
+ !/\D/.test( match[2] ) && "0n+" + match[2] || match[2]);
+
+ // calculate the numbers (first)n+(last) including if they are negative
+ match[2] = (test[1] + (test[2] || 1)) - 0;
+ match[3] = test[3] - 0;
+ }
+
+ // TODO: Move to normal caching system
+ match[0] = done++;
+
+ return match;
+ },
+ ATTR: function(match, curLoop, inplace, result, not, isXML){
+ var name = match[1].replace(/\\/g, "");
+
+ if ( !isXML && Expr.attrMap[name] ) {
+ match[1] = Expr.attrMap[name];
+ }
+
+ if ( match[2] === "~=" ) {
+ match[4] = " " + match[4] + " ";
+ }
+
+ return match;
+ },
+ PSEUDO: function(match, curLoop, inplace, result, not){
+ if ( match[1] === "not" ) {
+ // If we're dealing with a complex expression, or a simple one
+ if ( ( chunker.exec(match[3]) || "" ).length > 1 || /^\w/.test(match[3]) ) {
+ match[3] = Sizzle(match[3], null, null, curLoop);
+ } else {
+ var ret = Sizzle.filter(match[3], curLoop, inplace, true ^ not);
+ if ( !inplace ) {
+ result.push.apply( result, ret );
+ }
+ return false;
+ }
+ } else if ( Expr.match.POS.test( match[0] ) || Expr.match.CHILD.test( match[0] ) ) {
+ return true;
+ }
+
+ return match;
+ },
+ POS: function(match){
+ match.unshift( true );
+ return match;
+ }
+ },
+ filters: {
+ enabled: function(elem){
+ return elem.disabled === false && elem.type !== "hidden";
+ },
+ disabled: function(elem){
+ return elem.disabled === true;
+ },
+ checked: function(elem){
+ return elem.checked === true;
+ },
+ selected: function(elem){
+ // Accessing this property makes selected-by-default
+ // options in Safari work properly
+ elem.parentNode.selectedIndex;
+ return elem.selected === true;
+ },
+ parent: function(elem){
+ return !!elem.firstChild;
+ },
+ empty: function(elem){
+ return !elem.firstChild;
+ },
+ has: function(elem, i, match){
+ return !!Sizzle( match[3], elem ).length;
+ },
+ header: function(elem){
+ return /h\d/i.test( elem.nodeName );
+ },
+ text: function(elem){
+ return "text" === elem.type;
+ },
+ radio: function(elem){
+ return "radio" === elem.type;
+ },
+ checkbox: function(elem){
+ return "checkbox" === elem.type;
+ },
+ file: function(elem){
+ return "file" === elem.type;
+ },
+ password: function(elem){
+ return "password" === elem.type;
+ },
+ submit: function(elem){
+ return "submit" === elem.type;
+ },
+ image: function(elem){
+ return "image" === elem.type;
+ },
+ reset: function(elem){
+ return "reset" === elem.type;
+ },
+ button: function(elem){
+ return "button" === elem.type || elem.nodeName.toLowerCase() === "button";
+ },
+ input: function(elem){
+ return /input|select|textarea|button/i.test(elem.nodeName);
+ }
+ },
+ setFilters: {
+ first: function(elem, i){
+ return i === 0;
+ },
+ last: function(elem, i, match, array){
+ return i === array.length - 1;
+ },
+ even: function(elem, i){
+ return i % 2 === 0;
+ },
+ odd: function(elem, i){
+ return i % 2 === 1;
+ },
+ lt: function(elem, i, match){
+ return i < match[3] - 0;
+ },
+ gt: function(elem, i, match){
+ return i > match[3] - 0;
+ },
+ nth: function(elem, i, match){
+ return match[3] - 0 === i;
+ },
+ eq: function(elem, i, match){
+ return match[3] - 0 === i;
+ }
+ },
+ filter: {
+ PSEUDO: function(elem, match, i, array){
+ var name = match[1], filter = Expr.filters[ name ];
+
+ if ( filter ) {
+ return filter( elem, i, match, array );
+ } else if ( name === "contains" ) {
+ return (elem.textContent || elem.innerText || getText([ elem ]) || "").indexOf(match[3]) >= 0;
+ } else if ( name === "not" ) {
+ var not = match[3];
+
+ for ( var i = 0, l = not.length; i < l; i++ ) {
+ if ( not[i] === elem ) {
+ return false;
+ }
+ }
+
+ return true;
+ } else {
+ Sizzle.error( "Syntax error, unrecognized expression: " + name );
+ }
+ },
+ CHILD: function(elem, match){
+ var type = match[1], node = elem;
+ switch (type) {
+ case 'only':
+ case 'first':
+ while ( (node = node.previousSibling) ) {
+ if ( node.nodeType === 1 ) {
+ return false;
+ }
+ }
+ if ( type === "first" ) {
+ return true;
+ }
+ node = elem;
+ case 'last':
+ while ( (node = node.nextSibling) ) {
+ if ( node.nodeType === 1 ) {
+ return false;
+ }
+ }
+ return true;
+ case 'nth':
+ var first = match[2], last = match[3];
+
+ if ( first === 1 && last === 0 ) {
+ return true;
+ }
+
+ var doneName = match[0],
+ parent = elem.parentNode;
+
+ if ( parent && (parent.sizcache !== doneName || !elem.nodeIndex) ) {
+ var count = 0;
+ for ( node = parent.firstChild; node; node = node.nextSibling ) {
+ if ( node.nodeType === 1 ) {
+ node.nodeIndex = ++count;
+ }
+ }
+ parent.sizcache = doneName;
+ }
+
+ var diff = elem.nodeIndex - last;
+ if ( first === 0 ) {
+ return diff === 0;
+ } else {
+ return ( diff % first === 0 && diff / first >= 0 );
+ }
+ }
+ },
+ ID: function(elem, match){
+ return elem.nodeType === 1 && elem.getAttribute("id") === match;
+ },
+ TAG: function(elem, match){
+ return (match === "*" && elem.nodeType === 1) || elem.nodeName.toLowerCase() === match;
+ },
+ CLASS: function(elem, match){
+ return (" " + (elem.className || elem.getAttribute("class")) + " ")
+ .indexOf( match ) > -1;
+ },
+ ATTR: function(elem, match){
+ var name = match[1],
+ result = Expr.attrHandle[ name ] ?
+ Expr.attrHandle[ name ]( elem ) :
+ elem[ name ] != null ?
+ elem[ name ] :
+ elem.getAttribute( name ),
+ value = result + "",
+ type = match[2],
+ check = match[4];
+
+ return result == null ?
+ type === "!=" :
+ type === "=" ?
+ value === check :
+ type === "*=" ?
+ value.indexOf(check) >= 0 :
+ type === "~=" ?
+ (" " + value + " ").indexOf(check) >= 0 :
+ !check ?
+ value && result !== false :
+ type === "!=" ?
+ value !== check :
+ type === "^=" ?
+ value.indexOf(check) === 0 :
+ type === "$=" ?
+ value.substr(value.length - check.length) === check :
+ type === "|=" ?
+ value === check || value.substr(0, check.length + 1) === check + "-" :
+ false;
+ },
+ POS: function(elem, match, i, array){
+ var name = match[2], filter = Expr.setFilters[ name ];
+
+ if ( filter ) {
+ return filter( elem, i, match, array );
+ }
+ }
+ }
+};
+
+var origPOS = Expr.match.POS;
+
+for ( var type in Expr.match ) {
+ Expr.match[ type ] = new RegExp( Expr.match[ type ].source + /(?![^\[]*\])(?![^\(]*\))/.source );
+ Expr.leftMatch[ type ] = new RegExp( /(^(?:.|\r|\n)*?)/.source + Expr.match[ type ].source.replace(/\\(\d+)/g, function(all, num){
+ return "\\" + (num - 0 + 1);
+ }));
+}
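+// Editorial note: the loop above appends lookaheads so a token never
+// matches inside [attr] brackets or (pseudo) parens, and builds a
+// "leftMatch" variant that also captures everything to the left of the
+// token; the extra leading group shifts backreferences by one, which is
+// what the \d+ rewrite compensates for.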
+
+var makeArray = function(array, results) {
+ array = Array.prototype.slice.call( array, 0 );
+
+ if ( results ) {
+ results.push.apply( results, array );
+ return results;
+ }
+
+ return array;
+};
+
+// Perform a simple check to determine if the browser is capable of
+// converting a NodeList to an array using built-in methods.
+// Also verifies that the returned array holds DOM nodes
+// (which is not the case in the Blackberry browser)
+try {
+ Array.prototype.slice.call( document.documentElement.childNodes, 0 )[0].nodeType;
+
+// Provide a fallback method if it does not work
+} catch(e){
+ makeArray = function(array, results) {
+ var ret = results || [];
+
+ if ( toString.call(array) === "[object Array]" ) {
+ Array.prototype.push.apply( ret, array );
+ } else {
+ if ( typeof array.length === "number" ) {
+ for ( var i = 0, l = array.length; i < l; i++ ) {
+ ret.push( array[i] );
+ }
+ } else {
+ for ( var i = 0; array[i]; i++ ) {
+ ret.push( array[i] );
+ }
+ }
+ }
+
+ return ret;
+ };
+}
+
+var sortOrder;
+
+if ( document.documentElement.compareDocumentPosition ) {
+ sortOrder = function( a, b ) {
+ if ( !a.compareDocumentPosition || !b.compareDocumentPosition ) {
+ if ( a == b ) {
+ hasDuplicate = true;
+ }
+ return a.compareDocumentPosition ? -1 : 1;
+ }
+
+ var ret = a.compareDocumentPosition(b) & 4 ? -1 : a === b ? 0 : 1;
+ if ( ret === 0 ) {
+ hasDuplicate = true;
+ }
+ return ret;
+ };
+} else if ( "sourceIndex" in document.documentElement ) {
+ sortOrder = function( a, b ) {
+ if ( !a.sourceIndex || !b.sourceIndex ) {
+ if ( a == b ) {
+ hasDuplicate = true;
+ }
+ return a.sourceIndex ? -1 : 1;
+ }
+
+ var ret = a.sourceIndex - b.sourceIndex;
+ if ( ret === 0 ) {
+ hasDuplicate = true;
+ }
+ return ret;
+ };
+} else if ( document.createRange ) {
+ sortOrder = function( a, b ) {
+ if ( !a.ownerDocument || !b.ownerDocument ) {
+ if ( a == b ) {
+ hasDuplicate = true;
+ }
+ return a.ownerDocument ? -1 : 1;
+ }
+
+ var aRange = a.ownerDocument.createRange(), bRange = b.ownerDocument.createRange();
+ aRange.setStart(a, 0);
+ aRange.setEnd(a, 0);
+ bRange.setStart(b, 0);
+ bRange.setEnd(b, 0);
+ var ret = aRange.compareBoundaryPoints(Range.START_TO_END, bRange);
+ if ( ret === 0 ) {
+ hasDuplicate = true;
+ }
+ return ret;
+ };
+}
+
+// Utility function for retrieving the text value of an array of DOM nodes
+function getText( elems ) {
+ var ret = "", elem;
+
+ for ( var i = 0; elems[i]; i++ ) {
+ elem = elems[i];
+
+ // Get the text from text nodes and CDATA nodes
+ if ( elem.nodeType === 3 || elem.nodeType === 4 ) {
+ ret += elem.nodeValue;
+
+ // Traverse everything else, except comment nodes
+ } else if ( elem.nodeType !== 8 ) {
+ ret += getText( elem.childNodes );
+ }
+ }
+
+ return ret;
+}
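+// Illustrative example (editorial): comment nodes are skipped while text
+// and CDATA nodes are concatenated in document order, so
+//   getText( jQuery("<p>a<!--x--><b>b</b></p>")[0].childNodes ) === "ab"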
+
+// Check to see if the browser returns elements by name when
+// querying by getElementById (and provide a workaround)
+(function(){
+ // We're going to inject a fake input element with a specified name
+ var form = document.createElement("div"),
+ id = "script" + (new Date).getTime();
+ form.innerHTML = "<a name='" + id + "'/>";
+
+ // Inject it into the root element, check its status, and remove it quickly
+ var root = document.documentElement;
+ root.insertBefore( form, root.firstChild );
+
+ // The workaround has to do additional checks after a getElementById
+	// which slows things down for other browsers (hence the branching)
+ if ( document.getElementById( id ) ) {
+ Expr.find.ID = function(match, context, isXML){
+ if ( typeof context.getElementById !== "undefined" && !isXML ) {
+ var m = context.getElementById(match[1]);
+ return m ? m.id === match[1] || typeof m.getAttributeNode !== "undefined" && m.getAttributeNode("id").nodeValue === match[1] ? [m] : undefined : [];
+ }
+ };
+
+ Expr.filter.ID = function(elem, match){
+ var node = typeof elem.getAttributeNode !== "undefined" && elem.getAttributeNode("id");
+ return elem.nodeType === 1 && node && node.nodeValue === match;
+ };
+ }
+
+ root.removeChild( form );
+ root = form = null; // release memory in IE
+})();
+
+(function(){
+ // Check to see if the browser returns only elements
+ // when doing getElementsByTagName("*")
+
+ // Create a fake element
+ var div = document.createElement("div");
+ div.appendChild( document.createComment("") );
+
+ // Make sure no comments are found
+ if ( div.getElementsByTagName("*").length > 0 ) {
+ Expr.find.TAG = function(match, context){
+ var results = context.getElementsByTagName(match[1]);
+
+ // Filter out possible comments
+ if ( match[1] === "*" ) {
+ var tmp = [];
+
+ for ( var i = 0; results[i]; i++ ) {
+ if ( results[i].nodeType === 1 ) {
+ tmp.push( results[i] );
+ }
+ }
+
+ results = tmp;
+ }
+
+ return results;
+ };
+ }
+
+ // Check to see if an attribute returns normalized href attributes
+ div.innerHTML = "<a href='#'></a>";
+ if ( div.firstChild && typeof div.firstChild.getAttribute !== "undefined" &&
+ div.firstChild.getAttribute("href") !== "#" ) {
+ Expr.attrHandle.href = function(elem){
+ return elem.getAttribute("href", 2);
+ };
+ }
+
+ div = null; // release memory in IE
+})();
+
+if ( document.querySelectorAll ) {
+ (function(){
+ var oldSizzle = Sizzle, div = document.createElement("div");
+ div.innerHTML = "<p class='TEST'></p>";
+
+ // Safari can't handle uppercase or unicode characters when
+ // in quirks mode.
+ if ( div.querySelectorAll && div.querySelectorAll(".TEST").length === 0 ) {
+ return;
+ }
+
+ Sizzle = function(query, context, extra, seed){
+ context = context || document;
+
+ // Only use querySelectorAll on non-XML documents
+ // (ID selectors don't work in non-HTML documents)
+ if ( !seed && context.nodeType === 9 && !isXML(context) ) {
+ try {
+ return makeArray( context.querySelectorAll(query), extra );
+ } catch(e){}
+ }
+
+ return oldSizzle(query, context, extra, seed);
+ };
+
+ for ( var prop in oldSizzle ) {
+ Sizzle[ prop ] = oldSizzle[ prop ];
+ }
+
+ div = null; // release memory in IE
+ })();
+}
+
+(function(){
+ var div = document.createElement("div");
+
+ div.innerHTML = "<div class='test e'></div><div class='test'></div>";
+
+	// Opera can't find a second class name (in 9.6)
+ // Also, make sure that getElementsByClassName actually exists
+ if ( !div.getElementsByClassName || div.getElementsByClassName("e").length === 0 ) {
+ return;
+ }
+
+ // Safari caches class attributes, doesn't catch changes (in 3.2)
+ div.lastChild.className = "e";
+
+ if ( div.getElementsByClassName("e").length === 1 ) {
+ return;
+ }
+
+ Expr.order.splice(1, 0, "CLASS");
+ Expr.find.CLASS = function(match, context, isXML) {
+ if ( typeof context.getElementsByClassName !== "undefined" && !isXML ) {
+ return context.getElementsByClassName(match[1]);
+ }
+ };
+
+ div = null; // release memory in IE
+})();
+
+function dirNodeCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) {
+ for ( var i = 0, l = checkSet.length; i < l; i++ ) {
+ var elem = checkSet[i];
+ if ( elem ) {
+ elem = elem[dir];
+ var match = false;
+
+ while ( elem ) {
+ if ( elem.sizcache === doneName ) {
+ match = checkSet[elem.sizset];
+ break;
+ }
+
+ if ( elem.nodeType === 1 && !isXML ){
+ elem.sizcache = doneName;
+ elem.sizset = i;
+ }
+
+ if ( elem.nodeName.toLowerCase() === cur ) {
+ match = elem;
+ break;
+ }
+
+ elem = elem[dir];
+ }
+
+ checkSet[i] = match;
+ }
+ }
+}
+
+function dirCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) {
+ for ( var i = 0, l = checkSet.length; i < l; i++ ) {
+ var elem = checkSet[i];
+ if ( elem ) {
+ elem = elem[dir];
+ var match = false;
+
+ while ( elem ) {
+ if ( elem.sizcache === doneName ) {
+ match = checkSet[elem.sizset];
+ break;
+ }
+
+ if ( elem.nodeType === 1 ) {
+ if ( !isXML ) {
+ elem.sizcache = doneName;
+ elem.sizset = i;
+ }
+ if ( typeof cur !== "string" ) {
+ if ( elem === cur ) {
+ match = true;
+ break;
+ }
+
+ } else if ( Sizzle.filter( cur, [elem] ).length > 0 ) {
+ match = elem;
+ break;
+ }
+ }
+
+ elem = elem[dir];
+ }
+
+ checkSet[i] = match;
+ }
+ }
+}
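+// Editorial note: dirNodeCheck/dirCheck above walk one direction
+// ("parentNode" or "previousSibling") for every element in checkSet,
+// stamping visited nodes with sizcache/sizset so elements that share an
+// ancestor (or preceding-sibling) chain reuse the earlier result instead
+// of re-walking the same nodes.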
+
+var contains = document.compareDocumentPosition ? function(a, b){
+ return !!(a.compareDocumentPosition(b) & 16);
+} : function(a, b){
+ return a !== b && (a.contains ? a.contains(b) : true);
+};
+
+var isXML = function(elem){
+ // documentElement is verified for cases where it doesn't yet exist
+ // (such as loading iframes in IE - #4833)
+ var documentElement = (elem ? elem.ownerDocument || elem : 0).documentElement;
+ return documentElement ? documentElement.nodeName !== "HTML" : false;
+};
+
+var posProcess = function(selector, context){
+ var tmpSet = [], later = "", match,
+ root = context.nodeType ? [context] : context;
+
+	// Positional selectors must be applied after the filter,
+	// and so must :not(positional), so we move all PSEUDOs to the end
+ while ( (match = Expr.match.PSEUDO.exec( selector )) ) {
+ later += match[0];
+ selector = selector.replace( Expr.match.PSEUDO, "" );
+ }
+
+ selector = Expr.relative[selector] ? selector + "*" : selector;
+
+ for ( var i = 0, l = root.length; i < l; i++ ) {
+ Sizzle( selector, root[i], tmpSet );
+ }
+
+ return Sizzle.filter( later, tmpSet );
+};
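+// Editorial sketch: given "span:first" against a set of contexts,
+// posProcess peels the positional pseudo off into "later", gathers the
+// plain "span" matches from every context into tmpSet, and only then
+// applies ":first", since positional selectors are defined over the
+// combined match set rather than per context.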
+
+// EXPOSE
+jQuery.find = Sizzle;
+jQuery.expr = Sizzle.selectors;
+jQuery.expr[":"] = jQuery.expr.filters;
+jQuery.unique = Sizzle.uniqueSort;
+jQuery.text = getText;
+jQuery.isXMLDoc = isXML;
+jQuery.contains = contains;
+
+return;
+
+window.Sizzle = Sizzle;
+
+})();
+var runtil = /Until$/,
+ rparentsprev = /^(?:parents|prevUntil|prevAll)/,
+ // Note: This RegExp should be improved, or likely pulled from Sizzle
+ rmultiselector = /,/,
+ slice = Array.prototype.slice;
+
+// Implement the identical functionality for filter and not
+var winnow = function( elements, qualifier, keep ) {
+ if ( jQuery.isFunction( qualifier ) ) {
+ return jQuery.grep(elements, function( elem, i ) {
+ return !!qualifier.call( elem, i, elem ) === keep;
+ });
+
+ } else if ( qualifier.nodeType ) {
+ return jQuery.grep(elements, function( elem, i ) {
+ return (elem === qualifier) === keep;
+ });
+
+ } else if ( typeof qualifier === "string" ) {
+ var filtered = jQuery.grep(elements, function( elem ) {
+ return elem.nodeType === 1;
+ });
+
+ if ( isSimple.test( qualifier ) ) {
+ return jQuery.filter(qualifier, filtered, !keep);
+ } else {
+ qualifier = jQuery.filter( qualifier, filtered );
+ }
+ }
+
+ return jQuery.grep(elements, function( elem, i ) {
+ return (jQuery.inArray( elem, qualifier ) >= 0) === keep;
+ });
+};
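+// Editorial note: winnow() backs both .filter() (keep === true) and
+// .not() (keep === false); the qualifier may be a function, a DOM node,
+// a simple selector handled by jQuery.filter, or a complex selector that
+// is resolved first and then compared by identity.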
+
+jQuery.fn.extend({
+ find: function( selector ) {
+ var ret = this.pushStack( "", "find", selector ), length = 0;
+
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ length = ret.length;
+ jQuery.find( selector, this[i], ret );
+
+ if ( i > 0 ) {
+ // Make sure that the results are unique
+ for ( var n = length; n < ret.length; n++ ) {
+ for ( var r = 0; r < length; r++ ) {
+ if ( ret[r] === ret[n] ) {
+ ret.splice(n--, 1);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return ret;
+ },
+
+ has: function( target ) {
+ var targets = jQuery( target );
+ return this.filter(function() {
+ for ( var i = 0, l = targets.length; i < l; i++ ) {
+ if ( jQuery.contains( this, targets[i] ) ) {
+ return true;
+ }
+ }
+ });
+ },
+
+ not: function( selector ) {
+ return this.pushStack( winnow(this, selector, false), "not", selector);
+ },
+
+ filter: function( selector ) {
+ return this.pushStack( winnow(this, selector, true), "filter", selector );
+ },
+
+ is: function( selector ) {
+ return !!selector && jQuery.filter( selector, this ).length > 0;
+ },
+
+ closest: function( selectors, context ) {
+ if ( jQuery.isArray( selectors ) ) {
+ var ret = [], cur = this[0], match, matches = {}, selector;
+
+ if ( cur && selectors.length ) {
+ for ( var i = 0, l = selectors.length; i < l; i++ ) {
+ selector = selectors[i];
+
+ if ( !matches[selector] ) {
+ matches[selector] = jQuery.expr.match.POS.test( selector ) ?
+ jQuery( selector, context || this.context ) :
+ selector;
+ }
+ }
+
+ while ( cur && cur.ownerDocument && cur !== context ) {
+ for ( selector in matches ) {
+ match = matches[selector];
+
+ if ( match.jquery ? match.index(cur) > -1 : jQuery(cur).is(match) ) {
+ ret.push({ selector: selector, elem: cur });
+ delete matches[selector];
+ }
+ }
+ cur = cur.parentNode;
+ }
+ }
+
+ return ret;
+ }
+
+ var pos = jQuery.expr.match.POS.test( selectors ) ?
+ jQuery( selectors, context || this.context ) : null;
+
+ return this.map(function( i, cur ) {
+ while ( cur && cur.ownerDocument && cur !== context ) {
+ if ( pos ? pos.index(cur) > -1 : jQuery(cur).is(selectors) ) {
+ return cur;
+ }
+ cur = cur.parentNode;
+ }
+ return null;
+ });
+ },
+
+ // Determine the position of an element within
+ // the matched set of elements
+ index: function( elem ) {
+ if ( !elem || typeof elem === "string" ) {
+ return jQuery.inArray( this[0],
+ // If it receives a string, the selector is used
+ // If it receives nothing, the siblings are used
+ elem ? jQuery( elem ) : this.parent().children() );
+ }
+ // Locate the position of the desired element
+ return jQuery.inArray(
+ // If it receives a jQuery object, the first element is used
+ elem.jquery ? elem[0] : elem, this );
+ },
+
+ add: function( selector, context ) {
+ var set = typeof selector === "string" ?
+ jQuery( selector, context || this.context ) :
+ jQuery.makeArray( selector ),
+ all = jQuery.merge( this.get(), set );
+
+ return this.pushStack( isDisconnected( set[0] ) || isDisconnected( all[0] ) ?
+ all :
+ jQuery.unique( all ) );
+ },
+
+ andSelf: function() {
+ return this.add( this.prevObject );
+ }
+});
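+// Illustrative usage of the traversal methods above (editorial):
+//   jQuery("li.active").closest("ul");          // nearest <ul> ancestor
+//   jQuery("li").index( jQuery("li.active") );  // position within the set
+//   jQuery("p").add("span");                    // merged, unique set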
+
+// A painfully simple check to see if an element is disconnected
+// from a document (should be improved, where feasible).
+function isDisconnected( node ) {
+ return !node || !node.parentNode || node.parentNode.nodeType === 11;
+}
+
+jQuery.each({
+ parent: function( elem ) {
+ var parent = elem.parentNode;
+ return parent && parent.nodeType !== 11 ? parent : null;
+ },
+ parents: function( elem ) {
+ return jQuery.dir( elem, "parentNode" );
+ },
+ parentsUntil: function( elem, i, until ) {
+ return jQuery.dir( elem, "parentNode", until );
+ },
+ next: function( elem ) {
+ return jQuery.nth( elem, 2, "nextSibling" );
+ },
+ prev: function( elem ) {
+ return jQuery.nth( elem, 2, "previousSibling" );
+ },
+ nextAll: function( elem ) {
+ return jQuery.dir( elem, "nextSibling" );
+ },
+ prevAll: function( elem ) {
+ return jQuery.dir( elem, "previousSibling" );
+ },
+ nextUntil: function( elem, i, until ) {
+ return jQuery.dir( elem, "nextSibling", until );
+ },
+ prevUntil: function( elem, i, until ) {
+ return jQuery.dir( elem, "previousSibling", until );
+ },
+ siblings: function( elem ) {
+ return jQuery.sibling( elem.parentNode.firstChild, elem );
+ },
+ children: function( elem ) {
+ return jQuery.sibling( elem.firstChild );
+ },
+ contents: function( elem ) {
+ return jQuery.nodeName( elem, "iframe" ) ?
+ elem.contentDocument || elem.contentWindow.document :
+ jQuery.makeArray( elem.childNodes );
+ }
+}, function( name, fn ) {
+ jQuery.fn[ name ] = function( until, selector ) {
+ var ret = jQuery.map( this, fn, until );
+
+ if ( !runtil.test( name ) ) {
+ selector = until;
+ }
+
+ if ( selector && typeof selector === "string" ) {
+ ret = jQuery.filter( selector, ret );
+ }
+
+ ret = this.length > 1 ? jQuery.unique( ret ) : ret;
+
+ if ( (this.length > 1 || rmultiselector.test( selector )) && rparentsprev.test( name ) ) {
+ ret = ret.reverse();
+ }
+
+ return this.pushStack( ret, name, slice.call(arguments).join(",") );
+ };
+});
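+// Editorial note: for the *Until methods generated above, the first
+// argument is the stopping selector; for all the others, the runtil test
+// shifts that first argument into the filtering selector instead.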
+
+jQuery.extend({
+ filter: function( expr, elems, not ) {
+ if ( not ) {
+ expr = ":not(" + expr + ")";
+ }
+
+ return jQuery.find.matches(expr, elems);
+ },
+
+ dir: function( elem, dir, until ) {
+ var matched = [], cur = elem[dir];
+ while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) {
+ if ( cur.nodeType === 1 ) {
+ matched.push( cur );
+ }
+ cur = cur[dir];
+ }
+ return matched;
+ },
+
+ nth: function( cur, result, dir, elem ) {
+ result = result || 1;
+ var num = 0;
+
+ for ( ; cur; cur = cur[dir] ) {
+ if ( cur.nodeType === 1 && ++num === result ) {
+ break;
+ }
+ }
+
+ return cur;
+ },
+
+ sibling: function( n, elem ) {
+ var r = [];
+
+ for ( ; n; n = n.nextSibling ) {
+ if ( n.nodeType === 1 && n !== elem ) {
+ r.push( n );
+ }
+ }
+
+ return r;
+ }
+});
+var rinlinejQuery = / jQuery\d+="(?:\d+|null)"/g,
+ rleadingWhitespace = /^\s+/,
+ rxhtmlTag = /(<([\w:]+)[^>]*?)\/>/g,
+ rselfClosing = /^(?:area|br|col|embed|hr|img|input|link|meta|param)$/i,
+ rtagName = /<([\w:]+)/,
+ rtbody = /<tbody/i,
+ rhtml = /<|&#?\w+;/,
+ rnocache = /<script|<object|<embed|<option|<style/i,
+ rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, // checked="checked" or checked (html5)
+ fcloseTag = function( all, front, tag ) {
+ return rselfClosing.test( tag ) ?
+ all :
+ front + "></" + tag + ">";
+ },
+ wrapMap = {
+ option: [ 1, "<select multiple='multiple'>", "</select>" ],
+ legend: [ 1, "<fieldset>", "</fieldset>" ],
+ thead: [ 1, "<table>", "</table>" ],
+ tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+ td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+ col: [ 2, "<table><tbody></tbody><colgroup>", "</colgroup></table>" ],
+ area: [ 1, "<map>", "</map>" ],
+ _default: [ 0, "", "" ]
+ };
+
+wrapMap.optgroup = wrapMap.option;
+wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
+wrapMap.th = wrapMap.td;
+
+// IE can't serialize <link> and <script> tags normally
+if ( !jQuery.support.htmlSerialize ) {
+ wrapMap._default = [ 1, "div<div>", "</div>" ];
+}
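+// Editorial note on wrapMap: markup like "<td>x</td>" only parses inside
+// the right ancestors, so jQuery.clean wraps the string (here in
+// "<table><tbody><tr>...</tr></tbody></table>") and the leading number
+// says how many wrapper levels to peel off again afterwards.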
+
+jQuery.fn.extend({
+ text: function( text ) {
+ if ( jQuery.isFunction(text) ) {
+ return this.each(function(i) {
+ var self = jQuery(this);
+ self.text( text.call(this, i, self.text()) );
+ });
+ }
+
+ if ( typeof text !== "object" && text !== undefined ) {
+ return this.empty().append( (this[0] && this[0].ownerDocument || document).createTextNode( text ) );
+ }
+
+ return jQuery.text( this );
+ },
+
+ wrapAll: function( html ) {
+ if ( jQuery.isFunction( html ) ) {
+ return this.each(function(i) {
+ jQuery(this).wrapAll( html.call(this, i) );
+ });
+ }
+
+ if ( this[0] ) {
+ // The elements to wrap the target around
+ var wrap = jQuery( html, this[0].ownerDocument ).eq(0).clone(true);
+
+ if ( this[0].parentNode ) {
+ wrap.insertBefore( this[0] );
+ }
+
+ wrap.map(function() {
+ var elem = this;
+
+ while ( elem.firstChild && elem.firstChild.nodeType === 1 ) {
+ elem = elem.firstChild;
+ }
+
+ return elem;
+ }).append(this);
+ }
+
+ return this;
+ },
+
+ wrapInner: function( html ) {
+ if ( jQuery.isFunction( html ) ) {
+ return this.each(function(i) {
+ jQuery(this).wrapInner( html.call(this, i) );
+ });
+ }
+
+ return this.each(function() {
+ var self = jQuery( this ), contents = self.contents();
+
+ if ( contents.length ) {
+ contents.wrapAll( html );
+
+ } else {
+ self.append( html );
+ }
+ });
+ },
+
+ wrap: function( html ) {
+ return this.each(function() {
+ jQuery( this ).wrapAll( html );
+ });
+ },
+
+ unwrap: function() {
+ return this.parent().each(function() {
+ if ( !jQuery.nodeName( this, "body" ) ) {
+ jQuery( this ).replaceWith( this.childNodes );
+ }
+ }).end();
+ },
+
+ append: function() {
+ return this.domManip(arguments, true, function( elem ) {
+ if ( this.nodeType === 1 ) {
+ this.appendChild( elem );
+ }
+ });
+ },
+
+ prepend: function() {
+ return this.domManip(arguments, true, function( elem ) {
+ if ( this.nodeType === 1 ) {
+ this.insertBefore( elem, this.firstChild );
+ }
+ });
+ },
+
+ before: function() {
+ if ( this[0] && this[0].parentNode ) {
+ return this.domManip(arguments, false, function( elem ) {
+ this.parentNode.insertBefore( elem, this );
+ });
+ } else if ( arguments.length ) {
+ var set = jQuery(arguments[0]);
+ set.push.apply( set, this.toArray() );
+ return this.pushStack( set, "before", arguments );
+ }
+ },
+
+ after: function() {
+ if ( this[0] && this[0].parentNode ) {
+ return this.domManip(arguments, false, function( elem ) {
+ this.parentNode.insertBefore( elem, this.nextSibling );
+ });
+ } else if ( arguments.length ) {
+ var set = this.pushStack( this, "after", arguments );
+ set.push.apply( set, jQuery(arguments[0]).toArray() );
+ return set;
+ }
+ },
+
+ // keepData is for internal use only--do not document
+ remove: function( selector, keepData ) {
+ for ( var i = 0, elem; (elem = this[i]) != null; i++ ) {
+ if ( !selector || jQuery.filter( selector, [ elem ] ).length ) {
+ if ( !keepData && elem.nodeType === 1 ) {
+ jQuery.cleanData( elem.getElementsByTagName("*") );
+ jQuery.cleanData( [ elem ] );
+ }
+
+ if ( elem.parentNode ) {
+ elem.parentNode.removeChild( elem );
+ }
+ }
+ }
+
+ return this;
+ },
+
+ empty: function() {
+ for ( var i = 0, elem; (elem = this[i]) != null; i++ ) {
+ // Remove element nodes and prevent memory leaks
+ if ( elem.nodeType === 1 ) {
+ jQuery.cleanData( elem.getElementsByTagName("*") );
+ }
+
+ // Remove any remaining nodes
+ while ( elem.firstChild ) {
+ elem.removeChild( elem.firstChild );
+ }
+ }
+
+ return this;
+ },
+
+ clone: function( events ) {
+ // Do the clone
+ var ret = this.map(function() {
+ if ( !jQuery.support.noCloneEvent && !jQuery.isXMLDoc(this) ) {
+			// IE copies events bound via attachEvent when
+			// using cloneNode. Calling detachEvent on the
+			// clone will also remove the events from the original.
+			// In order to get around this, we use innerHTML.
+			// Unfortunately, this means some modifications to
+			// attributes in IE that are actually only stored
+			// as properties will not be copied (such as the
+			// name attribute on an input).
+ var html = this.outerHTML, ownerDocument = this.ownerDocument;
+ if ( !html ) {
+ var div = ownerDocument.createElement("div");
+ div.appendChild( this.cloneNode(true) );
+ html = div.innerHTML;
+ }
+
+ return jQuery.clean([html.replace(rinlinejQuery, "")
+ // Handle the case in IE 8 where action=/test/> self-closes a tag
+ .replace(/=([^="'>\s]+\/)>/g, '="$1">')
+ .replace(rleadingWhitespace, "")], ownerDocument)[0];
+ } else {
+ return this.cloneNode(true);
+ }
+ });
+
+ // Copy the events from the original to the clone
+ if ( events === true ) {
+ cloneCopyEvent( this, ret );
+ cloneCopyEvent( this.find("*"), ret.find("*") );
+ }
+
+ // Return the cloned set
+ return ret;
+ },
+
+ html: function( value ) {
+ if ( value === undefined ) {
+ return this[0] && this[0].nodeType === 1 ?
+ this[0].innerHTML.replace(rinlinejQuery, "") :
+ null;
+
+ // See if we can take a shortcut and just use innerHTML
+ } else if ( typeof value === "string" && !rnocache.test( value ) &&
+ (jQuery.support.leadingWhitespace || !rleadingWhitespace.test( value )) &&
+ !wrapMap[ (rtagName.exec( value ) || ["", ""])[1].toLowerCase() ] ) {
+
+ value = value.replace(rxhtmlTag, fcloseTag);
+
+ try {
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ // Remove element nodes and prevent memory leaks
+ if ( this[i].nodeType === 1 ) {
+ jQuery.cleanData( this[i].getElementsByTagName("*") );
+ this[i].innerHTML = value;
+ }
+ }
+
+ // If using innerHTML throws an exception, use the fallback method
+ } catch(e) {
+ this.empty().append( value );
+ }
+
+ } else if ( jQuery.isFunction( value ) ) {
+ this.each(function(i){
+ var self = jQuery(this), old = self.html();
+ self.empty().append(function(){
+ return value.call( this, i, old );
+ });
+ });
+
+ } else {
+ this.empty().append( value );
+ }
+
+ return this;
+ },
+
+ replaceWith: function( value ) {
+ if ( this[0] && this[0].parentNode ) {
+		// Make sure that the elements are removed from the DOM before they are
+		// inserted; this helps when replacing a parent with its child elements
+ if ( jQuery.isFunction( value ) ) {
+ return this.each(function(i) {
+ var self = jQuery(this), old = self.html();
+ self.replaceWith( value.call( this, i, old ) );
+ });
+ }
+
+ if ( typeof value !== "string" ) {
+ value = jQuery(value).detach();
+ }
+
+ return this.each(function() {
+ var next = this.nextSibling, parent = this.parentNode;
+
+ jQuery(this).remove();
+
+ if ( next ) {
+ jQuery(next).before( value );
+ } else {
+ jQuery(parent).append( value );
+ }
+ });
+ } else {
+ return this.pushStack( jQuery(jQuery.isFunction(value) ? value() : value), "replaceWith", value );
+ }
+ },
+
+ detach: function( selector ) {
+ return this.remove( selector, true );
+ },
+
+ domManip: function( args, table, callback ) {
+ var results, first, value = args[0], scripts = [], fragment, parent;
+
+		// WebKit's cloneNode loses the checked state, so we can't clone fragments that contain checked inputs
+ if ( !jQuery.support.checkClone && arguments.length === 3 && typeof value === "string" && rchecked.test( value ) ) {
+ return this.each(function() {
+ jQuery(this).domManip( args, table, callback, true );
+ });
+ }
+
+ if ( jQuery.isFunction(value) ) {
+ return this.each(function(i) {
+ var self = jQuery(this);
+ args[0] = value.call(this, i, table ? self.html() : undefined);
+ self.domManip( args, table, callback );
+ });
+ }
+
+ if ( this[0] ) {
+ parent = value && value.parentNode;
+
+ // If we're in a fragment, just use that instead of building a new one
+ if ( jQuery.support.parentNode && parent && parent.nodeType === 11 && parent.childNodes.length === this.length ) {
+ results = { fragment: parent };
+
+ } else {
+ results = buildFragment( args, this, scripts );
+ }
+
+ fragment = results.fragment;
+
+ if ( fragment.childNodes.length === 1 ) {
+ first = fragment = fragment.firstChild;
+ } else {
+ first = fragment.firstChild;
+ }
+
+ if ( first ) {
+ table = table && jQuery.nodeName( first, "tr" );
+
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ callback.call(
+ table ?
+ root(this[i], first) :
+ this[i],
+ i > 0 || results.cacheable || this.length > 1 ?
+ fragment.cloneNode(true) :
+ fragment
+ );
+ }
+ }
+
+ if ( scripts.length ) {
+ jQuery.each( scripts, evalScript );
+ }
+ }
+
+ return this;
+
+ function root( elem, cur ) {
+ return jQuery.nodeName(elem, "table") ?
+ (elem.getElementsByTagName("tbody")[0] ||
+ elem.appendChild(elem.ownerDocument.createElement("tbody"))) :
+ elem;
+ }
+ }
+});
+
+function cloneCopyEvent(orig, ret) {
+ var i = 0;
+
+ ret.each(function() {
+ if ( this.nodeName !== (orig[i] && orig[i].nodeName) ) {
+ return;
+ }
+
+ var oldData = jQuery.data( orig[i++] ), curData = jQuery.data( this, oldData ), events = oldData && oldData.events;
+
+ if ( events ) {
+ delete curData.handle;
+ curData.events = {};
+
+ for ( var type in events ) {
+ for ( var handler in events[ type ] ) {
+ jQuery.event.add( this, type, events[ type ][ handler ], events[ type ][ handler ].data );
+ }
+ }
+ }
+ });
+}
+
+function buildFragment( args, nodes, scripts ) {
+ var fragment, cacheable, cacheresults,
+ doc = (nodes && nodes[0] ? nodes[0].ownerDocument || nodes[0] : document);
+
+ // Only cache "small" (1/2 KB) strings that are associated with the main document
+ // Cloning options loses the selected state, so don't cache them
+ // IE 6 doesn't like it when you put <object> or <embed> elements in a fragment
+ // Also, WebKit does not clone 'checked' attributes on cloneNode, so don't cache
+ if ( args.length === 1 && typeof args[0] === "string" && args[0].length < 512 && doc === document &&
+ !rnocache.test( args[0] ) && (jQuery.support.checkClone || !rchecked.test( args[0] )) ) {
+
+ cacheable = true;
+ cacheresults = jQuery.fragments[ args[0] ];
+ if ( cacheresults ) {
+ if ( cacheresults !== 1 ) {
+ fragment = cacheresults;
+ }
+ }
+ }
+
+ if ( !fragment ) {
+ fragment = doc.createDocumentFragment();
+ jQuery.clean( args, doc, fragment, scripts );
+ }
+
+ if ( cacheable ) {
+ jQuery.fragments[ args[0] ] = cacheresults ? fragment : 1;
+ }
+
+ return { fragment: fragment, cacheable: cacheable };
+}
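+// Editorial note: jQuery.fragments maps an HTML string to 1 the first
+// time it is seen and to a cached DocumentFragment the second time, so
+// only strings that are actually reused pay the cost of being retained.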
+
+jQuery.fragments = {};
+
+jQuery.each({
+ appendTo: "append",
+ prependTo: "prepend",
+ insertBefore: "before",
+ insertAfter: "after",
+ replaceAll: "replaceWith"
+}, function( name, original ) {
+ jQuery.fn[ name ] = function( selector ) {
+ var ret = [], insert = jQuery( selector ),
+ parent = this.length === 1 && this[0].parentNode;
+
+ if ( parent && parent.nodeType === 11 && parent.childNodes.length === 1 && insert.length === 1 ) {
+ insert[ original ]( this[0] );
+ return this;
+
+ } else {
+ for ( var i = 0, l = insert.length; i < l; i++ ) {
+ var elems = (i > 0 ? this.clone(true) : this).get();
+ jQuery.fn[ original ].apply( jQuery(insert[i]), elems );
+ ret = ret.concat( elems );
+ }
+
+ return this.pushStack( ret, name, insert.selector );
+ }
+ };
+});
+
+jQuery.extend({
+ clean: function( elems, context, fragment, scripts ) {
+ context = context || document;
+
+	// In IE, evaluating !context.createElement throws an error even though
+	// typeof reports "object", so test with typeof instead
+ if ( typeof context.createElement === "undefined" ) {
+ context = context.ownerDocument || context[0] && context[0].ownerDocument || document;
+ }
+
+ var ret = [];
+
+ for ( var i = 0, elem; (elem = elems[i]) != null; i++ ) {
+ if ( typeof elem === "number" ) {
+ elem += "";
+ }
+
+ if ( !elem ) {
+ continue;
+ }
+
+ // Convert html string into DOM nodes
+ if ( typeof elem === "string" && !rhtml.test( elem ) ) {
+ elem = context.createTextNode( elem );
+
+ } else if ( typeof elem === "string" ) {
+ // Fix "XHTML"-style tags in all browsers
+ elem = elem.replace(rxhtmlTag, fcloseTag);
+
+ // Trim whitespace, otherwise indexOf won't work as expected
+ var tag = (rtagName.exec( elem ) || ["", ""])[1].toLowerCase(),
+ wrap = wrapMap[ tag ] || wrapMap._default,
+ depth = wrap[0],
+ div = context.createElement("div");
+
+ // Go to html and back, then peel off extra wrappers
+ div.innerHTML = wrap[1] + elem + wrap[2];
+
+ // Move to the right depth
+ while ( depth-- ) {
+ div = div.lastChild;
+ }
+
+ // Remove IE's autoinserted <tbody> from table fragments
+ if ( !jQuery.support.tbody ) {
+
+ // String was a <table>, *may* have spurious <tbody>
+ var hasBody = rtbody.test(elem),
+ tbody = tag === "table" && !hasBody ?
+ div.firstChild && div.firstChild.childNodes :
+
+ // String was a bare <thead> or <tfoot>
+ wrap[1] === "<table>" && !hasBody ?
+ div.childNodes :
+ [];
+
+ for ( var j = tbody.length - 1; j >= 0 ; --j ) {
+ if ( jQuery.nodeName( tbody[ j ], "tbody" ) && !tbody[ j ].childNodes.length ) {
+ tbody[ j ].parentNode.removeChild( tbody[ j ] );
+ }
+ }
+
+ }
+
+ // IE completely kills leading whitespace when innerHTML is used
+ if ( !jQuery.support.leadingWhitespace && rleadingWhitespace.test( elem ) ) {
+ div.insertBefore( context.createTextNode( rleadingWhitespace.exec(elem)[0] ), div.firstChild );
+ }
+
+ elem = div.childNodes;
+ }
+
+ if ( elem.nodeType ) {
+ ret.push( elem );
+ } else {
+ ret = jQuery.merge( ret, elem );
+ }
+ }
+
+ if ( fragment ) {
+ for ( var i = 0; ret[i]; i++ ) {
+ if ( scripts && jQuery.nodeName( ret[i], "script" ) && (!ret[i].type || ret[i].type.toLowerCase() === "text/javascript") ) {
+ scripts.push( ret[i].parentNode ? ret[i].parentNode.removeChild( ret[i] ) : ret[i] );
+
+ } else {
+ if ( ret[i].nodeType === 1 ) {
+ ret.splice.apply( ret, [i + 1, 0].concat(jQuery.makeArray(ret[i].getElementsByTagName("script"))) );
+ }
+ fragment.appendChild( ret[i] );
+ }
+ }
+ }
+
+ return ret;
+ },
+
+ cleanData: function( elems ) {
+ var data, id, cache = jQuery.cache,
+ special = jQuery.event.special,
+ deleteExpando = jQuery.support.deleteExpando;
+
+ for ( var i = 0, elem; (elem = elems[i]) != null; i++ ) {
+ id = elem[ jQuery.expando ];
+
+ if ( id ) {
+ data = cache[ id ];
+
+ if ( data.events ) {
+ for ( var type in data.events ) {
+ if ( special[ type ] ) {
+ jQuery.event.remove( elem, type );
+
+ } else {
+ removeEvent( elem, type, data.handle );
+ }
+ }
+ }
+
+ if ( deleteExpando ) {
+ delete elem[ jQuery.expando ];
+
+ } else if ( elem.removeAttribute ) {
+ elem.removeAttribute( jQuery.expando );
+ }
+
+ delete cache[ id ];
+ }
+ }
+ }
+});
+// exclude the following css properties from having "px" appended to numeric values
+var rexclude = /z-?index|font-?weight|opacity|zoom|line-?height/i,
+ ralpha = /alpha\([^)]*\)/,
+ ropacity = /opacity=([^)]*)/,
+ rfloat = /float/i,
+ rdashAlpha = /-([a-z])/ig,
+ rupper = /([A-Z])/g,
+ rnumpx = /^-?\d+(?:px)?$/i,
+ rnum = /^-?\d/,
+
+ cssShow = { position: "absolute", visibility: "hidden", display:"block" },
+ cssWidth = [ "Left", "Right" ],
+ cssHeight = [ "Top", "Bottom" ],
+
+ // cache check for defaultView.getComputedStyle
+ getComputedStyle = document.defaultView && document.defaultView.getComputedStyle,
+ // normalize float css property
+ styleFloat = jQuery.support.cssFloat ? "cssFloat" : "styleFloat",
+ fcamelCase = function( all, letter ) {
+ return letter.toUpperCase();
+ };
+
+jQuery.fn.css = function( name, value ) {
+ return access( this, name, value, true, function( elem, name, value ) {
+ if ( value === undefined ) {
+ return jQuery.curCSS( elem, name );
+ }
+
+ if ( typeof value === "number" && !rexclude.test(name) ) {
+ value += "px";
+ }
+
+ jQuery.style( elem, name, value );
+ });
+};
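+// Illustrative usage (editorial): plain numbers get "px" appended unless
+// the property is matched by rexclude above:
+//   jQuery("div.box").css("width", 200);   // sets width: 200px
+//   jQuery("div.box").css("zIndex", 200);  // sets z-index: 200, no unit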
+
+jQuery.extend({
+ style: function( elem, name, value ) {
+ // don't set styles on text and comment nodes
+ if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 ) {
+ return undefined;
+ }
+
+ // ignore negative width and height values #1599
+ if ( (name === "width" || name === "height") && parseFloat(value) < 0 ) {
+ value = undefined;
+ }
+
+ var style = elem.style || elem, set = value !== undefined;
+
+ // IE uses filters for opacity
+ if ( !jQuery.support.opacity && name === "opacity" ) {
+ if ( set ) {
+ // IE has trouble with opacity if it does not have layout
+ // Force it by setting the zoom level
+ style.zoom = 1;
+
+ // Set the alpha filter to set the opacity
+ var opacity = parseInt( value, 10 ) + "" === "NaN" ? "" : "alpha(opacity=" + value * 100 + ")";
+ var filter = style.filter || jQuery.curCSS( elem, "filter" ) || "";
+ style.filter = ralpha.test(filter) ? filter.replace(ralpha, opacity) : opacity;
+ }
+
+ return style.filter && style.filter.indexOf("opacity=") >= 0 ?
+ (parseFloat( ropacity.exec(style.filter)[1] ) / 100) + "":
+ "";
+ }
+
+ // Make sure we're using the right name for getting the float value
+ if ( rfloat.test( name ) ) {
+ name = styleFloat;
+ }
+
+ name = name.replace(rdashAlpha, fcamelCase);
+
+ if ( set ) {
+ style[ name ] = value;
+ }
+
+ return style[ name ];
+ },
+
+ css: function( elem, name, force, extra ) {
+ if ( name === "width" || name === "height" ) {
+ var val, props = cssShow, which = name === "width" ? cssWidth : cssHeight;
+
+ function getWH() {
+ val = name === "width" ? elem.offsetWidth : elem.offsetHeight;
+
+ if ( extra === "border" ) {
+ return;
+ }
+
+ jQuery.each( which, function() {
+ if ( !extra ) {
+ val -= parseFloat(jQuery.curCSS( elem, "padding" + this, true)) || 0;
+ }
+
+ if ( extra === "margin" ) {
+ val += parseFloat(jQuery.curCSS( elem, "margin" + this, true)) || 0;
+ } else {
+ val -= parseFloat(jQuery.curCSS( elem, "border" + this + "Width", true)) || 0;
+ }
+ });
+ }
+
+ if ( elem.offsetWidth !== 0 ) {
+ getWH();
+ } else {
+ jQuery.swap( elem, props, getWH );
+ }
+
+ return Math.max(0, Math.round(val));
+ }
+
+ return jQuery.curCSS( elem, name, force );
+ },
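+	// Editorial note: for width/height the calculation above starts from
+	// offsetWidth/offsetHeight (content + padding + border) and then, per
+	// the "extra" argument, subtracts padding and border (default), keeps
+	// both ("border"), or additionally adds margins ("margin"); hidden
+	// elements are measured inside jQuery.swap with cssShow applied.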
+
+ curCSS: function( elem, name, force ) {
+ var ret, style = elem.style, filter;
+
+ // IE uses filters for opacity
+ if ( !jQuery.support.opacity && name === "opacity" && elem.currentStyle ) {
+ ret = ropacity.test(elem.currentStyle.filter || "") ?
+ (parseFloat(RegExp.$1) / 100) + "" :
+ "";
+
+ return ret === "" ?
+ "1" :
+ ret;
+ }
+
+ // Make sure we're using the right name for getting the float value
+ if ( rfloat.test( name ) ) {
+ name = styleFloat;
+ }
+
+ if ( !force && style && style[ name ] ) {
+ ret = style[ name ];
+
+ } else if ( getComputedStyle ) {
+
+ // Only "float" is needed here
+ if ( rfloat.test( name ) ) {
+ name = "float";
+ }
+
+ name = name.replace( rupper, "-$1" ).toLowerCase();
+
+ var defaultView = elem.ownerDocument.defaultView;
+
+ if ( !defaultView ) {
+ return null;
+ }
+
+ var computedStyle = defaultView.getComputedStyle( elem, null );
+
+ if ( computedStyle ) {
+ ret = computedStyle.getPropertyValue( name );
+ }
+
+ // We should always get a number back from opacity
+ if ( name === "opacity" && ret === "" ) {
+ ret = "1";
+ }
+
+ } else if ( elem.currentStyle ) {
+ var camelCase = name.replace(rdashAlpha, fcamelCase);
+
+ ret = elem.currentStyle[ name ] || elem.currentStyle[ camelCase ];
+
+ // From the awesome hack by Dean Edwards
+ // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291
+
+ // If we're not dealing with a regular pixel number
+ // but a number that has a weird ending, we need to convert it to pixels
+ if ( !rnumpx.test( ret ) && rnum.test( ret ) ) {
+ // Remember the original values
+ var left = style.left, rsLeft = elem.runtimeStyle.left;
+
+ // Put in the new values to get a computed value out
+ elem.runtimeStyle.left = elem.currentStyle.left;
+ style.left = camelCase === "fontSize" ? "1em" : (ret || 0);
+ ret = style.pixelLeft + "px";
+
+ // Revert the changed values
+ style.left = left;
+ elem.runtimeStyle.left = rsLeft;
+ }
+ }
+
+ return ret;
+ },
+
+ // A method for quickly swapping in/out CSS properties to get correct calculations
+ swap: function( elem, options, callback ) {
+ var old = {};
+
+ // Remember the old values, and insert the new ones
+ for ( var name in options ) {
+ old[ name ] = elem.style[ name ];
+ elem.style[ name ] = options[ name ];
+ }
+
+ callback.call( elem );
+
+ // Revert the old values
+ for ( var name in options ) {
+ elem.style[ name ] = old[ name ];
+ }
+ }
+});
+
+if ( jQuery.expr && jQuery.expr.filters ) {
+ jQuery.expr.filters.hidden = function( elem ) {
+ var width = elem.offsetWidth, height = elem.offsetHeight,
+ skip = elem.nodeName.toLowerCase() === "tr";
+
+ return width === 0 && height === 0 && !skip ?
+ true :
+ width > 0 && height > 0 && !skip ?
+ false :
+ jQuery.curCSS(elem, "display") === "none";
+ };
+
+ jQuery.expr.filters.visible = function( elem ) {
+ return !jQuery.expr.filters.hidden( elem );
+ };
+}
+var jsc = now(),
+ rscript = /<script(.|\s)*?\/script>/gi,
+ rselectTextarea = /select|textarea/i,
+ rinput = /color|date|datetime|email|hidden|month|number|password|range|search|tel|text|time|url|week/i,
+ jsre = /=\?(&|$)/,
+ rquery = /\?/,
+ rts = /(\?|&)_=.*?(&|$)/,
+ rurl = /^(\w+:)?\/\/([^\/?#]+)/,
+ r20 = /%20/g,
+
+ // Keep a copy of the old load method
+ _load = jQuery.fn.load;
+
+jQuery.fn.extend({
+ load: function( url, params, callback ) {
+ if ( typeof url !== "string" ) {
+ return _load.call( this, url );
+
+		// Don't make a request if there are no elements to load into
+ } else if ( !this.length ) {
+ return this;
+ }
+
+ var off = url.indexOf(" ");
+ if ( off >= 0 ) {
+ var selector = url.slice(off, url.length);
+ url = url.slice(0, off);
+ }
+
+ // Default to a GET request
+ var type = "GET";
+
+ // If the second parameter was provided
+ if ( params ) {
+ // If it's a function
+ if ( jQuery.isFunction( params ) ) {
+ // We assume that it's the callback
+ callback = params;
+ params = null;
+
+ // Otherwise, build a param string
+ } else if ( typeof params === "object" ) {
+ params = jQuery.param( params, jQuery.ajaxSettings.traditional );
+ type = "POST";
+ }
+ }
+
+ var self = this;
+
+ // Request the remote document
+ jQuery.ajax({
+ url: url,
+ type: type,
+ dataType: "html",
+ data: params,
+ complete: function( res, status ) {
+ // If successful, inject the HTML into all the matched elements
+ if ( status === "success" || status === "notmodified" ) {
+ // See if a selector was specified
+ self.html( selector ?
+ // Create a dummy div to hold the results
+ jQuery("<div />")
+ // inject the contents of the document in, removing the scripts
+ // to avoid any 'Permission Denied' errors in IE
+ .append(res.responseText.replace(rscript, ""))
+
+ // Locate the specified elements
+ .find(selector) :
+
+ // If not, just inject the full result
+ res.responseText );
+ }
+
+ if ( callback ) {
+ self.each( callback, [res.responseText, status, res] );
+ }
+ }
+ });
+
+ return this;
+ },
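+	// Illustrative usage (editorial): a space in the URL splits off a
+	// selector so only a fragment of the response is injected:
+	//   jQuery("#sidebar").load("/panel.html #content", function() {
+	//     // runs after injection
+	//   });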
+
+ serialize: function() {
+ return jQuery.param(this.serializeArray());
+ },
+ serializeArray: function() {
+ return this.map(function() {
+ return this.elements ? jQuery.makeArray(this.elements) : this;
+ })
+ .filter(function() {
+ return this.name && !this.disabled &&
+ (this.checked || rselectTextarea.test(this.nodeName) ||
+ rinput.test(this.type));
+ })
+ .map(function( i, elem ) {
+ var val = jQuery(this).val();
+
+ return val == null ?
+ null :
+ jQuery.isArray(val) ?
+ jQuery.map( val, function( val, i ) {
+ return { name: elem.name, value: val };
+ }) :
+ { name: elem.name, value: val };
+ }).get();
+ }
+});
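+// Illustrative example (editorial): serializeArray() yields successful
+// form controls as name/value pairs, and serialize() URL-encodes them:
+//   jQuery("form").serializeArray();  // [ { name: "user", value: "bob" }, ... ]
+//   jQuery("form").serialize();       // "user=bob"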
+
+// Attach a bunch of functions for handling common AJAX events
+jQuery.each( "ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "), function( i, o ) {
+ jQuery.fn[o] = function( f ) {
+ return this.bind(o, f);
+ };
+});
+
+jQuery.extend({
+
+ get: function( url, data, callback, type ) {
+		// shift arguments if the data argument was omitted
+ if ( jQuery.isFunction( data ) ) {
+ type = type || callback;
+ callback = data;
+ data = null;
+ }
+
+ return jQuery.ajax({
+ type: "GET",
+ url: url,
+ data: data,
+ success: callback,
+ dataType: type
+ });
+ },
+
+ getScript: function( url, callback ) {
+ return jQuery.get(url, null, callback, "script");
+ },
+
+ getJSON: function( url, data, callback ) {
+ return jQuery.get(url, data, callback, "json");
+ },
+
+ post: function( url, data, callback, type ) {
+		// shift arguments if the data argument was omitted
+ if ( jQuery.isFunction( data ) ) {
+ type = type || callback;
+ callback = data;
+ data = {};
+ }
+
+ return jQuery.ajax({
+ type: "POST",
+ url: url,
+ data: data,
+ success: callback,
+ dataType: type
+ });
+ },
+
+ ajaxSetup: function( settings ) {
+ jQuery.extend( jQuery.ajaxSettings, settings );
+ },
+
+ ajaxSettings: {
+ url: location.href,
+ global: true,
+ type: "GET",
+ contentType: "application/x-www-form-urlencoded",
+ processData: true,
+ async: true,
+ /*
+ timeout: 0,
+ data: null,
+ username: null,
+ password: null,
+ traditional: false,
+ */
+		// Create the request object; Microsoft failed to properly
+		// implement XMLHttpRequest in IE7 (it can't request local files),
+		// so we use the ActiveXObject when it is available.
+		// This function can be overridden by calling jQuery.ajaxSetup.
+ xhr: window.XMLHttpRequest && (window.location.protocol !== "file:" || !window.ActiveXObject) ?
+ function() {
+ return new window.XMLHttpRequest();
+ } :
+ function() {
+ try {
+ return new window.ActiveXObject("Microsoft.XMLHTTP");
+ } catch(e) {}
+ },
+ accepts: {
+ xml: "application/xml, text/xml",
+ html: "text/html",
+ script: "text/javascript, application/javascript",
+ json: "application/json, text/javascript",
+ text: "text/plain",
+ _default: "*/*"
+ }
+ },
+
+ // Last-Modified header cache for next request
+ lastModified: {},
+ etag: {},
+
+ ajax: function( origSettings ) {
+ var s = jQuery.extend(true, {}, jQuery.ajaxSettings, origSettings);
+
+ var jsonp, status, data,
+ callbackContext = origSettings && origSettings.context || s,
+ type = s.type.toUpperCase();
+
+ // convert data if not already a string
+ if ( s.data && s.processData && typeof s.data !== "string" ) {
+ s.data = jQuery.param( s.data, s.traditional );
+ }
+
+ // Handle JSONP Parameter Callbacks
+ if ( s.dataType === "jsonp" ) {
+ if ( type === "GET" ) {
+ if ( !jsre.test( s.url ) ) {
+ s.url += (rquery.test( s.url ) ? "&" : "?") + (s.jsonp || "callback") + "=?";
+ }
+ } else if ( !s.data || !jsre.test(s.data) ) {
+ s.data = (s.data ? s.data + "&" : "") + (s.jsonp || "callback") + "=?";
+ }
+ s.dataType = "json";
+ }
+
+ // Build temporary JSONP function
+ if ( s.dataType === "json" && (s.data && jsre.test(s.data) || jsre.test(s.url)) ) {
+ jsonp = s.jsonpCallback || ("jsonp" + jsc++);
+
+ // Replace the =? sequence both in the query string and the data
+ if ( s.data ) {
+ s.data = (s.data + "").replace(jsre, "=" + jsonp + "$1");
+ }
+
+ s.url = s.url.replace(jsre, "=" + jsonp + "$1");
+
+			// Make sure that a JSONP-style response is executed properly
+ s.dataType = "script";
+
+ // Handle JSONP-style loading
+ window[ jsonp ] = window[ jsonp ] || function( tmp ) {
+ data = tmp;
+ success();
+ complete();
+ // Garbage collect
+ window[ jsonp ] = undefined;
+
+ try {
+ delete window[ jsonp ];
+ } catch(e) {}
+
+ if ( head ) {
+ head.removeChild( script );
+ }
+ };
+ }
+
+ if ( s.dataType === "script" && s.cache === null ) {
+ s.cache = false;
+ }
+
+ if ( s.cache === false && type === "GET" ) {
+ var ts = now();
+
+ // try replacing _= if it is there
+ var ret = s.url.replace(rts, "$1_=" + ts + "$2");
+
+ // if nothing was replaced, add timestamp to the end
+ s.url = ret + ((ret === s.url) ? (rquery.test(s.url) ? "&" : "?") + "_=" + ts : "");
+ }
+
+ // If data is available, append data to url for get requests
+ if ( s.data && type === "GET" ) {
+ s.url += (rquery.test(s.url) ? "&" : "?") + s.data;
+ }
+
+ // Watch for a new set of requests
+ if ( s.global && ! jQuery.active++ ) {
+ jQuery.event.trigger( "ajaxStart" );
+ }
+
+ // Matches an absolute URL, and saves the domain
+ var parts = rurl.exec( s.url ),
+ remote = parts && (parts[1] && parts[1] !== location.protocol || parts[2] !== location.host);
+
+ // If we're requesting a remote document
+ // and trying to load JSON or Script with a GET
+ if ( s.dataType === "script" && type === "GET" && remote ) {
+ var head = document.getElementsByTagName("head")[0] || document.documentElement;
+ var script = document.createElement("script");
+ script.src = s.url;
+ if ( s.scriptCharset ) {
+ script.charset = s.scriptCharset;
+ }
+
+ // Handle Script loading
+ if ( !jsonp ) {
+ var done = false;
+
+ // Attach handlers for all browsers
+ script.onload = script.onreadystatechange = function() {
+ if ( !done && (!this.readyState ||
+ this.readyState === "loaded" || this.readyState === "complete") ) {
+ done = true;
+ success();
+ complete();
+
+ // Handle memory leak in IE
+ script.onload = script.onreadystatechange = null;
+ if ( head && script.parentNode ) {
+ head.removeChild( script );
+ }
+ }
+ };
+ }
+
+ // Use insertBefore instead of appendChild to circumvent an IE6 bug.
+ // This arises when a base node is used (#2709 and #4378).
+ head.insertBefore( script, head.firstChild );
+
+ // We handle everything using the script element injection
+ return undefined;
+ }
+
+ var requestDone = false;
+
+ // Create the request object
+ var xhr = s.xhr();
+
+ if ( !xhr ) {
+ return;
+ }
+
+ // Open the socket
+		// Passing a null username generates a login popup on Opera (#2865)
+ if ( s.username ) {
+ xhr.open(type, s.url, s.async, s.username, s.password);
+ } else {
+ xhr.open(type, s.url, s.async);
+ }
+
+ // Need an extra try/catch for cross domain requests in Firefox 3
+ try {
+ // Set the correct header, if data is being sent
+ if ( s.data || origSettings && origSettings.contentType ) {
+ xhr.setRequestHeader("Content-Type", s.contentType);
+ }
+
+ // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
+ if ( s.ifModified ) {
+ if ( jQuery.lastModified[s.url] ) {
+ xhr.setRequestHeader("If-Modified-Since", jQuery.lastModified[s.url]);
+ }
+
+ if ( jQuery.etag[s.url] ) {
+ xhr.setRequestHeader("If-None-Match", jQuery.etag[s.url]);
+ }
+ }
+
+ // Set header so the called script knows that it's an XMLHttpRequest
+ // Only send the header if it's not a remote XHR
+ if ( !remote ) {
+ xhr.setRequestHeader("X-Requested-With", "XMLHttpRequest");
+ }
+
+ // Set the Accepts header for the server, depending on the dataType
+ xhr.setRequestHeader("Accept", s.dataType && s.accepts[ s.dataType ] ?
+ s.accepts[ s.dataType ] + ", */*" :
+ s.accepts._default );
+ } catch(e) {}
+
+ // Allow custom headers/mimetypes and early abort
+ if ( s.beforeSend && s.beforeSend.call(callbackContext, xhr, s) === false ) {
+ // Handle the global AJAX counter
+ if ( s.global && ! --jQuery.active ) {
+ jQuery.event.trigger( "ajaxStop" );
+ }
+
+			// close the opened socket
+ xhr.abort();
+ return false;
+ }
+
+ if ( s.global ) {
+ trigger("ajaxSend", [xhr, s]);
+ }
+
+ // Wait for a response to come back
+ var onreadystatechange = xhr.onreadystatechange = function( isTimeout ) {
+ // The request was aborted
+ if ( !xhr || xhr.readyState === 0 || isTimeout === "abort" ) {
+ // Opera doesn't call onreadystatechange before this point
+ // so we simulate the call
+ if ( !requestDone ) {
+ complete();
+ }
+
+ requestDone = true;
+ if ( xhr ) {
+ xhr.onreadystatechange = jQuery.noop;
+ }
+
+ // The transfer is complete and the data is available, or the request timed out
+ } else if ( !requestDone && xhr && (xhr.readyState === 4 || isTimeout === "timeout") ) {
+ requestDone = true;
+ xhr.onreadystatechange = jQuery.noop;
+
+ status = isTimeout === "timeout" ?
+ "timeout" :
+ !jQuery.httpSuccess( xhr ) ?
+ "error" :
+ s.ifModified && jQuery.httpNotModified( xhr, s.url ) ?
+ "notmodified" :
+ "success";
+
+ var errMsg;
+
+ if ( status === "success" ) {
+ // Watch for, and catch, XML document parse errors
+ try {
+ // process the data (runs the xml through httpData regardless of callback)
+ data = jQuery.httpData( xhr, s.dataType, s );
+ } catch(err) {
+ status = "parsererror";
+ errMsg = err;
+ }
+ }
+
+ // Make sure that the request was successful or notmodified
+ if ( status === "success" || status === "notmodified" ) {
+ // JSONP handles its own success callback
+ if ( !jsonp ) {
+ success();
+ }
+ } else {
+ jQuery.handleError(s, xhr, status, errMsg);
+ }
+
+ // Fire the complete handlers
+ complete();
+
+ if ( isTimeout === "timeout" ) {
+ xhr.abort();
+ }
+
+ // Stop memory leaks
+ if ( s.async ) {
+ xhr = null;
+ }
+ }
+ };
+
+ // Override the abort handler, if we can (IE doesn't allow it, but that's OK)
+ // Opera doesn't fire onreadystatechange at all on abort
+ try {
+ var oldAbort = xhr.abort;
+ xhr.abort = function() {
+ if ( xhr ) {
+ oldAbort.call( xhr );
+ }
+
+ onreadystatechange( "abort" );
+ };
+ } catch(e) { }
+
+ // Timeout checker
+ if ( s.async && s.timeout > 0 ) {
+ setTimeout(function() {
+ // Check to see if the request is still happening
+ if ( xhr && !requestDone ) {
+ onreadystatechange( "timeout" );
+ }
+ }, s.timeout);
+ }
+
+ // Send the data
+ try {
+ xhr.send( type === "POST" || type === "PUT" || type === "DELETE" ? s.data : null );
+ } catch(e) {
+ jQuery.handleError(s, xhr, null, e);
+ // Fire the complete handlers
+ complete();
+ }
+
+		// Firefox 1.5 doesn't fire onreadystatechange for sync requests
+ if ( !s.async ) {
+ onreadystatechange();
+ }
+
+ function success() {
+ // If a local callback was specified, fire it and pass it the data
+ if ( s.success ) {
+ s.success.call( callbackContext, data, status, xhr );
+ }
+
+ // Fire the global callback
+ if ( s.global ) {
+ trigger( "ajaxSuccess", [xhr, s] );
+ }
+ }
+
+ function complete() {
+ // Process result
+ if ( s.complete ) {
+ s.complete.call( callbackContext, xhr, status);
+ }
+
+ // The request was completed
+ if ( s.global ) {
+ trigger( "ajaxComplete", [xhr, s] );
+ }
+
+ // Handle the global AJAX counter
+ if ( s.global && ! --jQuery.active ) {
+ jQuery.event.trigger( "ajaxStop" );
+ }
+ }
+
+ function trigger(type, args) {
+ (s.context ? jQuery(s.context) : jQuery.event).trigger(type, args);
+ }
+
+ // return XMLHttpRequest to allow aborting the request etc.
+ return xhr;
+ },
+
+ handleError: function( s, xhr, status, e ) {
+ // If a local callback was specified, fire it
+ if ( s.error ) {
+ s.error.call( s.context || s, xhr, status, e );
+ }
+
+ // Fire the global callback
+ if ( s.global ) {
+ (s.context ? jQuery(s.context) : jQuery.event).trigger( "ajaxError", [xhr, s, e] );
+ }
+ },
+
+ // Counter for holding the number of active queries
+ active: 0,
+
+ // Determines if an XMLHttpRequest was successful or not
+ httpSuccess: function( xhr ) {
+ try {
+			// IE sometimes returns 1223 when it should be 204, so treat it as a success; see #1450
+ return !xhr.status && location.protocol === "file:" ||
+ // Opera returns 0 when status is 304
+ ( xhr.status >= 200 && xhr.status < 300 ) ||
+ xhr.status === 304 || xhr.status === 1223 || xhr.status === 0;
+ } catch(e) {}
+
+ return false;
+ },
+
+ // Determines if an XMLHttpRequest returns NotModified
+ httpNotModified: function( xhr, url ) {
+ var lastModified = xhr.getResponseHeader("Last-Modified"),
+ etag = xhr.getResponseHeader("Etag");
+
+ if ( lastModified ) {
+ jQuery.lastModified[url] = lastModified;
+ }
+
+ if ( etag ) {
+ jQuery.etag[url] = etag;
+ }
+
+ // Opera returns 0 when status is 304
+ return xhr.status === 304 || xhr.status === 0;
+ },
+
+ httpData: function( xhr, type, s ) {
+ var ct = xhr.getResponseHeader("content-type") || "",
+ xml = type === "xml" || !type && ct.indexOf("xml") >= 0,
+ data = xml ? xhr.responseXML : xhr.responseText;
+
+ if ( xml && data.documentElement.nodeName === "parsererror" ) {
+ jQuery.error( "parsererror" );
+ }
+
+ // Allow a pre-filtering function to sanitize the response
+ // s is checked to keep backwards compatibility
+ if ( s && s.dataFilter ) {
+ data = s.dataFilter( data, type );
+ }
+
+ // The filter can actually parse the response
+ if ( typeof data === "string" ) {
+ // Get the JavaScript object, if JSON is used.
+ if ( type === "json" || !type && ct.indexOf("json") >= 0 ) {
+ data = jQuery.parseJSON( data );
+
+ // If the type is "script", eval it in global context
+ } else if ( type === "script" || !type && ct.indexOf("javascript") >= 0 ) {
+ jQuery.globalEval( data );
+ }
+ }
+
+ return data;
+ },
+
+ // Serialize an array of form elements or a set of
+ // key/values into a query string
+ param: function( a, traditional ) {
+ var s = [];
+
+ // Set traditional to true for jQuery <= 1.3.2 behavior.
+ if ( traditional === undefined ) {
+ traditional = jQuery.ajaxSettings.traditional;
+ }
+
+ // If an array was passed in, assume that it is an array of form elements.
+ if ( jQuery.isArray(a) || a.jquery ) {
+ // Serialize the form elements
+ jQuery.each( a, function() {
+ add( this.name, this.value );
+ });
+
+ } else {
+ // If traditional, encode the "old" way (the way 1.3.2 or older
+ // did it), otherwise encode params recursively.
+ for ( var prefix in a ) {
+ buildParams( prefix, a[prefix] );
+ }
+ }
+
+ // Return the resulting serialization
+ return s.join("&").replace(r20, "+");
+
+ function buildParams( prefix, obj ) {
+ if ( jQuery.isArray(obj) ) {
+ // Serialize array item.
+ jQuery.each( obj, function( i, v ) {
+ if ( traditional || /\[\]$/.test( prefix ) ) {
+ // Treat each array item as a scalar.
+ add( prefix, v );
+ } else {
+ // If array item is non-scalar (array or object), encode its
+ // numeric index to resolve deserialization ambiguity issues.
+ // Note that rack (as of 1.0.0) can't currently deserialize
+ // nested arrays properly, and attempting to do so may cause
+ // a server error. Possible fixes are to modify rack's
+ // deserialization algorithm or to provide an option or flag
+ // to force array serialization to be shallow.
+ buildParams( prefix + "[" + ( typeof v === "object" || jQuery.isArray(v) ? i : "" ) + "]", v );
+ }
+ });
+
+ } else if ( !traditional && obj != null && typeof obj === "object" ) {
+ // Serialize object item.
+ jQuery.each( obj, function( k, v ) {
+ buildParams( prefix + "[" + k + "]", v );
+ });
+
+ } else {
+ // Serialize scalar item.
+ add( prefix, obj );
+ }
+ }
+
+ function add( key, value ) {
+ // If value is a function, invoke it and return its value
+ value = jQuery.isFunction(value) ? value() : value;
+ s[ s.length ] = encodeURIComponent(key) + "=" + encodeURIComponent(value);
+ }
+ }
+});
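// [Editor's note] A usage sketch of the AJAX API defined above; the URLs
// and callbacks are hypothetical, not part of the patch:
//
//     jQuery.ajax({
//       url: "/db/_all_docs",
//       type: "GET",
//       dataType: "json",
//       data: { limit: 10 },                   // appended to the URL for GET requests
//       success: function( data, status, xhr ) { /* ... */ },
//       error: function( xhr, status, err ) { /* ... */ }
//     });
//
//     // JSONP: the "=?" placeholder is rewritten to a generated callback name:
//     jQuery.getJSON("http://example.com/api?callback=?", function( data ) {
//       /* ... */
//     });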
+var elemdisplay = {},
+ rfxtypes = /toggle|show|hide/,
+ rfxnum = /^([+-]=)?([\d+-.]+)(.*)$/,
+ timerId,
+ fxAttrs = [
+ // height animations
+ [ "height", "marginTop", "marginBottom", "paddingTop", "paddingBottom" ],
+ // width animations
+ [ "width", "marginLeft", "marginRight", "paddingLeft", "paddingRight" ],
+ // opacity animations
+ [ "opacity" ]
+ ];
+
+jQuery.fn.extend({
+ show: function( speed, callback ) {
+ if ( speed || speed === 0) {
+ return this.animate( genFx("show", 3), speed, callback);
+
+ } else {
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ var old = jQuery.data(this[i], "olddisplay");
+
+ this[i].style.display = old || "";
+
+ if ( jQuery.css(this[i], "display") === "none" ) {
+ var nodeName = this[i].nodeName, display;
+
+ if ( elemdisplay[ nodeName ] ) {
+ display = elemdisplay[ nodeName ];
+
+ } else {
+ var elem = jQuery("<" + nodeName + " />").appendTo("body");
+
+ display = elem.css("display");
+
+ if ( display === "none" ) {
+ display = "block";
+ }
+
+ elem.remove();
+
+ elemdisplay[ nodeName ] = display;
+ }
+
+ jQuery.data(this[i], "olddisplay", display);
+ }
+ }
+
+ // Set the display of the elements in a second loop
+ // to avoid the constant reflow
+ for ( var j = 0, k = this.length; j < k; j++ ) {
+ this[j].style.display = jQuery.data(this[j], "olddisplay") || "";
+ }
+
+ return this;
+ }
+ },
+
+ hide: function( speed, callback ) {
+ if ( speed || speed === 0 ) {
+ return this.animate( genFx("hide", 3), speed, callback);
+
+ } else {
+ for ( var i = 0, l = this.length; i < l; i++ ) {
+ var old = jQuery.data(this[i], "olddisplay");
+ if ( !old && old !== "none" ) {
+ jQuery.data(this[i], "olddisplay", jQuery.css(this[i], "display"));
+ }
+ }
+
+ // Set the display of the elements in a second loop
+ // to avoid the constant reflow
+ for ( var j = 0, k = this.length; j < k; j++ ) {
+ this[j].style.display = "none";
+ }
+
+ return this;
+ }
+ },
+
+ // Save the old toggle function
+ _toggle: jQuery.fn.toggle,
+
+ toggle: function( fn, fn2 ) {
+ var bool = typeof fn === "boolean";
+
+ if ( jQuery.isFunction(fn) && jQuery.isFunction(fn2) ) {
+ this._toggle.apply( this, arguments );
+
+ } else if ( fn == null || bool ) {
+ this.each(function() {
+ var state = bool ? fn : jQuery(this).is(":hidden");
+ jQuery(this)[ state ? "show" : "hide" ]();
+ });
+
+ } else {
+ this.animate(genFx("toggle", 3), fn, fn2);
+ }
+
+ return this;
+ },
+
+ fadeTo: function( speed, to, callback ) {
+ return this.filter(":hidden").css("opacity", 0).show().end()
+ .animate({opacity: to}, speed, callback);
+ },
+
+ animate: function( prop, speed, easing, callback ) {
+ var optall = jQuery.speed(speed, easing, callback);
+
+ if ( jQuery.isEmptyObject( prop ) ) {
+ return this.each( optall.complete );
+ }
+
+ return this[ optall.queue === false ? "each" : "queue" ](function() {
+ var opt = jQuery.extend({}, optall), p,
+ hidden = this.nodeType === 1 && jQuery(this).is(":hidden"),
+ self = this;
+
+ for ( p in prop ) {
+ var name = p.replace(rdashAlpha, fcamelCase);
+
+ if ( p !== name ) {
+ prop[ name ] = prop[ p ];
+ delete prop[ p ];
+ p = name;
+ }
+
+ if ( prop[p] === "hide" && hidden || prop[p] === "show" && !hidden ) {
+ return opt.complete.call(this);
+ }
+
+ if ( ( p === "height" || p === "width" ) && this.style ) {
+ // Store display property
+ opt.display = jQuery.css(this, "display");
+
+ // Make sure that nothing sneaks out
+ opt.overflow = this.style.overflow;
+ }
+
+ if ( jQuery.isArray( prop[p] ) ) {
+ // Create (if needed) and add to specialEasing
+ (opt.specialEasing = opt.specialEasing || {})[p] = prop[p][1];
+ prop[p] = prop[p][0];
+ }
+ }
+
+ if ( opt.overflow != null ) {
+ this.style.overflow = "hidden";
+ }
+
+ opt.curAnim = jQuery.extend({}, prop);
+
+ jQuery.each( prop, function( name, val ) {
+ var e = new jQuery.fx( self, opt, name );
+
+ if ( rfxtypes.test(val) ) {
+ e[ val === "toggle" ? hidden ? "show" : "hide" : val ]( prop );
+
+ } else {
+ var parts = rfxnum.exec(val),
+ start = e.cur(true) || 0;
+
+ if ( parts ) {
+ var end = parseFloat( parts[2] ),
+ unit = parts[3] || "px";
+
+ // We need to compute starting value
+ if ( unit !== "px" ) {
+ self.style[ name ] = (end || 1) + unit;
+ start = ((end || 1) / e.cur(true)) * start;
+ self.style[ name ] = start + unit;
+ }
+
+ // If a +=/-= token was provided, we're doing a relative animation
+ if ( parts[1] ) {
+ end = ((parts[1] === "-=" ? -1 : 1) * end) + start;
+ }
+
+ e.custom( start, end, unit );
+
+ } else {
+ e.custom( start, val, "" );
+ }
+ }
+ });
+
+ // For JS strict compliance
+ return true;
+ });
+ },
+
+ stop: function( clearQueue, gotoEnd ) {
+ var timers = jQuery.timers;
+
+ if ( clearQueue ) {
+ this.queue([]);
+ }
+
+ this.each(function() {
+ // go in reverse order so anything added to the queue during the loop is ignored
+ for ( var i = timers.length - 1; i >= 0; i-- ) {
+ if ( timers[i].elem === this ) {
+ if (gotoEnd) {
+ // force the next step to be the last
+ timers[i](true);
+ }
+
+ timers.splice(i, 1);
+ }
+ }
+ });
+
+ // start the next in the queue if the last step wasn't forced
+ if ( !gotoEnd ) {
+ this.dequeue();
+ }
+
+ return this;
+ }
+
+});
+
+// Generate shortcuts for custom animations
+jQuery.each({
+ slideDown: genFx("show", 1),
+ slideUp: genFx("hide", 1),
+ slideToggle: genFx("toggle", 1),
+ fadeIn: { opacity: "show" },
+ fadeOut: { opacity: "hide" }
+}, function( name, props ) {
+ jQuery.fn[ name ] = function( speed, callback ) {
+ return this.animate( props, speed, callback );
+ };
+});
+
+jQuery.extend({
+ speed: function( speed, easing, fn ) {
+ var opt = speed && typeof speed === "object" ? speed : {
+ complete: fn || !fn && easing ||
+ jQuery.isFunction( speed ) && speed,
+ duration: speed,
+ easing: fn && easing || easing && !jQuery.isFunction(easing) && easing
+ };
+
+ opt.duration = jQuery.fx.off ? 0 : typeof opt.duration === "number" ? opt.duration :
+ jQuery.fx.speeds[opt.duration] || jQuery.fx.speeds._default;
+
+ // Queueing
+ opt.old = opt.complete;
+ opt.complete = function() {
+ if ( opt.queue !== false ) {
+ jQuery(this).dequeue();
+ }
+ if ( jQuery.isFunction( opt.old ) ) {
+ opt.old.call( this );
+ }
+ };
+
+ return opt;
+ },
+
+ easing: {
+ linear: function( p, n, firstNum, diff ) {
+ return firstNum + diff * p;
+ },
+ swing: function( p, n, firstNum, diff ) {
+ return ((-Math.cos(p*Math.PI)/2) + 0.5) * diff + firstNum;
+ }
+ },
+
+ timers: [],
+
+ fx: function( elem, options, prop ) {
+ this.options = options;
+ this.elem = elem;
+ this.prop = prop;
+
+ if ( !options.orig ) {
+ options.orig = {};
+ }
+ }
+
+});
+
+jQuery.fx.prototype = {
+ // Simple function for setting a style value
+ update: function() {
+ if ( this.options.step ) {
+ this.options.step.call( this.elem, this.now, this );
+ }
+
+ (jQuery.fx.step[this.prop] || jQuery.fx.step._default)( this );
+
+ // Set display property to block for height/width animations
+ if ( ( this.prop === "height" || this.prop === "width" ) && this.elem.style ) {
+ this.elem.style.display = "block";
+ }
+ },
+
+ // Get the current size
+ cur: function( force ) {
+ if ( this.elem[this.prop] != null && (!this.elem.style || this.elem.style[this.prop] == null) ) {
+ return this.elem[ this.prop ];
+ }
+
+ var r = parseFloat(jQuery.css(this.elem, this.prop, force));
+ return r && r > -10000 ? r : parseFloat(jQuery.curCSS(this.elem, this.prop)) || 0;
+ },
+
+ // Start an animation from one number to another
+ custom: function( from, to, unit ) {
+ this.startTime = now();
+ this.start = from;
+ this.end = to;
+ this.unit = unit || this.unit || "px";
+ this.now = this.start;
+ this.pos = this.state = 0;
+
+ var self = this;
+ function t( gotoEnd ) {
+ return self.step(gotoEnd);
+ }
+
+ t.elem = this.elem;
+
+ if ( t() && jQuery.timers.push(t) && !timerId ) {
+ timerId = setInterval(jQuery.fx.tick, 13);
+ }
+ },
+
+ // Simple 'show' function
+ show: function() {
+ // Remember where we started, so that we can go back to it later
+ this.options.orig[this.prop] = jQuery.style( this.elem, this.prop );
+ this.options.show = true;
+
+ // Begin the animation
+ // Make sure that we start at a small width/height to avoid any
+ // flash of content
+ this.custom(this.prop === "width" || this.prop === "height" ? 1 : 0, this.cur());
+
+ // Start by showing the element
+ jQuery( this.elem ).show();
+ },
+
+ // Simple 'hide' function
+ hide: function() {
+ // Remember where we started, so that we can go back to it later
+ this.options.orig[this.prop] = jQuery.style( this.elem, this.prop );
+ this.options.hide = true;
+
+ // Begin the animation
+ this.custom(this.cur(), 0);
+ },
+
+ // Each step of an animation
+ step: function( gotoEnd ) {
+ var t = now(), done = true;
+
+ if ( gotoEnd || t >= this.options.duration + this.startTime ) {
+ this.now = this.end;
+ this.pos = this.state = 1;
+ this.update();
+
+ this.options.curAnim[ this.prop ] = true;
+
+ for ( var i in this.options.curAnim ) {
+ if ( this.options.curAnim[i] !== true ) {
+ done = false;
+ }
+ }
+
+ if ( done ) {
+ if ( this.options.display != null ) {
+ // Reset the overflow
+ this.elem.style.overflow = this.options.overflow;
+
+ // Reset the display
+ var old = jQuery.data(this.elem, "olddisplay");
+ this.elem.style.display = old ? old : this.options.display;
+
+ if ( jQuery.css(this.elem, "display") === "none" ) {
+ this.elem.style.display = "block";
+ }
+ }
+
+ // Hide the element if the "hide" operation was done
+ if ( this.options.hide ) {
+ jQuery(this.elem).hide();
+ }
+
+ // Reset the properties, if the item has been hidden or shown
+ if ( this.options.hide || this.options.show ) {
+ for ( var p in this.options.curAnim ) {
+ jQuery.style(this.elem, p, this.options.orig[p]);
+ }
+ }
+
+ // Execute the complete function
+ this.options.complete.call( this.elem );
+ }
+
+ return false;
+
+ } else {
+ var n = t - this.startTime;
+ this.state = n / this.options.duration;
+
+ // Perform the easing function, defaults to swing
+ var specialEasing = this.options.specialEasing && this.options.specialEasing[this.prop];
+ var defaultEasing = this.options.easing || (jQuery.easing.swing ? "swing" : "linear");
+ this.pos = jQuery.easing[specialEasing || defaultEasing](this.state, n, 0, 1, this.options.duration);
+ this.now = this.start + ((this.end - this.start) * this.pos);
+
+ // Perform the next step of the animation
+ this.update();
+ }
+
+ return true;
+ }
+};
+
+jQuery.extend( jQuery.fx, {
+ tick: function() {
+ var timers = jQuery.timers;
+
+ for ( var i = 0; i < timers.length; i++ ) {
+ if ( !timers[i]() ) {
+ timers.splice(i--, 1);
+ }
+ }
+
+ if ( !timers.length ) {
+ jQuery.fx.stop();
+ }
+ },
+
+ stop: function() {
+ clearInterval( timerId );
+ timerId = null;
+ },
+
+ speeds: {
+ slow: 600,
+ fast: 200,
+ // Default speed
+ _default: 400
+ },
+
+ step: {
+ opacity: function( fx ) {
+ jQuery.style(fx.elem, "opacity", fx.now);
+ },
+
+ _default: function( fx ) {
+ if ( fx.elem.style && fx.elem.style[ fx.prop ] != null ) {
+ fx.elem.style[ fx.prop ] = (fx.prop === "width" || fx.prop === "height" ? Math.max(0, fx.now) : fx.now) + fx.unit;
+ } else {
+ fx.elem[ fx.prop ] = fx.now;
+ }
+ }
+ }
+});
+
+if ( jQuery.expr && jQuery.expr.filters ) {
+ jQuery.expr.filters.animated = function( elem ) {
+ return jQuery.grep(jQuery.timers, function( fn ) {
+ return elem === fn.elem;
+ }).length;
+ };
+}
+
+function genFx( type, num ) {
+ var obj = {};
+
+ jQuery.each( fxAttrs.concat.apply([], fxAttrs.slice(0,num)), function() {
+ obj[ this ] = type;
+ });
+
+ return obj;
+}
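// [Editor's note] A short sketch of the effects API above; selectors and
// values are hypothetical, not part of the patch:
//
//     jQuery("#sidebar").slideUp(200);        // generated genFx("hide", 1) shortcut
//     jQuery("#sidebar").fadeIn("slow");      // { opacity: "show" }, speeds.slow = 600ms
//     jQuery("#box").animate({
//       width: "+=50px",                      // relative animation via the "+=" token
//       opacity: [ 0.5, "linear" ]            // per-property easing via specialEasing
//     }, { duration: 400, complete: function() { /* ... */ } });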
+if ( "getBoundingClientRect" in document.documentElement ) {
+ jQuery.fn.offset = function( options ) {
+ var elem = this[0];
+
+ if ( options ) {
+ return this.each(function( i ) {
+ jQuery.offset.setOffset( this, options, i );
+ });
+ }
+
+ if ( !elem || !elem.ownerDocument ) {
+ return null;
+ }
+
+ if ( elem === elem.ownerDocument.body ) {
+ return jQuery.offset.bodyOffset( elem );
+ }
+
+ var box = elem.getBoundingClientRect(), doc = elem.ownerDocument, body = doc.body, docElem = doc.documentElement,
+ clientTop = docElem.clientTop || body.clientTop || 0, clientLeft = docElem.clientLeft || body.clientLeft || 0,
+ top = box.top + (self.pageYOffset || jQuery.support.boxModel && docElem.scrollTop || body.scrollTop ) - clientTop,
+ left = box.left + (self.pageXOffset || jQuery.support.boxModel && docElem.scrollLeft || body.scrollLeft) - clientLeft;
+
+ return { top: top, left: left };
+ };
+
+} else {
+ jQuery.fn.offset = function( options ) {
+ var elem = this[0];
+
+ if ( options ) {
+ return this.each(function( i ) {
+ jQuery.offset.setOffset( this, options, i );
+ });
+ }
+
+ if ( !elem || !elem.ownerDocument ) {
+ return null;
+ }
+
+ if ( elem === elem.ownerDocument.body ) {
+ return jQuery.offset.bodyOffset( elem );
+ }
+
+ jQuery.offset.initialize();
+
+ var offsetParent = elem.offsetParent, prevOffsetParent = elem,
+ doc = elem.ownerDocument, computedStyle, docElem = doc.documentElement,
+ body = doc.body, defaultView = doc.defaultView,
+ prevComputedStyle = defaultView ? defaultView.getComputedStyle( elem, null ) : elem.currentStyle,
+ top = elem.offsetTop, left = elem.offsetLeft;
+
+ while ( (elem = elem.parentNode) && elem !== body && elem !== docElem ) {
+ if ( jQuery.offset.supportsFixedPosition && prevComputedStyle.position === "fixed" ) {
+ break;
+ }
+
+ computedStyle = defaultView ? defaultView.getComputedStyle(elem, null) : elem.currentStyle;
+ top -= elem.scrollTop;
+ left -= elem.scrollLeft;
+
+ if ( elem === offsetParent ) {
+ top += elem.offsetTop;
+ left += elem.offsetLeft;
+
+ if ( jQuery.offset.doesNotAddBorder && !(jQuery.offset.doesAddBorderForTableAndCells && /^t(able|d|h)$/i.test(elem.nodeName)) ) {
+ top += parseFloat( computedStyle.borderTopWidth ) || 0;
+ left += parseFloat( computedStyle.borderLeftWidth ) || 0;
+ }
+
+ prevOffsetParent = offsetParent, offsetParent = elem.offsetParent;
+ }
+
+ if ( jQuery.offset.subtractsBorderForOverflowNotVisible && computedStyle.overflow !== "visible" ) {
+ top += parseFloat( computedStyle.borderTopWidth ) || 0;
+ left += parseFloat( computedStyle.borderLeftWidth ) || 0;
+ }
+
+ prevComputedStyle = computedStyle;
+ }
+
+ if ( prevComputedStyle.position === "relative" || prevComputedStyle.position === "static" ) {
+ top += body.offsetTop;
+ left += body.offsetLeft;
+ }
+
+ if ( jQuery.offset.supportsFixedPosition && prevComputedStyle.position === "fixed" ) {
+ top += Math.max( docElem.scrollTop, body.scrollTop );
+ left += Math.max( docElem.scrollLeft, body.scrollLeft );
+ }
+
+ return { top: top, left: left };
+ };
+}
+
+jQuery.offset = {
+ initialize: function() {
+ var body = document.body, container = document.createElement("div"), innerDiv, checkDiv, table, td, bodyMarginTop = parseFloat( jQuery.curCSS(body, "marginTop", true) ) || 0,
+ html = "<div style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;'><div></div></div><table style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;' cellpadding='0' cellspacing='0'><tr><td></td></tr></table>";
+
+ jQuery.extend( container.style, { position: "absolute", top: 0, left: 0, margin: 0, border: 0, width: "1px", height: "1px", visibility: "hidden" } );
+
+ container.innerHTML = html;
+ body.insertBefore( container, body.firstChild );
+ innerDiv = container.firstChild;
+ checkDiv = innerDiv.firstChild;
+ td = innerDiv.nextSibling.firstChild.firstChild;
+
+ this.doesNotAddBorder = (checkDiv.offsetTop !== 5);
+ this.doesAddBorderForTableAndCells = (td.offsetTop === 5);
+
+ checkDiv.style.position = "fixed", checkDiv.style.top = "20px";
+		// Safari subtracts the parent border width here, which is 5px
+ this.supportsFixedPosition = (checkDiv.offsetTop === 20 || checkDiv.offsetTop === 15);
+ checkDiv.style.position = checkDiv.style.top = "";
+
+ innerDiv.style.overflow = "hidden", innerDiv.style.position = "relative";
+ this.subtractsBorderForOverflowNotVisible = (checkDiv.offsetTop === -5);
+
+ this.doesNotIncludeMarginInBodyOffset = (body.offsetTop !== bodyMarginTop);
+
+ body.removeChild( container );
+ body = container = innerDiv = checkDiv = table = td = null;
+ jQuery.offset.initialize = jQuery.noop;
+ },
+
+ bodyOffset: function( body ) {
+ var top = body.offsetTop, left = body.offsetLeft;
+
+ jQuery.offset.initialize();
+
+ if ( jQuery.offset.doesNotIncludeMarginInBodyOffset ) {
+ top += parseFloat( jQuery.curCSS(body, "marginTop", true) ) || 0;
+ left += parseFloat( jQuery.curCSS(body, "marginLeft", true) ) || 0;
+ }
+
+ return { top: top, left: left };
+ },
+
+ setOffset: function( elem, options, i ) {
+		// Set position first, in case top/left are set even on a static elem
+ if ( /static/.test( jQuery.curCSS( elem, "position" ) ) ) {
+ elem.style.position = "relative";
+ }
+ var curElem = jQuery( elem ),
+ curOffset = curElem.offset(),
+ curTop = parseInt( jQuery.curCSS( elem, "top", true ), 10 ) || 0,
+ curLeft = parseInt( jQuery.curCSS( elem, "left", true ), 10 ) || 0;
+
+ if ( jQuery.isFunction( options ) ) {
+ options = options.call( elem, i, curOffset );
+ }
+
+ var props = {
+ top: (options.top - curOffset.top) + curTop,
+ left: (options.left - curOffset.left) + curLeft
+ };
+
+ if ( "using" in options ) {
+ options.using.call( elem, props );
+ } else {
+ curElem.css( props );
+ }
+ }
+};
+
+
+jQuery.fn.extend({
+ position: function() {
+ if ( !this[0] ) {
+ return null;
+ }
+
+ var elem = this[0],
+
+ // Get *real* offsetParent
+ offsetParent = this.offsetParent(),
+
+ // Get correct offsets
+ offset = this.offset(),
+ parentOffset = /^body|html$/i.test(offsetParent[0].nodeName) ? { top: 0, left: 0 } : offsetParent.offset();
+
+ // Subtract element margins
+		// note: when an element has margin: auto, offsetLeft and marginLeft
+		// are the same in Safari, causing offset.left to incorrectly be 0
+ offset.top -= parseFloat( jQuery.curCSS(elem, "marginTop", true) ) || 0;
+ offset.left -= parseFloat( jQuery.curCSS(elem, "marginLeft", true) ) || 0;
+
+ // Add offsetParent borders
+ parentOffset.top += parseFloat( jQuery.curCSS(offsetParent[0], "borderTopWidth", true) ) || 0;
+ parentOffset.left += parseFloat( jQuery.curCSS(offsetParent[0], "borderLeftWidth", true) ) || 0;
+
+ // Subtract the two offsets
+ return {
+ top: offset.top - parentOffset.top,
+ left: offset.left - parentOffset.left
+ };
+ },
+
+ offsetParent: function() {
+ return this.map(function() {
+ var offsetParent = this.offsetParent || document.body;
+ while ( offsetParent && (!/^body|html$/i.test(offsetParent.nodeName) && jQuery.css(offsetParent, "position") === "static") ) {
+ offsetParent = offsetParent.offsetParent;
+ }
+ return offsetParent;
+ });
+ }
+});
+
+
+// Create scrollLeft and scrollTop methods
+jQuery.each( ["Left", "Top"], function( i, name ) {
+ var method = "scroll" + name;
+
+ jQuery.fn[ method ] = function(val) {
+ var elem = this[0], win;
+
+ if ( !elem ) {
+ return null;
+ }
+
+ if ( val !== undefined ) {
+ // Set the scroll offset
+ return this.each(function() {
+ win = getWindow( this );
+
+ if ( win ) {
+ win.scrollTo(
+ !i ? val : jQuery(win).scrollLeft(),
+ i ? val : jQuery(win).scrollTop()
+ );
+
+ } else {
+ this[ method ] = val;
+ }
+ });
+ } else {
+ win = getWindow( elem );
+
+ // Return the scroll offset
+ return win ? ("pageXOffset" in win) ? win[ i ? "pageYOffset" : "pageXOffset" ] :
+ jQuery.support.boxModel && win.document.documentElement[ method ] ||
+ win.document.body[ method ] :
+ elem[ method ];
+ }
+ };
+});
+
+function getWindow( elem ) {
+ return ("scrollTo" in elem && elem.document) ?
+ elem :
+ elem.nodeType === 9 ?
+ elem.defaultView || elem.parentWindow :
+ false;
+}
+// Create innerHeight, innerWidth, outerHeight and outerWidth methods
+jQuery.each([ "Height", "Width" ], function( i, name ) {
+
+ var type = name.toLowerCase();
+
+ // innerHeight and innerWidth
+ jQuery.fn["inner" + name] = function() {
+ return this[0] ?
+ jQuery.css( this[0], type, false, "padding" ) :
+ null;
+ };
+
+ // outerHeight and outerWidth
+ jQuery.fn["outer" + name] = function( margin ) {
+ return this[0] ?
+ jQuery.css( this[0], type, false, margin ? "margin" : "border" ) :
+ null;
+ };
+
+ jQuery.fn[ type ] = function( size ) {
+ // Get window width or height
+ var elem = this[0];
+ if ( !elem ) {
+ return size == null ? null : this;
+ }
+
+ if ( jQuery.isFunction( size ) ) {
+ return this.each(function( i ) {
+ var self = jQuery( this );
+ self[ type ]( size.call( this, i, self[ type ]() ) );
+ });
+ }
+
+ return ("scrollTo" in elem && elem.document) ? // does it walk and quack like a window?
+			// Everyone else uses document.documentElement or document.body, depending on Quirks vs Standards mode
+ elem.document.compatMode === "CSS1Compat" && elem.document.documentElement[ "client" + name ] ||
+ elem.document.body[ "client" + name ] :
+
+ // Get document width or height
+ (elem.nodeType === 9) ? // is it a document
+ // Either scroll[Width/Height] or offset[Width/Height], whichever is greater
+ Math.max(
+ elem.documentElement["client" + name],
+ elem.body["scroll" + name], elem.documentElement["scroll" + name],
+ elem.body["offset" + name], elem.documentElement["offset" + name]
+ ) :
+
+ // Get or set width or height on the element
+ size === undefined ?
+ // Get width or height on the element
+ jQuery.css( elem, type ) :
+
+ // Set the width or height on the element (default to pixels if value is unitless)
+ this.css( type, typeof size === "string" ? size : size + "px" );
+ };
+
+});
+// Expose jQuery to the global object
+window.jQuery = window.$ = jQuery;
+
+})(window);
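[Editor's note] The tail of the file above wires up the offset, position,
scroll and dimension helpers. As a point of reference, a hedged usage sketch
(selectors hypothetical; not part of the patch):

    var pos = jQuery("#toolbar").offset();            // document coordinates
    jQuery("#toolbar").offset({ top: 10, left: 20 }); // routed to jQuery.offset.setOffset
    var rel = jQuery("#toolbar").position();          // relative to the offsetParent
    var w   = jQuery(window).width();                 // window-, document- and element-aware
    jQuery(window).scrollTop(0);                      // generated scrollTop/scrollLeft methods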
diff --git a/1.1.x/share/www/script/jquery.resizer.js b/1.1.x/share/www/script/jquery.resizer.js
new file mode 100644
index 00000000..42f0cc77
--- /dev/null
+++ b/1.1.x/share/www/script/jquery.resizer.js
@@ -0,0 +1,84 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+(function($) {
+
+ $.fn.makeResizable = function(options) {
+ options = options || {};
+ options.always = options.always || false;
+ options.grippie = options.grippie || null;
+ options.horizontal = options.horizontal || false;
+ options.minWidth = options.minWidth || 100;
+ options.maxWidth = options.maxWidth || null;
+ options.vertical = options.vertical || false;
+ options.minHeight = options.minHeight || 32;
+ options.maxHeight = options.maxHeight || null;
+
+ return this.each(function() {
+ if ($(this).is("textarea") && !options.always &&
+ $.browser.safari && parseInt($.browser.version) >= 522)
+        return this; // Safari 3 and later provide textarea resizing natively
+
+ var grippie = options.grippie;
+ if (!grippie) grippie = $("<div></div>").appendTo(this.parentNode);
+ grippie.addClass("grippie");
+ if (options.horizontal && options.vertical) {
+ grippie.css("cursor", "nwse-resize");
+ } else if (options.horizontal) {
+ grippie.css("cursor", "col-resize");
+ } else if (options.vertical) {
+ grippie.css("cursor", "row-resize");
+ }
+
+ var elem = $(this);
+ grippie.mousedown(function(e) {
+ var pos = {x: e.screenX, y: e.screenY};
+ var dimensions = {width: elem.width(), height: elem.height()};
+ $(document)
+ .mousemove(function(e) {
+ if (options.horizontal) {
+ var offset = e.screenX - pos.x;
+ if (offset) {
+ var newWidth = dimensions.width + offset;
+ if (newWidth >= options.minWidth &&
+ (!options.maxWidth || newWidth <= options.maxWidth)) {
+ elem.width(newWidth);
+ dimensions.width = newWidth;
+ }
+ pos.x = e.screenX;
+ }
+ }
+ if (options.vertical) {
+ var offset = e.screenY - pos.y;
+ if (offset) {
+ var newHeight = dimensions.height + offset;
+ if (newHeight >= options.minHeight &&
+ (!options.maxHeight || newHeight <= options.maxHeight)) {
+ elem.height(newHeight);
+ dimensions.height = newHeight;
+ }
+ pos.y = e.screenY;
+ }
+ }
+ document.onselectstart = function() { return false }; // for IE
+ return false;
+ })
+ .one("mouseup", function() {
+ $(document).unbind("mousemove");
+ document.onselectstart = null; // for IE
+ });
+ return true;
+ });
+ });
+ }
+
+})(jQuery);
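[Editor's note] For orientation, a minimal sketch of how makeResizable might
be applied (the selector is hypothetical; this note is not part of the patch):

    $("#editor textarea").makeResizable({
      vertical: true,      // drag the generated grippie to change height
      minHeight: 100,
      always: true         // skip the native-resize shortcut on Safari 3+
    });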
diff --git a/1.1.x/share/www/script/jquery.suggest.js b/1.1.x/share/www/script/jquery.suggest.js
new file mode 100644
index 00000000..fd94375a
--- /dev/null
+++ b/1.1.x/share/www/script/jquery.suggest.js
@@ -0,0 +1,163 @@
+// http://svn.apache.org/repos/asf/couchdb/trunk/share/www/script/jquery.suggest.js
+
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+(function($) {
+
+ function suggest(elem, options) {
+ var timer = null;
+ var prevVal = null;
+ var cache = {};
+ var cacheKeys = [];
+
+ var input = $(elem).attr("autocomplete", "off");
+ var dropdown = $('<ul style="display: none; position: absolute; z-index: 10000"></ul>')
+ .addClass(options.dropdownClass).appendTo(document.body);
+
+ input
+ .blur(function() {
+ if (timer) clearTimeout(timer);
+ setTimeout(function() { dropdown.hide() }, 200);
+ })
+ .keydown(function(e) {
+ if ($.inArray(e.keyCode, [16, 17, 18, 20, 144, 91, 93, 224]) != -1) {
+ return; // ignore modifier keys
+ }
+ if (timer) clearTimeout(timer);
+ if ($.inArray(e.keyCode, [38, 40]) != -1 ||
+ (dropdown.is(":visible") && (e.keyCode == 27 ||
+ ($.inArray(e.keyCode, [9, 13]) != -1 && getSelection())))) {
+ switch(e.keyCode) {
+ case 38: // up
+ moveUp();
+ break;
+ case 40: // down
+ moveDown();
+ break;
+ case 9: // tab
+ case 13: // return
+ commit();
+ if (e.keyCode == 9) return true;
+ break;
+ case 27: // escape
+ dropdown.hide();
+ break;
+ }
+ e.preventDefault(); e.stopPropagation();
+ return false;
+ } else {
+ timer = setTimeout(function() { suggest() }, options.delay);
+ }
+ });
+
+ function suggest(force) {
+ var newVal = $.trim(input.val());
+ if (force || newVal != prevVal) {
+ if (force || newVal.length >= options.minChars) {
+ if (options.cache && cache.hasOwnProperty(newVal)) {
+ show(cache[newVal].items, cache[newVal].render);
+ } else {
+ options.callback.apply(elem, [newVal, function(items, render) {
+ if (options.cache) {
+ if (cacheKeys.length >= options.cacheLimit) {
+ delete cache[cacheKeys.shift()];
+ }
+ cache[newVal] = {items: items, render: render};
+ cacheKeys.push(newVal);
+ }
+ show(items, render);
+ }]);
+ }
+ } else {
+ dropdown.hide();
+ }
+ prevVal = newVal;
+ }
+ }
+
+ function show(items, render) {
+ if (!items) return;
+ if (!items.length) { dropdown.hide(); return; }
+ var offset = input.offset();
+ dropdown.empty().css({
+ top: (offset.top + input.outerHeight()) + "px", left: offset.left + "px",
+ minWidth: input.css("width")
+ });
+      render = render || function(idx, value) { return value; };
+ for (var i = 0; i < items.length; i++) {
+ var item = $("<li></li>").data("value", items[i]);
+ var rendered = render(i, items[i]);
+ if (typeof(rendered) == "string") {
+ item.text(rendered);
+ } else {
+ item.append(rendered);
+ }
+ item.appendTo(dropdown);
+ }
+ dropdown.slideDown("fast");
+ dropdown.children("li").click(function(e) {
+ $(this).addClass("selected");
+ commit();
+ });
+ }
+
+ function commit() {
+ var sel = getSelection();
+ if (sel) {
+ prevVal = sel.data("value");
+ input.val(prevVal);
+ if (options.select) {
+ options.select.apply(elem, [prevVal]);
+ }
+ dropdown.hide();
+ }
+      if (timer) clearTimeout(timer);
+ }
+
+ function getSelection() {
+ if (!dropdown.is(":visible")) return null;
+ var sel = dropdown.children("li.selected");
+ return sel.length ? sel : null;
+ }
+
+ function moveDown() {
+ if (!dropdown.is(":visible")) suggest(true);
+ var sel = getSelection();
+ if (sel) sel.removeClass("selected").next().addClass("selected");
+ else dropdown.children("li:first-child").addClass("selected");
+ }
+
+ function moveUp() {
+ if (!dropdown.is(":visible")) suggest(true);
+ var sel = getSelection();
+ if (sel) sel.removeClass("selected").prev().addClass("selected");
+ else dropdown.children("li:last-child").addClass("selected");
+ }
+ }
+
+ $.fn.suggest = function(callback, options) {
+ options = $.extend({
+ cache: true,
+ cacheLimit: 10,
+ callback: callback,
+ delay: 250,
+ dropdownClass: "suggest-dropdown",
+ minChars: 1,
+ select: null
+ }, options || {});
+ return this.each(function() {
+ suggest(this, options);
+ });
+ };
+
+})(jQuery);
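[Editor's note] A hedged usage sketch of the suggest plugin above; the field
id and word list are hypothetical, not part of the patch:

    var words = ["apple", "apricot", "banana"];
    $("#search").suggest(function(value, callback) {
      // The plugin hands us the current input value plus a callback that
      // expects the matching items (and an optional render function).
      callback($.grep(words, function(w) { return w.indexOf(value) === 0; }));
    }, { minChars: 2, select: function(value) { /* chosen value */ } });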
diff --git a/1.1.x/share/www/script/json2.js b/1.1.x/share/www/script/json2.js
new file mode 100644
index 00000000..a1a3b170
--- /dev/null
+++ b/1.1.x/share/www/script/json2.js
@@ -0,0 +1,482 @@
+/*
+ http://www.JSON.org/json2.js
+ 2010-03-20
+
+ Public Domain.
+
+ NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+ See http://www.JSON.org/js.html
+
+
+ This code should be minified before deployment.
+ See http://javascript.crockford.com/jsmin.html
+
+ USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
+ NOT CONTROL.
+
+
+ This file creates a global JSON object containing two methods: stringify
+ and parse.
+
+ JSON.stringify(value, replacer, space)
+ value any JavaScript value, usually an object or array.
+
+ replacer an optional parameter that determines how object
+ values are stringified for objects. It can be a
+ function or an array of strings.
+
+ space an optional parameter that specifies the indentation
+ of nested structures. If it is omitted, the text will
+ be packed without extra whitespace. If it is a number,
+ it will specify the number of spaces to indent at each
+ level. If it is a string (such as '\t' or '&nbsp;'),
+ it contains the characters used to indent at each level.
+
+ This method produces a JSON text from a JavaScript value.
+
+ When an object value is found, if the object contains a toJSON
+ method, its toJSON method will be called and the result will be
+ stringified. A toJSON method does not serialize: it returns the
+ value represented by the name/value pair that should be serialized,
+ or undefined if nothing should be serialized. The toJSON method
+ will be passed the key associated with the value, and this will be
+            bound to the value.
+
+ For example, this would serialize Dates as ISO strings.
+
+ Date.prototype.toJSON = function (key) {
+ function f(n) {
+ // Format integers to have at least two digits.
+ return n < 10 ? '0' + n : n;
+ }
+
+ return this.getUTCFullYear() + '-' +
+ f(this.getUTCMonth() + 1) + '-' +
+ f(this.getUTCDate()) + 'T' +
+ f(this.getUTCHours()) + ':' +
+ f(this.getUTCMinutes()) + ':' +
+ f(this.getUTCSeconds()) + 'Z';
+ };
+
+ You can provide an optional replacer method. It will be passed the
+ key and value of each member, with this bound to the containing
+ object. The value that is returned from your method will be
+ serialized. If your method returns undefined, then the member will
+ be excluded from the serialization.
+
+ If the replacer parameter is an array of strings, then it will be
+ used to select the members to be serialized. It filters the results
+ such that only members with keys listed in the replacer array are
+ stringified.
+
+ Values that do not have JSON representations, such as undefined or
+ functions, will not be serialized. Such values in objects will be
+ dropped; in arrays they will be replaced with null. You can use
+ a replacer function to replace those with JSON values.
+ JSON.stringify(undefined) returns undefined.
+
+ The optional space parameter produces a stringification of the
+ value that is filled with line breaks and indentation to make it
+ easier to read.
+
+ If the space parameter is a non-empty string, then that string will
+ be used for indentation. If the space parameter is a number, then
+ the indentation will be that many spaces.
+
+ Example:
+
+ text = JSON.stringify(['e', {pluribus: 'unum'}]);
+ // text is '["e",{"pluribus":"unum"}]'
+
+
+ text = JSON.stringify(['e', {pluribus: 'unum'}], null, '\t');
+ // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'
+
+ text = JSON.stringify([new Date()], function (key, value) {
+ return this[key] instanceof Date ?
+ 'Date(' + this[key] + ')' : value;
+ });
+ // text is '["Date(---current time---)"]'
+
+
+ JSON.parse(text, reviver)
+ This method parses a JSON text to produce an object or array.
+ It can throw a SyntaxError exception.
+
+ The optional reviver parameter is a function that can filter and
+ transform the results. It receives each of the keys and values,
+ and its return value is used instead of the original value.
+ If it returns what it received, then the structure is not modified.
+ If it returns undefined then the member is deleted.
+
+ Example:
+
+ // Parse the text. Values that look like ISO date strings will
+ // be converted to Date objects.
+
+ myData = JSON.parse(text, function (key, value) {
+ var a;
+ if (typeof value === 'string') {
+ a =
+/^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
+ if (a) {
+ return new Date(Date.UTC(+a[1], +a[2] - 1, +a[3], +a[4],
+ +a[5], +a[6]));
+ }
+ }
+ return value;
+ });
+
+ myData = JSON.parse('["Date(09/09/2001)"]', function (key, value) {
+ var d;
+ if (typeof value === 'string' &&
+ value.slice(0, 5) === 'Date(' &&
+ value.slice(-1) === ')') {
+ d = new Date(value.slice(5, -1));
+ if (d) {
+ return d;
+ }
+ }
+ return value;
+ });
+
+
+ This is a reference implementation. You are free to copy, modify, or
+ redistribute.
+*/
+
+/*jslint evil: true, strict: false */
+
+/*members "", "\b", "\t", "\n", "\f", "\r", "\"", JSON, "\\", apply,
+ call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
+ getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
+ lastIndex, length, parse, prototype, push, replace, slice, stringify,
+ test, toJSON, toString, valueOf
+*/
+
+
+// Create a JSON object only if one does not already exist. We create the
+// methods in a closure to avoid creating global variables.
+
+if (!this.JSON) {
+ this.JSON = {};
+}
+
+(function () {
+
+ function f(n) {
+ // Format integers to have at least two digits.
+ return n < 10 ? '0' + n : n;
+ }
+
+ if (typeof Date.prototype.toJSON !== 'function') {
+
+ Date.prototype.toJSON = function (key) {
+
+ return isFinite(this.valueOf()) ?
+ this.getUTCFullYear() + '-' +
+ f(this.getUTCMonth() + 1) + '-' +
+ f(this.getUTCDate()) + 'T' +
+ f(this.getUTCHours()) + ':' +
+ f(this.getUTCMinutes()) + ':' +
+ f(this.getUTCSeconds()) + 'Z' : null;
+ };
+
+ String.prototype.toJSON =
+ Number.prototype.toJSON =
+ Boolean.prototype.toJSON = function (key) {
+ return this.valueOf();
+ };
+ }
+
+ var cx = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
+ escapable = /[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
+ gap,
+ indent,
+ meta = { // table of character substitutions
+ '\b': '\\b',
+ '\t': '\\t',
+ '\n': '\\n',
+ '\f': '\\f',
+ '\r': '\\r',
+ '"' : '\\"',
+ '\\': '\\\\'
+ },
+ rep;
+
+
+ function quote(string) {
+
+// If the string contains no control characters, no quote characters, and no
+// backslash characters, then we can safely slap some quotes around it.
+// Otherwise we must also replace the offending characters with safe escape
+// sequences.
+
+ escapable.lastIndex = 0;
+ return escapable.test(string) ?
+ '"' + string.replace(escapable, function (a) {
+ var c = meta[a];
+ return typeof c === 'string' ? c :
+ '\\u' + ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
+ }) + '"' :
+ '"' + string + '"';
+ }
+
+
+ function str(key, holder) {
+
+// Produce a string from holder[key].
+
+ var i, // The loop counter.
+ k, // The member key.
+ v, // The member value.
+ length,
+ mind = gap,
+ partial,
+ value = holder[key];
+
+// If the value has a toJSON method, call it to obtain a replacement value.
+
+ if (value && typeof value === 'object' &&
+ typeof value.toJSON === 'function') {
+ value = value.toJSON(key);
+ }
+
+// If we were called with a replacer function, then call the replacer to
+// obtain a replacement value.
+
+ if (typeof rep === 'function') {
+ value = rep.call(holder, key, value);
+ }
+
+// What happens next depends on the value's type.
+
+ switch (typeof value) {
+ case 'string':
+ return quote(value);
+
+ case 'number':
+
+// JSON numbers must be finite. Encode non-finite numbers as null.
+
+ return isFinite(value) ? String(value) : 'null';
+
+ case 'boolean':
+ case 'null':
+
+// If the value is a boolean or null, convert it to a string. Note:
+// typeof null does not produce 'null'. The case is included here in
+// the remote chance that this gets fixed someday.
+
+ return String(value);
+
+// If the type is 'object', we might be dealing with an object or an array or
+// null.
+
+ case 'object':
+
+// Due to a specification blunder in ECMAScript, typeof null is 'object',
+// so watch out for that case.
+
+ if (!value) {
+ return 'null';
+ }
+
+// Make an array to hold the partial results of stringifying this object value.
+
+ gap += indent;
+ partial = [];
+
+// Is the value an array?
+
+ if (Object.prototype.toString.apply(value) === '[object Array]') {
+
+// The value is an array. Stringify every element. Use null as a placeholder
+// for non-JSON values.
+
+ length = value.length;
+ for (i = 0; i < length; i += 1) {
+ partial[i] = str(i, value) || 'null';
+ }
+
+// Join all of the elements together, separated with commas, and wrap them in
+// brackets.
+
+ v = partial.length === 0 ? '[]' :
+ gap ? '[\n' + gap +
+ partial.join(',\n' + gap) + '\n' +
+ mind + ']' :
+ '[' + partial.join(',') + ']';
+ gap = mind;
+ return v;
+ }
+
+// If the replacer is an array, use it to select the members to be stringified.
+
+ if (rep && typeof rep === 'object') {
+ length = rep.length;
+ for (i = 0; i < length; i += 1) {
+ k = rep[i];
+ if (typeof k === 'string') {
+ v = str(k, value);
+ if (v) {
+ partial.push(quote(k) + (gap ? ': ' : ':') + v);
+ }
+ }
+ }
+ } else {
+
+// Otherwise, iterate through all of the keys in the object.
+
+ for (k in value) {
+ if (Object.hasOwnProperty.call(value, k)) {
+ v = str(k, value);
+ if (v) {
+ partial.push(quote(k) + (gap ? ': ' : ':') + v);
+ }
+ }
+ }
+ }
+
+// Join all of the member texts together, separated with commas,
+// and wrap them in braces.
+
+ v = partial.length === 0 ? '{}' :
+ gap ? '{\n' + gap + partial.join(',\n' + gap) + '\n' +
+ mind + '}' : '{' + partial.join(',') + '}';
+ gap = mind;
+ return v;
+ }
+ }
+
+// If the JSON object does not yet have a stringify method, give it one.
+
+ if (typeof JSON.stringify !== 'function') {
+ JSON.stringify = function (value, replacer, space) {
+
+// The stringify method takes a value and an optional replacer, and an optional
+// space parameter, and returns a JSON text. The replacer can be a function
+// that can replace values, or an array of strings that will select the keys.
+// A default replacer method can be provided. Use of the space parameter can
+// produce text that is more easily readable.
+
+ var i;
+ gap = '';
+ indent = '';
+
+// If the space parameter is a number, make an indent string containing that
+// many spaces.
+
+ if (typeof space === 'number') {
+ for (i = 0; i < space; i += 1) {
+ indent += ' ';
+ }
+
+// If the space parameter is a string, it will be used as the indent string.
+
+ } else if (typeof space === 'string') {
+ indent = space;
+ }
+
+// If there is a replacer, it must be a function or an array.
+// Otherwise, throw an error.
+
+ rep = replacer;
+ if (replacer && typeof replacer !== 'function' &&
+ (typeof replacer !== 'object' ||
+ typeof replacer.length !== 'number')) {
+ throw new Error('JSON.stringify');
+ }
+
+// Make a fake root object containing our value under the key of ''.
+// Return the result of stringifying the value.
+
+ return str('', {'': value});
+ };
+ }
+
+
+// If the JSON object does not yet have a parse method, give it one.
+
+ if (typeof JSON.parse !== 'function') {
+ JSON.parse = function (text, reviver) {
+
+// The parse method takes a text and an optional reviver function, and returns
+// a JavaScript value if the text is a valid JSON text.
+
+ var j;
+
+ function walk(holder, key) {
+
+// The walk method is used to recursively walk the resulting structure so
+// that modifications can be made.
+
+ var k, v, value = holder[key];
+ if (value && typeof value === 'object') {
+ for (k in value) {
+ if (Object.hasOwnProperty.call(value, k)) {
+ v = walk(value, k);
+ if (v !== undefined) {
+ value[k] = v;
+ } else {
+ delete value[k];
+ }
+ }
+ }
+ }
+ return reviver.call(holder, key, value);
+ }
+
+
+// Parsing happens in four stages. In the first stage, we replace certain
+// Unicode characters with escape sequences. JavaScript handles many characters
+// incorrectly, either silently deleting them, or treating them as line endings.
+
+ text = String(text);
+ cx.lastIndex = 0;
+ if (cx.test(text)) {
+ text = text.replace(cx, function (a) {
+ return '\\u' +
+ ('0000' + a.charCodeAt(0).toString(16)).slice(-4);
+ });
+ }
+
+// In the second stage, we run the text against regular expressions that look
+// for non-JSON patterns. We are especially concerned with '()' and 'new'
+// because they can cause invocation, and '=' because it can cause mutation.
+// But just to be safe, we want to reject all unexpected forms.
+
+// We split the second stage into 4 regexp operations in order to work around
+// crippling inefficiencies in IE's and Safari's regexp engines. First we
+// replace the JSON backslash pairs with '@' (a non-JSON character). Second, we
+// replace all simple value tokens with ']' characters. Third, we delete all
+// open brackets that follow a colon or comma or that begin the text. Finally,
+// we look to see that the remaining characters are only whitespace or ']' or
+// ',' or ':' or '{' or '}'. If that is so, then the text is safe for eval.
+
+ if (/^[\],:{}\s]*$/.
+test(text.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, '@').
+replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, ']').
+replace(/(?:^|:|,)(?:\s*\[)+/g, ''))) {
+
+// In the third stage we use the eval function to compile the text into a
+// JavaScript structure. The '{' operator is subject to a syntactic ambiguity
+// in JavaScript: it can begin a block or an object literal. We wrap the text
+// in parens to eliminate the ambiguity.
+
+ j = eval('(' + text + ')');
+
+// In the optional fourth stage, we recursively walk the new structure, passing
+// each name/value pair to a reviver function for possible transformation.
+
+ return typeof reviver === 'function' ?
+ walk({'': j}, '') : j;
+ }
+
+// If the text is not JSON parseable, then a SyntaxError is thrown.
+
+ throw new SyntaxError('JSON.parse');
+ };
+ }
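+
+// A usage sketch: the optional reviver visits every key/value pair after
+// parsing, so values can be transformed in place, e.g.
+//
+//    JSON.parse('{"when": "2011-05-17"}', function (key, value) {
+//        return key === 'when' ? new Date(value) : value;
+//    });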
+}());
diff --git a/1.1.x/share/www/script/jspec/jspec.css b/1.1.x/share/www/script/jspec/jspec.css
new file mode 100644
index 00000000..629d41c5
--- /dev/null
+++ b/1.1.x/share/www/script/jspec/jspec.css
@@ -0,0 +1,149 @@
+body.jspec {
+ margin: 45px 0;
+ font: 12px "Helvetica Neue Light", "Lucida Grande", "Calibri", "Arial", sans-serif;
+ background: #efefef url(images/bg.png) top left repeat-x;
+ text-align: center;
+}
+#jspec {
+ margin: 0 auto;
+ padding-top: 30px;
+ width: 1008px;
+ background: url(images/vr.png) top left repeat-y;
+ text-align: left;
+}
+#jspec-top {
+ position: relative;
+ margin: 0 auto;
+ width: 1008px;
+ height: 40px;
+ background: url(images/sprites.bg.png) top left no-repeat;
+}
+#jspec-bottom {
+ margin: 0 auto;
+ width: 1008px;
+ height: 15px;
+ background: url(images/sprites.bg.png) bottom left no-repeat;
+}
+#jspec .loading {
+ margin-top: -45px;
+ width: 1008px;
+ height: 80px;
+ background: url(images/loading.gif) 50% 50% no-repeat;
+}
+#jspec-title {
+ position: absolute;
+ top: 15px;
+ left: 20px;
+ width: 160px;
+ font-size: 22px;
+ font-weight: normal;
+ background: url(images/sprites.png) 0 -126px no-repeat;
+ text-align: center;
+}
+#jspec-title em {
+ font-size: 10px;
+ font-style: normal;
+ color: #BCC8D1;
+}
+#jspec-report * {
+ margin: 0;
+ padding: 0;
+ background: none;
+ border: none;
+}
+#jspec-report {
+ padding: 15px 40px;
+ font: 11px "Helvetica Neue Light", "Lucida Grande", "Calibri", "Arial", sans-serif;
+ color: #7B8D9B;
+}
+#jspec-report.has-failures {
+ padding-bottom: 30px;
+}
+#jspec-report .hidden {
+ display: none;
+}
+#jspec-report .heading {
+ margin-bottom: 15px;
+}
+#jspec-report .heading span {
+ padding-right: 10px;
+}
+#jspec-report .heading .passes em {
+ color: #0ea0eb;
+}
+#jspec-report .heading .failures em {
+ color: #FA1616;
+}
+#jspec-report table {
+ font-size: 11px;
+ border-collapse: collapse;
+}
+#jspec-report td {
+ padding: 8px;
+ text-indent: 30px;
+ color: #7B8D9B;
+}
+#jspec-report tr.body {
+ display: none;
+}
+#jspec-report tr.body pre {
+ margin: 0;
+ padding: 0 0 5px 25px;
+}
+#jspec-report tr.even:hover + tr.body,
+#jspec-report tr.odd:hover + tr.body {
+ display: block;
+}
+#jspec-report tr td:first-child em {
+ display: block;
+ clear: both;
+ font-style: normal;
+ font-weight: normal;
+ color: #7B8D9B;
+}
+#jspec-report tr.even:hover,
+#jspec-report tr.odd:hover {
+ text-shadow: 1px 1px 1px #fff;
+ background: #F2F5F7;
+}
+#jspec-report td + td {
+ padding-right: 0;
+ width: 15px;
+}
+#jspec-report td.pass {
+ background: url(images/sprites.png) 3px -7px no-repeat;
+}
+#jspec-report td.fail {
+ background: url(images/sprites.png) 3px -158px no-repeat;
+ font-weight: bold;
+ color: #FC0D0D;
+}
+#jspec-report td.requires-implementation {
+ background: url(images/sprites.png) 3px -333px no-repeat;
+}
+#jspec-report tr.description td {
+ margin-top: 25px;
+ padding-top: 25px;
+ font-size: 12px;
+ font-weight: bold;
+ text-indent: 0;
+ color: #1a1a1a;
+}
+#jspec-report tr.description:first-child td {
+ border-top: none;
+}
+#jspec-report .assertion {
+ display: block;
+ float: left;
+ margin: 0 0 0 1px;
+ padding: 0;
+ width: 1px;
+ height: 5px;
+ background: #7B8D9B;
+}
+#jspec-report .assertion.failed {
+ background: red;
+}
+.jspec-sandbox {
+ display: none;
+} \ No newline at end of file
diff --git a/1.1.x/share/www/script/jspec/jspec.jquery.js b/1.1.x/share/www/script/jspec/jspec.jquery.js
new file mode 100644
index 00000000..fcad7ab9
--- /dev/null
+++ b/1.1.x/share/www/script/jspec/jspec.jquery.js
@@ -0,0 +1,72 @@
+
+// JSpec - jQuery - Copyright TJ Holowaychuk <tj@vision-media.ca> (MIT Licensed)
+
+JSpec
+.requires('jQuery', 'when using jspec.jquery.js')
+.include({
+ name: 'jQuery',
+
+ // --- Initialize
+
+ init : function() {
+ jQuery.ajaxSetup({ async: false })
+ },
+
+ // --- Utilities
+
+ utilities : {
+ element: jQuery,
+ elements: jQuery,
+ sandbox : function() {
+ return jQuery('<div class="sandbox"></div>')
+ }
+ },
+
+ // --- Matchers
+
+ matchers : {
+ have_tag : "jQuery(expected, actual).length === 1",
+ have_one : "alias have_tag",
+ have_tags : "jQuery(expected, actual).length > 1",
+ have_many : "alias have_tags",
+ have_any : "alias have_tags",
+ have_child : "jQuery(actual).children(expected).length === 1",
+ have_children : "jQuery(actual).children(expected).length > 1",
+ have_text : "jQuery(actual).text() === expected",
+ have_value : "jQuery(actual).val() === expected",
+ be_enabled : "!jQuery(actual).attr('disabled')",
+ have_class : "jQuery(actual).hasClass(expected)",
+
+ be_visible : function(actual) {
+ return jQuery(actual).css('display') != 'none' &&
+ jQuery(actual).css('visibility') != 'hidden' &&
+ jQuery(actual).attr('type') != 'hidden'
+ },
+
+ be_hidden : function(actual) {
+ return !JSpec.does(actual, 'be_visible')
+ },
+
+ have_classes : function(actual) {
+ return !JSpec.any(JSpec.toArray(arguments, 1), function(arg){
+ return !JSpec.does(actual, 'have_class', arg)
+ })
+ },
+
+ have_attr : function(actual, attr, value) {
+ return value ? jQuery(actual).attr(attr) == value:
+ jQuery(actual).attr(attr)
+ },
+
+ 'be disabled selected checked' : function(attr) {
+ return 'jQuery(actual).attr("' + attr + '")'
+ },
+
+ 'have type id title alt href src sel rev name target' : function(attr) {
+ return function(actual, value) {
+ return JSpec.does(actual, 'have_attr', attr, value)
+ }
+ }
+ }
+})
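+
+// Usage sketch, assuming the snake DSL (matchers and the element() utility
+// are in scope inside spec bodies):
+//
+//   expect(element('<ul><li>one</li><li>two</li></ul>')).to(have_tags, 'li')
+//   expect(element('<input type="hidden"/>')).to(be_hidden)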
+
diff --git a/1.1.x/share/www/script/jspec/jspec.js b/1.1.x/share/www/script/jspec/jspec.js
new file mode 100644
index 00000000..b2ea4768
--- /dev/null
+++ b/1.1.x/share/www/script/jspec/jspec.js
@@ -0,0 +1,1756 @@
+
+// JSpec - Core - Copyright TJ Holowaychuk <tj@vision-media.ca> (MIT Licensed)
+
+;(function(){
+
+ JSpec = {
+ version : '3.3.2',
+ assert : true,
+ cache : {},
+ suites : [],
+ modules : [],
+ allSuites : [],
+ matchers : {},
+ stubbed : [],
+ options : {},
+ request : 'XMLHttpRequest' in this ? XMLHttpRequest : null,
+ stats : { specs: 0, assertions: 0, failures: 0, passes: 0, specsFinished: 0, suitesFinished: 0 },
+
+ /**
+ * Default context in which bodies are evaluated.
+ *
+ * Replace context simply by setting JSpec.context
+ * to your own like below:
+ *
+ * JSpec.context = { foo : 'bar' }
+ *
+     * Contexts can be changed within any body; this can be useful
+     * for providing specific helper methods to specific suites.
+ *
+ * To reset (usually in after hook) simply set to null like below:
+ *
+ * JSpec.context = null
+ *
+ */
+
+ defaultContext : {
+
+ /**
+ * Return an object used for proxy assertions.
+ * This object is used to indicate that an object
+ * should be an instance of _object_, not the constructor
+ * itself.
+ *
+ * @param {function} constructor
+ * @return {hash}
+ * @api public
+ */
+
+ an_instance_of : function(constructor) {
+ return { an_instance_of : constructor }
+ },
+
+ /**
+ * Load fixture at _path_.
+ *
+ * Fixtures are resolved as:
+ *
+ * - <path>
+ * - <path>.html
+ *
+ * @param {string} path
+ * @return {string}
+ * @api public
+ */
+
+ fixture : function(path) {
+ if (JSpec.cache[path]) return JSpec.cache[path]
+ return JSpec.cache[path] =
+ JSpec.tryLoading(JSpec.options.fixturePath + '/' + path) ||
+ JSpec.tryLoading(JSpec.options.fixturePath + '/' + path + '.html')
+ }
+ },
+
+ // --- Objects
+
+ reporters : {
+
+ /**
+ * Report to server.
+ *
+ * Options:
+ * - uri specific uri to report to.
+     * - verbose whether or not to output messages
+ * - failuresOnly output failure messages only
+ *
+ * @api public
+ */
+
+ Server : function(results, options) {
+ var uri = options.uri || window.location.protocol + "//" + window.location.host + '/results'
+ JSpec.post(uri, {
+ stats: JSpec.stats,
+ options: options,
+ results: map(results.allSuites, function(suite) {
+ if (suite.hasSpecs())
+ return {
+ description: suite.description,
+ specs: map(suite.specs, function(spec) {
+ return {
+ description: spec.description,
+ message: !spec.passed() ? spec.failure().message : null,
+ status: spec.requiresImplementation() ? 'pending' :
+ spec.passed() ? 'pass' :
+ 'fail',
+ assertions: map(spec.assertions, function(assertion){
+ return {
+ passed: assertion.passed
+ }
+ })
+ }
+ })
+ }
+ })
+ })
+ if ('close' in main) main.close()
+ },
+
+ /**
+ * Default reporter, outputting to the DOM.
+ *
+ * Options:
+ * - reportToId id of element to output reports to, defaults to 'jspec'
+ * - failuresOnly displays only suites with failing specs
+ *
+ * @api public
+ */
+
+ DOM : function(results, options) {
+ var id = option('reportToId') || 'jspec',
+ report = document.getElementById(id),
+ failuresOnly = option('failuresOnly'),
+ classes = results.stats.failures ? 'has-failures' : ''
+ if (!report) throw 'JSpec requires the element #' + id + ' to output its reports'
+
+ function bodyContents(body) {
+ return JSpec.
+ escape(JSpec.contentsOf(body)).
+ replace(/^ */gm, function(a){ return (new Array(Math.round(a.length / 3))).join(' ') }).
+ replace(/\r\n|\r|\n/gm, '<br/>')
+ }
+
+ report.innerHTML = '<div id="jspec-report" class="' + classes + '"><div class="heading"> \
+ <span class="passes">Passes: <em>' + results.stats.passes + '</em></span> \
+ <span class="failures">Failures: <em>' + results.stats.failures + '</em></span> \
+ <span class="passes">Duration: <em>' + results.duration + '</em> ms</span> \
+ </div><table class="suites">' + map(results.allSuites, function(suite) {
+ var displaySuite = failuresOnly ? suite.ran && !suite.passed() : suite.ran
+ if (displaySuite && suite.hasSpecs())
+ return '<tr class="description"><td colspan="2">' + escape(suite.description) + '</td></tr>' +
+ map(suite.specs, function(i, spec) {
+ return '<tr class="' + (i % 2 ? 'odd' : 'even') + '">' +
+ (spec.requiresImplementation() ?
+ '<td class="requires-implementation" colspan="2">' + escape(spec.description) + '</td>' :
+ (spec.passed() && !failuresOnly) ?
+ '<td class="pass">' + escape(spec.description)+ '</td><td>' + spec.assertionsGraph() + '</td>' :
+ !spec.passed() ?
+ '<td class="fail">' + escape(spec.description) +
+ map(spec.failures(), function(a){ return '<em>' + escape(a.message) + '</em>' }).join('') +
+ '</td><td>' + spec.assertionsGraph() + '</td>' :
+ '') +
+ '<tr class="body"><td colspan="2"><pre>' + bodyContents(spec.body) + '</pre></td></tr>'
+ }).join('') + '</tr>'
+ }).join('') + '</table></div>'
+ },
+
+ /**
+ * Terminal reporter.
+ *
+ * @api public
+ */
+
+ Terminal : function(results, options) {
+ var failuresOnly = option('failuresOnly')
+ print(color("\n Passes: ", 'bold') + color(results.stats.passes, 'green') +
+ color(" Failures: ", 'bold') + color(results.stats.failures, 'red') +
+ color(" Duration: ", 'bold') + color(results.duration, 'green') + " ms \n")
+
+ function indent(string) {
+ return string.replace(/^(.)/gm, ' $1')
+ }
+
+ each(results.allSuites, function(suite) {
+ var displaySuite = failuresOnly ? suite.ran && !suite.passed() : suite.ran
+ if (displaySuite && suite.hasSpecs()) {
+ print(color(' ' + suite.description, 'bold'))
+ each(suite.specs, function(spec){
+ var assertionsGraph = inject(spec.assertions, '', function(graph, assertion){
+ return graph + color('.', assertion.passed ? 'green' : 'red')
+ })
+ if (spec.requiresImplementation())
+ print(color(' ' + spec.description, 'blue') + assertionsGraph)
+ else if (spec.passed() && !failuresOnly)
+ print(color(' ' + spec.description, 'green') + assertionsGraph)
+ else if (!spec.passed())
+ print(color(' ' + spec.description, 'red') + assertionsGraph +
+ "\n" + indent(map(spec.failures(), function(a){ return a.message }).join("\n")) + "\n")
+ })
+ print("")
+ }
+ })
+
+ quit(results.stats.failures)
+ }
+ },
+
+ Assertion : function(matcher, actual, expected, negate) {
+ extend(this, {
+ message: '',
+ passed: false,
+ actual: actual,
+ negate: negate,
+ matcher: matcher,
+ expected: expected,
+
+ // Report assertion results
+
+ report : function() {
+ if (JSpec.assert)
+ this.passed ? JSpec.stats.passes++ : JSpec.stats.failures++
+ return this
+ },
+
+ // Run the assertion
+
+ run : function() {
+ // TODO: remove unshifting
+ expected.unshift(actual)
+ this.result = matcher.match.apply(this, expected)
+ this.passed = negate ? !this.result : this.result
+ if (!this.passed) this.message = matcher.message.call(this, actual, expected, negate, matcher.name)
+ return this
+ }
+ })
+ },
+
+ ProxyAssertion : function(object, method, times, negate) {
+ var self = this
+ var old = object[method]
+
+ // Proxy
+
+ object[method] = function(){
+ args = toArray(arguments)
+ result = old.apply(object, args)
+ self.calls.push({ args : args, result : result })
+ return result
+ }
+
+ // Times
+
+ this.times = {
+ once : 1,
+ twice : 2
+ }[times] || times || 1
+
+ extend(this, {
+ calls: [],
+ message: '',
+ defer: true,
+ passed: false,
+ negate: negate,
+ object: object,
+ method: method,
+
+ // Proxy return value
+
+ and_return : function(result) {
+ this.expectedResult = result
+ return this
+ },
+
+ // Proxy arguments passed
+
+ with_args : function() {
+ this.expectedArgs = toArray(arguments)
+ return this
+ },
+
+ // Check if any calls have failing results
+
+ anyResultsFail : function() {
+ return any(this.calls, function(call){
+ return self.expectedResult.an_instance_of ?
+ call.result.constructor != self.expectedResult.an_instance_of:
+ !equal(self.expectedResult, call.result)
+ })
+ },
+
+ // Check if any calls have passing results
+
+ anyResultsPass : function() {
+ return any(this.calls, function(call){
+ return self.expectedResult.an_instance_of ?
+ call.result.constructor == self.expectedResult.an_instance_of:
+ equal(self.expectedResult, call.result)
+ })
+ },
+
+ // Return the passing result
+
+ passingResult : function() {
+ return this.anyResultsPass().result
+ },
+
+ // Return the failing result
+
+ failingResult : function() {
+ return this.anyResultsFail().result
+ },
+
+ // Check if any arguments fail
+
+ anyArgsFail : function() {
+ return any(this.calls, function(call){
+ return any(self.expectedArgs, function(i, arg){
+ if (arg == null) return call.args[i] == null
+ return arg.an_instance_of ?
+ call.args[i].constructor != arg.an_instance_of:
+ !equal(arg, call.args[i])
+
+ })
+ })
+ },
+
+ // Check if any arguments pass
+
+ anyArgsPass : function() {
+ return any(this.calls, function(call){
+ return any(self.expectedArgs, function(i, arg){
+ return arg.an_instance_of ?
+ call.args[i].constructor == arg.an_instance_of:
+ equal(arg, call.args[i])
+
+ })
+ })
+ },
+
+ // Return the passing args
+
+ passingArgs : function() {
+ return this.anyArgsPass().args
+ },
+
+ // Return the failing args
+
+ failingArgs : function() {
+ return this.anyArgsFail().args
+ },
+
+ // Report assertion results
+
+ report : function() {
+ if (JSpec.assert)
+ this.passed ? ++JSpec.stats.passes : ++JSpec.stats.failures
+ return this
+ },
+
+ // Run the assertion
+
+ run : function() {
+ var methodString = 'expected ' + object.toString() + '.' + method + '()' + (negate ? ' not' : '' )
+
+ function times(n) {
+ return n > 2 ? n + ' times' : { 1: 'once', 2: 'twice' }[n]
+ }
+
+ if (this.expectedResult != null && (negate ? this.anyResultsPass() : this.anyResultsFail()))
+ this.message = methodString + ' to return ' + puts(this.expectedResult) +
+ ' but ' + (negate ? 'it did' : 'got ' + puts(this.failingResult()))
+
+ if (this.expectedArgs && (negate ? !this.expectedResult && this.anyArgsPass() : this.anyArgsFail()))
+ this.message = methodString + ' to be called with ' + puts.apply(this, this.expectedArgs) +
+ ' but was' + (negate ? '' : ' called with ' + puts.apply(this, this.failingArgs()))
+
+ if (negate ? !this.expectedResult && !this.expectedArgs && this.calls.length >= this.times : this.calls.length != this.times)
+ this.message = methodString + ' to be called ' + times(this.times) +
+          ', but ' + (this.calls.length == 0 ? 'was not called' : 'was called ' + times(this.calls.length))
+
+ if (!this.message.length)
+ this.passed = true
+
+ return this
+ }
+ })
+ },
+
+ /**
+ * Specification Suite block object.
+ *
+ * @param {string} description
+ * @param {function} body
+ * @api private
+ */
+
+ Suite : function(description, body) {
+ var self = this
+ extend(this, {
+ body: body,
+ description: description,
+ suites: [],
+ specs: [],
+ ran: false,
+ hooks: { 'before' : [], 'after' : [], 'before_each' : [], 'after_each' : [] },
+
+ // Add a spec to the suite
+
+ addSpec : function(description, body) {
+ var spec = new JSpec.Spec(description, body)
+ this.specs.push(spec)
+ JSpec.stats.specs++ // TODO: abstract
+ spec.suite = this
+ },
+
+ // Add a hook to the suite
+
+ addHook : function(hook, body) {
+ this.hooks[hook].push(body)
+ },
+
+ // Add a nested suite
+
+ addSuite : function(description, body) {
+ var suite = new JSpec.Suite(description, body)
+ JSpec.allSuites.push(suite)
+ suite.name = suite.description
+ suite.description = this.description + ' ' + suite.description
+ this.suites.push(suite)
+ suite.suite = this
+ },
+
+ // Invoke a hook in context to this suite
+
+ hook : function(hook) {
+ if (this.suite) this.suite.hook(hook)
+ each(this.hooks[hook], function(body) {
+ JSpec.evalBody(body, "Error in hook '" + hook + "', suite '" + self.description + "': ")
+ })
+ },
+
+ // Check if nested suites are present
+
+ hasSuites : function() {
+ return this.suites.length
+ },
+
+ // Check if this suite has specs
+
+ hasSpecs : function() {
+ return this.specs.length
+ },
+
+ // Check if the entire suite passed
+
+ passed : function() {
+ return !any(this.specs, function(spec){
+ return !spec.passed()
+ })
+ }
+ })
+ },
+
+ /**
+ * Specification block object.
+ *
+ * @param {string} description
+ * @param {function} body
+ * @api private
+ */
+
+ Spec : function(description, body) {
+ extend(this, {
+ body: body,
+ description: description,
+ assertions: [],
+
+ // Add passing assertion
+
+ pass : function(message) {
+ this.assertions.push({ passed: true, message: message })
+ if (JSpec.assert) ++JSpec.stats.passes
+ },
+
+ // Add failing assertion
+
+ fail : function(message) {
+ this.assertions.push({ passed: false, message: message })
+ if (JSpec.assert) ++JSpec.stats.failures
+ },
+
+ // Run deferred assertions
+
+ runDeferredAssertions : function() {
+ each(this.assertions, function(assertion){
+ if (assertion.defer) assertion.run().report(), hook('afterAssertion', assertion)
+ })
+ },
+
+ // Find first failing assertion
+
+ failure : function() {
+ return find(this.assertions, function(assertion){
+ return !assertion.passed
+ })
+ },
+
+ // Find all failing assertions
+
+ failures : function() {
+ return select(this.assertions, function(assertion){
+ return !assertion.passed
+ })
+ },
+
+      // Whether or not the spec passed
+
+ passed : function() {
+ return !this.failure()
+ },
+
+      // Whether or not the spec requires implementation (no assertions)
+
+ requiresImplementation : function() {
+ return this.assertions.length == 0
+ },
+
+ // Sprite based assertions graph
+
+ assertionsGraph : function() {
+ return map(this.assertions, function(assertion){
+ return '<span class="assertion ' + (assertion.passed ? 'passed' : 'failed') + '"></span>'
+ }).join('')
+ }
+ })
+ },
+
+ Module : function(methods) {
+ extend(this, methods)
+ },
+
+ JSON : {
+
+ /**
+ * Generic sequences.
+ */
+
+ meta : {
+ '\b' : '\\b',
+ '\t' : '\\t',
+ '\n' : '\\n',
+ '\f' : '\\f',
+ '\r' : '\\r',
+ '"' : '\\"',
+ '\\' : '\\\\'
+ },
+
+ /**
+ * Escapable sequences.
+ */
+
+ escapable : /[\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
+
+ /**
+ * JSON encode _object_.
+ *
+ * @param {mixed} object
+ * @return {string}
+ * @api private
+ */
+
+ encode : function(object) {
+ var self = this
+ if (object == undefined || object == null) return 'null'
+ if (object === true) return 'true'
+ if (object === false) return 'false'
+ switch (typeof object) {
+ case 'number': return object
+ case 'string': return this.escapable.test(object) ?
+ '"' + object.replace(this.escapable, function (a) {
+ return typeof self.meta[a] === 'string' ? self.meta[a] :
+ '\\u' + ('0000' + a.charCodeAt(0).toString(16)).slice(-4)
+ }) + '"' :
+ '"' + object + '"'
+ case 'object':
+ if (object.constructor == Array)
+ return '[' + map(object, function(val){
+ return self.encode(val)
+ }).join(', ') + ']'
+ else if (object)
+ return '{' + map(object, function(key, val){
+ return self.encode(key) + ':' + self.encode(val)
+ }).join(', ') + '}'
+ }
+ return 'null'
+ }
+ },
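+
+  // Usage sketch: JSpec.JSON.encode({ ok: true, list: [1, 2] }) yields
+  // the string '{"ok":true, "list":[1, 2]}'.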
+
+ // --- DSLs
+
+ DSLs : {
+ snake : {
+ expect : function(actual){
+ return JSpec.expect(actual)
+ },
+
+ describe : function(description, body) {
+ return JSpec.currentSuite.addSuite(description, body)
+ },
+
+ it : function(description, body) {
+ return JSpec.currentSuite.addSpec(description, body)
+ },
+
+ before : function(body) {
+ return JSpec.currentSuite.addHook('before', body)
+ },
+
+ after : function(body) {
+ return JSpec.currentSuite.addHook('after', body)
+ },
+
+ before_each : function(body) {
+ return JSpec.currentSuite.addHook('before_each', body)
+ },
+
+ after_each : function(body) {
+ return JSpec.currentSuite.addHook('after_each', body)
+ },
+
+ should_behave_like : function(description) {
+ return JSpec.shareBehaviorsOf(description)
+ }
+ }
+ },
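+
+  // Usage sketch: a root suite written directly against the snake DSL
+  // (the same calls that the .spec grammar is preprocessed into):
+  //
+  //   JSpec.describe('Array', function(){
+  //     it('should know its length', function(){
+  //       expect([1, 2, 3]).to(have_length, 3)
+  //     })
+  //   })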
+
+ // --- Methods
+
+ /**
+ * Check if _value_ is 'stop'. For use as a
+ * utility callback function.
+ *
+ * @param {mixed} value
+ * @return {bool}
+ * @api public
+ */
+
+ haveStopped : function(value) {
+ return value === 'stop'
+ },
+
+ /**
+ * Include _object_ which may be a hash or Module instance.
+ *
+ * @param {hash, Module} object
+ * @return {JSpec}
+ * @api public
+ */
+
+ include : function(object) {
+ var module = object.constructor == JSpec.Module ? object : new JSpec.Module(object)
+ this.modules.push(module)
+ if ('init' in module) module.init()
+ if ('utilities' in module) extend(this.defaultContext, module.utilities)
+ if ('matchers' in module) this.addMatchers(module.matchers)
+ if ('reporters' in module) extend(this.reporters, module.reporters)
+ if ('DSLs' in module)
+ each(module.DSLs, function(name, methods){
+ JSpec.DSLs[name] = JSpec.DSLs[name] || {}
+ extend(JSpec.DSLs[name], methods)
+ })
+ return this
+ },
+
+ /**
+ * Add a module hook _name_, which is immediately
+ * called per module with the _args_ given. An array of
+ * hook return values is returned.
+ *
+     * @param {string} name
+ * @param {...} args
+ * @return {array}
+ * @api private
+ */
+
+ hook : function(name, args) {
+ args = toArray(arguments, 1)
+ return inject(JSpec.modules, [], function(results, module){
+ if (typeof module[name] == 'function')
+ results.push(JSpec.evalHook(module, name, args))
+ })
+ },
+
+ /**
+ * Eval _module_ hook _name_ with _args_. Evaluates in context
+ * to the module itself, JSpec, and JSpec.context.
+ *
+ * @param {Module} module
+ * @param {string} name
+ * @param {array} args
+ * @return {mixed}
+ * @api private
+ */
+
+ evalHook : function(module, name, args) {
+ hook('evaluatingHookBody', module, name)
+ try { return module[name].apply(module, args) }
+ catch(e) { error('Error in hook ' + module.name + '.' + name + ': ', e) }
+ },
+
+ /**
+     * Same as hook(), however it accepts only one _arg_, which is
+     * considered immutable. This function passes the arg to the
+     * first module, then passes each module's return value on to
+     * the following module.
+ *
+ * @param {string} name
+ * @param {mixed} arg
+ * @return {mixed}
+ * @api private
+ */
+
+ hookImmutable : function(name, arg) {
+ return inject(JSpec.modules, arg, function(result, module){
+ if (typeof module[name] == 'function')
+ return JSpec.evalHook(module, name, [result])
+ })
+ },
+
+ /**
+ * Find a suite by its description or name.
+ *
+ * @param {string} description
+ * @return {Suite}
+ * @api private
+ */
+
+ findSuite : function(description) {
+ return find(this.allSuites, function(suite){
+ return suite.name == description || suite.description == description
+ })
+ },
+
+ /**
+ * Share behaviors (specs) of the given suite with
+ * the current suite.
+ *
+ * @param {string} description
+ * @api public
+ */
+
+ shareBehaviorsOf : function(description) {
+ if (suite = this.findSuite(description)) this.copySpecs(suite, this.currentSuite)
+ else throw 'failed to share behaviors. ' + puts(description) + ' is not a valid Suite name'
+ },
+
+ /**
+ * Copy specs from one suite to another.
+ *
+ * @param {Suite} fromSuite
+ * @param {Suite} toSuite
+ * @api public
+ */
+
+ copySpecs : function(fromSuite, toSuite) {
+ each(fromSuite.specs, function(spec){
+ var newSpec = new Object();
+ extend(newSpec, spec);
+ newSpec.assertions = [];
+ toSuite.specs.push(newSpec);
+ })
+ },
+
+ /**
+ * Convert arguments to an array.
+ *
+ * @param {object} arguments
+ * @param {int} offset
+ * @return {array}
+ * @api public
+ */
+
+ toArray : function(arguments, offset) {
+ return Array.prototype.slice.call(arguments, offset || 0)
+ },
+
+ /**
+ * Return ANSI-escaped colored string.
+ *
+ * @param {string} string
+ * @param {string} color
+ * @return {string}
+ * @api public
+ */
+
+ color : function(string, color) {
+ return "\u001B[" + {
+ bold : 1,
+ black : 30,
+ red : 31,
+ green : 32,
+ yellow : 33,
+ blue : 34,
+ magenta : 35,
+ cyan : 36,
+ white : 37
+ }[color] + 'm' + string + "\u001B[0m"
+ },
+
+ /**
+ * Default matcher message callback.
+ *
+ * @api private
+ */
+
+ defaultMatcherMessage : function(actual, expected, negate, name) {
+ return 'expected ' + puts(actual) + ' to ' +
+ (negate ? 'not ' : '') +
+ name.replace(/_/g, ' ') +
+ ' ' + (expected.length > 1 ?
+ puts.apply(this, expected.slice(1)) :
+ '')
+ },
+
+ /**
+ * Normalize a matcher message.
+ *
+     * When no message callback is present the defaultMatcherMessage
+     * will be assigned, which will suffice for most matchers.
+ *
+ * @param {hash} matcher
+ * @return {hash}
+ * @api public
+ */
+
+ normalizeMatcherMessage : function(matcher) {
+ if (typeof matcher.message != 'function')
+ matcher.message = this.defaultMatcherMessage
+ return matcher
+ },
+
+ /**
+ * Normalize a matcher body
+ *
+ * This process allows the following conversions until
+ * the matcher is in its final normalized hash state.
+ *
+ * - '==' becomes 'actual == expected'
+ * - 'actual == expected' becomes 'return actual == expected'
+ * - function(actual, expected) { return actual == expected } becomes
+ * { match : function(actual, expected) { return actual == expected }}
+ *
+ * @param {mixed} body
+ * @return {hash}
+ * @api public
+ */
+
+ normalizeMatcherBody : function(body) {
+ switch (body.constructor) {
+ case String:
+ if (captures = body.match(/^alias (\w+)/)) return JSpec.matchers[last(captures)]
+ if (body.length < 4) body = 'actual ' + body + ' expected'
+ return { match: function(actual, expected) { return eval(body) }}
+
+ case Function:
+ return { match: body }
+
+ default:
+ return body
+ }
+ },
+
+ /**
+ * Get option value. This method first checks if
+ * the option key has been set via the query string,
+ * otherwise returning the options hash value.
+ *
+ * @param {string} key
+ * @return {mixed}
+ * @api public
+ */
+
+ option : function(key) {
+ return (value = query(key)) !== null ? value :
+ JSpec.options[key] || null
+ },
+
+ /**
+ * Check if object _a_, is equal to object _b_.
+ *
+ * @param {object} a
+ * @param {object} b
+ * @return {bool}
+ * @api private
+ */
+
+ equal: function(a, b) {
+ if (typeof a != typeof b) return
+ if (a === b) return true
+ if (a instanceof RegExp)
+ return a.toString() === b.toString()
+ if (a instanceof Date)
+ return Number(a) === Number(b)
+ if (typeof a != 'object') return
+ if (a.length !== undefined)
+ if (a.length !== b.length) return
+ else
+ for (var i = 0, len = a.length; i < len; ++i)
+ if (!equal(a[i], b[i]))
+ return
+ for (var key in a)
+ if (!equal(a[key], b[key]))
+ return
+ return true
+ },
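+
+    // Sketch of the deep comparison above:
+    //   equal([1, [2]], [1, [2]])        // true, element by element
+    //   equal(/foo/g, /foo/g)            // true, compared via toString()
+    //   equal(new Date(0), new Date(0))  // true, compared numerically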
+
+ /**
+ * Return last element of an array.
+ *
+ * @param {array} array
+ * @return {object}
+ * @api public
+ */
+
+ last : function(array) {
+ return array[array.length - 1]
+ },
+
+ /**
+     * Convert object(s) to a print-friendly string.
+ *
+ * @param {...} object
+ * @return {string}
+ * @api public
+ */
+
+ puts : function(object) {
+ if (arguments.length > 1)
+ return map(toArray(arguments), function(arg){
+ return puts(arg)
+ }).join(', ')
+ if (object === undefined) return 'undefined'
+ if (object === null) return 'null'
+ if (object === true) return 'true'
+ if (object === false) return 'false'
+ if (object.an_instance_of) return 'an instance of ' + object.an_instance_of.name
+ if (object.jquery && object.selector.length > 0) return 'selector ' + puts(object.selector)
+ if (object.jquery) return object.get(0).outerHTML
+ if (object.nodeName) return object.outerHTML
+ switch (object.constructor) {
+ case Function: return object.name || object
+ case String:
+ return '"' + object
+ .replace(/"/g, '\\"')
+ .replace(/\n/g, '\\n')
+ .replace(/\t/g, '\\t')
+ + '"'
+ case Array:
+ return inject(object, '[', function(b, v){
+ return b + ', ' + puts(v)
+ }).replace('[,', '[') + ' ]'
+ case Object:
+ object.__hit__ = true
+ return inject(object, '{', function(b, k, v) {
+ if (k == '__hit__') return b
+ return b + ', ' + k + ': ' + (v && v.__hit__ ? '<circular reference>' : puts(v))
+ }).replace('{,', '{') + ' }'
+ default:
+ return object.toString()
+ }
+ },
+
+ /**
+ * Escape HTML.
+ *
+ * @param {string} html
+ * @return {string}
+ * @api public
+ */
+
+ escape : function(html) {
+ return html.toString()
+ .replace(/&/gmi, '&amp;')
+ .replace(/"/gmi, '&quot;')
+ .replace(/>/gmi, '&gt;')
+ .replace(/</gmi, '&lt;')
+ },
+
+ /**
+ * Perform an assertion without reporting.
+ *
+ * This method is primarily used for internal
+     * matchers in order to retain DRYness. May be invoked
+ * like below:
+ *
+ * does('foo', 'eql', 'foo')
+ * does([1,2], 'include', 1, 2)
+ *
+ * External hooks are not run for internal assertions
+ * performed by does().
+ *
+ * @param {mixed} actual
+ * @param {string} matcher
+ * @param {...} expected
+ * @return {mixed}
+ * @api private
+ */
+
+ does : function(actual, matcher, expected) {
+ var assertion = new JSpec.Assertion(JSpec.matchers[matcher], actual, toArray(arguments, 2))
+ return assertion.run().result
+ },
+
+ /**
+ * Perform an assertion.
+ *
+ * expect(true).to('be', true)
+ * expect('foo').not_to('include', 'bar')
+ * expect([1, [2]]).to('include', 1, [2])
+ *
+ * @param {mixed} actual
+ * @return {hash}
+ * @api public
+ */
+
+ expect : function(actual) {
+ function assert(matcher, args, negate) {
+ var expected = toArray(args, 1)
+ matcher.negate = negate
+ assertion = new JSpec.Assertion(matcher, actual, expected, negate)
+ hook('beforeAssertion', assertion)
+ if (matcher.defer) assertion.run()
+ else JSpec.currentSpec.assertions.push(assertion.run().report()), hook('afterAssertion', assertion)
+ return assertion.result
+ }
+
+ function to(matcher) {
+ return assert(matcher, arguments, false)
+ }
+
+ function not_to(matcher) {
+ return assert(matcher, arguments, true)
+ }
+
+ return {
+ to : to,
+ should : to,
+ not_to: not_to,
+ should_not : not_to
+ }
+ },
+
+ /**
+     * Strip whitespace or the given chars.
+ *
+ * @param {string} string
+ * @param {string} chars
+ * @return {string}
+ * @api public
+ */
+
+ strip : function(string, chars) {
+ return string.
+ replace(new RegExp('[' + (chars || '\\s') + ']*$'), '').
+ replace(new RegExp('^[' + (chars || '\\s') + ']*'), '')
+ },
+
+ /**
+ * Call an iterator callback with arguments a, or b
+ * depending on the arity of the callback.
+ *
+ * @param {function} callback
+ * @param {mixed} a
+ * @param {mixed} b
+ * @return {mixed}
+ * @api private
+ */
+
+ callIterator : function(callback, a, b) {
+ return callback.length == 1 ? callback(b) : callback(a, b)
+ },
+
+ /**
+ * Extend an object with another.
+ *
+ * @param {object} object
+ * @param {object} other
+ * @api public
+ */
+
+ extend : function(object, other) {
+ each(other, function(property, value){
+ object[property] = value
+ })
+ },
+
+ /**
+ * Iterate an object, invoking the given callback.
+ *
+ * @param {hash, array} object
+ * @param {function} callback
+ * @return {JSpec}
+ * @api public
+ */
+
+ each : function(object, callback) {
+ if (object.constructor == Array)
+ for (var i = 0, len = object.length; i < len; ++i)
+ callIterator(callback, i, object[i])
+ else
+ for (var key in object)
+ if (object.hasOwnProperty(key))
+ callIterator(callback, key, object[key])
+ },
+
+ /**
+ * Iterate with memo.
+ *
+ * @param {hash, array} object
+ * @param {object} memo
+ * @param {function} callback
+ * @return {object}
+ * @api public
+ */
+
+ inject : function(object, memo, callback) {
+ each(object, function(key, value){
+ memo = (callback.length == 2 ?
+ callback(memo, value):
+ callback(memo, key, value)) ||
+ memo
+ })
+ return memo
+ },
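+
+    // Usage sketch: inject() threads the memo through each iteration:
+    //   inject([1, 2, 3], 0, function(sum, n){ return sum + n })  // => 6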
+
+ /**
+ * Destub _object_'s _method_. When no _method_ is passed
+ * all stubbed methods are destubbed. When no arguments
+ * are passed every object found in JSpec.stubbed will be
+ * destubbed.
+ *
+ * @param {mixed} object
+ * @param {string} method
+ * @api public
+ */
+
+ destub : function(object, method) {
+ if (method) {
+ if (object['__prototype__' + method])
+ delete object[method]
+ else
+ object[method] = object['__original__' + method]
+ delete object['__prototype__' + method]
+      delete object['__original__' + method]
+ }
+ else if (object) {
+ for (var key in object)
+ if (captures = key.match(/^(?:__prototype__|__original__)(.*)/))
+ destub(object, captures[1])
+ }
+ else
+ while (JSpec.stubbed.length)
+ destub(JSpec.stubbed.shift())
+ },
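+
+    // Sketch of the three arities described above:
+    //   destub(obj, 'method')  // restore a single method
+    //   destub(obj)            // restore every stubbed method on obj
+    //   destub()               // restore everything in JSpec.stubbed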
+
+ /**
+ * Stub _object_'s _method_.
+ *
+ * stub(foo, 'toString').and_return('bar')
+ *
+ * @param {mixed} object
+ * @param {string} method
+ * @return {hash}
+ * @api public
+ */
+
+ stub : function(object, method) {
+ hook('stubbing', object, method)
+ JSpec.stubbed.push(object)
+ var type = object.hasOwnProperty(method) ? '__original__' : '__prototype__'
+ object[type + method] = object[method]
+ object[method] = function(){}
+ return {
+ and_return : function(value) {
+ if (typeof value == 'function') object[method] = value
+ else object[method] = function(){ return value }
+ }
+ }
+ },
+
+ /**
+ * Map callback return values.
+ *
+ * @param {hash, array} object
+ * @param {function} callback
+ * @return {array}
+ * @api public
+ */
+
+ map : function(object, callback) {
+ return inject(object, [], function(memo, key, value){
+ memo.push(callIterator(callback, key, value))
+ })
+ },
+
+ /**
+ * Returns the first matching expression or null.
+ *
+ * @param {hash, array} object
+ * @param {function} callback
+ * @return {mixed}
+ * @api public
+ */
+
+ any : function(object, callback) {
+ return inject(object, null, function(state, key, value){
+ if (state == undefined)
+ return callIterator(callback, key, value) ? value : state
+ })
+ },
+
+ /**
+ * Returns an array of values collected when the callback
+ * given evaluates to true.
+ *
+ * @param {hash, array} object
+     * @param {function} callback
+ * @return {array}
+ * @api public
+ */
+
+ select : function(object, callback) {
+ return inject(object, [], function(selected, key, value){
+ if (callIterator(callback, key, value))
+ selected.push(value)
+ })
+ },
+
+ /**
+ * Define matchers.
+ *
+ * @param {hash} matchers
+ * @api public
+ */
+
+ addMatchers : function(matchers) {
+ each(matchers, function(name, body){
+ JSpec.addMatcher(name, body)
+ })
+ },
+
+ /**
+ * Define a matcher.
+ *
+ * @param {string} name
+ * @param {hash, function, string} body
+ * @api public
+ */
+
+ addMatcher : function(name, body) {
+ hook('addingMatcher', name, body)
+ if (name.indexOf(' ') != -1) {
+ var matchers = name.split(/\s+/)
+ var prefix = matchers.shift()
+ each(matchers, function(name) {
+ JSpec.addMatcher(prefix + '_' + name, body(name))
+ })
+ }
+ this.matchers[name] = this.normalizeMatcherMessage(this.normalizeMatcherBody(body))
+ this.matchers[name].name = name
+ },
+
+ /**
+ * Add a root suite to JSpec.
+ *
+ * @param {string} description
+     * @param {function} body
+ * @api public
+ */
+
+ describe : function(description, body) {
+ var suite = new JSpec.Suite(description, body)
+ hook('addingSuite', suite)
+ this.allSuites.push(suite)
+ this.suites.push(suite)
+ },
+
+ /**
+ * Return the contents of a function body.
+ *
+ * @param {function} body
+ * @return {string}
+ * @api public
+ */
+
+ contentsOf : function(body) {
+ return body.toString().match(/^[^\{]*{((.*\n*)*)}/m)[1]
+ },
+
+ /**
+ * Evaluate a JSpec capture body.
+ *
+ * @param {function} body
+ * @param {string} errorMessage (optional)
+ * @return {Type}
+ * @api private
+ */
+
+ evalBody : function(body, errorMessage) {
+ var dsl = this.DSL || this.DSLs.snake
+ var matchers = this.matchers
+ var context = this.context || this.defaultContext
+ var contents = this.contentsOf(body)
+ hook('evaluatingBody', dsl, matchers, context, contents)
+ try { with (dsl){ with (context) { with (matchers) { eval(contents) }}} }
+ catch(e) { error(errorMessage, e) }
+ },
+
+ /**
+ * Pre-process a string of JSpec.
+ *
+ * @param {string} input
+ * @return {string}
+ * @api private
+ */
+
+ preprocess : function(input) {
+ if (typeof input != 'string') return
+ input = hookImmutable('preprocessing', input)
+ return input.
+ replace(/\t/g, ' ').
+ replace(/\r\n|\n|\r/g, '\n').
+ split('__END__')[0].
+ replace(/([\w\.]+)\.(stub|destub)\((.*?)\)$/gm, '$2($1, $3)').
+ replace(/describe\s+(.*?)$/gm, 'describe($1, function(){').
+ replace(/^\s+it\s+(.*?)$/gm, ' it($1, function(){').
+ replace(/^ *(before_each|after_each|before|after)(?= |\n|$)/gm, 'JSpec.currentSuite.addHook("$1", function(){').
+ replace(/^\s*end(?=\s|$)/gm, '});').
+ replace(/-\{/g, 'function(){').
+ replace(/(\d+)\.\.(\d+)/g, function(_, a, b){ return range(a, b) }).
+ replace(/\.should([_\.]not)?[_\.](\w+)(?: |;|$)(.*)$/gm, '.should$1_$2($3)').
+ replace(/([\/\s]*)(.+?)\.(should(?:[_\.]not)?)[_\.](\w+)\((.*)\)\s*;?$/gm, '$1 expect($2).$3($4, $5)').
+ replace(/, \)/g, ')').
+ replace(/should\.not/g, 'should_not')
+ },
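+
+    // Sketch: grammar such as the following is rewritten by the
+    // replacements above into plain describe(), it(), and expect() calls:
+    //
+    //   describe 'Array'
+    //     it 'should include values'
+    //       [1, 2].should.include 1
+    //     end
+    //   end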
+
+ /**
+ * Create a range string which can be evaluated to a native array.
+ *
+ * @param {int} start
+ * @param {int} end
+ * @return {string}
+ * @api public
+ */
+
+ range : function(start, end) {
+ var current = parseInt(start), end = parseInt(end), values = [current]
+ if (end > current) while (++current <= end) values.push(current)
+ else while (--current >= end) values.push(current)
+ return '[' + values + ']'
+ },
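+
+    // e.g. range(1, 3) returns '[1,2,3]' and range(3, 1) returns '[3,2,1]';
+    // the preprocessor substitutes these for literals like 1..3.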
+
+ /**
+ * Report on the results.
+ *
+ * @api public
+ */
+
+ report : function() {
+ this.duration = Number(new Date) - this.start
+ hook('reporting', JSpec.options)
+ new (JSpec.options.reporter || JSpec.reporters.DOM)(JSpec, JSpec.options)
+ },
+
+ /**
+ * Run the spec suites. Options are merged
+ * with JSpec options when present.
+ *
+ * @param {hash} options
+ * @return {JSpec}
+ * @api public
+ */
+
+ run : function(options) {
+ if (any(hook('running'), haveStopped)) return this
+ if (options) extend(this.options, options)
+ this.start = Number(new Date)
+ each(this.suites, function(suite) { JSpec.runSuite(suite) })
+ return this
+ },
+
+ /**
+ * Run a suite.
+ *
+ * @param {Suite} suite
+ * @api public
+ */
+
+ runSuite : function(suite) {
+ this.currentSuite = suite
+ this.evalBody(suite.body)
+ suite.ran = true
+ hook('beforeSuite', suite), suite.hook('before')
+ each(suite.specs, function(spec) {
+ hook('beforeSpec', spec)
+ suite.hook('before_each')
+ JSpec.runSpec(spec)
+ hook('afterSpec', spec)
+ suite.hook('after_each')
+ })
+ if (suite.hasSuites()) {
+ each(suite.suites, function(suite) {
+ JSpec.runSuite(suite)
+ })
+ }
+ hook('afterSuite', suite), suite.hook('after')
+ this.stats.suitesFinished++
+ },
+
+ /**
+ * Report a failure for the current spec.
+ *
+ * @param {string} message
+ * @api public
+ */
+
+ fail : function(message) {
+ JSpec.currentSpec.fail(message)
+ },
+
+ /**
+ * Report a passing assertion for the current spec.
+ *
+ * @param {string} message
+ * @api public
+ */
+
+ pass : function(message) {
+ JSpec.currentSpec.pass(message)
+ },
+
+ /**
+ * Run a spec.
+ *
+ * @param {Spec} spec
+ * @api public
+ */
+
+ runSpec : function(spec) {
+ this.currentSpec = spec
+ try { this.evalBody(spec.body) }
+ catch (e) { fail(e) }
+ spec.runDeferredAssertions()
+ destub()
+ this.stats.specsFinished++
+ this.stats.assertions += spec.assertions.length
+ },
+
+ /**
+ * Require a dependency, with optional message.
+ *
+ * @param {string} dependency
+ * @param {string} message (optional)
+ * @return {JSpec}
+ * @api public
+ */
+
+ requires : function(dependency, message) {
+ hook('requiring', dependency, message)
+ try { eval(dependency) }
+ catch (e) { throw 'JSpec depends on ' + dependency + ' ' + message }
+ return this
+ },
+
+ /**
+     * Query against the current query string's keys,
+     * or the specified queryString.
+ *
+ * @param {string} key
+ * @param {string} queryString
+ * @return {string, null}
+ * @api private
+ */
+
+ query : function(key, queryString) {
+ var queryString = (queryString || (main.location ? main.location.search : null) || '').substring(1)
+ return inject(queryString.split('&'), null, function(value, pair){
+ parts = pair.split('=')
+ return parts[0] == key ? parts[1].replace(/%20|\+/gmi, ' ') : value
+ })
+ },
+
+ /**
+ * Throw a JSpec related error.
+ *
+ * @param {string} message
+ * @param {Exception} e
+ * @api public
+ */
+
+ error : function(message, e) {
+ throw (message ? message : '') + e.toString() +
+ (e.line ? ' near line ' + e.line : '')
+ },
+
+ /**
+ * Ad-hoc POST request for JSpec server usage.
+ *
+ * @param {string} uri
+ * @param {string} data
+ * @api private
+ */
+
+ post : function(uri, data) {
+ if (any(hook('posting', uri, data), haveStopped)) return
+ var request = this.xhr()
+ request.open('POST', uri, false)
+ request.setRequestHeader('Content-Type', 'application/json')
+ request.send(JSpec.JSON.encode(data))
+ },
+
+ /**
+ * Instantiate an XMLHttpRequest.
+ *
+     * Here we utilize IE's lame ActiveXObjects first, which
+     * allow IE to access files via the file: protocol; otherwise
+     * we default to XMLHttpRequest.
+ *
+ * @return {XMLHttpRequest, ActiveXObject}
+ * @api private
+ */
+
+ xhr : function() {
+ return this.ieXhr() || new JSpec.request
+ },
+
+ /**
+     * Return Microsoft's piece-of-crap ActiveXObject.
+ *
+ * @return {ActiveXObject}
+ * @api public
+ */
+
+ ieXhr : function() {
+ function object(str) {
+ try { return new ActiveXObject(str) } catch(e) {}
+ }
+ return object('Msxml2.XMLHTTP.6.0') ||
+ object('Msxml2.XMLHTTP.3.0') ||
+ object('Msxml2.XMLHTTP') ||
+ object('Microsoft.XMLHTTP')
+ },
+
+ /**
+ * Check for HTTP request support.
+ *
+ * @return {bool}
+ * @api private
+ */
+
+ hasXhr : function() {
+ return JSpec.request || 'ActiveXObject' in main
+ },
+
+ /**
+     * Try loading _file_, returning its contents as a
+     * string, or null. Chain calls to locate / read a file.
+ *
+ * @param {string} file
+ * @return {string}
+ * @api public
+ */
+
+ tryLoading : function(file) {
+ try { return JSpec.load(file) } catch (e) {}
+ },
+
+ /**
+ * Load a _file_'s contents.
+ *
+ * @param {string} file
+ * @param {function} callback
+ * @return {string}
+ * @api public
+ */
+
+ load : function(file, callback) {
+ if (any(hook('loading', file), haveStopped)) return
+ if ('readFile' in main)
+ return readFile(file)
+ else if (this.hasXhr()) {
+ var request = this.xhr()
+ request.open('GET', file, false)
+ request.send(null)
+ if (request.readyState == 4 &&
+ (request.status == 0 ||
+ request.status.toString().charAt(0) == 2))
+ return request.responseText
+ }
+ else
+ error("failed to load `" + file + "'")
+ },
+
+ /**
+ * Load, pre-process, and evaluate a file.
+ *
+ * @param {string} file
+     * @return {JSpec}
+ * @api public
+ */
+
+ exec : function(file) {
+ if (any(hook('executing', file), haveStopped)) return this
+ eval('with (JSpec){' + this.preprocess(this.load(file)) + '}')
+ return this
+ }
+ }
+
+ // --- Node.js support
+
+ if (typeof GLOBAL === 'object' && typeof exports === 'object')
+ quit = process.exit,
+ print = require('sys').puts,
+ readFile = require('fs').readFileSync
+
+ // --- Utility functions
+
+ var main = this,
+ find = JSpec.any,
+ utils = 'haveStopped stub hookImmutable hook destub map any last pass fail range each option inject select \
+ error escape extend puts query strip color does addMatchers callIterator toArray equal'.split(/\s+/)
+ while (utils.length) eval('var ' + utils[0] + ' = JSpec.' + utils.shift())
+ if (!main.setTimeout) main.setTimeout = function(callback){ callback() }
+
+ // --- Matchers
+
+ addMatchers({
+ equal : "===",
+ eql : "equal(actual, expected)",
+ be : "alias equal",
+ be_greater_than : ">",
+ be_less_than : "<",
+ be_at_least : ">=",
+ be_at_most : "<=",
+ be_a : "actual.constructor == expected",
+ be_an : "alias be_a",
+ be_an_instance_of : "actual instanceof expected",
+ be_null : "actual == null",
+ be_true : "actual == true",
+ be_false : "actual == false",
+ be_undefined : "typeof actual == 'undefined'",
+ be_type : "typeof actual == expected",
+ match : "typeof actual == 'string' ? actual.match(expected) : false",
+ respond_to : "typeof actual[expected] == 'function'",
+ have_length : "actual.length == expected",
+ be_within : "actual >= expected[0] && actual <= last(expected)",
+ have_length_within : "actual.length >= expected[0] && actual.length <= last(expected)",
+
+ receive : { defer : true, match : function(actual, method, times) {
+ proxy = new JSpec.ProxyAssertion(actual, method, times, this.negate)
+ JSpec.currentSpec.assertions.push(proxy)
+ return proxy
+ }},
+
+ be_empty : function(actual) {
+ if (actual.constructor == Object && actual.length == undefined)
+ for (var key in actual)
+ return false;
+ return !actual.length
+ },
+
+ include : function(actual) {
+ for (state = true, i = 1; i < arguments.length; i++) {
+ arg = arguments[i]
+ switch (actual.constructor) {
+ case String:
+ case Number:
+ case RegExp:
+ case Function:
+ state = actual.toString().indexOf(arg) !== -1
+ break
+
+ case Object:
+ state = arg in actual
+ break
+
+ case Array:
+ state = any(actual, function(value){ return equal(value, arg) })
+ break
+ }
+ if (!state) return false
+ }
+ return true
+ },
+
+ throw_error : { match : function(actual, expected, message) {
+ try { actual() }
+ catch (e) {
+ this.e = e
+ var assert = function(arg) {
+ switch (arg.constructor) {
+ case RegExp : return arg.test(e.message || e.toString())
+ case String : return arg == (e.message || e.toString())
+ case Function : return e instanceof arg || e.name == arg.name
+ }
+ }
+ return message ? assert(expected) && assert(message) :
+ expected ? assert(expected) :
+ true
+ }
+ }, message : function(actual, expected, negate) {
+ // TODO: refactor when actual is not in expected [0]
+ var message_for = function(i) {
+ if (expected[i] == undefined) return 'exception'
+ switch (expected[i].constructor) {
+ case RegExp : return 'exception matching ' + puts(expected[i])
+ case String : return 'exception of ' + puts(expected[i])
+ case Function : return expected[i].name || 'Error'
+ }
+ }
+ exception = message_for(1) + (expected[2] ? ' and ' + message_for(2) : '')
+ return 'expected ' + exception + (negate ? ' not ' : '' ) +
+ ' to be thrown, but ' + (this.e ? 'got ' + puts(this.e) : 'nothing was')
+ }},
+
+ have : function(actual, length, property) {
+ return actual[property].length == length
+ },
+
+ have_at_least : function(actual, length, property) {
+ return actual[property].length >= length
+ },
+
+    have_at_most : function(actual, length, property) {
+ return actual[property].length <= length
+ },
+
+ have_within : function(actual, range, property) {
+ length = actual[property].length
+ return length >= range.shift() && length <= range.pop()
+ },
+
+ have_prop : function(actual, property, value) {
+ return actual[property] == null ||
+ actual[property] instanceof Function ? false:
+ value == null ? true:
+ does(actual[property], 'eql', value)
+ },
+
+ have_property : function(actual, property, value) {
+ return actual[property] == null ||
+ actual[property] instanceof Function ? false:
+ value == null ? true:
+ value === actual[property]
+ }
+ })
+
+})()
diff --git a/1.1.x/share/www/script/jspec/jspec.xhr.js b/1.1.x/share/www/script/jspec/jspec.xhr.js
new file mode 100644
index 00000000..61648795
--- /dev/null
+++ b/1.1.x/share/www/script/jspec/jspec.xhr.js
@@ -0,0 +1,195 @@
+
+// JSpec - XHR - Copyright TJ Holowaychuk <tj@vision-media.ca> (MIT Licensed)
+
+(function(){
+
+ var lastRequest
+
+ // --- Original XMLHttpRequest
+
+ var OriginalXMLHttpRequest = 'XMLHttpRequest' in this ?
+ XMLHttpRequest :
+ function(){}
+ var OriginalActiveXObject = 'ActiveXObject' in this ?
+ ActiveXObject :
+ undefined
+
+ // --- MockXMLHttpRequest
+
+ var MockXMLHttpRequest = function() {
+ this.requestHeaders = {}
+ }
+
+ MockXMLHttpRequest.prototype = {
+ status: 0,
+ async: true,
+ readyState: 0,
+ responseText: '',
+ abort: function(){},
+ onreadystatechange: function(){},
+
+ /**
+ * Return response headers hash.
+ */
+
+ getAllResponseHeaders : function(){
+ return this.responseHeaders
+ },
+
+ /**
+ * Return case-insensitive value for header _name_.
+ */
+
+ getResponseHeader : function(name) {
+ return this.responseHeaders[name.toLowerCase()]
+ },
+
+ /**
+ * Set case-insensitive _value_ for header _name_.
+ */
+
+ setRequestHeader : function(name, value) {
+ this.requestHeaders[name.toLowerCase()] = value
+ },
+
+ /**
+ * Open mock request.
+ */
+
+ open : function(method, url, async, user, password) {
+ this.user = user
+ this.password = password
+ this.url = url
+ this.readyState = 1
+ this.method = method.toUpperCase()
+ if (async != undefined) this.async = async
+ if (this.async) this.onreadystatechange()
+ },
+
+ /**
+ * Send request _data_.
+ */
+
+ send : function(data) {
+ var self = this
+ this.data = data
+ this.readyState = 4
+ if (this.method == 'HEAD') this.responseText = null
+ this.responseHeaders['content-length'] = (this.responseText || '').length
+ if(this.async) this.onreadystatechange()
+ lastRequest = function(){
+ return self
+ }
+ }
+ }
+
+ // --- Response status codes
+
+ JSpec.statusCodes = {
+ 100: 'Continue',
+ 101: 'Switching Protocols',
+ 200: 'OK',
+ 201: 'Created',
+ 202: 'Accepted',
+ 203: 'Non-Authoritative Information',
+ 204: 'No Content',
+ 205: 'Reset Content',
+ 206: 'Partial Content',
+ 300: 'Multiple Choice',
+ 301: 'Moved Permanently',
+ 302: 'Found',
+ 303: 'See Other',
+ 304: 'Not Modified',
+ 305: 'Use Proxy',
+ 307: 'Temporary Redirect',
+ 400: 'Bad Request',
+ 401: 'Unauthorized',
+ 402: 'Payment Required',
+ 403: 'Forbidden',
+ 404: 'Not Found',
+ 405: 'Method Not Allowed',
+ 406: 'Not Acceptable',
+ 407: 'Proxy Authentication Required',
+ 408: 'Request Timeout',
+ 409: 'Conflict',
+ 410: 'Gone',
+ 411: 'Length Required',
+ 412: 'Precondition Failed',
+ 413: 'Request Entity Too Large',
+ 414: 'Request-URI Too Long',
+ 415: 'Unsupported Media Type',
+ 416: 'Requested Range Not Satisfiable',
+ 417: 'Expectation Failed',
+ 422: 'Unprocessable Entity',
+ 500: 'Internal Server Error',
+ 501: 'Not Implemented',
+ 502: 'Bad Gateway',
+ 503: 'Service Unavailable',
+ 504: 'Gateway Timeout',
+ 505: 'HTTP Version Not Supported'
+ }
+
+ /**
+ * Mock XMLHttpRequest requests.
+ *
+ * mockRequest().and_return('some data', 'text/plain', 200, { 'X-SomeHeader' : 'somevalue' })
+ *
+ * @return {hash}
+ * @api public
+ */
+
+ function mockRequest() {
+ return { and_return : function(body, type, status, headers) {
+ XMLHttpRequest = MockXMLHttpRequest
+ ActiveXObject = false
+ status = status || 200
+ headers = headers || {}
+ headers['content-type'] = type
+ JSpec.extend(XMLHttpRequest.prototype, {
+ responseText: body,
+ responseHeaders: headers,
+ status: status,
+ statusText: JSpec.statusCodes[status]
+ })
+ }}
+ }
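+
+  // Usage sketch (snake DSL): after mocking, subsequent XHRs receive the
+  // canned response, and last_request() exposes the mock for inspection:
+  //
+  //   mock_request().and_return('{"ok":true}', 'application/json', 201)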
+
+ /**
+ * Unmock XMLHttpRequest requests.
+ *
+ * @api public
+ */
+
+ function unmockRequest() {
+ XMLHttpRequest = OriginalXMLHttpRequest
+ ActiveXObject = OriginalActiveXObject
+ }
+
+ JSpec.include({
+ name: 'Mock XHR',
+
+ // --- Utilities
+
+ utilities : {
+ mockRequest: mockRequest,
+ unmockRequest: unmockRequest
+ },
+
+ // --- Hooks
+
+ afterSpec : function() {
+ unmockRequest()
+ },
+
+ // --- DSLs
+
+ DSLs : {
+ snake : {
+ mock_request: mockRequest,
+ unmock_request: unmockRequest,
+ last_request: function(){ return lastRequest() }
+ }
+ }
+
+ })
+})() \ No newline at end of file
diff --git a/1.1.x/share/www/script/oauth.js b/1.1.x/share/www/script/oauth.js
new file mode 100644
index 00000000..ada00a27
--- /dev/null
+++ b/1.1.x/share/www/script/oauth.js
@@ -0,0 +1,511 @@
+/*
+ * Copyright 2008 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Here's some JavaScript software for implementing OAuth.
+
+ This isn't as useful as you might hope. OAuth is based around
+ allowing tools and websites to talk to each other. However,
+ JavaScript running in web browsers is hampered by security
+ restrictions that prevent code running on one website from
+ accessing data stored or served on another.
+
+ Before you start hacking, make sure you understand the limitations
+ posed by cross-domain XMLHttpRequest.
+
+ On the bright side, some platforms use JavaScript as their
+ language, but enable the programmer to access other web sites.
+   Examples include Google Gadgets and Microsoft Vista Sidebar.
+ For those platforms, this library should come in handy.
+*/
+
+// The HMAC-SHA1 signature method calls b64_hmac_sha1, defined by
+// http://pajhome.org.uk/crypt/md5/sha1.js
+
+/* An OAuth message is represented as an object like this:
+ {method: "GET", action: "http://server.com/path", parameters: ...}
+
+ The parameters may be either a map {name: value, name2: value2}
+ or an Array of name-value pairs [[name, value], [name2, value2]].
+ The latter representation is more powerful: it supports parameters
+ in a specific sequence, or several parameters with the same name;
+ for example [["a", 1], ["b", 2], ["a", 3]].
+
+ Parameter names and values are NOT percent-encoded in an object.
+ They must be encoded before transmission and decoded after reception.
+ For example, this message object:
+ {method: "GET", action: "http://server/path", parameters: {p: "x y"}}
+ ... can be transmitted as an HTTP request that begins:
+ GET /path?p=x%20y HTTP/1.0
+ (This isn't a valid OAuth request, since it lacks a signature etc.)
+ Note that the object "x y" is transmitted as x%20y. To encode
+ parameters, you can call OAuth.addToURL, OAuth.formEncode or
+ OAuth.getAuthorization.
+
+ This message object model harmonizes with the browser object model for
+   input elements of a form, whose value property isn't percent-encoded.
+ The browser encodes each value before transmitting it. For example,
+ see consumer.setInputs in example/consumer.js.
+ */
+var OAuth; if (OAuth == null) OAuth = {};
+
+OAuth.setProperties = function setProperties(into, from) {
+ if (into != null && from != null) {
+ for (var key in from) {
+ into[key] = from[key];
+ }
+ }
+ return into;
+}
+
+OAuth.setProperties(OAuth, // utility functions
+{
+ percentEncode: function percentEncode(s) {
+ if (s == null) {
+ return "";
+ }
+ if (s instanceof Array) {
+ var e = "";
+ for (var i = 0; i < s.length; ++i) {
+ if (e != "") e += '&';
+ e += percentEncode(s[i]);
+ }
+ return e;
+ }
+ s = encodeURIComponent(s);
+ // Now replace the values which encodeURIComponent doesn't do
+ // encodeURIComponent ignores: - _ . ! ~ * ' ( )
+ // OAuth dictates the only ones you can ignore are: - _ . ~
+ // Source: http://developer.mozilla.org/en/docs/Core_JavaScript_1.5_Reference:Global_Functions:encodeURIComponent
+ s = s.replace(/\!/g, "%21");
+ s = s.replace(/\*/g, "%2A");
+ s = s.replace(/\'/g, "%27");
+ s = s.replace(/\(/g, "%28");
+ s = s.replace(/\)/g, "%29");
+ return s;
+ }
+,
+ decodePercent: function decodePercent(s) {
+ if (s != null) {
+ // Handle application/x-www-form-urlencoded, which is defined by
+ // http://www.w3.org/TR/html4/interact/forms.html#h-17.13.4.1
+ s = s.replace(/\+/g, " ");
+ }
+ return decodeURIComponent(s);
+ }
+,
+ /** Convert the given parameters to an Array of name-value pairs. */
+ getParameterList: function getParameterList(parameters) {
+ if (parameters == null) {
+ return [];
+ }
+ if (typeof parameters != "object") {
+ return OAuth.decodeForm(parameters + "");
+ }
+ if (parameters instanceof Array) {
+ return parameters;
+ }
+ var list = [];
+ for (var p in parameters) {
+ list.push([p, parameters[p]]);
+ }
+ return list;
+ }
+,
+ /** Convert the given parameters to a map from name to value. */
+ getParameterMap: function getParameterMap(parameters) {
+ if (parameters == null) {
+ return {};
+ }
+ if (typeof parameters != "object") {
+ return getParameterMap(OAuth.decodeForm(parameters + ""));
+ }
+ if (parameters instanceof Array) {
+ var map = {};
+ for (var p = 0; p < parameters.length; ++p) {
+ var key = parameters[p][0];
+ if (map[key] === undefined) { // first value wins
+ map[key] = parameters[p][1];
+ }
+ }
+ return map;
+ }
+ return parameters;
+ }
+,
+ getParameter: function getParameter(parameters, name) {
+ if (parameters instanceof Array) {
+ for (var p = 0; p < parameters.length; ++p) {
+ if (parameters[p][0] == name) {
+ return parameters[p][1]; // first value wins
+ }
+ }
+ } else {
+ return OAuth.getParameterMap(parameters)[name];
+ }
+ return null;
+ }
+,
+ formEncode: function formEncode(parameters) {
+ var form = "";
+ var list = OAuth.getParameterList(parameters);
+ for (var p = 0; p < list.length; ++p) {
+ var value = list[p][1];
+ if (value == null) value = "";
+ if (form != "") form += '&';
+ form += OAuth.percentEncode(list[p][0])
+ +'='+ OAuth.percentEncode(value);
+ }
+ return form;
+ }
+,
+ decodeForm: function decodeForm(form) {
+ var list = [];
+ var nvps = form.split('&');
+ for (var n = 0; n < nvps.length; ++n) {
+ var nvp = nvps[n];
+ if (nvp == "") {
+ continue;
+ }
+ var equals = nvp.indexOf('=');
+ var name;
+ var value;
+ if (equals < 0) {
+ name = OAuth.decodePercent(nvp);
+ value = null;
+ } else {
+ name = OAuth.decodePercent(nvp.substring(0, equals));
+ value = OAuth.decodePercent(nvp.substring(equals + 1));
+ }
+ list.push([name, value]);
+ }
+ return list;
+ }
+,
+ setParameter: function setParameter(message, name, value) {
+ var parameters = message.parameters;
+ if (parameters instanceof Array) {
+ for (var p = 0; p < parameters.length; ++p) {
+ if (parameters[p][0] == name) {
+ if (value === undefined) {
+ parameters.splice(p, 1);
+ } else {
+ parameters[p][1] = value;
+ value = undefined;
+ }
+ }
+ }
+ if (value !== undefined) {
+ parameters.push([name, value]);
+ }
+ } else {
+ parameters = OAuth.getParameterMap(parameters);
+ parameters[name] = value;
+ message.parameters = parameters;
+ }
+ }
+,
+ setParameters: function setParameters(message, parameters) {
+ var list = OAuth.getParameterList(parameters);
+ for (var i = 0; i < list.length; ++i) {
+ OAuth.setParameter(message, list[i][0], list[i][1]);
+ }
+ }
+,
+ /** Fill in parameters to help construct a request message.
+ This function doesn't fill in every parameter.
+ The accessor object should be like:
+ {consumerKey:'foo', consumerSecret:'bar', accessorSecret:'nurn', token:'krelm', tokenSecret:'blah'}
+ The accessorSecret property is optional.
+ */
+ completeRequest: function completeRequest(message, accessor) {
+ if (message.method == null) {
+ message.method = "GET";
+ }
+ var map = OAuth.getParameterMap(message.parameters);
+ if (map.oauth_consumer_key == null) {
+ OAuth.setParameter(message, "oauth_consumer_key", accessor.consumerKey || "");
+ }
+ if (map.oauth_token == null && accessor.token != null) {
+ OAuth.setParameter(message, "oauth_token", accessor.token);
+ }
+ if (map.oauth_version == null) {
+ OAuth.setParameter(message, "oauth_version", "1.0");
+ }
+ if (map.oauth_timestamp == null) {
+ OAuth.setParameter(message, "oauth_timestamp", OAuth.timestamp());
+ }
+ if (map.oauth_nonce == null) {
+ OAuth.setParameter(message, "oauth_nonce", OAuth.nonce(6));
+ }
+ OAuth.SignatureMethod.sign(message, accessor);
+ }
+,
+ setTimestampAndNonce: function setTimestampAndNonce(message) {
+ OAuth.setParameter(message, "oauth_timestamp", OAuth.timestamp());
+ OAuth.setParameter(message, "oauth_nonce", OAuth.nonce(6));
+ }
+,
+ addToURL: function addToURL(url, parameters) {
+ var newURL = url;
+ if (parameters != null) {
+ var toAdd = OAuth.formEncode(parameters);
+ if (toAdd.length > 0) {
+ var q = url.indexOf('?');
+ if (q < 0) newURL += '?';
+ else newURL += '&';
+ newURL += toAdd;
+ }
+ }
+ return newURL;
+ }
+,
+ /** Construct the value of the Authorization header for an HTTP request. */
+ getAuthorizationHeader: function getAuthorizationHeader(realm, parameters) {
+ var header = 'OAuth realm="' + OAuth.percentEncode(realm) + '"';
+ var list = OAuth.getParameterList(parameters);
+ for (var p = 0; p < list.length; ++p) {
+ var parameter = list[p];
+ var name = parameter[0];
+ if (name.indexOf("oauth_") == 0) {
+ header += ',' + OAuth.percentEncode(name) + '="' + OAuth.percentEncode(parameter[1]) + '"';
+ }
+ }
+ return header;
+ }
+,
+ timestamp: function timestamp() {
+ var d = new Date();
+ return Math.floor(d.getTime()/1000);
+ }
+,
+ nonce: function nonce(length) {
+ var chars = OAuth.nonce.CHARS;
+ var result = "";
+ for (var i = 0; i < length; ++i) {
+ var rnum = Math.floor(Math.random() * chars.length);
+ result += chars.substring(rnum, rnum+1);
+ }
+ return result;
+ }
+});
+
+OAuth.nonce.CHARS = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
+
+/** Define a constructor function,
+ without causing trouble to anyone who was using it as a namespace.
+ That is, if parent[name] already existed and had properties,
+ copy those properties into the new constructor.
+ */
+OAuth.declareClass = function declareClass(parent, name, newConstructor) {
+ var previous = parent[name];
+ parent[name] = newConstructor;
+ if (newConstructor != null && previous != null) {
+ for (var key in previous) {
+ if (key != "prototype") {
+ newConstructor[key] = previous[key];
+ }
+ }
+ }
+ return newConstructor;
+}
+
+/** An abstract algorithm for signing messages. */
+OAuth.declareClass(OAuth, "SignatureMethod", function OAuthSignatureMethod(){});
+
+OAuth.setProperties(OAuth.SignatureMethod.prototype, // instance members
+{
+ /** Add a signature to the message. */
+ sign: function sign(message) {
+ var baseString = OAuth.SignatureMethod.getBaseString(message);
+ var signature = this.getSignature(baseString);
+ OAuth.setParameter(message, "oauth_signature", signature);
+ return signature; // just in case someone's interested
+ }
+,
+ /** Set the key string for signing. */
+ initialize: function initialize(name, accessor) {
+ var consumerSecret;
+ if (accessor.accessorSecret != null
+ && name.length > 9
+ && name.substring(name.length-9) == "-Accessor")
+ {
+ consumerSecret = accessor.accessorSecret;
+ } else {
+ consumerSecret = accessor.consumerSecret;
+ }
+ this.key = OAuth.percentEncode(consumerSecret)
+ +"&"+ OAuth.percentEncode(accessor.tokenSecret);
+ }
+});
+
+/* SignatureMethod expects an accessor object to be like this:
+ {tokenSecret: "lakjsdflkj...", consumerSecret: "QOUEWRI..", accessorSecret: "xcmvzc..."}
+ The accessorSecret property is optional.
+ */
+OAuth.setProperties(OAuth.SignatureMethod, // class members
+{
+ sign: function sign(message, accessor) {
+ var name = OAuth.getParameterMap(message.parameters).oauth_signature_method;
+ if (name == null || name == "") {
+ name = "HMAC-SHA1";
+ OAuth.setParameter(message, "oauth_signature_method", name);
+ }
+ OAuth.SignatureMethod.newMethod(name, accessor).sign(message);
+ }
+,
+ /** Instantiate a SignatureMethod for the given method name. */
+ newMethod: function newMethod(name, accessor) {
+ var impl = OAuth.SignatureMethod.REGISTERED[name];
+ if (impl != null) {
+ var method = new impl();
+ method.initialize(name, accessor);
+ return method;
+ }
+ var err = new Error("signature_method_rejected");
+ var acceptable = "";
+ for (var r in OAuth.SignatureMethod.REGISTERED) {
+ if (acceptable != "") acceptable += '&';
+ acceptable += OAuth.percentEncode(r);
+ }
+ err.oauth_acceptable_signature_methods = acceptable;
+ throw err;
+ }
+,
+ /** A map from signature method name to constructor. */
+ REGISTERED : {}
+,
+ /** Subsequently, the given constructor will be used for the named methods.
+ The constructor will be called with no parameters.
+ The resulting object should usually implement getSignature(baseString).
+ You can easily define such a constructor by calling makeSubclass, below.
+ */
+ registerMethodClass: function registerMethodClass(names, classConstructor) {
+ for (var n = 0; n < names.length; ++n) {
+ OAuth.SignatureMethod.REGISTERED[names[n]] = classConstructor;
+ }
+ }
+,
+ /** Create a subclass of OAuth.SignatureMethod, with the given getSignature function. */
+ makeSubclass: function makeSubclass(getSignatureFunction) {
+ var superClass = OAuth.SignatureMethod;
+ var subClass = function() {
+ superClass.call(this);
+ };
+ subClass.prototype = new superClass();
+ // Delete instance variables from prototype:
+ // delete subclass.prototype... There aren't any.
+ subClass.prototype.getSignature = getSignatureFunction;
+ subClass.prototype.constructor = subClass;
+ return subClass;
+ }
+,
+ getBaseString: function getBaseString(message) {
+ var URL = message.action;
+ var q = URL.indexOf('?');
+ var parameters;
+ if (q < 0) {
+ parameters = message.parameters;
+ } else {
+ // Combine the URL query string with the other parameters:
+ parameters = OAuth.decodeForm(URL.substring(q + 1));
+ var toAdd = OAuth.getParameterList(message.parameters);
+ for (var a = 0; a < toAdd.length; ++a) {
+ parameters.push(toAdd[a]);
+ }
+ }
+ return OAuth.percentEncode(message.method.toUpperCase())
+ +'&'+ OAuth.percentEncode(OAuth.SignatureMethod.normalizeUrl(URL))
+ +'&'+ OAuth.percentEncode(OAuth.SignatureMethod.normalizeParameters(parameters));
+ }
+,
+ normalizeUrl: function normalizeUrl(url) {
+ var uri = OAuth.SignatureMethod.parseUri(url);
+ var scheme = uri.protocol.toLowerCase();
+ var authority = uri.authority.toLowerCase();
+ var dropPort = (scheme == "http" && uri.port == 80)
+ || (scheme == "https" && uri.port == 443);
+ if (dropPort) {
+ // find the last : in the authority
+ var index = authority.lastIndexOf(":");
+ if (index >= 0) {
+ authority = authority.substring(0, index);
+ }
+ }
+ var path = uri.path;
+ if (!path) {
+ path = "/"; // conforms to RFC 2616 section 3.2.2
+ }
+ // we know that there is no query and no fragment here.
+ return scheme + "://" + authority + path;
+ }
+,
+ parseUri: function parseUri (str) {
+ /* This function was adapted from parseUri 1.2.1
+ http://stevenlevithan.com/demo/parseuri/js/assets/parseuri.js
+ */
+ var o = {key: ["source","protocol","authority","userInfo","user","password","host","port","relative","path","directory","file","query","anchor"],
+ parser: {strict: /^(?:([^:\/?#]+):)?(?:\/\/((?:(([^:@]*):?([^:@]*))?@)?([^:\/?#]*)(?::(\d*))?))?((((?:[^?#\/]*\/)*)([^?#]*))(?:\?([^#]*))?(?:#(.*))?)/ }};
+ var m = o.parser.strict.exec(str);
+ var uri = {};
+ var i = 14;
+ while (i--) uri[o.key[i]] = m[i] || "";
+ return uri;
+ }
+,
+ normalizeParameters: function normalizeParameters(parameters) {
+ if (parameters == null) {
+ return "";
+ }
+ var list = OAuth.getParameterList(parameters);
+ var sortable = [];
+ for (var p = 0; p < list.length; ++p) {
+ var nvp = list[p];
+ if (nvp[0] != "oauth_signature") {
+ sortable.push([ OAuth.percentEncode(nvp[0])
+ + " " // because it comes before any character that can appear in a percentEncoded string.
+ + OAuth.percentEncode(nvp[1])
+ , nvp]);
+ }
+ }
+ sortable.sort(function(a,b) {
+ if (a[0] < b[0]) return -1;
+ if (a[0] > b[0]) return 1;
+ return 0;
+ });
+ var sorted = [];
+ for (var s = 0; s < sortable.length; ++s) {
+ sorted.push(sortable[s][1]);
+ }
+ return OAuth.formEncode(sorted);
+ }
+});
+
+OAuth.SignatureMethod.registerMethodClass(["PLAINTEXT", "PLAINTEXT-Accessor"],
+ OAuth.SignatureMethod.makeSubclass(
+ function getSignature(baseString) {
+ return this.key;
+ }
+ ));
+
+OAuth.SignatureMethod.registerMethodClass(["HMAC-SHA1", "HMAC-SHA1-Accessor"],
+ OAuth.SignatureMethod.makeSubclass(
+ function getSignature(baseString) {
+ b64pad = '=';
+ var signature = b64_hmac_sha1(this.key, baseString);
+ return signature;
+ }
+ ));
diff --git a/1.1.x/share/www/script/sha1.js b/1.1.x/share/www/script/sha1.js
new file mode 100644
index 00000000..ee73a634
--- /dev/null
+++ b/1.1.x/share/www/script/sha1.js
@@ -0,0 +1,202 @@
+/*
+ * A JavaScript implementation of the Secure Hash Algorithm, SHA-1, as defined
+ * in FIPS PUB 180-1
+ * Version 2.1a Copyright Paul Johnston 2000 - 2002.
+ * Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
+ * Distributed under the BSD License
+ * See http://pajhome.org.uk/crypt/md5 for details.
+ */
+
+/*
+ * Configurable variables. You may need to tweak these to be compatible with
+ * the server-side, but the defaults work in most cases.
+ */
+var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
+var b64pad = "="; /* base-64 pad character. "=" for strict RFC compliance */
+var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */
+
+/*
+ * These are the functions you'll usually want to call
+ * They take string arguments and return either hex or base-64 encoded strings
+ */
+function hex_sha1(s){return binb2hex(core_sha1(str2binb(s),s.length * chrsz));}
+function b64_sha1(s){return binb2b64(core_sha1(str2binb(s),s.length * chrsz));}
+function str_sha1(s){return binb2str(core_sha1(str2binb(s),s.length * chrsz));}
+function hex_hmac_sha1(key, data){ return binb2hex(core_hmac_sha1(key, data));}
+function b64_hmac_sha1(key, data){ return binb2b64(core_hmac_sha1(key, data));}
+function str_hmac_sha1(key, data){ return binb2str(core_hmac_sha1(key, data));}
+
+/*
+ * Perform a simple self-test to see if the VM is working
+ */
+function sha1_vm_test()
+{
+ return hex_sha1("abc") == "a9993e364706816aba3e25717850c26c9cd0d89d";
+}
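+
+// A further known-answer check for the HMAC entry points, from the RFC 2202
+// test vectors:
+// hex_hmac_sha1("Jefe", "what do ya want for nothing?")
+// == "effcdf6ae5eb2fa2d27416d5f184df9c259a7c79"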
+
+/*
+ * Calculate the SHA-1 of an array of big-endian words, and a bit length
+ */
+function core_sha1(x, len)
+{
+ /* append padding */
+ x[len >> 5] |= 0x80 << (24 - len % 32);
+ x[((len + 64 >> 9) << 4) + 15] = len;
+
+ var w = Array(80);
+ var a = 1732584193;
+ var b = -271733879;
+ var c = -1732584194;
+ var d = 271733878;
+ var e = -1009589776;
+
+ for(var i = 0; i < x.length; i += 16)
+ {
+ var olda = a;
+ var oldb = b;
+ var oldc = c;
+ var oldd = d;
+ var olde = e;
+
+ for(var j = 0; j < 80; j++)
+ {
+ if(j < 16) w[j] = x[i + j];
+ else w[j] = rol(w[j-3] ^ w[j-8] ^ w[j-14] ^ w[j-16], 1);
+ var t = safe_add(safe_add(rol(a, 5), sha1_ft(j, b, c, d)),
+ safe_add(safe_add(e, w[j]), sha1_kt(j)));
+ e = d;
+ d = c;
+ c = rol(b, 30);
+ b = a;
+ a = t;
+ }
+
+ a = safe_add(a, olda);
+ b = safe_add(b, oldb);
+ c = safe_add(c, oldc);
+ d = safe_add(d, oldd);
+ e = safe_add(e, olde);
+ }
+ return Array(a, b, c, d, e);
+
+}
+
+/*
+ * Perform the appropriate triplet combination function for the current
+ * iteration
+ */
+function sha1_ft(t, b, c, d)
+{
+ if(t < 20) return (b & c) | ((~b) & d);
+ if(t < 40) return b ^ c ^ d;
+ if(t < 60) return (b & c) | (b & d) | (c & d);
+ return b ^ c ^ d;
+}
+
+/*
+ * Determine the appropriate additive constant for the current iteration
+ */
+function sha1_kt(t)
+{
+ return (t < 20) ? 1518500249 : (t < 40) ? 1859775393 :
+ (t < 60) ? -1894007588 : -899497514;
+}
+
+/*
+ * Calculate the HMAC-SHA1 of a key and some data
+ */
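+// Following RFC 2104: HMAC(K, m) = H((K xor opad) || H((K xor ipad) || m)),
+// where keys longer than the 512-bit block are first hashed down to fit.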
+function core_hmac_sha1(key, data)
+{
+ var bkey = str2binb(key);
+ if(bkey.length > 16) bkey = core_sha1(bkey, key.length * chrsz);
+
+ var ipad = Array(16), opad = Array(16);
+ for(var i = 0; i < 16; i++)
+ {
+ ipad[i] = bkey[i] ^ 0x36363636;
+ opad[i] = bkey[i] ^ 0x5C5C5C5C;
+ }
+
+ var hash = core_sha1(ipad.concat(str2binb(data)), 512 + data.length * chrsz);
+ return core_sha1(opad.concat(hash), 512 + 160);
+}
+
+/*
+ * Add integers, wrapping at 2^32. This uses 16-bit operations internally
+ * to work around bugs in some JS interpreters.
+ */
+function safe_add(x, y)
+{
+ var lsw = (x & 0xFFFF) + (y & 0xFFFF);
+ var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
+ return (msw << 16) | (lsw & 0xFFFF);
+}
+
+/*
+ * Bitwise rotate a 32-bit number to the left.
+ */
+function rol(num, cnt)
+{
+ return (num << cnt) | (num >>> (32 - cnt));
+}
+
+/*
+ * Convert an 8-bit or 16-bit string to an array of big-endian words
+ * In 8-bit mode, characters >255 have their high byte silently ignored.
+ */
+function str2binb(str)
+{
+ var bin = Array();
+ var mask = (1 << chrsz) - 1;
+ for(var i = 0; i < str.length * chrsz; i += chrsz)
+ bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (32 - chrsz - i%32);
+ return bin;
+}
+
+/*
+ * Convert an array of big-endian words to a string
+ */
+function binb2str(bin)
+{
+ var str = "";
+ var mask = (1 << chrsz) - 1;
+ for(var i = 0; i < bin.length * 32; i += chrsz)
+ str += String.fromCharCode((bin[i>>5] >>> (32 - chrsz - i%32)) & mask);
+ return str;
+}
+
+/*
+ * Convert an array of big-endian words to a hex string.
+ */
+function binb2hex(binarray)
+{
+ var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
+ var str = "";
+ for(var i = 0; i < binarray.length * 4; i++)
+ {
+ str += hex_tab.charAt((binarray[i>>2] >> ((3 - i%4)*8+4)) & 0xF) +
+ hex_tab.charAt((binarray[i>>2] >> ((3 - i%4)*8 )) & 0xF);
+ }
+ return str;
+}
+
+/*
+ * Convert an array of big-endian words to a base-64 string
+ */
+function binb2b64(binarray)
+{
+ var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ var str = "";
+ for(var i = 0; i < binarray.length * 4; i += 3)
+ {
+ var triplet = (((binarray[i >> 2] >> 8 * (3 - i %4)) & 0xFF) << 16)
+ | (((binarray[i+1 >> 2] >> 8 * (3 - (i+1)%4)) & 0xFF) << 8 )
+ | ((binarray[i+2 >> 2] >> 8 * (3 - (i+2)%4)) & 0xFF);
+ for(var j = 0; j < 4; j++)
+ {
+ if(i * 8 + j * 6 > binarray.length * 32) str += b64pad;
+ else str += tab.charAt((triplet >> 6*(3-j)) & 0x3F);
+ }
+ }
+ return str;
+}
diff --git a/1.1.x/share/www/script/test/all_docs.js b/1.1.x/share/www/script/test/all_docs.js
new file mode 100644
index 00000000..1d83aa95
--- /dev/null
+++ b/1.1.x/share/www/script/test/all_docs.js
@@ -0,0 +1,136 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.all_docs = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // Create some more documents.
+ // Notice the use of the ok member on the return result.
+ T(db.save({_id:"0",a:1,b:1}).ok);
+ T(db.save({_id:"3",a:4,b:16}).ok);
+ T(db.save({_id:"1",a:2,b:4}).ok);
+ T(db.save({_id:"2",a:3,b:9}).ok);
+
+ // Check _all_docs
+ var results = db.allDocs();
+ var rows = results.rows;
+
+ T(results.total_rows == results.rows.length);
+
+ for(var i=0; i < rows.length; i++) {
+ T(rows[i].id >= "0" && rows[i].id <= "4");
+ }
+
+ // Check _all_docs with descending=true
+ var desc = db.allDocs({descending:true});
+ T(desc.total_rows == desc.rows.length);
+
+ // Check _all_docs offset
+ var all = db.allDocs({startkey:"2"});
+ T(all.offset == 2);
+
+ // check that the docs show up in the seq view in the order they were created
+ var changes = db.changes();
+ var ids = ["0","3","1","2"];
+ for (var i=0; i < changes.results.length; i++) {
+ var row = changes.results[i];
+ T(row.id == ids[i], "seq order");
+ };
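+ // (each changes row has roughly the shape
+ // {"seq": 1, "id": "0", "changes": [{"rev": "1-..."}]})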
+
+ // it should work in reverse as well
+ changes = db.changes({descending:true});
+ ids = ["2","1","3","0"];
+ for (var i=0; i < changes.results.length; i++) {
+ var row = changes.results[i];
+ T(row.id == ids[i], "descending=true");
+ };
+
+ // check that deletions also show up right
+ var doc1 = db.open("1");
+ var deleted = db.deleteDoc(doc1);
+ T(deleted.ok);
+ changes = db.changes();
+ // the deletion should make doc id 1 have the last seq num
+ T(changes.results.length == 4);
+ T(changes.results[3].id == "1");
+ T(changes.results[3].deleted);
+
+ // do an update
+ var doc2 = db.open("3");
+ doc2.updated = "totally";
+ db.save(doc2);
+ changes = db.changes();
+
+ // the update should make doc id 3 have the last seq num
+ T(changes.results.length == 4);
+ T(changes.results[3].id == "3");
+
+ // ok now let's see what happens with include_docs
+ changes = db.changes({include_docs: true});
+ T(changes.results.length == 4);
+ T(changes.results[3].id == "3");
+ T(changes.results[3].doc.updated == "totally");
+
+ T(changes.results[2].doc);
+ T(changes.results[2].doc._deleted);
+
+ rows = db.allDocs({include_docs: true}, ["1"]).rows;
+ TEquals(1, rows.length);
+ TEquals("1", rows[0].key);
+ TEquals("1", rows[0].id);
+ TEquals(true, rows[0].value.deleted);
+ TEquals(null, rows[0].doc);
+
+ // add conflicts
+ var conflictDoc1 = {
+ _id: "3", _rev: "2-aa01552213fafa022e6167113ed01087", value: "X"
+ };
+ var conflictDoc2 = {
+ _id: "3", _rev: "2-ff01552213fafa022e6167113ed01087", value: "Z"
+ };
+ T(db.save(conflictDoc1, {new_edits: false}));
+ T(db.save(conflictDoc2, {new_edits: false}));
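+ // (new_edits:false stores the supplied _rev verbatim, as the replicator does,
+ // which is how two conflicting siblings of doc "3" are forced in here)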
+
+ var winRev = db.open("3");
+
+ changes = db.changes({include_docs: true, conflicts: true, style: "all_docs"});
+ TEquals("3", changes.results[3].id);
+ TEquals(3, changes.results[3].changes.length);
+ TEquals(winRev._rev, changes.results[3].changes[0].rev);
+ TEquals("3", changes.results[3].doc._id);
+ TEquals(winRev._rev, changes.results[3].doc._rev);
+ TEquals(true, changes.results[3].doc._conflicts instanceof Array);
+ TEquals(2, changes.results[3].doc._conflicts.length);
+
+ rows = db.allDocs({include_docs: true, conflicts: true}).rows;
+ TEquals(3, rows.length);
+ TEquals("3", rows[2].key);
+ TEquals("3", rows[2].id);
+ TEquals(winRev._rev, rows[2].value.rev);
+ TEquals(winRev._rev, rows[2].doc._rev);
+ TEquals("3", rows[2].doc._id);
+ TEquals(true, rows[2].doc._conflicts instanceof Array);
+ TEquals(2, rows[2].doc._conflicts.length);
+
+ // test that _all_docs collates sanely
+ db.save({_id: "Z", foo: "Z"});
+ db.save({_id: "a", foo: "a"});
+
+ var rows = db.allDocs({startkey: "Z", endkey: "Z"}).rows;
+ T(rows.length == 1);
+
+ // cleanup
+ db.deleteDb();
+};
diff --git a/1.1.x/share/www/script/test/attachment_conflicts.js b/1.1.x/share/www/script/test/attachment_conflicts.js
new file mode 100644
index 00000000..c400277e
--- /dev/null
+++ b/1.1.x/share/www/script/test/attachment_conflicts.js
@@ -0,0 +1,56 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do some edit conflict detection tests for attachments.
+couchTests.attachment_conflicts = function(debug) {
+
+ var dbA = new CouchDB("test_suite_db_a", {"X-Couch-Full-Commit":"false"});
+ var dbB = new CouchDB("test_suite_db_b", {"X-Couch-Full-Commit":"false"});
+ dbA.deleteDb();
+ dbA.createDb();
+ dbB.deleteDb();
+ dbB.createDb();
+
+ if (debug) debugger;
+
+ T(dbA.save({"_id":"doc", "foo":"bar"}).ok);
+
+ // create conflict
+ T(CouchDB.replicate("test_suite_db_a", "test_suite_db_b").ok);
+
+ var doc = dbA.open("doc");
+ var rev11 = doc._rev;
+ T(dbA.save({"_id":"doc", "foo":"bar2","_rev":rev11}).ok);
+
+ doc = dbB.open("doc");
+ var rev12 = doc._rev;
+ T(dbB.save({"_id":"doc", "foo":"bar3","_rev":rev12}).ok);
+
+ T(CouchDB.replicate("test_suite_db_a", "test_suite_db_b").ok);
+
+ // the attachment
+ var bin_data = "JHAPDO*AU£PN ){(3u[d 93DQ9¡€])} ææøo'∂ƒæ≤çæππ•¥∫¶®#†π¶®¥π€ª®˙π8np";
+
+ doc = dbB.open("doc");
+ var rev13 = doc._rev;
+
+ // test that we can attach to conflicting documents
+ var xhr = CouchDB.request("PUT", "/test_suite_db_b/doc/attachment.txt", {
+ headers: {
+ "Content-Type": "text/plain;charset=utf-8",
+ "If-Match": rev13
+ },
+ body: bin_data
+ });
+ T(xhr.status == 201);
+
+};
diff --git a/1.1.x/share/www/script/test/attachment_names.js b/1.1.x/share/www/script/test/attachment_names.js
new file mode 100644
index 00000000..777b5ece
--- /dev/null
+++ b/1.1.x/share/www/script/test/attachment_names.js
@@ -0,0 +1,98 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachment_names = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var goodDoc = {
+ _id: "good_doc",
+ _attachments: {
+ "Колян.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ };
+
+ var save_response = db.save(goodDoc);
+ T(save_response.ok);
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/good_doc/Колян.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+ T(xhr.getResponseHeader("Content-Type") == "text/plain");
+ T(xhr.getResponseHeader("Etag") == '"' + save_response.rev + '"');
+
+ var binAttDoc = {
+ _id: "bin_doc",
+ _attachments:{
+ "foo\x80txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ };
+
+ // inline attachments
+ resp = db.save(binAttDoc);
+ TEquals(true, resp.ok, "attachment_name: inline attachment");
+
+
+ // standalone docs
+ var bin_data = "JHAPDO*AU£PN ){(3u[d 93DQ9¡€])} ææøo'∂ƒæ≤çæππ•¥∫¶®#†π¶®¥π€ª®˙π8np";
+
+
+ var xhr = (CouchDB.request("PUT", "/test_suite_db/bin_doc3/attachment\x80txt", {
+ headers:{"Content-Type":"text/plain;charset=utf-8"},
+ body:bin_data
+ }));
+
+ var resp = JSON.parse(xhr.responseText);
+ TEquals(201, xhr.status, "attachment_name: standalone API");
+ TEquals("Created", xhr.statusText, "attachment_name: standalone API");
+ TEquals(true, resp.ok, "attachment_name: standalone API");
+
+ // bulk docs
+ var docs = { docs: [binAttDoc] };
+
+ var xhr = CouchDB.request("POST", "/test_suite_db/_bulk_docs", {
+ body: JSON.stringify(docs)
+ });
+
+ TEquals(201, xhr.status, "attachment_name: bulk docs");
+ TEquals("Created", xhr.statusText, "attachment_name: bulk docs");
+
+
+ // leading underscores
+ var binAttDoc = {
+ _id: "bin_doc2",
+ _attachments:{
+ "_foo.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ };
+
+ try {
+ db.save(binAttDoc);
+ TEquals(1, 2, "Attachment name with leading underscore saved. Should never show!");
+ } catch (e) {
+ TEquals("bad_request", e.error, "attachment_name: leading underscore");
+ TEquals("Attachment name can't start with '_'", e.reason, "attachment_name: leading underscore");
+ }
+
+ // todo: form uploads, waiting for cmlenz's test case
+
+};
diff --git a/1.1.x/share/www/script/test/attachment_paths.js b/1.1.x/share/www/script/test/attachment_paths.js
new file mode 100644
index 00000000..3f6ffb7c
--- /dev/null
+++ b/1.1.x/share/www/script/test/attachment_paths.js
@@ -0,0 +1,153 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachment_paths = function(debug) {
+ if (debug) debugger;
+ var dbNames = ["test_suite_db", "test_suite_db/with_slashes"];
+ for (var i=0; i < dbNames.length; i++) {
+ var db = new CouchDB(dbNames[i]);
+ var dbName = encodeURIComponent(dbNames[i]);
+ db.deleteDb();
+ db.createDb();
+
+ // first just save a regular doc with an attachment that has a slash in the url.
+ // (also gonna run an encoding check case)
+ var binAttDoc = {
+ _id: "bin_doc",
+ _attachments:{
+ "foo/bar.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ },
+ "foo%2Fbaz.txt": {
+ content_type:"text/plain",
+ data: "V2UgbGlrZSBwZXJjZW50IHR3byBGLg=="
+ }
+ }
+ };
+
+ T(db.save(binAttDoc).ok);
+
+ var xhr = CouchDB.request("GET", "/"+dbName+"/bin_doc/foo/bar.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+ T(xhr.getResponseHeader("Content-Type") == "text/plain");
+
+ // let's try it with an escaped attachment id...
+ // weird that it's at two urls
+ var xhr = CouchDB.request("GET", "/"+dbName+"/bin_doc/foo%2Fbar.txt");
+ T(xhr.status == 200);
+ // xhr.responseText == "This is a base64 encoded text"
+
+ var xhr = CouchDB.request("GET", "/"+dbName+"/bin_doc/foo/baz.txt");
+ T(xhr.status == 404);
+
+ var xhr = CouchDB.request("GET", "/"+dbName+"/bin_doc/foo%252Fbaz.txt");
+ T(xhr.status == 200);
+ T(xhr.responseText == "We like percent two F.");
+
+ // require a _rev to PUT
+ var xhr = CouchDB.request("PUT", "/"+dbName+"/bin_doc/foo/attachment.txt", {
+ headers:{"Content-Type":"text/plain;charset=utf-8"},
+ body:"Just some text"
+ });
+ T(xhr.status == 409);
+
+ var xhr = CouchDB.request("PUT", "/"+dbName+"/bin_doc/foo/bar2.txt?rev=" + binAttDoc._rev, {
+ body:"This is no base64 encoded text",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ T(xhr.status == 201);
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ binAttDoc = db.open("bin_doc");
+
+ T(binAttDoc._attachments["foo/bar.txt"] !== undefined);
+ T(binAttDoc._attachments["foo%2Fbaz.txt"] !== undefined);
+ T(binAttDoc._attachments["foo/bar2.txt"] !== undefined);
+ TEquals("text/plain;charset=utf-8", // thank you Safari
+ binAttDoc._attachments["foo/bar2.txt"].content_type.toLowerCase(),
+ "correct content-type"
+ );
+ T(binAttDoc._attachments["foo/bar2.txt"].length == 30);
+
+ //// now repeat the whole thing with a design doc
+
+ // first just save a regular doc with an attachment that has a slash in the url.
+ // (also gonna run an encoding check case)
+ var binAttDoc = {
+ _id: "_design/bin_doc",
+ _attachments:{
+ "foo/bar.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ },
+ "foo%2Fbaz.txt": {
+ content_type:"text/plain",
+ data: "V2UgbGlrZSBwZXJjZW50IHR3byBGLg=="
+ }
+ }
+ };
+
+ T(db.save(binAttDoc).ok);
+
+ var xhr = CouchDB.request("GET", "/"+dbName+"/_design%2Fbin_doc/foo/bar.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+ T(xhr.getResponseHeader("Content-Type") == "text/plain");
+
+ // let's try it with an escaped attachment id...
+ // weird that it's at two urls
+ var xhr = CouchDB.request("GET", "/"+dbName+"/_design%2Fbin_doc/foo%2Fbar.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+ T(xhr.status == 200);
+
+ // err, 3 urls
+ var xhr = CouchDB.request("GET", "/"+dbName+"/_design/bin_doc/foo%2Fbar.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+ T(xhr.status == 200);
+
+ // I mean um, 4 urls
+ var xhr = CouchDB.request("GET", "/"+dbName+"/_design/bin_doc/foo/bar.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+ T(xhr.status == 200);
+
+ var xhr = CouchDB.request("GET", "/"+dbName+"/_design%2Fbin_doc/foo/baz.txt");
+ T(xhr.status == 404);
+
+ var xhr = CouchDB.request("GET", "/"+dbName+"/_design%2Fbin_doc/foo%252Fbaz.txt");
+ T(xhr.status == 200);
+ T(xhr.responseText == "We like percent two F.");
+
+ // require a _rev to PUT
+ var xhr = CouchDB.request("PUT", "/"+dbName+"/_design%2Fbin_doc/foo/attachment.txt", {
+ headers:{"Content-Type":"text/plain;charset=utf-8"},
+ body:"Just some text"
+ });
+ T(xhr.status == 409);
+
+ var xhr = CouchDB.request("PUT", "/"+dbName+"/_design%2Fbin_doc/foo/bar2.txt?rev=" + binAttDoc._rev, {
+ body:"This is no base64 encoded text",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ T(xhr.status == 201);
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ binAttDoc = db.open("_design/bin_doc");
+
+ T(binAttDoc._attachments["foo/bar.txt"] !== undefined);
+ T(binAttDoc._attachments["foo/bar2.txt"] !== undefined);
+ TEquals("text/plain;charset=utf-8", // thank you Safari
+ binAttDoc._attachments["foo/bar2.txt"].content_type.toLowerCase(),
+ "correct content-type"
+ );
+ T(binAttDoc._attachments["foo/bar2.txt"].length == 30);
+ }
+};
diff --git a/1.1.x/share/www/script/test/attachment_ranges.js b/1.1.x/share/www/script/test/attachment_ranges.js
new file mode 100644
index 00000000..e1d40eae
--- /dev/null
+++ b/1.1.x/share/www/script/test/attachment_ranges.js
@@ -0,0 +1,134 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+couchTests.attachment_ranges = function(debug) {
+ var db = new CouchDB("test_suite_db", {
+ "X-Couch-Full-Commit": "false"
+ });
+ db.deleteDb();
+ db.createDb();
+
+ if (debug) debugger;
+
+ var binAttDoc = {
+ _id: "bin_doc",
+ _attachments: {
+ "foo.txt": {
+ content_type: "application/octet-stream",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ };
+
+ var save_response = db.save(binAttDoc);
+ T(save_response.ok);
+
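+ // Range semantics follow RFC 2616: a request carries "Range: bytes=first-last"
+ // (either end may be omitted); a 206 reply answers with
+ // "Content-Range: bytes first-last/total".
+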
+ // Fetching the whole entity is a 206.
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt", {
+ headers: {
+ "Range": "bytes=0-28"
+ }
+ });
+ TEquals(206, xhr.status, "fetch 0-28");
+ TEquals("This is a base64 encoded text", xhr.responseText);
+ TEquals("bytes 0-28/29", xhr.getResponseHeader("Content-Range"));
+ TEquals("29", xhr.getResponseHeader("Content-Length"));
+
+ // Fetching the whole entity without an end offset is a 206.
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt", {
+ headers: {
+ "Range": "bytes=0-"
+ }
+ });
+ TEquals(206, xhr.status, "fetch 0-");
+ TEquals("This is a base64 encoded text", xhr.responseText);
+ TEquals("bytes 0-28/29", xhr.getResponseHeader("Content-Range"));
+ TEquals("29", xhr.getResponseHeader("Content-Length"));
+
+ // Badly formed range header is a 200.
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt", {
+ headers: {
+ "Range": "bytes:0-"
+ }
+ });
+ TEquals(200, xhr.status, "fetch with bad range header");
+
+ // Fetching from an offset to the end of the entity is a 206.
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt", {
+ headers: {
+ "Range": "bytes=2-"
+ }
+ });
+ TEquals(206, xhr.status, "fetch 2-");
+ TEquals("is is a base64 encoded text", xhr.responseText);
+ TEquals("bytes 2-28/29", xhr.getResponseHeader("Content-Range"));
+ TEquals("27", xhr.getResponseHeader("Content-Length"));
+
+ // Fetching past the end of the entity is a 206
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt", {
+ headers: {
+ "Range": "bytes=0-29"
+ }
+ });
+ TEquals(206, xhr.status, "fetch 0-29");
+ TEquals("bytes 0-28/29", xhr.getResponseHeader("Content-Range"));
+ TEquals("29", xhr.getResponseHeader("Content-Length"));
+
+ // Fetching the first part of the entity is a 206
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt", {
+ headers: {
+ "Range": "bytes=0-3"
+ }
+ });
+ TEquals(206, xhr.status, "fetch 0-3");
+ TEquals("This", xhr.responseText);
+ TEquals("4", xhr.getResponseHeader("Content-Length"));
+ TEquals("bytes 0-3/29", xhr.getResponseHeader("Content-Range"));
+
+ // Fetching the middle of the entity is also a 206
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt", {
+ headers: {
+ "Range": "bytes=10-15"
+ }
+ });
+ TEquals(206, xhr.status, "fetch 10-15");
+ TEquals("base64", xhr.responseText);
+ TEquals("6", xhr.getResponseHeader("Content-Length"));
+ TEquals("bytes 10-15/29", xhr.getResponseHeader("Content-Range"));
+
+ // Fetching the end of the entity is also a 206
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt", {
+ headers: {
+ "Range": "bytes=-3"
+ }
+ });
+ TEquals(206, xhr.status, "fetch -3");
+ TEquals("ext", xhr.responseText);
+ TEquals("3", xhr.getResponseHeader("Content-Length"));
+ TEquals("bytes 26-28/29", xhr.getResponseHeader("Content-Range"));
+
+ // a backward range is a 416
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt", {
+ headers: {
+ "Range": "bytes=5-3"
+ }
+ });
+ TEquals(416, xhr.status, "fetch 5-3");
+
+ // a range completely outside the entity is a 416
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt", {
+ headers: {
+ "Range": "bytes=300-310"
+ }
+ });
+ TEquals(416, xhr.status, "fetch 300-310");
+
+};
diff --git a/1.1.x/share/www/script/test/attachment_views.js b/1.1.x/share/www/script/test/attachment_views.js
new file mode 100644
index 00000000..a92a8ad0
--- /dev/null
+++ b/1.1.x/share/www/script/test/attachment_views.js
@@ -0,0 +1,98 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachment_views= function(debug) {
+
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // count attachments in a view
+
+ db.bulkSave(makeDocs(0, 10));
+
+ db.bulkSave(makeDocs(10, 20, {
+ _attachments:{
+ "foo.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ }));
+
+ db.bulkSave(makeDocs(20, 30, {
+ _attachments:{
+ "foo.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ },
+ "bar.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ }));
+
+ db.bulkSave(makeDocs(30, 40, {
+ _attachments:{
+ "foo.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ },
+ "bar.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ },
+ "baz.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ }));
+
+ var mapFunction = function(doc) {
+ var count = 0;
+
+ for(var idx in doc._attachments) {
+ count = count + 1;
+ }
+
+ emit(parseInt(doc._id), count);
+ };
+
+ var reduceFunction = function(key, values) {
+ return sum(values);
+ };
+
+ var result = db.query(mapFunction, reduceFunction);
+
+ T(result.rows.length == 1);
+ T(result.rows[0].value == 60);
+
+ var result = db.query(mapFunction, reduceFunction, {
+ startkey:10,
+ endkey:19
+ });
+
+ T(result.rows.length == 1);
+ T(result.rows[0].value == 10);
+
+ var result = db.query(mapFunction, reduceFunction, {
+ startkey:20,
+ endkey:29
+ });
+
+ T(result.rows.length == 1);
+ T(result.rows[0].value == 20);
+
+};
diff --git a/1.1.x/share/www/script/test/attachments.js b/1.1.x/share/www/script/test/attachments.js
new file mode 100644
index 00000000..b0cfd2c5
--- /dev/null
+++ b/1.1.x/share/www/script/test/attachments.js
@@ -0,0 +1,275 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments= function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var binAttDoc = {
+ _id: "bin_doc",
+ _attachments:{
+ "foo.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ };
+
+ var save_response = db.save(binAttDoc);
+ T(save_response.ok);
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+ T(xhr.getResponseHeader("Content-Type") == "text/plain");
+ T(xhr.getResponseHeader("Etag") == '"' + save_response.rev + '"');
+
+ // empty attachment
+ var binAttDoc2 = {
+ _id: "bin_doc2",
+ _attachments:{
+ "foo.txt": {
+ content_type:"text/plain",
+ data: ""
+ }
+ }
+ }
+
+ T(db.save(binAttDoc2).ok);
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc2/foo.txt");
+ T(xhr.responseText.length == 0);
+ T(xhr.getResponseHeader("Content-Type") == "text/plain");
+
+ // test RESTful doc API
+
+ var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc2/foo2.txt?rev=" + binAttDoc2._rev, {
+ body:"This is no base64 encoded text",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ T(xhr.status == 201);
+ TEquals("/bin_doc2/foo2.txt",
+ xhr.getResponseHeader("Location").substr(-18),
+ "should return Location header to newly created or updated attachment");
+
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ binAttDoc2 = db.open("bin_doc2");
+
+ T(binAttDoc2._attachments["foo.txt"] !== undefined);
+ T(binAttDoc2._attachments["foo2.txt"] !== undefined);
+ TEqualsIgnoreCase("text/plain;charset=utf-8", binAttDoc2._attachments["foo2.txt"].content_type);
+ T(binAttDoc2._attachments["foo2.txt"].length == 30);
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc2/foo2.txt");
+ T(xhr.responseText == "This is no base64 encoded text");
+ TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type"));
+
+ // test without rev, should fail
+ var xhr = CouchDB.request("DELETE", "/test_suite_db/bin_doc2/foo2.txt");
+ T(xhr.status == 409);
+
+ // test with rev, should not fail
+ var xhr = CouchDB.request("DELETE", "/test_suite_db/bin_doc2/foo2.txt?rev=" + rev);
+ T(xhr.status == 200);
+ TEquals(null, xhr.getResponseHeader("Location"),
+ "should not return Location header on DELETE request");
+
+ // test binary data
+ var bin_data = "JHAPDO*AU£PN ){(3u[d 93DQ9¡€])} ææøo'∂ƒæ≤çæππ•¥∫¶®#†π¶®¥π€ª®˙π8np";
+ var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc3/attachment.txt", {
+ headers:{"Content-Type":"text/plain;charset=utf-8"},
+ body:bin_data
+ });
+ T(xhr.status == 201);
+ var rev = JSON.parse(xhr.responseText).rev;
+ TEquals('"' + rev + '"', xhr.getResponseHeader("Etag"));
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc3/attachment.txt");
+ T(xhr.responseText == bin_data);
+ TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type"));
+
+ var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc3/attachment.txt", {
+ headers:{"Content-Type":"text/plain;charset=utf-8"},
+ body:bin_data
+ });
+ T(xhr.status == 409);
+
+ var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc3/attachment.txt?rev=" + rev, {
+ headers:{"Content-Type":"text/plain;charset=utf-8"},
+ body:bin_data
+ });
+ T(xhr.status == 201);
+ var rev = JSON.parse(xhr.responseText).rev;
+ TEquals('"' + rev + '"', xhr.getResponseHeader("Etag"));
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc3/attachment.txt");
+ T(xhr.responseText == bin_data);
+ TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type"));
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc3/attachment.txt?rev=" + rev);
+ T(xhr.responseText == bin_data);
+ TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type"));
+
+ var xhr = CouchDB.request("DELETE", "/test_suite_db/bin_doc3/attachment.txt?rev=" + rev);
+ T(xhr.status == 200);
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc3/attachment.txt");
+ T(xhr.status == 404);
+
+ // deleted attachment is still accessible with revision
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc3/attachment.txt?rev=" + rev);
+ T(xhr.status == 200);
+ T(xhr.responseText == bin_data);
+ TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type"));
+
+ // empty attachments
+ var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc4/attachment.txt", {
+ headers:{"Content-Type":"text/plain;charset=utf-8"},
+ body:""
+ });
+ T(xhr.status == 201);
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc4/attachment.txt");
+ T(xhr.status == 200);
+ T(xhr.responseText.length == 0);
+
+ // overwrite previously empty attachment
+ var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc4/attachment.txt?rev=" + rev, {
+ headers:{"Content-Type":"text/plain;charset=utf-8"},
+ body:"This is a string"
+ });
+ T(xhr.status == 201);
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc4/attachment.txt");
+ T(xhr.status == 200);
+ T(xhr.responseText == "This is a string");
+
+ // Attachment sparseness COUCHDB-220
+
+ var docs = [];
+ for (var i = 0; i < 5; i++) {
+ var doc = {
+ _id: (i).toString(),
+ _attachments:{
+ "foo.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ };
+ docs.push(doc);
+ }
+
+ var saved = db.bulkSave(docs);
+ // now delete the docs, and while we are looping over them, remove the
+ // '_rev' field so we can re-create after deletion.
+ var to_up = [];
+ for (i=0;i<saved.length;i++) {
+ to_up.push({'_id': saved[i]['id'], '_rev': saved[i]['rev'], '_deleted': true});
+ delete docs[i]._rev;
+ }
+ // delete them.
+ var saved2 = db.bulkSave(to_up);
+ // re-create them
+ var saved3 = db.bulkSave(docs);
+
+ var before = db.info().disk_size;
+
+ // Compact it.
+ T(db.compact().ok);
+ T(db.last_req.status == 202);
+ // compaction isn't instantaneous, loop until done
+ while (db.info().compact_running) {};
+
+ var after = db.info().disk_size;
+
+ // Compaction should reduce the database slightly, but not
+ // orders of magnitude (unless attachments introduce sparseness)
+ T(after > before * 0.1, "before: " + before + " after: " + after);
+
+
+ // test large attachments - COUCHDB-366
+ var lorem = CouchDB.request("GET", "/_utils/script/test/lorem.txt").responseText;
+
+ var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc5/lorem.txt", {
+ headers:{"Content-Type":"text/plain;charset=utf-8"},
+ body:lorem
+ });
+ T(xhr.status == 201);
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc5/lorem.txt");
+ T(xhr.responseText == lorem);
+ TEqualsIgnoreCase("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type"));
+
+ // test large inline attachment too
+ var lorem_b64 = CouchDB.request("GET", "/_utils/script/test/lorem_b64.txt").responseText;
+ var doc = db.open("bin_doc5", {attachments:true});
+ T(doc._attachments["lorem.txt"].data == lorem_b64);
+
+ // test etags for attachments.
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc5/lorem.txt");
+ T(xhr.status == 200);
+ var etag = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("GET", "/test_suite_db/bin_doc5/lorem.txt", {
+ headers: {"if-none-match": etag}
+ });
+ T(xhr.status == 304);
+
+ // test COUCHDB-497 - empty attachments
+ var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc5/empty.txt?rev="+rev, {
+ headers:{"Content-Type":"text/plain;charset=utf-8", "Content-Length": "0"},
+ body:""
+ });
+ TEquals(201, xhr.status, "should send 201 Created");
+ var rev = JSON.parse(xhr.responseText).rev;
+ var xhr = CouchDB.request("PUT", "/test_suite_db/bin_doc5/empty.txt?rev="+rev, {
+ headers:{"Content-Type":"text/plain;charset=utf-8"}
+ });
+ TEquals(201, xhr.status, "should send 201 Created");
+
+ // implicit doc creation must not allow docs with a reserved id - COUCHDB-565
+ var xhr = CouchDB.request("PUT", "/test_suite_db/_nonexistant/attachment.txt", {
+ headers: {"Content-Type":"text/plain;charset=utf-8"},
+ body: "THIS IS AN ATTACHMENT. BOOYA!"
+ });
+ TEquals(400, xhr.status, "should return error code 400 Bad Request");
+
+ // test COUCHDB-809 - stubs should only require the 'stub' field
+ var bin_doc6 = {
+ _id: "bin_doc6",
+ _attachments:{
+ "foo.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ };
+ T(db.save(bin_doc6).ok);
+ // stub out the attachment
+ bin_doc6._attachments["foo.txt"] = { stub: true };
+ T(db.save(bin_doc6).ok == true);
+
+ // wrong rev pos specified
+
+ // stub out the attachment with the wrong revpos
+ bin_doc6._attachments["foo.txt"] = { stub: true, revpos: 10};
+ try {
+ T(db.save(bin_doc6).ok == true);
+ T(false && "Shouldn't get here!");
+ } catch (e) {
+ T(e.error == "missing_stub");
+ }
+};
diff --git a/1.1.x/share/www/script/test/attachments_multipart.js b/1.1.x/share/www/script/test/attachments_multipart.js
new file mode 100644
index 00000000..7f587357
--- /dev/null
+++ b/1.1.x/share/www/script/test/attachments_multipart.js
@@ -0,0 +1,408 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments_multipart= function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // mime multipart
+
+ xhr = CouchDB.request("PUT", "/test_suite_db/multipart", {
+ headers: {"Content-Type": "multipart/related;boundary=\"abc123\""},
+ body:
+ "--abc123\r\n" +
+ "content-type: application/json\r\n" +
+ "\r\n" +
+ JSON.stringify({
+ "body":"This is a body.",
+ "_attachments":{
+ "foo.txt": {
+ "follows":true,
+ "content_type":"application/test",
+ "length":21
+ },
+ "bar.txt": {
+ "follows":true,
+ "content_type":"application/test",
+ "length":20
+ },
+ "baz.txt": {
+ "follows":true,
+ "content_type":"application/test",
+ "length":19
+ }
+ }
+ }) +
+ "\r\n--abc123\r\n" +
+ "\r\n" +
+ "this is 21 chars long" +
+ "\r\n--abc123\r\n" +
+ "\r\n" +
+ "this is 20 chars lon" +
+ "\r\n--abc123\r\n" +
+ "\r\n" +
+ "this is 19 chars lo" +
+ "\r\n--abc123--"
+ });
+
+ var result = JSON.parse(xhr.responseText);
+
+ T(result.ok);
+
+ TEquals(201, xhr.status, "should send 201 Created");
+
+ xhr = CouchDB.request("GET", "/test_suite_db/multipart/foo.txt");
+
+ T(xhr.responseText == "this is 21 chars long");
+
+ xhr = CouchDB.request("GET", "/test_suite_db/multipart/bar.txt");
+
+ T(xhr.responseText == "this is 20 chars lon");
+
+ xhr = CouchDB.request("GET", "/test_suite_db/multipart/baz.txt");
+
+ T(xhr.responseText == "this is 19 chars lo");
+
+ // now edit an attachment
+
+ var doc = db.open("multipart");
+ var firstrev = doc._rev;
+
+ T(doc._attachments["foo.txt"].stub == true);
+ T(doc._attachments["bar.txt"].stub == true);
+ T(doc._attachments["baz.txt"].stub == true);
+
+ // let's change attachment bar
+ delete doc._attachments["bar.txt"].stub; // remove stub member (or could set to false)
+ doc._attachments["bar.txt"].length = 18;
+ doc._attachments["bar.txt"].follows = true;
+ // let's delete attachment baz:
+ delete doc._attachments["baz.txt"];
+
+ var xhr = CouchDB.request("PUT", "/test_suite_db/multipart", {
+ headers: {"Content-Type": "multipart/related;boundary=\"abc123\""},
+ body:
+ "--abc123\r\n" +
+ "content-type: application/json\r\n" +
+ "\r\n" +
+ JSON.stringify(doc) +
+ "\r\n--abc123\r\n" +
+ "\r\n" +
+ "this is 18 chars l" +
+ "\r\n--abc123--"
+ });
+
+ xhr = CouchDB.request("GET", "/test_suite_db/multipart/bar.txt");
+
+ T(xhr.responseText == "this is 18 chars l");
+
+ xhr = CouchDB.request("GET", "/test_suite_db/multipart/baz.txt");
+ T(xhr.status == 404);
+
+ // now test receiving multipart docs
+
+ function getBoundary(xhr) {
+ if (xhr instanceof XMLHttpRequest) {
+ var ctype = xhr.getResponseHeader("Content-Type");
+ } else {
+ var ctype = xhr.headers['Content-Type'];
+ }
+ var ctypeArgs = ctype.split("; ").slice(1);
+ var boundary = null;
+ for(var i=0; i<ctypeArgs.length; i++) {
+ if (ctypeArgs[i].indexOf("boundary=") == 0) {
+ boundary = ctypeArgs[i].split("=")[1];
+ if (boundary.charAt(0) == '"') {
+ // stringified boundary, parse as json
+ // (may not parse correctly if the boundary contains escaped quotes)
+ boundary = JSON.parse(boundary);
+ }
+ }
+ }
+ return boundary;
+ }
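+ // e.g. given 'multipart/related; boundary="abc123"', getBoundary returns "abc123"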
+
+ function parseMultipart(xhr) {
+ var boundary = getBoundary(xhr);
+ if (xhr instanceof XMLHttpRequest) {
+ var mimetext = xhr.responseText;
+ } else {
+ var mimetext = xhr.body;
+ }
+ // strip off leading boundary
+ var leading = "--" + boundary + "\r\n";
+ var last = "\r\n--" + boundary + "--";
+
+ // strip off leading and trailing boundary
+ var leadingIdx = mimetext.indexOf(leading) + leading.length;
+ var trailingIdx = mimetext.indexOf(last);
+ mimetext = mimetext.slice(leadingIdx, trailingIdx);
+
+ // now split the sections
+ var sections = mimetext.split(new RegExp("\\r\\n--" + boundary));
+
+ // split out the headers for each section
+ for(var i=0; i < sections.length; i++) {
+ var section = sections[i];
+ var headerEndIdx = section.indexOf("\r\n\r\n");
+ var headersraw = section.slice(0, headerEndIdx).split(/\r\n/);
+ var body = section.slice(headerEndIdx + 4);
+ var headers = {};
+ for(var j=0; j<headersraw.length; j++) {
+ var tmp = headersraw[j].split(": ");
+ headers[tmp[0]] = tmp[1];
+ }
+ sections[i] = {"headers":headers, "body":body};
+ }
+
+ return sections;
+ }
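+ // parseMultipart returns an array of sections, each shaped like
+ // {"headers": {"content-type": "..."}, "body": "..."}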
+
+
+ xhr = CouchDB.request("GET", "/test_suite_db/multipart?attachments=true",
+ {headers:{"accept": "multipart/related,*/*;"}});
+
+ T(xhr.status == 200);
+
+ // parse out the multipart
+
+ var sections = parseMultipart(xhr);
+
+ T(sections.length == 3);
+
+ // The first section is the JSON doc. Check its content-type. It contains
+ // the metadata for all the following attachments
+
+ T(sections[0].headers['content-type'] == "application/json");
+
+ var doc = JSON.parse(sections[0].body);
+
+ T(doc._attachments['foo.txt'].follows == true);
+ T(doc._attachments['bar.txt'].follows == true);
+
+ T(sections[1].body == "this is 21 chars long");
+ T(sections[2].body == "this is 18 chars l");
+
+ // now get attachments incrementally: atts_since asks the server to send
+ // only attachments changed since the given rev(s); unchanged ones come
+ // back as stubs.
+
+ xhr = CouchDB.request("GET", "/test_suite_db/multipart?atts_since=[\"" + firstrev + "\"]",
+ {headers:{"accept": "multipart/related, */*"}});
+
+ T(xhr.status == 200);
+
+ var sections = parseMultipart(xhr);
+
+ T(sections.length == 2);
+
+ var doc = JSON.parse(sections[0].body);
+
+ T(doc._attachments['foo.txt'].stub == true);
+ T(doc._attachments['bar.txt'].follows == true);
+
+ T(sections[1].body == "this is 18 chars l");
+
+ // try the atts_since parameter together with the open_revs parameter
+ xhr = CouchDB.request(
+ "GET",
+ '/test_suite_db/multipart?open_revs=["' +
+ doc._rev + '"]&atts_since=["' + firstrev + '"]',
+ {headers: {"accept": "multipart/mixed"}}
+ );
+
+ T(xhr.status === 200);
+
+ sections = parseMultipart(xhr);
+ // 1 section, with a multipart/related Content-Type
+ T(sections.length === 1);
+ T(sections[0].headers['Content-Type'].indexOf('multipart/related;') === 0);
+
+ var innerSections = parseMultipart(sections[0]);
+ // 2 inner sections: a document body section plus an attachment data section
+ T(innerSections.length === 2);
+ T(innerSections[0].headers['content-type'] === 'application/json');
+
+ doc = JSON.parse(innerSections[0].body);
+
+ T(doc._attachments['foo.txt'].stub === true);
+ T(doc._attachments['bar.txt'].follows === true);
+
+ T(innerSections[1].body === "this is 18 chars l");
+
+ // try it with a rev that doesn't exist (should get all attachments)
+
+ xhr = CouchDB.request("GET", "/test_suite_db/multipart?atts_since=[\"1-2897589\"]",
+ {headers:{"accept": "multipart/related,*/*;"}});
+
+ T(xhr.status == 200);
+
+ var sections = parseMultipart(xhr);
+
+ T(sections.length == 3);
+
+ var doc = JSON.parse(sections[0].body);
+
+ T(doc._attachments['foo.txt'].follows == true);
+ T(doc._attachments['bar.txt'].follows == true);
+
+ T(sections[1].body == "this is 21 chars long");
+ T(sections[2].body == "this is 18 chars l");
+
+ // try it with a rev that doesn't exist, and one that does
+
+ xhr = CouchDB.request("GET", "/test_suite_db/multipart?atts_since=[\"1-2897589\",\"" + firstrev + "\"]",
+ {headers:{"accept": "multipart/related,*/*;"}});
+
+ T(xhr.status == 200);
+
+ var sections = parseMultipart(xhr);
+
+ T(sections.length == 2);
+
+ var doc = JSON.parse(sections[0].body);
+
+ T(doc._attachments['foo.txt'].stub == true);
+ T(doc._attachments['bar.txt'].follows == true);
+
+ T(sections[1].body == "this is 18 chars l");
+
+
+ // check that with the document multipart/mixed API it's possible to receive
+ // attachments in compressed form (if they're stored in compressed form)
+
+ var server_config = [
+ {
+ section: "attachments",
+ key: "compression_level",
+ value: "8"
+ },
+ {
+ section: "attachments",
+ key: "compressible_types",
+ value: "text/plain"
+ }
+ ];
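+
+ // with this config only text/plain attachments are gzipped (level 8), so
+ // below lorem.txt should come back with encoding "gzip" while the
+ // application/binary data.bin attachment stays unencoded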
+
+ function testMultipartAttCompression() {
+ var doc = { _id: "foobar" };
+ var lorem =
+ CouchDB.request("GET", "/_utils/script/test/lorem.txt").responseText;
+ var helloData = "hello world";
+
+ TEquals(true, db.save(doc).ok);
+
+ var firstRev = doc._rev;
+ var xhr = CouchDB.request(
+ "PUT",
+ "/" + db.name + "/" + doc._id + "/data.bin?rev=" + firstRev,
+ {
+ body: helloData,
+ headers: {"Content-Type": "application/binary"}
+ }
+ );
+ TEquals(201, xhr.status);
+
+ var secondRev = db.open(doc._id)._rev;
+ xhr = CouchDB.request(
+ "PUT",
+ "/" + db.name + "/" + doc._id + "/lorem.txt?rev=" + secondRev,
+ {
+ body: lorem,
+ headers: {"Content-Type": "text/plain"}
+ }
+ );
+ TEquals(201, xhr.status);
+
+ var thirdRev = db.open(doc._id)._rev;
+
+ xhr = CouchDB.request(
+ "GET",
+ '/' + db.name + '/' + doc._id + '?open_revs=["' + thirdRev + '"]',
+ {
+ headers: {
+ "Accept": "multipart/mixed",
+ "X-CouchDB-Send-Encoded-Atts": "true"
+ }
+ }
+ );
+ TEquals(200, xhr.status);
+
+ var sections = parseMultipart(xhr);
+ // 1 section, with a multipart/related Content-Type
+ TEquals(1, sections.length);
+ TEquals(0,
+ sections[0].headers['Content-Type'].indexOf('multipart/related;'));
+
+ var innerSections = parseMultipart(sections[0]);
+ // 3 inner sections: a document body section plus 2 attachment data sections
+ TEquals(3, innerSections.length);
+ TEquals('application/json', innerSections[0].headers['content-type']);
+
+ doc = JSON.parse(innerSections[0].body);
+
+ TEquals(true, doc._attachments['lorem.txt'].follows);
+ TEquals("gzip", doc._attachments['lorem.txt'].encoding);
+ TEquals(true, doc._attachments['data.bin'].follows);
+ T(doc._attachments['data.bin'].encoding !== "gzip");
+
+ if (innerSections[1].body === helloData) {
+ T(innerSections[2].body !== lorem);
+ } else if (innerSections[2].body === helloData) {
+ T(innerSections[1].body !== lorem);
+ } else {
+ T(false, "Could not find data.bin attachment data");
+ }
+
+ // now test that it works together with the atts_since parameter
+
+ xhr = CouchDB.request(
+ "GET",
+ '/' + db.name + '/' + doc._id + '?open_revs=["' + thirdRev + '"]' +
+ '&atts_since=["' + secondRev + '"]',
+ {
+ headers: {
+ "Accept": "multipart/mixed",
+ "X-CouchDB-Send-Encoded-Atts": "true"
+ }
+ }
+ );
+ TEquals(200, xhr.status);
+
+ sections = parseMultipart(xhr);
+ // 1 section, with a multipart/related Content-Type
+ TEquals(1, sections.length);
+ TEquals(0,
+ sections[0].headers['Content-Type'].indexOf('multipart/related;'));
+
+ innerSections = parseMultipart(sections[0]);
+ // 2 inner sections: a document body section plus 1 attachment data section
+ TEquals(2, innerSections.length);
+ TEquals('application/json', innerSections[0].headers['content-type']);
+
+ doc = JSON.parse(innerSections[0].body);
+
+ TEquals(true, doc._attachments['lorem.txt'].follows);
+ TEquals("gzip", doc._attachments['lorem.txt'].encoding);
+ TEquals("undefined", typeof doc._attachments['data.bin'].follows);
+ TEquals(true, doc._attachments['data.bin'].stub);
+ T(innerSections[1].body !== lorem);
+ }
+
+ run_on_modified_server(server_config, testMultipartAttCompression);
+
+ // cleanup
+ db.deleteDb();
+};
diff --git a/1.1.x/share/www/script/test/auth_cache.js b/1.1.x/share/www/script/test/auth_cache.js
new file mode 100644
index 00000000..e48f7370
--- /dev/null
+++ b/1.1.x/share/www/script/test/auth_cache.js
@@ -0,0 +1,280 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.auth_cache = function(debug) {
+
+ if (debug) debugger;
+
+ // Simple secret key generator
+ function generateSecret(length) {
+ var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" +
+ "0123456789+/";
+ var secret = '';
+ for (var i = 0; i < length; i++) {
+ secret += tab.charAt(Math.floor(Math.random() * 64));
+ }
+ return secret;
+ }
+
+ var authDb = new CouchDB("test_suite_users", {"X-Couch-Full-Commit":"false"});
+ var server_config = [
+ {
+ section: "couch_httpd_auth",
+ key: "authentication_db",
+ value: authDb.name
+ },
+ {
+ section: "couch_httpd_auth",
+ key: "auth_cache_size",
+ value: "3"
+ },
+ {
+ section: "httpd",
+ key: "authentication_handlers",
+ value: "{couch_httpd_auth, default_authentication_handler}"
+ },
+ {
+ section: "couch_httpd_auth",
+ key: "secret",
+ value: generateSecret(64)
+ }
+ ];
+
+
+ function hits() {
+ var hits = CouchDB.requestStats("couchdb", "auth_cache_hits", true);
+ return hits.current || 0;
+ }
+
+
+ function misses() {
+ var misses = CouchDB.requestStats("couchdb", "auth_cache_misses", true);
+ return misses.current || 0;
+ }
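+
+ // Illustrative sketch only (not part of the original test and not used by
+ // the explicit bookkeeping below): the repeated before/after checks could
+ // be folded into a delta assertion helper like this.
+ function expectCacheDelta(action, hitDelta, missDelta) {
+ var h = hits(), m = misses();
+ action();
+ T(hits() === (h + hitDelta));
+ T(misses() === (m + missDelta));
+ }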
+
+
+ function testFun() {
+ var hits_before,
+ misses_before,
+ hits_after,
+ misses_after;
+
+ var fdmanana = CouchDB.prepareUserDoc({
+ name: "fdmanana",
+ roles: ["dev"]
+ }, "qwerty");
+
+ T(authDb.save(fdmanana).ok);
+
+ var chris = CouchDB.prepareUserDoc({
+ name: "chris",
+ roles: ["dev", "mafia", "white_costume"]
+ }, "the_god_father");
+
+ T(authDb.save(chris).ok);
+
+ var joe = CouchDB.prepareUserDoc({
+ name: "joe",
+ roles: ["erlanger"]
+ }, "functional");
+
+ T(authDb.save(joe).ok);
+
+ var johndoe = CouchDB.prepareUserDoc({
+ name: "johndoe",
+ roles: ["user"]
+ }, "123456");
+
+ T(authDb.save(johndoe).ok);
+
+ hits_before = hits();
+ misses_before = misses();
+
+ T(CouchDB.login("fdmanana", "qwerty").ok);
+
+ hits_after = hits();
+ misses_after = misses();
+
+ T(misses_after === (misses_before + 1));
+ T(hits_after === hits_before);
+
+ hits_before = hits_after;
+ misses_before = misses_after;
+
+ T(CouchDB.logout().ok);
+ T(CouchDB.login("fdmanana", "qwerty").ok);
+
+ hits_after = hits();
+ misses_after = misses();
+
+ T(misses_after === misses_before);
+ T(hits_after === (hits_before + 1));
+
+ hits_before = hits_after;
+ misses_before = misses_after;
+
+ T(CouchDB.logout().ok);
+ T(CouchDB.login("chris", "the_god_father").ok);
+
+ hits_after = hits();
+ misses_after = misses();
+
+ T(misses_after === (misses_before + 1));
+ T(hits_after === hits_before);
+
+ hits_before = hits_after;
+ misses_before = misses_after;
+
+ T(CouchDB.logout().ok);
+ T(CouchDB.login("joe", "functional").ok);
+
+ hits_after = hits();
+ misses_after = misses();
+
+ T(misses_after === (misses_before + 1));
+ T(hits_after === hits_before);
+
+ hits_before = hits_after;
+ misses_before = misses_after;
+
+ T(CouchDB.logout().ok);
+ T(CouchDB.login("johndoe", "123456").ok);
+
+ hits_after = hits();
+ misses_after = misses();
+
+ T(misses_after === (misses_before + 1));
+ T(hits_after === hits_before);
+
+ hits_before = hits_after;
+ misses_before = misses_after;
+
+ T(CouchDB.logout().ok);
+ T(CouchDB.login("joe", "functional").ok);
+
+ hits_after = hits();
+ misses_after = misses();
+
+ // the cache evicts its most recently used entry: joe was removed to make room for johndoe
+ T(misses_after === (misses_before + 1));
+ T(hits_after === hits_before);
+
+ hits_before = hits_after;
+ misses_before = misses_after;
+
+ T(CouchDB.logout().ok);
+ T(CouchDB.login("fdmanana", "qwerty").ok);
+
+ hits_after = hits();
+ misses_after = misses();
+
+ T(misses_after === misses_before);
+ T(hits_after === (hits_before + 1));
+
+ hits_before = hits_after;
+ misses_before = misses_after;
+
+ var new_salt = CouchDB.newUuids(1)[0];
+ var new_passwd = hex_sha1("foobar" + new_salt);
+ fdmanana.salt = new_salt;
+ fdmanana.password_sha = new_passwd;
+
+ T(authDb.save(fdmanana).ok);
+ T(CouchDB.logout().ok);
+
+ // cache was refreshed
+ T(CouchDB.login("fdmanana", "qwerty").error === "unauthorized");
+ T(CouchDB.login("fdmanana", "foobar").ok);
+
+ hits_after = hits();
+ misses_after = misses();
+
+ T(misses_after === misses_before);
+ T(hits_after === (hits_before + 2));
+
+ T(CouchDB.logout().ok);
+
+ hits_before = hits_after;
+ misses_before = misses_after;
+
+ // and yet another update
+ new_salt = CouchDB.newUuids(1)[0];
+ new_passwd = hex_sha1("javascript" + new_salt);
+ fdmanana.salt = new_salt;
+ fdmanana.password_sha = new_passwd;
+
+ T(authDb.save(fdmanana).ok);
+ T(CouchDB.logout().ok);
+
+ // cache was refreshed
+ T(CouchDB.login("fdmanana", "foobar").error === "unauthorized");
+ T(CouchDB.login("fdmanana", "javascript").ok);
+
+ hits_after = hits();
+ misses_after = misses();
+
+ T(misses_after === misses_before);
+ T(hits_after === (hits_before + 2));
+
+ T(authDb.deleteDoc(fdmanana).ok);
+ T(CouchDB.logout().ok);
+
+ hits_before = hits_after;
+ misses_before = misses_after;
+
+ T(CouchDB.login("fdmanana", "javascript").error === "unauthorized");
+
+ hits_after = hits();
+ misses_after = misses();
+
+ T(misses_after === misses_before);
+ T(hits_after === (hits_before + 1));
+
+ // login, compact authentication DB, login again and verify that
+ // there was a cache hit
+ hits_before = hits_after;
+ misses_before = misses_after;
+
+ T(CouchDB.login("johndoe", "123456").ok);
+
+ hits_after = hits();
+ misses_after = misses();
+
+ T(misses_after === (misses_before + 1));
+ T(hits_after === hits_before);
+
+ T(CouchDB.logout().ok);
+ T(authDb.compact().ok);
+
+ while (authDb.info().compact_running);
+
+ hits_before = hits_after;
+ misses_before = misses_after;
+
+ T(CouchDB.login("johndoe", "123456").ok);
+
+ hits_after = hits();
+ misses_after = misses();
+
+ T(misses_after === misses_before);
+ T(hits_after === (hits_before + 1));
+
+ T(CouchDB.logout().ok);
+ }
+
+
+ authDb.deleteDb();
+ run_on_modified_server(server_config, testFun);
+
+ // cleanup
+ authDb.deleteDb();
+} \ No newline at end of file
diff --git a/1.1.x/share/www/script/test/basics.js b/1.1.x/share/www/script/test/basics.js
new file mode 100644
index 00000000..30c27c11
--- /dev/null
+++ b/1.1.x/share/www/script/test/basics.js
@@ -0,0 +1,249 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do some basic tests.
+couchTests.basics = function(debug) {
+ var result = JSON.parse(CouchDB.request("GET", "/").responseText);
+ T(result.couchdb == "Welcome");
+
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+
+ // bug COUCHDB-100: DELETE on non-existent DB returns 500 instead of 404
+ db.deleteDb();
+
+ db.createDb();
+
+ // PUT on existing DB should return 412 instead of 500
+ xhr = CouchDB.request("PUT", "/test_suite_db/");
+ T(xhr.status == 412);
+ if (debug) debugger;
+
+ // creating a new DB should return Location header
+ // and it should work for dbs with slashes (COUCHDB-411)
+ var dbnames = ["test_suite_db", "test_suite_db%2Fwith_slashes"];
+ dbnames.forEach(function(dbname) {
+ xhr = CouchDB.request("DELETE", "/" + dbname);
+ xhr = CouchDB.request("PUT", "/" + dbname);
+ TEquals(dbname,
+ xhr.getResponseHeader("Location").substr(-dbname.length),
+ "should return Location header for the newly created database");
+ TEquals(CouchDB.protocol,
+ xhr.getResponseHeader("Location").substr(0, CouchDB.protocol.length),
+ "should return absolute Location header for the newly created database");
+ });
+
+ // Get the database info, check the db_name
+ T(db.info().db_name == "test_suite_db");
+ T(CouchDB.allDbs().indexOf("test_suite_db") != -1);
+
+ // Get the database info, check the doc_count
+ T(db.info().doc_count == 0);
+
+ // create a document and save it to the database
+ var doc = {_id:"0",a:1,b:1};
+ var result = db.save(doc);
+
+ T(result.ok==true); // return object has an ok member with a value true
+ T(result.id); // the _id of the document is set.
+ T(result.rev); // the revision id of the document is set.
+
+ // Verify the input doc is now set with the doc id and rev
+ // (for caller convenience).
+ T(doc._id == result.id && doc._rev == result.rev);
+
+ var id = result.id; // save off the id for later
+
+ // make sure the revs_info status is good
+ var doc = db.open(id, {revs_info:true});
+ T(doc._revs_info[0].status == "available");
+
+ // make sure you can do a seq=true option
+ var doc = db.open(id, {local_seq:true});
+ T(doc._local_seq == 1);
+
+
+ // Create some more documents.
+ // Notice the use of the ok member on the return result.
+ T(db.save({_id:"1",a:2,b:4}).ok);
+ T(db.save({_id:"2",a:3,b:9}).ok);
+ T(db.save({_id:"3",a:4,b:16}).ok);
+
+ // Check the database doc count
+ T(db.info().doc_count == 4);
+
+ // Test a simple map functions
+
+ // create a map function that selects all documents whose "a" member
+ // has a value of 4, and then returns the document's b value.
+ var mapFunction = function(doc){
+ if (doc.a==4)
+ emit(null, doc.b);
+ };
+
+ var results = db.query(mapFunction);
+
+ // verify only one document found and the result value (doc.b).
+ T(results.total_rows == 1 && results.rows[0].value == 16);
+
+ // reopen document we saved earlier
+ var existingDoc = db.open(id);
+
+ T(existingDoc.a==1);
+
+ //modify and save
+ existingDoc.a=4;
+ db.save(existingDoc);
+
+ // redo the map query
+ results = db.query(mapFunction);
+
+ // the modified document should now be in the results.
+ T(results.total_rows == 2);
+
+ // write 2 more documents
+ T(db.save({a:3,b:9}).ok);
+ T(db.save({a:4,b:16}).ok);
+
+ results = db.query(mapFunction);
+
+ // 1 more document should now be in the result.
+ T(results.total_rows == 3);
+ T(db.info().doc_count == 6);
+
+ var reduceFunction = function(keys, values){
+ return sum(values);
+ };
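+
+ // three docs now match a==4: doc "3" (b:16), doc "0" after the edit (b:1)
+ // and the anonymous {a:4,b:16} doc, so the reduce sums to 16+1+16 = 33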
+
+ results = db.query(mapFunction, reduceFunction);
+
+ T(results.rows[0].value == 33);
+
+ // delete a document
+ T(db.deleteDoc(existingDoc).ok);
+
+ // make sure we can't open the doc
+ T(db.open(existingDoc._id) == null);
+
+ results = db.query(mapFunction);
+
+ // 1 less document should now be in the results.
+ T(results.total_rows == 2);
+ T(db.info().doc_count == 5);
+
+ // make sure we can still open the old rev of the deleted doc
+ T(db.open(existingDoc._id, {rev: existingDoc._rev}) != null);
+ // make sure restart works
+ T(db.ensureFullCommit().ok);
+ restartServer();
+
+ // make sure we can still open
+ T(db.open(existingDoc._id, {rev: existingDoc._rev}) != null);
+
+ // test that the POST response has a Location header
+ var xhr = CouchDB.request("POST", "/test_suite_db", {
+ body: JSON.stringify({"foo":"bar"}),
+ headers: {"Content-Type": "application/json"}
+ });
+ var resp = JSON.parse(xhr.responseText);
+ T(resp.ok);
+ var loc = xhr.getResponseHeader("Location");
+ T(loc, "should have a Location header");
+ var locs = loc.split('/');
+ T(locs[locs.length-1] == resp.id);
+ T(locs[locs.length-2] == "test_suite_db");
+
+ // test that POSTs with an _id aren't overridden with a UUID.
+ var xhr = CouchDB.request("POST", "/test_suite_db", {
+ headers: {"Content-Type": "application/json"},
+ body: JSON.stringify({"_id": "oppossum", "yar": "matey"})
+ });
+ var resp = JSON.parse(xhr.responseText);
+ T(resp.ok);
+ T(resp.id == "oppossum");
+ var doc = db.open("oppossum");
+ T(doc.yar == "matey");
+
+ // document put's should return a Location header
+ var xhr = CouchDB.request("PUT", "/test_suite_db/newdoc", {
+ body: JSON.stringify({"a":1})
+ });
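+ // "/test_suite_db/newdoc" is 21 characters long, hence the substr(-21)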
+ TEquals("/test_suite_db/newdoc",
+ xhr.getResponseHeader("Location").substr(-21),
+ "should return Location header to newly created document");
+ TEquals(CouchDB.protocol,
+ xhr.getResponseHeader("Location").substr(0, CouchDB.protocol.length),
+ "should return absolute Location header to newly created document");
+
+ // deleting a non-existent doc should be 404
+ xhr = CouchDB.request("DELETE", "/test_suite_db/doc-does-not-exist");
+ T(xhr.status == 404);
+
+ // Check for invalid document members
+ var bad_docs = [
+ ["goldfish", {"_zing": 4}],
+ ["zebrafish", {"_zoom": "hello"}],
+ ["mudfish", {"zane": "goldfish", "_fan": "something smells delicious"}],
+ ["tastyfish", {"_bing": {"wha?": "soda can"}}]
+ ];
+ var test_doc = function(info) {
+ var data = JSON.stringify(info[1]);
+ xhr = CouchDB.request("PUT", "/test_suite_db/" + info[0], {body: data});
+ T(xhr.status == 500);
+ result = JSON.parse(xhr.responseText);
+ T(result.error == "doc_validation");
+
+ xhr = CouchDB.request("POST", "/test_suite_db/", {
+ headers: {"Content-Type": "application/json"},
+ body: data
+ });
+ T(xhr.status == 500);
+ result = JSON.parse(xhr.responseText);
+ T(result.error == "doc_validation");
+ };
+ bad_docs.forEach(test_doc);
+
+ // Check some common error responses.
+ // PUT body not an object
+ xhr = CouchDB.request("PUT", "/test_suite_db/bar", {body: "[]"});
+ T(xhr.status == 400);
+ result = JSON.parse(xhr.responseText);
+ T(result.error == "bad_request");
+ T(result.reason == "Document must be a JSON object");
+
+ // Body of a _bulk_docs is not an object
+ xhr = CouchDB.request("POST", "/test_suite_db/_bulk_docs", {body: "[]"});
+ T(xhr.status == 400);
+ result = JSON.parse(xhr.responseText);
+ T(result.error == "bad_request");
+ T(result.reason == "Request body must be a JSON object");
+
+ // Body of an _all_docs multi-get is not a {"key": [...]} structure.
+ xhr = CouchDB.request("POST", "/test_suite_db/_all_docs", {body: "[]"});
+ T(xhr.status == 400);
+ result = JSON.parse(xhr.responseText);
+ T(result.error == "bad_request");
+ T(result.reason == "Request body must be a JSON object");
+ var data = "{\"keys\": 1}";
+ xhr = CouchDB.request("POST", "/test_suite_db/_all_docs", {body:data});
+ T(xhr.status == 400);
+ result = JSON.parse(xhr.responseText);
+ T(result.error == "bad_request");
+ T(result.reason == "`keys` member must be a array.");
+
+ // oops, the doc id got lost in code nirvana
+ xhr = CouchDB.request("DELETE", "/test_suite_db/?rev=foobarbaz");
+ TEquals(400, xhr.status, "should return a bad request");
+ result = JSON.parse(xhr.responseText);
+ TEquals("bad_request", result.error);
+ TEquals("You tried to DELETE a database with a ?=rev parameter. Did you mean to DELETE a document instead?", result.reason);
+};
diff --git a/1.1.x/share/www/script/test/batch_save.js b/1.1.x/share/www/script/test/batch_save.js
new file mode 100644
index 00000000..a1b00192
--- /dev/null
+++ b/1.1.x/share/www/script/test/batch_save.js
@@ -0,0 +1,48 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.batch_save = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var i;
+ for(i=0; i < 100; i++) {
+ T(db.save({_id:i.toString(),a:i,b:i}, {batch : "ok"}).ok);
+
+ // test that response is 202 Accepted
+ T(db.last_req.status == 202);
+ }
+
+ for(i=0; i < 100; i++) {
+ // attempt to save the same document a bunch of times
+ T(db.save({_id:"foo",a:i,b:i}, {batch : "ok"}).ok);
+
+ // test that response is 202 Accepted
+ T(db.last_req.status == 202);
+ }
+
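+ // wait for the batch writer to flush: 100 distinct docs plus "foo" = 101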
+ while(db.allDocs().total_rows != 101){};
+
+ // repeat the tests for POST
+ for(i=0; i < 100; i++) {
+ var resp = db.request("POST", db.uri + "?batch=ok", {
+ headers: {"Content-Type": "application/json"},
+ body: JSON.stringify({a:1})
+ });
+ T(JSON.parse(resp.responseText).ok);
+ }
+
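+ // 101 docs from above plus 100 POSTed docs = 201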
+ while(db.allDocs().total_rows != 201){};
+
+};
diff --git a/1.1.x/share/www/script/test/bulk_docs.js b/1.1.x/share/www/script/test/bulk_docs.js
new file mode 100644
index 00000000..9095e6b3
--- /dev/null
+++ b/1.1.x/share/www/script/test/bulk_docs.js
@@ -0,0 +1,100 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.bulk_docs = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(5);
+
+ // Create the docs
+ var results = db.bulkSave(docs);
+
+ T(results.length == 5);
+ for (var i = 0; i < 5; i++) {
+ T(results[i].id == docs[i]._id);
+ T(results[i].rev);
+ // Update the doc
+ docs[i].string = docs[i].string + ".00";
+ }
+
+ // Save the docs
+ results = db.bulkSave(docs);
+ T(results.length == 5);
+ for (i = 0; i < 5; i++) {
+ T(results[i].id == i.toString());
+
+ // set the delete flag to delete the docs in the next step
+ docs[i]._deleted = true;
+ }
+
+ // now test a bulk update with a conflict
+ // open and save
+ var doc = db.open("0");
+ db.save(doc);
+
+ // Now bulk delete the docs
+ results = db.bulkSave(docs);
+
+ // doc "0" should be a conflict
+ T(results.length == 5);
+ T(results[0].id == "0");
+ T(results[0].error == "conflict");
+ T(typeof results[0].rev === "undefined"); // no rev member when a conflict
+
+ // but the rest are not
+ for (i = 1; i < 5; i++) {
+ T(results[i].id == i.toString());
+ T(results[i].rev);
+ T(db.open(docs[i]._id) == null);
+ }
+
+ // now force a conflict on save
+
+ // save doc 0, this will cause a conflict when we save docs[0]
+ var doc = db.open("0");
+ docs[0] = db.open("0");
+ db.save(doc);
+
+ docs[0].shooby = "dooby";
+
+ // Now save the bulk docs. With all_or_nothing there is no conflict
+ // checking: either all docs are saved regardless of conflict status, or
+ // none are.
+ results = db.bulkSave(docs,{all_or_nothing:true});
+ T(results.error === undefined);
+
+ var doc = db.open("0", {conflicts:true});
+ var docConflict = db.open("0", {rev:doc._conflicts[0]});
+
+ T(doc.shooby == "dooby" || docConflict.shooby == "dooby");
+
+ // verify creating a document with no id returns a new id
+ var req = CouchDB.request("POST", "/test_suite_db/_bulk_docs", {
+ body: JSON.stringify({"docs": [{"foo":"bar"}]})
+ });
+ results = JSON.parse(req.responseText);
+
+ T(results[0].id != "");
+ T(results[0].rev != "");
+
+
+ // Regression test for failure on update/delete
+ var newdoc = {"_id": "foobar", "body": "baz"};
+ T(db.save(newdoc).ok);
+ var update = {"_id": newdoc._id, "_rev": newdoc._rev, "body": "blam"};
+ var torem = {"_id": newdoc._id, "_rev": newdoc._rev, "_deleted": true};
+ results = db.bulkSave([update, torem]);
+ T(results[0].error == "conflict" || results[1].error == "conflict");
+};
diff --git a/1.1.x/share/www/script/test/changes.js b/1.1.x/share/www/script/test/changes.js
new file mode 100644
index 00000000..5998f48c
--- /dev/null
+++ b/1.1.x/share/www/script/test/changes.js
@@ -0,0 +1,509 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+function jsonp(obj) {
+ T(jsonp_flag == 0);
+ T(obj.results.length == 1 && obj.last_seq == 1, "jsonp");
+ jsonp_flag = 1;
+}
+
+couchTests.changes = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"true"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var req = CouchDB.request("GET", "/test_suite_db/_changes");
+ var resp = JSON.parse(req.responseText);
+
+ T(resp.results.length == 0 && resp.last_seq == 0, "empty db");
+ var docFoo = {_id:"foo", bar:1};
+ T(db.save(docFoo).ok);
+ T(db.ensureFullCommit().ok);
+ T(db.open(docFoo._id)._id == docFoo._id);
+
+ req = CouchDB.request("GET", "/test_suite_db/_changes");
+ var resp = JSON.parse(req.responseText);
+
+ T(resp.last_seq == 1);
+ T(resp.results.length == 1, "one doc db");
+ T(resp.results[0].changes[0].rev == docFoo._rev);
+
+ // test with callback
+
+ run_on_modified_server(
+ [{section: "httpd",
+ key: "allow_jsonp",
+ value: "true"}],
+ function() {
+ var xhr = CouchDB.request("GET", "/test_suite_db/_changes?callback=jsonp");
+ T(xhr.status == 200);
+ jsonp_flag = 0;
+ eval(xhr.responseText);
+ T(jsonp_flag == 1);
+ });
+
+ req = CouchDB.request("GET", "/test_suite_db/_changes?feed=continuous&timeout=10");
+ var lines = req.responseText.split("\n");
+ T(JSON.parse(lines[0]).changes[0].rev == docFoo._rev);
+ T(JSON.parse(lines[1]).last_seq == 1);
+
+ var xhr;
+
+ try {
+ xhr = CouchDB.newXhr();
+ } catch (err) {
+ }
+
+ // poor man's browser detection
+ var is_safari = false;
+ if(typeof(navigator) == "undefined") {
+ is_safari = true; // For CouchHTTP based runners
+ } else if(navigator.userAgent.match(/AppleWebKit/)) {
+ is_safari = true;
+ };
+ if (!is_safari && xhr) {
+ // Only test the continuous stuff if we have a real XHR object
+ // with real async support.
+
+ // WebKit (last checked on nightly #47686) fails to process the
+ // async request properly while JavaScript is executing.
+
+ xhr.open("GET", "/test_suite_db/_changes?feed=continuous&timeout=500", true);
+ xhr.send("");
+
+ var docBar = {_id:"bar", bar:1};
+ db.save(docBar);
+
+ var lines, change1, change2;
+ waitForSuccess(function() {
+ lines = xhr.responseText.split("\n");
+ change1 = JSON.parse(lines[0]);
+ change2 = JSON.parse(lines[1]);
+ if (change2.seq != 2) {
+ throw "bad seq, try again";
+ }
+ }, "bar-only");
+
+ T(change1.seq == 1);
+ T(change1.id == "foo");
+
+ T(change2.seq == 2);
+ T(change2.id == "bar");
+ T(change2.changes[0].rev == docBar._rev);
+
+
+ var docBaz = {_id:"baz", baz:1};
+ db.save(docBaz);
+
+ var change3;
+ waitForSuccess(function() {
+ lines = xhr.responseText.split("\n");
+ change3 = JSON.parse(lines[2]);
+ if (change3.seq != 3) {
+ throw "bad seq, try again";
+ }
+ });
+
+ T(change3.seq == 3);
+ T(change3.id == "baz");
+ T(change3.changes[0].rev == docBaz._rev);
+
+
+ xhr = CouchDB.newXhr();
+
+ // verify the heartbeat newlines are sent
+ xhr.open("GET", "/test_suite_db/_changes?feed=continuous&heartbeat=10&timeout=500", true);
+ xhr.send("");
+
+ var str;
+ waitForSuccess(function() {
+ str = xhr.responseText;
+ if (str.charAt(str.length - 1) != "\n" || str.charAt(str.length - 2) != "\n") {
+ throw("keep waiting");
+ }
+ }, "heartbeat");
+
+ T(str.charAt(str.length - 1) == "\n");
+ T(str.charAt(str.length - 2) == "\n");
+
+ // otherwise we'll continue to receive heartbeats forever
+ xhr.abort();
+
+ // test longpolling
+ xhr = CouchDB.newXhr();
+
+ xhr.open("GET", "/test_suite_db/_changes?feed=longpoll", true);
+ xhr.send("");
+
+ waitForSuccess(function() {
+ lines = xhr.responseText.split("\n");
+ if (lines[5] != '"last_seq":3}') {
+ throw("still waiting");
+ }
+ }, "last_seq");
+
+ xhr = CouchDB.newXhr();
+
+ xhr.open("GET", "/test_suite_db/_changes?feed=longpoll&since=3", true);
+ xhr.send("");
+
+ var docBarz = {_id:"barz", bar:1};
+ db.save(docBarz);
+
+ var parse_changes_line = function(line) {
+ var linetrimmed;
+ if (line.charAt(line.length-1) == ",") {
+ linetrimmed = line.substring(0, line.length-1);
+ } else {
+ linetrimmed = line;
+ }
+ return JSON.parse(linetrimmed);
+ };
+
+ waitForSuccess(function() {
+ lines = xhr.responseText.split("\n");
+ if (lines[3] != '"last_seq":4}') {
+ throw("still waiting");
+ }
+ }, "change_lines");
+
+ var change = parse_changes_line(lines[1]);
+ T(change.seq == 4);
+ T(change.id == "barz");
+ T(change.changes[0].rev == docBarz._rev);
+ T(lines[3]=='"last_seq":4}');
+
+
+ }
+
+ // test the filtered changes
+ var ddoc = {
+ _id : "_design/changes_filter",
+ "filters" : {
+ "bop" : "function(doc, req) { return (doc.bop);}",
+ "dynamic" : stringFun(function(doc, req) {
+ var field = req.query.field;
+ return doc[field];
+ }),
+ "userCtx" : stringFun(function(doc, req) {
+ return doc.user && (doc.user == req.userCtx.name);
+ }),
+ "conflicted" : "function(doc, req) { return (doc._conflicts);}"
+ },
+ options : {
+ local_seq : true
+ },
+ views : {
+ local_seq : {
+ map : "function(doc) {emit(doc._local_seq, null)}"
+ }
+ }
+ };
+
+ db.save(ddoc);
+
+ var req = CouchDB.request("GET", "/test_suite_db/_changes?filter=changes_filter/bop");
+ var resp = JSON.parse(req.responseText);
+ T(resp.results.length == 0);
+
+ db.save({"bop" : "foom"});
+ db.save({"bop" : false});
+
+ var req = CouchDB.request("GET", "/test_suite_db/_changes?filter=changes_filter/bop");
+ var resp = JSON.parse(req.responseText);
+ T(resp.results.length == 1, "filtered/bop");
+
+ req = CouchDB.request("GET", "/test_suite_db/_changes?filter=changes_filter/dynamic&field=woox");
+ resp = JSON.parse(req.responseText);
+ T(resp.results.length == 0);
+
+ req = CouchDB.request("GET", "/test_suite_db/_changes?filter=changes_filter/dynamic&field=bop");
+ resp = JSON.parse(req.responseText);
+ T(resp.results.length == 1, "changes_filter/dynamic&field=bop");
+
+ if (!is_safari && xhr) { // full test requires parallel connections
+ // filter with longpoll
+ // longpoll filters full history when run without a since seq
+ xhr = CouchDB.newXhr();
+ xhr.open("GET", "/test_suite_db/_changes?feed=longpoll&filter=changes_filter/bop", false);
+ xhr.send("");
+ var resp = JSON.parse(xhr.responseText);
+ T(resp.last_seq == 7);
+ // longpoll waits until a matching change arrives before returning
+ xhr = CouchDB.newXhr();
+ xhr.open("GET", "/test_suite_db/_changes?feed=longpoll&since=7&filter=changes_filter/bop", true);
+ xhr.send("");
+ db.save({"_id":"falsy", "bop" : ""}); // empty string is falsy
+ db.save({"_id":"bingo","bop" : "bingo"});
+
+ waitForSuccess(function() {
+ resp = JSON.parse(xhr.responseText);
+ }, "longpoll-since");
+
+ T(resp.last_seq == 9);
+ T(resp.results && resp.results.length > 0 && resp.results[0]["id"] == "bingo", "filter the correct update");
+ xhr.abort();
+
+ var timeout = 500;
+ var last_seq = 10;
+ while (true) {
+
+ // filter with continuous
+ xhr = CouchDB.newXhr();
+ xhr.open("GET", "/test_suite_db/_changes?feed=continuous&filter=changes_filter/bop&timeout="+timeout, true);
+ xhr.send("");
+
+ db.save({"_id":"rusty", "bop" : "plankton"});
+ T(xhr.readyState != 4, "test client too slow");
+ var rusty = db.open("rusty", {cache_bust : new Date()});
+ T(rusty._id == "rusty");
+
+ waitForSuccess(function() { // throws an error after 5 seconds
+ if (xhr.readyState != 4) {
+ throw("still waiting");
+ }
+ }, "continuous-rusty");
+ lines = xhr.responseText.split("\n");
+ var good = false;
+ try {
+ JSON.parse(lines[3]);
+ good = true;
+ } catch(e) {
+ }
+ if (good) {
+ T(JSON.parse(lines[1]).id == "bingo", lines[1]);
+ T(JSON.parse(lines[2]).id == "rusty", lines[2]);
+ T(JSON.parse(lines[3]).last_seq == last_seq, lines[3]);
+ break;
+ } else {
+ xhr.abort();
+ db.deleteDoc(rusty);
+ timeout = timeout * 2;
+ last_seq = last_seq + 2;
+ }
+ }
+ }
+ // error conditions
+
+ // non-existing design doc
+ var req = CouchDB.request("GET",
+ "/test_suite_db/_changes?filter=nothingtosee/bop");
+ TEquals(404, req.status, "should return 404 for non-existent design doc");
+
+ // non-existing filter
+ var req = CouchDB.request("GET",
+ "/test_suite_db/_changes?filter=changes_filter/movealong");
+ TEquals(404, req.status, "should return 404 for non-existent filter fun");
+
+ // both
+ var req = CouchDB.request("GET",
+ "/test_suite_db/_changes?filter=nothingtosee/movealong");
+ TEquals(404, req.status,
+ "should return 404 for non-existent design doc and filter fun");
+
+ // changes get all_docs style with deleted docs
+ var doc = {a:1};
+ db.save(doc);
+ db.deleteDoc(doc);
+ var req = CouchDB.request("GET",
+ "/test_suite_db/_changes?filter=changes_filter/bop&style=all_docs");
+ var resp = JSON.parse(req.responseText);
+ var expect = (!is_safari && xhr) ? 3: 1;
+ TEquals(expect, resp.results.length, "should return matching rows");
+
+ // test for userCtx
+ run_on_modified_server(
+ [{section: "httpd",
+ key: "authentication_handlers",
+ value: "{couch_httpd_auth, special_test_authentication_handler}"},
+ {section:"httpd",
+ key: "WWW-Authenticate",
+ value: "X-Couch-Test-Auth"}],
+
+ function() {
+ var authOpts = {"headers":{"WWW-Authenticate": "X-Couch-Test-Auth Chris Anderson:mp3"}};
+
+ var req = CouchDB.request("GET", "/_session", authOpts);
+ var resp = JSON.parse(req.responseText);
+
+ T(db.save({"user" : "Noah Slater"}).ok);
+ var req = CouchDB.request("GET", "/test_suite_db/_changes?filter=changes_filter/userCtx", authOpts);
+ var resp = JSON.parse(req.responseText);
+ T(resp.results.length == 0);
+
+ var docResp = db.save({"user" : "Chris Anderson"});
+ T(docResp.ok);
+ T(db.ensureFullCommit().ok);
+ req = CouchDB.request("GET", "/test_suite_db/_changes?filter=changes_filter/userCtx", authOpts);
+ resp = JSON.parse(req.responseText);
+ T(resp.results.length == 1, "userCtx");
+ T(resp.results[0].id == docResp.id);
+ }
+ );
+
+ req = CouchDB.request("GET", "/test_suite_db/_changes?limit=1");
+ resp = JSON.parse(req.responseText);
+ TEquals(1, resp.results.length);
+
+ //filter includes _conflicts
+ var id = db.save({'food' : 'pizza'}).id;
+ db.bulkSave([{_id: id, 'food' : 'pasta'}], {all_or_nothing:true});
+
+ req = CouchDB.request("GET", "/test_suite_db/_changes?filter=changes_filter/conflicted");
+ resp = JSON.parse(req.responseText);
+ T(resp.results.length == 1, "filter=changes_filter/conflicted");
+
+ // test with erlang filter function
+ run_on_modified_server([{
+ section: "native_query_servers",
+ key: "erlang",
+ value: "{couch_native_process, start_link, []}"
+ }], function() {
+ var erl_ddoc = {
+ _id: "_design/erlang",
+ language: "erlang",
+ filters: {
+ foo:
+ 'fun({Doc}, Req) -> ' +
+ ' Value = couch_util:get_value(<<"value">>, Doc),' +
+ ' (Value rem 2) =:= 0' +
+ 'end.'
+ }
+ };
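+
+ // the Erlang filter above passes docs whose "value" field is even, which
+ // is why only doc2 and doc4 show up in the filtered feed below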
+
+ db.deleteDb();
+ db.createDb();
+ T(db.save(erl_ddoc).ok);
+
+ var req = CouchDB.request("GET", "/test_suite_db/_changes?filter=erlang/foo");
+ var resp = JSON.parse(req.responseText);
+ T(resp.results.length === 0);
+
+ T(db.save({_id: "doc1", value : 1}).ok);
+ T(db.save({_id: "doc2", value : 2}).ok);
+ T(db.save({_id: "doc3", value : 3}).ok);
+ T(db.save({_id: "doc4", value : 4}).ok);
+
+ var req = CouchDB.request("GET", "/test_suite_db/_changes?filter=erlang/foo");
+ var resp = JSON.parse(req.responseText);
+ T(resp.results.length === 2);
+ T(resp.results[0].id === "doc2");
+ T(resp.results[1].id === "doc4");
+
+ // test filtering on docids
+ //
+
+ var options = {
+ headers: {"Content-Type": "application/json"},
+ body: JSON.stringify({"doc_ids": ["something", "anotherthing", "andmore"]})
+ };
+
+ var req = CouchDB.request("POST", "/test_suite_db/_changes?filter=_doc_ids", options);
+ var resp = JSON.parse(req.responseText);
+ T(resp.results.length === 0);
+
+ T(db.save({"_id":"something", "bop" : "plankton"}).ok);
+ var req = CouchDB.request("POST", "/test_suite_db/_changes?filter=_doc_ids", options);
+ var resp = JSON.parse(req.responseText);
+ T(resp.results.length === 1);
+ T(resp.results[0].id === "something");
+
+ T(db.save({"_id":"anotherthing", "bop" : "plankton"}).ok);
+ var req = CouchDB.request("POST", "/test_suite_db/_changes?filter=_doc_ids", options);
+ var resp = JSON.parse(req.responseText);
+ T(resp.results.length === 2);
+ T(resp.results[0].id === "something");
+ T(resp.results[1].id === "anotherthing");
+
+ var docids = JSON.stringify(["something", "anotherthing", "andmore"]),
+ req = CouchDB.request("GET", "/test_suite_db/_changes?filter=_doc_ids&doc_ids="+docids, options);
+ var resp = JSON.parse(req.responseText);
+ T(resp.results.length === 2);
+ T(resp.results[0].id === "something");
+ T(resp.results[1].id === "anotherthing");
+
+ var req = CouchDB.request("GET", "/test_suite_db/_changes?filter=_design");
+ var resp = JSON.parse(req.responseText);
+ T(resp.results.length === 1);
+ T(resp.results[0].id === "_design/erlang");
+
+
+ if (!is_safari && xhr) {
+ // filter docids with continuous
+ xhr = CouchDB.newXhr();
+ xhr.open("POST", "/test_suite_db/_changes?feed=continuous&timeout=500&since=7&filter=_doc_ids", true);
+ xhr.setRequestHeader("Content-Type", "application/json");
+
+ xhr.send(options.body);
+
+ T(db.save({"_id":"andmore", "bop" : "plankton"}).ok);
+
+
+ waitForSuccess(function() {
+ if (xhr.readyState != 4) {
+ throw("still waiting");
+ }
+ }, "andmore-only");
+
+ var line = JSON.parse(xhr.responseText.split("\n")[0]);
+ T(line.seq == 8);
+ T(line.id == "andmore");
+ }
+
+ });
+
+ // COUCHDB-1037 - empty result for ?limit=1&filter=foo/bar in some cases
+ T(db.deleteDb());
+ T(db.createDb());
+
+ ddoc = {
+ _id: "_design/testdocs",
+ filters: {
+ testdocsonly: (function(doc, req) {
+ return (typeof doc.integer === "number");
+ }).toString()
+ }
+ };
+ T(db.save(ddoc));
+
+ ddoc = {
+ _id: "_design/foobar",
+ foo: "bar"
+ };
+ T(db.save(ddoc));
+
+ db.bulkSave(makeDocs(0, 5));
+
+ req = CouchDB.request("GET", "/" + db.name + "/_changes");
+ resp = JSON.parse(req.responseText);
+ TEquals(7, resp.last_seq);
+ TEquals(7, resp.results.length);
+
+ req = CouchDB.request(
+ "GET", "/"+ db.name + "/_changes?limit=1&filter=testdocs/testdocsonly");
+ resp = JSON.parse(req.responseText);
+ TEquals(3, resp.last_seq);
+ TEquals(1, resp.results.length);
+ TEquals("0", resp.results[0].id);
+
+ req = CouchDB.request(
+ "GET", "/" + db.name + "/_changes?limit=2&filter=testdocs/testdocsonly");
+ resp = JSON.parse(req.responseText);
+ TEquals(4, resp.last_seq);
+ TEquals(2, resp.results.length);
+ TEquals("0", resp.results[0].id);
+ TEquals("1", resp.results[1].id);
+
+ // cleanup
+ db.deleteDb();
+};
+
diff --git a/1.1.x/share/www/script/test/compact.js b/1.1.x/share/www/script/test/compact.js
new file mode 100644
index 00000000..805a3b08
--- /dev/null
+++ b/1.1.x/share/www/script/test/compact.js
@@ -0,0 +1,59 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.compact = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+ var docs = makeDocs(0, 20);
+ db.bulkSave(docs);
+
+ var binAttDoc = {
+ _id: "bin_doc",
+ _attachments:{
+ "foo.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ };
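+
+ // the attachment data above is base64 for "This is a base64 encoded text";
+ // it is read back intact after compaction and a server restart below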
+
+ T(db.save(binAttDoc).ok);
+
+ var originalsize = db.info().disk_size;
+ var start_time = db.info().instance_start_time;
+
+ for(var i in docs) {
+ db.deleteDoc(docs[i]);
+ }
+ T(db.ensureFullCommit().ok);
+ var deletesize = db.info().disk_size;
+ T(deletesize > originalsize);
+ T(db.setDbProperty("_revs_limit", 666).ok);
+
+ T(db.compact().ok);
+ T(db.last_req.status == 202);
+ // compaction isn't instantaneous, loop until done
+ while (db.info().compact_running) {};
+ T(db.info().instance_start_time == start_time);
+ T(db.getDbProperty("_revs_limit") === 666);
+
+ T(db.ensureFullCommit().ok);
+ restartServer();
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+ T(xhr.getResponseHeader("Content-Type") == "text/plain");
+ T(db.info().doc_count == 1);
+ T(db.info().disk_size < deletesize);
+
+};
diff --git a/1.1.x/share/www/script/test/config.js b/1.1.x/share/www/script/test/config.js
new file mode 100644
index 00000000..e83ecfd9
--- /dev/null
+++ b/1.1.x/share/www/script/test/config.js
@@ -0,0 +1,163 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.config = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // test that /_config returns all the settings
+ var xhr = CouchDB.request("GET", "/_config");
+ var config = JSON.parse(xhr.responseText);
+
+ /*
+ if we run on standard ports, we can't extract
+ the number from the URL. Instead we try to guess
+ from the protocol what port we are running on.
+ If we can't guess, we don't test for the port.
+ Overengineering FTW.
+ */
+ var port, config_port;
+ var server_port = CouchDB.host.split(':');
+ if(server_port.length == 1 && CouchDB.inBrowser) {
+ if(CouchDB.protocol == "http://") {
+ port = 80;
+ }
+ if(CouchDB.protocol == "https://") {
+ port = 443;
+ }
+ } else {
+ port = server_port.pop();
+ }
+
+ if(CouchDB.protocol == "http://") {
+ config_port = config.httpd.port;
+ }
+ if(CouchDB.protocol == "https://") {
+ config_port = config.ssl.port;
+ }
+
+ if(port) {
+ TEquals(config_port, port, "ports should match");
+ }
+
+ T(config.couchdb.database_dir);
+ T(config.daemons.httpd);
+ T(config.httpd_global_handlers._config);
+ T(config.log.level);
+ T(config.query_servers.javascript);
+
+ // test that settings can be altered, and that an undefined whitelist allows any change
+ TEquals(undefined, config.httpd.config_whitelist, "Default whitelist is empty");
+ xhr = CouchDB.request("PUT", "/_config/test/foo",{
+ body : JSON.stringify("bar"),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ T(xhr.status == 200);
+ xhr = CouchDB.request("GET", "/_config/test");
+ config = JSON.parse(xhr.responseText);
+ T(config.foo == "bar");
+
+ // you can get a single key
+ xhr = CouchDB.request("GET", "/_config/test/foo");
+ config = JSON.parse(xhr.responseText);
+ T(config == "bar");
+
+ // Non-term whitelist values allow further modification of the whitelist.
+ xhr = CouchDB.request("PUT", "/_config/httpd/config_whitelist",{
+ body : JSON.stringify("!This is an invalid Erlang term!"),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status, "Set config whitelist to an invalid Erlang term");
+ xhr = CouchDB.request("DELETE", "/_config/httpd/config_whitelist",{
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status, "Modify whitelist despite it being invalid syntax");
+
+ // Non-list whitelist values allow further modification of the whitelist.
+ xhr = CouchDB.request("PUT", "/_config/httpd/config_whitelist",{
+ body : JSON.stringify("{[yes, a_valid_erlang_term, but_unfortunately, not_a_list]}"),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status, "Set config whitelist to an non-list term");
+ xhr = CouchDB.request("DELETE", "/_config/httpd/config_whitelist",{
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status, "Modify whitelist despite it not being a list");
+
+ // Keys not in the whitelist may not be modified.
+ xhr = CouchDB.request("PUT", "/_config/httpd/config_whitelist",{
+ body : JSON.stringify("[{httpd,config_whitelist}, {test,foo}]"),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status, "Set config whitelist to something valid");
+
+ ["PUT", "DELETE"].forEach(function(method) {
+ ["test/not_foo", "not_test/foo", "neither_test/nor_foo"].forEach(function(pair) {
+ var path = "/_config/" + pair;
+ var test_name = method + " to " + path + " disallowed: not whitelisted";
+
+ xhr = CouchDB.request(method, path, {
+ body : JSON.stringify("Bummer! " + test_name),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(400, xhr.status, test_name);
+ });
+ });
+
+ // Keys in the whitelist may be modified.
+ ["PUT", "DELETE"].forEach(function(method) {
+ xhr = CouchDB.request(method, "/_config/test/foo",{
+ body : JSON.stringify(method + " to whitelisted config variable"),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status, "Keys in the whitelist may be modified");
+ });
+
+ // Non-2-tuples in the whitelist are ignored
+ xhr = CouchDB.request("PUT", "/_config/httpd/config_whitelist",{
+ body : JSON.stringify("[{httpd,config_whitelist}, these, {are}, {nOt, 2, tuples}," +
+ " [so], [they, will], [all, become, noops], {test,foo}]"),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status, "Set config whitelist with some inert values");
+ ["PUT", "DELETE"].forEach(function(method) {
+ xhr = CouchDB.request(method, "/_config/test/foo",{
+ body : JSON.stringify(method + " to whitelisted config variable"),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status, "Update whitelisted variable despite invalid entries");
+ });
+
+ // Atoms, binaries, and strings suffice as whitelist sections and keys.
+ ["{test,foo}", '{"test","foo"}', '{<<"test">>,<<"foo">>}'].forEach(function(pair) {
+ xhr = CouchDB.request("PUT", "/_config/httpd/config_whitelist",{
+ body : JSON.stringify("[{httpd,config_whitelist}, " + pair + "]"),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status, "Set config whitelist to include " + pair);
+
+ var pair_format = {"t":"tuple", '"':"string", "<":"binary"}[pair[1]];
+ ["PUT", "DELETE"].forEach(function(method) {
+ xhr = CouchDB.request(method, "/_config/test/foo",{
+ body : JSON.stringify(method + " with " + pair_format),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status, "Whitelist works with " + pair_format);
+ });
+ });
+
+ xhr = CouchDB.request("DELETE", "/_config/httpd/config_whitelist",{
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status, "Reset config whitelist to undefined");
+};
diff --git a/1.1.x/share/www/script/test/conflicts.js b/1.1.x/share/www/script/test/conflicts.js
new file mode 100644
index 00000000..7258bc31
--- /dev/null
+++ b/1.1.x/share/www/script/test/conflicts.js
@@ -0,0 +1,64 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do some edit conflict detection tests
+couchTests.conflicts = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // create a doc and save
+ var doc = {_id:"foo",a:1,b:1};
+ T(db.save(doc).ok);
+
+ // reopen
+ var doc2 = db.open(doc._id);
+
+ // ensure the revisions are the same
+ T(doc._id == doc2._id && doc._rev == doc2._rev);
+
+ // edit the documents.
+ doc.a = 2;
+ doc2.a = 3;
+
+ // save one document
+ T(db.save(doc).ok);
+
+ // save the other document
+ try {
+ db.save(doc2); // this should generate a conflict exception
+ T("no save conflict 1" && false); // we shouldn't hit here
+ } catch (e) {
+ T(e.error == "conflict");
+ }
+
+ var changes = db.changes();
+
+ T(changes.results.length == 1);
+
+ // Now clear out the _rev member and save. This indicates this document is
+ // new, not based on an existing revision.
+ doc2._rev = undefined;
+ try {
+ db.save(doc2); // this should generate a conflict exception
+ T("no save conflict 2" && false); // we shouldn't hit here
+ } catch (e) {
+ T(e.error == "conflict");
+ }
+
+ // Now delete the document from the database
+ T(db.deleteDoc(doc).ok);
+
+ T(db.save(doc2).ok); // we can save a new document over a deletion without
+ // knowing the deletion rev.
+};
diff --git a/1.1.x/share/www/script/test/content_negotiation.js b/1.1.x/share/www/script/test/content_negotiation.js
new file mode 100644
index 00000000..c79df948
--- /dev/null
+++ b/1.1.x/share/www/script/test/content_negotiation.js
@@ -0,0 +1,39 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.content_negotiation = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+ var xhr;
+
+ // with no accept header
+ var req = CouchDB.newXhr();
+ req.open("GET", "/test_suite_db/", false);
+ req.send("");
+ TEquals("text/plain;charset=utf-8", req.getResponseHeader("Content-Type"));
+
+ // make sure JSON responses end in a newline
+ var text = req.responseText;
+ TEquals("\n", text[text.length-1]);
+
+ xhr = CouchDB.request("GET", "/test_suite_db/", {
+ headers: {"Accept": "text/html;text/plain;*/*"}
+ });
+ TEquals("text/plain;charset=utf-8", xhr.getResponseHeader("Content-Type"));
+
+ xhr = CouchDB.request("GET", "/test_suite_db/", {
+ headers: {"Accept": "application/json"}
+ });
+ TEquals("application/json", xhr.getResponseHeader("Content-Type"));
+};
diff --git a/1.1.x/share/www/script/test/cookie_auth.js b/1.1.x/share/www/script/test/cookie_auth.js
new file mode 100644
index 00000000..8ad993cc
--- /dev/null
+++ b/1.1.x/share/www/script/test/cookie_auth.js
@@ -0,0 +1,256 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.cookie_auth = function(debug) {
+ // This tests cookie-based authentication.
+
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // Simple secret key generator
+ function generateSecret(length) {
+ var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ var secret = '';
+ for (var i=0; i<length; i++) {
+ secret += tab.charAt(Math.floor(Math.random() * 64));
+ }
+ return secret;
+ }
+
+ // this function will be called on the modified server
+ var testFun = function () {
+ try {
+ // try using an invalid cookie
+ var usersDb = new CouchDB("test_suite_users", {"X-Couch-Full-Commit":"false"});
+ usersDb.deleteDb();
+ usersDb.createDb();
+
+ // test that the users db is born with the auth ddoc
+ var ddoc = usersDb.open("_design/_auth");
+ T(ddoc.validate_doc_update);
+
+ // TODO test that changing the config so an existing db becomes the users db installs the ddoc also
+
+ var password = "3.141592653589";
+
+ // Create a user
+ var jasonUserDoc = CouchDB.prepareUserDoc({
+ name: "Jason Davies",
+ roles: ["dev"]
+ }, password);
+ T(usersDb.save(jasonUserDoc).ok);
+
+ var checkDoc = usersDb.open(jasonUserDoc._id);
+ T(checkDoc.name == "Jason Davies");
+
+ var jchrisUserDoc = CouchDB.prepareUserDoc({
+ name: "jchris@apache.org"
+ }, "funnybone");
+ T(usersDb.save(jchrisUserDoc).ok);
+
+ // make sure we can't create duplicate users
+ var duplicateJchrisDoc = CouchDB.prepareUserDoc({
+ name: "jchris@apache.org"
+ }, "eh, Boo-Boo?");
+
+ try {
+ usersDb.save(duplicateJchrisDoc);
+ T(false && "Can't create duplicate user names. Should have thrown an error.");
+ } catch (e) {
+ T(e.error == "conflict");
+ T(usersDb.last_req.status == 409);
+ }
+
+ // we can't create _names
+ var underscoreUserDoc = CouchDB.prepareUserDoc({
+ name: "_why"
+ }, "copperfield");
+
+ try {
+ usersDb.save(underscoreUserDoc);
+ T(false && "Can't create underscore user names. Should have thrown an error.");
+ } catch (e) {
+ T(e.error == "forbidden");
+ T(usersDb.last_req.status == 403);
+ }
+
+ // we can't create docs with malformed ids
+ var badIdDoc = CouchDB.prepareUserDoc({
+ name: "foo"
+ }, "bar");
+
+ badIdDoc._id = "org.apache.couchdb:w00x";
+
+ try {
+ usersDb.save(badIdDoc);
+ T(false && "Can't create malformed docids. Should have thrown an error.");
+ } catch (e) {
+ T(e.error == "forbidden");
+ T(usersDb.last_req.status == 403);
+ }
+
+ // login works
+ T(CouchDB.login('Jason Davies', password).ok);
+ T(CouchDB.session().userCtx.name == 'Jason Davies');
+
+ // JSON login works
+ var xhr = CouchDB.request("POST", "/_session", {
+ headers: {"Content-Type": "application/json"},
+ body: JSON.stringify({
+ name: 'Jason Davies',
+ password: password
+ })
+ });
+
+ T(JSON.parse(xhr.responseText).ok);
+ T(CouchDB.session().userCtx.name == 'Jason Davies');
+
+ // update one's own credentials document
+ jasonUserDoc.foo=2;
+ T(usersDb.save(jasonUserDoc).ok);
+ T(CouchDB.session().userCtx.roles.indexOf("_admin") == -1);
+ // can't delete another user's doc unless you are an admin
+ try {
+ usersDb.deleteDoc(jchrisUserDoc);
+ T(false && "Can't delete other users docs. Should have thrown an error.");
+ } catch (e) {
+ T(e.error == "forbidden");
+ T(usersDb.last_req.status == 403);
+ }
+
+ // TODO should login() throw an exception here?
+ T(!CouchDB.login('Jason Davies', "2.71828").ok);
+ T(!CouchDB.login('Robert Allen Zimmerman', 'd00d').ok);
+
+ // a failed login attempt should log you out
+ T(CouchDB.session().userCtx.name != 'Jason Davies');
+
+ // test redirect
+ xhr = CouchDB.request("POST", "/_session?next=/", {
+ headers: {"Content-Type": "application/x-www-form-urlencoded"},
+ body: "name=Jason%20Davies&password="+encodeURIComponent(password)
+ });
+ // Should this be a redirect code instead of 200?
+ // The cURL adapter returns the expected 302 here.
+ // This presumably depends on whether the client is willing to follow
+ // the redirect, i.e. the browser follows and does a GET on the
+ // returned Location.
+ if (xhr.status == 200) {
+ T(/Welcome/.test(xhr.responseText));
+ } else {
+ T(xhr.status == 302);
+ T(xhr.getResponseHeader("Location"));
+ }
+
+ // test users db validations
+ //
+ // test that you can't update docs unless you are logged in as the user (or are admin)
+ T(CouchDB.login("jchris@apache.org", "funnybone").ok);
+ T(CouchDB.session().userCtx.name == "jchris@apache.org");
+ T(CouchDB.session().userCtx.roles.length == 0);
+
+ jasonUserDoc.foo=3;
+
+ try {
+ usersDb.save(jasonUserDoc);
+ T(false && "Can't update someone else's user doc. Should have thrown an error.");
+ } catch (e) {
+ T(e.error == "forbidden");
+ T(usersDb.last_req.status == 403);
+ }
+
+ // test that you can't edit roles unless you are admin
+ jchrisUserDoc.roles = ["foo"];
+
+ try {
+ usersDb.save(jchrisUserDoc);
+ T(false && "Can't set roles unless you are admin. Should have thrown an error.");
+ } catch (e) {
+ T(e.error == "forbidden");
+ T(usersDb.last_req.status == 403);
+ }
+
+ T(CouchDB.logout().ok);
+ T(CouchDB.session().userCtx.roles[0] == "_admin");
+
+ jchrisUserDoc.foo = ["foo"];
+ T(usersDb.save(jchrisUserDoc).ok);
+
+ // test that you can't save system (underscore) roles even if you are admin
+ jchrisUserDoc.roles = ["_bar"];
+
+ try {
+ usersDb.save(jchrisUserDoc);
+ T(false && "Can't add system roles to user's db. Should have thrown an error.");
+ } catch (e) {
+ T(e.error == "forbidden");
+ T(usersDb.last_req.status == 403);
+ }
+
+ // make sure the foo role has been applied
+ T(CouchDB.login("jchris@apache.org", "funnybone").ok);
+ T(CouchDB.session().userCtx.name == "jchris@apache.org");
+ T(CouchDB.session().userCtx.roles.indexOf("_admin") == -1);
+ T(CouchDB.session().userCtx.roles.indexOf("foo") != -1);
+
+ // now let's make jchris a server admin
+ T(CouchDB.logout().ok);
+ T(CouchDB.session().userCtx.roles[0] == "_admin");
+ T(CouchDB.session().userCtx.name == null);
+
+ // set the -hashed- password so the salt matches
+ // TODO: ask on the mailing list about this
+ run_on_modified_server([{section: "admins",
+ key: "jchris@apache.org", value: "funnybone"}], function() {
+ T(CouchDB.login("jchris@apache.org", "funnybone").ok);
+ T(CouchDB.session().userCtx.name == "jchris@apache.org");
+ T(CouchDB.session().userCtx.roles.indexOf("_admin") != -1);
+ // test that jchris still has the foo role
+ T(CouchDB.session().userCtx.roles.indexOf("foo") != -1);
+
+ // should work even when user doc has no password
+ jchrisUserDoc = usersDb.open(jchrisUserDoc._id);
+ delete jchrisUserDoc.salt;
+ delete jchrisUserDoc.password_sha;
+ T(usersDb.save(jchrisUserDoc).ok);
+ T(CouchDB.logout().ok);
+ T(CouchDB.login("jchris@apache.org", "funnybone").ok);
+ var s = CouchDB.session();
+ T(s.userCtx.name == "jchris@apache.org");
+ T(s.userCtx.roles.indexOf("_admin") != -1);
+ // test session info
+ T(s.info.authenticated == "cookie");
+ T(s.info.authentication_db == "test_suite_users");
+ // test that jchris still has the foo role
+ T(CouchDB.session().userCtx.roles.indexOf("foo") != -1);
+ });
+
+ } finally {
+ // Make sure we erase any auth cookies so we don't affect other tests
+ T(CouchDB.logout().ok);
+ }
+ };
+
+ run_on_modified_server(
+ [{section: "httpd",
+ key: "authentication_handlers",
+ value: "{couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}"},
+ {section: "couch_httpd_auth",
+ key: "secret", value: generateSecret(64)},
+ {section: "couch_httpd_auth",
+ key: "authentication_db", value: "test_suite_users"}],
+ testFun
+ );
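+
+ // For reference, the overrides above correspond to ini settings roughly
+ // like the following (the secret value is generated afresh per run):
+ //   [httpd]
+ //   authentication_handlers = {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+ //   [couch_httpd_auth]
+ //   secret = <64 random base64 characters>
+ //   authentication_db = test_suite_users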
+
+};
diff --git a/1.1.x/share/www/script/test/copy_doc.js b/1.1.x/share/www/script/test/copy_doc.js
new file mode 100644
index 00000000..99e3c7fe
--- /dev/null
+++ b/1.1.x/share/www/script/test/copy_doc.js
@@ -0,0 +1,51 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.copy_doc = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // copy a doc
+ T(db.save({_id:"doc_to_be_copied",v:1}).ok);
+ var xhr = CouchDB.request("COPY", "/test_suite_db/doc_to_be_copied", {
+ headers: {"Destination":"doc_that_was_copied"}
+ });
+
+ T(xhr.status == 201);
+ T(db.open("doc_that_was_copied").v == 1);
+
+ // COPY with existing target
+ T(db.save({_id:"doc_to_be_copied2",v:1}).ok);
+ var doc = db.save({_id:"doc_to_be_overwritten",v:2});
+ T(doc.ok);
+
+ // error condition
+ var xhr = CouchDB.request("COPY", "/test_suite_db/doc_to_be_copied2", {
+ headers: {"Destination":"doc_to_be_overwritten"}
+ });
+ T(xhr.status == 409); // conflict
+
+ var xhr = CouchDB.request("COPY", "/test_suite_db/doc_to_be_copied2");
+ T(xhr.status == 400); // bad request (no Destination header)
+
+ var rev = db.open("doc_to_be_overwritten")._rev;
+ var xhr = CouchDB.request("COPY", "/test_suite_db/doc_to_be_copied2", {
+ headers: {"Destination":"doc_to_be_overwritten?rev=" + rev}
+ });
+ T(xhr.status == 201);
+
+ var over = db.open("doc_to_be_overwritten");
+ T(rev != over._rev);
+ T(over.v == 1);
+};
diff --git a/1.1.x/share/www/script/test/delayed_commits.js b/1.1.x/share/www/script/test/delayed_commits.js
new file mode 100644
index 00000000..dbb072fb
--- /dev/null
+++ b/1.1.x/share/www/script/test/delayed_commits.js
@@ -0,0 +1,154 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.delayed_commits = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ run_on_modified_server(
+ [{section: "couchdb",
+ key: "delayed_commits",
+ value: "true"}],
+
+ function () {
+ // By default, CouchDB doesn't fully commit documents to disk right away;
+ // it waits about a second so it can batch the full commit flush together
+ // with any other updates. If it crashes or is restarted, you may lose the
+ // most recent commits.
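+ // (Durability here is driven by the X-Couch-Full-Commit header this
+ // suite's client sends; the save below goes over the wire roughly as
+ //   PUT /test_suite_db/1 HTTP/1.1
+ //   X-Couch-Full-Commit: false
+ // and returns 201 before the write is flushed to disk.)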
+
+ T(db.save({_id:"1",a:2,b:4}).ok);
+ T(db.open("1") != null);
+
+ restartServer();
+
+ T(db.open("1") == null); // lost the update.
+ // note that if we had waited > 1 sec before the restart, the doc
+ // would likely have been committed.
+
+
+ // Retry the same thing but with full commits on.
+
+ var db2 = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"true"});
+
+ T(db2.save({_id:"1",a:2,b:4}).ok);
+ T(db2.open("1") != null);
+
+ restartServer();
+
+ T(db2.open("1") != null);
+
+ // You can update without committing immediately, then ensure
+ // everything is committed in a final step.
+
+ T(db.save({_id:"2",a:2,b:4}).ok);
+ T(db.open("2") != null);
+ T(db.ensureFullCommit().ok);
+ restartServer();
+
+ T(db.open("2") != null);
+
+ // However, even with a flush it's possible the server crashed between
+ // the update and the commit, and you don't want to re-check every doc
+ // you updated to make sure it actually made it to disk. So record the
+ // database's instance start time before the updates, then check it again
+ // after the flush (the instance start time is returned by the flush
+ // operation). If they are the same, we know everything was committed
+ // safely.
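+ // A successful flush response looks roughly like (value illustrative):
+ //   {"ok":true,"instance_start_time":"1305630914069145"}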
+
+ // First try it with a crash.
+
+ var instanceStartTime = db.info().instance_start_time;
+
+ T(db.save({_id:"3",a:2,b:4}).ok);
+ T(db.open("3") != null);
+
+ restartServer();
+
+ var commitResult = db.ensureFullCommit();
+ T(commitResult.ok && commitResult.instance_start_time != instanceStartTime);
+ // start times don't match, meaning the server lost our change
+
+ T(db.open("3") == null); // yup lost it
+
+ // retry with no server restart
+
+ var instanceStartTime = db.info().instance_start_time;
+
+ T(db.save({_id:"4",a:2,b:4}).ok);
+ T(db.open("4") != null);
+
+ var commitResult = db.ensureFullCommit();
+ T(commitResult.ok && commitResult.instance_start_time == instanceStartTime);
+ // Successful commit, start times match!
+
+ restartServer();
+
+ T(db.open("4") != null);
+ });
+
+ // Now test that when we exceed max_dbs_open, pending commits are still
+ // safely written.
+ T(db.save({_id:"5",foo:"bar"}).ok);
+ var max = 2;
+ run_on_modified_server(
+ [{section: "couchdb",
+ key: "delayed_commits",
+ value: "true"},
+ {section: "couchdb",
+ key: "max_dbs_open",
+ value: max.toString()}],
+
+ function () {
+ for(var i=0; i<max; i++) {
+ var dbi = new CouchDB("test_suite_db" + i);
+ dbi.deleteDb();
+ dbi.createDb();
+ }
+ T(db.open("5").foo=="bar");
+ for(var i=0; i<max+1; i++) {
+ var dbi = new CouchDB("test_suite_db" + i);
+ dbi.deleteDb();
+ }
+ });
+
+
+ // Test that a conflict can't cause delayed commits to fail
+ run_on_modified_server(
+ [{section: "couchdb",
+ key: "delayed_commits",
+ value: "true"}],
+
+ function() {
+ // First save a document and commit it
+ T(db.save({_id:"6",a:2,b:4}).ok);
+ T(db.ensureFullCommit().ok);
+ // Generate a conflict
+ try {
+ db.save({_id:"6",a:2,b:4});
+ } catch(e) {
+ T(e.error == "conflict");
+ }
+ // Wait for the delayed commit interval to pass
+ var time = new Date();
+ while(new Date() - time < 2000);
+ // Save a new doc
+ T(db.save({_id:"7",a:2,b:4}).ok);
+ // Wait for the delayed commit interval to pass again
+ time = new Date();
+ while(new Date() - time < 2000);
+ // Crash the server and make sure the last doc was written
+ restartServer();
+ T(db.open("7") != null);
+ });
+};
diff --git a/1.1.x/share/www/script/test/design_docs.js b/1.1.x/share/www/script/test/design_docs.js
new file mode 100644
index 00000000..702f0441
--- /dev/null
+++ b/1.1.x/share/www/script/test/design_docs.js
@@ -0,0 +1,427 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.design_docs = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ var db2 = new CouchDB("test_suite_db_a", {"X-Couch-Full-Commit":"false"});
+
+ if (debug) debugger;
+
+ db.deleteDb();
+ db.createDb();
+ db2.deleteDb();
+ db2.createDb();
+
+ var server_config = [
+ {
+ section: "query_server_config",
+ key: "reduce_limit",
+ value: "false"
+ }
+ ];
+
+ var testFun = function() {
+ var numDocs = 500;
+
+ function makebigstring(power) {
+ var str = "a";
+ while(power-- > 0) {
+ str = str + str;
+ }
+ return str;
+ }
+
+ var designDoc = {
+ _id: "_design/test",
+ language: "javascript",
+ whatever : {
+ stringzone : "exports.string = 'plankton';",
+ commonjs : {
+ whynot : "exports.test = require('../stringzone'); " +
+ "exports.foo = require('whatever/stringzone');",
+ upper : "exports.testing = require('./whynot').test.string.toUpperCase()+" +
+ "module.id+require('./whynot').foo.string",
+ circular_one: "require('./circular_two'); exports.name = 'One';",
+ circular_two: "require('./circular_one'); exports.name = 'Two';"
+ },
+ // paths relative to parent
+ idtest1: {
+ a: {
+ b: {d: "module.exports = require('../c/e').id;"},
+ c: {e: "exports.id = module.id;"}
+ }
+ },
+ // multiple paths relative to parent
+ idtest2: {
+ a: {
+ b: {d: "module.exports = require('../../a/c/e').id;"},
+ c: {e: "exports.id = module.id;"}
+ }
+ },
+ // paths relative to module
+ idtest3: {
+ a: {
+ b: "module.exports = require('./c/d').id;",
+ c: {
+ d: "module.exports = require('./e');",
+ e: "exports.id = module.id;"
+ }
+ }
+ },
+ // paths relative to module and parent
+ idtest4: {
+ a: {
+ b: "module.exports = require('../a/./c/d').id;",
+ c: {
+ d: "module.exports = require('./e');",
+ e: "exports.id = module.id;"
+ }
+ }
+ },
+ // paths relative to root
+ idtest5: {
+ a: "module.exports = require('whatever/idtest5/b').id;",
+ b: "exports.id = module.id;"
+ }
+ },
+ views: {
+ all_docs_twice: {
+ map:
+ (function(doc) {
+ emit(doc.integer, null);
+ emit(doc.integer, null);
+ }).toString()
+ },
+ no_docs: {
+ map:
+ (function(doc) {
+ }).toString()
+ },
+ single_doc: {
+ map:
+ (function(doc) {
+ if (doc._id === "1") {
+ emit(1, null);
+ }
+ }).toString()
+ },
+ summate: {
+ map:
+ (function(doc) {
+ emit(doc.integer, doc.integer);
+ }).toString(),
+ reduce:
+ (function(keys, values) {
+ return sum(values);
+ }).toString()
+ },
+ summate2: {
+ map:
+ (function(doc) {
+ emit(doc.integer, doc.integer);
+ }).toString(),
+ reduce:
+ (function(keys, values) {
+ return sum(values);
+ }).toString()
+ },
+ huge_src_and_results: {
+ map:
+ (function(doc) {
+ if (doc._id === "1") {
+ emit(makebigstring(16), null);
+ }
+ }).toString(),
+ reduce:
+ (function(keys, values) {
+ return makebigstring(16);
+ }).toString()
+ },
+ lib : {
+ baz : "exports.baz = 'bam';",
+ foo : {
+ foo : "exports.foo = 'bar';",
+ boom : "exports.boom = 'ok';",
+ zoom : "exports.zoom = 'yeah';"
+ }
+ },
+ commonjs : {
+ map :
+ (function(doc) {
+ emit(null, require('views/lib/foo/boom').boom);
+ }).toString()
+ }
+ },
+ shows: {
+ simple:
+ (function() {
+ return 'ok';
+ }).toString(),
+ requirey:
+ (function() {
+ var lib = require('whatever/commonjs/upper');
+ return lib.testing;
+ }).toString(),
+ circular:
+ (function() {
+ var lib = require('whatever/commonjs/upper');
+ return JSON.stringify(this);
+ }).toString(),
+ circular_require:
+ (function() {
+ return require('whatever/commonjs/circular_one').name;
+ }).toString(),
+ idtest1: (function() {
+ return require('whatever/idtest1/a/b/d');
+ }).toString(),
+ idtest2: (function() {
+ return require('whatever/idtest2/a/b/d');
+ }).toString(),
+ idtest3: (function() {
+ return require('whatever/idtest3/a/b');
+ }).toString(),
+ idtest4: (function() {
+ return require('whatever/idtest4/a/b');
+ }).toString(),
+ idtest5: (function() {
+ return require('whatever/idtest5/a');
+ }).toString()
+ }
+ }; // designDoc
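+
+ // A sketch of how the module ids above resolve (matching the idtest
+ // assertions further down): require('whatever/idtest1/a/b/d') evaluates d,
+ // whose own require('../c/e') resolves relative to its parent, landing on
+ // the module whose id is "whatever/idtest1/a/c/e"; the show returns that id.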
+
+ var xhr = CouchDB.request(
+ "PUT", "/test_suite_db_a/_design/test", {body: JSON.stringify(designDoc)}
+ );
+ var resp = JSON.parse(xhr.responseText);
+
+ TEquals(resp.rev, db.save(designDoc).rev);
+
+ // test that editing a show fun on the ddoc results in a change in output
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_show/simple");
+ T(xhr.status == 200);
+ TEquals(xhr.responseText, "ok");
+
+ designDoc.shows.simple = (function() {
+ return 'ko';
+ }).toString();
+ T(db.save(designDoc).ok);
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_show/simple");
+ T(xhr.status == 200);
+ TEquals(xhr.responseText, "ko");
+
+ xhr = CouchDB.request(
+ "GET", "/test_suite_db_a/_design/test/_show/simple?cache=buster"
+ );
+ T(xhr.status == 200);
+ TEquals("ok", xhr.responseText, 'query server used wrong ddoc');
+
+ // test commonjs require
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_show/requirey");
+ T(xhr.status == 200);
+ TEquals("PLANKTONwhatever/commonjs/upperplankton", xhr.responseText);
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_show/circular");
+ T(xhr.status == 200);
+ TEquals("javascript", JSON.parse(xhr.responseText).language);
+
+ // test circular commonjs dependencies
+ xhr = CouchDB.request(
+ "GET",
+ "/test_suite_db/_design/test/_show/circular_require"
+ );
+ TEquals(200, xhr.status);
+ TEquals("One", xhr.responseText);
+
+ // Test that changes to the design doc properly invalidate cached modules:
+
+ // update the designDoc and replace
+ designDoc.whatever.commonjs.circular_one = "exports.name = 'Updated';";
+ T(db.save(designDoc).ok);
+
+ // request circular_require show function again and check the response has
+ // changed
+ xhr = CouchDB.request(
+ "GET",
+ "/test_suite_db/_design/test/_show/circular_require"
+ );
+ TEquals(200, xhr.status);
+ TEquals("Updated", xhr.responseText);
+
+
+ // test module id values are as expected:
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_show/idtest1");
+ TEquals(200, xhr.status);
+ TEquals("whatever/idtest1/a/c/e", xhr.responseText);
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_show/idtest2");
+ TEquals(200, xhr.status);
+ TEquals("whatever/idtest2/a/c/e", xhr.responseText);
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_show/idtest3");
+ TEquals(200, xhr.status);
+ TEquals("whatever/idtest3/a/c/e", xhr.responseText);
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_show/idtest4");
+ TEquals(200, xhr.status);
+ TEquals("whatever/idtest4/a/c/e", xhr.responseText);
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_show/idtest5");
+ TEquals(200, xhr.status);
+ TEquals("whatever/idtest5/b", xhr.responseText);
+
+
+ var prev_view_sig = db.designInfo("_design/test").view_index.signature;
+ var prev_view_size = db.designInfo("_design/test").view_index.disk_size;
+
+ db.bulkSave(makeDocs(1, numDocs + 1));
+ T(db.ensureFullCommit().ok);
+
+ // test that we get correct design doc info back,
+ // and also that GET /db/_design/test/_info
+ // hasn't triggered an update of the views
+ db.view("test/summate", {stale: "ok"}); // make sure view group's open
+ for (var i = 0; i < 2; i++) {
+ var dinfo = db.designInfo("_design/test");
+ TEquals("test", dinfo.name);
+ var vinfo = dinfo.view_index;
+ TEquals(prev_view_size, vinfo.disk_size, "view group disk size didn't change");
+ TEquals(false, vinfo.compact_running);
+ TEquals(prev_view_sig, vinfo.signature, 'ddoc sig');
+ // wait some time (there were issues where an update
+ // of the views had been triggered in the background)
+ var start = new Date().getTime();
+ while (new Date().getTime() < start + 2000);
+ TEquals(0, db.view("test/all_docs_twice", {stale: "ok"}).total_rows, 'view info');
+ TEquals(0, db.view("test/single_doc", {stale: "ok"}).total_rows, 'view info');
+ TEquals(0, db.view("test/summate", {stale: "ok"}).rows.length, 'view info');
+ T(db.ensureFullCommit().ok);
+ restartServer();
+ };
+
+ db.bulkSave(makeDocs(numDocs + 1, numDocs * 2 + 1));
+ T(db.ensureFullCommit().ok);
+
+ // open view group
+ db.view("test/summate", {stale: "ok"});
+ // wait so the views can get initialized
+ var start = new Date().getTime();
+ while (new Date().getTime() < start + 2000);
+
+ // test that POST /db/_view_cleanup
+ // doesn't trigger an update of the views
+ var len1 = db.view("test/all_docs_twice", {stale: "ok"}).total_rows;
+ var len2 = db.view("test/single_doc", {stale: "ok"}).total_rows;
+ var len3 = db.view("test/summate", {stale: "ok"}).rows.length;
+ for (i = 0; i < 2; i++) {
+ T(db.viewCleanup().ok);
+ // wait some time (there were issues where an update
+ // of the views had been triggered in the background)
+ start = new Date().getTime();
+ while (new Date().getTime() < start + 2000);
+ TEquals(len1, db.view("test/all_docs_twice", {stale: "ok"}).total_rows, 'view cleanup');
+ TEquals(len2, db.view("test/single_doc", {stale: "ok"}).total_rows, 'view cleanup');
+ TEquals(len3, db.view("test/summate", {stale: "ok"}).rows.length, 'view cleanup');
+ T(db.ensureFullCommit().ok);
+ restartServer();
+ // we'll test whether the view group stays closed
+ // and the views stay uninitialized (they should!)
+ len1 = len2 = len3 = 0;
+ };
+
+ // test commonjs in map functions
+ resp = db.view("test/commonjs", {limit:1});
+ T(resp.rows[0].value == 'ok');
+
+ // test that the _all_docs view returns correctly with keys
+ var results = db.allDocs({startkey:"_design", endkey:"_design0"});
+ T(results.rows.length == 1);
+
+ for (i = 0; i < 2; i++) {
+ var rows = db.view("test/all_docs_twice").rows;
+ for (var j = 0; j < numDocs; j++) {
+ T(rows[2 * j].key == (j + 1));
+ T(rows[(2 * j) + 1].key == (j + 1));
+ };
+ T(db.view("test/no_docs").total_rows == 0);
+ T(db.view("test/single_doc").total_rows == 1);
+ T(db.ensureFullCommit().ok);
+ restartServer();
+ };
+
+ // test that when no language is specified, JavaScript is implied
+ var designDoc2 = {
+ _id: "_design/test2",
+ // language: "javascript",
+ views: {
+ single_doc: {
+ map:
+ (function(doc) {
+ if (doc._id === "1") {
+ emit(1, null);
+ }
+ }).toString()
+ }
+ }
+ };
+
+ T(db.save(designDoc2).ok);
+ T(db.view("test2/single_doc").total_rows == 1);
+
+ var summate = function(N) {
+ return (N + 1) * (N / 2);
+ };
+ var result = db.view("test/summate");
+ T(result.rows[0].value == summate(numDocs * 2));
+
+ result = db.view("test/summate", {startkey: 4, endkey: 4});
+ T(result.rows[0].value == 4);
+
+ result = db.view("test/summate", {startkey: 4, endkey: 5});
+ T(result.rows[0].value == 9);
+
+ result = db.view("test/summate", {startkey: 4, endkey: 6});
+ T(result.rows[0].value == 15);
+
+ // test start_key and end_key aliases
+ result = db.view("test/summate", {start_key: 4, end_key: 6});
+ T(result.rows[0].value == 15);
+
+ // Verify that a shared index (view def is an exact copy of "summate")
+ // does not confuse the reduce stage
+ result = db.view("test/summate2", {startkey: 4, endkey: 6});
+ T(result.rows[0].value == 15);
+
+ for(i = 1; i < (numDocs / 2); i += 30) {
+ result = db.view("test/summate", {startkey: i, endkey: (numDocs - i)});
+ T(result.rows[0].value == summate(numDocs - i) - summate(i - 1));
+ }
+
+ T(db.deleteDoc(designDoc).ok);
+ T(db.open(designDoc._id) == null);
+ T(db.view("test/no_docs") == null);
+
+ T(db.ensureFullCommit().ok);
+ restartServer();
+ T(db.open(designDoc._id) == null);
+ T(db.view("test/no_docs") == null);
+
+ // trigger ddoc cleanup
+ T(db.viewCleanup().ok);
+ }; // end of testFun
+
+ run_on_modified_server(server_config, testFun);
+
+ // cleanup
+ db.deleteDb();
+ db2.deleteDb();
+};
diff --git a/1.1.x/share/www/script/test/design_options.js b/1.1.x/share/www/script/test/design_options.js
new file mode 100644
index 00000000..05764e24
--- /dev/null
+++ b/1.1.x/share/www/script/test/design_options.js
@@ -0,0 +1,74 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.design_options = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ //// test the include_design option
+ var map = "function (doc) {emit(null, doc._id);}";
+ var withseq = "function(doc) {emit(doc._local_seq, null)}";
+
+ // we need a design doc even to test temp views with it
+ var designDoc = {
+ _id:"_design/fu",
+ language: "javascript",
+ options: {
+ include_design: true,
+ local_seq: true
+ },
+ views: {
+ data: {"map": map},
+ with_seq : {"map" : withseq}
+ }
+ };
+ T(db.save(designDoc).ok);
+
+ // should work for temp views
+ var rows = db.query(map, null, {options:{include_design: true}}).rows;
+ T(rows.length == 1);
+ T(rows[0].value == "_design/fu");
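+
+ // A row from that query looks roughly like:
+ //   {"id":"_design/fu","key":null,"value":"_design/fu"}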
+
+ rows = db.query(map).rows;
+ T(rows.length == 0);
+
+ // when true, should include design docs in views
+ rows = db.view("fu/data").rows;
+ T(rows.length == 1);
+ T(rows[0].value == "_design/fu");
+
+ // when false, should not
+ designDoc.options.include_design = false;
+ delete designDoc._rev;
+ designDoc._id = "_design/bingo";
+ T(db.save(designDoc).ok);
+ rows = db.view("bingo/data").rows;
+ T(rows.length == 0);
+
+ // should default to false
+ delete designDoc.options;
+ delete designDoc._rev;
+ designDoc._id = "_design/bango";
+ T(db.save(designDoc).ok);
+ rows = db.view("bango/data").rows;
+ T(rows.length == 0);
+
+ // should also have local_seq in the view
+ var resp = db.save({});
+ rows = db.view("fu/with_seq").rows;
+ T(rows[0].key == 1);
+ T(rows[1].key == 2);
+ var doc = db.open(resp.id);
+ db.deleteDoc(doc);
+};
diff --git a/1.1.x/share/www/script/test/design_paths.js b/1.1.x/share/www/script/test/design_paths.js
new file mode 100644
index 00000000..426a252c
--- /dev/null
+++ b/1.1.x/share/www/script/test/design_paths.js
@@ -0,0 +1,72 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.design_paths = function(debug) {
+ if (debug) debugger;
+ var dbNames = ["test_suite_db", "test_suite_db/with_slashes"];
+ for (var i=0; i < dbNames.length; i++) {
+ var db = new CouchDB(dbNames[i]);
+ var dbName = encodeURIComponent(dbNames[i]);
+ db.deleteDb();
+ db.createDb();
+
+ // create a ddoc with _bulk_docs
+ db.bulkSave([{
+ _id : "_design/test",
+ views : {
+ "testing" : {
+ "map" : "function(){emit(1,1)}"
+ }
+ }
+ }]);
+
+ // ddoc is gettable
+ var xhr = CouchDB.request("GET", "/"+dbName+"/_design/test");
+ var resp = JSON.parse(xhr.responseText);
+ T(resp._id == "_design/test");
+
+ // it's at 2 URLs...
+ var xhr = CouchDB.request("GET", "/"+dbName+"/_design%2Ftest");
+ var resp = JSON.parse(xhr.responseText);
+ T(resp._id == "_design/test");
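+
+ // i.e. the same ddoc answers at both the raw and the percent-encoded URL:
+ //   GET /<db>/_design/test
+ //   GET /<db>/_design%2Ftest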
+
+ // ensure that views are addressable
+ resp = db.view("test/testing")
+ T(resp.total_rows == 0)
+
+ // create a ddoc by putting to url with raw slash
+ var xhr = CouchDB.request("PUT", "/"+dbName+"/_design/test2",{
+ body : JSON.stringify({
+ _id : "_design/test2",
+ views : {
+ "testing" : {
+ "map" : "function(){emit(1,1)}"
+ }
+ }
+ })
+ });
+
+ // ddoc is gettable
+ var xhr = CouchDB.request("GET", "/"+dbName+"/_design/test2");
+ var resp = JSON.parse(xhr.responseText);
+ T(resp._id == "_design/test2");
+
+ // it's at 2 URLs...
+ var xhr = CouchDB.request("GET", "/"+dbName+"/_design%2Ftest2");
+ var resp = JSON.parse(xhr.responseText);
+ T(resp._id == "_design/test2");
+
+ // ensure that views are addressable
+ resp = db.view("test2/testing");
+ T(resp.total_rows == 0);
+ };
+};
diff --git a/1.1.x/share/www/script/test/erlang_views.js b/1.1.x/share/www/script/test/erlang_views.js
new file mode 100644
index 00000000..7eddab40
--- /dev/null
+++ b/1.1.x/share/www/script/test/erlang_views.js
@@ -0,0 +1,133 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.erlang_views = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ run_on_modified_server(
+ [{section: "native_query_servers",
+ key: "erlang",
+ value: "{couch_native_process, start_link, []}"}],
+ function() {
+ // Note we just do some basic smoke tests here; the
+ // test/query_server_spec.rb suite has more comprehensive coverage
+ var doc = {_id: "1", integer: 1, string: "str1", array: [1, 2, 3]};
+ T(db.save(doc).ok);
+
+ var mfun = 'fun({Doc}) -> ' +
+ ' K = couch_util:get_value(<<"integer">>, Doc, null), ' +
+ ' V = couch_util:get_value(<<"string">>, Doc, null), ' +
+ ' Emit(K, V) ' +
+ 'end.';
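+
+ // (The server override above is equivalent to a local.ini stanza like
+ //   [native_query_servers]
+ //   erlang = {couch_native_process, start_link, []}
+ // which registers the Erlang native query server.)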
+
+ // emitting a key value that is undefined should result in that row not
+ // being included in the view results
+ var results = db.query(mfun, null, null, null, "erlang");
+ T(results.total_rows == 1);
+ T(results.rows[0].key == 1);
+ T(results.rows[0].value == "str1");
+
+ // check simple reduction - another doc with same key.
+ var doc = {_id: "2", integer: 1, string: "str2"};
+ T(db.save(doc).ok);
+ rfun = "fun(Keys, Values, ReReduce) -> length(Values) end.";
+ results = db.query(mfun, rfun, null, null, "erlang");
+ T(results.rows[0].value == 2);
+
+ // simple 'list' tests
+ var designDoc = {
+ _id:"_design/erlview",
+ language: "erlang",
+ shows: {
+ simple:
+ 'fun(Doc, {Req}) -> ' +
+ ' {Info} = couch_util:get_value(<<"info">>, Req, {[]}), ' +
+ ' Purged = couch_util:get_value(<<"purge_seq">>, Info, -1), ' +
+ ' Verb = couch_util:get_value(<<"method">>, Req, <<"not_get">>), ' +
+ ' R = list_to_binary(io_lib:format("~b - ~s", [Purged, Verb])), ' +
+ ' {[{<<"code">>, 200}, {<<"headers">>, {[]}}, {<<"body">>, R}]} ' +
+ 'end.'
+ },
+ lists: {
+ simple_list :
+ 'fun(Head, {Req}) -> ' +
+ ' Send(<<"head">>), ' +
+ ' Fun = fun({Row}, _) -> ' +
+ ' Val = couch_util:get_value(<<"value">>, Row, -1), ' +
+ ' Send(list_to_binary(integer_to_list(Val))), ' +
+ ' {ok, nil} ' +
+ ' end, ' +
+ ' {ok, _} = FoldRows(Fun, nil), ' +
+ ' <<"tail">> ' +
+ 'end. '
+ },
+ views: {
+ simple_view : {
+ map: mfun,
+ reduce: rfun
+ }
+ }
+ };
+ T(db.save(designDoc).ok);
+
+ var url = "/test_suite_db/_design/erlview/_show/simple/1";
+ var xhr = CouchDB.request("GET", url);
+ T(xhr.status == 200, "standard get should be 200");
+ T(xhr.responseText == "0 - GET");
+
+ var url = "/test_suite_db/_design/erlview/_list/simple_list/simple_view";
+ var xhr = CouchDB.request("GET", url);
+ T(xhr.status == 200, "standard get should be 200");
+ T(xhr.responseText == "head2tail");
+
+ // Larger dataset
+
+ db.deleteDb();
+ db.createDb();
+ var words = "foo bar abc def baz xxyz".split(/\s+/);
+
+ var docs = [];
+ for(var i = 0; i < 250; i++) {
+ var body = [];
+ for(var j = 0; j < 100; j++) {
+ body.push({
+ word: words[j%words.length],
+ count: j
+ });
+ }
+ docs.push({
+ "_id": "test-" + i,
+ "words": body
+ });
+ }
+ TEquals(250, db.bulkSave(docs).length, "Saved big doc set.");
+
+ var mfun = 'fun({Doc}) -> ' +
+ 'Words = couch_util:get_value(<<"words">>, Doc), ' +
+ 'lists:foreach(fun({Word}) -> ' +
+ 'WordString = couch_util:get_value(<<"word">>, Word), ' +
+ 'Count = couch_util:get_value(<<"count">>, Word), ' +
+ 'Emit(WordString , Count) ' +
+ 'end, Words) ' +
+ 'end.';
+
+ var rfun = 'fun(Keys, Values, RR) -> length(Values) end.';
+ var results = db.query(mfun, rfun, null, null, "erlang");
+ T(results.rows[0].key === null, "Returned a reduced value.");
+ T(results.rows[0].value > 0, "Reduce value exists.");
+ });
+};
diff --git a/1.1.x/share/www/script/test/etags_head.js b/1.1.x/share/www/script/test/etags_head.js
new file mode 100644
index 00000000..63e29994
--- /dev/null
+++ b/1.1.x/share/www/script/test/etags_head.js
@@ -0,0 +1,78 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.etags_head = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var xhr;
+
+ // create a new doc
+ xhr = CouchDB.request("PUT", "/test_suite_db/1", {
+ body: "{}"
+ });
+ T(xhr.status == 201);
+
+ // extract the ETag header values
+ var etag = xhr.getResponseHeader("etag");
+
+ // get the doc and verify the headers match
+ xhr = CouchDB.request("GET", "/test_suite_db/1");
+ T(etag == xhr.getResponseHeader("etag"));
+
+ // 'head' the doc and verify the headers match
+ xhr = CouchDB.request("HEAD", "/test_suite_db/1", {
+ headers: {"if-none-match": "s"}
+ });
+ T(etag == xhr.getResponseHeader("etag"));
+
+ // replace a doc
+ xhr = CouchDB.request("PUT", "/test_suite_db/1", {
+ body: "{}",
+ headers: {"if-match": etag}
+ });
+ T(xhr.status == 201);
+
+ // extract the new ETag value
+ var etagOld= etag;
+ etag = xhr.getResponseHeader("etag");
+
+ // fail to replace a doc
+ xhr = CouchDB.request("PUT", "/test_suite_db/1", {
+ body: "{}"
+ });
+ T(xhr.status == 409);
+
+ // verify get w/Etag
+ xhr = CouchDB.request("GET", "/test_suite_db/1", {
+ headers: {"if-none-match": etagOld}
+ });
+ T(xhr.status == 200);
+ xhr = CouchDB.request("GET", "/test_suite_db/1", {
+ headers: {"if-none-match": etag}
+ });
+ T(xhr.status == 304);
+
+ // fail to delete a doc
+ xhr = CouchDB.request("DELETE", "/test_suite_db/1", {
+ headers: {"if-match": etagOld}
+ });
+ T(xhr.status == 409);
+
+ // now do it for real
+ xhr = CouchDB.request("DELETE", "/test_suite_db/1", {
+ headers: {"if-match": etag}
+ });
+ T(xhr.status == 200);
+};
diff --git a/1.1.x/share/www/script/test/etags_views.js b/1.1.x/share/www/script/test/etags_views.js
new file mode 100644
index 00000000..f556d6ac
--- /dev/null
+++ b/1.1.x/share/www/script/test/etags_views.js
@@ -0,0 +1,212 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.etags_views = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"true"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var designDoc = {
+ _id: "_design/etags",
+ language: "javascript",
+ views : {
+ fooView: {
+ map: stringFun(function(doc) {
+ if (doc.foo) {
+ emit("bar", 1);
+ }
+ })
+ },
+ basicView : {
+ map : stringFun(function(doc) {
+ if(doc.integer && doc.string) {
+ emit(doc.integer, doc.string);
+ }
+ })
+ },
+ withReduce : {
+ map : stringFun(function(doc) {
+ if(doc.integer && doc.string) {
+ emit(doc.integer, doc.string);
+ }
+ }),
+ reduce : stringFun(function(keys, values, rereduce) {
+ if (rereduce) {
+ return sum(values);
+ } else {
+ return values.length;
+ }
+ })
+ }
+ }
+ };
+ T(db.save(designDoc).ok);
+ db.bulkSave(makeDocs(0, 10));
+
+ var xhr;
+
+ // verify get w/Etag on map view
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView");
+ T(xhr.status == 200);
+ var etag = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView", {
+ headers: {"if-none-match": etag}
+ });
+ T(xhr.status == 304);
+
+ // verify ETag doesn't change when an update
+ // doesn't change the view group's index
+ T(db.save({"_id":"doc1", "foo":"bar"}).ok);
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView");
+ var etag1 = xhr.getResponseHeader("etag");
+ T(etag1 == etag);
+
+ // Verify that purges affect etags
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/fooView");
+ var foo_etag = xhr.getResponseHeader("etag");
+ var doc1 = db.open("doc1");
+ xhr = CouchDB.request("POST", "/test_suite_db/_purge", {
+ body: JSON.stringify({"doc1":[doc1._rev]})
+ });
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/fooView");
+ var etag1 = xhr.getResponseHeader("etag");
+ T(etag1 != foo_etag);
+
+ // Test that _purge didn't affect the other view etags.
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView");
+ var etag1 = xhr.getResponseHeader("etag");
+ T(etag1 == etag);
+
+ // verify different views in the same view group may have different ETags
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/fooView");
+ var etag1 = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView");
+ var etag2 = xhr.getResponseHeader("etag");
+ T(etag1 != etag2);
+
+ // verify ETag changes when an update changes the view group's index.
+ db.bulkSave(makeDocs(10, 20));
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView");
+ var etag1 = xhr.getResponseHeader("etag");
+ T(etag1 != etag);
+
+ // verify ETag is the same after a restart
+ restartServer();
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/basicView");
+ var etag2 = xhr.getResponseHeader("etag");
+ T(etag1 == etag2);
+
+ // reduce view
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/withReduce");
+ T(xhr.status == 200);
+ var etag = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/withReduce",{
+ headers: {"if-none-match": etag}
+ });
+ T(xhr.status == 304);
+
+ // verify ETag doesn't change when an update
+ // doesn't change the view group's index
+ T(db.save({"_id":"doc3", "foo":"bar"}).ok);
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/withReduce");
+ var etag1 = xhr.getResponseHeader("etag");
+ T(etag1 == etag);
+ // purge
+ var doc3 = db.open("doc3");
+ xhr = CouchDB.request("POST", "/test_suite_db/_purge", {
+ body: JSON.stringify({"doc3":[doc3._rev]})
+ });
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/withReduce");
+ var etag1 = xhr.getResponseHeader("etag");
+ T(etag1 == etag);
+
+ // verify different views in the same view group may have different ETags
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/fooView");
+ var etag1 = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/withReduce");
+ var etag2 = xhr.getResponseHeader("etag");
+ T(etag1 != etag2);
+
+ // verify ETag changes when an update changes the view group's index
+ db.bulkSave(makeDocs(20, 30));
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/withReduce");
+ var etag1 = xhr.getResponseHeader("etag");
+ T(etag1 != etag);
+
+ // verify ETag is the same after a restart
+ restartServer();
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/etags/_view/withReduce");
+ var etag2 = xhr.getResponseHeader("etag");
+ T(etag1 == etag2);
+
+ // confirm ETag changes with different POST bodies
+ xhr = CouchDB.request("POST", "/test_suite_db/_design/etags/_view/basicView",
+ {body: JSON.stringify({keys:[1]})}
+ );
+ var etag1 = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("POST", "/test_suite_db/_design/etags/_view/basicView",
+ {body: JSON.stringify({keys:[2]})}
+ );
+ var etag2 = xhr.getResponseHeader("etag");
+ T(etag1 != etag2, "POST to map view generates key-depdendent ETags");
+
+ xhr = CouchDB.request("POST",
+ "/test_suite_db/_design/etags/_view/withReduce?group=true",
+ {body: JSON.stringify({keys:[1]})}
+ );
+ etag1 = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("POST",
+ "/test_suite_db/_design/etags/_view/withReduce?group=true",
+ {body: JSON.stringify({keys:[2]})}
+ );
+ etag2 = xhr.getResponseHeader("etag");
+ T(etag1 != etag2, "POST to reduce view generates key-depdendent ETags");
+
+ // all docs
+ xhr = CouchDB.request("GET", "/test_suite_db/_all_docs");
+ T(xhr.status == 200);
+ var etag = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("GET", "/test_suite_db/_all_docs", {
+ headers: {"if-none-match": etag}
+ });
+ T(xhr.status == 304);
+
+ // _changes
+ xhr = CouchDB.request("GET", "/test_suite_db/_changes");
+ T(xhr.status == 200);
+ var etag = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("GET", "/test_suite_db/_changes", {
+ headers: {"if-none-match": etag}
+ });
+ T(xhr.status == 304);
+
+ // list ETags are exercised in the list test for now
+
+ // A new database should have unique _all_docs etags.
+ db.deleteDb();
+ db.createDb();
+ db.save({a: 1});
+ xhr = CouchDB.request("GET", "/test_suite_db/_all_docs");
+ var etag = xhr.getResponseHeader("etag");
+ db.deleteDb();
+ db.createDb();
+ db.save({a: 2});
+ xhr = CouchDB.request("GET", "/test_suite_db/_all_docs");
+ var new_etag = xhr.getResponseHeader("etag");
+ T(etag != new_etag);
+ // but still be cacheable
+ xhr = CouchDB.request("GET", "/test_suite_db/_all_docs");
+ T(new_etag == xhr.getResponseHeader("etag"));
+
+};
diff --git a/1.1.x/share/www/script/test/form_submit.js b/1.1.x/share/www/script/test/form_submit.js
new file mode 100644
index 00000000..39833d1a
--- /dev/null
+++ b/1.1.x/share/www/script/test/form_submit.js
@@ -0,0 +1,26 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do some basic tests.
+couchTests.form_submit = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+
+ // POST to a doc with a non-form Content-Type should be rejected with 415
+ var json = "{}";
+ var xhr = CouchDB.request("POST", "/test_suite_db/baz", {body: json});
+ T(xhr.status == 415);
+ var result = JSON.parse(xhr.responseText);
+ TEquals("bad_content_type", result.error);
+ TEquals("Invalid Content-Type header for form upload", result.reason);
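+
+ // i.e. the expected error body, per the assertions above:
+ //   {"error":"bad_content_type","reason":"Invalid Content-Type header for form upload"}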
+};
diff --git a/1.1.x/share/www/script/test/http.js b/1.1.x/share/www/script/test/http.js
new file mode 100644
index 00000000..5f46af52
--- /dev/null
+++ b/1.1.x/share/www/script/test/http.js
@@ -0,0 +1,54 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.http = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+
+ // bug COUCHDB-100: DELETE on non-existent DB returns 500 instead of 404
+ db.deleteDb();
+
+ db.createDb();
+
+ // test that the Location header on doc creation is fully qualified
+ if (debug) debugger;
+
+ var xhr = CouchDB.request("PUT", "/test_suite_db/test", {body: "{}"});
+ var host = CouchDB.host;
+
+ TEquals(CouchDB.protocol + host + "/test_suite_db/test",
+ xhr.getResponseHeader("Location"),
+ "should include ip address");
+
+ xhr = CouchDB.request("PUT", "/test_suite_db/test2", {
+ body: "{}",
+ headers: {"X-Forwarded-Host": "mysite.com"}
+ });
+
+ TEquals(CouchDB.protocol + "mysite.com/test_suite_db/test2",
+ xhr.getResponseHeader("Location"),
+ "should include X-Forwarded-Host");
+
+ run_on_modified_server([{
+ section:"httpd",
+ key:"x_forwarded_host",
+ value:"X-Host"}],
+ function() {
+ xhr = CouchDB.request("PUT", "/test_suite_db/test3", {
+ body: "{}",
+ headers: {"X-Host": "mysite2.com"}
+ });
+ TEquals(CouchDB.protocol + "mysite2.com/test_suite_db/test3",
+ xhr.getResponseHeader("Location"),
+ "should include X-Host");
+ });
+};
diff --git a/1.1.x/share/www/script/test/invalid_docids.js b/1.1.x/share/www/script/test/invalid_docids.js
new file mode 100644
index 00000000..d0195b02
--- /dev/null
+++ b/1.1.x/share/www/script/test/invalid_docids.js
@@ -0,0 +1,77 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.invalid_docids = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // Test _local explicitly first.
+ T(db.save({"_id": "_local/foo"}).ok);
+ T(db.open("_local/foo")._id == "_local/foo");
+
+ var urls = [
+ "/test_suite_db/_local",
+ "/test_suite_db/_local/",
+ "/test_suite_db/_local%2F",
+ "/test_suite_db/_local/foo/bar",
+ ];
+
+ urls.forEach(function(u) {
+ var res = db.request("PUT", u, {"body": "{}"});
+ T(res.status == 400);
+ T(JSON.parse(res.responseText).error == "bad_request");
+ });
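+
+ // Each of those PUTs is rejected roughly as:
+ //   400 Bad Request
+ //   {"error":"bad_request","reason":"..."}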
+
+ // Test non-string
+ try {
+ db.save({"_id": 1});
+ T(1 == 0, "doc id must be string");
+ } catch(e) {
+ T(db.last_req.status == 400);
+ T(e.error == "bad_request");
+ }
+
+ // Via PUT with _id not in body.
+ var res = db.request("PUT", "/test_suite_db/_other", {"body": "{}"});
+ T(res.status == 400);
+ T(JSON.parse(res.responseText).error == "bad_request");
+
+ // Accidental POST to form handling code.
+ res = db.request("POST", "/test_suite_db/_tmp_view", {"body": "{}"});
+ T(res.status == 400);
+ T(JSON.parse(res.responseText).error == "bad_request");
+
+ // Test invalid _prefix
+ try {
+ db.save({"_id": "_invalid"});
+ T(1 == 0, "doc id may not start with underscore");
+ } catch(e) {
+ T(db.last_req.status == 400);
+ T(e.error == "bad_request");
+ }
+
+ // Test _bulk_docs explicitly.
+ var docs = [{"_id": "_design/foo"}, {"_id": "_local/bar"}];
+ db.bulkSave(docs);
+ docs.forEach(function(d) {T(db.open(d._id)._id == d._id);});
+
+ docs = [{"_id": "_invalid"}];
+ try {
+ db.bulkSave(docs);
+ T(1 == 0, "doc id may not start with underscore, even in bulk docs");
+ } catch(e) {
+ T(db.last_req.status == 400);
+ T(e.error == "bad_request");
+ }
+};
diff --git a/1.1.x/share/www/script/test/jsonp.js b/1.1.x/share/www/script/test/jsonp.js
new file mode 100644
index 00000000..9aba7189
--- /dev/null
+++ b/1.1.x/share/www/script/test/jsonp.js
@@ -0,0 +1,82 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Verify callbacks ran
+var jsonp_flag = 0;
+
+// Callbacks
+function jsonp_no_chunk(doc) {
+ T(jsonp_flag == 0);
+ T(doc._id == "0");
+ jsonp_flag = 1;
+}
+
+function jsonp_chunk(doc) {
+ T(jsonp_flag == 0);
+ T(doc.total_rows == 1);
+ jsonp_flag = 1;
+}
+
+// Do some jsonp tests.
+couchTests.jsonp = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var doc = {_id:"0",a:0,b:0};
+ T(db.save(doc).ok);
+
+ // callback param is ignored unless jsonp is configured
+ var xhr = CouchDB.request("GET", "/test_suite_db/0?callback=jsonp_not_configured");
+ JSON.parse(xhr.responseText);
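+
+ // With allow_jsonp off the body stays plain JSON; once it is enabled
+ // (below), the response is wrapped in the callback, roughly:
+ //   jsonp_no_chunk({"_id":"0","_rev":"...","a":0,"b":0});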
+
+ run_on_modified_server(
+ [{section: "httpd",
+ key: "allow_jsonp",
+ value: "true"}],
+ function() {
+
+ // Test unchunked callbacks.
+ var xhr = CouchDB.request("GET", "/test_suite_db/0?callback=jsonp_no_chunk");
+ T(xhr.status == 200);
+ jsonp_flag = 0;
+ eval(xhr.responseText);
+ T(jsonp_flag == 1);
+ xhr = CouchDB.request("GET", "/test_suite_db/0?callback=foo\"");
+ T(xhr.status == 400);
+
+ // Test chunked responses
+ var doc = {_id:"1",a:1,b:1};
+ T(db.save(doc).ok);
+
+ var designDoc = {
+ _id:"_design/test",
+ language: "javascript",
+ views: {
+ all_docs: {map: "function(doc) {if(doc.a) emit(null, doc.a);}"}
+ }
+ };
+ T(db.save(designDoc).ok);
+
+ var url = "/test_suite_db/_design/test/_view/all_docs?callback=jsonp_chunk";
+ xhr = CouchDB.request("GET", url);
+ T(xhr.status == 200);
+ jsonp_flag = 0;
+ eval(xhr.responseText);
+ T(jsonp_flag == 1);
+ xhr = CouchDB.request("GET", url + "\'");
+ T(xhr.status == 400);
+ });
+
+
+};
diff --git a/1.1.x/share/www/script/test/large_docs.js b/1.1.x/share/www/script/test/large_docs.js
new file mode 100644
index 00000000..b84648b7
--- /dev/null
+++ b/1.1.x/share/www/script/test/large_docs.js
@@ -0,0 +1,33 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.large_docs = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var longtext = "0123456789\n";
+
+ for (var i=0; i<10; i++) {
+ longtext = longtext + longtext;
+ }
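+ // after 10 doublings longtext is 11 * 2^10 = 11264 characters (~11 KB)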
+ T(db.save({"longtest":longtext}).ok);
+ T(db.save({"longtest":longtext}).ok);
+ T(db.save({"longtest":longtext}).ok);
+ T(db.save({"longtest":longtext}).ok);
+
+ // query all documents, emitting each doc's longtest string as the value.
+ var results = db.query(function(doc){
+ emit(null, doc.longtest);
+ });
+};
diff --git a/1.1.x/share/www/script/test/list_views.js b/1.1.x/share/www/script/test/list_views.js
new file mode 100644
index 00000000..2c1ac321
--- /dev/null
+++ b/1.1.x/share/www/script/test/list_views.js
@@ -0,0 +1,475 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.list_views = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var designDoc = {
+ _id:"_design/lists",
+ language: "javascript",
+ views : {
+ basicView : {
+ map : stringFun(function(doc) {
+ emit(doc.integer, doc.string);
+ })
+ },
+ withReduce : {
+ map : stringFun(function(doc) {
+ emit(doc.integer, doc.string);
+ }),
+ reduce : stringFun(function(keys, values, rereduce) {
+ if (rereduce) {
+ return sum(values);
+ } else {
+ return values.length;
+ }
+ })
+ }
+ },
+ lists: {
+ basicBasic : stringFun(function(head, req) {
+ send("head");
+ var row;
+ while(row = getRow()) {
+ log("row: "+toJSON(row));
+ send(row.key);
+ };
+ return "tail";
+ }),
+ basicJSON : stringFun(function(head, req) {
+ start({"headers":{"Content-Type" : "application/json"}});
+ send('{"head":'+toJSON(head)+', ');
+ send('"req":'+toJSON(req)+', ');
+ send('"rows":[');
+ var row, sep = '';
+ while (row = getRow()) {
+ send(sep + toJSON(row));
+ sep = ', ';
+ }
+ return "]}";
+ }),
+ simpleForm: stringFun(function(head, req) {
+ log("simpleForm");
+ send('<ul>');
+ var row, row_number = 0, prevKey, firstKey = null;
+ while (row = getRow()) {
+ row_number += 1;
+ if (!firstKey) firstKey = row.key;
+ prevKey = row.key;
+ send('\n<li>Key: '+row.key
+ +' Value: '+row.value
+ +' LineNo: '+row_number+'</li>');
+ }
+ return '</ul><p>FirstKey: '+ firstKey + ' LastKey: '+ prevKey+'</p>';
+ }),
+ acceptSwitch: stringFun(function(head, req) {
+ // provides() takes care of setting the proper headers
+ provides("html", function() {
+ send("HTML <ul>");
+
+ var row, num = 0;
+ while (row = getRow()) {
+ num ++;
+ send('\n<li>Key: '
+ +row.key+' Value: '+row.value
+ +' LineNo: '+num+'</li>');
+ }
+
+ // tail
+ return '</ul>';
+ });
+
+ provides("xml", function() {
+ send('<feed xmlns="http://www.w3.org/2005/Atom">'
+ +'<title>Test XML Feed</title>');
+
+ while (row = getRow()) {
+ var entry = new XML('<entry/>');
+ entry.id = row.id;
+ entry.title = row.key;
+ entry.content = row.value;
+ send(entry);
+ }
+ return "</feed>";
+ });
+ }),
+ qsParams: stringFun(function(head, req) {
+ return toJSON(req.query) + "\n";
+ }),
+ stopIter: stringFun(function(req) {
+ send("head");
+ var row, row_number = 0;
+ while(row = getRow()) {
+ if(row_number > 2) break;
+ send(" " + row_number);
+ row_number += 1;
+ };
+ return " tail";
+ }),
+ stopIter2: stringFun(function(head, req) {
+ provides("html", function() {
+ send("head");
+ var row, row_number = 0;
+ while(row = getRow()) {
+ if(row_number > 2) break;
+ send(" " + row_number);
+ row_number += 1;
+ };
+ return " tail";
+ });
+ }),
+ tooManyGetRows : stringFun(function() {
+ send("head");
+ var row;
+ while(row = getRow()) {
+ send(row.key);
+ };
+ getRow();
+ getRow();
+ getRow();
+ row = getRow();
+ return "after row: "+toJSON(row);
+ }),
+ emptyList: stringFun(function() {
+ return " ";
+ }),
+ rowError : stringFun(function(head, req) {
+ send("head");
+ var row = getRow();
+ send(fooBarBam); // intentional error
+ return "tail";
+ }),
+ docReference : stringFun(function(head, req) {
+ send("head");
+ var row = getRow();
+ send(row.doc.integer);
+ return "tail";
+ }),
+ secObj: stringFun(function(head, req) {
+ return toJSON(req.secObj);
+ })
+ }
+ };
+ var viewOnlyDesignDoc = {
+ _id:"_design/views",
+ language: "javascript",
+ views : {
+ basicView : {
+ map : stringFun(function(doc) {
+ emit(-doc.integer, doc.string);
+ })
+ }
+ }
+ };
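+ // The same idea as a list function written in Erlang; it can only run once
+ // the native query server is enabled (see run_on_modified_server below).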
+ var erlListDoc = {
+ _id: "_design/erlang",
+ language: "erlang",
+ lists: {
+ simple:
+ 'fun(Head, {Req}) -> ' +
+ ' Send(<<"[">>), ' +
+ ' Fun = fun({Row}, Sep) -> ' +
+ ' Val = couch_util:get_value(<<"key">>, Row, 23), ' +
+ ' Send(list_to_binary(Sep ++ integer_to_list(Val))), ' +
+ ' {ok, ","} ' +
+ ' end, ' +
+ ' {ok, _} = FoldRows(Fun, ""), ' +
+ ' Send(<<"]">>) ' +
+ 'end.'
+ }
+ };
+
+ T(db.save(designDoc).ok);
+
+ var docs = makeDocs(0, 10);
+ db.bulkSave(docs);
+
+ var view = db.view('lists/basicView');
+ T(view.total_rows == 10);
+
+ // standard get
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/basicBasic/basicView");
+ T(xhr.status == 200, "standard get should be 200");
+ T(/head0123456789tail/.test(xhr.responseText));
+
+
+ // test that etags are available
+ var etag = xhr.getResponseHeader("etag");
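+ // a conditional GET with a matching ETag should answer 304 Not Modified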
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/basicBasic/basicView", {
+ headers: {"if-none-match": etag}
+ });
+ T(xhr.status == 304);
+
+ // confirm ETag changes with different POST bodies
+ xhr = CouchDB.request("POST", "/test_suite_db/_design/lists/_list/basicBasic/basicView",
+ {body: JSON.stringify({keys:[1]})}
+ );
+ var etag1 = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("POST", "/test_suite_db/_design/lists/_list/basicBasic/basicView",
+ {body: JSON.stringify({keys:[2]})}
+ );
+ var etag2 = xhr.getResponseHeader("etag");
+ T(etag1 != etag2, "POST to map _list generates key-depdendent ETags");
+
+ // test the richness of the arguments
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/basicJSON/basicView?update_seq=true");
+ T(xhr.status == 200, "standard get should be 200");
+ var resp = JSON.parse(xhr.responseText);
+ TEquals(10, resp.head.total_rows);
+ TEquals(0, resp.head.offset);
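+ // one design doc plus ten saved docs gives an update_seq of 11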
+ TEquals(11, resp.head.update_seq);
+
+ T(resp.rows.length == 10);
+ TEquals(resp.rows[0], {"id": "0","key": 0,"value": "0"});
+
+ TEquals(resp.req.info.db_name, "test_suite_db");
+ TEquals(resp.req.method, "GET");
+ TEquals(resp.req.path, [
+ "test_suite_db",
+ "_design",
+ "lists",
+ "_list",
+ "basicJSON",
+ "basicView"
+ ]);
+ T(resp.req.headers.Accept);
+ T(resp.req.headers.Host);
+ T(resp.req.headers["User-Agent"]);
+ T(resp.req.cookie);
+
+ // get with query params
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/basicView?startkey=3&endkey=8");
+ T(xhr.status == 200, "with query params");
+ T(!(/Key: 1/.test(xhr.responseText)));
+ T(/FirstKey: 3/.test(xhr.responseText));
+ T(/LastKey: 8/.test(xhr.responseText));
+
+ // with 0 rows
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/basicView?startkey=30");
+ T(xhr.status == 200, "0 rows");
+ T(/<\/ul>/.test(xhr.responseText));
+
+ // too many getRow() calls
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/tooManyGetRows/basicView");
+ T(xhr.status == 200, "tooManyGetRows");
+ T(/9after row: null/.test(xhr.responseText));
+
+
+ // reduce with 0 rows
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?startkey=30");
+ T(xhr.status == 200, "reduce 0 rows");
+ T(/LastKey: undefined/.test(xhr.responseText));
+
+ // when there is a reduce present, but not used
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?reduce=false");
+ T(xhr.status == 200, "reduce false");
+ T(/Key: 1/.test(xhr.responseText));
+
+
+ // when there is a reduce present, and used
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?group=true");
+ T(xhr.status == 200, "group reduce");
+ T(/Key: 1/.test(xhr.responseText));
+
+ // there should be etags on reduce as well
+ var etag = xhr.getResponseHeader("etag");
+ T(etag, "Etags should be served with reduce lists");
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?group=true", {
+ headers: {"if-none-match": etag}
+ });
+ T(xhr.status == 304);
+
+ // confirm ETag changes with different POST bodies
+ xhr = CouchDB.request("POST", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?group=true",
+ {body: JSON.stringify({keys:[1]})}
+ );
+ var etag1 = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("POST", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?group=true",
+ {body: JSON.stringify({keys:[2]})}
+ );
+ var etag2 = xhr.getResponseHeader("etag");
+ T(etag1 != etag2, "POST to reduce _list generates key-depdendent ETags");
+
+ // verify the etags expire correctly
+ var docs = makeDocs(11, 12);
+ db.bulkSave(docs);
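+ // the new doc changes the view, so the old ETag no longer matches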
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?group=true", {
+ headers: {"if-none-match": etag}
+ });
+ T(xhr.status == 200, "reduce etag");
+
+ // empty list
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/emptyList/basicView");
+ T(xhr.responseText.match(/^ $/));
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/emptyList/withReduce?group=true");
+ T(xhr.responseText.match(/^ $/));
+
+ // multi-key fetch
+ var xhr = CouchDB.request("POST", "/test_suite_db/_design/lists/_list/simpleForm/basicView", {
+ body: '{"keys":[2,4,5,7]}'
+ });
+ T(xhr.status == 200, "multi key");
+ T(!(/Key: 1 /.test(xhr.responseText)));
+ T(/Key: 2/.test(xhr.responseText));
+ T(/FirstKey: 2/.test(xhr.responseText));
+ T(/LastKey: 7/.test(xhr.responseText));
+
+ // multi-key fetch with GET
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/simpleForm/basicView" +
+ "?keys=[2,4,5,7]");
+
+ T(xhr.status == 200, "multi key");
+ T(!(/Key: 1 /.test(xhr.responseText)));
+ T(/Key: 2/.test(xhr.responseText));
+ T(/FirstKey: 2/.test(xhr.responseText));
+ T(/LastKey: 7/.test(xhr.responseText));
+
+ // no multi-key fetch allowed when group=false
+ xhr = CouchDB.request("POST", "/test_suite_db/_design/lists/_list/simpleForm/withReduce?group=false", {
+ body: '{"keys":[2,4,5,7]}'
+ });
+ T(xhr.status == 400);
+ T(/query_parse_error/.test(xhr.responseText));
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/rowError/basicView");
+ T(/ReferenceError/.test(xhr.responseText));
+
+
+ // with include_docs and a reference to the doc.
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/docReference/basicView?include_docs=true");
+ T(xhr.responseText.match(/head0tail/));
+
+ // now with extra qs params
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/qsParams/basicView?foo=blam");
+ T(xhr.responseText.match(/blam/));
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/stopIter/basicView");
+ // T(xhr.getResponseHeader("Content-Type") == "text/plain");
+ T(xhr.responseText.match(/^head 0 1 2 tail$/) && "basic stop");
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/stopIter2/basicView", {
+ headers : {
+ "Accept" : "text/html"
+ }
+ });
+ T(xhr.responseText.match(/^head 0 1 2 tail$/) && "stop 2");
+
+ // aborting iteration with reduce
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/stopIter/withReduce?group=true");
+ T(xhr.responseText.match(/^head 0 1 2 tail$/) && "reduce stop");
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/stopIter2/withReduce?group=true", {
+ headers : {
+ "Accept" : "text/html"
+ }
+ });
+ T(xhr.responseText.match(/^head 0 1 2 tail$/) && "reduce stop 2");
+
+ // with accept headers for HTML
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/acceptSwitch/basicView", {
+ headers: {
+ "Accept": 'text/html'
+ }
+ });
+ T(xhr.getResponseHeader("Content-Type") == "text/html; charset=utf-8");
+ T(xhr.responseText.match(/HTML/));
+ T(xhr.responseText.match(/Value/));
+
+ // now with xml
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/acceptSwitch/basicView", {
+ headers: {
+ "Accept": 'application/xml'
+ }
+ });
+ T(xhr.getResponseHeader("Content-Type") == "application/xml");
+ T(xhr.responseText.match(/XML/));
+ T(xhr.responseText.match(/entry/));
+
+ // Test we can run lists and views from separate docs.
+ T(db.save(viewOnlyDesignDoc).ok);
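+ // a view in another design doc is addressed as _list/<listfn>/<ddoc>/<view>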
+ var url = "/test_suite_db/_design/lists/_list/simpleForm/views/basicView" +
+ "?startkey=-3";
+ xhr = CouchDB.request("GET", url);
+ T(xhr.status == 200, "multiple design docs.");
+ T(!(/Key: -4/.test(xhr.responseText)));
+ T(/FirstKey: -3/.test(xhr.responseText));
+ T(/LastKey: 0/.test(xhr.responseText));
+
+ // Test we can do multi-key requests on lists and views in separate docs.
+ var url = "/test_suite_db/_design/lists/_list/simpleForm/views/basicView";
+ xhr = CouchDB.request("POST", url, {
+ body: '{"keys":[-2,-4,-5,-7]}'
+ });
+
+ T(xhr.status == 200, "multi key separate docs");
+ T(!(/Key: -3/.test(xhr.responseText)));
+ T(/Key: -7/.test(xhr.responseText));
+ T(/FirstKey: -2/.test(xhr.responseText));
+ T(/LastKey: -7/.test(xhr.responseText));
+
+ // Test if secObj is available
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/lists/_list/secObj/basicView");
+ T(xhr.status == 200, "standard get should be 200");
+ var resp = JSON.parse(xhr.responseText);
+ T(typeof(resp) == "object");
+
+ var erlViewTest = function() {
+ T(db.save(erlListDoc).ok);
+ var url = "/test_suite_db/_design/erlang/_list/simple/views/basicView" +
+ "?startkey=-3";
+ xhr = CouchDB.request("GET", url);
+ T(xhr.status == 200, "multiple languages in design docs.");
+ var list = JSON.parse(xhr.responseText);
+ T(list.length == 4);
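+ // startkey=-3 yields keys -3..0 in order, so list[i] + 3 == i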
+ for(var i = 0; i < list.length; i++)
+ {
+ T(list[i] + 3 == i);
+ }
+ };
+
+
+
+ run_on_modified_server([{
+ section: "native_query_servers",
+ key: "erlang",
+ value: "{couch_native_process, start_link, []}"
+ }], erlViewTest);
+
+ // COUCHDB-1113
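+ // regression: a GET with a form-urlencoded Content-Type header must still
+ // reach the list function and return 200.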
+ var ddoc = {
+ _id: "_design/test",
+ views: {
+ me: {
+ map: (function(doc) { emit(null,null)}).toString()
+ }
+ },
+ lists: {
+ you: (function(head, req) {
+ var row;
+ while(row = getRow()) {
+ send(row);
+ }
+ }).toString()
+ }
+ };
+ db.save(ddoc);
+
+ var resp = CouchDB.request("GET", "/" + db.name + "/_design/test/_list/you/me", {
+ headers: {
+ "Content-Type": "application/x-www-form-urlencoded"
+ }
+ });
+ TEquals(200, resp.status, "should return a 200 response");
+};
diff --git a/1.1.x/share/www/script/test/lorem.txt b/1.1.x/share/www/script/test/lorem.txt
new file mode 100644
index 00000000..0ef85bab
--- /dev/null
+++ b/1.1.x/share/www/script/test/lorem.txt
@@ -0,0 +1,103 @@
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus nunc sapien, porta id pellentesque at, elementum et felis. Curabitur condimentum ante in metus iaculis quis congue diam commodo. Donec eleifend ante sed nulla dapibus convallis. Ut cursus aliquam neque, vel porttitor tellus interdum ut. Sed pharetra lacinia adipiscing. In tristique tristique felis non tincidunt. Nulla auctor mauris a velit cursus ultricies. In at libero quis justo consectetur laoreet. Nullam id ultrices nunc. Donec non turpis nulla, eu lacinia ante. Nunc eu orci et turpis pretium venenatis. Nam molestie, lacus at dignissim elementum, ante libero consectetur libero, ut lacinia lacus urna et purus. Nullam lorem ipsum, dapibus vel ullamcorper a, malesuada a metus. Sed porta adipiscing magna, quis pulvinar purus mattis fringilla. Integer pellentesque sapien in neque tristique ac iaculis libero ultricies. Ut eget pharetra purus.
+
+Nulla in convallis tellus. Proin tincidunt suscipit vulputate. Suspendisse potenti. Nullam tristique justo mi, a tristique ligula. Duis convallis aliquam iaculis. Nulla dictum fringilla congue. Suspendisse ac leo lectus, ac aliquam justo. Ut porttitor commodo mi sed luctus. Nulla at enim lorem. Nunc eu justo sapien, a blandit odio. Curabitur faucibus sollicitudin dolor, id lacinia sem auctor in. Donec varius nunc at lectus sagittis nec luctus arcu pharetra. Nunc sed metus justo. Cras vel mauris diam. Ut feugiat felis eget neque pharetra vestibulum consectetur massa facilisis. Quisque consectetur luctus nisi quis tincidunt. Vivamus cursus cursus quam non blandit. Pellentesque et velit lacus. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
+
+In et dolor vitae orci adipiscing congue. Aliquam gravida nibh at nisl gravida molestie. Curabitur a bibendum sapien. Aliquam tincidunt, nulla nec pretium lobortis, odio augue tincidunt arcu, a lobortis odio sem ut purus. Donec accumsan mattis nunc vitae lacinia. Suspendisse potenti. Integer commodo nisl quis nibh interdum non fringilla dui sodales. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. In hac habitasse platea dictumst. Etiam ullamcorper, mi id feugiat bibendum, purus neque cursus mauris, id sodales quam nisi id velit. Sed lectus leo, tincidunt vel rhoncus imperdiet, blandit in leo. Integer quis magna nulla. Donec vel nisl magna, ut rhoncus dui. Aliquam gravida, nulla nec eleifend luctus, neque nibh pharetra ante, quis egestas elit metus a mi. Nunc nec augue quam. Morbi tincidunt tristique varius. Suspendisse iaculis elit feugiat magna pellentesque ultricies. Vestibulum aliquam tortor non ante ullamcorper fringilla. Donec iaculis mi quis mauris ornare vestibulum.
+
+In a magna nisi, a ultricies massa. Donec elit neque, viverra non tempor quis, fringilla in metus. Integer odio odio, euismod vitae mollis sed, sodales eget libero. Donec nec massa in felis ornare pharetra at nec tellus. Nunc lorem dolor, pretium vel auctor in, volutpat vitae felis. Maecenas rhoncus, orci vel blandit euismod, turpis erat tincidunt ante, elementum adipiscing nisl urna in nisi. Phasellus sagittis, enim sed accumsan consequat, urna augue lobortis erat, non malesuada quam metus sollicitudin ante. In leo purus, dignissim quis varius vel, pellentesque et nibh. In sed tortor iaculis libero mollis pellentesque id vitae lectus. In hac habitasse platea dictumst. Phasellus mauris enim, posuere eget luctus ac, iaculis et quam. Vivamus et nibh diam, elementum egestas tellus. Aenean vulputate malesuada est. Sed posuere porta diam a sodales. Proin eu sem non velit facilisis venenatis sed a turpis.
+
+Pellentesque sed risus a ante vulputate lobortis sit amet eu nisl. Suspendisse ut eros mi, a rhoncus lacus. Curabitur fermentum vehicula tellus, a ornare mi condimentum vel. Integer molestie volutpat viverra. Integer posuere euismod venenatis. Proin ac mauris sed nulla pharetra porttitor. Duis vel dui in risus sodales auctor sit amet non enim. Maecenas mollis lacus at ligula faucibus sodales. Cras vel neque arcu. Sed tincidunt tortor pretium nisi interdum quis dictum arcu laoreet. Morbi pretium ultrices feugiat. Maecenas convallis augue nec felis malesuada malesuada scelerisque mauris placerat. Sed at magna enim, at fringilla dolor. Quisque ut mattis dui. Praesent consectetur ante viverra nisi blandit pharetra. Quisque metus elit, dignissim vitae fermentum sit amet, fringilla imperdiet odio. Cras eget purus eget tellus feugiat luctus a ac purus. Cras vitae nisl vel augue rhoncus porttitor sit amet quis lorem. Donec interdum pellentesque adipiscing. Phasellus neque libero, aliquam in mattis vitae, consectetur adipiscing nibh.
+
+Donec nec nulla urna, ac sagittis lectus. Suspendisse non elit sed mi auctor facilisis vitae et lectus. Fusce ac vulputate mauris. Morbi condimentum ultrices metus, et accumsan purus malesuada at. Maecenas lobortis ante sed massa dictum vitae venenatis elit commodo. Proin tellus eros, adipiscing sed dignissim vitae, tempor eget ante. Aenean id tellus nec magna cursus pharetra vitae vel enim. Morbi vestibulum pharetra est in vulputate. Aliquam vitae metus arcu, id aliquet nulla. Phasellus ligula est, hendrerit nec iaculis ut, volutpat vel eros. Suspendisse vitae urna turpis, placerat adipiscing diam. Phasellus feugiat vestibulum neque eu dapibus. Nulla facilisi. Duis tortor felis, euismod sit amet aliquet in, volutpat nec turpis. Mauris rhoncus ipsum ut purus eleifend ut lobortis lectus dapibus. Quisque non erat lorem. Vivamus posuere imperdiet iaculis. Ut ligula lacus, eleifend at tempor id, auctor eu leo.
+
+Donec mi enim, laoreet pulvinar mollis eu, malesuada viverra nunc. In vitae metus vitae neque tempor dapibus. Maecenas tincidunt purus a felis aliquam placerat. Nulla facilisi. Suspendisse placerat pharetra mattis. Integer tempor malesuada justo at tempus. Maecenas vehicula lorem a sapien bibendum vel iaculis risus feugiat. Pellentesque diam erat, dapibus et pellentesque quis, molestie ut massa. Vivamus iaculis interdum massa id bibendum. Quisque ut mauris dui, sit amet varius elit. Vestibulum elit lorem, rutrum non consectetur ut, laoreet nec nunc. Donec nec mauris ante. Curabitur ut est sed odio pharetra laoreet. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur purus risus, laoreet sed porta id, sagittis vel ipsum. Maecenas nibh diam, cursus et varius sit amet, fringilla sed magna. Nullam id neque eu leo faucibus mollis. Duis nec adipiscing mauris. Suspendisse sollicitudin, enim eu pulvinar commodo, erat augue ultrices mi, a tristique magna sem non libero.
+
+Sed in metus nulla. Praesent nec adipiscing sapien. Donec laoreet, velit non rutrum vestibulum, ligula neque adipiscing turpis, at auctor sapien elit ut massa. Nullam aliquam, enim vel posuere rutrum, justo erat laoreet est, vel fringilla lacus nisi non lectus. Etiam lectus nunc, laoreet et placerat at, venenatis quis libero. Praesent in placerat elit. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Pellentesque fringilla augue eu nibh placerat dictum. Nunc porttitor tristique diam, eu aliquam enim aliquet vel. Aliquam lacinia interdum ipsum, in posuere metus luctus vel. Vivamus et nisl a eros semper elementum. Donec venenatis orci at diam tristique sollicitudin. In eu eros sed odio rutrum luctus non nec tellus.
+
+Nulla nec felis elit. Nullam in ipsum in ipsum consequat fringilla quis vel tortor. Phasellus non massa nisi, sit amet aliquam urna. Sed fermentum nibh vitae lacus tincidunt nec tincidunt massa bibendum. Etiam elit dui, facilisis sit amet vehicula nec, iaculis at sapien. Ut at massa id dui ultrices volutpat ut ac libero. Fusce ipsum mi, bibendum a lacinia et, pulvinar eget mauris. Proin faucibus urna ut lorem elementum vulputate. Duis quam leo, malesuada non euismod ut, blandit facilisis mauris. Suspendisse sit amet magna id velit tincidunt aliquet nec eu dolor. Curabitur bibendum lorem vel felis tempus dapibus. Aliquam erat volutpat. Aenean cursus tortor nec dui aliquet porta. Aenean commodo iaculis suscipit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Quisque sit amet ornare elit. Nam ligula risus, vestibulum nec mattis in, condimentum ac ante. Donec fringilla, justo et ultrices faucibus, tellus est volutpat massa, vitae commodo sapien diam non risus. Vivamus at arcu gravida purus mollis feugiat.
+
+Nulla a turpis quis sapien commodo dignissim eu quis justo. Maecenas eu lorem odio, ut hendrerit velit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin facilisis porttitor ullamcorper. Praesent mollis dignissim massa, laoreet aliquet velit pellentesque non. Nunc facilisis convallis tristique. Mauris porttitor ante at tellus convallis placerat. Morbi aliquet nisi ac nisl pulvinar id dictum nisl mollis. Sed ornare sem et risus placerat lobortis id eget elit. Integer consequat, magna id suscipit pharetra, nulla velit suscipit orci, ut interdum augue augue quis quam. Fusce pretium aliquet vulputate. Mauris blandit dictum molestie. Proin nulla nibh, bibendum eu placerat at, tincidunt ac nisl. Nullam vulputate metus ut libero rutrum ultricies. Nunc sit amet dui mauris. Suspendisse adipiscing lacus in augue eleifend mollis.
+
+Duis pretium ultrices mattis. Nam euismod risus a erat lacinia bibendum. Morbi massa tortor, consectetur id eleifend id, pellentesque vel tortor. Praesent urna lorem, porttitor at condimentum vitae, luctus eget elit. Maecenas fringilla quam convallis est hendrerit viverra. Etiam vehicula, sapien non pulvinar adipiscing, nisi massa vestibulum est, id interdum mauris velit eu est. Vestibulum est arcu, facilisis at ultricies non, vulputate id sapien. Vestibulum ipsum metus, pharetra nec pellentesque id, facilisis id sapien. Donec rutrum odio et lacus ultricies ullamcorper. Integer sed est ut mi posuere tincidunt quis non leo. Morbi tellus justo, ultricies sit amet ultrices quis, facilisis vitae magna. Donec ligula metus, pellentesque non tristique ac, vestibulum sed erat. Aliquam erat volutpat.
+
+Nam dignissim, nisl eget consequat euismod, sem lectus auctor orci, ut porttitor lacus dui ac neque. In hac habitasse platea dictumst. Fusce egestas porta facilisis. In hac habitasse platea dictumst. Mauris cursus rhoncus risus ac euismod. Quisque vitae risus a tellus venenatis convallis. Curabitur laoreet sapien eu quam luctus lobortis. Vivamus sollicitudin sodales dolor vitae sodales. Suspendisse pharetra laoreet aliquet. Maecenas ullamcorper orci vel tortor luctus iaculis ut vitae metus. Vestibulum ut arcu ac tellus mattis eleifend eget vehicula elit.
+
+In sed feugiat eros. Donec bibendum ullamcorper diam, eu faucibus mauris dictum sed. Duis tincidunt justo in neque accumsan dictum. Maecenas in rutrum sapien. Ut id feugiat lacus. Nulla facilisi. Nunc ac lorem id quam varius cursus a et elit. Aenean posuere libero eu tortor vehicula ut ullamcorper odio consequat. Sed in dignissim dui. Curabitur iaculis tempor quam nec placerat. Aliquam venenatis nibh et justo iaculis lacinia. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque tempus magna sed mi aliquet eget varius odio congue.
+
+Integer sem sem, semper in vestibulum vitae, lobortis quis erat. Duis ante lectus, fermentum sed tempor sit amet, placerat sit amet sem. Mauris congue tincidunt ipsum. Ut viverra, lacus vel varius pharetra, purus enim pulvinar ipsum, non pellentesque enim justo non erat. Fusce ipsum orci, ultrices sed pellentesque at, hendrerit laoreet enim. Nunc blandit mollis pretium. Ut mollis, nulla aliquam sodales vestibulum, libero lorem tempus tortor, a pellentesque nibh elit a ipsum. Phasellus fermentum ligula at neque adipiscing sollicitudin. Suspendisse id ipsum arcu. Sed tincidunt placerat viverra. Donec libero augue, porttitor sit amet varius eget, rutrum nec lacus. Proin blandit orci sit amet diam dictum id porttitor risus iaculis. Integer lacinia feugiat leo, vitae auctor turpis eleifend vel. Suspendisse lorem quam, pretium id bibendum sed, viverra vitae tortor. Nullam ultricies libero eu risus convallis eget ullamcorper nisi elementum. Mauris nulla elit, bibendum id vulputate vitae, imperdiet rutrum lorem. Curabitur eget dignissim orci. Sed semper tellus ipsum, at blandit dui. Integer dapibus facilisis sodales. Vivamus sollicitudin varius est, quis ornare justo cursus id.
+
+Nunc vel ullamcorper mi. Suspendisse potenti. Nunc et urna a augue scelerisque ultrices non quis mi. In quis porttitor elit. Aenean quis erat nulla, a venenatis tellus. Fusce vestibulum nisi sed leo adipiscing dignissim. Nunc interdum, lorem et lacinia vestibulum, quam est mattis magna, sit amet volutpat elit augue at libero. Cras gravida dui quis velit lobortis condimentum et eleifend ligula. Phasellus ac metus quam, id venenatis mi. Aliquam ut turpis ac tellus dapibus dapibus eu in mi. Quisque eget nibh eros. Fusce consectetur leo velit.
+
+Vestibulum semper egestas mauris. Morbi vestibulum sem sem. Aliquam venenatis, felis sed eleifend porta, mauris diam semper arcu, sit amet ultricies est sapien sit amet libero. Vestibulum dui orci, ornare condimentum mollis nec, molestie ac eros. Proin vitae mollis velit. Praesent eget felis mi. Maecenas eu vulputate nisi. Vestibulum varius, arcu in ultricies vestibulum, nibh leo sagittis odio, ut bibendum nisl mi nec diam. Integer at enim feugiat nulla semper bibendum ut a velit. Proin at nisi ut lorem aliquam varius eget quis elit. Nullam nec odio vel lectus congue consequat adipiscing ac mi. Fusce vitae laoreet libero. Curabitur sit amet sem neque, nec posuere enim. Curabitur at massa a sem gravida iaculis nec et nibh. Sed vitae dui vitae leo tincidunt pretium a aliquam erat. Suspendisse ultricies odio at metus tempor in pellentesque arcu ultricies.
+
+Sed aliquam mattis quam, in vulputate sapien ultrices in. Pellentesque quis velit sed dui hendrerit cursus. Pellentesque non nunc lacus, a semper metus. Fusce euismod velit quis diam suscipit consequat. Praesent commodo accumsan neque. Proin viverra, ipsum non tristique ultrices, velit velit facilisis lorem, vel rutrum neque eros ac nisi. Suspendisse felis massa, faucibus in volutpat ac, dapibus et odio. Pellentesque id tellus sit amet risus ultricies ullamcorper non nec sapien. Nam placerat viverra ullamcorper. Nam placerat porttitor sapien nec pulvinar. Curabitur vel odio sit amet odio accumsan aliquet vitae a lectus. Pellentesque lobortis viverra consequat. Mauris elementum cursus nulla, sit amet hendrerit justo dictum sed. Maecenas diam odio, fringilla ac congue quis, adipiscing ut elit.
+
+Aliquam lorem eros, pharetra nec egestas vitae, mattis nec risus. Mauris arcu massa, sodales eget gravida sed, viverra vitae turpis. Ut ligula urna, euismod ac tincidunt eu, faucibus sed felis. Praesent mollis, ipsum quis rhoncus dignissim, odio sem venenatis nulla, at consequat felis augue vel erat. Nam fermentum feugiat volutpat. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Etiam vitae dui in nisi adipiscing ultricies non eu justo. Donec tristique ultricies adipiscing. Nulla sodales, nunc a tristique elementum, erat neque egestas nisl, at hendrerit orci sapien sed libero. Vivamus a mauris turpis, quis laoreet ipsum. Nunc nec mi et nisl pellentesque scelerisque. Vivamus volutpat, justo tristique lacinia condimentum, erat justo ultrices urna, elementum viverra eros augue non libero. Sed mollis mollis arcu, at fermentum diam suscipit quis.
+
+Etiam sit amet nibh justo, posuere volutpat nunc. Morbi pellentesque neque in orci volutpat eu scelerisque lorem dictum. Mauris mollis iaculis est, nec sagittis sapien consequat id. Nunc nec malesuada odio. Duis quis suscipit odio. Mauris purus dui, sodales id mattis sit amet, posuere in arcu. Phasellus porta elementum convallis. Maecenas at orci et mi vulputate sollicitudin in in turpis. Pellentesque cursus adipiscing neque sit amet commodo. Fusce ut mi eu lectus porttitor volutpat et nec felis.
+
+Curabitur scelerisque eros quis nisl viverra vel ultrices velit vestibulum. Sed lobortis pulvinar sapien ac venenatis. Sed ante nibh, rhoncus eget dictum in, mollis ut nisi. Phasellus facilisis mi non lorem tristique non eleifend sem fringilla. Integer ut augue est. In venenatis tincidunt scelerisque. Etiam ante dui, posuere quis malesuada vitae, malesuada a arcu. Aenean faucibus venenatis sapien, ut facilisis nisi blandit vel. Aenean ac lorem eu sem fermentum placerat. Proin neque purus, aliquet ut tincidunt ut, convallis sit amet eros. Phasellus vehicula ullamcorper enim non vehicula. Etiam porta odio ut ipsum adipiscing egestas id a odio. Pellentesque blandit, sapien ut pulvinar interdum, mi nulla hendrerit elit, in tempor diam enim a urna. In tellus odio, ornare sed condimentum a, mattis eu augue.
+
+Fusce hendrerit porttitor euismod. Donec malesuada egestas turpis, et ultricies felis elementum vitae. Nullam in sem nibh. Nullam ultricies hendrerit justo sit amet lobortis. Sed tincidunt, mauris at ornare laoreet, sapien purus elementum elit, nec porttitor nisl purus et erat. Donec felis nisi, rutrum ullamcorper gravida ac, tincidunt sit amet urna. Proin vel justo vitae eros sagittis bibendum a ut nibh. Phasellus sodales laoreet tincidunt. Maecenas odio massa, condimentum id aliquet ut, rhoncus vel lectus. Duis pharetra consectetur sapien. Phasellus posuere ultricies massa, non rhoncus risus aliquam tempus.
+
+Praesent venenatis magna id sem dictum eu vehicula ipsum vulputate. Sed a convallis sapien. Sed justo dolor, rhoncus vel rutrum mattis, sollicitudin ut risus. Nullam sit amet convallis est. Etiam non tincidunt ligula. Fusce suscipit pretium elit at ullamcorper. Quisque sollicitudin, diam id interdum porta, metus ipsum volutpat libero, id venenatis felis orci non velit. Suspendisse potenti. Mauris rutrum, tortor sit amet pellentesque tincidunt, erat quam ultricies odio, id aliquam elit leo nec leo. Pellentesque justo eros, rutrum at feugiat nec, porta et tellus. Aenean eget metus lectus.
+
+Praesent euismod, turpis quis laoreet consequat, neque ante imperdiet quam, ac semper tortor nibh in nulla. Integer scelerisque eros vehicula urna lacinia ac facilisis mauris accumsan. Phasellus at mauris nibh. Curabitur enim ante, rutrum sed adipiscing hendrerit, pellentesque non augue. In hac habitasse platea dictumst. Nam tempus euismod massa a dictum. Donec sit amet justo ac diam ultricies ultricies. Sed tincidunt erat quis quam tempus vel interdum erat rhoncus. In hac habitasse platea dictumst. Vestibulum vehicula varius sem eget interdum. Cras bibendum leo nec felis venenatis sed pharetra sem feugiat. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Sed quam orci, mollis eget sagittis accumsan, vulputate sit amet dui. Praesent eu elementum arcu.
+
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum nisl metus, hendrerit ut laoreet sed, consectetur at purus. Duis interdum congue lobortis. Nullam sed massa porta felis eleifend consequat sit amet nec metus. Aliquam placerat dictum erat at eleifend. Vestibulum libero ante, ullamcorper a porttitor suscipit, accumsan vel nisi. Donec et magna neque. Nam elementum ultrices justo, eget sollicitudin sapien imperdiet eget. Nullam auctor dictum nunc, at feugiat odio vestibulum a. Sed erat nulla, viverra hendrerit commodo id, ullamcorper ac orci. Phasellus pellentesque feugiat suscipit. Etiam egestas fermentum enim. Etiam gravida interdum tellus ac laoreet. Morbi mattis aliquet eros, non tempor erat ullamcorper in. Etiam pulvinar interdum turpis ac vehicula. Sed quam justo, accumsan id consectetur a, aliquet sed leo. Aenean vitae blandit mauris.
+
+In sed eros augue, non rutrum odio. Etiam vitae dui neque, in tristique massa. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Maecenas dictum elit at lectus tempor non pharetra nisl hendrerit. Sed sed quam eu lectus ultrices malesuada tincidunt a est. Nam vel eros risus. Maecenas eros elit, blandit fermentum tempor eget, lobortis id diam. Vestibulum lacinia lacus vitae magna volutpat eu dignissim eros convallis. Vivamus ac velit tellus, a congue neque. Integer mi nulla, varius non luctus in, dictum sit amet sem. Ut laoreet, sapien sit amet scelerisque porta, purus sapien vestibulum nibh, sed luctus libero massa ac elit. Donec iaculis odio eget odio sagittis nec venenatis lorem blandit.
+
+Aliquam imperdiet tellus posuere justo vehicula sed vestibulum ante tristique. Fusce feugiat faucibus purus nec molestie. Nulla tempor neque id magna iaculis quis sollicitudin eros semper. Praesent viverra sagittis luctus. Morbi sit amet magna sed odio gravida varius. Ut nisi libero, vulputate feugiat pretium tempus, egestas sit amet justo. Pellentesque consequat tempor nisi in lobortis. Sed fermentum convallis dui ac sollicitudin. Integer auctor augue eget tellus tempus fringilla. Proin nec dolor sapien, nec tristique nibh. Aliquam a velit at mi mattis aliquet.
+
+Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Aliquam ultrices erat non turpis auctor id ornare mauris sagittis. Quisque porttitor, tellus ut convallis sagittis, mi libero feugiat tellus, rhoncus placerat ipsum tortor id risus. Donec tincidunt feugiat leo. Cras id mi neque, eu malesuada eros. Ut molestie magna quis libero placerat malesuada. Aliquam erat volutpat. Aliquam non mauris lorem, in adipiscing metus. Donec eget ipsum in elit commodo ornare bibendum a nibh. Vivamus odio erat, placerat ac vestibulum eget, malesuada ut nisi. Etiam suscipit sollicitudin leo semper sollicitudin. Sed rhoncus risus sit amet sem eleifend dictum pretium sapien egestas. Nulla at urna nunc, vel aliquet leo. Praesent ultricies, mi eu pretium lobortis, erat nibh euismod leo, sit amet gravida sapien eros et turpis. Donec lacinia venenatis lectus, non lacinia mi hendrerit sit amet. Integer sed felis vel orci aliquam pulvinar. Phasellus et risus id erat euismod tincidunt. Sed luctus tempor nisi, nec tempor ipsum elementum eget. Integer nisl tortor, viverra in dapibus at, mattis ac erat. Curabitur nec dui lectus.
+
+Phasellus suscipit, tortor eu varius fringilla, sapien magna egestas risus, ut suscipit dui mauris quis velit. Cras a sapien quis sapien hendrerit tristique a sit amet elit. Pellentesque dui arcu, malesuada et sodales sit amet, dapibus vel quam. Sed non adipiscing ligula. Ut vulputate purus at nisl posuere sodales. Maecenas diam velit, tincidunt id mattis eu, aliquam ac nisi. Maecenas pretium, augue a sagittis suscipit, leo ligula eleifend dolor, mollis feugiat odio augue non eros. Pellentesque scelerisque orci pretium quam mollis at lobortis dui facilisis. Morbi congue metus id tortor porta fringilla. Sed lorem mi, molestie fermentum sagittis at, gravida a nisi. Donec eu vestibulum velit. In viverra, enim eu elementum sodales, enim odio dapibus urna, eget commodo nisl mauris ut odio. Curabitur nec enim nulla. In nec elit ipsum. Nunc in massa suscipit magna elementum faucibus in nec ipsum. Nullam suscipit malesuada elementum. Etiam sed mi in nibh ultricies venenatis nec pharetra magna. In purus ante, rhoncus vel placerat sed, fermentum sit amet dui. Sed at sodales velit.
+
+Duis suscipit pellentesque pellentesque. Praesent porta lobortis cursus. Quisque sagittis velit non tellus bibendum at sollicitudin lacus aliquet. Sed nibh risus, blandit a aliquet eget, vehicula et est. Suspendisse facilisis bibendum aliquam. Fusce consectetur convallis erat, eget mollis diam fermentum sollicitudin. Quisque tincidunt porttitor pretium. Nullam id nisl et urna vulputate dapibus. Donec quis lorem urna. Quisque id justo nec nunc blandit convallis. Nunc volutpat, massa sollicitudin adipiscing vestibulum, massa urna congue lectus, sit amet ultricies augue orci convallis turpis. Nulla at lorem elit. Nunc tristique, quam facilisis commodo porttitor, lacus ligula accumsan nisi, et laoreet justo ante vitae eros. Curabitur sed augue arcu. Phasellus porttitor vestibulum felis, ut consectetur arcu tempor non. In justo risus, semper et suscipit id, ullamcorper at urna. Quisque tincidunt, urna nec aliquam tristique, nibh odio faucibus augue, in ornare enim turpis accumsan dolor. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Suspendisse sodales varius turpis eu fermentum.
+
+Morbi ultricies diam eget massa posuere lobortis. Aliquam volutpat pellentesque enim eu porttitor. Donec lacus felis, consectetur a pretium vitae, bibendum non enim. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Etiam ut nibh a quam pellentesque auctor ut id velit. Duis lacinia justo eget mi placerat bibendum. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec velit tortor, tempus nec tristique id, aliquet sit amet turpis. Praesent et neque nec magna porta fringilla. Morbi id egestas eros. Donec semper tincidunt ullamcorper. Phasellus tempus lacinia hendrerit. Quisque faucibus pretium neque non convallis. Nunc malesuada accumsan rhoncus. Cras lobortis, sem sed fringilla convallis, augue velit semper nisl, commodo varius nisi diam ac leo.
+
+Quisque interdum tellus ac ante posuere ut cursus lorem egestas. Nulla facilisi. Aenean sed massa nec nisi scelerisque vulputate. Etiam convallis consectetur iaculis. Maecenas ac purus ut ante dignissim auctor ac quis lorem. Pellentesque suscipit tincidunt orci. Fusce aliquam dapibus orci, at bibendum ipsum adipiscing eget. Morbi pellentesque hendrerit quam, nec placerat urna vulputate sed. Quisque vel diam lorem. Praesent id diam quis enim elementum rhoncus sagittis eget purus. Quisque fringilla bibendum leo in laoreet. Vestibulum id nibh risus, non elementum metus. Ut a felis diam, non mollis nisl. Cras elit ante, ullamcorper quis iaculis eu, sodales vel est. Curabitur quis lobortis dolor. Aliquam mattis gravida metus pellentesque vulputate.
+
+Ut id augue id dolor luctus euismod et quis velit. Maecenas enim dolor, tempus sit amet hendrerit eu, faucibus vitae neque. Proin sit amet varius elit. Proin varius felis ullamcorper purus dignissim consequat. Cras cursus tempus eros. Nunc ultrices venenatis ullamcorper. Aliquam et feugiat tellus. Phasellus sit amet vestibulum elit. Phasellus ac purus lacus, et accumsan eros. Morbi ultrices, purus a porta sodales, odio metus posuere neque, nec elementum risus turpis sit amet magna. Sed est quam, ultricies at congue adipiscing, lobortis in justo. Proin iaculis dictum nunc, eu laoreet quam varius vitae. Donec sit amet feugiat turpis. Mauris sit amet magna quam, ac consectetur dui. Curabitur eget magna tellus, eu pharetra felis. Donec sit amet tortor nisl. Aliquam et tortor facilisis lacus tincidunt commodo. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Curabitur nunc magna, ultricies id convallis at, ullamcorper vitae massa.
+
+Phasellus viverra iaculis placerat. Nulla consequat dolor sit amet erat dignissim posuere. Nulla lacinia augue vitae mi tempor gravida. Phasellus non tempor tellus. Quisque non enim semper tortor sagittis facilisis. Aliquam urna felis, egestas at posuere nec, aliquet eu nibh. Praesent sed vestibulum enim. Mauris iaculis velit dui, et fringilla enim. Nulla nec nisi orci. Sed volutpat, justo eget fringilla adipiscing, nisl nulla condimentum libero, sed sodales est est et odio. Cras ipsum dui, varius eu elementum consequat, faucibus in leo. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
+
+Ut malesuada molestie eleifend. Curabitur id enim dui, eu tincidunt nibh. Mauris sit amet ante leo. Duis turpis ipsum, bibendum sed mattis sit amet, accumsan quis dolor. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Aenean a imperdiet metus. Quisque sollicitudin felis id neque tempor scelerisque. Donec at orci felis. Vivamus tempus convallis auctor. Donec interdum euismod lobortis. Sed at lacus nec odio dignissim mollis. Sed sapien orci, porttitor tempus accumsan vel, tincidunt nec ante. Nunc rhoncus egestas dapibus. Suspendisse fermentum dictum fringilla. Nullam nisi justo, eleifend a consectetur convallis, porttitor et tortor. Proin vitae lorem non dolor suscipit lacinia eu eget nulla.
+
+Suspendisse egestas, sapien sit amet blandit scelerisque, nulla arcu tristique dui, a porta justo quam vitae arcu. In metus libero, bibendum non volutpat ut, laoreet vel turpis. Nunc faucibus velit eu ipsum commodo nec iaculis eros volutpat. Vivamus congue auctor elit sed suscipit. Duis commodo, libero eu vestibulum feugiat, leo mi dapibus tellus, in placerat nisl dui at est. Vestibulum viverra tristique lorem, ornare egestas erat rutrum a. Nullam at augue massa, ut consectetur ipsum. Pellentesque malesuada, velit ut lobortis sagittis, nisi massa semper odio, malesuada semper purus nisl vel lectus. Nunc dui sem, mattis vitae laoreet vitae, sollicitudin ac leo. Nulla vel fermentum est.
+
+Vivamus in odio a nisi dignissim rhoncus in in lacus. Donec et nisl tortor. Donec sagittis consequat mi, vel placerat tellus convallis id. Aliquam facilisis rutrum nisl sed pretium. Donec et lacinia nisl. Aliquam erat volutpat. Curabitur ac pulvinar tellus. Nullam varius lobortis porta. Cras dapibus, ligula ut porta ultricies, leo lacus viverra purus, quis mollis urna risus eu leo. Nunc malesuada consectetur purus, vel auctor lectus scelerisque posuere. Maecenas dui massa, vestibulum bibendum blandit non, interdum eget mauris. Phasellus est ante, pulvinar at imperdiet quis, imperdiet vel urna. Quisque eget volutpat orci. Quisque et arcu purus, ut faucibus velit.
+
+Praesent sed ipsum urna. Praesent sagittis varius magna, id commodo dolor malesuada ac. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Quisque sit amet nunc eu sem ornare tempor. Mauris id dolor nec erat convallis porta in lobortis nisi. Curabitur hendrerit rhoncus tortor eu hendrerit. Pellentesque eu ante vel elit luctus eleifend quis viverra nulla. Suspendisse odio diam, euismod eu porttitor molestie, sollicitudin sit amet nulla. Sed ante urna, dictum bibendum rhoncus et, blandit nec ante. Suspendisse tortor augue, accumsan quis suscipit id, accumsan sit amet erat. Donec pharetra varius lobortis. Maecenas ipsum diam, faucibus eu tempus id, convallis nec enim. Duis arcu turpis, fringilla nec egestas ut, dignissim tristique nulla. Curabitur suscipit dui non justo ultrices pharetra. Aliquam erat volutpat. Nulla facilisi. Quisque id felis eu sem aliquam fringilla.
+
+Etiam quis augue in tellus consequat eleifend. Aenean dignissim congue felis id elementum. Duis fringilla varius ipsum, nec suscipit leo semper vel. Ut sollicitudin, orci a tincidunt accumsan, diam lectus laoreet lacus, vel fermentum quam est vel eros. Aliquam fringilla sapien ac sapien faucibus convallis. Aliquam id nunc eu justo consequat tincidunt. Quisque nec nisl dui. Phasellus augue lectus, varius vitae auctor vel, rutrum at risus. Vivamus lacinia leo quis neque ultrices nec elementum felis fringilla. Proin vel porttitor lectus.
+
+Curabitur sapien lorem, mollis ut accumsan non, ultricies et metus. Curabitur vel lorem quis sapien fringilla laoreet. Morbi id urna ac orci elementum blandit eget volutpat neque. Pellentesque sem odio, iaculis eu pharetra vitae, cursus in quam. Nulla molestie ligula id massa luctus et pulvinar nisi pulvinar. Nunc fermentum augue a lacus fringilla rhoncus porttitor erat dictum. Nunc sit amet tellus et dui viverra auctor euismod at nisl. In sed congue magna. Proin et tortor ut augue placerat dignissim a eu justo. Morbi porttitor porta lobortis. Pellentesque nibh lacus, adipiscing ut tristique quis, consequat vitae velit. Maecenas ut luctus libero. Vivamus auctor odio et erat semper sagittis. Vivamus interdum velit in risus mattis quis dictum ante rhoncus. In sagittis porttitor eros, at lobortis metus ultrices vel. Curabitur non aliquam nisl. Vestibulum luctus feugiat suscipit. Etiam non lacus vel nulla egestas iaculis id quis risus.
+
+Etiam in auctor urna. Fusce ultricies molestie convallis. In hac habitasse platea dictumst. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Mauris iaculis lorem faucibus purus gravida at convallis turpis sollicitudin. Suspendisse at velit lorem, a fermentum ipsum. Etiam condimentum, dui vel condimentum elementum, sapien sem blandit sapien, et pharetra leo neque et lectus. Nunc viverra urna iaculis augue ultrices ac porttitor lacus dignissim. Aliquam ut turpis dui. Sed eget aliquet felis. In bibendum nibh sit amet sapien accumsan accumsan pharetra magna molestie.
+
+Mauris aliquet urna eget lectus adipiscing at congue turpis consequat. Vivamus tincidunt fermentum risus et feugiat. Nulla molestie ullamcorper nibh sed facilisis. Phasellus et cursus purus. Nam cursus, dui dictum ultrices viverra, erat risus varius elit, eu molestie dui eros quis quam. Aliquam et ante neque, ac consectetur dui. Donec condimentum erat id elit dictum sed accumsan leo sagittis. Proin consequat congue risus, vel tincidunt leo imperdiet eu. Vestibulum malesuada turpis eu metus imperdiet pretium. Aliquam condimentum ultrices nibh, eu semper enim eleifend a. Etiam condimentum nisl quam.
+
+Pellentesque id molestie nisl. Maecenas et lectus at justo molestie viverra sit amet sit amet ligula. Nullam non porttitor magna. Quisque elementum arcu cursus tortor rutrum lobortis. Morbi sit amet lectus vitae enim euismod dignissim eget at neque. Vivamus consequat vehicula dui, vitae auctor augue dignissim in. In tempus sem quis justo tincidunt sit amet auctor turpis lobortis. Pellentesque non est nunc. Vestibulum mollis fringilla interdum. Maecenas ipsum dolor, pharetra id tristique mattis, luctus vitae urna. Ut ullamcorper arcu eget elit convallis mollis. Pellentesque condimentum, massa ac hendrerit tempor, mauris purus blandit justo, et pharetra leo justo a est. Duis arcu augue, facilisis vel dignissim sed, aliquam quis magna. Quisque non consequat dolor. Suspendisse a ultrices leo.
+
+Donec vitae pretium nibh. Maecenas bibendum bibendum diam in placerat. Ut accumsan, mi vitae vestibulum euismod, nunc justo vulputate nisi, non placerat mi urna et diam. Maecenas malesuada lorem ut arcu mattis mollis. Nulla facilisi. Donec est leo, bibendum eu pulvinar in, cursus vel metus. Aliquam erat volutpat. Nullam feugiat porttitor neque in vulputate. Quisque nec mi eu magna consequat cursus non at arcu. Etiam risus metus, sollicitudin et ultrices at, tincidunt sed nunc. Sed eget scelerisque augue. Ut fringilla venenatis sem non eleifend. Nunc mattis, risus sit amet vulputate varius, risus justo egestas mauris, id interdum odio ipsum et nisl. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi id erat odio, nec pulvinar enim.
+
+Curabitur ac fermentum quam. Morbi eu eros sapien, vitae tempus dolor. Mauris vestibulum blandit enim ut venenatis. Aliquam egestas, eros at consectetur tincidunt, lorem augue iaculis est, nec mollis felis arcu in nunc. Sed in odio sed libero pellentesque volutpat vitae a ante. Morbi commodo volutpat tellus, ut viverra purus placerat fermentum. Integer iaculis facilisis arcu, at gravida lorem bibendum at. Aenean id eros eget est sagittis convallis sed et dui. Donec eu pulvinar tellus. Nunc dignissim rhoncus tellus, at pellentesque metus luctus at. Sed ornare aliquam diam, a porttitor leo sollicitudin sed. Nam vitae lectus lacus. Integer adipiscing quam neque, blandit posuere libero. Sed libero nunc, egestas sodales tempus sed, cursus blandit tellus. Vestibulum mi purus, ultricies quis placerat vel, molestie at dui.
+
+Nulla commodo odio justo. Pellentesque non ornare diam. In consectetur sapien ac nunc sagittis malesuada. Morbi ullamcorper tempor erat nec rutrum. Duis ut commodo justo. Cras est orci, consectetur sed interdum sed, scelerisque sit amet nulla. Vestibulum justo nulla, pellentesque a tempus et, dapibus et arcu. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique, eros nec congue adipiscing, ligula sem rhoncus felis, at ornare tellus mauris ac risus. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin mauris dui, tempor fermentum dictum et, cursus a leo. Maecenas nec nisl a tellus pellentesque rhoncus. Nullam ultrices euismod dui eu congue.
+
+In nec tempor risus. In faucibus nisi eget diam dignissim consequat. Donec pulvinar ante nec enim mattis rutrum. Vestibulum leo augue, molestie nec dapibus in, dictum at enim. Integer aliquam, lorem eu vulputate lacinia, mi orci tempor enim, eget mattis ligula magna a magna. Praesent sed erat ut tortor interdum viverra. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla facilisi. Maecenas sit amet lectus lacus. Nunc vitae purus id ligula laoreet condimentum. Duis auctor tortor vel dui pulvinar a facilisis arcu dignissim. In hac habitasse platea dictumst. Donec sollicitudin pellentesque egestas. Sed sed sem justo. Maecenas laoreet hendrerit mauris, ut porttitor lorem iaculis ac. Quisque molestie sem quis lorem tempor rutrum. Phasellus nibh mauris, rhoncus in consectetur non, aliquet eu massa.
+
+Curabitur velit arcu, pretium porta placerat quis, varius ut metus. Vestibulum vulputate tincidunt justo, vitae porttitor lectus imperdiet sit amet. Vivamus enim dolor, sollicitudin ut semper non, ornare ornare dui. Aliquam tempor fermentum sapien eget condimentum. Curabitur laoreet bibendum ante, in euismod lacus lacinia eu. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Suspendisse potenti. Sed at libero eu tortor tempus scelerisque. Nulla facilisi. Nullam vitae neque id justo viverra rhoncus pretium at libero. Etiam est urna, aliquam vel pulvinar non, ornare vel purus.
+
+Nulla varius, nisi eget condimentum semper, metus est dictum odio, vel mattis risus est sed velit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nunc non est nec tellus ultricies mattis ut eget velit. Integer condimentum ante id lorem blandit lacinia. Donec vel tortor augue, in condimentum nisi. Pellentesque pellentesque nulla ut nulla porttitor quis sodales enim rutrum. Sed augue risus, euismod a aliquet at, vulputate non libero. Nullam nibh odio, dignissim fermentum pulvinar ac, congue eu mi. Duis tincidunt, nibh id venenatis placerat, diam turpis gravida leo, sit amet mollis massa dolor quis mauris. Vivamus scelerisque sodales arcu et dapibus. Suspendisse potenti. Cras quis tellus arcu, quis laoreet sem. Fusce porttitor, sapien vel tristique sodales, velit leo porta arcu, quis pellentesque nunc metus non odio. Nam arcu libero, ullamcorper ut pharetra non, dignissim et velit. Quisque dolor lorem, vehicula sit amet scelerisque in, varius at nulla. Pellentesque vitae sem eget tortor iaculis pulvinar. Sed nunc justo, euismod gravida pulvinar eget, gravida eget turpis. Cras vel dictum nisi. Nullam nulla libero, gravida sit amet aliquam quis, commodo vitae odio. Cras vitae nibh nec dui placerat semper.
+
+Vivamus at fringilla eros. Vivamus at nisl id massa commodo feugiat quis non massa. Morbi tellus urna, auctor sit amet elementum sed, rutrum non lectus. Nulla feugiat dui in sapien ornare et imperdiet est ornare. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Vestibulum semper rutrum tempor. Sed in felis nibh, sed aliquam enim. Curabitur ut quam scelerisque velit placerat dictum. Donec eleifend vehicula purus, eu vestibulum sapien rutrum eu. Vivamus in odio vel est vulputate iaculis. Nunc rutrum feugiat pretium.
+
+Maecenas ipsum neque, auctor quis lacinia vitae, euismod ac orci. Donec molestie massa consequat est porta ac porta purus tincidunt. Nam bibendum leo nec lacus mollis non condimentum dolor rhoncus. Nulla ac volutpat lorem. Nullam erat purus, convallis eget commodo id, varius quis augue. Nullam aliquam egestas mi, vel suscipit nisl mattis consequat. Quisque vel egestas sapien. Nunc lorem velit, convallis nec laoreet et, aliquet eget massa. Nam et nibh ac dui vehicula aliquam quis eu augue. Cras vel magna ut elit rhoncus interdum iaculis volutpat nisl. Suspendisse arcu lorem, varius rhoncus tempor id, pulvinar sed tortor. Pellentesque ultricies laoreet odio ac dignissim. Aliquam diam arcu, placerat quis egestas eget, facilisis eu nunc. Mauris vulputate, nisl sit amet mollis interdum, risus tortor ornare orci, sed egestas orci eros non diam. Vestibulum hendrerit, metus quis placerat pellentesque, enim purus faucibus dui, sit amet ultricies lectus ipsum id lorem. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Praesent eget diam odio, eu bibendum elit. In vestibulum orci eu erat tincidunt tristique.
+
+Cras consectetur ante eu turpis placerat sollicitudin. Mauris et lacus tortor, eget pharetra velit. Donec accumsan ultrices tempor. Donec at nibh a elit condimentum dapibus. Integer sit amet vulputate ante. Suspendisse potenti. In sodales laoreet massa vitae lacinia. Morbi vel lacus feugiat arcu vulputate molestie. Aliquam massa magna, ullamcorper accumsan gravida quis, rhoncus pulvinar nulla. Praesent sit amet ipsum diam, sit amet lacinia neque. In et sapien augue. Etiam enim elit, ultrices vel rutrum id, scelerisque non enim.
+
+Proin et egestas neque. Praesent et ipsum dolor. Nunc non varius nisl. Fusce in tortor nisi. Maecenas convallis neque in ligula blandit quis vehicula leo mollis. Pellentesque sagittis blandit leo, dapibus pellentesque leo ultrices ac. Curabitur ac egestas libero. Donec pretium pharetra pretium. Fusce imperdiet, turpis eu aliquam porta, ante elit eleifend risus, luctus auctor arcu ante ut nunc. Vivamus in leo felis, vitae eleifend lacus. Donec tempus aliquam purus porttitor tristique. Suspendisse diam neque, suscipit feugiat fringilla non, eleifend sit nullam.
diff --git a/1.1.x/share/www/script/test/lorem_b64.txt b/1.1.x/share/www/script/test/lorem_b64.txt
new file mode 100644
index 00000000..8a21d79e
--- /dev/null
+++ b/1.1.x/share/www/script/test/lorem_b64.txt
@@ -0,0 +1 @@
+TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gUGhhc2VsbHVzIG51bmMgc2FwaWVuLCBwb3J0YSBpZCBwZWxsZW50ZXNxdWUgYXQsIGVsZW1lbnR1bSBldCBmZWxpcy4gQ3VyYWJpdHVyIGNvbmRpbWVudHVtIGFudGUgaW4gbWV0dXMgaWFjdWxpcyBxdWlzIGNvbmd1ZSBkaWFtIGNvbW1vZG8uIERvbmVjIGVsZWlmZW5kIGFudGUgc2VkIG51bGxhIGRhcGlidXMgY29udmFsbGlzLiBVdCBjdXJzdXMgYWxpcXVhbSBuZXF1ZSwgdmVsIHBvcnR0aXRvciB0ZWxsdXMgaW50ZXJkdW0gdXQuIFNlZCBwaGFyZXRyYSBsYWNpbmlhIGFkaXBpc2NpbmcuIEluIHRyaXN0aXF1ZSB0cmlzdGlxdWUgZmVsaXMgbm9uIHRpbmNpZHVudC4gTnVsbGEgYXVjdG9yIG1hdXJpcyBhIHZlbGl0IGN1cnN1cyB1bHRyaWNpZXMuIEluIGF0IGxpYmVybyBxdWlzIGp1c3RvIGNvbnNlY3RldHVyIGxhb3JlZXQuIE51bGxhbSBpZCB1bHRyaWNlcyBudW5jLiBEb25lYyBub24gdHVycGlzIG51bGxhLCBldSBsYWNpbmlhIGFudGUuIE51bmMgZXUgb3JjaSBldCB0dXJwaXMgcHJldGl1bSB2ZW5lbmF0aXMuIE5hbSBtb2xlc3RpZSwgbGFjdXMgYXQgZGlnbmlzc2ltIGVsZW1lbnR1bSwgYW50ZSBsaWJlcm8gY29uc2VjdGV0dXIgbGliZXJvLCB1dCBsYWNpbmlhIGxhY3VzIHVybmEgZXQgcHVydXMuIE51bGxhbSBsb3JlbSBpcHN1bSwgZGFwaWJ1cyB2ZWwgdWxsYW1jb3JwZXIgYSwgbWFsZXN1YWRhIGEgbWV0dXMuIFNlZCBwb3J0YSBhZGlwaXNjaW5nIG1hZ25hLCBxdWlzIHB1bHZpbmFyIHB1cnVzIG1hdHRpcyBmcmluZ2lsbGEuIEludGVnZXIgcGVsbGVudGVzcXVlIHNhcGllbiBpbiBuZXF1ZSB0cmlzdGlxdWUgYWMgaWFjdWxpcyBsaWJlcm8gdWx0cmljaWVzLiBVdCBlZ2V0IHBoYXJldHJhIHB1cnVzLgoKTnVsbGEgaW4gY29udmFsbGlzIHRlbGx1cy4gUHJvaW4gdGluY2lkdW50IHN1c2NpcGl0IHZ1bHB1dGF0ZS4gU3VzcGVuZGlzc2UgcG90ZW50aS4gTnVsbGFtIHRyaXN0aXF1ZSBqdXN0byBtaSwgYSB0cmlzdGlxdWUgbGlndWxhLiBEdWlzIGNvbnZhbGxpcyBhbGlxdWFtIGlhY3VsaXMuIE51bGxhIGRpY3R1bSBmcmluZ2lsbGEgY29uZ3VlLiBTdXNwZW5kaXNzZSBhYyBsZW8gbGVjdHVzLCBhYyBhbGlxdWFtIGp1c3RvLiBVdCBwb3J0dGl0b3IgY29tbW9kbyBtaSBzZWQgbHVjdHVzLiBOdWxsYSBhdCBlbmltIGxvcmVtLiBOdW5jIGV1IGp1c3RvIHNhcGllbiwgYSBibGFuZGl0IG9kaW8uIEN1cmFiaXR1ciBmYXVjaWJ1cyBzb2xsaWNpdHVkaW4gZG9sb3IsIGlkIGxhY2luaWEgc2VtIGF1Y3RvciBpbi4gRG9uZWMgdmFyaXVzIG51bmMgYXQgbGVjdHVzIHNhZ2l0dGlzIG5lYyBsdWN0dXMgYXJjdSBwaGFyZXRyYS4gTnVuYyBzZWQgbWV0dXMganVzdG8uIENyYXMgdmVsIG1hdXJpcyBkaWFtLiBVdCBmZXVnaWF0IGZlbGlzIGVnZXQgbmVxdWUgcGhhcmV0cmEgdmVzdGlidWx1bSBjb25zZWN0ZXR1ciBtYXNzYSBmYWNpbGlzaXMuIFF1aXNxdWUgY29uc2VjdGV0dXIgbHVjdHVzIG5pc2kgcXVpcyB0aW5jaWR1bnQuIFZpdmFtdXMgY3Vyc3VzIGN1cnN1cyBxdWFtIG5vbiBibGFuZGl0LiBQZWxsZW50ZXNxdWUgZXQgdmVsaXQgbGFjdXMuIFBlbGxlbnRlc3F1ZSBoYWJpdGFudCBtb3JiaSB0cmlzdGlxdWUgc2VuZWN0dXMgZXQgbmV0dXMgZXQgbWFsZXN1YWRhIGZhbWVzIGFjIHR1cnBpcyBlZ2VzdGFzLgoKSW4gZXQgZG9sb3Igdml0YWUgb3JjaSBhZGlwaXNjaW5nIGNvbmd1ZS4gQWxpcXVhbSBncmF2aWRhIG5pYmggYXQgbmlzbCBncmF2aWRhIG1vbGVzdGllLiBDdXJhYml0dXIgYSBiaWJlbmR1bSBzYXBpZW4uIEFsaXF1YW0gdGluY2lkdW50LCBudWxsYSBuZWMgcHJldGl1bSBsb2JvcnRpcywgb2RpbyBhdWd1ZSB0aW5jaWR1bnQgYXJjdSwgYSBsb2JvcnRpcyBvZGlvIHNlbSB1dCBwdXJ1cy4gRG9uZWMgYWNjdW1zYW4gbWF0dGlzIG51bmMgdml0YWUgbGFjaW5pYS4gU3VzcGVuZGlzc2UgcG90ZW50aS4gSW50ZWdlciBjb21tb2RvIG5pc2wgcXVpcyBuaWJoIGludGVyZHVtIG5vbiBmcmluZ2lsbGEgZHVpIHNvZGFsZXMuIENsYXNzIGFwdGVudCB0YWNpdGkgc29jaW9zcXUgYWQgbGl0b3JhIHRvcnF1ZW50IHBlciBjb251YmlhIG5vc3RyYSwgcGVyIGluY2VwdG9zIGhpbWVuYWVvcy4gSW4gaGFjIGhhYml0YXNzZSBwbGF0ZWEgZGljdHVtc3QuIEV0aWFtIHVsbGFtY29ycGVyLCBtaSBpZCBmZXVnaWF0IGJpYmVuZHVtLCBwdXJ1cyBuZXF1ZSBjdXJzdXMgbWF1cmlzLCBpZCBzb2RhbGVzIHF1YW0gbmlzaSBpZCB2ZWxpdC4gU2VkIGxlY3R1cyBsZW8sIHRpbmNpZHVudCB2ZWwgcmhvbmN1cyBpbXBlcmRpZXQsIGJsYW5kaXQgaW4gbGVvLiBJbnRlZ2VyIHF1aXMgbWFnbmEgbnVsbGEuIERvbmVjIHZlbCBuaXNsIG1hZ25hLCB1dCByaG9uY3VzIGR1aS4gQWxpcXVhbSBncmF2aWRhLCBudWxsYSBuZWMgZWxlaWZlbmQgbHVjdHVzLCBuZXF1ZSBuaWJoIHBoYXJldHJhIGFudGUsIHF1aXMgZWdlc3RhcyBlbGl0IG1ldHVzIGEgbWkuIE51bmMgbmVjIGF1Z3VlIHF1YW0uIE1vcmJpIHRpbmNpZHVudCB0cmlzdGlxdWUgdmFyaXVzLiBTdXNwZW5kaXNzZSBpYWN1bGlzIGVsaXQgZmV1Z2lhdCBtYWduYSBwZWxsZW50ZXNxdWUgdWx0cmljaWVzLiBWZXN0aWJ1bHVtIGFsaXF1YW0gdG9ydG
9yIG5vbiBhbnRlIHVsbGFtY29ycGVyIGZyaW5naWxsYS4gRG9uZWMgaWFjdWxpcyBtaSBxdWlzIG1hdXJpcyBvcm5hcmUgdmVzdGlidWx1bS4KCkluIGEgbWFnbmEgbmlzaSwgYSB1bHRyaWNpZXMgbWFzc2EuIERvbmVjIGVsaXQgbmVxdWUsIHZpdmVycmEgbm9uIHRlbXBvciBxdWlzLCBmcmluZ2lsbGEgaW4gbWV0dXMuIEludGVnZXIgb2RpbyBvZGlvLCBldWlzbW9kIHZpdGFlIG1vbGxpcyBzZWQsIHNvZGFsZXMgZWdldCBsaWJlcm8uIERvbmVjIG5lYyBtYXNzYSBpbiBmZWxpcyBvcm5hcmUgcGhhcmV0cmEgYXQgbmVjIHRlbGx1cy4gTnVuYyBsb3JlbSBkb2xvciwgcHJldGl1bSB2ZWwgYXVjdG9yIGluLCB2b2x1dHBhdCB2aXRhZSBmZWxpcy4gTWFlY2VuYXMgcmhvbmN1cywgb3JjaSB2ZWwgYmxhbmRpdCBldWlzbW9kLCB0dXJwaXMgZXJhdCB0aW5jaWR1bnQgYW50ZSwgZWxlbWVudHVtIGFkaXBpc2NpbmcgbmlzbCB1cm5hIGluIG5pc2kuIFBoYXNlbGx1cyBzYWdpdHRpcywgZW5pbSBzZWQgYWNjdW1zYW4gY29uc2VxdWF0LCB1cm5hIGF1Z3VlIGxvYm9ydGlzIGVyYXQsIG5vbiBtYWxlc3VhZGEgcXVhbSBtZXR1cyBzb2xsaWNpdHVkaW4gYW50ZS4gSW4gbGVvIHB1cnVzLCBkaWduaXNzaW0gcXVpcyB2YXJpdXMgdmVsLCBwZWxsZW50ZXNxdWUgZXQgbmliaC4gSW4gc2VkIHRvcnRvciBpYWN1bGlzIGxpYmVybyBtb2xsaXMgcGVsbGVudGVzcXVlIGlkIHZpdGFlIGxlY3R1cy4gSW4gaGFjIGhhYml0YXNzZSBwbGF0ZWEgZGljdHVtc3QuIFBoYXNlbGx1cyBtYXVyaXMgZW5pbSwgcG9zdWVyZSBlZ2V0IGx1Y3R1cyBhYywgaWFjdWxpcyBldCBxdWFtLiBWaXZhbXVzIGV0IG5pYmggZGlhbSwgZWxlbWVudHVtIGVnZXN0YXMgdGVsbHVzLiBBZW5lYW4gdnVscHV0YXRlIG1hbGVzdWFkYSBlc3QuIFNlZCBwb3N1ZXJlIHBvcnRhIGRpYW0gYSBzb2RhbGVzLiBQcm9pbiBldSBzZW0gbm9uIHZlbGl0IGZhY2lsaXNpcyB2ZW5lbmF0aXMgc2VkIGEgdHVycGlzLgoKUGVsbGVudGVzcXVlIHNlZCByaXN1cyBhIGFudGUgdnVscHV0YXRlIGxvYm9ydGlzIHNpdCBhbWV0IGV1IG5pc2wuIFN1c3BlbmRpc3NlIHV0IGVyb3MgbWksIGEgcmhvbmN1cyBsYWN1cy4gQ3VyYWJpdHVyIGZlcm1lbnR1bSB2ZWhpY3VsYSB0ZWxsdXMsIGEgb3JuYXJlIG1pIGNvbmRpbWVudHVtIHZlbC4gSW50ZWdlciBtb2xlc3RpZSB2b2x1dHBhdCB2aXZlcnJhLiBJbnRlZ2VyIHBvc3VlcmUgZXVpc21vZCB2ZW5lbmF0aXMuIFByb2luIGFjIG1hdXJpcyBzZWQgbnVsbGEgcGhhcmV0cmEgcG9ydHRpdG9yLiBEdWlzIHZlbCBkdWkgaW4gcmlzdXMgc29kYWxlcyBhdWN0b3Igc2l0IGFtZXQgbm9uIGVuaW0uIE1hZWNlbmFzIG1vbGxpcyBsYWN1cyBhdCBsaWd1bGEgZmF1Y2lidXMgc29kYWxlcy4gQ3JhcyB2ZWwgbmVxdWUgYXJjdS4gU2VkIHRpbmNpZHVudCB0b3J0b3IgcHJldGl1bSBuaXNpIGludGVyZHVtIHF1aXMgZGljdHVtIGFyY3UgbGFvcmVldC4gTW9yYmkgcHJldGl1bSB1bHRyaWNlcyBmZXVnaWF0LiBNYWVjZW5hcyBjb252YWxsaXMgYXVndWUgbmVjIGZlbGlzIG1hbGVzdWFkYSBtYWxlc3VhZGEgc2NlbGVyaXNxdWUgbWF1cmlzIHBsYWNlcmF0LiBTZWQgYXQgbWFnbmEgZW5pbSwgYXQgZnJpbmdpbGxhIGRvbG9yLiBRdWlzcXVlIHV0IG1hdHRpcyBkdWkuIFByYWVzZW50IGNvbnNlY3RldHVyIGFudGUgdml2ZXJyYSBuaXNpIGJsYW5kaXQgcGhhcmV0cmEuIFF1aXNxdWUgbWV0dXMgZWxpdCwgZGlnbmlzc2ltIHZpdGFlIGZlcm1lbnR1bSBzaXQgYW1ldCwgZnJpbmdpbGxhIGltcGVyZGlldCBvZGlvLiBDcmFzIGVnZXQgcHVydXMgZWdldCB0ZWxsdXMgZmV1Z2lhdCBsdWN0dXMgYSBhYyBwdXJ1cy4gQ3JhcyB2aXRhZSBuaXNsIHZlbCBhdWd1ZSByaG9uY3VzIHBvcnR0aXRvciBzaXQgYW1ldCBxdWlzIGxvcmVtLiBEb25lYyBpbnRlcmR1bSBwZWxsZW50ZXNxdWUgYWRpcGlzY2luZy4gUGhhc2VsbHVzIG5lcXVlIGxpYmVybywgYWxpcXVhbSBpbiBtYXR0aXMgdml0YWUsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgbmliaC4KCkRvbmVjIG5lYyBudWxsYSB1cm5hLCBhYyBzYWdpdHRpcyBsZWN0dXMuIFN1c3BlbmRpc3NlIG5vbiBlbGl0IHNlZCBtaSBhdWN0b3IgZmFjaWxpc2lzIHZpdGFlIGV0IGxlY3R1cy4gRnVzY2UgYWMgdnVscHV0YXRlIG1hdXJpcy4gTW9yYmkgY29uZGltZW50dW0gdWx0cmljZXMgbWV0dXMsIGV0IGFjY3Vtc2FuIHB1cnVzIG1hbGVzdWFkYSBhdC4gTWFlY2VuYXMgbG9ib3J0aXMgYW50ZSBzZWQgbWFzc2EgZGljdHVtIHZpdGFlIHZlbmVuYXRpcyBlbGl0IGNvbW1vZG8uIFByb2luIHRlbGx1cyBlcm9zLCBhZGlwaXNjaW5nIHNlZCBkaWduaXNzaW0gdml0YWUsIHRlbXBvciBlZ2V0IGFudGUuIEFlbmVhbiBpZCB0ZWxsdXMgbmVjIG1hZ25hIGN1cnN1cyBwaGFyZXRyYSB2aXRhZSB2ZWwgZW5pbS4gTW9yYmkgdmVzdGlidWx1bSBwaGFyZXRyYSBlc3QgaW4gdnVscHV0YXRlLiBBbGlxdWFtIHZpdGFlIG1ldHVzIGFyY3UsIGlkIGFsaXF1ZXQgbnVsbGEuIFBoYXNlbGx1cyBsaWd1bGEgZXN0LCBoZW5kcmVyaXQgbmVjIGlhY3VsaXMgdXQsIHZvbHV0cGF0IHZlbCBlcm9zLiBTdXNwZW5kaXNzZSB2aXRhZSB1cm5hIHR1cnBpcywgcGxhY2VyYXQgYWRpcGlzY2luZyBkaWFtLiBQaGFzZWxsdXMgZmV1Z2lhdCB2Z
XN0aWJ1bHVtIG5lcXVlIGV1IGRhcGlidXMuIE51bGxhIGZhY2lsaXNpLiBEdWlzIHRvcnRvciBmZWxpcywgZXVpc21vZCBzaXQgYW1ldCBhbGlxdWV0IGluLCB2b2x1dHBhdCBuZWMgdHVycGlzLiBNYXVyaXMgcmhvbmN1cyBpcHN1bSB1dCBwdXJ1cyBlbGVpZmVuZCB1dCBsb2JvcnRpcyBsZWN0dXMgZGFwaWJ1cy4gUXVpc3F1ZSBub24gZXJhdCBsb3JlbS4gVml2YW11cyBwb3N1ZXJlIGltcGVyZGlldCBpYWN1bGlzLiBVdCBsaWd1bGEgbGFjdXMsIGVsZWlmZW5kIGF0IHRlbXBvciBpZCwgYXVjdG9yIGV1IGxlby4KCkRvbmVjIG1pIGVuaW0sIGxhb3JlZXQgcHVsdmluYXIgbW9sbGlzIGV1LCBtYWxlc3VhZGEgdml2ZXJyYSBudW5jLiBJbiB2aXRhZSBtZXR1cyB2aXRhZSBuZXF1ZSB0ZW1wb3IgZGFwaWJ1cy4gTWFlY2VuYXMgdGluY2lkdW50IHB1cnVzIGEgZmVsaXMgYWxpcXVhbSBwbGFjZXJhdC4gTnVsbGEgZmFjaWxpc2kuIFN1c3BlbmRpc3NlIHBsYWNlcmF0IHBoYXJldHJhIG1hdHRpcy4gSW50ZWdlciB0ZW1wb3IgbWFsZXN1YWRhIGp1c3RvIGF0IHRlbXB1cy4gTWFlY2VuYXMgdmVoaWN1bGEgbG9yZW0gYSBzYXBpZW4gYmliZW5kdW0gdmVsIGlhY3VsaXMgcmlzdXMgZmV1Z2lhdC4gUGVsbGVudGVzcXVlIGRpYW0gZXJhdCwgZGFwaWJ1cyBldCBwZWxsZW50ZXNxdWUgcXVpcywgbW9sZXN0aWUgdXQgbWFzc2EuIFZpdmFtdXMgaWFjdWxpcyBpbnRlcmR1bSBtYXNzYSBpZCBiaWJlbmR1bS4gUXVpc3F1ZSB1dCBtYXVyaXMgZHVpLCBzaXQgYW1ldCB2YXJpdXMgZWxpdC4gVmVzdGlidWx1bSBlbGl0IGxvcmVtLCBydXRydW0gbm9uIGNvbnNlY3RldHVyIHV0LCBsYW9yZWV0IG5lYyBudW5jLiBEb25lYyBuZWMgbWF1cmlzIGFudGUuIEN1cmFiaXR1ciB1dCBlc3Qgc2VkIG9kaW8gcGhhcmV0cmEgbGFvcmVldC4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gQ3VyYWJpdHVyIHB1cnVzIHJpc3VzLCBsYW9yZWV0IHNlZCBwb3J0YSBpZCwgc2FnaXR0aXMgdmVsIGlwc3VtLiBNYWVjZW5hcyBuaWJoIGRpYW0sIGN1cnN1cyBldCB2YXJpdXMgc2l0IGFtZXQsIGZyaW5naWxsYSBzZWQgbWFnbmEuIE51bGxhbSBpZCBuZXF1ZSBldSBsZW8gZmF1Y2lidXMgbW9sbGlzLiBEdWlzIG5lYyBhZGlwaXNjaW5nIG1hdXJpcy4gU3VzcGVuZGlzc2Ugc29sbGljaXR1ZGluLCBlbmltIGV1IHB1bHZpbmFyIGNvbW1vZG8sIGVyYXQgYXVndWUgdWx0cmljZXMgbWksIGEgdHJpc3RpcXVlIG1hZ25hIHNlbSBub24gbGliZXJvLgoKU2VkIGluIG1ldHVzIG51bGxhLiBQcmFlc2VudCBuZWMgYWRpcGlzY2luZyBzYXBpZW4uIERvbmVjIGxhb3JlZXQsIHZlbGl0IG5vbiBydXRydW0gdmVzdGlidWx1bSwgbGlndWxhIG5lcXVlIGFkaXBpc2NpbmcgdHVycGlzLCBhdCBhdWN0b3Igc2FwaWVuIGVsaXQgdXQgbWFzc2EuIE51bGxhbSBhbGlxdWFtLCBlbmltIHZlbCBwb3N1ZXJlIHJ1dHJ1bSwganVzdG8gZXJhdCBsYW9yZWV0IGVzdCwgdmVsIGZyaW5naWxsYSBsYWN1cyBuaXNpIG5vbiBsZWN0dXMuIEV0aWFtIGxlY3R1cyBudW5jLCBsYW9yZWV0IGV0IHBsYWNlcmF0IGF0LCB2ZW5lbmF0aXMgcXVpcyBsaWJlcm8uIFByYWVzZW50IGluIHBsYWNlcmF0IGVsaXQuIENsYXNzIGFwdGVudCB0YWNpdGkgc29jaW9zcXUgYWQgbGl0b3JhIHRvcnF1ZW50IHBlciBjb251YmlhIG5vc3RyYSwgcGVyIGluY2VwdG9zIGhpbWVuYWVvcy4gUGVsbGVudGVzcXVlIGZyaW5naWxsYSBhdWd1ZSBldSBuaWJoIHBsYWNlcmF0IGRpY3R1bS4gTnVuYyBwb3J0dGl0b3IgdHJpc3RpcXVlIGRpYW0sIGV1IGFsaXF1YW0gZW5pbSBhbGlxdWV0IHZlbC4gQWxpcXVhbSBsYWNpbmlhIGludGVyZHVtIGlwc3VtLCBpbiBwb3N1ZXJlIG1ldHVzIGx1Y3R1cyB2ZWwuIFZpdmFtdXMgZXQgbmlzbCBhIGVyb3Mgc2VtcGVyIGVsZW1lbnR1bS4gRG9uZWMgdmVuZW5hdGlzIG9yY2kgYXQgZGlhbSB0cmlzdGlxdWUgc29sbGljaXR1ZGluLiBJbiBldSBlcm9zIHNlZCBvZGlvIHJ1dHJ1bSBsdWN0dXMgbm9uIG5lYyB0ZWxsdXMuCgpOdWxsYSBuZWMgZmVsaXMgZWxpdC4gTnVsbGFtIGluIGlwc3VtIGluIGlwc3VtIGNvbnNlcXVhdCBmcmluZ2lsbGEgcXVpcyB2ZWwgdG9ydG9yLiBQaGFzZWxsdXMgbm9uIG1hc3NhIG5pc2ksIHNpdCBhbWV0IGFsaXF1YW0gdXJuYS4gU2VkIGZlcm1lbnR1bSBuaWJoIHZpdGFlIGxhY3VzIHRpbmNpZHVudCBuZWMgdGluY2lkdW50IG1hc3NhIGJpYmVuZHVtLiBFdGlhbSBlbGl0IGR1aSwgZmFjaWxpc2lzIHNpdCBhbWV0IHZlaGljdWxhIG5lYywgaWFjdWxpcyBhdCBzYXBpZW4uIFV0IGF0IG1hc3NhIGlkIGR1aSB1bHRyaWNlcyB2b2x1dHBhdCB1dCBhYyBsaWJlcm8uIEZ1c2NlIGlwc3VtIG1pLCBiaWJlbmR1bSBhIGxhY2luaWEgZXQsIHB1bHZpbmFyIGVnZXQgbWF1cmlzLiBQcm9pbiBmYXVjaWJ1cyB1cm5hIHV0IGxvcmVtIGVsZW1lbnR1bSB2dWxwdXRhdGUuIER1aXMgcXVhbSBsZW8sIG1hbGVzdWFkYSBub24gZXVpc21vZCB1dCwgYmxhbmRpdCBmYWNpbGlzaXMgbWF1cmlzLiBTdXNwZW5kaXNzZSBzaXQgYW1ldCBtYWduYSBpZCB2ZWxpdCB0aW5jaWR1bnQgYWxpcXVldCBuZWMgZXUgZG9sb3IuIEN1cmFiaXR1ciBiaWJlbmR1bSBsb3JlbSB2ZWwgZmVsaXMgdGVtcHVzIGRh
cGlidXMuIEFsaXF1YW0gZXJhdCB2b2x1dHBhdC4gQWVuZWFuIGN1cnN1cyB0b3J0b3IgbmVjIGR1aSBhbGlxdWV0IHBvcnRhLiBBZW5lYW4gY29tbW9kbyBpYWN1bGlzIHN1c2NpcGl0LiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgUXVpc3F1ZSBzaXQgYW1ldCBvcm5hcmUgZWxpdC4gTmFtIGxpZ3VsYSByaXN1cywgdmVzdGlidWx1bSBuZWMgbWF0dGlzIGluLCBjb25kaW1lbnR1bSBhYyBhbnRlLiBEb25lYyBmcmluZ2lsbGEsIGp1c3RvIGV0IHVsdHJpY2VzIGZhdWNpYnVzLCB0ZWxsdXMgZXN0IHZvbHV0cGF0IG1hc3NhLCB2aXRhZSBjb21tb2RvIHNhcGllbiBkaWFtIG5vbiByaXN1cy4gVml2YW11cyBhdCBhcmN1IGdyYXZpZGEgcHVydXMgbW9sbGlzIGZldWdpYXQuCgpOdWxsYSBhIHR1cnBpcyBxdWlzIHNhcGllbiBjb21tb2RvIGRpZ25pc3NpbSBldSBxdWlzIGp1c3RvLiBNYWVjZW5hcyBldSBsb3JlbSBvZGlvLCB1dCBoZW5kcmVyaXQgdmVsaXQuIEN1bSBzb2NpaXMgbmF0b3F1ZSBwZW5hdGlidXMgZXQgbWFnbmlzIGRpcyBwYXJ0dXJpZW50IG1vbnRlcywgbmFzY2V0dXIgcmlkaWN1bHVzIG11cy4gUHJvaW4gZmFjaWxpc2lzIHBvcnR0aXRvciB1bGxhbWNvcnBlci4gUHJhZXNlbnQgbW9sbGlzIGRpZ25pc3NpbSBtYXNzYSwgbGFvcmVldCBhbGlxdWV0IHZlbGl0IHBlbGxlbnRlc3F1ZSBub24uIE51bmMgZmFjaWxpc2lzIGNvbnZhbGxpcyB0cmlzdGlxdWUuIE1hdXJpcyBwb3J0dGl0b3IgYW50ZSBhdCB0ZWxsdXMgY29udmFsbGlzIHBsYWNlcmF0LiBNb3JiaSBhbGlxdWV0IG5pc2kgYWMgbmlzbCBwdWx2aW5hciBpZCBkaWN0dW0gbmlzbCBtb2xsaXMuIFNlZCBvcm5hcmUgc2VtIGV0IHJpc3VzIHBsYWNlcmF0IGxvYm9ydGlzIGlkIGVnZXQgZWxpdC4gSW50ZWdlciBjb25zZXF1YXQsIG1hZ25hIGlkIHN1c2NpcGl0IHBoYXJldHJhLCBudWxsYSB2ZWxpdCBzdXNjaXBpdCBvcmNpLCB1dCBpbnRlcmR1bSBhdWd1ZSBhdWd1ZSBxdWlzIHF1YW0uIEZ1c2NlIHByZXRpdW0gYWxpcXVldCB2dWxwdXRhdGUuIE1hdXJpcyBibGFuZGl0IGRpY3R1bSBtb2xlc3RpZS4gUHJvaW4gbnVsbGEgbmliaCwgYmliZW5kdW0gZXUgcGxhY2VyYXQgYXQsIHRpbmNpZHVudCBhYyBuaXNsLiBOdWxsYW0gdnVscHV0YXRlIG1ldHVzIHV0IGxpYmVybyBydXRydW0gdWx0cmljaWVzLiBOdW5jIHNpdCBhbWV0IGR1aSBtYXVyaXMuIFN1c3BlbmRpc3NlIGFkaXBpc2NpbmcgbGFjdXMgaW4gYXVndWUgZWxlaWZlbmQgbW9sbGlzLgoKRHVpcyBwcmV0aXVtIHVsdHJpY2VzIG1hdHRpcy4gTmFtIGV1aXNtb2QgcmlzdXMgYSBlcmF0IGxhY2luaWEgYmliZW5kdW0uIE1vcmJpIG1hc3NhIHRvcnRvciwgY29uc2VjdGV0dXIgaWQgZWxlaWZlbmQgaWQsIHBlbGxlbnRlc3F1ZSB2ZWwgdG9ydG9yLiBQcmFlc2VudCB1cm5hIGxvcmVtLCBwb3J0dGl0b3IgYXQgY29uZGltZW50dW0gdml0YWUsIGx1Y3R1cyBlZ2V0IGVsaXQuIE1hZWNlbmFzIGZyaW5naWxsYSBxdWFtIGNvbnZhbGxpcyBlc3QgaGVuZHJlcml0IHZpdmVycmEuIEV0aWFtIHZlaGljdWxhLCBzYXBpZW4gbm9uIHB1bHZpbmFyIGFkaXBpc2NpbmcsIG5pc2kgbWFzc2EgdmVzdGlidWx1bSBlc3QsIGlkIGludGVyZHVtIG1hdXJpcyB2ZWxpdCBldSBlc3QuIFZlc3RpYnVsdW0gZXN0IGFyY3UsIGZhY2lsaXNpcyBhdCB1bHRyaWNpZXMgbm9uLCB2dWxwdXRhdGUgaWQgc2FwaWVuLiBWZXN0aWJ1bHVtIGlwc3VtIG1ldHVzLCBwaGFyZXRyYSBuZWMgcGVsbGVudGVzcXVlIGlkLCBmYWNpbGlzaXMgaWQgc2FwaWVuLiBEb25lYyBydXRydW0gb2RpbyBldCBsYWN1cyB1bHRyaWNpZXMgdWxsYW1jb3JwZXIuIEludGVnZXIgc2VkIGVzdCB1dCBtaSBwb3N1ZXJlIHRpbmNpZHVudCBxdWlzIG5vbiBsZW8uIE1vcmJpIHRlbGx1cyBqdXN0bywgdWx0cmljaWVzIHNpdCBhbWV0IHVsdHJpY2VzIHF1aXMsIGZhY2lsaXNpcyB2aXRhZSBtYWduYS4gRG9uZWMgbGlndWxhIG1ldHVzLCBwZWxsZW50ZXNxdWUgbm9uIHRyaXN0aXF1ZSBhYywgdmVzdGlidWx1bSBzZWQgZXJhdC4gQWxpcXVhbSBlcmF0IHZvbHV0cGF0LgoKTmFtIGRpZ25pc3NpbSwgbmlzbCBlZ2V0IGNvbnNlcXVhdCBldWlzbW9kLCBzZW0gbGVjdHVzIGF1Y3RvciBvcmNpLCB1dCBwb3J0dGl0b3IgbGFjdXMgZHVpIGFjIG5lcXVlLiBJbiBoYWMgaGFiaXRhc3NlIHBsYXRlYSBkaWN0dW1zdC4gRnVzY2UgZWdlc3RhcyBwb3J0YSBmYWNpbGlzaXMuIEluIGhhYyBoYWJpdGFzc2UgcGxhdGVhIGRpY3R1bXN0LiBNYXVyaXMgY3Vyc3VzIHJob25jdXMgcmlzdXMgYWMgZXVpc21vZC4gUXVpc3F1ZSB2aXRhZSByaXN1cyBhIHRlbGx1cyB2ZW5lbmF0aXMgY29udmFsbGlzLiBDdXJhYml0dXIgbGFvcmVldCBzYXBpZW4gZXUgcXVhbSBsdWN0dXMgbG9ib3J0aXMuIFZpdmFtdXMgc29sbGljaXR1ZGluIHNvZGFsZXMgZG9sb3Igdml0YWUgc29kYWxlcy4gU3VzcGVuZGlzc2UgcGhhcmV0cmEgbGFvcmVldCBhbGlxdWV0LiBNYWVjZW5hcyB1bGxhbWNvcnBlciBvcmNpIHZlbCB0b3J0b3IgbHVjdHVzIGlhY3VsaXMgdXQgdml0YWUgbWV0dXMuIFZlc3RpYnVsdW0gdXQgYXJjdSBhYyB0ZWxsdXMgbWF0dGlzIGVsZWlmZW5kIGV
nZXQgdmVoaWN1bGEgZWxpdC4KCkluIHNlZCBmZXVnaWF0IGVyb3MuIERvbmVjIGJpYmVuZHVtIHVsbGFtY29ycGVyIGRpYW0sIGV1IGZhdWNpYnVzIG1hdXJpcyBkaWN0dW0gc2VkLiBEdWlzIHRpbmNpZHVudCBqdXN0byBpbiBuZXF1ZSBhY2N1bXNhbiBkaWN0dW0uIE1hZWNlbmFzIGluIHJ1dHJ1bSBzYXBpZW4uIFV0IGlkIGZldWdpYXQgbGFjdXMuIE51bGxhIGZhY2lsaXNpLiBOdW5jIGFjIGxvcmVtIGlkIHF1YW0gdmFyaXVzIGN1cnN1cyBhIGV0IGVsaXQuIEFlbmVhbiBwb3N1ZXJlIGxpYmVybyBldSB0b3J0b3IgdmVoaWN1bGEgdXQgdWxsYW1jb3JwZXIgb2RpbyBjb25zZXF1YXQuIFNlZCBpbiBkaWduaXNzaW0gZHVpLiBDdXJhYml0dXIgaWFjdWxpcyB0ZW1wb3IgcXVhbSBuZWMgcGxhY2VyYXQuIEFsaXF1YW0gdmVuZW5hdGlzIG5pYmggZXQganVzdG8gaWFjdWxpcyBsYWNpbmlhLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gUGVsbGVudGVzcXVlIHRlbXB1cyBtYWduYSBzZWQgbWkgYWxpcXVldCBlZ2V0IHZhcml1cyBvZGlvIGNvbmd1ZS4KCkludGVnZXIgc2VtIHNlbSwgc2VtcGVyIGluIHZlc3RpYnVsdW0gdml0YWUsIGxvYm9ydGlzIHF1aXMgZXJhdC4gRHVpcyBhbnRlIGxlY3R1cywgZmVybWVudHVtIHNlZCB0ZW1wb3Igc2l0IGFtZXQsIHBsYWNlcmF0IHNpdCBhbWV0IHNlbS4gTWF1cmlzIGNvbmd1ZSB0aW5jaWR1bnQgaXBzdW0uIFV0IHZpdmVycmEsIGxhY3VzIHZlbCB2YXJpdXMgcGhhcmV0cmEsIHB1cnVzIGVuaW0gcHVsdmluYXIgaXBzdW0sIG5vbiBwZWxsZW50ZXNxdWUgZW5pbSBqdXN0byBub24gZXJhdC4gRnVzY2UgaXBzdW0gb3JjaSwgdWx0cmljZXMgc2VkIHBlbGxlbnRlc3F1ZSBhdCwgaGVuZHJlcml0IGxhb3JlZXQgZW5pbS4gTnVuYyBibGFuZGl0IG1vbGxpcyBwcmV0aXVtLiBVdCBtb2xsaXMsIG51bGxhIGFsaXF1YW0gc29kYWxlcyB2ZXN0aWJ1bHVtLCBsaWJlcm8gbG9yZW0gdGVtcHVzIHRvcnRvciwgYSBwZWxsZW50ZXNxdWUgbmliaCBlbGl0IGEgaXBzdW0uIFBoYXNlbGx1cyBmZXJtZW50dW0gbGlndWxhIGF0IG5lcXVlIGFkaXBpc2Npbmcgc29sbGljaXR1ZGluLiBTdXNwZW5kaXNzZSBpZCBpcHN1bSBhcmN1LiBTZWQgdGluY2lkdW50IHBsYWNlcmF0IHZpdmVycmEuIERvbmVjIGxpYmVybyBhdWd1ZSwgcG9ydHRpdG9yIHNpdCBhbWV0IHZhcml1cyBlZ2V0LCBydXRydW0gbmVjIGxhY3VzLiBQcm9pbiBibGFuZGl0IG9yY2kgc2l0IGFtZXQgZGlhbSBkaWN0dW0gaWQgcG9ydHRpdG9yIHJpc3VzIGlhY3VsaXMuIEludGVnZXIgbGFjaW5pYSBmZXVnaWF0IGxlbywgdml0YWUgYXVjdG9yIHR1cnBpcyBlbGVpZmVuZCB2ZWwuIFN1c3BlbmRpc3NlIGxvcmVtIHF1YW0sIHByZXRpdW0gaWQgYmliZW5kdW0gc2VkLCB2aXZlcnJhIHZpdGFlIHRvcnRvci4gTnVsbGFtIHVsdHJpY2llcyBsaWJlcm8gZXUgcmlzdXMgY29udmFsbGlzIGVnZXQgdWxsYW1jb3JwZXIgbmlzaSBlbGVtZW50dW0uIE1hdXJpcyBudWxsYSBlbGl0LCBiaWJlbmR1bSBpZCB2dWxwdXRhdGUgdml0YWUsIGltcGVyZGlldCBydXRydW0gbG9yZW0uIEN1cmFiaXR1ciBlZ2V0IGRpZ25pc3NpbSBvcmNpLiBTZWQgc2VtcGVyIHRlbGx1cyBpcHN1bSwgYXQgYmxhbmRpdCBkdWkuIEludGVnZXIgZGFwaWJ1cyBmYWNpbGlzaXMgc29kYWxlcy4gVml2YW11cyBzb2xsaWNpdHVkaW4gdmFyaXVzIGVzdCwgcXVpcyBvcm5hcmUganVzdG8gY3Vyc3VzIGlkLgoKTnVuYyB2ZWwgdWxsYW1jb3JwZXIgbWkuIFN1c3BlbmRpc3NlIHBvdGVudGkuIE51bmMgZXQgdXJuYSBhIGF1Z3VlIHNjZWxlcmlzcXVlIHVsdHJpY2VzIG5vbiBxdWlzIG1pLiBJbiBxdWlzIHBvcnR0aXRvciBlbGl0LiBBZW5lYW4gcXVpcyBlcmF0IG51bGxhLCBhIHZlbmVuYXRpcyB0ZWxsdXMuIEZ1c2NlIHZlc3RpYnVsdW0gbmlzaSBzZWQgbGVvIGFkaXBpc2NpbmcgZGlnbmlzc2ltLiBOdW5jIGludGVyZHVtLCBsb3JlbSBldCBsYWNpbmlhIHZlc3RpYnVsdW0sIHF1YW0gZXN0IG1hdHRpcyBtYWduYSwgc2l0IGFtZXQgdm9sdXRwYXQgZWxpdCBhdWd1ZSBhdCBsaWJlcm8uIENyYXMgZ3JhdmlkYSBkdWkgcXVpcyB2ZWxpdCBsb2JvcnRpcyBjb25kaW1lbnR1bSBldCBlbGVpZmVuZCBsaWd1bGEuIFBoYXNlbGx1cyBhYyBtZXR1cyBxdWFtLCBpZCB2ZW5lbmF0aXMgbWkuIEFsaXF1YW0gdXQgdHVycGlzIGFjIHRlbGx1cyBkYXBpYnVzIGRhcGlidXMgZXUgaW4gbWkuIFF1aXNxdWUgZWdldCBuaWJoIGVyb3MuIEZ1c2NlIGNvbnNlY3RldHVyIGxlbyB2ZWxpdC4KClZlc3RpYnVsdW0gc2VtcGVyIGVnZXN0YXMgbWF1cmlzLiBNb3JiaSB2ZXN0aWJ1bHVtIHNlbSBzZW0uIEFsaXF1YW0gdmVuZW5hdGlzLCBmZWxpcyBzZWQgZWxlaWZlbmQgcG9ydGEsIG1hdXJpcyBkaWFtIHNlbXBlciBhcmN1LCBzaXQgYW1ldCB1bHRyaWNpZXMgZXN0IHNhcGllbiBzaXQgYW1ldCBsaWJlcm8uIFZlc3RpYnVsdW0gZHVpIG9yY2ksIG9ybmFyZSBjb25kaW1lbnR1bSBtb2xsaXMgbmVjLCBtb2xlc3RpZSBhYyBlcm9zLiBQcm9pbiB2aXRhZSBtb2
xsaXMgdmVsaXQuIFByYWVzZW50IGVnZXQgZmVsaXMgbWkuIE1hZWNlbmFzIGV1IHZ1bHB1dGF0ZSBuaXNpLiBWZXN0aWJ1bHVtIHZhcml1cywgYXJjdSBpbiB1bHRyaWNpZXMgdmVzdGlidWx1bSwgbmliaCBsZW8gc2FnaXR0aXMgb2RpbywgdXQgYmliZW5kdW0gbmlzbCBtaSBuZWMgZGlhbS4gSW50ZWdlciBhdCBlbmltIGZldWdpYXQgbnVsbGEgc2VtcGVyIGJpYmVuZHVtIHV0IGEgdmVsaXQuIFByb2luIGF0IG5pc2kgdXQgbG9yZW0gYWxpcXVhbSB2YXJpdXMgZWdldCBxdWlzIGVsaXQuIE51bGxhbSBuZWMgb2RpbyB2ZWwgbGVjdHVzIGNvbmd1ZSBjb25zZXF1YXQgYWRpcGlzY2luZyBhYyBtaS4gRnVzY2Ugdml0YWUgbGFvcmVldCBsaWJlcm8uIEN1cmFiaXR1ciBzaXQgYW1ldCBzZW0gbmVxdWUsIG5lYyBwb3N1ZXJlIGVuaW0uIEN1cmFiaXR1ciBhdCBtYXNzYSBhIHNlbSBncmF2aWRhIGlhY3VsaXMgbmVjIGV0IG5pYmguIFNlZCB2aXRhZSBkdWkgdml0YWUgbGVvIHRpbmNpZHVudCBwcmV0aXVtIGEgYWxpcXVhbSBlcmF0LiBTdXNwZW5kaXNzZSB1bHRyaWNpZXMgb2RpbyBhdCBtZXR1cyB0ZW1wb3IgaW4gcGVsbGVudGVzcXVlIGFyY3UgdWx0cmljaWVzLgoKU2VkIGFsaXF1YW0gbWF0dGlzIHF1YW0sIGluIHZ1bHB1dGF0ZSBzYXBpZW4gdWx0cmljZXMgaW4uIFBlbGxlbnRlc3F1ZSBxdWlzIHZlbGl0IHNlZCBkdWkgaGVuZHJlcml0IGN1cnN1cy4gUGVsbGVudGVzcXVlIG5vbiBudW5jIGxhY3VzLCBhIHNlbXBlciBtZXR1cy4gRnVzY2UgZXVpc21vZCB2ZWxpdCBxdWlzIGRpYW0gc3VzY2lwaXQgY29uc2VxdWF0LiBQcmFlc2VudCBjb21tb2RvIGFjY3Vtc2FuIG5lcXVlLiBQcm9pbiB2aXZlcnJhLCBpcHN1bSBub24gdHJpc3RpcXVlIHVsdHJpY2VzLCB2ZWxpdCB2ZWxpdCBmYWNpbGlzaXMgbG9yZW0sIHZlbCBydXRydW0gbmVxdWUgZXJvcyBhYyBuaXNpLiBTdXNwZW5kaXNzZSBmZWxpcyBtYXNzYSwgZmF1Y2lidXMgaW4gdm9sdXRwYXQgYWMsIGRhcGlidXMgZXQgb2Rpby4gUGVsbGVudGVzcXVlIGlkIHRlbGx1cyBzaXQgYW1ldCByaXN1cyB1bHRyaWNpZXMgdWxsYW1jb3JwZXIgbm9uIG5lYyBzYXBpZW4uIE5hbSBwbGFjZXJhdCB2aXZlcnJhIHVsbGFtY29ycGVyLiBOYW0gcGxhY2VyYXQgcG9ydHRpdG9yIHNhcGllbiBuZWMgcHVsdmluYXIuIEN1cmFiaXR1ciB2ZWwgb2RpbyBzaXQgYW1ldCBvZGlvIGFjY3Vtc2FuIGFsaXF1ZXQgdml0YWUgYSBsZWN0dXMuIFBlbGxlbnRlc3F1ZSBsb2JvcnRpcyB2aXZlcnJhIGNvbnNlcXVhdC4gTWF1cmlzIGVsZW1lbnR1bSBjdXJzdXMgbnVsbGEsIHNpdCBhbWV0IGhlbmRyZXJpdCBqdXN0byBkaWN0dW0gc2VkLiBNYWVjZW5hcyBkaWFtIG9kaW8sIGZyaW5naWxsYSBhYyBjb25ndWUgcXVpcywgYWRpcGlzY2luZyB1dCBlbGl0LgoKQWxpcXVhbSBsb3JlbSBlcm9zLCBwaGFyZXRyYSBuZWMgZWdlc3RhcyB2aXRhZSwgbWF0dGlzIG5lYyByaXN1cy4gTWF1cmlzIGFyY3UgbWFzc2EsIHNvZGFsZXMgZWdldCBncmF2aWRhIHNlZCwgdml2ZXJyYSB2aXRhZSB0dXJwaXMuIFV0IGxpZ3VsYSB1cm5hLCBldWlzbW9kIGFjIHRpbmNpZHVudCBldSwgZmF1Y2lidXMgc2VkIGZlbGlzLiBQcmFlc2VudCBtb2xsaXMsIGlwc3VtIHF1aXMgcmhvbmN1cyBkaWduaXNzaW0sIG9kaW8gc2VtIHZlbmVuYXRpcyBudWxsYSwgYXQgY29uc2VxdWF0IGZlbGlzIGF1Z3VlIHZlbCBlcmF0LiBOYW0gZmVybWVudHVtIGZldWdpYXQgdm9sdXRwYXQuIENsYXNzIGFwdGVudCB0YWNpdGkgc29jaW9zcXUgYWQgbGl0b3JhIHRvcnF1ZW50IHBlciBjb251YmlhIG5vc3RyYSwgcGVyIGluY2VwdG9zIGhpbWVuYWVvcy4gRXRpYW0gdml0YWUgZHVpIGluIG5pc2kgYWRpcGlzY2luZyB1bHRyaWNpZXMgbm9uIGV1IGp1c3RvLiBEb25lYyB0cmlzdGlxdWUgdWx0cmljaWVzIGFkaXBpc2NpbmcuIE51bGxhIHNvZGFsZXMsIG51bmMgYSB0cmlzdGlxdWUgZWxlbWVudHVtLCBlcmF0IG5lcXVlIGVnZXN0YXMgbmlzbCwgYXQgaGVuZHJlcml0IG9yY2kgc2FwaWVuIHNlZCBsaWJlcm8uIFZpdmFtdXMgYSBtYXVyaXMgdHVycGlzLCBxdWlzIGxhb3JlZXQgaXBzdW0uIE51bmMgbmVjIG1pIGV0IG5pc2wgcGVsbGVudGVzcXVlIHNjZWxlcmlzcXVlLiBWaXZhbXVzIHZvbHV0cGF0LCBqdXN0byB0cmlzdGlxdWUgbGFjaW5pYSBjb25kaW1lbnR1bSwgZXJhdCBqdXN0byB1bHRyaWNlcyB1cm5hLCBlbGVtZW50dW0gdml2ZXJyYSBlcm9zIGF1Z3VlIG5vbiBsaWJlcm8uIFNlZCBtb2xsaXMgbW9sbGlzIGFyY3UsIGF0IGZlcm1lbnR1bSBkaWFtIHN1c2NpcGl0IHF1aXMuCgpFdGlhbSBzaXQgYW1ldCBuaWJoIGp1c3RvLCBwb3N1ZXJlIHZvbHV0cGF0IG51bmMuIE1vcmJpIHBlbGxlbnRlc3F1ZSBuZXF1ZSBpbiBvcmNpIHZvbHV0cGF0IGV1IHNjZWxlcmlzcXVlIGxvcmVtIGRpY3R1bS4gTWF1cmlzIG1vbGxpcyBpYWN1bGlzIGVzdCwgbmVjIHNhZ2l0dGlzIHNhcGllbiBjb25zZXF1YXQgaWQuIE51bmMgbmVjIG1hbGVzdWFkYSBvZGlvLiBEdWlzIHF1aXMgc3VzY2lwaXQgb2Rpby4gTWF1cmlzIHB1cnVzIGR1aSwgc29kYWxlcyBpZCBtYXR0aXMgc2l0IGFtZXQsIHBvc3VlcmUgaW4gYXJjdS4gUGhhc2VsbHVzIHBvcnRhIGVsZW1lbnR1bSBjb252YWxsaXMuIE1hZWNlbmFzIGF0IG9yY2kgZXQgbWkgdnVsc
HV0YXRlIHNvbGxpY2l0dWRpbiBpbiBpbiB0dXJwaXMuIFBlbGxlbnRlc3F1ZSBjdXJzdXMgYWRpcGlzY2luZyBuZXF1ZSBzaXQgYW1ldCBjb21tb2RvLiBGdXNjZSB1dCBtaSBldSBsZWN0dXMgcG9ydHRpdG9yIHZvbHV0cGF0IGV0IG5lYyBmZWxpcy4KCkN1cmFiaXR1ciBzY2VsZXJpc3F1ZSBlcm9zIHF1aXMgbmlzbCB2aXZlcnJhIHZlbCB1bHRyaWNlcyB2ZWxpdCB2ZXN0aWJ1bHVtLiBTZWQgbG9ib3J0aXMgcHVsdmluYXIgc2FwaWVuIGFjIHZlbmVuYXRpcy4gU2VkIGFudGUgbmliaCwgcmhvbmN1cyBlZ2V0IGRpY3R1bSBpbiwgbW9sbGlzIHV0IG5pc2kuIFBoYXNlbGx1cyBmYWNpbGlzaXMgbWkgbm9uIGxvcmVtIHRyaXN0aXF1ZSBub24gZWxlaWZlbmQgc2VtIGZyaW5naWxsYS4gSW50ZWdlciB1dCBhdWd1ZSBlc3QuIEluIHZlbmVuYXRpcyB0aW5jaWR1bnQgc2NlbGVyaXNxdWUuIEV0aWFtIGFudGUgZHVpLCBwb3N1ZXJlIHF1aXMgbWFsZXN1YWRhIHZpdGFlLCBtYWxlc3VhZGEgYSBhcmN1LiBBZW5lYW4gZmF1Y2lidXMgdmVuZW5hdGlzIHNhcGllbiwgdXQgZmFjaWxpc2lzIG5pc2kgYmxhbmRpdCB2ZWwuIEFlbmVhbiBhYyBsb3JlbSBldSBzZW0gZmVybWVudHVtIHBsYWNlcmF0LiBQcm9pbiBuZXF1ZSBwdXJ1cywgYWxpcXVldCB1dCB0aW5jaWR1bnQgdXQsIGNvbnZhbGxpcyBzaXQgYW1ldCBlcm9zLiBQaGFzZWxsdXMgdmVoaWN1bGEgdWxsYW1jb3JwZXIgZW5pbSBub24gdmVoaWN1bGEuIEV0aWFtIHBvcnRhIG9kaW8gdXQgaXBzdW0gYWRpcGlzY2luZyBlZ2VzdGFzIGlkIGEgb2Rpby4gUGVsbGVudGVzcXVlIGJsYW5kaXQsIHNhcGllbiB1dCBwdWx2aW5hciBpbnRlcmR1bSwgbWkgbnVsbGEgaGVuZHJlcml0IGVsaXQsIGluIHRlbXBvciBkaWFtIGVuaW0gYSB1cm5hLiBJbiB0ZWxsdXMgb2Rpbywgb3JuYXJlIHNlZCBjb25kaW1lbnR1bSBhLCBtYXR0aXMgZXUgYXVndWUuCgpGdXNjZSBoZW5kcmVyaXQgcG9ydHRpdG9yIGV1aXNtb2QuIERvbmVjIG1hbGVzdWFkYSBlZ2VzdGFzIHR1cnBpcywgZXQgdWx0cmljaWVzIGZlbGlzIGVsZW1lbnR1bSB2aXRhZS4gTnVsbGFtIGluIHNlbSBuaWJoLiBOdWxsYW0gdWx0cmljaWVzIGhlbmRyZXJpdCBqdXN0byBzaXQgYW1ldCBsb2JvcnRpcy4gU2VkIHRpbmNpZHVudCwgbWF1cmlzIGF0IG9ybmFyZSBsYW9yZWV0LCBzYXBpZW4gcHVydXMgZWxlbWVudHVtIGVsaXQsIG5lYyBwb3J0dGl0b3IgbmlzbCBwdXJ1cyBldCBlcmF0LiBEb25lYyBmZWxpcyBuaXNpLCBydXRydW0gdWxsYW1jb3JwZXIgZ3JhdmlkYSBhYywgdGluY2lkdW50IHNpdCBhbWV0IHVybmEuIFByb2luIHZlbCBqdXN0byB2aXRhZSBlcm9zIHNhZ2l0dGlzIGJpYmVuZHVtIGEgdXQgbmliaC4gUGhhc2VsbHVzIHNvZGFsZXMgbGFvcmVldCB0aW5jaWR1bnQuIE1hZWNlbmFzIG9kaW8gbWFzc2EsIGNvbmRpbWVudHVtIGlkIGFsaXF1ZXQgdXQsIHJob25jdXMgdmVsIGxlY3R1cy4gRHVpcyBwaGFyZXRyYSBjb25zZWN0ZXR1ciBzYXBpZW4uIFBoYXNlbGx1cyBwb3N1ZXJlIHVsdHJpY2llcyBtYXNzYSwgbm9uIHJob25jdXMgcmlzdXMgYWxpcXVhbSB0ZW1wdXMuCgpQcmFlc2VudCB2ZW5lbmF0aXMgbWFnbmEgaWQgc2VtIGRpY3R1bSBldSB2ZWhpY3VsYSBpcHN1bSB2dWxwdXRhdGUuIFNlZCBhIGNvbnZhbGxpcyBzYXBpZW4uIFNlZCBqdXN0byBkb2xvciwgcmhvbmN1cyB2ZWwgcnV0cnVtIG1hdHRpcywgc29sbGljaXR1ZGluIHV0IHJpc3VzLiBOdWxsYW0gc2l0IGFtZXQgY29udmFsbGlzIGVzdC4gRXRpYW0gbm9uIHRpbmNpZHVudCBsaWd1bGEuIEZ1c2NlIHN1c2NpcGl0IHByZXRpdW0gZWxpdCBhdCB1bGxhbWNvcnBlci4gUXVpc3F1ZSBzb2xsaWNpdHVkaW4sIGRpYW0gaWQgaW50ZXJkdW0gcG9ydGEsIG1ldHVzIGlwc3VtIHZvbHV0cGF0IGxpYmVybywgaWQgdmVuZW5hdGlzIGZlbGlzIG9yY2kgbm9uIHZlbGl0LiBTdXNwZW5kaXNzZSBwb3RlbnRpLiBNYXVyaXMgcnV0cnVtLCB0b3J0b3Igc2l0IGFtZXQgcGVsbGVudGVzcXVlIHRpbmNpZHVudCwgZXJhdCBxdWFtIHVsdHJpY2llcyBvZGlvLCBpZCBhbGlxdWFtIGVsaXQgbGVvIG5lYyBsZW8uIFBlbGxlbnRlc3F1ZSBqdXN0byBlcm9zLCBydXRydW0gYXQgZmV1Z2lhdCBuZWMsIHBvcnRhIGV0IHRlbGx1cy4gQWVuZWFuIGVnZXQgbWV0dXMgbGVjdHVzLgoKUHJhZXNlbnQgZXVpc21vZCwgdHVycGlzIHF1aXMgbGFvcmVldCBjb25zZXF1YXQsIG5lcXVlIGFudGUgaW1wZXJkaWV0IHF1YW0sIGFjIHNlbXBlciB0b3J0b3IgbmliaCBpbiBudWxsYS4gSW50ZWdlciBzY2VsZXJpc3F1ZSBlcm9zIHZlaGljdWxhIHVybmEgbGFjaW5pYSBhYyBmYWNpbGlzaXMgbWF1cmlzIGFjY3Vtc2FuLiBQaGFzZWxsdXMgYXQgbWF1cmlzIG5pYmguIEN1cmFiaXR1ciBlbmltIGFudGUsIHJ1dHJ1bSBzZWQgYWRpcGlzY2luZyBoZW5kcmVyaXQsIHBlbGxlbnRlc3F1ZSBub24gYXVndWUuIEluIGhhYyBoYWJpdGFzc2UgcGxhdGVhIGRpY3R1bXN0LiBOYW0gdGVtcHVzIGV1aXNtb2QgbWFzc2EgYSBkaWN0dW0uIERvbmVjIHNpdCBhbWV0IGp1c3RvIGFjIGRpYW0gdWx0cmljaWVzIHVsdHJpY2llcy4gU2VkIHRpbmNpZHVudCBlcmF0IHF1aXMgcXVhbSB0ZW1wdXMgdmVsIGludGVyZHVtIGVyYXQgcmhvbmN1cy4gSW4gaGFjIGhhYml0YXNzZSBwbGF0ZWEg
ZGljdHVtc3QuIFZlc3RpYnVsdW0gdmVoaWN1bGEgdmFyaXVzIHNlbSBlZ2V0IGludGVyZHVtLiBDcmFzIGJpYmVuZHVtIGxlbyBuZWMgZmVsaXMgdmVuZW5hdGlzIHNlZCBwaGFyZXRyYSBzZW0gZmV1Z2lhdC4gQ3VtIHNvY2lpcyBuYXRvcXVlIHBlbmF0aWJ1cyBldCBtYWduaXMgZGlzIHBhcnR1cmllbnQgbW9udGVzLCBuYXNjZXR1ciByaWRpY3VsdXMgbXVzLiBTZWQgcXVhbSBvcmNpLCBtb2xsaXMgZWdldCBzYWdpdHRpcyBhY2N1bXNhbiwgdnVscHV0YXRlIHNpdCBhbWV0IGR1aS4gUHJhZXNlbnQgZXUgZWxlbWVudHVtIGFyY3UuCgpMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBWZXN0aWJ1bHVtIG5pc2wgbWV0dXMsIGhlbmRyZXJpdCB1dCBsYW9yZWV0IHNlZCwgY29uc2VjdGV0dXIgYXQgcHVydXMuIER1aXMgaW50ZXJkdW0gY29uZ3VlIGxvYm9ydGlzLiBOdWxsYW0gc2VkIG1hc3NhIHBvcnRhIGZlbGlzIGVsZWlmZW5kIGNvbnNlcXVhdCBzaXQgYW1ldCBuZWMgbWV0dXMuIEFsaXF1YW0gcGxhY2VyYXQgZGljdHVtIGVyYXQgYXQgZWxlaWZlbmQuIFZlc3RpYnVsdW0gbGliZXJvIGFudGUsIHVsbGFtY29ycGVyIGEgcG9ydHRpdG9yIHN1c2NpcGl0LCBhY2N1bXNhbiB2ZWwgbmlzaS4gRG9uZWMgZXQgbWFnbmEgbmVxdWUuIE5hbSBlbGVtZW50dW0gdWx0cmljZXMganVzdG8sIGVnZXQgc29sbGljaXR1ZGluIHNhcGllbiBpbXBlcmRpZXQgZWdldC4gTnVsbGFtIGF1Y3RvciBkaWN0dW0gbnVuYywgYXQgZmV1Z2lhdCBvZGlvIHZlc3RpYnVsdW0gYS4gU2VkIGVyYXQgbnVsbGEsIHZpdmVycmEgaGVuZHJlcml0IGNvbW1vZG8gaWQsIHVsbGFtY29ycGVyIGFjIG9yY2kuIFBoYXNlbGx1cyBwZWxsZW50ZXNxdWUgZmV1Z2lhdCBzdXNjaXBpdC4gRXRpYW0gZWdlc3RhcyBmZXJtZW50dW0gZW5pbS4gRXRpYW0gZ3JhdmlkYSBpbnRlcmR1bSB0ZWxsdXMgYWMgbGFvcmVldC4gTW9yYmkgbWF0dGlzIGFsaXF1ZXQgZXJvcywgbm9uIHRlbXBvciBlcmF0IHVsbGFtY29ycGVyIGluLiBFdGlhbSBwdWx2aW5hciBpbnRlcmR1bSB0dXJwaXMgYWMgdmVoaWN1bGEuIFNlZCBxdWFtIGp1c3RvLCBhY2N1bXNhbiBpZCBjb25zZWN0ZXR1ciBhLCBhbGlxdWV0IHNlZCBsZW8uIEFlbmVhbiB2aXRhZSBibGFuZGl0IG1hdXJpcy4KCkluIHNlZCBlcm9zIGF1Z3VlLCBub24gcnV0cnVtIG9kaW8uIEV0aWFtIHZpdGFlIGR1aSBuZXF1ZSwgaW4gdHJpc3RpcXVlIG1hc3NhLiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgTWFlY2VuYXMgZGljdHVtIGVsaXQgYXQgbGVjdHVzIHRlbXBvciBub24gcGhhcmV0cmEgbmlzbCBoZW5kcmVyaXQuIFNlZCBzZWQgcXVhbSBldSBsZWN0dXMgdWx0cmljZXMgbWFsZXN1YWRhIHRpbmNpZHVudCBhIGVzdC4gTmFtIHZlbCBlcm9zIHJpc3VzLiBNYWVjZW5hcyBlcm9zIGVsaXQsIGJsYW5kaXQgZmVybWVudHVtIHRlbXBvciBlZ2V0LCBsb2JvcnRpcyBpZCBkaWFtLiBWZXN0aWJ1bHVtIGxhY2luaWEgbGFjdXMgdml0YWUgbWFnbmEgdm9sdXRwYXQgZXUgZGlnbmlzc2ltIGVyb3MgY29udmFsbGlzLiBWaXZhbXVzIGFjIHZlbGl0IHRlbGx1cywgYSBjb25ndWUgbmVxdWUuIEludGVnZXIgbWkgbnVsbGEsIHZhcml1cyBub24gbHVjdHVzIGluLCBkaWN0dW0gc2l0IGFtZXQgc2VtLiBVdCBsYW9yZWV0LCBzYXBpZW4gc2l0IGFtZXQgc2NlbGVyaXNxdWUgcG9ydGEsIHB1cnVzIHNhcGllbiB2ZXN0aWJ1bHVtIG5pYmgsIHNlZCBsdWN0dXMgbGliZXJvIG1hc3NhIGFjIGVsaXQuIERvbmVjIGlhY3VsaXMgb2RpbyBlZ2V0IG9kaW8gc2FnaXR0aXMgbmVjIHZlbmVuYXRpcyBsb3JlbSBibGFuZGl0LgoKQWxpcXVhbSBpbXBlcmRpZXQgdGVsbHVzIHBvc3VlcmUganVzdG8gdmVoaWN1bGEgc2VkIHZlc3RpYnVsdW0gYW50ZSB0cmlzdGlxdWUuIEZ1c2NlIGZldWdpYXQgZmF1Y2lidXMgcHVydXMgbmVjIG1vbGVzdGllLiBOdWxsYSB0ZW1wb3IgbmVxdWUgaWQgbWFnbmEgaWFjdWxpcyBxdWlzIHNvbGxpY2l0dWRpbiBlcm9zIHNlbXBlci4gUHJhZXNlbnQgdml2ZXJyYSBzYWdpdHRpcyBsdWN0dXMuIE1vcmJpIHNpdCBhbWV0IG1hZ25hIHNlZCBvZGlvIGdyYXZpZGEgdmFyaXVzLiBVdCBuaXNpIGxpYmVybywgdnVscHV0YXRlIGZldWdpYXQgcHJldGl1bSB0ZW1wdXMsIGVnZXN0YXMgc2l0IGFtZXQganVzdG8uIFBlbGxlbnRlc3F1ZSBjb25zZXF1YXQgdGVtcG9yIG5pc2kgaW4gbG9ib3J0aXMuIFNlZCBmZXJtZW50dW0gY29udmFsbGlzIGR1aSBhYyBzb2xsaWNpdHVkaW4uIEludGVnZXIgYXVjdG9yIGF1Z3VlIGVnZXQgdGVsbHVzIHRlbXB1cyBmcmluZ2lsbGEuIFByb2luIG5lYyBkb2xvciBzYXBpZW4sIG5lYyB0cmlzdGlxdWUgbmliaC4gQWxpcXVhbSBhIHZlbGl0IGF0IG1pIG1hdHRpcyBhbGlxdWV0LgoKUGVsbGVudGVzcXVlIGhhYml0YW50IG1vcmJpIHRyaXN0aXF1ZSBzZW5lY3R1cyBldCBuZXR1cyBldCBtYWxlc3VhZGEgZmFtZXMgYWMgdHVycGlzIGVnZXN0YXMuIEFsaXF1YW0gdWx0cmljZXMgZXJhdCBub24gdHVycGlzIGF1Y3RvciBpZCBvcm5hcmUgbWF1cmlzIHNhZ2l0dGlzLiBRdWlzcXVlIHBvcnR0aXRvciwgdGVsbHV
zIHV0IGNvbnZhbGxpcyBzYWdpdHRpcywgbWkgbGliZXJvIGZldWdpYXQgdGVsbHVzLCByaG9uY3VzIHBsYWNlcmF0IGlwc3VtIHRvcnRvciBpZCByaXN1cy4gRG9uZWMgdGluY2lkdW50IGZldWdpYXQgbGVvLiBDcmFzIGlkIG1pIG5lcXVlLCBldSBtYWxlc3VhZGEgZXJvcy4gVXQgbW9sZXN0aWUgbWFnbmEgcXVpcyBsaWJlcm8gcGxhY2VyYXQgbWFsZXN1YWRhLiBBbGlxdWFtIGVyYXQgdm9sdXRwYXQuIEFsaXF1YW0gbm9uIG1hdXJpcyBsb3JlbSwgaW4gYWRpcGlzY2luZyBtZXR1cy4gRG9uZWMgZWdldCBpcHN1bSBpbiBlbGl0IGNvbW1vZG8gb3JuYXJlIGJpYmVuZHVtIGEgbmliaC4gVml2YW11cyBvZGlvIGVyYXQsIHBsYWNlcmF0IGFjIHZlc3RpYnVsdW0gZWdldCwgbWFsZXN1YWRhIHV0IG5pc2kuIEV0aWFtIHN1c2NpcGl0IHNvbGxpY2l0dWRpbiBsZW8gc2VtcGVyIHNvbGxpY2l0dWRpbi4gU2VkIHJob25jdXMgcmlzdXMgc2l0IGFtZXQgc2VtIGVsZWlmZW5kIGRpY3R1bSBwcmV0aXVtIHNhcGllbiBlZ2VzdGFzLiBOdWxsYSBhdCB1cm5hIG51bmMsIHZlbCBhbGlxdWV0IGxlby4gUHJhZXNlbnQgdWx0cmljaWVzLCBtaSBldSBwcmV0aXVtIGxvYm9ydGlzLCBlcmF0IG5pYmggZXVpc21vZCBsZW8sIHNpdCBhbWV0IGdyYXZpZGEgc2FwaWVuIGVyb3MgZXQgdHVycGlzLiBEb25lYyBsYWNpbmlhIHZlbmVuYXRpcyBsZWN0dXMsIG5vbiBsYWNpbmlhIG1pIGhlbmRyZXJpdCBzaXQgYW1ldC4gSW50ZWdlciBzZWQgZmVsaXMgdmVsIG9yY2kgYWxpcXVhbSBwdWx2aW5hci4gUGhhc2VsbHVzIGV0IHJpc3VzIGlkIGVyYXQgZXVpc21vZCB0aW5jaWR1bnQuIFNlZCBsdWN0dXMgdGVtcG9yIG5pc2ksIG5lYyB0ZW1wb3IgaXBzdW0gZWxlbWVudHVtIGVnZXQuIEludGVnZXIgbmlzbCB0b3J0b3IsIHZpdmVycmEgaW4gZGFwaWJ1cyBhdCwgbWF0dGlzIGFjIGVyYXQuIEN1cmFiaXR1ciBuZWMgZHVpIGxlY3R1cy4KClBoYXNlbGx1cyBzdXNjaXBpdCwgdG9ydG9yIGV1IHZhcml1cyBmcmluZ2lsbGEsIHNhcGllbiBtYWduYSBlZ2VzdGFzIHJpc3VzLCB1dCBzdXNjaXBpdCBkdWkgbWF1cmlzIHF1aXMgdmVsaXQuIENyYXMgYSBzYXBpZW4gcXVpcyBzYXBpZW4gaGVuZHJlcml0IHRyaXN0aXF1ZSBhIHNpdCBhbWV0IGVsaXQuIFBlbGxlbnRlc3F1ZSBkdWkgYXJjdSwgbWFsZXN1YWRhIGV0IHNvZGFsZXMgc2l0IGFtZXQsIGRhcGlidXMgdmVsIHF1YW0uIFNlZCBub24gYWRpcGlzY2luZyBsaWd1bGEuIFV0IHZ1bHB1dGF0ZSBwdXJ1cyBhdCBuaXNsIHBvc3VlcmUgc29kYWxlcy4gTWFlY2VuYXMgZGlhbSB2ZWxpdCwgdGluY2lkdW50IGlkIG1hdHRpcyBldSwgYWxpcXVhbSBhYyBuaXNpLiBNYWVjZW5hcyBwcmV0aXVtLCBhdWd1ZSBhIHNhZ2l0dGlzIHN1c2NpcGl0LCBsZW8gbGlndWxhIGVsZWlmZW5kIGRvbG9yLCBtb2xsaXMgZmV1Z2lhdCBvZGlvIGF1Z3VlIG5vbiBlcm9zLiBQZWxsZW50ZXNxdWUgc2NlbGVyaXNxdWUgb3JjaSBwcmV0aXVtIHF1YW0gbW9sbGlzIGF0IGxvYm9ydGlzIGR1aSBmYWNpbGlzaXMuIE1vcmJpIGNvbmd1ZSBtZXR1cyBpZCB0b3J0b3IgcG9ydGEgZnJpbmdpbGxhLiBTZWQgbG9yZW0gbWksIG1vbGVzdGllIGZlcm1lbnR1bSBzYWdpdHRpcyBhdCwgZ3JhdmlkYSBhIG5pc2kuIERvbmVjIGV1IHZlc3RpYnVsdW0gdmVsaXQuIEluIHZpdmVycmEsIGVuaW0gZXUgZWxlbWVudHVtIHNvZGFsZXMsIGVuaW0gb2RpbyBkYXBpYnVzIHVybmEsIGVnZXQgY29tbW9kbyBuaXNsIG1hdXJpcyB1dCBvZGlvLiBDdXJhYml0dXIgbmVjIGVuaW0gbnVsbGEuIEluIG5lYyBlbGl0IGlwc3VtLiBOdW5jIGluIG1hc3NhIHN1c2NpcGl0IG1hZ25hIGVsZW1lbnR1bSBmYXVjaWJ1cyBpbiBuZWMgaXBzdW0uIE51bGxhbSBzdXNjaXBpdCBtYWxlc3VhZGEgZWxlbWVudHVtLiBFdGlhbSBzZWQgbWkgaW4gbmliaCB1bHRyaWNpZXMgdmVuZW5hdGlzIG5lYyBwaGFyZXRyYSBtYWduYS4gSW4gcHVydXMgYW50ZSwgcmhvbmN1cyB2ZWwgcGxhY2VyYXQgc2VkLCBmZXJtZW50dW0gc2l0IGFtZXQgZHVpLiBTZWQgYXQgc29kYWxlcyB2ZWxpdC4KCkR1aXMgc3VzY2lwaXQgcGVsbGVudGVzcXVlIHBlbGxlbnRlc3F1ZS4gUHJhZXNlbnQgcG9ydGEgbG9ib3J0aXMgY3Vyc3VzLiBRdWlzcXVlIHNhZ2l0dGlzIHZlbGl0IG5vbiB0ZWxsdXMgYmliZW5kdW0gYXQgc29sbGljaXR1ZGluIGxhY3VzIGFsaXF1ZXQuIFNlZCBuaWJoIHJpc3VzLCBibGFuZGl0IGEgYWxpcXVldCBlZ2V0LCB2ZWhpY3VsYSBldCBlc3QuIFN1c3BlbmRpc3NlIGZhY2lsaXNpcyBiaWJlbmR1bSBhbGlxdWFtLiBGdXNjZSBjb25zZWN0ZXR1ciBjb252YWxsaXMgZXJhdCwgZWdldCBtb2xsaXMgZGlhbSBmZXJtZW50dW0gc29sbGljaXR1ZGluLiBRdWlzcXVlIHRpbmNpZHVudCBwb3J0dGl0b3IgcHJldGl1bS4gTnVsbGFtIGlkIG5pc2wgZXQgdXJuYSB2dWxwdXRhdGUgZGFwaWJ1cy4gRG9uZWMgcXVpcyBsb3JlbSB1cm5hLiBRdWlzcXVlIGlkIGp1c3RvIG5lYyBudW5jIGJsYW5kaXQgY29udmFsbGlzLiBOdW5jIHZvbHV0cGF0LCBtYXNzYSBzb2xsaWNpdHVkaW4gYWRpcGlzY2luZyB2ZXN0aWJ1bHVtLCBtYXNzYSB1cm5hIGNvbmd1ZSBsZWN0dXMsIHNpdCBhbWV0IHVsdHJpY2llcyBhdWd1ZSBvcmNpIGNvbnZhbGxpcyB0dXJwaXMuIE51bGxhIGF0IG
xvcmVtIGVsaXQuIE51bmMgdHJpc3RpcXVlLCBxdWFtIGZhY2lsaXNpcyBjb21tb2RvIHBvcnR0aXRvciwgbGFjdXMgbGlndWxhIGFjY3Vtc2FuIG5pc2ksIGV0IGxhb3JlZXQganVzdG8gYW50ZSB2aXRhZSBlcm9zLiBDdXJhYml0dXIgc2VkIGF1Z3VlIGFyY3UuIFBoYXNlbGx1cyBwb3J0dGl0b3IgdmVzdGlidWx1bSBmZWxpcywgdXQgY29uc2VjdGV0dXIgYXJjdSB0ZW1wb3Igbm9uLiBJbiBqdXN0byByaXN1cywgc2VtcGVyIGV0IHN1c2NpcGl0IGlkLCB1bGxhbWNvcnBlciBhdCB1cm5hLiBRdWlzcXVlIHRpbmNpZHVudCwgdXJuYSBuZWMgYWxpcXVhbSB0cmlzdGlxdWUsIG5pYmggb2RpbyBmYXVjaWJ1cyBhdWd1ZSwgaW4gb3JuYXJlIGVuaW0gdHVycGlzIGFjY3Vtc2FuIGRvbG9yLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gU3VzcGVuZGlzc2Ugc29kYWxlcyB2YXJpdXMgdHVycGlzIGV1IGZlcm1lbnR1bS4KCk1vcmJpIHVsdHJpY2llcyBkaWFtIGVnZXQgbWFzc2EgcG9zdWVyZSBsb2JvcnRpcy4gQWxpcXVhbSB2b2x1dHBhdCBwZWxsZW50ZXNxdWUgZW5pbSBldSBwb3J0dGl0b3IuIERvbmVjIGxhY3VzIGZlbGlzLCBjb25zZWN0ZXR1ciBhIHByZXRpdW0gdml0YWUsIGJpYmVuZHVtIG5vbiBlbmltLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gRXRpYW0gdXQgbmliaCBhIHF1YW0gcGVsbGVudGVzcXVlIGF1Y3RvciB1dCBpZCB2ZWxpdC4gRHVpcyBsYWNpbmlhIGp1c3RvIGVnZXQgbWkgcGxhY2VyYXQgYmliZW5kdW0uIEN1bSBzb2NpaXMgbmF0b3F1ZSBwZW5hdGlidXMgZXQgbWFnbmlzIGRpcyBwYXJ0dXJpZW50IG1vbnRlcywgbmFzY2V0dXIgcmlkaWN1bHVzIG11cy4gRG9uZWMgdmVsaXQgdG9ydG9yLCB0ZW1wdXMgbmVjIHRyaXN0aXF1ZSBpZCwgYWxpcXVldCBzaXQgYW1ldCB0dXJwaXMuIFByYWVzZW50IGV0IG5lcXVlIG5lYyBtYWduYSBwb3J0YSBmcmluZ2lsbGEuIE1vcmJpIGlkIGVnZXN0YXMgZXJvcy4gRG9uZWMgc2VtcGVyIHRpbmNpZHVudCB1bGxhbWNvcnBlci4gUGhhc2VsbHVzIHRlbXB1cyBsYWNpbmlhIGhlbmRyZXJpdC4gUXVpc3F1ZSBmYXVjaWJ1cyBwcmV0aXVtIG5lcXVlIG5vbiBjb252YWxsaXMuIE51bmMgbWFsZXN1YWRhIGFjY3Vtc2FuIHJob25jdXMuIENyYXMgbG9ib3J0aXMsIHNlbSBzZWQgZnJpbmdpbGxhIGNvbnZhbGxpcywgYXVndWUgdmVsaXQgc2VtcGVyIG5pc2wsIGNvbW1vZG8gdmFyaXVzIG5pc2kgZGlhbSBhYyBsZW8uCgpRdWlzcXVlIGludGVyZHVtIHRlbGx1cyBhYyBhbnRlIHBvc3VlcmUgdXQgY3Vyc3VzIGxvcmVtIGVnZXN0YXMuIE51bGxhIGZhY2lsaXNpLiBBZW5lYW4gc2VkIG1hc3NhIG5lYyBuaXNpIHNjZWxlcmlzcXVlIHZ1bHB1dGF0ZS4gRXRpYW0gY29udmFsbGlzIGNvbnNlY3RldHVyIGlhY3VsaXMuIE1hZWNlbmFzIGFjIHB1cnVzIHV0IGFudGUgZGlnbmlzc2ltIGF1Y3RvciBhYyBxdWlzIGxvcmVtLiBQZWxsZW50ZXNxdWUgc3VzY2lwaXQgdGluY2lkdW50IG9yY2kuIEZ1c2NlIGFsaXF1YW0gZGFwaWJ1cyBvcmNpLCBhdCBiaWJlbmR1bSBpcHN1bSBhZGlwaXNjaW5nIGVnZXQuIE1vcmJpIHBlbGxlbnRlc3F1ZSBoZW5kcmVyaXQgcXVhbSwgbmVjIHBsYWNlcmF0IHVybmEgdnVscHV0YXRlIHNlZC4gUXVpc3F1ZSB2ZWwgZGlhbSBsb3JlbS4gUHJhZXNlbnQgaWQgZGlhbSBxdWlzIGVuaW0gZWxlbWVudHVtIHJob25jdXMgc2FnaXR0aXMgZWdldCBwdXJ1cy4gUXVpc3F1ZSBmcmluZ2lsbGEgYmliZW5kdW0gbGVvIGluIGxhb3JlZXQuIFZlc3RpYnVsdW0gaWQgbmliaCByaXN1cywgbm9uIGVsZW1lbnR1bSBtZXR1cy4gVXQgYSBmZWxpcyBkaWFtLCBub24gbW9sbGlzIG5pc2wuIENyYXMgZWxpdCBhbnRlLCB1bGxhbWNvcnBlciBxdWlzIGlhY3VsaXMgZXUsIHNvZGFsZXMgdmVsIGVzdC4gQ3VyYWJpdHVyIHF1aXMgbG9ib3J0aXMgZG9sb3IuIEFsaXF1YW0gbWF0dGlzIGdyYXZpZGEgbWV0dXMgcGVsbGVudGVzcXVlIHZ1bHB1dGF0ZS4KClV0IGlkIGF1Z3VlIGlkIGRvbG9yIGx1Y3R1cyBldWlzbW9kIGV0IHF1aXMgdmVsaXQuIE1hZWNlbmFzIGVuaW0gZG9sb3IsIHRlbXB1cyBzaXQgYW1ldCBoZW5kcmVyaXQgZXUsIGZhdWNpYnVzIHZpdGFlIG5lcXVlLiBQcm9pbiBzaXQgYW1ldCB2YXJpdXMgZWxpdC4gUHJvaW4gdmFyaXVzIGZlbGlzIHVsbGFtY29ycGVyIHB1cnVzIGRpZ25pc3NpbSBjb25zZXF1YXQuIENyYXMgY3Vyc3VzIHRlbXB1cyBlcm9zLiBOdW5jIHVsdHJpY2VzIHZlbmVuYXRpcyB1bGxhbWNvcnBlci4gQWxpcXVhbSBldCBmZXVnaWF0IHRlbGx1cy4gUGhhc2VsbHVzIHNpdCBhbWV0IHZlc3RpYnVsdW0gZWxpdC4gUGhhc2VsbHVzIGFjIHB1cnVzIGxhY3VzLCBldCBhY2N1bXNhbiBlcm9zLiBNb3JiaSB1bHRyaWNlcywgcHVydXMgYSBwb3J0YSBzb2RhbGVzLCBvZGlvIG1ldHVzIHBvc3VlcmUgbmVxdWUsIG5lYyBlbGVtZW50dW0gcmlzdXMgdHVycGlzIHNpdCBhbWV0IG1hZ25hLiBTZWQgZXN0IHF1YW0sIHVsdHJpY2llcyBhdCBjb25ndWUgYWRpcGlzY2luZywgbG9ib3J0a
XMgaW4ganVzdG8uIFByb2luIGlhY3VsaXMgZGljdHVtIG51bmMsIGV1IGxhb3JlZXQgcXVhbSB2YXJpdXMgdml0YWUuIERvbmVjIHNpdCBhbWV0IGZldWdpYXQgdHVycGlzLiBNYXVyaXMgc2l0IGFtZXQgbWFnbmEgcXVhbSwgYWMgY29uc2VjdGV0dXIgZHVpLiBDdXJhYml0dXIgZWdldCBtYWduYSB0ZWxsdXMsIGV1IHBoYXJldHJhIGZlbGlzLiBEb25lYyBzaXQgYW1ldCB0b3J0b3IgbmlzbC4gQWxpcXVhbSBldCB0b3J0b3IgZmFjaWxpc2lzIGxhY3VzIHRpbmNpZHVudCBjb21tb2RvLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gQ3VyYWJpdHVyIG51bmMgbWFnbmEsIHVsdHJpY2llcyBpZCBjb252YWxsaXMgYXQsIHVsbGFtY29ycGVyIHZpdGFlIG1hc3NhLgoKUGhhc2VsbHVzIHZpdmVycmEgaWFjdWxpcyBwbGFjZXJhdC4gTnVsbGEgY29uc2VxdWF0IGRvbG9yIHNpdCBhbWV0IGVyYXQgZGlnbmlzc2ltIHBvc3VlcmUuIE51bGxhIGxhY2luaWEgYXVndWUgdml0YWUgbWkgdGVtcG9yIGdyYXZpZGEuIFBoYXNlbGx1cyBub24gdGVtcG9yIHRlbGx1cy4gUXVpc3F1ZSBub24gZW5pbSBzZW1wZXIgdG9ydG9yIHNhZ2l0dGlzIGZhY2lsaXNpcy4gQWxpcXVhbSB1cm5hIGZlbGlzLCBlZ2VzdGFzIGF0IHBvc3VlcmUgbmVjLCBhbGlxdWV0IGV1IG5pYmguIFByYWVzZW50IHNlZCB2ZXN0aWJ1bHVtIGVuaW0uIE1hdXJpcyBpYWN1bGlzIHZlbGl0IGR1aSwgZXQgZnJpbmdpbGxhIGVuaW0uIE51bGxhIG5lYyBuaXNpIG9yY2kuIFNlZCB2b2x1dHBhdCwganVzdG8gZWdldCBmcmluZ2lsbGEgYWRpcGlzY2luZywgbmlzbCBudWxsYSBjb25kaW1lbnR1bSBsaWJlcm8sIHNlZCBzb2RhbGVzIGVzdCBlc3QgZXQgb2Rpby4gQ3JhcyBpcHN1bSBkdWksIHZhcml1cyBldSBlbGVtZW50dW0gY29uc2VxdWF0LCBmYXVjaWJ1cyBpbiBsZW8uIFBlbGxlbnRlc3F1ZSBoYWJpdGFudCBtb3JiaSB0cmlzdGlxdWUgc2VuZWN0dXMgZXQgbmV0dXMgZXQgbWFsZXN1YWRhIGZhbWVzIGFjIHR1cnBpcyBlZ2VzdGFzLgoKVXQgbWFsZXN1YWRhIG1vbGVzdGllIGVsZWlmZW5kLiBDdXJhYml0dXIgaWQgZW5pbSBkdWksIGV1IHRpbmNpZHVudCBuaWJoLiBNYXVyaXMgc2l0IGFtZXQgYW50ZSBsZW8uIER1aXMgdHVycGlzIGlwc3VtLCBiaWJlbmR1bSBzZWQgbWF0dGlzIHNpdCBhbWV0LCBhY2N1bXNhbiBxdWlzIGRvbG9yLiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgQWVuZWFuIGEgaW1wZXJkaWV0IG1ldHVzLiBRdWlzcXVlIHNvbGxpY2l0dWRpbiBmZWxpcyBpZCBuZXF1ZSB0ZW1wb3Igc2NlbGVyaXNxdWUuIERvbmVjIGF0IG9yY2kgZmVsaXMuIFZpdmFtdXMgdGVtcHVzIGNvbnZhbGxpcyBhdWN0b3IuIERvbmVjIGludGVyZHVtIGV1aXNtb2QgbG9ib3J0aXMuIFNlZCBhdCBsYWN1cyBuZWMgb2RpbyBkaWduaXNzaW0gbW9sbGlzLiBTZWQgc2FwaWVuIG9yY2ksIHBvcnR0aXRvciB0ZW1wdXMgYWNjdW1zYW4gdmVsLCB0aW5jaWR1bnQgbmVjIGFudGUuIE51bmMgcmhvbmN1cyBlZ2VzdGFzIGRhcGlidXMuIFN1c3BlbmRpc3NlIGZlcm1lbnR1bSBkaWN0dW0gZnJpbmdpbGxhLiBOdWxsYW0gbmlzaSBqdXN0bywgZWxlaWZlbmQgYSBjb25zZWN0ZXR1ciBjb252YWxsaXMsIHBvcnR0aXRvciBldCB0b3J0b3IuIFByb2luIHZpdGFlIGxvcmVtIG5vbiBkb2xvciBzdXNjaXBpdCBsYWNpbmlhIGV1IGVnZXQgbnVsbGEuCgpTdXNwZW5kaXNzZSBlZ2VzdGFzLCBzYXBpZW4gc2l0IGFtZXQgYmxhbmRpdCBzY2VsZXJpc3F1ZSwgbnVsbGEgYXJjdSB0cmlzdGlxdWUgZHVpLCBhIHBvcnRhIGp1c3RvIHF1YW0gdml0YWUgYXJjdS4gSW4gbWV0dXMgbGliZXJvLCBiaWJlbmR1bSBub24gdm9sdXRwYXQgdXQsIGxhb3JlZXQgdmVsIHR1cnBpcy4gTnVuYyBmYXVjaWJ1cyB2ZWxpdCBldSBpcHN1bSBjb21tb2RvIG5lYyBpYWN1bGlzIGVyb3Mgdm9sdXRwYXQuIFZpdmFtdXMgY29uZ3VlIGF1Y3RvciBlbGl0IHNlZCBzdXNjaXBpdC4gRHVpcyBjb21tb2RvLCBsaWJlcm8gZXUgdmVzdGlidWx1bSBmZXVnaWF0LCBsZW8gbWkgZGFwaWJ1cyB0ZWxsdXMsIGluIHBsYWNlcmF0IG5pc2wgZHVpIGF0IGVzdC4gVmVzdGlidWx1bSB2aXZlcnJhIHRyaXN0aXF1ZSBsb3JlbSwgb3JuYXJlIGVnZXN0YXMgZXJhdCBydXRydW0gYS4gTnVsbGFtIGF0IGF1Z3VlIG1hc3NhLCB1dCBjb25zZWN0ZXR1ciBpcHN1bS4gUGVsbGVudGVzcXVlIG1hbGVzdWFkYSwgdmVsaXQgdXQgbG9ib3J0aXMgc2FnaXR0aXMsIG5pc2kgbWFzc2Egc2VtcGVyIG9kaW8sIG1hbGVzdWFkYSBzZW1wZXIgcHVydXMgbmlzbCB2ZWwgbGVjdHVzLiBOdW5jIGR1aSBzZW0sIG1hdHRpcyB2aXRhZSBsYW9yZWV0IHZpdGFlLCBzb2xsaWNpdHVkaW4gYWMgbGVvLiBOdWxsYSB2ZWwgZmVybWVudHVtIGVzdC4KClZpdmFtdXMgaW4gb2RpbyBhIG5pc2kgZGlnbmlzc2ltIHJob25jdXMgaW4gaW4gbGFjdXMuIERvbmVjIGV0IG5pc2wgdG9ydG9yLiBEb25lYyBzYWdpdHRpcyBjb25zZXF1YXQgbWksIHZlbCBwbGFjZXJhdCB0ZWxsdXMgY29udmFsbGlzIGlkLiBBbGlxdWFt
IGZhY2lsaXNpcyBydXRydW0gbmlzbCBzZWQgcHJldGl1bS4gRG9uZWMgZXQgbGFjaW5pYSBuaXNsLiBBbGlxdWFtIGVyYXQgdm9sdXRwYXQuIEN1cmFiaXR1ciBhYyBwdWx2aW5hciB0ZWxsdXMuIE51bGxhbSB2YXJpdXMgbG9ib3J0aXMgcG9ydGEuIENyYXMgZGFwaWJ1cywgbGlndWxhIHV0IHBvcnRhIHVsdHJpY2llcywgbGVvIGxhY3VzIHZpdmVycmEgcHVydXMsIHF1aXMgbW9sbGlzIHVybmEgcmlzdXMgZXUgbGVvLiBOdW5jIG1hbGVzdWFkYSBjb25zZWN0ZXR1ciBwdXJ1cywgdmVsIGF1Y3RvciBsZWN0dXMgc2NlbGVyaXNxdWUgcG9zdWVyZS4gTWFlY2VuYXMgZHVpIG1hc3NhLCB2ZXN0aWJ1bHVtIGJpYmVuZHVtIGJsYW5kaXQgbm9uLCBpbnRlcmR1bSBlZ2V0IG1hdXJpcy4gUGhhc2VsbHVzIGVzdCBhbnRlLCBwdWx2aW5hciBhdCBpbXBlcmRpZXQgcXVpcywgaW1wZXJkaWV0IHZlbCB1cm5hLiBRdWlzcXVlIGVnZXQgdm9sdXRwYXQgb3JjaS4gUXVpc3F1ZSBldCBhcmN1IHB1cnVzLCB1dCBmYXVjaWJ1cyB2ZWxpdC4KClByYWVzZW50IHNlZCBpcHN1bSB1cm5hLiBQcmFlc2VudCBzYWdpdHRpcyB2YXJpdXMgbWFnbmEsIGlkIGNvbW1vZG8gZG9sb3IgbWFsZXN1YWRhIGFjLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gUXVpc3F1ZSBzaXQgYW1ldCBudW5jIGV1IHNlbSBvcm5hcmUgdGVtcG9yLiBNYXVyaXMgaWQgZG9sb3IgbmVjIGVyYXQgY29udmFsbGlzIHBvcnRhIGluIGxvYm9ydGlzIG5pc2kuIEN1cmFiaXR1ciBoZW5kcmVyaXQgcmhvbmN1cyB0b3J0b3IgZXUgaGVuZHJlcml0LiBQZWxsZW50ZXNxdWUgZXUgYW50ZSB2ZWwgZWxpdCBsdWN0dXMgZWxlaWZlbmQgcXVpcyB2aXZlcnJhIG51bGxhLiBTdXNwZW5kaXNzZSBvZGlvIGRpYW0sIGV1aXNtb2QgZXUgcG9ydHRpdG9yIG1vbGVzdGllLCBzb2xsaWNpdHVkaW4gc2l0IGFtZXQgbnVsbGEuIFNlZCBhbnRlIHVybmEsIGRpY3R1bSBiaWJlbmR1bSByaG9uY3VzIGV0LCBibGFuZGl0IG5lYyBhbnRlLiBTdXNwZW5kaXNzZSB0b3J0b3IgYXVndWUsIGFjY3Vtc2FuIHF1aXMgc3VzY2lwaXQgaWQsIGFjY3Vtc2FuIHNpdCBhbWV0IGVyYXQuIERvbmVjIHBoYXJldHJhIHZhcml1cyBsb2JvcnRpcy4gTWFlY2VuYXMgaXBzdW0gZGlhbSwgZmF1Y2lidXMgZXUgdGVtcHVzIGlkLCBjb252YWxsaXMgbmVjIGVuaW0uIER1aXMgYXJjdSB0dXJwaXMsIGZyaW5naWxsYSBuZWMgZWdlc3RhcyB1dCwgZGlnbmlzc2ltIHRyaXN0aXF1ZSBudWxsYS4gQ3VyYWJpdHVyIHN1c2NpcGl0IGR1aSBub24ganVzdG8gdWx0cmljZXMgcGhhcmV0cmEuIEFsaXF1YW0gZXJhdCB2b2x1dHBhdC4gTnVsbGEgZmFjaWxpc2kuIFF1aXNxdWUgaWQgZmVsaXMgZXUgc2VtIGFsaXF1YW0gZnJpbmdpbGxhLgoKRXRpYW0gcXVpcyBhdWd1ZSBpbiB0ZWxsdXMgY29uc2VxdWF0IGVsZWlmZW5kLiBBZW5lYW4gZGlnbmlzc2ltIGNvbmd1ZSBmZWxpcyBpZCBlbGVtZW50dW0uIER1aXMgZnJpbmdpbGxhIHZhcml1cyBpcHN1bSwgbmVjIHN1c2NpcGl0IGxlbyBzZW1wZXIgdmVsLiBVdCBzb2xsaWNpdHVkaW4sIG9yY2kgYSB0aW5jaWR1bnQgYWNjdW1zYW4sIGRpYW0gbGVjdHVzIGxhb3JlZXQgbGFjdXMsIHZlbCBmZXJtZW50dW0gcXVhbSBlc3QgdmVsIGVyb3MuIEFsaXF1YW0gZnJpbmdpbGxhIHNhcGllbiBhYyBzYXBpZW4gZmF1Y2lidXMgY29udmFsbGlzLiBBbGlxdWFtIGlkIG51bmMgZXUganVzdG8gY29uc2VxdWF0IHRpbmNpZHVudC4gUXVpc3F1ZSBuZWMgbmlzbCBkdWkuIFBoYXNlbGx1cyBhdWd1ZSBsZWN0dXMsIHZhcml1cyB2aXRhZSBhdWN0b3IgdmVsLCBydXRydW0gYXQgcmlzdXMuIFZpdmFtdXMgbGFjaW5pYSBsZW8gcXVpcyBuZXF1ZSB1bHRyaWNlcyBuZWMgZWxlbWVudHVtIGZlbGlzIGZyaW5naWxsYS4gUHJvaW4gdmVsIHBvcnR0aXRvciBsZWN0dXMuCgpDdXJhYml0dXIgc2FwaWVuIGxvcmVtLCBtb2xsaXMgdXQgYWNjdW1zYW4gbm9uLCB1bHRyaWNpZXMgZXQgbWV0dXMuIEN1cmFiaXR1ciB2ZWwgbG9yZW0gcXVpcyBzYXBpZW4gZnJpbmdpbGxhIGxhb3JlZXQuIE1vcmJpIGlkIHVybmEgYWMgb3JjaSBlbGVtZW50dW0gYmxhbmRpdCBlZ2V0IHZvbHV0cGF0IG5lcXVlLiBQZWxsZW50ZXNxdWUgc2VtIG9kaW8sIGlhY3VsaXMgZXUgcGhhcmV0cmEgdml0YWUsIGN1cnN1cyBpbiBxdWFtLiBOdWxsYSBtb2xlc3RpZSBsaWd1bGEgaWQgbWFzc2EgbHVjdHVzIGV0IHB1bHZpbmFyIG5pc2kgcHVsdmluYXIuIE51bmMgZmVybWVudHVtIGF1Z3VlIGEgbGFjdXMgZnJpbmdpbGxhIHJob25jdXMgcG9ydHRpdG9yIGVyYXQgZGljdHVtLiBOdW5jIHNpdCBhbWV0IHRlbGx1cyBldCBkdWkgdml2ZXJyYSBhdWN0b3IgZXVpc21vZCBhdCBuaXNsLiBJbiBzZWQgY29uZ3VlIG1hZ25hLiBQcm9pbiBldCB0b3J0b3IgdXQgYXVndWUgcGxhY2VyYXQgZGlnbmlzc2ltIGEgZXUganVzdG8uIE1vcmJpIHBvcnR0aXRvciBwb3J0YSBsb2JvcnRpcy4gUGVsbGVudGVzcXVlIG5pYmggbGFjdXMsIGFkaXBpc2NpbmcgdXQgdHJpc3RpcXVlIHF1aXMsIGNvbnNlcXVhdCB2aXRhZSB2ZWxpdC4gTWFlY2VuYXMgdXQgbHVjdHVzIGxpYmVyby4gVml2YW11cyBhdWN0b3Igb2RpbyB
ldCBlcmF0IHNlbXBlciBzYWdpdHRpcy4gVml2YW11cyBpbnRlcmR1bSB2ZWxpdCBpbiByaXN1cyBtYXR0aXMgcXVpcyBkaWN0dW0gYW50ZSByaG9uY3VzLiBJbiBzYWdpdHRpcyBwb3J0dGl0b3IgZXJvcywgYXQgbG9ib3J0aXMgbWV0dXMgdWx0cmljZXMgdmVsLiBDdXJhYml0dXIgbm9uIGFsaXF1YW0gbmlzbC4gVmVzdGlidWx1bSBsdWN0dXMgZmV1Z2lhdCBzdXNjaXBpdC4gRXRpYW0gbm9uIGxhY3VzIHZlbCBudWxsYSBlZ2VzdGFzIGlhY3VsaXMgaWQgcXVpcyByaXN1cy4KCkV0aWFtIGluIGF1Y3RvciB1cm5hLiBGdXNjZSB1bHRyaWNpZXMgbW9sZXN0aWUgY29udmFsbGlzLiBJbiBoYWMgaGFiaXRhc3NlIHBsYXRlYSBkaWN0dW1zdC4gVmVzdGlidWx1bSBhbnRlIGlwc3VtIHByaW1pcyBpbiBmYXVjaWJ1cyBvcmNpIGx1Y3R1cyBldCB1bHRyaWNlcyBwb3N1ZXJlIGN1YmlsaWEgQ3VyYWU7IE1hdXJpcyBpYWN1bGlzIGxvcmVtIGZhdWNpYnVzIHB1cnVzIGdyYXZpZGEgYXQgY29udmFsbGlzIHR1cnBpcyBzb2xsaWNpdHVkaW4uIFN1c3BlbmRpc3NlIGF0IHZlbGl0IGxvcmVtLCBhIGZlcm1lbnR1bSBpcHN1bS4gRXRpYW0gY29uZGltZW50dW0sIGR1aSB2ZWwgY29uZGltZW50dW0gZWxlbWVudHVtLCBzYXBpZW4gc2VtIGJsYW5kaXQgc2FwaWVuLCBldCBwaGFyZXRyYSBsZW8gbmVxdWUgZXQgbGVjdHVzLiBOdW5jIHZpdmVycmEgdXJuYSBpYWN1bGlzIGF1Z3VlIHVsdHJpY2VzIGFjIHBvcnR0aXRvciBsYWN1cyBkaWduaXNzaW0uIEFsaXF1YW0gdXQgdHVycGlzIGR1aS4gU2VkIGVnZXQgYWxpcXVldCBmZWxpcy4gSW4gYmliZW5kdW0gbmliaCBzaXQgYW1ldCBzYXBpZW4gYWNjdW1zYW4gYWNjdW1zYW4gcGhhcmV0cmEgbWFnbmEgbW9sZXN0aWUuCgpNYXVyaXMgYWxpcXVldCB1cm5hIGVnZXQgbGVjdHVzIGFkaXBpc2NpbmcgYXQgY29uZ3VlIHR1cnBpcyBjb25zZXF1YXQuIFZpdmFtdXMgdGluY2lkdW50IGZlcm1lbnR1bSByaXN1cyBldCBmZXVnaWF0LiBOdWxsYSBtb2xlc3RpZSB1bGxhbWNvcnBlciBuaWJoIHNlZCBmYWNpbGlzaXMuIFBoYXNlbGx1cyBldCBjdXJzdXMgcHVydXMuIE5hbSBjdXJzdXMsIGR1aSBkaWN0dW0gdWx0cmljZXMgdml2ZXJyYSwgZXJhdCByaXN1cyB2YXJpdXMgZWxpdCwgZXUgbW9sZXN0aWUgZHVpIGVyb3MgcXVpcyBxdWFtLiBBbGlxdWFtIGV0IGFudGUgbmVxdWUsIGFjIGNvbnNlY3RldHVyIGR1aS4gRG9uZWMgY29uZGltZW50dW0gZXJhdCBpZCBlbGl0IGRpY3R1bSBzZWQgYWNjdW1zYW4gbGVvIHNhZ2l0dGlzLiBQcm9pbiBjb25zZXF1YXQgY29uZ3VlIHJpc3VzLCB2ZWwgdGluY2lkdW50IGxlbyBpbXBlcmRpZXQgZXUuIFZlc3RpYnVsdW0gbWFsZXN1YWRhIHR1cnBpcyBldSBtZXR1cyBpbXBlcmRpZXQgcHJldGl1bS4gQWxpcXVhbSBjb25kaW1lbnR1bSB1bHRyaWNlcyBuaWJoLCBldSBzZW1wZXIgZW5pbSBlbGVpZmVuZCBhLiBFdGlhbSBjb25kaW1lbnR1bSBuaXNsIHF1YW0uCgpQZWxsZW50ZXNxdWUgaWQgbW9sZXN0aWUgbmlzbC4gTWFlY2VuYXMgZXQgbGVjdHVzIGF0IGp1c3RvIG1vbGVzdGllIHZpdmVycmEgc2l0IGFtZXQgc2l0IGFtZXQgbGlndWxhLiBOdWxsYW0gbm9uIHBvcnR0aXRvciBtYWduYS4gUXVpc3F1ZSBlbGVtZW50dW0gYXJjdSBjdXJzdXMgdG9ydG9yIHJ1dHJ1bSBsb2JvcnRpcy4gTW9yYmkgc2l0IGFtZXQgbGVjdHVzIHZpdGFlIGVuaW0gZXVpc21vZCBkaWduaXNzaW0gZWdldCBhdCBuZXF1ZS4gVml2YW11cyBjb25zZXF1YXQgdmVoaWN1bGEgZHVpLCB2aXRhZSBhdWN0b3IgYXVndWUgZGlnbmlzc2ltIGluLiBJbiB0ZW1wdXMgc2VtIHF1aXMganVzdG8gdGluY2lkdW50IHNpdCBhbWV0IGF1Y3RvciB0dXJwaXMgbG9ib3J0aXMuIFBlbGxlbnRlc3F1ZSBub24gZXN0IG51bmMuIFZlc3RpYnVsdW0gbW9sbGlzIGZyaW5naWxsYSBpbnRlcmR1bS4gTWFlY2VuYXMgaXBzdW0gZG9sb3IsIHBoYXJldHJhIGlkIHRyaXN0aXF1ZSBtYXR0aXMsIGx1Y3R1cyB2aXRhZSB1cm5hLiBVdCB1bGxhbWNvcnBlciBhcmN1IGVnZXQgZWxpdCBjb252YWxsaXMgbW9sbGlzLiBQZWxsZW50ZXNxdWUgY29uZGltZW50dW0sIG1hc3NhIGFjIGhlbmRyZXJpdCB0ZW1wb3IsIG1hdXJpcyBwdXJ1cyBibGFuZGl0IGp1c3RvLCBldCBwaGFyZXRyYSBsZW8ganVzdG8gYSBlc3QuIER1aXMgYXJjdSBhdWd1ZSwgZmFjaWxpc2lzIHZlbCBkaWduaXNzaW0gc2VkLCBhbGlxdWFtIHF1aXMgbWFnbmEuIFF1aXNxdWUgbm9uIGNvbnNlcXVhdCBkb2xvci4gU3VzcGVuZGlzc2UgYSB1bHRyaWNlcyBsZW8uCgpEb25lYyB2aXRhZSBwcmV0aXVtIG5pYmguIE1hZWNlbmFzIGJpYmVuZHVtIGJpYmVuZHVtIGRpYW0gaW4gcGxhY2VyYXQuIFV0IGFjY3Vtc2FuLCBtaSB2aXRhZSB2ZXN0aWJ1bHVtIGV1aXNtb2QsIG51bmMganVzdG8gdnVscHV0YXRlIG5pc2ksIG5vbiBwbGFjZXJhdCBtaSB1cm5hIGV0IGRpYW0uIE1hZWNlbmFzIG1hbGVzdWFkYSBsb3JlbSB1dCBhcmN1IG1hdHRpcyBtb2xsaXMuIE51bGxhIGZhY2lsaXNpLiBEb25lYyBlc3QgbGVvLCBiaWJlbmR1bSBldSBwdWx2aW5hciBpbiwgY3Vyc3VzIHZlbCBtZXR1cy4gQWxpcXVhbSBlcmF0IHZvbHV0cGF0LiBOdWxsYW0gZmV1Z2lhdCBwb3J0dGl0b3IgbmVxdWUgaW4gdnVscHV0YXRlLiBRdWlzcXVlIG5lYyBtaSBldS
BtYWduYSBjb25zZXF1YXQgY3Vyc3VzIG5vbiBhdCBhcmN1LiBFdGlhbSByaXN1cyBtZXR1cywgc29sbGljaXR1ZGluIGV0IHVsdHJpY2VzIGF0LCB0aW5jaWR1bnQgc2VkIG51bmMuIFNlZCBlZ2V0IHNjZWxlcmlzcXVlIGF1Z3VlLiBVdCBmcmluZ2lsbGEgdmVuZW5hdGlzIHNlbSBub24gZWxlaWZlbmQuIE51bmMgbWF0dGlzLCByaXN1cyBzaXQgYW1ldCB2dWxwdXRhdGUgdmFyaXVzLCByaXN1cyBqdXN0byBlZ2VzdGFzIG1hdXJpcywgaWQgaW50ZXJkdW0gb2RpbyBpcHN1bSBldCBuaXNsLiBMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBNb3JiaSBpZCBlcmF0IG9kaW8sIG5lYyBwdWx2aW5hciBlbmltLgoKQ3VyYWJpdHVyIGFjIGZlcm1lbnR1bSBxdWFtLiBNb3JiaSBldSBlcm9zIHNhcGllbiwgdml0YWUgdGVtcHVzIGRvbG9yLiBNYXVyaXMgdmVzdGlidWx1bSBibGFuZGl0IGVuaW0gdXQgdmVuZW5hdGlzLiBBbGlxdWFtIGVnZXN0YXMsIGVyb3MgYXQgY29uc2VjdGV0dXIgdGluY2lkdW50LCBsb3JlbSBhdWd1ZSBpYWN1bGlzIGVzdCwgbmVjIG1vbGxpcyBmZWxpcyBhcmN1IGluIG51bmMuIFNlZCBpbiBvZGlvIHNlZCBsaWJlcm8gcGVsbGVudGVzcXVlIHZvbHV0cGF0IHZpdGFlIGEgYW50ZS4gTW9yYmkgY29tbW9kbyB2b2x1dHBhdCB0ZWxsdXMsIHV0IHZpdmVycmEgcHVydXMgcGxhY2VyYXQgZmVybWVudHVtLiBJbnRlZ2VyIGlhY3VsaXMgZmFjaWxpc2lzIGFyY3UsIGF0IGdyYXZpZGEgbG9yZW0gYmliZW5kdW0gYXQuIEFlbmVhbiBpZCBlcm9zIGVnZXQgZXN0IHNhZ2l0dGlzIGNvbnZhbGxpcyBzZWQgZXQgZHVpLiBEb25lYyBldSBwdWx2aW5hciB0ZWxsdXMuIE51bmMgZGlnbmlzc2ltIHJob25jdXMgdGVsbHVzLCBhdCBwZWxsZW50ZXNxdWUgbWV0dXMgbHVjdHVzIGF0LiBTZWQgb3JuYXJlIGFsaXF1YW0gZGlhbSwgYSBwb3J0dGl0b3IgbGVvIHNvbGxpY2l0dWRpbiBzZWQuIE5hbSB2aXRhZSBsZWN0dXMgbGFjdXMuIEludGVnZXIgYWRpcGlzY2luZyBxdWFtIG5lcXVlLCBibGFuZGl0IHBvc3VlcmUgbGliZXJvLiBTZWQgbGliZXJvIG51bmMsIGVnZXN0YXMgc29kYWxlcyB0ZW1wdXMgc2VkLCBjdXJzdXMgYmxhbmRpdCB0ZWxsdXMuIFZlc3RpYnVsdW0gbWkgcHVydXMsIHVsdHJpY2llcyBxdWlzIHBsYWNlcmF0IHZlbCwgbW9sZXN0aWUgYXQgZHVpLgoKTnVsbGEgY29tbW9kbyBvZGlvIGp1c3RvLiBQZWxsZW50ZXNxdWUgbm9uIG9ybmFyZSBkaWFtLiBJbiBjb25zZWN0ZXR1ciBzYXBpZW4gYWMgbnVuYyBzYWdpdHRpcyBtYWxlc3VhZGEuIE1vcmJpIHVsbGFtY29ycGVyIHRlbXBvciBlcmF0IG5lYyBydXRydW0uIER1aXMgdXQgY29tbW9kbyBqdXN0by4gQ3JhcyBlc3Qgb3JjaSwgY29uc2VjdGV0dXIgc2VkIGludGVyZHVtIHNlZCwgc2NlbGVyaXNxdWUgc2l0IGFtZXQgbnVsbGEuIFZlc3RpYnVsdW0ganVzdG8gbnVsbGEsIHBlbGxlbnRlc3F1ZSBhIHRlbXB1cyBldCwgZGFwaWJ1cyBldCBhcmN1LiBMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBNb3JiaSB0cmlzdGlxdWUsIGVyb3MgbmVjIGNvbmd1ZSBhZGlwaXNjaW5nLCBsaWd1bGEgc2VtIHJob25jdXMgZmVsaXMsIGF0IG9ybmFyZSB0ZWxsdXMgbWF1cmlzIGFjIHJpc3VzLiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgUHJvaW4gbWF1cmlzIGR1aSwgdGVtcG9yIGZlcm1lbnR1bSBkaWN0dW0gZXQsIGN1cnN1cyBhIGxlby4gTWFlY2VuYXMgbmVjIG5pc2wgYSB0ZWxsdXMgcGVsbGVudGVzcXVlIHJob25jdXMuIE51bGxhbSB1bHRyaWNlcyBldWlzbW9kIGR1aSBldSBjb25ndWUuCgpJbiBuZWMgdGVtcG9yIHJpc3VzLiBJbiBmYXVjaWJ1cyBuaXNpIGVnZXQgZGlhbSBkaWduaXNzaW0gY29uc2VxdWF0LiBEb25lYyBwdWx2aW5hciBhbnRlIG5lYyBlbmltIG1hdHRpcyBydXRydW0uIFZlc3RpYnVsdW0gbGVvIGF1Z3VlLCBtb2xlc3RpZSBuZWMgZGFwaWJ1cyBpbiwgZGljdHVtIGF0IGVuaW0uIEludGVnZXIgYWxpcXVhbSwgbG9yZW0gZXUgdnVscHV0YXRlIGxhY2luaWEsIG1pIG9yY2kgdGVtcG9yIGVuaW0sIGVnZXQgbWF0dGlzIGxpZ3VsYSBtYWduYSBhIG1hZ25hLiBQcmFlc2VudCBzZWQgZXJhdCB1dCB0b3J0b3IgaW50ZXJkdW0gdml2ZXJyYS4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gTnVsbGEgZmFjaWxpc2kuIE1hZWNlbmFzIHNpdCBhbWV0IGxlY3R1cyBsYWN1cy4gTnVuYyB2aXRhZSBwdXJ1cyBpZCBsaWd1bGEgbGFvcmVldCBjb25kaW1lbnR1bS4gRHVpcyBhdWN0b3IgdG9ydG9yIHZlbCBkdWkgcHVsdmluYXIgYSBmYWNpbGlzaXMgYXJjdSBkaWduaXNzaW0uIEluIGhhYyBoYWJpdGFzc2UgcGxhdGVhIGRpY3R1bXN0LiBEb25lYyBzb2xsaWNpdHVkaW4gcGVsbGVudGVzcXVlIGVnZXN0YXMuIFNlZCBzZWQgc2VtIGp1c3RvLiBNYWVjZW5hcyBsYW9yZWV0IGhlbmRyZXJpdCBtYXVyaXMsIHV0IHBvcnR0aXRvciBsb3JlbSBpYWN1bGlzIGFjLiBRdWlzcXVlIG1vbGVzdGllIHNlbSBxdWlzIGxvcmVtIHRlbXBvciBydXRydW0uIFBoYXNlb
Gx1cyBuaWJoIG1hdXJpcywgcmhvbmN1cyBpbiBjb25zZWN0ZXR1ciBub24sIGFsaXF1ZXQgZXUgbWFzc2EuCgpDdXJhYml0dXIgdmVsaXQgYXJjdSwgcHJldGl1bSBwb3J0YSBwbGFjZXJhdCBxdWlzLCB2YXJpdXMgdXQgbWV0dXMuIFZlc3RpYnVsdW0gdnVscHV0YXRlIHRpbmNpZHVudCBqdXN0bywgdml0YWUgcG9ydHRpdG9yIGxlY3R1cyBpbXBlcmRpZXQgc2l0IGFtZXQuIFZpdmFtdXMgZW5pbSBkb2xvciwgc29sbGljaXR1ZGluIHV0IHNlbXBlciBub24sIG9ybmFyZSBvcm5hcmUgZHVpLiBBbGlxdWFtIHRlbXBvciBmZXJtZW50dW0gc2FwaWVuIGVnZXQgY29uZGltZW50dW0uIEN1cmFiaXR1ciBsYW9yZWV0IGJpYmVuZHVtIGFudGUsIGluIGV1aXNtb2QgbGFjdXMgbGFjaW5pYSBldS4gUGVsbGVudGVzcXVlIGhhYml0YW50IG1vcmJpIHRyaXN0aXF1ZSBzZW5lY3R1cyBldCBuZXR1cyBldCBtYWxlc3VhZGEgZmFtZXMgYWMgdHVycGlzIGVnZXN0YXMuIFN1c3BlbmRpc3NlIHBvdGVudGkuIFNlZCBhdCBsaWJlcm8gZXUgdG9ydG9yIHRlbXB1cyBzY2VsZXJpc3F1ZS4gTnVsbGEgZmFjaWxpc2kuIE51bGxhbSB2aXRhZSBuZXF1ZSBpZCBqdXN0byB2aXZlcnJhIHJob25jdXMgcHJldGl1bSBhdCBsaWJlcm8uIEV0aWFtIGVzdCB1cm5hLCBhbGlxdWFtIHZlbCBwdWx2aW5hciBub24sIG9ybmFyZSB2ZWwgcHVydXMuCgpOdWxsYSB2YXJpdXMsIG5pc2kgZWdldCBjb25kaW1lbnR1bSBzZW1wZXIsIG1ldHVzIGVzdCBkaWN0dW0gb2RpbywgdmVsIG1hdHRpcyByaXN1cyBlc3Qgc2VkIHZlbGl0LiBDdW0gc29jaWlzIG5hdG9xdWUgcGVuYXRpYnVzIGV0IG1hZ25pcyBkaXMgcGFydHVyaWVudCBtb250ZXMsIG5hc2NldHVyIHJpZGljdWx1cyBtdXMuIE51bmMgbm9uIGVzdCBuZWMgdGVsbHVzIHVsdHJpY2llcyBtYXR0aXMgdXQgZWdldCB2ZWxpdC4gSW50ZWdlciBjb25kaW1lbnR1bSBhbnRlIGlkIGxvcmVtIGJsYW5kaXQgbGFjaW5pYS4gRG9uZWMgdmVsIHRvcnRvciBhdWd1ZSwgaW4gY29uZGltZW50dW0gbmlzaS4gUGVsbGVudGVzcXVlIHBlbGxlbnRlc3F1ZSBudWxsYSB1dCBudWxsYSBwb3J0dGl0b3IgcXVpcyBzb2RhbGVzIGVuaW0gcnV0cnVtLiBTZWQgYXVndWUgcmlzdXMsIGV1aXNtb2QgYSBhbGlxdWV0IGF0LCB2dWxwdXRhdGUgbm9uIGxpYmVyby4gTnVsbGFtIG5pYmggb2RpbywgZGlnbmlzc2ltIGZlcm1lbnR1bSBwdWx2aW5hciBhYywgY29uZ3VlIGV1IG1pLiBEdWlzIHRpbmNpZHVudCwgbmliaCBpZCB2ZW5lbmF0aXMgcGxhY2VyYXQsIGRpYW0gdHVycGlzIGdyYXZpZGEgbGVvLCBzaXQgYW1ldCBtb2xsaXMgbWFzc2EgZG9sb3IgcXVpcyBtYXVyaXMuIFZpdmFtdXMgc2NlbGVyaXNxdWUgc29kYWxlcyBhcmN1IGV0IGRhcGlidXMuIFN1c3BlbmRpc3NlIHBvdGVudGkuIENyYXMgcXVpcyB0ZWxsdXMgYXJjdSwgcXVpcyBsYW9yZWV0IHNlbS4gRnVzY2UgcG9ydHRpdG9yLCBzYXBpZW4gdmVsIHRyaXN0aXF1ZSBzb2RhbGVzLCB2ZWxpdCBsZW8gcG9ydGEgYXJjdSwgcXVpcyBwZWxsZW50ZXNxdWUgbnVuYyBtZXR1cyBub24gb2Rpby4gTmFtIGFyY3UgbGliZXJvLCB1bGxhbWNvcnBlciB1dCBwaGFyZXRyYSBub24sIGRpZ25pc3NpbSBldCB2ZWxpdC4gUXVpc3F1ZSBkb2xvciBsb3JlbSwgdmVoaWN1bGEgc2l0IGFtZXQgc2NlbGVyaXNxdWUgaW4sIHZhcml1cyBhdCBudWxsYS4gUGVsbGVudGVzcXVlIHZpdGFlIHNlbSBlZ2V0IHRvcnRvciBpYWN1bGlzIHB1bHZpbmFyLiBTZWQgbnVuYyBqdXN0bywgZXVpc21vZCBncmF2aWRhIHB1bHZpbmFyIGVnZXQsIGdyYXZpZGEgZWdldCB0dXJwaXMuIENyYXMgdmVsIGRpY3R1bSBuaXNpLiBOdWxsYW0gbnVsbGEgbGliZXJvLCBncmF2aWRhIHNpdCBhbWV0IGFsaXF1YW0gcXVpcywgY29tbW9kbyB2aXRhZSBvZGlvLiBDcmFzIHZpdGFlIG5pYmggbmVjIGR1aSBwbGFjZXJhdCBzZW1wZXIuCgpWaXZhbXVzIGF0IGZyaW5naWxsYSBlcm9zLiBWaXZhbXVzIGF0IG5pc2wgaWQgbWFzc2EgY29tbW9kbyBmZXVnaWF0IHF1aXMgbm9uIG1hc3NhLiBNb3JiaSB0ZWxsdXMgdXJuYSwgYXVjdG9yIHNpdCBhbWV0IGVsZW1lbnR1bSBzZWQsIHJ1dHJ1bSBub24gbGVjdHVzLiBOdWxsYSBmZXVnaWF0IGR1aSBpbiBzYXBpZW4gb3JuYXJlIGV0IGltcGVyZGlldCBlc3Qgb3JuYXJlLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gVmVzdGlidWx1bSBzZW1wZXIgcnV0cnVtIHRlbXBvci4gU2VkIGluIGZlbGlzIG5pYmgsIHNlZCBhbGlxdWFtIGVuaW0uIEN1cmFiaXR1ciB1dCBxdWFtIHNjZWxlcmlzcXVlIHZlbGl0IHBsYWNlcmF0IGRpY3R1bS4gRG9uZWMgZWxlaWZlbmQgdmVoaWN1bGEgcHVydXMsIGV1IHZlc3RpYnVsdW0gc2FwaWVuIHJ1dHJ1bSBldS4gVml2YW11cyBpbiBvZGlvIHZlbCBlc3QgdnVscHV0YXRlIGlhY3VsaXMuIE51bmMgcnV0cnVtIGZldWdpYXQgcHJldGl1bS4KCk1hZWNlbmFzIGlwc3VtIG5lcXVlLCBhdWN0b3IgcXVpcyBsYWNpbmlhIHZpdGFlLCBldWlzbW9kIGFjIG9yY2kuIERvbmVjIG1vbGVzdGllIG1hc3NhIGNvbnNlcXVhdCBlc3QgcG9ydGEgYWMgcG9ydGEgcHVydXMgdGluY2lkdW50LiBOYW0gYmliZW5kdW0gbGVv
IG5lYyBsYWN1cyBtb2xsaXMgbm9uIGNvbmRpbWVudHVtIGRvbG9yIHJob25jdXMuIE51bGxhIGFjIHZvbHV0cGF0IGxvcmVtLiBOdWxsYW0gZXJhdCBwdXJ1cywgY29udmFsbGlzIGVnZXQgY29tbW9kbyBpZCwgdmFyaXVzIHF1aXMgYXVndWUuIE51bGxhbSBhbGlxdWFtIGVnZXN0YXMgbWksIHZlbCBzdXNjaXBpdCBuaXNsIG1hdHRpcyBjb25zZXF1YXQuIFF1aXNxdWUgdmVsIGVnZXN0YXMgc2FwaWVuLiBOdW5jIGxvcmVtIHZlbGl0LCBjb252YWxsaXMgbmVjIGxhb3JlZXQgZXQsIGFsaXF1ZXQgZWdldCBtYXNzYS4gTmFtIGV0IG5pYmggYWMgZHVpIHZlaGljdWxhIGFsaXF1YW0gcXVpcyBldSBhdWd1ZS4gQ3JhcyB2ZWwgbWFnbmEgdXQgZWxpdCByaG9uY3VzIGludGVyZHVtIGlhY3VsaXMgdm9sdXRwYXQgbmlzbC4gU3VzcGVuZGlzc2UgYXJjdSBsb3JlbSwgdmFyaXVzIHJob25jdXMgdGVtcG9yIGlkLCBwdWx2aW5hciBzZWQgdG9ydG9yLiBQZWxsZW50ZXNxdWUgdWx0cmljaWVzIGxhb3JlZXQgb2RpbyBhYyBkaWduaXNzaW0uIEFsaXF1YW0gZGlhbSBhcmN1LCBwbGFjZXJhdCBxdWlzIGVnZXN0YXMgZWdldCwgZmFjaWxpc2lzIGV1IG51bmMuIE1hdXJpcyB2dWxwdXRhdGUsIG5pc2wgc2l0IGFtZXQgbW9sbGlzIGludGVyZHVtLCByaXN1cyB0b3J0b3Igb3JuYXJlIG9yY2ksIHNlZCBlZ2VzdGFzIG9yY2kgZXJvcyBub24gZGlhbS4gVmVzdGlidWx1bSBoZW5kcmVyaXQsIG1ldHVzIHF1aXMgcGxhY2VyYXQgcGVsbGVudGVzcXVlLCBlbmltIHB1cnVzIGZhdWNpYnVzIGR1aSwgc2l0IGFtZXQgdWx0cmljaWVzIGxlY3R1cyBpcHN1bSBpZCBsb3JlbS4gQ2xhc3MgYXB0ZW50IHRhY2l0aSBzb2Npb3NxdSBhZCBsaXRvcmEgdG9ycXVlbnQgcGVyIGNvbnViaWEgbm9zdHJhLCBwZXIgaW5jZXB0b3MgaGltZW5hZW9zLiBQcmFlc2VudCBlZ2V0IGRpYW0gb2RpbywgZXUgYmliZW5kdW0gZWxpdC4gSW4gdmVzdGlidWx1bSBvcmNpIGV1IGVyYXQgdGluY2lkdW50IHRyaXN0aXF1ZS4KCkNyYXMgY29uc2VjdGV0dXIgYW50ZSBldSB0dXJwaXMgcGxhY2VyYXQgc29sbGljaXR1ZGluLiBNYXVyaXMgZXQgbGFjdXMgdG9ydG9yLCBlZ2V0IHBoYXJldHJhIHZlbGl0LiBEb25lYyBhY2N1bXNhbiB1bHRyaWNlcyB0ZW1wb3IuIERvbmVjIGF0IG5pYmggYSBlbGl0IGNvbmRpbWVudHVtIGRhcGlidXMuIEludGVnZXIgc2l0IGFtZXQgdnVscHV0YXRlIGFudGUuIFN1c3BlbmRpc3NlIHBvdGVudGkuIEluIHNvZGFsZXMgbGFvcmVldCBtYXNzYSB2aXRhZSBsYWNpbmlhLiBNb3JiaSB2ZWwgbGFjdXMgZmV1Z2lhdCBhcmN1IHZ1bHB1dGF0ZSBtb2xlc3RpZS4gQWxpcXVhbSBtYXNzYSBtYWduYSwgdWxsYW1jb3JwZXIgYWNjdW1zYW4gZ3JhdmlkYSBxdWlzLCByaG9uY3VzIHB1bHZpbmFyIG51bGxhLiBQcmFlc2VudCBzaXQgYW1ldCBpcHN1bSBkaWFtLCBzaXQgYW1ldCBsYWNpbmlhIG5lcXVlLiBJbiBldCBzYXBpZW4gYXVndWUuIEV0aWFtIGVuaW0gZWxpdCwgdWx0cmljZXMgdmVsIHJ1dHJ1bSBpZCwgc2NlbGVyaXNxdWUgbm9uIGVuaW0uCgpQcm9pbiBldCBlZ2VzdGFzIG5lcXVlLiBQcmFlc2VudCBldCBpcHN1bSBkb2xvci4gTnVuYyBub24gdmFyaXVzIG5pc2wuIEZ1c2NlIGluIHRvcnRvciBuaXNpLiBNYWVjZW5hcyBjb252YWxsaXMgbmVxdWUgaW4gbGlndWxhIGJsYW5kaXQgcXVpcyB2ZWhpY3VsYSBsZW8gbW9sbGlzLiBQZWxsZW50ZXNxdWUgc2FnaXR0aXMgYmxhbmRpdCBsZW8sIGRhcGlidXMgcGVsbGVudGVzcXVlIGxlbyB1bHRyaWNlcyBhYy4gQ3VyYWJpdHVyIGFjIGVnZXN0YXMgbGliZXJvLiBEb25lYyBwcmV0aXVtIHBoYXJldHJhIHByZXRpdW0uIEZ1c2NlIGltcGVyZGlldCwgdHVycGlzIGV1IGFsaXF1YW0gcG9ydGEsIGFudGUgZWxpdCBlbGVpZmVuZCByaXN1cywgbHVjdHVzIGF1Y3RvciBhcmN1IGFudGUgdXQgbnVuYy4gVml2YW11cyBpbiBsZW8gZmVsaXMsIHZpdGFlIGVsZWlmZW5kIGxhY3VzLiBEb25lYyB0ZW1wdXMgYWxpcXVhbSBwdXJ1cyBwb3J0dGl0b3IgdHJpc3RpcXVlLiBTdXNwZW5kaXNzZSBkaWFtIG5lcXVlLCBzdXNjaXBpdCBmZXVnaWF0IGZyaW5naWxsYSBub24sIGVsZWlmZW5kIHNpdCBudWxsYW0uCg== \ No newline at end of file
diff --git a/1.1.x/share/www/script/test/lots_of_docs.js b/1.1.x/share/www/script/test/lots_of_docs.js
new file mode 100644
index 00000000..2fe702b1
--- /dev/null
+++ b/1.1.x/share/www/script/test/lots_of_docs.js
@@ -0,0 +1,55 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Test saving a semi-large quantity of documents and running some view queries.
+couchTests.lots_of_docs = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // Keep the number lowish for now to keep the tests fast. Crank it up
+ // manually to really test.
+ var numDocsToCreate = 500;
+
+ for(var i=0; i < numDocsToCreate; i += 100) {
+ var createNow = Math.min(numDocsToCreate - i, 100);
+ var docs = makeDocs(i, i + createNow);
+ db.bulkSave(docs);
+ }
+
+ // Query all documents, emitting the doc.integer member as the key.
+ var results = db.query(function(doc){ emit(doc.integer, null); });
+
+ T(results.total_rows == numDocsToCreate);
+
+ // validate the keys are ordered ascending
+ for(var i=0; i<numDocsToCreate; i++) {
+ T(results.rows[i].key==i);
+ }
+
+ // do the query again, but with descending output
+ results = db.query(function(doc){ emit(doc.integer, null); }, null, {
+ descending: true
+ });
+
+ T(results.total_rows == numDocsToCreate);
+
+ // validate the keys are ordered descending
+ for(var i=0; i<numDocsToCreate; i++) {
+ T(results.rows[numDocsToCreate-1-i].key==i);
+ }
+
+ // Check _all_docs with descending=true again (now that there are many docs)
+ var desc = db.allDocs({descending:true});
+ T(desc.total_rows == desc.rows.length);
+};
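For reference outside the harness: the batching above amounts to one _bulk_docs round-trip per 100 documents. Below is a minimal sketch of the same pattern, assuming the test-suite helpers (CouchDB, makeDocs, T) are loaded; the function name and parameters are illustrative, not part of this commit.

function bulkLoad(db, total, batchSize) {
  // Save `total` docs in batches, one _bulk_docs request per batch.
  for (var i = 0; i < total; i += batchSize) {
    var count = Math.min(total - i, batchSize);
    var docs = makeDocs(i, i + count); // suite helper: docs with an "integer" field
    var saved = db.bulkSave(docs);
    T(saved.length == count); // every doc in the batch was accepted
  }
}

// Usage, mirroring the loop in lots_of_docs.js:
// bulkLoad(new CouchDB("test_suite_db"), 500, 100);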
diff --git a/1.1.x/share/www/script/test/method_override.js b/1.1.x/share/www/script/test/method_override.js
new file mode 100644
index 00000000..0bb4c61f
--- /dev/null
+++ b/1.1.x/share/www/script/test/method_override.js
@@ -0,0 +1,40 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Allow broken HTTP clients to fake a full method vocabulary via the X-HTTP-Method-Override header.
+couchTests.method_override = function(debug) {
+ var result = JSON.parse(CouchDB.request("GET", "/").responseText);
+ T(result.couchdb == "Welcome");
+
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+
+ db.createDb();
+
+ var doc = {bob : "connie"};
+ var xhr = CouchDB.request("POST", "/test_suite_db/fnord", {body: JSON.stringify(doc), headers:{"X-HTTP-Method-Override" : "PUT"}});
+ T(xhr.status == 201);
+
+ doc = db.open("fnord");
+ T(doc.bob == "connie");
+
+ xhr = CouchDB.request("POST", "/test_suite_db/fnord?rev=" + doc._rev, {headers:{"X-HTTP-Method-Override" : "DELETE"}});
+ T(xhr.status == 200);
+
+ xhr = CouchDB.request("GET", "/test_suite_db/fnord2", {body: JSON.stringify(doc), headers:{"X-HTTP-Method-Override" : "PUT"}});
+ // The method override is ignored when the original method isn't POST.
+ T(xhr.status == 404);
+
+ doc = db.open("fnord");
+ T(doc == null);
+
+};
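The override only takes effect when the transport method is POST, which is what lets clients restricted to GET/POST tunnel PUT and DELETE. A hedged sketch of a client-side wrapper built on the suite's CouchDB.request helper follows; the wrapper name is invented for illustration.

function tunnelledRequest(method, uri, options) {
  options = options || {};
  if (method == "GET" || method == "POST") {
    return CouchDB.request(method, uri, options); // no tunnelling needed
  }
  // Anything else rides on a POST carrying the override header.
  options.headers = options.headers || {};
  options.headers["X-HTTP-Method-Override"] = method;
  return CouchDB.request("POST", uri, options);
}

// Equivalent to the first request in the test:
// tunnelledRequest("PUT", "/test_suite_db/fnord", {body: JSON.stringify({bob: "connie"})});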
diff --git a/1.1.x/share/www/script/test/multiple_rows.js b/1.1.x/share/www/script/test/multiple_rows.js
new file mode 100644
index 00000000..4f6fcd3b
--- /dev/null
+++ b/1.1.x/share/www/script/test/multiple_rows.js
@@ -0,0 +1,80 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.multiple_rows = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var nc = {_id:"NC", cities:["Charlotte", "Raleigh"]};
+ var ma = {_id:"MA", cities:["Boston", "Lowell", "Worcester", "Cambridge", "Springfield"]};
+ var fl = {_id:"FL", cities:["Miami", "Tampa", "Orlando", "Springfield"]};
+
+ T(db.save(nc).ok);
+ T(db.save(ma).ok);
+ T(db.save(fl).ok);
+
+ var generateListOfCitiesAndState = "function(doc) {" +
+ " for (var i = 0; i < doc.cities.length; i++)" +
+ " emit(doc.cities[i] + \", \" + doc._id, null);" +
+ "}";
+
+ var results = db.query(generateListOfCitiesAndState);
+ var rows = results.rows;
+
+ T(rows[0].key == "Boston, MA");
+ T(rows[1].key == "Cambridge, MA");
+ T(rows[2].key == "Charlotte, NC");
+ T(rows[3].key == "Lowell, MA");
+ T(rows[4].key == "Miami, FL");
+ T(rows[5].key == "Orlando, FL");
+ T(rows[6].key == "Raleigh, NC");
+ T(rows[7].key == "Springfield, FL");
+ T(rows[8].key == "Springfield, MA");
+ T(rows[9].key == "Tampa, FL");
+ T(rows[10].key == "Worcester, MA");
+
+ // add another city to NC
+ nc.cities.push("Wilmington");
+ T(db.save(nc).ok);
+
+ var results = db.query(generateListOfCitiesAndState);
+ var rows = results.rows;
+
+ T(rows[0].key == "Boston, MA");
+ T(rows[1].key == "Cambridge, MA");
+ T(rows[2].key == "Charlotte, NC");
+ T(rows[3].key == "Lowell, MA");
+ T(rows[4].key == "Miami, FL");
+ T(rows[5].key == "Orlando, FL");
+ T(rows[6].key == "Raleigh, NC");
+ T(rows[7].key == "Springfield, FL");
+ T(rows[8].key == "Springfield, MA");
+ T(rows[9].key == "Tampa, FL");
+ T(rows[10].key == "Wilmington, NC");
+ T(rows[11].key == "Worcester, MA");
+
+ // now delete MA
+ T(db.deleteDoc(ma).ok);
+
+ var results = db.query(generateListOfCitiesAndState);
+ var rows = results.rows;
+
+ T(rows[0].key == "Charlotte, NC");
+ T(rows[1].key == "Miami, FL");
+ T(rows[2].key == "Orlando, FL");
+ T(rows[3].key == "Raleigh, NC");
+ T(rows[4].key == "Springfield, FL");
+ T(rows[5].key == "Tampa, FL");
+ T(rows[6].key == "Wilmington, NC");
+};
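The assertions above lean on CouchDB returning view rows sorted by key collation, which is why "Boston, MA" precedes "Cambridge, MA" with no client-side sort. As an illustrative variant (not part of this commit): array keys collate element-wise, so swapping the key components groups rows by state before city.

var byStateThenCity = "function(doc) {" +
  "  for (var i = 0; i < doc.cities.length; i++)" +
  "    emit([doc._id, doc.cities[i]], null);" +
  "}";

// var rows = db.query(byStateThenCity).rows;
// rows[0].key would be ["FL", "Miami"]; states group as FL, MA, NC.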
diff --git a/1.1.x/share/www/script/test/oauth.js b/1.1.x/share/www/script/test/oauth.js
new file mode 100644
index 00000000..82ebe8a4
--- /dev/null
+++ b/1.1.x/share/www/script/test/oauth.js
@@ -0,0 +1,267 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.oauth = function(debug) {
+ // This tests OAuth authentication.
+
+ var authorization_url = "/_oauth/authorize";
+
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var dbA = new CouchDB("test_suite_db_a", {"X-Couch-Full-Commit":"false"});
+ var dbB = new CouchDB("test_suite_db_b", {"X-Couch-Full-Commit":"false"});
+ dbA.deleteDb();
+ dbA.createDb();
+ dbB.deleteDb();
+ dbB.createDb();
+
+ // Simple secret key generator
+ function generateSecret(length) {
+ var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ var secret = '';
+ for (var i=0; i<length; i++) {
+ secret += tab.charAt(Math.floor(Math.random() * 64));
+ }
+ return secret;
+ }
+
+ // Sign the message with the OAuth library and send it, carrying the signed
+ // parameters in the query string (GET), the form body (POST), or the
+ // Authorization header (any other method)
+ function oauthRequest(method, path, message, accessor) {
+ message.action = path;
+ message.method = method || 'GET';
+ OAuth.SignatureMethod.sign(message, accessor);
+ var parameters = message.parameters;
+ if (method == "POST" || method == "GET") {
+ if (method == "GET") {
+ return CouchDB.request("GET", OAuth.addToURL(path, parameters));
+ } else {
+ return CouchDB.request("POST", path, {
+ headers: {"Content-Type": "application/x-www-form-urlencoded"},
+ body: OAuth.formEncode(parameters)
+ });
+ }
+ } else {
+ return CouchDB.request(method, path, {
+ headers: {Authorization: OAuth.getAuthorizationHeader('', parameters)}
+ });
+ }
+ }
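+
+ // Messages handed to oauthRequest only need a parameters object with the
+ // oauth_* protocol fields (signature method, consumer key, token, version);
+ // the loop over signature methods below builds exactly that shape.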
+
+ var consumerSecret = generateSecret(64);
+ var tokenSecret = generateSecret(64);
+ var admintokenSecret = generateSecret(64);
+ var testadminPassword = "ohsosecret";
+
+ // Basic auth header for the testadmin account created in testFun below:
+ // base64("testadmin:<password>"), built with the bundled sha1 helpers
+ var adminBasicAuthHeaderValue = function() {
+ return 'Basic ' + binb2b64(str2binb("testadmin:" + testadminPassword));
+ };
+
+ var host = CouchDB.host;
+ var dbPair = {
+ source: {
+ url: CouchDB.protocol + host + "/test_suite_db_a",
+ auth: {
+ oauth: {
+ consumer_key: "key",
+ consumer_secret: consumerSecret,
+ token_secret: tokenSecret,
+ token: "foo"
+ }
+ }
+ },
+ target: {
+ url: CouchDB.protocol + host + "/test_suite_db_b",
+ headers: {"Authorization": adminBasicAuthHeaderValue()}
+ }
+ };
+
+ // this function will be called on the modified server
+ var testFun = function () {
+ try {
+ CouchDB.request("PUT", CouchDB.protocol + host + "/_config/admins/testadmin", {
+ headers: {"X-Couch-Persist": "false"},
+ body: JSON.stringify(testadminPassword)
+ });
+ var i = 0;
+ waitForSuccess(function() {
+ //loop until the couch server has processed the password
+ i += 1;
+ var xhr = CouchDB.request("GET", CouchDB.protocol + host + "/_config/admins/testadmin?foo="+i,{
+ headers: {
+ "Authorization": adminBasicAuthHeaderValue()
+ }});
+ if (xhr.responseText.indexOf("\"-hashed-") != 0) {
+ throw("still waiting");
+ }
+ }, "wait-for-admin");
+
+ CouchDB.newUuids(2); // so we have one to make the salt
+
+ CouchDB.request("PUT", CouchDB.protocol + host + "/_config/couch_httpd_auth/require_valid_user", {
+ headers: {
+ "X-Couch-Persist": "false",
+ "Authorization": adminBasicAuthHeaderValue()
+ },
+ body: JSON.stringify("true")
+ });
+
+ var usersDb = new CouchDB("test_suite_users", {
+ "X-Couch-Full-Commit":"false",
+ "Authorization": adminBasicAuthHeaderValue()
+ });
+ usersDb.deleteDb();
+ usersDb.createDb();
+
+ // Create a user
+ var jasonUserDoc = CouchDB.prepareUserDoc({
+ name: "jason",
+ roles: ["test"]
+ }, "testpassword");
+ T(usersDb.save(jasonUserDoc).ok);
+
+
+ var accessor = {
+ consumerSecret: consumerSecret,
+ tokenSecret: tokenSecret
+ };
+ var adminAccessor = {
+ consumerSecret: consumerSecret,
+ tokenSecret: admintokenSecret
+ };
+
+ var signatureMethods = ["PLAINTEXT", "HMAC-SHA1"];
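+ // each consumer key maps to the HTTP status its requests should produce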
+ var consumerKeys = {key: 200, nonexistent_key: 400};
+ for (var i=0; i<signatureMethods.length; i++) {
+ for (var consumerKey in consumerKeys) {
+ var expectedCode = consumerKeys[consumerKey];
+ var message = {
+ parameters: {
+ oauth_signature_method: signatureMethods[i],
+ oauth_consumer_key: consumerKey,
+ oauth_token: "foo",
+ oauth_token_secret: tokenSecret,
+ oauth_version: "1.0"
+ }
+ };
+
+ // Get a request token (oauthRequest signs GET requests via query
+ // parameters, so both fetches below go the same way)
+ var xhr = oauthRequest("GET", CouchDB.protocol + host + "/_oauth/request_token", message, accessor);
+ T(xhr.status == expectedCode);
+
+ // fetch the request token a second time
+ xhr = oauthRequest("GET", CouchDB.protocol + host + "/_oauth/request_token", message, accessor);
+ T(xhr.status == expectedCode);
+
+ var responseMessage = OAuth.decodeForm(xhr.responseText);
+
+ // Obtaining User Authorization
+ //Only needed for 3-legged OAuth
+ //xhr = CouchDB.request("GET", authorization_url + '?oauth_token=' + responseMessage.oauth_token);
+ //T(xhr.status == expectedCode);
+
+ xhr = oauthRequest("GET", CouchDB.protocol + host + "/_session", message, accessor);
+ T(xhr.status == expectedCode);
+ if (xhr.status == 200 && expectedCode == 200) {
+ var data = JSON.parse(xhr.responseText);
+ T(data.name == "jason");
+ T(data.roles[0] == "test");
+ }
+
+ xhr = oauthRequest("GET", CouchDB.protocol + host + "/_session?foo=bar", message, accessor);
+ T(xhr.status == expectedCode);
+
+ // Test HEAD method
+ xhr = oauthRequest("HEAD", CouchDB.protocol + host + "/_session?foo=bar", message, accessor);
+ T(xhr.status == expectedCode);
+
+ // Replication
+ var dbA = new CouchDB("test_suite_db_a", {
+ "X-Couch-Full-Commit":"false",
+ "Authorization": adminBasicAuthHeaderValue()
+ });
+ T(dbA.save({_id:"_design/"+i+consumerKey}).ok);
+ var result = CouchDB.replicate(dbPair.source, dbPair.target, {
+ headers: {"Authorization": adminBasicAuthHeaderValue()}
+ });
+ T(result.ok);
+
+ // Test auth via admin user defined in .ini
+ var message = {
+ parameters: {
+ oauth_signature_method: signatureMethods[i],
+ oauth_consumer_key: consumerKey,
+ oauth_token: "bar",
+ oauth_token_secret: admintokenSecret,
+ oauth_version: "1.0"
+ }
+ };
+ xhr = oauthRequest("GET", CouchDB.protocol + host + "/_session?foo=bar", message, adminAccessor);
+ if (xhr.status == 200 && expectedCode == 200) {
+ data = JSON.parse(xhr.responseText);
+ T(data.name == "testadmin");
+ T(data.roles[0] == "_admin");
+ }
+
+ // Test when the user's token doesn't exist.
+ message.parameters.oauth_token = "not a token!";
+ xhr = oauthRequest("GET", CouchDB.protocol + host + "/_session?foo=bar",
+ message, adminAccessor);
+ T(xhr.status == 400, "Request should be invalid.");
+ }
+ }
+ } finally {
+ var xhr = CouchDB.request("PUT", CouchDB.protocol + host + "/_config/couch_httpd_auth/require_valid_user", {
+ headers: {
+ "Authorization": adminBasicAuthHeaderValue(),
+ "X-Couch-Persist": "false"
+ },
+ body: JSON.stringify("false")
+ });
+ T(xhr.status == 200);
+
+ var xhr = CouchDB.request("DELETE", CouchDB.protocol + host + "/_config/admins/testadmin", {
+ headers: {
+ "Authorization": adminBasicAuthHeaderValue(),
+ "X-Couch-Persist": "false"
+ }
+ });
+ T(xhr.status == 200);
+ }
+ };
+
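+ // Server wiring for the test: the consumer secret for "key", plus two
+ // tokens: "foo" maps to the jason user and "bar" to the testadmin, each
+ // with its secret registered under oauth_token_secrets.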
+ run_on_modified_server(
+ [
+ {section: "httpd",
+ key: "WWW-Authenticate", value: 'OAuth'},
+ {section: "couch_httpd_auth",
+ key: "secret", value: generateSecret(64)},
+ {section: "couch_httpd_auth",
+ key: "authentication_db", value: "test_suite_users"},
+ {section: "oauth_consumer_secrets",
+ key: "key", value: consumerSecret},
+ {section: "oauth_token_users",
+ key: "foo", value: "jason"},
+ {section: "oauth_token_users",
+ key: "bar", value: "testadmin"},
+ {section: "oauth_token_secrets",
+ key: "foo", value: tokenSecret},
+ {section: "oauth_token_secrets",
+ key: "bar", value: admintokenSecret},
+ {section: "couch_httpd_oauth",
+ key: "authorization_url", value: authorization_url}
+ ],
+ testFun
+ );
+};
diff --git a/1.1.x/share/www/script/test/proxyauth.js b/1.1.x/share/www/script/test/proxyauth.js
new file mode 100644
index 00000000..40af0089
--- /dev/null
+++ b/1.1.x/share/www/script/test/proxyauth.js
@@ -0,0 +1,130 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+
+
+couchTests.proxyauth = function(debug) {
+ // this tests the proxy authentication handler
+
+ var usersDb = new CouchDB("test_suite_users", {"X-Couch-Full-Commit":"false"});
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+
+ if (debug) debugger;
+
+ // Simple secret key generator
+ function generateSecret(length) {
+ var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ var secret = '';
+ for (var i=0; i<length; i++) {
+ secret += tab.charAt(Math.floor(Math.random() * 64));
+ }
+ return secret;
+ }
+
+ var secret = generateSecret(64);
+
+ function TestFun() {
+ usersDb.deleteDb();
+ usersDb.createDb();
+ db.deleteDb();
+ db.createDb();
+
+ var benoitcUserDoc = CouchDB.prepareUserDoc({
+ name: "benoitc@apache.org"
+ }, "test");
+ T(usersDb.save(benoitcUserDoc).ok);
+
+ T(CouchDB.session().userCtx.name == null);
+
+ // test that you can use basic auth against the users db
+ var s = CouchDB.session({
+ headers : {
+ "Authorization" : "Basic YmVub2l0Y0BhcGFjaGUub3JnOnRlc3Q="
+ }
+ });
+ T(s.userCtx.name == "benoitc@apache.org");
+ T(s.info.authenticated == "default");
+
+ CouchDB.logout();
+
+ var headers = {
+ "X-Auth-CouchDB-UserName": "benoitc@apache.org",
+ "X-Auth-CouchDB-Roles": "test",
+ "X-Auth-CouchDB-Token": hex_hmac_sha1(secret, "benoitc@apache.org")
+ };
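+ // The proxy handler trusts these headers. Once proxy_use_secret is turned
+ // on below, X-Auth-CouchDB-Token must be the HMAC-SHA1 of the username
+ // keyed with the server secret, which is what hex_hmac_sha1 computes above.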
+
+ var designDoc = {
+ _id:"_design/test",
+ language: "javascript",
+
+ shows: {
+ "welcome": stringFun(function(doc,req) {
+ return "Welcome " + req.userCtx["name"];
+ }),
+ "role": stringFun(function(doc, req) {
+ return req.userCtx['roles'][0];
+ })
+ }
+ };
+
+ T(db.save(designDoc).ok);
+
+ var req = CouchDB.request("GET", "/test_suite_db/_design/test/_show/welcome",
+ {headers: headers});
+ T(req.responseText == "Welcome benoitc@apache.org");
+
+ req = CouchDB.request("GET", "/test_suite_db/_design/test/_show/role",
+ {headers: headers});
+ T(req.responseText == "test");
+
+ var xhr = CouchDB.request("PUT", "/_config/couch_httpd_auth/proxy_use_secret",{
+ body : JSON.stringify("true"),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ T(xhr.status == 200);
+
+ req = CouchDB.request("GET", "/test_suite_db/_design/test/_show/welcome",
+ {headers: headers});
+ T(req.responseText == "Welcome benoitc@apache.org");
+
+ req = CouchDB.request("GET", "/test_suite_db/_design/test/_show/role",
+ {headers: headers});
+ T(req.responseText == "test");
+
+ }
+
+ run_on_modified_server(
+ [{section: "httpd",
+ key: "authentication_handlers",
+ value:"{couch_httpd_auth, proxy_authentification_handler}, {couch_httpd_auth, default_authentication_handler}"},
+ {section: "couch_httpd_auth",
+ key: "authentication_db",
+ value: "test_suite_users"},
+ {section: "couch_httpd_auth",
+ key: "secret",
+ value: secret},
+ {section: "couch_httpd_auth",
+ key: "x_auth_username",
+ value: "X-Auth-CouchDB-UserName"},
+ {section: "couch_httpd_auth",
+ key: "x_auth_roles",
+ value: "X-Auth-CouchDB-Roles"},
+ {section: "couch_httpd_auth",
+ key: "x_auth_token",
+ value: "X-Auth-CouchDB-Token"},
+ {section: "couch_httpd_auth",
+ key: "proxy_use_secret",
+ value: "false"}],
+ TestFun
+ );
+
+}; \ No newline at end of file
diff --git a/1.1.x/share/www/script/test/purge.js b/1.1.x/share/www/script/test/purge.js
new file mode 100644
index 00000000..29689137
--- /dev/null
+++ b/1.1.x/share/www/script/test/purge.js
@@ -0,0 +1,145 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.purge = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ /*
+ purge is not to be confused with a document deletion. It removes the
+ document and all edit history from the local instance of the database.
+ */
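+
+ // A purge request body maps each doc id to the revs to drop, e.g.
+ // {"1": [doc1._rev]}, and the response lists the purged revs together
+ // with the new purge_seq, as checked below.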
+
+ var numDocs = 10;
+
+ var designDoc = {
+ _id:"_design/test",
+ language: "javascript",
+ views: {
+ all_docs_twice: {map: "function(doc) { emit(doc.integer, null); emit(doc.integer, null) }"},
+ single_doc: {map: "function(doc) { if (doc._id == \"1\") { emit(1, null) }}"}
+ }
+ };
+
+ T(db.save(designDoc).ok);
+
+ db.bulkSave(makeDocs(1, numDocs + 1));
+
+ // go ahead and validate the views before purging
+ var rows = db.view("test/all_docs_twice").rows;
+ for (var i = 0; i < numDocs; i++) {
+ T(rows[2*i].key == i+1);
+ T(rows[(2*i)+1].key == i+1);
+ }
+ T(db.view("test/single_doc").total_rows == 1);
+
+ var info = db.info();
+ var doc1 = db.open("1");
+ var doc2 = db.open("2");
+
+ // purge the documents
+ var xhr = CouchDB.request("POST", "/test_suite_db/_purge", {
+ body: JSON.stringify({"1":[doc1._rev], "2":[doc2._rev]})
+ });
+ T(xhr.status == 200);
+
+ var result = JSON.parse(xhr.responseText);
+ var newInfo = db.info();
+
+ // purging increments the update sequence
+ T(info.update_seq+1 == newInfo.update_seq);
+ // and it increments the purge_seq
+ T(info.purge_seq+1 == newInfo.purge_seq);
+ T(result.purge_seq == newInfo.purge_seq);
+
+ T(result.purged["1"][0] == doc1._rev);
+ T(result.purged["2"][0] == doc2._rev);
+
+ T(db.open("1") == null);
+ T(db.open("2") == null);
+
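+ // with docs 1 and 2 purged, the first view row should be for integer 3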
+ var rows = db.view("test/all_docs_twice").rows;
+ for (var i = 2; i < numDocs; i++) {
+ T(rows[2*(i-2)].key == i+1);
+ T(rows[(2*(i-2))+1].key == i+1);
+ }
+ T(db.view("test/single_doc").total_rows == 0);
+
+ // purge sequences are preserved after compaction (COUCHDB-1021)
+ T(db.compact().ok);
+ T(db.last_req.status == 202);
+ // compaction isn't instantaneous, loop until done
+ while (db.info().compact_running) {};
+ var compactInfo = db.info();
+ T(compactInfo.purge_seq == newInfo.purge_seq);
+
+ // purge documents twice in a row without loading views
+ // (causes full view rebuilds)
+
+ var doc3 = db.open("3");
+ var doc4 = db.open("4");
+
+ xhr = CouchDB.request("POST", "/test_suite_db/_purge", {
+ body: JSON.stringify({"3":[doc3._rev]})
+ });
+
+ T(xhr.status == 200);
+
+ xhr = CouchDB.request("POST", "/test_suite_db/_purge", {
+ body: JSON.stringify({"4":[doc4._rev]})
+ });
+
+ T(xhr.status == 200);
+ result = JSON.parse(xhr.responseText);
+ T(result.purge_seq == db.info().purge_seq);
+
+ var rows = db.view("test/all_docs_twice").rows;
+ for (var i = 4; i < numDocs; i++) {
+ T(rows[2*(i-4)].key == i+1);
+ T(rows[(2*(i-4))+1].key == i+1);
+ }
+ T(db.view("test/single_doc").total_rows == 0);
+
+ // COUCHDB-1065
+ var dbA = new CouchDB("test_suite_db_a");
+ var dbB = new CouchDB("test_suite_db_b");
+ dbA.deleteDb();
+ dbA.createDb();
+ dbB.deleteDb();
+ dbB.createDb();
+ var docA = {_id:"test", a:1};
+ var docB = {_id:"test", a:2};
+ dbA.save(docA);
+ dbB.save(docB);
+ CouchDB.replicate(dbA.name, dbB.name);
+ var xhr = CouchDB.request("POST", "/" + dbB.name + "/_purge", {
+ body: JSON.stringify({"test":[docA._rev]})
+ });
+ TEquals(200, xhr.status, "single rev purge after replication succeeds");
+
+ var xhr = CouchDB.request("GET", "/" + dbB.name + "/test?rev=" + docA._rev);
+ TEquals(404, xhr.status, "single rev purge removes revision");
+
+ var xhr = CouchDB.request("POST", "/" + dbB.name + "/_purge", {
+ body: JSON.stringify({"test":[docB._rev]})
+ });
+ TEquals(200, xhr.status, "single rev purge after replication succeeds");
+ var xhr = CouchDB.request("GET", "/" + dbB.name + "/test?rev=" + docB._rev);
+ TEquals(404, xhr.status, "single rev purge removes revision");
+
+ var xhr = CouchDB.request("POST", "/" + dbB.name + "/_purge", {
+ body: JSON.stringify({"test":[docA._rev, docB._rev]})
+ });
+ TEquals(200, xhr.status, "all rev purge after replication succeeds");
+};
diff --git a/1.1.x/share/www/script/test/reader_acl.js b/1.1.x/share/www/script/test/reader_acl.js
new file mode 100644
index 00000000..cc249ea4
--- /dev/null
+++ b/1.1.x/share/www/script/test/reader_acl.js
@@ -0,0 +1,198 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.reader_acl = function(debug) {
+ // this tests read access control
+
+ var usersDb = new CouchDB("test_suite_users", {"X-Couch-Full-Commit":"false"});
+ var secretDb = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ function testFun() {
+ try {
+ usersDb.deleteDb();
+ usersDb.createDb();
+ secretDb.deleteDb();
+ secretDb.createDb();
+
+ // create a user with top-secret-clearance
+ var jchrisUserDoc = CouchDB.prepareUserDoc({
+ name: "jchris@apache.org",
+ roles : ["top-secret"]
+ }, "funnybone");
+ T(usersDb.save(jchrisUserDoc).ok);
+ usersDb.ensureFullCommit();
+
+ T(CouchDB.session().userCtx.name == null);
+
+ // set secret db to be read controlled
+ T(secretDb.save({_id:"baz",foo:"bar"}).ok);
+ T(secretDb.open("baz").foo == "bar");
+
+ T(secretDb.setSecObj({
+ "readers" : {
+ roles : ["super-secret-club"],
+ names : ["joe","barb"]
+ }
+ }).ok);
+ } finally {
+ CouchDB.logout();
+ }
+ }
+
+ // split into 2 funs so we can test restart behavior
+ function testFun2() {
+ try {
+ // can't read it as jchris b/c he's missing the needed role
+ T(CouchDB.login("jchris@apache.org", "funnybone").ok);
+ T(CouchDB.session().userCtx.name == "jchris@apache.org");
+
+ try {
+ secretDb.open("baz");
+ T(false && "can't open a doc from a secret db");
+ } catch(e) {
+ T(true);
+ }
+
+ CouchDB.logout();
+
+ // make anyone with the top-secret role an admin
+ // db admins are automatically readers
+ T(secretDb.setSecObj({
+ "admins" : {
+ roles : ["top-secret"],
+ names : []
+ },
+ "readers" : {
+ roles : ["super-secret-club"],
+ names : ["joe","barb"]
+ }
+ }).ok);
+
+
+ T(CouchDB.login("jchris@apache.org", "funnybone").ok);
+
+ // db admin can read
+ T(secretDb.open("baz").foo == "bar");
+
+ // and run temp views
+ TEquals(secretDb.query(function(doc) {
+ emit(null, null)
+ }).total_rows, 1);
+
+ CouchDB.logout();
+ T(CouchDB.session().userCtx.roles.indexOf("_admin") != -1);
+
+ // admin now adds the top-secret role to the db's readers
+ // and removes db-admins
+ T(secretDb.setSecObj({
+ "admins" : {
+ roles : [],
+ names : []
+ },
+ "readers" : {
+ roles : ["super-secret-club", "top-secret"],
+ names : ["joe","barb"]
+ }
+ }).ok);
+
+ // server _admin can always read
+ T(secretDb.open("baz").foo == "bar");
+
+ // and run temp views
+ TEquals(secretDb.query(function(doc) {
+ emit(null, null)
+ }).total_rows, 1);
+
+ T(secretDb.save({
+ "_id" : "_design/foo",
+ views : {
+ bar : {
+ map : "function(doc){emit(null, null)}"
+ }
+ }
+ }).ok);
+
+ // now top-secret users can read too
+ T(CouchDB.login("jchris@apache.org", "funnybone").ok);
+ T(CouchDB.session().userCtx.roles.indexOf("_admin") == -1);
+ T(secretDb.open("baz").foo == "bar");
+ // readers can query stored views
+ T(secretDb.view("foo/bar").total_rows == 1);
+
+ // readers can't do temp views
+ try {
+ var results = secretDb.query(function(doc) {
+ emit(null, null);
+ });
+ T(false && "temp view should be admin only");
+ } catch (e) {
+ T(true && "temp view is admin only");
+ }
+
+
+ CouchDB.logout();
+
+ // can't set non string reader names or roles
+ try {
+ secretDb.setSecObj({
+ "readers" : {
+ roles : ["super-secret-club", {"top-secret":"awesome"}],
+ names : ["joe","barb"]
+ }
+ })
+ T(false && "only string roles");
+ } catch (e) {}
+
+ try {
+ secretDb.setSecObj({
+ "readers" : {
+ roles : ["super-secret-club", {"top-secret":"awesome"}],
+ names : ["joe",22]
+ }
+ });
+ T(false && "only string names");
+ } catch (e) {}
+
+ try {
+ secretDb.setSecObj({
+ "readers" : {
+ roles : ["super-secret-club", {"top-secret":"awesome"}],
+ names : "joe"
+ }
+ });
+ T(false && "only lists of names");
+ } catch (e) {}
+ } finally {
+ CouchDB.logout();
+ }
+ };
+
+ run_on_modified_server(
+ [{section: "httpd",
+ key: "authentication_handlers",
+ value: "{couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}"},
+ {section: "couch_httpd_auth",
+ key: "authentication_db", value: "test_suite_users"}],
+ testFun
+ );
+
+ // security changes will always commit synchronously
+ restartServer();
+
+ run_on_modified_server(
+ [{section: "httpd",
+ key: "authentication_handlers",
+ value: "{couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}"},
+ {section: "couch_httpd_auth",
+ key: "authentication_db", value: "test_suite_users"}],
+ testFun2
+ );
+};
diff --git a/1.1.x/share/www/script/test/recreate_doc.js b/1.1.x/share/www/script/test/recreate_doc.js
new file mode 100644
index 00000000..05843558
--- /dev/null
+++ b/1.1.x/share/www/script/test/recreate_doc.js
@@ -0,0 +1,80 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.recreate_doc = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // First create a new document with the ID "foo", and delete it again
+ var doc = {_id: "foo", a: "bar", b: 42};
+ var result = db.save(doc);
+ T(result.ok);
+ var firstRev = result.rev;
+ T(db.deleteDoc(doc).ok);
+
+ // Now create a new document with the same ID, save it, and then modify it
+ for (var i = 0; i < 10; i++) {
+ doc = {_id: "foo"};
+ T(db.save(doc).ok);
+ doc = db.open("foo");
+ doc.a = "baz";
+ T(db.save(doc).ok);
+ T(db.deleteDoc(doc).rev != undefined);
+ }
+
+ try {
+ // COUCHDB-292 now attempt to save the document with a prev that's since
+ // been deleted and this should generate a conflict exception
+ db.save({_id:"foo", _rev:firstRev, bar:1});
+ T("no save conflict 1" && false); // we shouldn't hit here
+ } catch (e) {
+ T(e.error == "conflict");
+ }
+
+ var binAttDoc = {
+ _id: "foo",
+ _rev:firstRev,
+ _attachments:{
+ "foo.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ };
+ try {
+ // same as before, but with binary
+ db.save(binAttDoc);
+ T("no save conflict 2" && false); // we shouldn't hit here
+ } catch (e) {
+ T(e.error == "conflict");
+ }
+
+
+ try {
+ // random non-existent prev rev
+ db.save({_id:"foo", _rev:"1-asfafasdf", bar:1});
+ T("no save conflict 3" && false); // we shouldn't hit here
+ } catch (e) {
+ T(e.error == "conflict");
+ }
+
+ try {
+ // random non-existent prev rev with binary attachment
+ binAttDoc._rev = "1-aasasfasdf";
+ db.save(binAttDoc);
+ T("no save conflict 4" && false); // we shouldn't hit here
+ } catch (e) {
+ T(e.error == "conflict");
+ }
+};
diff --git a/1.1.x/share/www/script/test/reduce.js b/1.1.x/share/www/script/test/reduce.js
new file mode 100644
index 00000000..16c7a7bf
--- /dev/null
+++ b/1.1.x/share/www/script/test/reduce.js
@@ -0,0 +1,185 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.reduce = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+ var numDocs = 500;
+ var docs = makeDocs(1,numDocs + 1);
+ db.bulkSave(docs);
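+ // closed form for 1 + 2 + ... + N, e.g. summate(4) == 10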
+ var summate = function(N) {return (N+1)*N/2;};
+
+ var map = function (doc) {
+ emit(doc.integer, doc.integer);
+ emit(doc.integer, doc.integer);
+ };
+ var reduce = function (keys, values) { return sum(values); };
+ var result = db.query(map, reduce);
+ T(result.rows[0].value == 2*summate(numDocs));
+
+ result = db.query(map, reduce, {startkey: 4, endkey: 4});
+ T(result.rows[0].value == 8);
+
+ result = db.query(map, reduce, {startkey: 4, endkey: 5});
+ T(result.rows[0].value == 18);
+
+ result = db.query(map, reduce, {startkey: 4, endkey: 6});
+ T(result.rows[0].value == 30);
+
+ result = db.query(map, reduce, {group:true, limit:3});
+ T(result.rows[0].value == 2);
+ T(result.rows[1].value == 4);
+ T(result.rows[2].value == 6);
+
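+ // the map emits each key twice, so reducing over [i, numDocs-i] should
+ // yield exactly twice the partial sum summate(numDocs-i) - summate(i-1)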
+ for(var i=1; i<numDocs/2; i+=30) {
+ result = db.query(map, reduce, {startkey: i, endkey: numDocs - i});
+ T(result.rows[0].value == 2*(summate(numDocs-i) - summate(i-1)));
+ }
+
+ db.deleteDb();
+ db.createDb();
+
+ for(var i=1; i <= 5; i++) {
+
+ for(var j=0; j < 10; j++) {
+ // these docs are in the order of the keys collation, for clarity
+ var docs = [];
+ docs.push({keys:["a"]});
+ docs.push({keys:["a"]});
+ docs.push({keys:["a", "b"]});
+ docs.push({keys:["a", "b"]});
+ docs.push({keys:["a", "b", "c"]});
+ docs.push({keys:["a", "b", "d"]});
+ docs.push({keys:["a", "c", "d"]});
+ docs.push({keys:["d"]});
+ docs.push({keys:["d", "a"]});
+ docs.push({keys:["d", "b"]});
+ docs.push({keys:["d", "c"]});
+ db.bulkSave(docs);
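+ // each batch saves 11 docs and there are 10 batches per outer pass,
+ // hence the running doc_count below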
+ T(db.info().doc_count == ((i - 1) * 10 * 11) + ((j + 1) * 11));
+ }
+
+ map = function (doc) { emit(doc.keys, 1); };
+ reduce = function (keys, values) { return sum(values); };
+
+ var results = db.query(map, reduce, {group:true});
+
+ //group by exact key match
+ T(equals(results.rows[0], {key:["a"],value:20*i}));
+ T(equals(results.rows[1], {key:["a","b"],value:20*i}));
+ T(equals(results.rows[2], {key:["a", "b", "c"],value:10*i}));
+ T(equals(results.rows[3], {key:["a", "b", "d"],value:10*i}));
+
+ // test to make sure group reduce and limit params provide valid json
+ var results = db.query(map, reduce, {group: true, limit: 2});
+ T(equals(results.rows[0], {key: ["a"], value: 20*i}));
+ T(equals(results.rows.length, 2));
+
+ //group by the first element in the key array
+ var results = db.query(map, reduce, {group_level:1});
+ T(equals(results.rows[0], {key:["a"],value:70*i}));
+ T(equals(results.rows[1], {key:["d"],value:40*i}));
+
+ //group by the first 2 elements in the key array
+ var results = db.query(map, reduce, {group_level:2});
+ T(equals(results.rows[0], {key:["a"],value:20*i}));
+ T(equals(results.rows[1], {key:["a","b"],value:40*i}));
+ T(equals(results.rows[2], {key:["a","c"],value:10*i}));
+ T(equals(results.rows[3], {key:["d"],value:10*i}));
+ T(equals(results.rows[4], {key:["d","a"],value:10*i}));
+ T(equals(results.rows[5], {key:["d","b"],value:10*i}));
+ T(equals(results.rows[6], {key:["d","c"],value:10*i}));
+
+ // endkey test with inclusive_end=true
+ var results = db.query(map, reduce, {group_level:2,endkey:["d"],inclusive_end:true});
+ T(equals(results.rows[0], {key:["a"],value:20*i}));
+ T(equals(results.rows[1], {key:["a","b"],value:40*i}));
+ T(equals(results.rows[2], {key:["a","c"],value:10*i}));
+ T(equals(results.rows[3], {key:["d"],value:10*i}));
+ TEquals(4, results.rows.length);
+
+ // endkey test with inclusive_end=false
+ var results = db.query(map, reduce, {group_level:2,endkey:["d"],inclusive_end:false});
+ T(equals(results.rows[0], {key:["a"],value:20*i}));
+ T(equals(results.rows[1], {key:["a","b"],value:40*i}));
+ T(equals(results.rows[2], {key:["a","c"],value:10*i}));
+ TEquals(3, results.rows.length);
+ }
+
+ // now test out more complex reductions that need to use rereduce.
+
+ db.deleteDb();
+ db.createDb();
+
+
+ var map = function (doc) { emit(doc.val, doc.val); };
+ var reduceCombine = function (keys, values, rereduce) {
+ // This computes the standard deviation of the mapped results
+ var stdDeviation=0.0;
+ var count=0;
+ var total=0.0;
+ var sqrTotal=0.0;
+
+ if (!rereduce) {
+ // This is the reduce phase: we are reducing over values emitted
+ // by the map functions.
+ for(var i in values) {
+ total = total + values[i];
+ sqrTotal = sqrTotal + (values[i] * values[i]);
+ }
+ count = values.length;
+ }
+ else {
+ // This is the rereduce phase: we are re-reducing previously
+ // reduced values.
+ for(var i in values) {
+ count = count + values[i].count;
+ total = total + values[i].total;
+ sqrTotal = sqrTotal + values[i].sqrTotal;
+ }
+ }
+
+ var variance = (sqrTotal - ((total * total)/count)) / count;
+ stdDeviation = Math.sqrt(variance);
+
+ // the reduce result. It contains enough information to be rereduced
+ // with other reduce results.
+ return {"stdDeviation":stdDeviation,"count":count,
+ "total":total,"sqrTotal":sqrTotal};
+ };
+
+ // Save a bunch a docs.
+
+ for(var i=0; i < 10; i++) {
+ var docs = [];
+ docs.push({val:10});
+ docs.push({val:20});
+ docs.push({val:30});
+ docs.push({val:40});
+ docs.push({val:50});
+ docs.push({val:60});
+ docs.push({val:70});
+ docs.push({val:80});
+ docs.push({val:90});
+ docs.push({val:100});
+ db.bulkSave(docs);
+ }
+
+ var results = db.query(map, reduceCombine);
+
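+ // ten copies of each value 10..100 give count = 100, total = 5500 and
+ // sqrTotal = 385000, so variance = (385000 - 5500*5500/100)/100 = 825
+ // and the standard deviation is sqrt(825) ~= 28.722813232690143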
+ var difference = results.rows[0].value.stdDeviation - 28.722813232690143;
+ // account for floating point rounding error
+ T(Math.abs(difference) < 0.0000000001);
+
+};
diff --git a/1.1.x/share/www/script/test/reduce_builtin.js b/1.1.x/share/www/script/test/reduce_builtin.js
new file mode 100644
index 00000000..b3cc3cc7
--- /dev/null
+++ b/1.1.x/share/www/script/test/reduce_builtin.js
@@ -0,0 +1,179 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.reduce_builtin = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var numDocs = 500;
+ var docs = makeDocs(1,numDocs + 1);
+ db.bulkSave(docs);
+
+ var summate = function(N) {return (N+1)*N/2;};
+
+ var sumsqr = function(N) {
+ var acc = 0;
+ for (var i=1; i<=N; ++i) {
+ acc += i*i;
+ }
+ return acc;
+ };
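+ // (equivalently N*(N+1)*(2*N+1)/6; e.g. sumsqr(3) == 14)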
+
+ // this is the same test as the reduce.js test
+ // only we'll let CouchDB run reduce in Erlang
+ var map = function (doc) {
+ emit(doc.integer, doc.integer);
+ emit(doc.integer, doc.integer);
+ };
+
+ var result = db.query(map, "_sum");
+ T(result.rows[0].value == 2*summate(numDocs));
+ result = db.query(map, "_count");
+ T(result.rows[0].value == 1000);
+ result = db.query(map, "_stats");
+ T(result.rows[0].value.sum == 2*summate(numDocs));
+ T(result.rows[0].value.count == 1000);
+ T(result.rows[0].value.min == 1);
+ T(result.rows[0].value.max == 500);
+ T(result.rows[0].value.sumsqr == 2*sumsqr(numDocs));
+
+ result = db.query(map, "_sum", {startkey: 4, endkey: 4});
+ T(result.rows[0].value == 8);
+ result = db.query(map, "_count", {startkey: 4, endkey: 4});
+ T(result.rows[0].value == 2);
+
+ result = db.query(map, "_sum", {startkey: 4, endkey: 5});
+ T(result.rows[0].value == 18);
+ result = db.query(map, "_count", {startkey: 4, endkey: 5});
+ T(result.rows[0].value == 4);
+
+ result = db.query(map, "_sum", {startkey: 4, endkey: 6});
+ T(result.rows[0].value == 30);
+ result = db.query(map, "_count", {startkey: 4, endkey: 6});
+ T(result.rows[0].value == 6);
+
+ result = db.query(map, "_sum", {group:true, limit:3});
+ T(result.rows[0].value == 2);
+ T(result.rows[1].value == 4);
+ T(result.rows[2].value == 6);
+
+ for(var i=1; i<numDocs/2; i+=30) {
+ result = db.query(map, "_sum", {startkey: i, endkey: numDocs - i});
+ T(result.rows[0].value == 2*(summate(numDocs-i) - summate(i-1)));
+ }
+
+ // test for trailing characters after builtin functions, desired behaviour
+ // is to disregard any trailing characters
+ // I think the behavior should be a prefix test, so that even "_statsorama"
+ // or "_stats\nare\awesome" should work just as "_stats" does. - JChris
+
+ var trailing = ["\u000a", "orama", "\nare\nawesome", " ", " \n "];
+
+ for(var i=0; i < trailing.length; i++) {
+ result = db.query(map, "_sum" + trailing[i]);
+ T(result.rows[0].value == 2*summate(numDocs));
+ result = db.query(map, "_count" + trailing[i]);
+ T(result.rows[0].value == 1000);
+ result = db.query(map, "_stats" + trailing[i]);
+ T(result.rows[0].value.sum == 2*summate(numDocs));
+ T(result.rows[0].value.count == 1000);
+ T(result.rows[0].value.min == 1);
+ T(result.rows[0].value.max == 500);
+ T(result.rows[0].value.sumsqr == 2*sumsqr(numDocs));
+ }
+
+ db.deleteDb();
+ db.createDb();
+
+ for(var i=1; i <= 5; i++) {
+
+ for(var j=0; j < 10; j++) {
+ // these docs are in the order of the keys collation, for clarity
+ var docs = [];
+ docs.push({keys:["a"]});
+ docs.push({keys:["a"]});
+ docs.push({keys:["a", "b"]});
+ docs.push({keys:["a", "b"]});
+ docs.push({keys:["a", "b", "c"]});
+ docs.push({keys:["a", "b", "d"]});
+ docs.push({keys:["a", "c", "d"]});
+ docs.push({keys:["d"]});
+ docs.push({keys:["d", "a"]});
+ docs.push({keys:["d", "b"]});
+ docs.push({keys:["d", "c"]});
+ db.bulkSave(docs);
+ T(db.info().doc_count == ((i - 1) * 10 * 11) + ((j + 1) * 11));
+ }
+
+ map = function (doc) { emit(doc.keys, 1); };
+ // with emitted values being 1, count should be the same as sum
+ var builtins = ["_sum", "_count"];
+
+ for (var b=0; b < builtins.length; b++) {
+ var fun = builtins[b];
+ var results = db.query(map, fun, {group:true});
+
+ //group by exact key match
+ T(equals(results.rows[0], {key:["a"],value:20*i}));
+ T(equals(results.rows[1], {key:["a","b"],value:20*i}));
+ T(equals(results.rows[2], {key:["a", "b", "c"],value:10*i}));
+ T(equals(results.rows[3], {key:["a", "b", "d"],value:10*i}));
+
+ // test to make sure group reduce and limit params provide valid json
+ var results = db.query(map, fun, {group: true, limit: 2});
+ T(equals(results.rows[0], {key: ["a"], value: 20*i}));
+ T(equals(results.rows.length, 2));
+
+ //group by the first element in the key array
+ var results = db.query(map, fun, {group_level:1});
+ T(equals(results.rows[0], {key:["a"],value:70*i}));
+ T(equals(results.rows[1], {key:["d"],value:40*i}));
+
+ //group by the first 2 elements in the key array
+ var results = db.query(map, fun, {group_level:2});
+ T(equals(results.rows[0], {key:["a"],value:20*i}));
+ T(equals(results.rows[1], {key:["a","b"],value:40*i}));
+ T(equals(results.rows[2], {key:["a","c"],value:10*i}));
+ T(equals(results.rows[3], {key:["d"],value:10*i}));
+ T(equals(results.rows[4], {key:["d","a"],value:10*i}));
+ T(equals(results.rows[5], {key:["d","b"],value:10*i}));
+ T(equals(results.rows[6], {key:["d","c"],value:10*i}));
+ };
+
+ map = function (doc) { emit(doc.keys, [1, 1]); };
+
+ var results = db.query(map, "_sum", {group:true});
+ T(equals(results.rows[0], {key:["a"],value:[20*i,20*i]}));
+ T(equals(results.rows[1], {key:["a","b"],value:[20*i,20*i]}));
+ T(equals(results.rows[2], {key:["a", "b", "c"],value:[10*i,10*i]}));
+ T(equals(results.rows[3], {key:["a", "b", "d"],value:[10*i,10*i]}));
+
+ var results = db.query(map, "_sum", {group: true, limit: 2});
+ T(equals(results.rows[0], {key: ["a"], value: [20*i,20*i]}));
+ T(equals(results.rows.length, 2));
+
+ var results = db.query(map, "_sum", {group_level:1});
+ T(equals(results.rows[0], {key:["a"],value:[70*i,70*i]}));
+ T(equals(results.rows[1], {key:["d"],value:[40*i,40*i]}));
+
+ var results = db.query(map, "_sum", {group_level:2});
+ T(equals(results.rows[0], {key:["a"],value:[20*i,20*i]}));
+ T(equals(results.rows[1], {key:["a","b"],value:[40*i,40*i]}));
+ T(equals(results.rows[2], {key:["a","c"],value:[10*i,10*i]}));
+ T(equals(results.rows[3], {key:["d"],value:[10*i,10*i]}));
+ T(equals(results.rows[4], {key:["d","a"],value:[10*i,10*i]}));
+ T(equals(results.rows[5], {key:["d","b"],value:[10*i,10*i]}));
+ T(equals(results.rows[6], {key:["d","c"],value:[10*i,10*i]}));
+ }
+};
diff --git a/1.1.x/share/www/script/test/reduce_false.js b/1.1.x/share/www/script/test/reduce_false.js
new file mode 100644
index 00000000..699b258f
--- /dev/null
+++ b/1.1.x/share/www/script/test/reduce_false.js
@@ -0,0 +1,44 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.reduce_false = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var numDocs = 5;
+ var docs = makeDocs(1,numDocs + 1);
+ db.bulkSave(docs);
+ var summate = function(N) {return (N+1)*N/2;};
+
+ var designDoc = {
+ _id:"_design/test",
+ language: "javascript",
+ views: {
+ summate: {map:"function (doc) { emit(doc.integer, doc.integer); }",
+ reduce:"function (keys, values) { return sum(values); }"},
+ }
+ };
+ T(db.save(designDoc).ok);
+
+ // Test that the reduce works
+ var res = db.view('test/summate');
+ T(res.rows.length == 1 && res.rows[0].value == summate(5));
+
+ //Test that we get our docs back
+ res = db.view('test/summate', {reduce: false});
+ T(res.rows.length == 5);
+ for(var i=0; i<5; i++) {
+ T(res.rows[i].value == i+1);
+ }
+};
diff --git a/1.1.x/share/www/script/test/reduce_false_temp.js b/1.1.x/share/www/script/test/reduce_false_temp.js
new file mode 100644
index 00000000..d45f05b2
--- /dev/null
+++ b/1.1.x/share/www/script/test/reduce_false_temp.js
@@ -0,0 +1,37 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.reduce_false_temp = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var numDocs = 5;
+ var docs = makeDocs(1,numDocs + 1);
+ db.bulkSave(docs);
+ var summate = function(N) {return (N+1)*N/2;};
+
+ var mapFun = "function (doc) { emit(doc.integer, doc.integer); }";
+ var reduceFun = "function (keys, values) { return sum(values); }";
+
+ // Test that the reduce works
+ var res = db.query(mapFun, reduceFun);
+ T(res.rows.length == 1 && res.rows[0].value == summate(5));
+
+ //Test that we get our docs back
+ res = db.query(mapFun, reduceFun, {reduce: false});
+ T(res.rows.length == 5);
+ for(var i=0; i<5; i++) {
+ T(res.rows[i].value == i+1);
+ }
+};
diff --git a/1.1.x/share/www/script/test/replication.js b/1.1.x/share/www/script/test/replication.js
new file mode 100644
index 00000000..7f92891e
--- /dev/null
+++ b/1.1.x/share/www/script/test/replication.js
@@ -0,0 +1,792 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.replication = function(debug) {
+ if (debug) debugger;
+ var host = CouchDB.host;
+ var dbPairs = [
+ {source:"test_suite_db_a",
+ target:"test_suite_db_b"},
+ {source:"test_suite_db_a",
+ target:CouchDB.protocol + host + "/test_suite_db_b"},
+ {source:CouchDB.protocol + host + "/test_suite_db_a",
+ target:"test_suite_db_b"},
+ {source:CouchDB.protocol + host + "/test_suite_db_a",
+ target:CouchDB.protocol + host + "/test_suite_db_b"}
+ ];
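+ // the four pairs exercise every local/remote combination of source and target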
+ var dbA = new CouchDB("test_suite_db_a", {"X-Couch-Full-Commit":"false"});
+ var dbB = new CouchDB("test_suite_db_b", {"X-Couch-Full-Commit":"false"});
+ var numDocs = 10;
+ var xhr;
+ for (var testPair = 0; testPair < dbPairs.length; testPair++) {
+ var A = dbPairs[testPair].source;
+ var B = dbPairs[testPair].target;
+
+ dbA.deleteDb();
+ dbA.createDb();
+ dbB.deleteDb();
+ dbB.createDb();
+
+ var repTests = {
+ // copy and paste this template, fill in your code, and delete unused steps.
+ test_template: new function () {
+ this.init = function(dbA, dbB) {
+ // before anything has happened
+ };
+ this.afterAB1 = function(dbA, dbB) {
+ // called after replicating src=A tgt=B first time.
+ };
+ this.afterBA1 = function(dbA, dbB) {
+ // called after replicating src=B tgt=A first time.
+ };
+ this.afterAB2 = function(dbA, dbB) {
+ // called after replicating src=A tgt=B second time.
+ };
+ this.afterBA2 = function(dbA, dbB) {
+ // etc...
+ };
+ },
+
+ simple_test: new function () {
+ this.init = function(dbA, dbB) {
+ var docs = makeDocs(0, numDocs);
+ dbA.bulkSave(docs);
+ };
+
+ this.afterAB1 = function(dbA, dbB) {
+ for (var j = 0; j < numDocs; j++) {
+ var docA = dbA.open("" + j);
+ var docB = dbB.open("" + j);
+ T(docA._rev == docB._rev);
+ }
+ };
+ },
+
+ deletes_test: new function () {
+ // make sure deletes are replicated
+ this.init = function(dbA, dbB) {
+ T(dbA.save({_id:"foo1",value:"a"}).ok);
+ };
+
+ this.afterAB1 = function(dbA, dbB) {
+ var docA = dbA.open("foo1");
+ var docB = dbB.open("foo1");
+ T(docA._rev == docB._rev);
+
+ dbA.deleteDoc(docA);
+ };
+
+ this.afterAB2 = function(dbA, dbB) {
+ T(dbA.open("foo1") == null);
+ T(dbB.open("foo1") == null);
+ };
+ },
+
+ deleted_test : new function() {
+ // docs created and deleted on a single node are also replicated
+ this.init = function(dbA, dbB) {
+ T(dbA.save({_id:"del1",value:"a"}).ok);
+ var docA = dbA.open("del1");
+ dbA.deleteDoc(docA);
+ };
+
+ this.afterAB1 = function(dbA, dbB) {
+ var rows = dbB.changes().results;
+ var rowCnt = 0;
+ for (var i=0; i < rows.length; i++) {
+ if (rows[i].id == "del1") {
+ rowCnt += 1;
+ T(rows[i].deleted == true);
+ }
+ };
+ T(rowCnt == 1);
+ };
+ },
+
+ slashes_in_ids_test: new function () {
+ // make sure docs with slashes in id replicate properly
+ this.init = function(dbA, dbB) {
+ dbA.save({ _id:"abc/def", val:"one" });
+ };
+
+ this.afterAB1 = function(dbA, dbB) {
+ var docA = dbA.open("abc/def");
+ var docB = dbB.open("abc/def");
+ T(docA._rev == docB._rev);
+ };
+ },
+
+ design_docs_test: new function() {
+ // make sure design docs replicate properly
+ this.init = function(dbA, dbB) {
+ dbA.save({ _id:"_design/test" });
+ };
+
+ this.afterAB1 = function() {
+ var docA = dbA.open("_design/test");
+ var docB = dbB.open("_design/test");
+ T(docA._rev == docB._rev);
+ };
+ },
+
+ attachments_test: new function () {
+ // Test attachments
+ this.init = function(dbA, dbB) {
+ dbA.save({
+ _id:"bin_doc",
+ _attachments:{
+ "foo+bar.txt": {
+ "type":"base64",
+ "data": "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ });
+ // make sure on design docs as well
+ dbA.save({
+ _id:"_design/with_bin",
+ _attachments:{
+ "foo+bar.txt": {
+ "type":"base64",
+ "data": "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ });
+ };
+
+ this.afterAB1 = function(dbA, dbB) {
+ var xhr = CouchDB.request("GET",
+ "/test_suite_db_a/bin_doc/foo%2Bbar.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+
+ xhr = CouchDB.request("GET",
+ "/test_suite_db_b/bin_doc/foo%2Bbar.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+
+ // and the design-doc
+ xhr = CouchDB.request("GET",
+ "/test_suite_db_a/_design/with_bin/foo%2Bbar.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+
+ xhr = CouchDB.request("GET",
+ "/test_suite_db_b/_design/with_bin/foo%2Bbar.txt");
+ T(xhr.responseText == "This is a base64 encoded text");
+ };
+ },
+
+ conflicts_test: new function () {
+ // test conflicts
+ this.init = function(dbA, dbB) {
+ dbA.save({_id:"foo",value:"a"});
+ dbB.save({_id:"foo",value:"b"});
+ };
+
+ this.afterBA1 = function(dbA, dbB) {
+ var docA = dbA.open("foo", {conflicts: true});
+ var docB = dbB.open("foo", {conflicts: true});
+
+ // make sure the same rev is in each db
+ T(docA._rev === docB._rev);
+
+ // make sure the conflicts are the same in each db
+ T(docA._conflicts[0] === docB._conflicts[0]);
+
+ // delete a conflict.
+ dbA.deleteDoc({_id:"foo", _rev:docA._conflicts[0]});
+ };
+
+ this.afterBA2 = function(dbA, dbB) {
+ // open documents and include the conflict meta data
+ var docA = dbA.open("foo", {conflicts: true, deleted_conflicts: true});
+ var docB = dbB.open("foo", {conflicts: true, deleted_conflicts: true});
+
+ // We should have no conflicts this time
+ T(typeof docA._conflicts === "undefined");
+ T(typeof docB._conflicts === "undefined");
+
+ // They show up as deleted conflicts instead
+ T(docA._deleted_conflicts[0] == docB._deleted_conflicts[0]);
+ };
+ }
+ };
+
+ var test;
+ for(test in repTests) {
+ if(repTests[test].init) {
+ repTests[test].init(dbA, dbB);
+ }
+ }
+
+ var result = CouchDB.replicate(A, B);
+
+ var seqA = result.source_last_seq;
+ T(0 == result.history[0].start_last_seq);
+ T(typeof result.history[1] === "undefined");
+
+ for(test in repTests) {
+ if(repTests[test].afterAB1) repTests[test].afterAB1(dbA, dbB);
+ }
+
+ result = CouchDB.replicate(B, A);
+
+ var seqB = result.source_last_seq;
+ T(0 == result.history[0].start_last_seq);
+ T(typeof result.history[1] === "undefined");
+
+ for(test in repTests) {
+ if(repTests[test].afterBA1) repTests[test].afterBA1(dbA, dbB);
+ }
+
+ var result2 = CouchDB.replicate(A, B);
+
+ // each successful replication produces a new session id
+ T(result2.session_id != result.session_id);
+
+ T(seqA < result2.source_last_seq);
+ T(seqA == result2.history[0].start_last_seq);
+ T(result2.history[1].end_last_seq == seqA);
+
+ seqA = result2.source_last_seq;
+
+ for(test in repTests) {
+ if(repTests[test].afterAB2) repTests[test].afterAB2(dbA, dbB);
+ }
+
+ result = CouchDB.replicate(B, A);
+
+ T(seqB < result.source_last_seq);
+ T(seqB == result.history[0].start_last_seq);
+ T(result.history[1].end_last_seq == seqB);
+
+ seqB = result.source_last_seq;
+
+ for(test in repTests) {
+ if(repTests[test].afterBA2) repTests[test].afterBA2(dbA, dbB);
+ }
+
+ // do a replication where nothing has changed
+ result2 = CouchDB.replicate(B, A);
+ T(result2.no_changes == true);
+ T(result2.session_id == result.session_id);
+ }
+
+ // test optional automatic creation of the target db
+
+ var dbA = new CouchDB("test_suite_db_a", {"X-Couch-Full-Commit":"false"});
+ var dbB = new CouchDB("test_suite_db_b", {"X-Couch-Full-Commit":"false"});
+
+ dbA.deleteDb();
+ dbA.createDb();
+ dbB.deleteDb();
+
+ // local
+ CouchDB.replicate(dbA.name, "test_suite_db_b", {
+ body: {"create_target": true}
+ });
+ TEquals("test_suite_db_b", dbB.info().db_name,
+ "Target database should exist");
+
+ // remote
+ dbB.deleteDb();
+ CouchDB.replicate(dbA.name, CouchDB.protocol + CouchDB.host + "/test_suite_db_b", {
+ body: {"create_target": true}
+ });
+ TEquals("test_suite_db_b", dbB.info().db_name,
+ "Target database should exist");
+
+ // continuous
+ var continuousResult = CouchDB.replicate(dbA.name, "test_suite_db_b", {
+ body: {"continuous": true}
+ });
+ T(continuousResult.ok);
+ T(continuousResult._local_id);
+
+ var cancelResult = CouchDB.replicate(dbA.name, "test_suite_db_b", {
+ body: {"cancel": true}
+ });
+ T(cancelResult.ok);
+ T(continuousResult._local_id == cancelResult._local_id);
+
+ try {
+ var cancelResult2 = CouchDB.replicate(dbA.name, "test_suite_db_b", {
+ body: {"cancel": true}
+ });
+ } catch (e) {
+ T(e.error == "not_found");
+ }
+ // test replication object option doc_ids
+
+ var dbA = new CouchDB("test_suite_rep_docs_db_a", {"X-Couch-Full-Commit":"false"});
+ var dbB = new CouchDB("test_suite_rep_docs_db_b", {"X-Couch-Full-Commit":"false"});
+
+ dbA.deleteDb();
+ dbA.createDb();
+
+ var all_docs = [
+ {
+ _id: "foo1",
+ value: "a"
+ },
+ {
+ _id: "foo2",
+ value: "b"
+ },
+ {
+ _id: "foo3",
+ value: "c"
+ },
+ {
+ _id: "slashed/foo",
+ value: "s"
+ },
+ {
+ _id: "_design/foobar",
+ language: "javascript",
+ value: "I am a design doc",
+ filters: {
+ idfilter: (function(doc, req) {
+ return doc.value == Number(req.filter_value);
+ }).toString()
+ },
+ views: {
+ countview: (function(doc) {
+ emit(doc.value, 1);
+ }).toString()
+ }
+ }
+ ];
+
+ for (var i = 0; i < all_docs.length; i++) {
+ T(dbA.save(all_docs[i]).ok);
+ }
+
+ var dbPairs = [
+ {source:"test_suite_rep_docs_db_a",
+ target:"test_suite_rep_docs_db_b"},
+ {source:"test_suite_rep_docs_db_a",
+ target:CouchDB.protocol + host + "/test_suite_rep_docs_db_b"},
+ {source:CouchDB.protocol + host + "/test_suite_rep_docs_db_a",
+ target:"test_suite_rep_docs_db_b"},
+ {source:CouchDB.protocol + host + "/test_suite_rep_docs_db_a",
+ target:CouchDB.protocol + host + "/test_suite_rep_docs_db_b"}
+ ];
+
+ var target_doc_ids = [
+ ["foo1", "foo3", "foo666"],
+ ["foo1", "foo666"],
+ ["foo666", "foo2"],
+ ["foo2", "foo9999", "foo1"],
+ ["foo3", "slashed/foo"],
+ ["foo3", "slashed%2Ffoo"],
+ ["foo1", "_design/foobar"],
+ ["foo1", "foo1001", "_design%2Ffoobar"]
+ ];
+
+ for (var i = 0; i < dbPairs.length; i++) {
+ var src_db = dbPairs[i].source;
+ var tgt_db = dbPairs[i].target;
+
+ for (var j = 0; j < target_doc_ids.length; j++) {
+ var doc_ids = target_doc_ids[j];
+ var valid_doc_ids = [];
+ var invalid_doc_ids = [];
+
+ for (var p = 0; p < doc_ids.length; p++) {
+ var id = doc_ids[p];
+ var found = false;
+
+ for (var k = 0; k < all_docs.length; k++) {
+ var doc = all_docs[k];
+
+ if (id === doc._id) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ valid_doc_ids.push(id);
+ } else {
+ invalid_doc_ids.push(id);
+ }
+ };
+
+ dbB.deleteDb();
+ dbB.createDb();
+
+ var repResult = CouchDB.replicate(src_db, tgt_db, {
+ body: {"doc_ids": doc_ids}
+ });
+
+ T(repResult.ok);
+ T(repResult.docs_written === valid_doc_ids.length);
+ T(repResult.docs_read === valid_doc_ids.length);
+ T(repResult.doc_write_failures === 0);
+
+ for (var k = 0; k < all_docs.length; k++) {
+ var doc = all_docs[k];
+ var tgt_doc = dbB.open(doc._id);
+
+ if (doc_ids.indexOf(doc._id) >= 0) {
+ T(tgt_doc !== null);
+ T(tgt_doc.value === doc.value);
+ } else {
+ T(tgt_doc === null);
+ }
+ }
+
+ for (var k = 0; k < invalid_doc_ids.length; k++) {
+ var tgt_doc = dbB.open(invalid_doc_ids[k]);
+
+ T(tgt_doc === null);
+ }
+ }
+ }
+
+ // test filtered replication
+ var filterFun1 = (function(doc, req) {
+ if (doc.value < Number(req.query.maxvalue)) {
+ return true;
+ } else {
+ return false;
+ }
+ }).toString();
+
+ var filterFun2 = (function(doc, req) {
+ return true;
+ }).toString();
+
+ var dbPairs = [
+ {source:"test_suite_filtered_rep_db_a",
+ target:"test_suite_filtered_rep_db_b"},
+ {source:"test_suite_filtered_rep_db_a",
+ target:CouchDB.protocol + host + "/test_suite_filtered_rep_db_b"},
+ {source:CouchDB.protocol + host + "/test_suite_filtered_rep_db_a",
+ target:"test_suite_filtered_rep_db_b"},
+ {source:CouchDB.protocol + host + "/test_suite_filtered_rep_db_a",
+ target:CouchDB.protocol + host + "/test_suite_filtered_rep_db_b"}
+ ];
+ var sourceDb = new CouchDB("test_suite_filtered_rep_db_a");
+ var targetDb = new CouchDB("test_suite_filtered_rep_db_b");
+
+ for (var i = 0; i < dbPairs.length; i++) {
+ sourceDb.deleteDb();
+ sourceDb.createDb();
+
+ T(sourceDb.save({_id: "foo1", value: 1}).ok);
+ T(sourceDb.save({_id: "foo2", value: 2}).ok);
+ T(sourceDb.save({_id: "foo3", value: 3}).ok);
+ T(sourceDb.save({_id: "foo4", value: 4}).ok);
+
+ var ddoc = {
+ "_id": "_design/mydesign",
+ "language": "javascript",
+ "filters": {
+ "myfilter": filterFun1
+ }
+ };
+
+ T(sourceDb.save(ddoc).ok);
+
+ targetDb.deleteDb();
+ targetDb.createDb();
+
+ var dbA = dbPairs[i].source;
+ var dbB = dbPairs[i].target;
+
+ var repResult = CouchDB.replicate(dbA, dbB, {
+ body: {
+ "filter" : "mydesign/myfilter",
+ "query_params" : {
+ "maxvalue": "3"
+ }
+ }
+ });
+
+ T(repResult.ok);
+ T(repResult.history instanceof Array);
+ T(repResult.history.length === 1);
+ T(repResult.history[0].docs_written === 2);
+ T(repResult.history[0].docs_read === 2);
+ T(repResult.history[0].doc_write_failures === 0);
+
+ var docFoo1 = targetDb.open("foo1");
+ T(docFoo1 !== null);
+ T(docFoo1.value === 1);
+
+ var docFoo2 = targetDb.open("foo2");
+ T(docFoo2 !== null);
+ T(docFoo2.value === 2);
+
+ var docFoo3 = targetDb.open("foo3");
+ T(docFoo3 === null);
+
+ var docFoo4 = targetDb.open("foo4");
+ T(docFoo4 === null);
+
+ // replication should start from scratch after the filter's code changed
+
+ ddoc.filters.myfilter = filterFun2;
+ T(sourceDb.save(ddoc).ok);
+
+ repResult = CouchDB.replicate(dbA, dbB, {
+ body: {
+ "filter" : "mydesign/myfilter",
+ "query_params" : {
+ "maxvalue": "3"
+ }
+ }
+ });
+
+ T(repResult.ok);
+ T(repResult.history instanceof Array);
+ T(repResult.history.length === 1);
+ T(repResult.history[0].docs_written === 3);
+ T(repResult.history[0].docs_read === 3);
+ T(repResult.history[0].doc_write_failures === 0);
+
+ docFoo1 = targetDb.open("foo1");
+ T(docFoo1 !== null);
+ T(docFoo1.value === 1);
+
+ docFoo2 = targetDb.open("foo2");
+ T(docFoo2 !== null);
+ T(docFoo2.value === 2);
+
+ docFoo3 = targetDb.open("foo3");
+ T(docFoo3 !== null);
+ T(docFoo3.value === 3);
+
+ docFoo4 = targetDb.open("foo4");
+ T(docFoo4 !== null);
+ T(docFoo4.value === 4);
+
+ T(targetDb.open("_design/mydesign") !== null);
+ }
+
+ // test for COUCHDB-868 - design docs' attachments not getting replicated
+ // when doing a pull replication with HTTP basic auth
+ dbA = new CouchDB("test_suite_db_a");
+ dbB = new CouchDB("test_suite_db_b");
+ var usersDb = new CouchDB("test_suite_auth");
+ var lorem = CouchDB.request(
+ "GET", "/_utils/script/test/lorem.txt").responseText;
+ var lorem_b64 = CouchDB.request(
+ "GET", "/_utils/script/test/lorem_b64.txt").responseText;
+
+ usersDb.deleteDb();
+ usersDb.createDb();
+ dbA.deleteDb();
+ dbA.createDb();
+ dbB.deleteDb();
+ dbB.createDb();
+
+ var atts_ddoc = {
+ _id: "_design/i_have_atts",
+ language: "javascript"
+ };
+ T(dbA.save(atts_ddoc).ok);
+
+ var rev = atts_ddoc._rev;
+ var att_1_name = "lorem.txt";
+ var att_2_name = "lorem.dat";
+ var xhr = CouchDB.request(
+ "PUT", "/" + dbA.name + "/" + atts_ddoc._id + "/" + att_1_name + "?rev=" + rev, {
+ headers: {"Content-Type": "text/plain;charset=utf-8"},
+ body: lorem
+ });
+ rev = JSON.parse(xhr.responseText).rev;
+ T(xhr.status === 201);
+ xhr = CouchDB.request(
+ "PUT", "/" + dbA.name + "/" + atts_ddoc._id + "/" + att_2_name + "?rev=" + rev, {
+ headers: {"Content-Type": "application/data"},
+ body: lorem_b64
+ });
+ T(xhr.status === 201);
+
+ var fdmananaUserDoc = CouchDB.prepareUserDoc({
+ name: "fdmanana",
+ roles: ["reader"]
+ }, "qwerty");
+ T(usersDb.save(fdmananaUserDoc).ok);
+
+ T(dbA.setSecObj({
+ admins: {
+ names: [],
+ roles: ["admin"]
+ },
+ readers: {
+ names: [],
+ roles: ["reader"]
+ }
+ }).ok);
+ T(dbB.setSecObj({
+ admins: {
+ names: ["fdmanana"],
+ roles: []
+ }
+ }).ok);
+
+ var server_config = [
+ {
+ section: "couch_httpd_auth",
+ key: "authentication_db",
+ value: usersDb.name
+ },
+ // to prevent admin party mode
+ {
+ section: "admins",
+ key: "joe",
+ value: "erlang"
+ }
+ ];
+
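+ // log in as the non-admin user "fdmanana" and pull dbA (where the user is
+ // only a reader) into dbB (where the user is an admin); the design doc and
+ // both of its attachments must make it across (COUCHDB-868)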
+ var test_fun = function() {
+ T(CouchDB.login("fdmanana", "qwerty").ok);
+ T(CouchDB.session().userCtx.name === "fdmanana");
+ T(CouchDB.session().userCtx.roles.indexOf("_admin") === -1);
+
+ var repResult = CouchDB.replicate(
+ CouchDB.protocol + "fdmanana:qwerty@" + host + "/" + dbA.name,
+ dbB.name
+ );
+ T(repResult.ok === true);
+ T(repResult.history instanceof Array);
+ T(repResult.history.length === 1);
+ T(repResult.history[0].docs_written === 1);
+ T(repResult.history[0].docs_read === 1);
+ T(repResult.history[0].doc_write_failures === 0);
+
+ var atts_ddoc_copy = dbB.open(atts_ddoc._id);
+ T(atts_ddoc_copy !== null);
+ T(typeof atts_ddoc_copy._attachments === "object");
+ T(atts_ddoc_copy._attachments !== null);
+ T(att_1_name in atts_ddoc_copy._attachments);
+ T(att_2_name in atts_ddoc_copy._attachments);
+
+ var xhr = CouchDB.request("GET", "/" + dbB.name + "/" + atts_ddoc._id + "/" + att_1_name);
+ T(xhr.status === 200);
+ T(xhr.responseText === lorem);
+
+ xhr = CouchDB.request("GET", "/" + dbB.name + "/" + atts_ddoc._id + "/" + att_2_name);
+ T(xhr.status === 200);
+ T(xhr.responseText === lorem_b64);
+
+ CouchDB.logout();
+ T(CouchDB.login("joe", "erlang").ok);
+ T(dbA.setSecObj({
+ admins: {
+ names: [],
+ roles: ["bar"]
+ },
+ readers: {
+ names: [],
+ roles: ["foo"]
+ }
+ }).ok);
+ T(dbB.deleteDb().ok === true);
+ T(dbB.createDb().ok === true);
+ T(dbB.setSecObj({
+ admins: {
+ names: ["fdmanana"],
+ roles: []
+ }
+ }).ok);
+ CouchDB.logout();
+
+ T(CouchDB.login("fdmanana", "qwerty").ok);
+ T(CouchDB.session().userCtx.name === "fdmanana");
+ T(CouchDB.session().userCtx.roles.indexOf("_admin") === -1);
+ try {
+ repResult = CouchDB.replicate(
+ CouchDB.protocol + "fdmanana:qwerty@" + host + "/" + dbA.name,
+ dbB.name
+ );
+ T(false, "replication should have failed");
+ } catch(x) {
+ T(x.error === "unauthorized");
+ }
+
+ atts_ddoc_copy = dbB.open(atts_ddoc._id);
+ T(atts_ddoc_copy === null);
+
+ CouchDB.logout();
+ T(CouchDB.login("joe", "erlang").ok);
+ };
+
+ run_on_modified_server(server_config, test_fun);
+
+ // COUCHDB-1093 - filtered and continuous _changes feed dies when the
+ // database is compacted
+ dbA = new CouchDB("test_suite_db_a");
+ dbB = new CouchDB("test_suite_db_b");
+
+ dbA.deleteDb();
+ dbA.createDb();
+ dbB.deleteDb();
+ dbB.createDb();
+
+ var docs = makeDocs(1, 10);
+ docs.push({
+ _id: "_design/foo",
+ language: "javascript",
+ filters: {
+ myfilter: (function(doc, req) { return true; }).toString()
+ }
+ });
+ T(dbA.bulkSave(docs).ok);
+
+ var repResult = CouchDB.replicate(
+ CouchDB.protocol + host + "/" + dbA.name,
+ dbB.name,
+ {
+ body: {
+ continuous: true,
+ filter: "foo/myfilter"
+ }
+ }
+ );
+ TEquals(true, repResult.ok);
+ TEquals('string', typeof repResult._local_id);
+
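+ // snapshot the active tasks so we can later verify that the continuous
+ // replication task survived the compaction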
+ var xhr = CouchDB.request("GET", "/_active_tasks");
+ var tasks = JSON.parse(xhr.responseText);
+
+ TEquals(true, dbA.compact().ok);
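+ // compaction isn't instantaneous, loop until done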
+ while (dbA.info().compact_running) {};
+
+ TEquals(true, dbA.save(makeDocs(30, 31)[0]).ok);
+ xhr = CouchDB.request("GET", "/_active_tasks");
+
+ var tasksAfter = JSON.parse(xhr.responseText);
+ TEquals(tasks.length, tasksAfter.length);
+ T(dbB.open("30") !== null);
+
+ repResult = CouchDB.replicate(
+ CouchDB.protocol + host + "/" + dbA.name,
+ dbB.name,
+ {
+ body: {
+ continuous: true,
+ filter: "foo/myfilter",
+ cancel: true
+ }
+ }
+ );
+ TEquals(true, repResult.ok);
+ TEquals('string', typeof repResult._local_id);
+
+
+ // cleanup
+ dbA.deleteDb();
+ dbB.deleteDb();
+ usersDb.deleteDb();
+};
diff --git a/1.1.x/share/www/script/test/replicator_db.js b/1.1.x/share/www/script/test/replicator_db.js
new file mode 100644
index 00000000..2810352c
--- /dev/null
+++ b/1.1.x/share/www/script/test/replicator_db.js
@@ -0,0 +1,1155 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
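+// Each document in the replicator database describes one replication,
+// roughly of the shape:
+//   { _id: "my_rep", source: "http://host/db_a", target: "db_b",
+//     continuous: true }
+// The replicator stamps it with _replication_state ("triggered", "completed"
+// or "error"), _replication_state_time and _replication_id, which is what
+// the assertions below inspect.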
+couchTests.replicator_db = function(debug) {
+
+ if (debug) debugger;
+
+ var wait_rep_doc = 500; // number of millisecs to wait after saving a Rep Doc
+ var host = CouchDB.host;
+ var dbA = new CouchDB("test_suite_rep_db_a", {"X-Couch-Full-Commit":"false"});
+ var dbB = new CouchDB("test_suite_rep_db_b", {"X-Couch-Full-Commit":"false"});
+ var repDb = new CouchDB("test_suite_rep_db", {"X-Couch-Full-Commit":"false"});
+ var usersDb = new CouchDB("test_suite_auth", {"X-Couch-Full-Commit":"false"});
+
+ var docs1 = [
+ {
+ _id: "foo1",
+ value: 11
+ },
+ {
+ _id: "foo2",
+ value: 22
+ },
+ {
+ _id: "foo3",
+ value: 33
+ }
+ ];
+
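+ // poll the replication doc until its _replication_state reaches the given
+ // state, or give up after ~3 seconds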
+ function waitForRep(repDb, repDoc, state) {
+ var newRep,
+ t0 = new Date(),
+ t1,
+ ms = 3000;
+
+ do {
+ newRep = repDb.open(repDoc._id);
+ t1 = new Date();
+ } while (((t1 - t0) <= ms) && newRep._replication_state !== state);
+ }
+
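+ // poll until the target's update_seq catches up with the source's current
+ // one, or give up after ~3 seconds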
+ function waitForSeq(sourceDb, targetDb) {
+ var targetSeq,
+ sourceSeq = sourceDb.info().update_seq,
+ t0 = new Date(),
+ t1,
+ ms = 3000;
+
+ do {
+ targetSeq = targetDb.info().update_seq;
+ t1 = new Date();
+ } while (((t1 - t0) <= ms) && targetSeq < sourceSeq);
+ }
+
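+ // poll until the doc's revision position (the N in an "N-hash" _rev)
+ // reaches pos, or give up after ~3 seconds; returns the doc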
+ function waitForDocPos(db, docId, pos) {
+ var doc, curPos, t0, t1,
+ maxWait = 3000;
+
+ doc = db.open(docId);
+ curPos = Number(doc._rev.split("-", 1));
+ t0 = t1 = new Date();
+
+ while ((curPos < pos) && ((t1 - t0) <= maxWait)) {
+ doc = db.open(docId);
+ curPos = Number(doc._rev.split("-", 1));
+ t1 = new Date();
+ }
+
+ return doc;
+ }
+
+ function wait(ms) {
+ var t0 = new Date(), t1;
+ do {
+ CouchDB.request("GET", "/");
+ t1 = new Date();
+ } while ((t1 - t0) <= ms);
+ }
+
+
+ function populate_db(db, docs) {
+ db.deleteDb();
+ db.createDb();
+ for (var i = 0; i < docs.length; i++) {
+ var d = docs[i];
+ delete d._rev;
+ T(db.save(d).ok);
+ }
+ }
+
+ function simple_replication() {
+ populate_db(dbA, docs1);
+ populate_db(dbB, []);
+
+ var repDoc = {
+ _id: "foo_simple_rep",
+ source: dbA.name,
+ target: dbB.name
+ };
+ T(repDb.save(repDoc).ok);
+
+ waitForRep(repDb, repDoc, "completed");
+ for (var i = 0; i < docs1.length; i++) {
+ var doc = docs1[i];
+ var copy = dbB.open(doc._id);
+ T(copy !== null);
+ T(copy.value === doc.value);
+ }
+
+ var repDoc1 = repDb.open(repDoc._id);
+ T(repDoc1 !== null);
+ T(repDoc1.source === repDoc.source);
+ T(repDoc1.target === repDoc.target);
+ T(repDoc1._replication_state === "completed", "simple");
+ T(typeof repDoc1._replication_state_time === "number");
+ T(typeof repDoc1._replication_id === "string");
+ }
+
+
+ function filtered_replication() {
+ var docs2 = docs1.concat([
+ {
+ _id: "_design/mydesign",
+ language : "javascript",
+ filters : {
+ myfilter : (function(doc, req) {
+ return (doc.value % 2) !== Number(req.query.myparam);
+ }).toString()
+ }
+ }
+ ]);
+
+ populate_db(dbA, docs2);
+ populate_db(dbB, []);
+
+ var repDoc = {
+ _id: "foo_filt_rep_doc",
+ source: "http://" + host + "/" + dbA.name,
+ target: dbB.name,
+ filter: "mydesign/myfilter",
+ query_params: {
+ myparam: 1
+ }
+ };
+ T(repDb.save(repDoc).ok);
+
+ waitForRep(repDb, repDoc, "completed");
+ for (var i = 0; i < docs2.length; i++) {
+ var doc = docs2[i];
+ var copy = dbB.open(doc._id);
+
+ if (typeof doc.value === "number") {
+ if ((doc.value % 2) !== 1) {
+ T(copy !== null);
+ T(copy.value === doc.value);
+ } else {
+ T(copy === null);
+ }
+ }
+ }
+
+ var repDoc1 = repDb.open(repDoc._id);
+ T(repDoc1 !== null);
+ T(repDoc1.source === repDoc.source);
+ T(repDoc1.target === repDoc.target);
+ T(repDoc1._replication_state === "completed", "filtered");
+ T(typeof repDoc1._replication_state_time === "number");
+ T(typeof repDoc1._replication_id === "string");
+ }
+
+
+ function continuous_replication() {
+ populate_db(dbA, docs1);
+ populate_db(dbB, []);
+
+ var repDoc = {
+ _id: "foo_cont_rep_doc",
+ source: "http://" + host + "/" + dbA.name,
+ target: dbB.name,
+ continuous: true
+ };
+
+ T(repDb.save(repDoc).ok);
+
+ waitForSeq(dbA, dbB);
+ for (var i = 0; i < docs1.length; i++) {
+ var doc = docs1[i];
+ var copy = dbB.open(doc._id);
+ T(copy !== null);
+ T(copy.value === doc.value);
+ }
+
+ // add another doc to source, it will be replicated to target
+ var docX = {
+ _id: "foo1000",
+ value: 1001
+ };
+
+ T(dbA.save(docX).ok);
+
+ waitForSeq(dbA, dbB);
+ var copy = dbB.open("foo1000");
+ T(copy !== null);
+ T(copy.value === 1001);
+
+ var repDoc1 = repDb.open(repDoc._id);
+ T(repDoc1 !== null);
+ T(repDoc1.source === repDoc.source);
+ T(repDoc1.target === repDoc.target);
+ T(repDoc1._replication_state === "triggered");
+ T(typeof repDoc1._replication_state_time === "number");
+ T(typeof repDoc1._replication_id === "string");
+
+ // add a design doc to source, it will be replicated to target
+ // when the "user_ctx" property is not defined in the replication doc,
+ // the replication will be done under an _admin context, therefore
+ // design docs will be replicated
+ var ddoc = {
+ _id: "_design/foobar",
+ language: "javascript"
+ };
+
+ T(dbA.save(ddoc).ok);
+
+ waitForSeq(dbA, dbB);
+ var ddoc_copy = dbB.open("_design/foobar");
+ T(ddoc_copy !== null);
+ T(ddoc_copy.language === "javascript");
+
+ // update the design doc on source, test that the new revision is replicated
+ ddoc.language = "erlang";
+ T(dbA.save(ddoc).ok);
+ T(ddoc._rev.indexOf("2-") === 0);
+
+ waitForSeq(dbA, dbB);
+ ddoc_copy = dbB.open("_design/foobar");
+ T(ddoc_copy !== null);
+ T(ddoc_copy._rev === ddoc._rev);
+ T(ddoc_copy.language === "erlang");
+
+ // stop replication by deleting the replication document
+ T(repDb.deleteDoc(repDoc1).ok);
+
+ // add another doc to source, it will NOT be replicated to target
+ var docY = {
+ _id: "foo666",
+ value: 999
+ };
+
+ T(dbA.save(docY).ok);
+
+ wait(200); // is there a way to avoid wait here?
+ var copy = dbB.open("foo666");
+ T(copy === null);
+ }
+
+
+ function by_doc_ids_replication() {
+ // to test that we can replicate docs with slashes in their IDs
+ var docs2 = docs1.concat([
+ {
+ _id: "_design/mydesign",
+ language : "javascript"
+ }
+ ]);
+
+ populate_db(dbA, docs2);
+ populate_db(dbB, []);
+
+ var repDoc = {
+ _id: "foo_cont_rep_doc",
+ source: "http://" + host + "/" + dbA.name,
+ target: dbB.name,
+ doc_ids: ["foo666", "foo3", "_design/mydesign", "foo999", "foo1"]
+ };
+ T(repDb.save(repDoc).ok);
+
+ waitForRep(repDb, repDoc, "completed");
+ var copy = dbB.open("foo1");
+ T(copy !== null);
+ T(copy.value === 11);
+
+ copy = dbB.open("foo2");
+ T(copy === null);
+
+ copy = dbB.open("foo3");
+ T(copy !== null);
+ T(copy.value === 33);
+
+ copy = dbB.open("foo666");
+ T(copy === null);
+
+ copy = dbB.open("foo999");
+ T(copy === null);
+
+ copy = dbB.open("_design/mydesign");
+ T(copy !== null);
+ T(copy.language === "javascript");
+ }
+
+
+ function successive_identical_replications() {
+ populate_db(dbA, docs1);
+ populate_db(dbB, []);
+
+ var repDoc1 = {
+ _id: "foo_ident_rep_1",
+ source: dbA.name,
+ target: dbB.name
+ };
+ T(repDb.save(repDoc1).ok);
+
+ waitForRep(repDb, repDoc1, "completed");
+ for (var i = 0; i < docs1.length; i++) {
+ var doc = docs1[i];
+ var copy = dbB.open(doc._id);
+ T(copy !== null);
+ T(copy.value === doc.value);
+ }
+
+ var repDoc1_copy = repDb.open(repDoc1._id);
+ T(repDoc1_copy !== null);
+ T(repDoc1_copy.source === repDoc1.source);
+ T(repDoc1_copy.target === repDoc1.target);
+ T(repDoc1_copy._replication_state === "completed");
+ T(typeof repDoc1_copy._replication_state_time === "number");
+ T(typeof repDoc1_copy._replication_id === "string");
+
+ var newDoc = {
+ _id: "doc666",
+ value: 666
+ };
+ T(dbA.save(newDoc).ok);
+
+ wait(200);
+ var newDoc_copy = dbB.open(newDoc._id);
+ // not replicated because first replication is complete (not continuous)
+ T(newDoc_copy === null);
+
+ var repDoc2 = {
+ _id: "foo_ident_rep_2",
+ source: dbA.name,
+ target: dbB.name
+ };
+ T(repDb.save(repDoc2).ok);
+
+ waitForRep(repDb, repDoc2, "completed");
+ var newDoc_copy = dbB.open(newDoc._id);
+ T(newDoc_copy !== null);
+ T(newDoc_copy.value === newDoc.value);
+
+ var repDoc2_copy = repDb.open(repDoc2._id);
+ T(repDoc2_copy !== null);
+ T(repDoc2_copy.source === repDoc1.source);
+ T(repDoc2_copy.target === repDoc1.target);
+ T(repDoc2_copy._replication_state === "completed");
+ T(typeof repDoc2_copy._replication_state_time === "number");
+ T(typeof repDoc2_copy._replication_id === "string");
+ T(repDoc2_copy._replication_id === repDoc1_copy._replication_id);
+ }
+
+
+ // test the case where multiple replication docs (with different IDs)
+ // in fact describe the same replication (same source, target, etc.)
+ function identical_rep_docs() {
+ populate_db(dbA, docs1);
+ populate_db(dbB, []);
+
+ var repDoc1 = {
+ _id: "foo_dup_rep_doc_1",
+ source: "http://" + host + "/" + dbA.name,
+ target: dbB.name
+ };
+ var repDoc2 = {
+ _id: "foo_dup_rep_doc_2",
+ source: "http://" + host + "/" + dbA.name,
+ target: dbB.name
+ };
+
+ T(repDb.save(repDoc1).ok);
+ T(repDb.save(repDoc2).ok);
+
+ waitForRep(repDb, repDoc1, "completed");
+ for (var i = 0; i < docs1.length; i++) {
+ var doc = docs1[i];
+ var copy = dbB.open(doc._id);
+ T(copy !== null);
+ T(copy.value === doc.value);
+ }
+
+ repDoc1 = repDb.open("foo_dup_rep_doc_1");
+ T(repDoc1 !== null);
+ T(repDoc1._replication_state === "completed", "identical");
+ T(typeof repDoc1._replication_state_time === "number");
+ T(typeof repDoc1._replication_id === "string");
+
+ repDoc2 = repDb.open("foo_dup_rep_doc_2");
+ T(repDoc2 !== null);
+ T(typeof repDoc2._replication_state === "undefined");
+ T(typeof repDoc2._replication_state_time === "undefined");
+ T(repDoc2._replication_id === repDoc1._replication_id);
+ }
+
+
+ // test the case where multiple replication docs (with different IDs)
+ // in fact describe the same continuous replication (same source, target, etc.)
+ function identical_continuous_rep_docs() {
+ populate_db(dbA, docs1);
+ populate_db(dbB, []);
+
+ var repDoc1 = {
+ _id: "foo_dup_cont_rep_doc_1",
+ source: "http://" + host + "/" + dbA.name,
+ target: dbB.name,
+ continuous: true
+ };
+ var repDoc2 = {
+ _id: "foo_dup_cont_rep_doc_2",
+ source: "http://" + host + "/" + dbA.name,
+ target: dbB.name,
+ continuous: true
+ };
+
+ T(repDb.save(repDoc1).ok);
+ T(repDb.save(repDoc2).ok);
+
+ waitForSeq(dbA, dbB);
+ for (var i = 0; i < docs1.length; i++) {
+ var doc = docs1[i];
+ var copy = dbB.open(doc._id);
+ T(copy !== null);
+ T(copy.value === doc.value);
+ }
+
+ repDoc1 = repDb.open("foo_dup_cont_rep_doc_1");
+ T(repDoc1 !== null);
+ T(repDoc1._replication_state === "triggered");
+ T(typeof repDoc1._replication_state_time === "number");
+ T(typeof repDoc1._replication_id === "string");
+
+ repDoc2 = repDb.open("foo_dup_cont_rep_doc_2");
+ T(repDoc2 !== null);
+ T(typeof repDoc2._replication_state === "undefined");
+ T(typeof repDoc2._replication_state_time === "undefined");
+ T(repDoc2._replication_id === repDoc1._replication_id);
+
+ var newDoc = {
+ _id: "foo666",
+ value: 999
+ };
+ T(dbA.save(newDoc).ok);
+
+ waitForSeq(dbA, dbB);
+ var copy = dbB.open("foo666");
+ T(copy !== null);
+ T(copy.value === 999);
+
+ // deleting the second replication doc doesn't affect the first one,
+ // nor does it stop the replication
+ T(repDb.deleteDoc(repDoc2).ok);
+ repDoc1 = repDb.open("foo_dup_cont_rep_doc_1");
+ T(repDoc1 !== null);
+ T(repDoc1._replication_state === "triggered");
+ T(typeof repDoc1._replication_state_time === "number");
+
+ var newDoc2 = {
+ _id: "foo5000",
+ value: 5000
+ };
+ T(dbA.save(newDoc2).ok);
+
+ waitForSeq(dbA, dbB);
+ var copy = dbB.open("foo5000");
+ T(copy !== null);
+ T(copy.value === 5000);
+
+ // deleting the 1st replication document stops the replication
+ T(repDb.deleteDoc(repDoc1).ok);
+ var newDoc3 = {
+ _id: "foo1983",
+ value: 1983
+ };
+ T(dbA.save(newDoc3).ok);
+
+ wait(wait_rep_doc); // TODO: how to avoid this fixed wait?
+ var copy = dbB.open("foo1983");
+ T(copy === null);
+ }
+
+
+ function test_replication_credentials_delegation() {
+ populate_db(usersDb, []);
+
+ var joeUserDoc = CouchDB.prepareUserDoc({
+ name: "joe",
+ roles: ["god", "erlanger"]
+ }, "erly");
+ T(usersDb.save(joeUserDoc).ok);
+
+ var ddoc = {
+ _id: "_design/beer",
+ language: "javascript"
+ };
+ populate_db(dbA, docs1.concat([ddoc]));
+ populate_db(dbB, []);
+
+ T(dbB.setSecObj({
+ admins: {
+ names: [],
+ roles: ["god"]
+ }
+ }).ok);
+
+ var server_admins_config = [
+ {
+ section: "admins",
+ key: "fdmanana",
+ value: "qwerty"
+ }
+ ];
+
+ run_on_modified_server(server_admins_config, function() {
+
+ T(CouchDB.login("fdmanana", "qwerty").ok);
+ T(CouchDB.session().userCtx.name === "fdmanana");
+ T(CouchDB.session().userCtx.roles.indexOf("_admin") !== -1);
+
+ var repDoc = {
+ _id: "foo_rep_del_doc_1",
+ source: dbA.name,
+ target: dbB.name,
+ user_ctx: {
+ name: "joe",
+ roles: ["erlanger"]
+ }
+ };
+
+ T(repDb.save(repDoc).ok);
+
+ waitForRep(repDb, repDoc, "completed");
+ for (var i = 0; i < docs1.length; i++) {
+ var doc = docs1[i];
+ var copy = dbB.open(doc._id);
+ T(copy !== null);
+ T(copy.value === doc.value);
+ }
+
+ // design doc was not replicated, because joe is not an admin of db B
+ var doc = dbB.open(ddoc._id);
+ T(doc === null);
+
+ // now test the same replication but putting the role "god" in the
+ // delegation user context property
+ var repDoc2 = {
+ _id: "foo_rep_del_doc_2",
+ source: dbA.name,
+ target: dbB.name,
+ user_ctx: {
+ name: "joe",
+ roles: ["erlanger", "god"]
+ }
+ };
+ T(repDb.save(repDoc2).ok);
+
+ waitForRep(repDb, repDoc2, "completed");
+ for (var i = 0; i < docs1.length; i++) {
+ var doc = docs1[i];
+ var copy = dbB.open(doc._id);
+ T(copy !== null);
+ T(copy.value === doc.value);
+ }
+
+ // because anyone with a 'god' role is an admin of db B, a replication
+ // that is delegated to a 'god' role can write design docs to db B
+ doc = dbB.open(ddoc._id);
+ T(doc !== null);
+ T(doc.language === ddoc.language);
+ });
+ }
+
+
+ function continuous_replication_survives_restart() {
+ var origRepDbName = CouchDB.request(
+ "GET", "/_config/replicator/db").responseText;
+
+ repDb.deleteDb();
+
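+ // point the replicator at our test db; with X-Couch-Persist set to false
+ // the setting isn't written to the ini file, so it won't survive the
+ // server restart below and has to be set again afterwards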
+ var xhr = CouchDB.request("PUT", "/_config/replicator/db", {
+ body : JSON.stringify(repDb.name),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ T(xhr.status === 200);
+
+ populate_db(dbA, docs1);
+ populate_db(dbB, []);
+
+ var repDoc = {
+ _id: "foo_cont_rep_survives_doc",
+ source: "http://" + host + "/" + dbA.name,
+ target: dbB.name,
+ continuous: true
+ };
+
+ T(repDb.save(repDoc).ok);
+
+ waitForSeq(dbA, dbB);
+ for (var i = 0; i < docs1.length; i++) {
+ var doc = docs1[i];
+ var copy = dbB.open(doc._id);
+ T(copy !== null);
+ T(copy.value === doc.value);
+ }
+
+ repDb.ensureFullCommit();
+ dbA.ensureFullCommit();
+
+ restartServer();
+
+ xhr = CouchDB.request("PUT", "/_config/replicator/db", {
+ body : JSON.stringify(repDb.name),
+ headers: {"X-Couch-Persist": "false"}
+ });
+
+ T(xhr.status === 200);
+
+ // add another doc to source, it will be replicated to target
+ var docX = {
+ _id: "foo1000",
+ value: 1001
+ };
+
+ T(dbA.save(docX).ok);
+
+ waitForSeq(dbA, dbB);
+ var copy = dbB.open("foo1000");
+ T(copy !== null);
+ T(copy.value === 1001);
+
+ repDoc = waitForDocPos(repDb, "foo_cont_rep_survives_doc", 3);
+ T(repDoc !== null);
+ T(repDoc.continuous === true);
+
+ // stop replication
+ T(repDb.deleteDoc(repDoc).ok);
+
+ xhr = CouchDB.request("PUT", "/_config/replicator/db", {
+ body : origRepDbName,
+ headers: {"X-Couch-Persist": "false"}
+ });
+ T(xhr.status === 200);
+ }
+
+
+ function rep_db_write_authorization() {
+ populate_db(dbA, docs1);
+ populate_db(dbB, []);
+
+ var server_admins_config = [
+ {
+ section: "admins",
+ key: "fdmanana",
+ value: "qwerty"
+ }
+ ];
+
+ run_on_modified_server(server_admins_config, function() {
+ var repDoc = {
+ _id: "foo_rep_doc",
+ source: dbA.name,
+ target: dbB.name
+ };
+
+ T(CouchDB.login("fdmanana", "qwerty").ok);
+ T(CouchDB.session().userCtx.name === "fdmanana");
+ T(CouchDB.session().userCtx.roles.indexOf("_admin") !== -1);
+
+ T(repDb.save(repDoc).ok);
+
+ waitForRep(repDb, repDoc, "completed");
+
+ for (var i = 0; i < docs1.length; i++) {
+ var doc = docs1[i];
+ var copy = dbB.open(doc._id);
+
+ T(copy !== null);
+ T(copy.value === doc.value);
+ }
+
+ repDoc = repDb.open("foo_rep_doc");
+ T(repDoc !== null);
+ repDoc.target = "test_suite_foo_db";
+ repDoc.create_target = true;
+
+ // Only the replicator can update replication documents.
+ // Admins can only add and delete replication documents.
+ try {
+ repDb.save(repDoc);
+ T(false && "Should have thrown an exception");
+ } catch (x) {
+ T(x["error"] === "forbidden");
+ }
+ });
+ }
+
+
+ function rep_doc_with_bad_rep_id() {
+ populate_db(dbA, docs1);
+ populate_db(dbB, []);
+
+ var repDoc = {
+ _id: "foo_rep",
+ source: dbA.name,
+ target: dbB.name,
+ replication_id: "1234abc"
+ };
+ T(repDb.save(repDoc).ok);
+
+ waitForRep(repDb, repDoc, "completed");
+ for (var i = 0; i < docs1.length; i++) {
+ var doc = docs1[i];
+ var copy = dbB.open(doc._id);
+ T(copy !== null);
+ T(copy.value === doc.value);
+ }
+
+ var repDoc1 = repDb.open(repDoc._id);
+ T(repDoc1 !== null);
+ T(repDoc1.source === repDoc.source);
+ T(repDoc1.target === repDoc.target);
+ T(repDoc1._replication_state === "completed",
+ "replication document with bad replication id failed");
+ T(typeof repDoc1._replication_state_time === "number");
+ T(typeof repDoc1._replication_id === "string");
+ T(repDoc1._replication_id !== "1234abc");
+ }
+
+
+ function swap_rep_db() {
+ var repDb2 = new CouchDB("test_suite_rep_db_2");
+ var dbA = new CouchDB("test_suite_rep_db_a");
+ var dbA_copy = new CouchDB("test_suite_rep_db_a_copy");
+ var dbB = new CouchDB("test_suite_rep_db_b");
+ var dbB_copy = new CouchDB("test_suite_rep_db_b_copy");
+ var dbC = new CouchDB("test_suite_rep_db_c");
+ var dbC_copy = new CouchDB("test_suite_rep_db_c_copy");
+ var repDoc1, repDoc2, repDoc3;
+ var xhr, i, doc, copy, new_doc;
+
+ populate_db(dbA, docs1);
+ populate_db(dbB, docs1);
+ populate_db(dbC, docs1);
+ populate_db(dbA_copy, []);
+ populate_db(dbB_copy, []);
+ populate_db(dbC_copy, []);
+ populate_db(repDb2, []);
+
+ repDoc1 = {
+ _id: "rep1",
+ source: CouchDB.protocol + host + "/" + dbA.name,
+ target: dbA_copy.name,
+ continuous: true
+ };
+ repDoc2 = {
+ _id: "rep2",
+ source: CouchDB.protocol + host + "/" + dbB.name,
+ target: dbB_copy.name,
+ continuous: true
+ };
+ repDoc3 = {
+ _id: "rep3",
+ source: CouchDB.protocol + host + "/" + dbC.name,
+ target: dbC_copy.name,
+ continuous: true
+ };
+
+ TEquals(true, repDb.save(repDoc1).ok);
+ TEquals(true, repDb.save(repDoc2).ok);
+
+ waitForSeq(dbA, dbA_copy);
+ waitForSeq(dbB, dbB_copy);
+
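+ // swap the replicator database to repDb2; replications defined in the
+ // old replicator database should stop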
+ xhr = CouchDB.request("PUT", "/_config/replicator/db",{
+ body : JSON.stringify(repDb2.name),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status);
+
+ new_doc = {
+ _id: "foo666",
+ value: 666
+ };
+
+ TEquals(true, dbA.save(new_doc).ok);
+ TEquals(true, dbB.save(new_doc).ok);
+ waitForSeq(dbA, dbA_copy);
+ waitForSeq(dbB, dbB_copy);
+
+ TEquals(true, repDb2.save(repDoc3).ok);
+ waitForSeq(dbC, dbC_copy);
+
+ for (i = 0; i < docs1.length; i++) {
+ doc = docs1[i];
+ copy = dbA_copy.open(doc._id);
+ T(copy !== null);
+ TEquals(doc.value, copy.value);
+ copy = dbB_copy.open(doc._id);
+ T(copy !== null);
+ TEquals(doc.value, copy.value);
+ copy = dbC_copy.open(doc._id);
+ T(copy !== null);
+ TEquals(doc.value, copy.value);
+ }
+
+ // replications rep1 and rep2 should have been stopped when the replicator
+ // database was swapped
+ copy = dbA_copy.open(new_doc._id);
+ TEquals(null, copy);
+ copy = dbB_copy.open(new_doc._id);
+ TEquals(null, copy);
+
+ xhr = CouchDB.request("PUT", "/_config/replicator/db",{
+ body : JSON.stringify(repDb.name),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status);
+
+ // after switching the replicator database back to the original one,
+ // replications rep1 and rep2 should resume, while rep3 should stop
+ TEquals(true, dbC.save(new_doc).ok);
+ wait(1000);
+
+ waitForSeq(dbA, dbA_copy);
+ waitForSeq(dbB, dbB_copy);
+
+ copy = dbA_copy.open(new_doc._id);
+ T(copy !== null);
+ TEquals(new_doc.value, copy.value);
+ copy = dbB_copy.open(new_doc._id);
+ T(copy !== null);
+ TEquals(new_doc.value, copy.value);
+ copy = dbC_copy.open(new_doc._id);
+ TEquals(null, copy);
+ }
+
+
+ function compact_rep_db() {
+ var dbA_copy = new CouchDB("test_suite_rep_db_a_copy");
+ var dbB_copy = new CouchDB("test_suite_rep_db_b_copy");
+ var repDoc1, repDoc2;
+ var xhr, i, doc, copy, new_doc;
+ var docs = makeDocs(1, 50);
+
+ populate_db(dbA, docs);
+ populate_db(dbB, docs);
+ populate_db(dbA_copy, []);
+ populate_db(dbB_copy, []);
+
+ repDoc1 = {
+ _id: "rep1",
+ source: CouchDB.protocol + host + "/" + dbA.name,
+ target: dbA_copy.name,
+ continuous: true
+ };
+ repDoc2 = {
+ _id: "rep2",
+ source: CouchDB.protocol + host + "/" + dbB.name,
+ target: dbB_copy.name,
+ continuous: true
+ };
+
+ TEquals(true, repDb.save(repDoc1).ok);
+ TEquals(true, repDb.save(repDoc2).ok);
+
+ TEquals(true, repDb.compact().ok);
+ TEquals(202, repDb.last_req.status);
+
+ waitForSeq(dbA, dbA_copy);
+ waitForSeq(dbB, dbB_copy);
+
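+ // compaction isn't instantaneous, loop until done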
+ while (repDb.info().compact_running) {};
+
+ for (i = 0; i < docs.length; i++) {
+ copy = dbA_copy.open(docs[i]._id);
+ T(copy !== null);
+ copy = dbB_copy.open(docs[i]._id);
+ T(copy !== null);
+ }
+
+ new_doc = {
+ _id: "foo666",
+ value: 666
+ };
+
+ TEquals(true, dbA.save(new_doc).ok);
+ TEquals(true, dbB.save(new_doc).ok);
+
+ waitForSeq(dbA, dbA_copy);
+ waitForSeq(dbB, dbB_copy);
+
+ copy = dbA_copy.open(new_doc._id);
+ T(copy !== null);
+ TEquals(666, copy.value);
+ copy = dbB_copy.open(new_doc._id);
+ T(copy !== null);
+ TEquals(666, copy.value);
+ }
+
+
+ function error_state_replication() {
+ populate_db(dbA, docs1);
+
+ var repDoc = {
+ _id: "foo_error_rep",
+ source: dbA.name,
+ target: "nonexistent_test_db"
+ };
+ T(repDb.save(repDoc).ok);
+
+ waitForRep(repDb, repDoc, "error");
+ var repDoc1 = repDb.open(repDoc._id);
+ T(repDoc1 !== null);
+ T(repDoc1._replication_state === "error");
+ T(typeof repDoc1._replication_state_time === "number");
+ T(typeof repDoc1._replication_id === "string");
+ }
+
+
+ function rep_doc_field_validation() {
+ var docs = makeDocs(1, 5);
+
+ populate_db(dbA, docs);
+ populate_db(dbB, []);
+
+ var repDoc = {
+ _id: "rep1",
+ target: dbB.name
+ };
+
+ try {
+ repDb.save(repDoc);
+ T(false, "should have failed because source field is missing");
+ } catch (x) {
+ TEquals("forbidden", x.error);
+ }
+
+ repDoc = {
+ _id: "rep1",
+ source: 123,
+ target: dbB.name
+ };
+
+ try {
+ repDb.save(repDoc);
+ T(false, "should have failed because source field is a number");
+ } catch (x) {
+ TEquals("forbidden", x.error);
+ }
+
+ repDoc = {
+ _id: "rep1",
+ source: dbA.name
+ };
+
+ try {
+ repDb.save(repDoc);
+ T(false, "should have failed because target field is missing");
+ } catch (x) {
+ TEquals("forbidden", x.error);
+ }
+
+ repDoc = {
+ _id: "rep1",
+ source: dbA.name,
+ target: null
+ };
+
+ try {
+ repDb.save(repDoc);
+ T(false, "should have failed because target field is null");
+ } catch (x) {
+ TEquals("forbidden", x.error);
+ }
+
+ repDoc = {
+ _id: "rep1",
+ source: dbA.name,
+ target: { url: 123 }
+ };
+
+ try {
+ repDb.save(repDoc);
+ T(false, "should have failed because target.url field is not a string");
+ } catch (x) {
+ TEquals("forbidden", x.error);
+ }
+
+ repDoc = {
+ _id: "rep1",
+ source: dbA.name,
+ target: { url: dbB.name, auth: null }
+ };
+
+ try {
+ repDb.save(repDoc);
+ T(false, "should have failed because target.auth field is null");
+ } catch (x) {
+ TEquals("forbidden", x.error);
+ }
+
+ repDoc = {
+ _id: "rep1",
+ source: dbA.name,
+ target: { url: dbB.name, auth: "foo:bar" }
+ };
+
+ try {
+ repDb.save(repDoc);
+ T(false, "should have failed because target.auth field is not an object");
+ } catch (x) {
+ TEquals("forbidden", x.error);
+ }
+
+ repDoc = {
+ _id: "rep1",
+ source: dbA.name,
+ target: dbB.name,
+ continuous: "true"
+ };
+
+ try {
+ repDb.save(repDoc);
+ T(false, "should have failed because continuous is not a boolean");
+ } catch (x) {
+ TEquals("forbidden", x.error);
+ }
+
+ repDoc = {
+ _id: "rep1",
+ source: dbA.name,
+ target: dbB.name,
+ filter: 123
+ };
+
+ try {
+ repDb.save(repDoc);
+ T(false, "should have failed because filter is not a string");
+ } catch (x) {
+ TEquals("forbidden", x.error);
+ }
+ }
+
+
+ // run all the tests
+ var server_config = [
+ {
+ section: "replicator",
+ key: "db",
+ value: repDb.name
+ }
+ ];
+
+ repDb.deleteDb();
+ run_on_modified_server(server_config, simple_replication);
+
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config, filtered_replication);
+
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config, continuous_replication);
+
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config, by_doc_ids_replication);
+
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config, successive_identical_replications);
+
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config, identical_rep_docs);
+
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config, identical_continuous_rep_docs);
+
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config, rep_db_write_authorization);
+
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config, rep_doc_with_bad_rep_id);
+
+ var server_config_2 = server_config.concat([
+ {
+ section: "couch_httpd_auth",
+ key: "authentication_db",
+ value: usersDb.name
+ }
+ ]);
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config_2, test_replication_credentials_delegation);
+
+ repDb.deleteDb();
+ restartServer();
+ continuous_replication_survives_restart();
+
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config, swap_rep_db);
+
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config, compact_rep_db);
+
+ repDb.deleteDb();
+ restartServer();
+ run_on_modified_server(server_config, rep_doc_field_validation);
+
+/*
+ * Disabled, since error state would be set on the document only after
+ * the exponential backoff retry done by the replicator database listener
+ * terminates, which takes too much time for a unit test.
+ */
+/*
+ * repDb.deleteDb();
+ * restartServer();
+ * run_on_modified_server(server_config, error_state_replication);
+ */
+
+
+ // cleanup
+ repDb.deleteDb();
+ usersDb.deleteDb();
+ dbA.deleteDb();
+ dbB.deleteDb();
+ (new CouchDB("test_suite_rep_db_2")).deleteDb();
+ (new CouchDB("test_suite_rep_db_c")).deleteDb();
+ (new CouchDB("test_suite_rep_db_a_copy")).deleteDb();
+ (new CouchDB("test_suite_rep_db_b_copy")).deleteDb();
+ (new CouchDB("test_suite_rep_db_c_copy")).deleteDb();
+};
diff --git a/1.1.x/share/www/script/test/rev_stemming.js b/1.1.x/share/www/script/test/rev_stemming.js
new file mode 100644
index 00000000..03d91c2a
--- /dev/null
+++ b/1.1.x/share/www/script/test/rev_stemming.js
@@ -0,0 +1,99 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.rev_stemming = function(debug) {
+ var db = new CouchDB("test_suite_db_a", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ var dbB = new CouchDB("test_suite_db_b", {"X-Couch-Full-Commit":"false"});
+ dbB.deleteDb();
+ dbB.createDb();
+ if (debug) debugger;
+
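+ // revision stemming: a doc's _revisions.ids history is truncated to at
+ // most _revs_limit entries (1000 by default)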
+ var newLimit = 5;
+
+ T(db.getDbProperty("_revs_limit") == 1000);
+
+ var doc = {_id:"foo",foo:0}
+ for( var i=0; i < newLimit + 1; i++) {
+ doc.foo++;
+ T(db.save(doc).ok);
+ }
+ var doc0 = db.open("foo", {revs:true});
+ T(doc0._revisions.ids.length == newLimit + 1);
+
+ var docBar = {_id:"bar",foo:0};
+ for( var i=0; i < newLimit + 1; i++) {
+ docBar.foo++;
+ T(db.save(docBar).ok);
+ }
+ T(db.open("bar", {revs:true})._revisions.ids.length == newLimit + 1);
+
+ T(db.setDbProperty("_revs_limit", newLimit).ok);
+
+ for( var i=0; i < newLimit + 1; i++) {
+ doc.foo++;
+ T(db.save(doc).ok);
+ }
+ doc0 = db.open("foo", {revs:true});
+ T(doc0._revisions.ids.length == newLimit);
+
+
+ // If you replicate after you make more edits than the limit, you'll
+ // cause a spurious edit conflict.
+ CouchDB.replicate("test_suite_db_a", "test_suite_db_b");
+ var docB1 = dbB.open("foo", {conflicts:true});
+ T(docB1._conflicts == null);
+
+ for( var i=0; i < newLimit - 1; i++) {
+ doc.foo++;
+ T(db.save(doc).ok);
+ }
+
+ // one less edit than limit, no conflict
+ CouchDB.replicate("test_suite_db_a", "test_suite_db_b");
+ var docB1 = dbB.open("foo", {conflicts:true});
+ T(docB1._conflicts == null);
+
+ //now we hit the limit
+ for( var i=0; i < newLimit; i++) {
+ doc.foo++;
+ T(db.save(doc).ok);
+ }
+
+ CouchDB.replicate("test_suite_db_a", "test_suite_db_b");
+
+ var docB2 = dbB.open("foo",{conflicts:true});
+
+ // we have a conflict, but the previous replicated rev is always the losing
+ // conflict
+ T(docB2._conflicts[0] == docB1._rev);
+
+ // We had already updated bar before setting the limit, so it still has
+ // a long rev history. Compact to stem the revs.
+
+ T(db.open("bar", {revs:true})._revisions.ids.length == newLimit + 1);
+
+ T(db.compact().ok);
+
+ // compaction isn't instantaneous, loop until done
+ while (db.info().compact_running) {};
+
+ // force reload because ETags don't honour compaction
+ var req = db.request("GET", "/test_suite_db_a/bar?revs=true", {
+ headers:{"if-none-match":"pommes"}
+ });
+
+ var finalDoc = JSON.parse(req.responseText);
+ TEquals(newLimit, finalDoc._revisions.ids.length,
+ "should return a truncated revision list");
+};
diff --git a/1.1.x/share/www/script/test/rewrite.js b/1.1.x/share/www/script/test/rewrite.js
new file mode 100644
index 00000000..86905f8f
--- /dev/null
+++ b/1.1.x/share/www/script/test/rewrite.js
@@ -0,0 +1,410 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+
+
+couchTests.rewrite = function(debug) {
+ // this tests the _rewrite handler
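+ // each rule maps a "from" path pattern to a "to" target inside the design
+ // doc; ":name" segments and "*" wildcards are substituted into the target
+ // path and into the optional "query" object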
+
+
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+
+
+ if (debug) debugger;
+ run_on_modified_server(
+ [{section: "httpd",
+ key: "authentication_handlers",
+ value: "{couch_httpd_auth, special_test_authentication_handler}"},
+ {section:"httpd",
+ key: "WWW-Authenticate",
+ value: "X-Couch-Test-Auth"}],
+
+ function(){
+ var designDoc = {
+ _id:"_design/test",
+ language: "javascript",
+ _attachments:{
+ "foo.txt": {
+ content_type:"text/plain",
+ data: "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ },
+ rewrites: [
+ {
+ "from": "foo",
+ "to": "foo.txt"
+ },
+ {
+ "from": "foo2",
+ "to": "foo.txt",
+ "method": "GET"
+ },
+ {
+ "from": "hello/:id",
+ "to": "_update/hello/:id",
+ "method": "PUT"
+ },
+ {
+ "from": "/welcome",
+ "to": "_show/welcome"
+ },
+ {
+ "from": "/welcome/:name",
+ "to": "_show/welcome",
+ "query": {
+ "name": ":name"
+ }
+ },
+ {
+ "from": "/welcome2",
+ "to": "_show/welcome",
+ "query": {
+ "name": "user"
+ }
+ },
+ {
+ "from": "/welcome3/:name",
+ "to": "_update/welcome2/:name",
+ "method": "PUT"
+ },
+ {
+ "from": "/welcome3/:name",
+ "to": "_show/welcome2/:name",
+ "method": "GET"
+ },
+ {
+ "from": "/welcome4/*",
+ "to" : "_show/welcome3",
+ "query": {
+ "name": "*"
+ }
+ },
+ {
+ "from": "/welcome5/*",
+ "to" : "_show/*",
+ "query": {
+ "name": "*"
+ }
+ },
+ {
+ "from": "basicView",
+ "to": "_view/basicView",
+ },
+ {
+ "from": "simpleForm/basicView",
+ "to": "_list/simpleForm/basicView",
+ },
+ {
+ "from": "simpleForm/basicViewFixed",
+ "to": "_list/simpleForm/basicView",
+ "query": {
+ "startkey": 3,
+ "endkey": 8
+ }
+ },
+ {
+ "from": "simpleForm/basicViewPath/:start/:end",
+ "to": "_list/simpleForm/basicView",
+ "query": {
+ "startkey": ":start",
+ "endkey": ":end"
+ }
+ },
+ {
+ "from": "simpleForm/complexView",
+ "to": "_list/simpleForm/complexView",
+ "query": {
+ "key": [1, 2]
+ }
+ },
+ {
+ "from": "simpleForm/complexView2",
+ "to": "_list/simpleForm/complexView",
+ "query": {
+ "key": ["test", {}]
+ }
+ },
+ {
+ "from": "simpleForm/complexView3",
+ "to": "_list/simpleForm/complexView",
+ "query": {
+ "key": ["test", ["test", "essai"]]
+ }
+ },
+ {
+ "from": "simpleForm/complexView4",
+ "to": "_list/simpleForm/complexView2",
+ "query": {
+ "key": {"c": 1}
+ }
+ },
+ {
+ "from": "simpleForm/complexView5/:a/:b",
+ "to": "_list/simpleForm/complexView3",
+ "query": {
+ "key": [":a", ":b"]
+ }
+ },
+ {
+ "from": "simpleForm/complexView6",
+ "to": "_list/simpleForm/complexView3",
+ "query": {
+ "key": [":a", ":b"]
+ }
+ },
+ {
+ "from": "/",
+ "to": "_view/basicView",
+ }
+ ],
+ lists: {
+ simpleForm: stringFun(function(head, req) {
+ log("simpleForm");
+ send('<ul>');
+ var row, row_number = 0, prevKey, firstKey = null;
+ while (row = getRow()) {
+ row_number += 1;
+ if (!firstKey) firstKey = row.key;
+ prevKey = row.key;
+ send('\n<li>Key: '+row.key
+ +' Value: '+row.value
+ +' LineNo: '+row_number+'</li>');
+ }
+ return '</ul><p>FirstKey: '+ firstKey + ' LastKey: '+ prevKey+'</p>';
+ })
+ },
+ shows: {
+ "welcome": stringFun(function(doc,req) {
+ return "Welcome " + req.query["name"];
+ }),
+ "welcome2": stringFun(function(doc, req) {
+ return "Welcome " + doc.name;
+ }),
+ "welcome3": stringFun(function(doc,req) {
+ return "Welcome " + req.query["name"];
+ })
+ },
+ updates: {
+ "hello" : stringFun(function(doc, req) {
+ if (!doc) {
+ if (req.id) {
+ return [{
+ _id : req.id
+ }, "New World"]
+ }
+ return [null, "Empty World"];
+ }
+ doc.world = "hello";
+ doc.edited_by = req.userCtx;
+ return [doc, "hello doc"];
+ }),
+ "welcome2": stringFun(function(doc, req) {
+ if (!doc) {
+ if (req.id) {
+ return [{
+ _id: req.id,
+ name: req.id
+ }, "New World"]
+ }
+ return [null, "Empty World"];
+ }
+ return [doc, "hello doc"];
+ })
+ },
+ views : {
+ basicView : {
+ map : stringFun(function(doc) {
+ if (doc.integer) {
+ emit(doc.integer, doc.string);
+ }
+
+ })
+ },
+ complexView: {
+ map: stringFun(function(doc) {
+ if (doc.type == "complex") {
+ emit([doc.a, doc.b], doc.string);
+ }
+ })
+ },
+ complexView2: {
+ map: stringFun(function(doc) {
+ if (doc.type == "complex") {
+ emit(doc.a, doc.string);
+ }
+ })
+ },
+ complexView3: {
+ map: stringFun(function(doc) {
+ if (doc.type == "complex") {
+ emit(doc.b, doc.string);
+ }
+ })
+ }
+ }
+ };
+
+ db.save(designDoc);
+
+ var docs = makeDocs(0, 10);
+ db.bulkSave(docs);
+
+ var docs2 = [
+ {"a": 1, "b": 1, "string": "doc 1", "type": "complex"},
+ {"a": 1, "b": 2, "string": "doc 2", "type": "complex"},
+ {"a": "test", "b": {}, "string": "doc 3", "type": "complex"},
+ {"a": "test", "b": ["test", "essai"], "string": "doc 4", "type": "complex"},
+ {"a": {"c": 1}, "b": "", "string": "doc 5", "type": "complex"}
+ ];
+
+ db.bulkSave(docs2);
+
+ // test simple rewriting
+
+ var req = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/foo");
+ T(req.responseText == "This is a base64 encoded text");
+ T(req.getResponseHeader("Content-Type") == "text/plain");
+
+ req = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/foo2");
+ T(req.responseText == "This is a base64 encoded text");
+ T(req.getResponseHeader("Content-Type") == "text/plain");
+
+
+ // test POST
+ // hello update world
+
+ var doc = {"word":"plankton", "name":"Rusty"}
+ var resp = db.save(doc);
+ T(resp.ok);
+ var docid = resp.id;
+
+ xhr = CouchDB.request("PUT", "/test_suite_db/_design/test/_rewrite/hello/"+docid);
+ T(xhr.status == 201);
+ T(xhr.responseText == "hello doc");
+ T(/charset=utf-8/.test(xhr.getResponseHeader("Content-Type")));
+
+ doc = db.open(docid);
+ T(doc.world == "hello");
+
+ req = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/welcome?name=user");
+ T(req.responseText == "Welcome user");
+
+ req = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/welcome/user");
+ T(req.responseText == "Welcome user");
+
+ req = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/welcome2");
+ T(req.responseText == "Welcome user");
+
+ xhr = CouchDB.request("PUT", "/test_suite_db/_design/test/_rewrite/welcome3/test");
+ T(xhr.status == 201);
+ T(xhr.responseText == "New World");
+ T(/charset=utf-8/.test(xhr.getResponseHeader("Content-Type")));
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/welcome3/test");
+ T(xhr.responseText == "Welcome test");
+
+ req = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/welcome4/user");
+ T(req.responseText == "Welcome user");
+
+ req = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/welcome5/welcome3");
+ T(req.responseText == "Welcome welcome3");
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/basicView");
+ T(xhr.status == 200, "view call");
+ T(/{"total_rows":9/.test(xhr.responseText));
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/");
+ T(xhr.status == 200, "view call");
+ T(/{"total_rows":9/.test(xhr.responseText));
+
+
+ // get with query params
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/basicView?startkey=3&endkey=8");
+ T(xhr.status == 200, "with query params");
+ T(!(/Key: 1/.test(xhr.responseText)));
+ T(/FirstKey: 3/.test(xhr.responseText));
+ T(/LastKey: 8/.test(xhr.responseText));
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/basicViewFixed");
+ T(xhr.status == 200, "with query params");
+ T(!(/Key: 1/.test(xhr.responseText)));
+ T(/FirstKey: 3/.test(xhr.responseText));
+ T(/LastKey: 8/.test(xhr.responseText));
+
+ // get with query params
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/basicViewFixed?startkey=4");
+ T(xhr.status == 200, "with query params");
+ T(!(/Key: 1/.test(xhr.responseText)));
+ T(/FirstKey: 3/.test(xhr.responseText));
+ T(/LastKey: 8/.test(xhr.responseText));
+
+ // get with query params
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/basicViewPath/3/8");
+ T(xhr.status == 200, "with query params");
+ T(!(/Key: 1/.test(xhr.responseText)));
+ T(/FirstKey: 3/.test(xhr.responseText));
+ T(/LastKey: 8/.test(xhr.responseText));
+
+ // get with query params
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/complexView");
+ T(xhr.status == 200, "with query params");
+ T(/FirstKey: [1, 2]/.test(xhr.responseText));
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/complexView2");
+ T(xhr.status == 200, "with query params");
+ T(/Value: doc 3/.test(xhr.responseText));
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/complexView3");
+ T(xhr.status == 200, "with query params");
+ T(/Value: doc 4/.test(xhr.responseText));
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/complexView4");
+ T(xhr.status == 200, "with query params");
+ T(/Value: doc 5/.test(xhr.responseText));
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/complexView5/test/essai");
+ T(xhr.status == 200, "with query params");
+ T(/Value: doc 4/.test(xhr.responseText));
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/simpleForm/complexView6?a=test&b=essai");
+ T(xhr.status == 200, "with query params");
+ T(/Value: doc 4/.test(xhr.responseText));
+
+ // test path relative to server
+ designDoc.rewrites.push({
+ "from": "uuids",
+ "to": "../../../_uuids"
+ });
+ T(db.save(designDoc).ok);
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/uuids");
+ T(xhr.status == 500);
+ var result = JSON.parse(xhr.responseText);
+ T(result.error == "insecure_rewrite_rule");
+
+ run_on_modified_server(
+ [{section: "httpd",
+ key: "secure_rewrites",
+ value: "false"}],
+ function() {
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/test/_rewrite/uuids?cache=bust");
+ T(xhr.status == 200);
+ var result = JSON.parse(xhr.responseText);
+ T(result.uuids.length == 1);
+ var first = result.uuids[0];
+ });
+
+ });
+
+}
diff --git a/1.1.x/share/www/script/test/security_validation.js b/1.1.x/share/www/script/test/security_validation.js
new file mode 100644
index 00000000..42aa11c9
--- /dev/null
+++ b/1.1.x/share/www/script/test/security_validation.js
@@ -0,0 +1,336 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.security_validation = function(debug) {
+ // This tests couchdb's security and validation features. This does
+ // not test authentication, except to use test authentication code made
+ // specifically for this testing. It is a WWW-Authenticate scheme named
+ // X-Couch-Test-Auth, and the user names and passwords are hard coded
+ // on the server-side.
+ //
+ // We could have used Basic authentication; however, the XMLHttpRequest
+ // implementations in Firefox and Safari, and probably other browsers, are
+ // broken (Firefox always prompts the user on 401 failures, Safari gives
+ // odd security errors when using different names/passwords, perhaps due
+ // to cross-site scripting prevention). These problems essentially make
+ // Basic authentication testing in the browser impossible. But while hard
+ // to test automatically in the browser, Basic auth may still be useful
+ // for real-world use where these bugs/behaviors don't matter.
+ //
+ // So for testing purposes we are using this custom X-Couch-Test-Auth.
+ // It's identical to Basic auth, except it doesn't even base64-encode
+ // the "username:password" string; it is sent as plain text.
+ // Firefox and Safari both deal with this correctly (which is to say
+ // they correctly do nothing special).
+
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ run_on_modified_server(
+ [{section: "httpd",
+ key: "authentication_handlers",
+ value: "{couch_httpd_auth, special_test_authentication_handler}"},
+ {section:"httpd",
+ key: "WWW-Authenticate",
+ value: "X-Couch-Test-Auth"}],
+
+ function () {
+ // try saving document using the wrong credentials
+ var wrongPasswordDb = new CouchDB("test_suite_db",
+ {"WWW-Authenticate": "X-Couch-Test-Auth Damien Katz:foo"}
+ );
+
+ try {
+ wrongPasswordDb.save({foo:1,author:"Damien Katz"});
+ T(false && "Can't get here. Should have thrown an error 1");
+ } catch (e) {
+ T(e.error == "unauthorized");
+ T(wrongPasswordDb.last_req.status == 401);
+ }
+
+ // test force basic login
+ var resp = wrongPasswordDb.request("GET", "/_session?basic=true");
+ var err = JSON.parse(resp.responseText);
+ T(err.error == "unauthorized");
+ T(resp.status == 401);
+
+ // Create the design doc that will run custom validation code
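+ // validate_doc_update(newDoc, oldDoc, userCtx, secObj) runs on every
+ // write; throwing {forbidden: ...} rejects the write with a 403 and
+ // throwing {unauthorized: ...} rejects it with a 401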
+ var designDoc = {
+ _id:"_design/test",
+ language: "javascript",
+ validate_doc_update: "(" + (function (newDoc, oldDoc, userCtx, secObj) {
+ if (secObj.admin_override) {
+ if (userCtx.roles.indexOf('_admin') != -1) {
+ // user is admin, they can do anything
+ return true;
+ }
+ }
+ // docs should have an author field.
+ if (!newDoc._deleted && !newDoc.author) {
+ throw {forbidden:
+ "Documents must have an author field"};
+ }
+ if (oldDoc && oldDoc.author != userCtx.name) {
+ throw {unauthorized:
+ "You are not the author of this document. You jerk."};
+ }
+ }).toString() + ")"
+ };
+
+ // Save a document normally
+ var userDb = new CouchDB("test_suite_db",
+ {"WWW-Authenticate": "X-Couch-Test-Auth Damien Katz:pecan pie"}
+ );
+
+ T(userDb.save({_id:"testdoc", foo:1, author:"Damien Katz"}).ok);
+
+ // Attempt to save the design as a non-admin
+ try {
+ userDb.save(designDoc);
+ T(false && "Can't get here. Should have thrown an error on design doc");
+ } catch (e) {
+ T(e.error == "unauthorized");
+ T(userDb.last_req.status == 401);
+ }
+
+ // set user as the admin
+ T(db.setSecObj({
+ admins : {names : ["Damien Katz"]}
+ }).ok);
+
+ T(userDb.save(designDoc).ok);
+
+ var user2Db = new CouchDB("test_suite_db",
+ {"WWW-Authenticate": "X-Couch-Test-Auth Jan Lehnardt:apple"}
+ );
+ // Attempt to save the design as a non-admin (in replication scenario)
+ try {
+ user2Db.save(designDoc, {new_edits : false});
+ T(false && "Can't get here. Should have thrown an error on design doc");
+ } catch (e) {
+ T(e.error == "unauthorized");
+ T(user2Db.last_req.status == 401);
+ }
+
+ // test the _session API
+ var resp = userDb.request("GET", "/_session");
+ var user = JSON.parse(resp.responseText).userCtx;
+ T(user.name == "Damien Katz");
+ // test that the roles are listed properly
+ TEquals(user.roles, []);
+
+
+ // update the document
+ var doc = userDb.open("testdoc");
+ doc.foo=2;
+ T(userDb.save(doc).ok);
+
+ // Save a document that's missing an author field (before and after compaction)
+ for (var i=0; i<2; i++) {
+ try {
+ userDb.save({foo:1});
+ T(false && "Can't get here. Should have thrown an error 2");
+ } catch (e) {
+ T(e.error == "forbidden");
+ T(userDb.last_req.status == 403);
+ }
+ // compact.
+ T(db.compact().ok);
+ T(db.last_req.status == 202);
+ // compaction isn't instantaneous, loop until done
+ while (db.info().compact_running) {};
+ }
+
+ // Now attempt to update the document as a different user, Jan
+ var doc = user2Db.open("testdoc");
+ doc.foo=3;
+ try {
+ user2Db.save(doc);
+ T(false && "Can't get here. Should have thrown an error 3");
+ } catch (e) {
+ T(e.error == "unauthorized");
+ T(user2Db.last_req.status == 401);
+ }
+
+ // Now have Damien change the author to Jan
+ doc = userDb.open("testdoc");
+ doc.author="Jan Lehnardt";
+ T(userDb.save(doc).ok);
+
+ // Now update the document as Jan
+ doc = user2Db.open("testdoc");
+ doc.foo = 3;
+ T(user2Db.save(doc).ok);
+
+ // Damien can't delete it
+ try {
+ userDb.deleteDoc(doc);
+ T(false && "Can't get here. Should have thrown an error 4");
+ } catch (e) {
+ T(e.error == "unauthorized");
+ T(userDb.last_req.status == 401);
+ }
+
+ // admin must save with author field unless admin override
+ var resp = db.request("GET", "/_session");
+ var user = JSON.parse(resp.responseText).userCtx;
+ T(user.name == null);
+ // test that we are admin
+ TEquals(user.roles, ["_admin"]);
+
+ // can't save the doc even though we are admin
+ var doc = db.open("testdoc");
+ doc.foo=3;
+ try {
+ db.save(doc);
+ T(false && "Can't get here. Should have thrown an error 3");
+ } catch (e) {
+ T(e.error == "unauthorized");
+ T(db.last_req.status == 401);
+ }
+
+ // now turn on admin override
+ T(db.setDbProperty("_security", {admin_override : true}).ok);
+ T(db.save(doc).ok);
+
+ // try to do something lame
+ try {
+ db.setDbProperty("_security", ["foo"]);
+ T(false && "can't do this");
+ } catch(e) {}
+
+ // go back to normal
+ T(db.setDbProperty("_security", {admin_override : false}).ok);
+
+ // Now delete document
+ T(user2Db.deleteDoc(doc).ok);
+
+ // now test bulk docs
+ var docs = [{_id:"bahbah",author:"Damien Katz",foo:"bar"},{_id:"fahfah",foo:"baz"}];
+
+ // Create the docs
+ var results = db.bulkSave(docs);
+
+ T(results[0].rev);
+ T(results[0].error == undefined);
+ T(results[1].rev === undefined);
+ T(results[1].error == "forbidden");
+
+ T(db.open("bahbah"));
+ T(db.open("fahfah") == null);
+
+
+ // now all or nothing with a failure
+ var docs = [{_id:"booboo",author:"Damien Katz",foo:"bar"},{_id:"foofoo",foo:"baz"}];
+
+ // Create the docs
+ var results = db.bulkSave(docs, {all_or_nothing:true});
+
+ T(results.errors.length == 1);
+ T(results.errors[0].error == "forbidden");
+ T(db.open("booboo") == null);
+ T(db.open("foofoo") == null);
+
+ // Now test replication
+ var AuthHeaders = {"WWW-Authenticate": "X-Couch-Test-Auth Christopher Lenz:dog food"};
+ var host = CouchDB.host;
+ var dbPairs = [
+ {source:"test_suite_db_a",
+ target:"test_suite_db_b"},
+
+ {source:"test_suite_db_a",
+ target:{url: CouchDB.protocol + host + "/test_suite_db_b",
+ headers: AuthHeaders}},
+
+ {source:{url:CouchDB.protocol + host + "/test_suite_db_a",
+ headers: AuthHeaders},
+ target:"test_suite_db_b"},
+
+ {source:{url:CouchDB.protocol + host + "/test_suite_db_a",
+ headers: AuthHeaders},
+ target:{url:CouchDB.protocol + host + "/test_suite_db_b",
+ headers: AuthHeaders}}
+ ];
+ var adminDbA = new CouchDB("test_suite_db_a", {"X-Couch-Full-Commit":"false"});
+ var adminDbB = new CouchDB("test_suite_db_b", {"X-Couch-Full-Commit":"false"});
+ var dbA = new CouchDB("test_suite_db_a",
+ {"WWW-Authenticate": "X-Couch-Test-Auth Christopher Lenz:dog food"});
+ var dbB = new CouchDB("test_suite_db_b",
+ {"WWW-Authenticate": "X-Couch-Test-Auth Christopher Lenz:dog food"});
+ var xhr;
+ for (var testPair = 0; testPair < dbPairs.length; testPair++) {
+ var A = dbPairs[testPair].source;
+ var B = dbPairs[testPair].target;
+
+ adminDbA.deleteDb();
+ adminDbA.createDb();
+ adminDbB.deleteDb();
+ adminDbB.createDb();
+
+ // save and replicate documents that will and will not pass our design
+ // doc validation function.
+ dbA.save({_id:"foo1",value:"a",author:"Noah Slater"});
+ dbA.save({_id:"foo2",value:"a",author:"Christopher Lenz"});
+ dbA.save({_id:"bad1",value:"a"});
+
+ T(CouchDB.replicate(A, B, {headers:AuthHeaders}).ok);
+ T(CouchDB.replicate(B, A, {headers:AuthHeaders}).ok);
+
+ T(dbA.open("foo1"));
+ T(dbB.open("foo1"));
+ T(dbA.open("foo2"));
+ T(dbB.open("foo2"));
+
+ // save the design doc to dbA
+ delete designDoc._rev; // clear rev from previous saves
+ adminDbA.save(designDoc);
+
+ // no effect on already-saved docs
+ T(dbA.open("bad1"));
+
+ // Update some docs on dbB. Since the design hasn't replicated, anything
+ // is allowed.
+
+ // this edit will fail validation on replication to dbA (no author)
+ T(dbB.save({_id:"bad2",value:"a"}).ok);
+
+ // this edit will fail security on replication to dbA (wrong author
+ // replicating the change)
+ var foo1 = dbB.open("foo1");
+ foo1.value = "b";
+ dbB.save(foo1);
+
+ // this is a legal edit
+ var foo2 = dbB.open("foo2");
+ foo2.value = "b";
+ dbB.save(foo2);
+
+ var results = CouchDB.replicate(B, A, {headers:AuthHeaders});
+
+ T(results.ok);
+
+ T(results.history[0].docs_written == 1);
+ T(results.history[0].doc_write_failures == 2);
+
+ // bad2 should not be on dbA
+ T(dbA.open("bad2") == null);
+
+ // The edit to foo1 should not have replicated.
+ T(dbA.open("foo1").value == "a");
+
+ // The edit to foo2 should have replicated.
+ T(dbA.open("foo2").value == "b");
+ }
+ });
+};
diff --git a/1.1.x/share/www/script/test/show_documents.js b/1.1.x/share/www/script/test/show_documents.js
new file mode 100644
index 00000000..55ed9698
--- /dev/null
+++ b/1.1.x/share/www/script/test/show_documents.js
@@ -0,0 +1,436 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.show_documents = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var designDoc = {
+ _id:"_design/template",
+ language: "javascript",
+ shows: {
+ "hello" : stringFun(function(doc, req) {
+ log("hello fun");
+ if (doc) {
+ return "Hello World";
+ } else {
+ if(req.id) {
+ return "New World";
+ } else {
+ return "Empty World";
+ }
+ }
+ }),
+ "just-name" : stringFun(function(doc, req) {
+ if (doc) {
+ return {
+ body : "Just " + doc.name
+ };
+ } else {
+ return {
+ body : "No such doc",
+ code : 404
+ };
+ }
+ }),
+ "json" : stringFun(function(doc, req) {
+ return {
+ json : doc
+ }
+ }),
+ "req-info" : stringFun(function(doc, req) {
+ return {
+ json : req
+ }
+ }),
+ "show-deleted" : stringFun(function(doc, req) {
+ if(doc) {
+ return doc._id;
+ } else {
+ return "No doc " + req.id;
+ }
+ }),
+ "render-error" : stringFun(function(doc, req) {
+ return noSuchVariable;
+ }),
+ "empty" : stringFun(function(doc, req) {
+ return "";
+ }),
+ "fail" : stringFun(function(doc, req) {
+ return doc._id;
+ }),
+ "xml-type" : stringFun(function(doc, req) {
+ return {
+ "headers" : {
+ "Content-Type" : "application/xml"
+ },
+ "body" : new XML('<xml><node foo="bar"/></xml>').toXMLString()
+ }
+ }),
+ "no-set-etag" : stringFun(function(doc, req) {
+ return {
+ headers : {
+ "Etag" : "skipped"
+ },
+ "body" : "something"
+ }
+ }),
+ "list-api" : stringFun(function(doc, req) {
+ start({"X-Couch-Test-Header": "Yeah"});
+ send("Hey");
+ }),
+ "list-api-mix" : stringFun(function(doc, req) {
+ start({"X-Couch-Test-Header": "Yeah"});
+ send("Hey ");
+ return "Dude";
+ }),
+ "list-api-mix-with-header" : stringFun(function(doc, req) {
+ start({"X-Couch-Test-Header": "Yeah"});
+ send("Hey ");
+ return {
+ headers: {
+ "X-Couch-Test-Header-Awesome": "Oh Yeah!"
+ },
+ body: "Dude"
+ };
+ }),
+ "accept-switch" : stringFun(function(doc, req) {
+ if (req.headers["Accept"].match(/image/)) {
+ return {
+ // a 16x16 px version of the CouchDB logo
+ "base64" :
+["iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAsV",
+"BMVEUAAAD////////////////////////5ur3rEBn////////////////wDBL/",
+"AADuBAe9EB3IEBz/7+//X1/qBQn2AgP/f3/ilpzsDxfpChDtDhXeCA76AQH/v7",
+"/84eLyWV/uc3bJPEf/Dw/uw8bRWmP1h4zxSlD6YGHuQ0f6g4XyQkXvCA36MDH6",
+"wMH/z8/yAwX64ODeh47BHiv/Ly/20dLQLTj98PDXWmP/Pz//39/wGyJ7Iy9JAA",
+"AADHRSTlMAbw8vf08/bz+Pv19jK/W3AAAAg0lEQVR4Xp3LRQ4DQRBD0QqTm4Y5",
+"zMxw/4OleiJlHeUtv2X6RbNO1Uqj9g0RMCuQO0vBIg4vMFeOpCWIWmDOw82fZx",
+"vaND1c8OG4vrdOqD8YwgpDYDxRgkSm5rwu0nQVBJuMg++pLXZyr5jnc1BaH4GT",
+"LvEliY253nA3pVhQqdPt0f/erJkMGMB8xucAAAAASUVORK5CYII="].join(''),
+ headers : {
+ "Content-Type" : "image/png",
+ "Vary" : "Accept" // we set this for proxy caches
+ }
+ };
+ } else {
+ return {
+ "body" : "accepting text requests",
+ headers : {
+ "Content-Type" : "text/html",
+ "Vary" : "Accept"
+ }
+ };
+ }
+ }),
+ "provides" : stringFun(function(doc, req) {
+ registerType("foo", "application/foo","application/x-foo");
+
+ provides("html", function() {
+ return "Ha ha, you said \"" + doc.word + "\".";
+ });
+
+ provides("xml", function() {
+ var xml = new XML('<xml><node/></xml>');
+ // Because Safari can't stand to see that dastardly
+ // E4X outside of a string. Outside of tests you
+ // can just use E4X literals.
+ eval('xml.node.@foo = doc.word');
+ log('xml: '+xml.toSource());
+ return xml.toXMLString();
+ });
+
+ provides("foo", function() {
+ return "foofoo";
+ });
+ }),
+ "withSlash": stringFun(function(doc, req) {
+ return { json: doc }
+ }),
+ "secObj": stringFun(function(doc, req) {
+ return { json: req.secObj };
+ })
+ }
+ };
+ T(db.save(designDoc).ok);
+
+ var doc = {"word":"plankton", "name":"Rusty"}
+ var resp = db.save(doc);
+ T(resp.ok);
+ var docid = resp.id;
+
+ // show error
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/");
+ T(xhr.status == 404, 'Should be missing');
+ T(JSON.parse(xhr.responseText).reason == "Invalid path.");
+
+ // hello template world
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/hello/"+docid);
+ T(xhr.responseText == "Hello World", "hello");
+ T(/charset=utf-8/.test(xhr.getResponseHeader("Content-Type")));
+
+
+ // Fix for COUCHDB-379
+ T(equals(xhr.getResponseHeader("Server").substr(0,7), "CouchDB"));
+
+ // // error stacktraces
+ // xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/render-error/"+docid);
+ // T(JSON.parse(xhr.responseText).error == "render_error");
+
+ // hello template world (no docid)
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/hello");
+ T(xhr.responseText == "Empty World");
+
+ // the "empty" show (no docid) returns an empty body
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/empty");
+ T(xhr.responseText == "");
+
+ // show with a non-existing docid (the "fail" show dereferences the missing doc)
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/fail/nonExistingDoc");
+ T(xhr.status == 404);
+ var resp = JSON.parse(xhr.responseText);
+ T(resp.error == "not_found");
+
+ // show with doc
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid);
+ T(xhr.responseText == "Just Rusty");
+
+ // show with missing doc
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/missingdoc");
+ T(xhr.status == 404);
+ TEquals("No such doc", xhr.responseText);
+
+ // show with missing func
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/missing/"+docid);
+ T(xhr.status == 404, "function is missing");
+
+ // missing design doc
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/missingddoc/_show/just-name/"+docid);
+ T(xhr.status == 404);
+ var resp = JSON.parse(xhr.responseText);
+ T(resp.error == "not_found");
+
+ // query parameters
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/req-info/"+docid+"?foo=bar", {
+ headers: {
+ "Accept": "text/html;text/plain;*/*",
+ "X-Foo" : "bar"
+ }
+ });
+ var resp = JSON.parse(xhr.responseText);
+ T(equals(resp.headers["X-Foo"], "bar"));
+ T(equals(resp.query, {foo:"bar"}));
+ T(equals(resp.method, "GET"));
+ T(equals(resp.path[5], docid));
+ T(equals(resp.info.db_name, "test_suite_db"));
+
+ // returning a content-type
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/xml-type/"+docid);
+ T("application/xml" == xhr.getResponseHeader("Content-Type"));
+ T("Accept" == xhr.getResponseHeader("Vary"));
+
+ // accept header switching
+ // different mime has different etag
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/accept-switch/"+docid, {
+ headers: {"Accept": "text/html;text/plain;*/*"}
+ });
+ var ct = xhr.getResponseHeader("Content-Type");
+ T(/text\/html/.test(ct));
+ T("Accept" == xhr.getResponseHeader("Vary"));
+ var etag = xhr.getResponseHeader("etag");
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/accept-switch/"+docid, {
+ headers: {"Accept": "image/png;*/*"}
+ });
+ T(xhr.responseText.match(/PNG/));
+ T("image/png" == xhr.getResponseHeader("Content-Type"));
+ var etag2 = xhr.getResponseHeader("etag");
+ T(etag2 != etag);
+
+ // proper etags
+ // show with doc
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid);
+ // extract the ETag header values
+ etag = xhr.getResponseHeader("etag");
+ // get again with etag in request
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid, {
+ headers: {"if-none-match": etag}
+ });
+ // should be 304
+ T(xhr.status == 304);
+
+ // update the doc
+ doc.name = "Crusty";
+ resp = db.save(doc);
+ T(resp.ok);
+ // req with same etag
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid, {
+ headers: {"if-none-match": etag}
+ });
+ // status is 200
+ T(xhr.status == 200);
+
+ // get new etag and request again
+ etag = xhr.getResponseHeader("etag");
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid, {
+ headers: {"if-none-match": etag}
+ });
+ // should be 304
+ T(xhr.status == 304);
+
+ // update design doc (but not function)
+ designDoc.isChanged = true;
+ T(db.save(designDoc).ok);
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid, {
+ headers: {"if-none-match": etag}
+ });
+ // should not be 304 if we change the design doc
+ T(xhr.status != 304, "changed ddoc");
+
+ // update design doc function
+ designDoc.shows["just-name"] = (function(doc, req) {
+ return {
+ body : "Just old " + doc.name
+ };
+ }).toString();
+ T(db.save(designDoc).ok);
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/just-name/"+docid, {
+ headers: {"if-none-match": etag}
+ });
+ // status is 200
+ T(xhr.status == 200);
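+
+ // Taken together: a show response's ETag varies with both the document
+ // revision and the design document, so editing either invalidates an
+ // If-None-Match match, while an unchanged pair keeps answering 304.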
+
+
+ // JS can't set etag
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/no-set-etag/"+docid);
+ // extract the ETag header values
+ etag = xhr.getResponseHeader("etag");
+ T(etag != "skipped")
+
+ // test the provides mime matcher
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/provides/"+docid, {
+ headers: {
+ "Accept": 'text/html,application/atom+xml; q=0.9'
+ }
+ });
+ var ct = xhr.getResponseHeader("Content-Type");
+ T(/charset=utf-8/.test(ct));
+ T(/text\/html/.test(ct));
+ T(xhr.responseText == "Ha ha, you said \"plankton\".");
+
+ // now with xml
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/provides/"+docid, {
+ headers: {
+ "Accept": 'application/xml'
+ }
+ });
+ T(xhr.getResponseHeader("Content-Type") == "application/xml");
+ T(xhr.responseText.match(/node/));
+ T(xhr.responseText.match(/plankton/));
+
+ // registering types works
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/provides/"+docid, {
+ headers: {
+ "Accept": "application/x-foo"
+ }
+ });
+ T(xhr.getResponseHeader("Content-Type") == "application/x-foo");
+ T(xhr.responseText.match(/foofoo/));
+
+ // test the provides mime matcher without a match
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/provides/"+docid, {
+ headers: {
+ "Accept": 'text/monkeys'
+ }
+ });
+ var rs = JSON.parse(xhr.responseText);
+ T(rs.error == "not_acceptable")
+
+
+ // should fallback on the first one
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/provides/"+docid, {
+ headers: {
+ "Accept": 'application/x-foo, application/xml'
+ }
+ });
+ var ct = xhr.getResponseHeader("Content-Type");
+ T(/application\/xml/.test(ct));
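+
+ // A small client-side sketch (an illustration, not one of the original
+ // assertions) of negotiating against the "provides" show above: with
+ // explicit quality values, the higher-q type should win the best-match.
+ var negotiated = CouchDB.request("GET", "/test_suite_db/_design/template/_show/provides/"+docid, {
+ headers: {
+ "Accept": 'application/x-foo;q=1.0, application/xml;q=0.5'
+ }
+ });
+ // likely application/x-foo here, given the higher q value
+ var negotiatedType = negotiated.getResponseHeader("Content-Type");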
+
+ // test inclusion of conflict state
+ var doc1 = {_id:"foo", a:1};
+ var doc2 = {_id:"foo", a:2};
+ db.save(doc1);
+
+ // create the conflict with an all_or_nothing bulk docs request
+ var docs = [doc2];
+ db.bulkSave(docs, {all_or_nothing:true});
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/json/foo");
+ TEquals(1, JSON.parse(xhr.responseText)._conflicts.length);
+
+ var doc3 = {_id:"a/b/c", a:1};
+ db.save(doc3);
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/withSlash/a/b/c");
+ T(xhr.status == 200);
+
+ // hello template world (non-existing docid)
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/hello/nonExistingDoc");
+ T(xhr.responseText == "New World");
+
+ // test list() compatible API
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/list-api/foo");
+ T(xhr.responseText == "Hey");
+ TEquals("Yeah", xhr.getResponseHeader("X-Couch-Test-Header"), "header should be cool");
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/list-api-mix/foo");
+ T(xhr.responseText == "Hey Dude");
+ TEquals("Yeah", xhr.getResponseHeader("X-Couch-Test-Header"), "header should be cool");
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/list-api-mix-with-header/foo");
+ T(xhr.responseText == "Hey Dude");
+ TEquals("Yeah", xhr.getResponseHeader("X-Couch-Test-Header"), "header should be cool");
+ TEquals("Oh Yeah!", xhr.getResponseHeader("X-Couch-Test-Header-Awesome"), "header should be cool");
+
+ // test deleted docs
+ var doc = {_id:"testdoc",foo:1};
+ db.save(doc);
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/show-deleted/testdoc");
+ TEquals("testdoc", xhr.responseText, "should return 'testdoc'");
+
+ db.deleteDoc(doc);
+ var xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/show-deleted/testdoc");
+ TEquals("No doc testdoc", xhr.responseText, "should return 'no doc testdoc'");
+
+
+ run_on_modified_server(
+ [{section: "httpd",
+ key: "authentication_handlers",
+ value: "{couch_httpd_auth, special_test_authentication_handler}"},
+ {section:"httpd",
+ key: "WWW-Authenticate",
+ value: "X-Couch-Test-Auth"}],
+
+ function() {
+ T(db.setDbProperty("_security", {foo: true}).ok);
+ T(db.save(doc).ok);
+
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/template/_show/secObj");
+ var resp = JSON.parse(xhr.responseText);
+ T(resp.foo == true);
+ }
+ );
+
+};
diff --git a/1.1.x/share/www/script/test/stats.js b/1.1.x/share/www/script/test/stats.js
new file mode 100644
index 00000000..6fb0fbba
--- /dev/null
+++ b/1.1.x/share/www/script/test/stats.js
@@ -0,0 +1,330 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.stats = function(debug) {
+
+ function newDb(name, doSetup) {
+ var db = new CouchDB(name, {"X-Couch-Full-Commit": "false"});
+ if(doSetup) {
+ db.deleteDb();
+ db.createDb();
+ }
+ return db;
+ };
+
+ function getStat(mod, key) {
+ return CouchDB.requestStats(mod, key, true);
+ };
+
+ function doView(db) {
+ var designDoc = {
+ _id:"_design/test", // turn off couch.js id escaping?
+ language: "javascript",
+ views: {
+ all_docs: {map: "function(doc) {emit(doc.integer, null);}"}
+ }
+ };
+ db.save(designDoc);
+ db.view("test/all_docs");
+ };
+
+ function runTest(mod, key, funcs) {
+ var db = newDb("test_suite_db", true);
+ if(funcs.setup) funcs.setup(db);
+ var before = getStat(mod, key).current;
+ if(funcs.run) funcs.run(db);
+ var after = getStat(mod, key).current;
+ if(funcs.test) funcs.test(before, after);
+ }
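+
+ // runTest samples the named counter, runs the workload, samples it again,
+ // and hands (before, after) to the test callback; every check below is
+ // built on that pattern.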
+
+ if (debug) debugger;
+
+ (function() {
+ var db = newDb("test_suite_db");
+ db.deleteDb();
+
+ var before = getStat("couchdb", "open_databases").current;
+ db.createDb();
+ var after = getStat("couchdb", "open_databases").current;
+ TEquals(before+1, after, "Creating a db increments open db count.");
+ })();
+
+ runTest("couchdb", "open_databases", {
+ setup: function() {restartServer();},
+ run: function(db) {db.open("123");},
+ test: function(before, after) {
+ TEquals(before+1, after, "Opening a db increments open db count.");
+ }
+ });
+
+ runTest("couchdb", "open_databases", {
+ run: function(db) {db.deleteDb();},
+ test: function(before, after) {
+ TEquals(before-1, after, "Deleting a db decrements open db count.");
+ }
+ });
+
+ (function() {
+ restartServer();
+ var max = 5;
+
+ var testFun = function() {
+ var pre_dbs = getStat("couchdb", "open_databases").current || 0;
+ var pre_files = getStat("couchdb", "open_os_files").current || 0;
+
+ var triggered = false;
+ var db = null;
+ for(var i = 0; i < max*2; i++) {
+ while (true) {
+ try {
+ db = newDb("test_suite_db_" + i, true);
+ break;
+ } catch(e) {
+ // all_dbs_active error!
+ triggered = true;
+ }
+ }
+
+ // Trigger a delayed commit
+ db.save({_id: "" + i, "lang": "Awesome!"});
+ }
+ T(triggered, "We managed to force a all_dbs_active error.");
+
+ var open_dbs = getStat("couchdb", "open_databases").current;
+ TEquals(open_dbs > 0, true, "We actually opened some dbs.");
+ TEquals(open_dbs, max, "We only have max dbs open.");
+
+ for(var i = 0; i < max * 2; i++) {
+ newDb("test_suite_db_" + i).deleteDb();
+ }
+
+ var post_dbs = getStat("couchdb", "open_databases").current;
+ var post_files = getStat("couchdb", "open_os_files").current;
+ TEquals(pre_dbs, post_dbs, "We have the same number of open dbs.");
+ TEquals(pre_files, post_files, "We have the same number of open files.");
+ };
+
+ run_on_modified_server(
+ [{section: "couchdb", key: "max_dbs_open", value: "5"}],
+ testFun
+ );
+ })();
+
+ // Fetching the "before" value is itself a request, hence the +2 below.
+ runTest("httpd", "requests", {
+ run: function() {CouchDB.request("GET", "/");},
+ test: function(before, after) {
+ TEquals(before+2, after, "Request counts are incremented properly.");
+ }
+ });
+
+ runTest("couchdb", "database_reads", {
+ setup: function(db) {db.save({"_id": "test"});},
+ run: function(db) {db.open("test");},
+ test: function(before, after) {
+ TEquals(before+1, after, "Reading a doc increments docs reads.");
+ }
+ });
+
+ runTest("couchdb", "database_reads", {
+ setup: function(db) {db.save({"_id": "test"});},
+ run: function(db) {db.request("GET", "/");},
+ test: function(before, after) {
+ TEquals(before, after, "Only doc reads increment doc reads.");
+ }
+ });
+
+ runTest("couchdb", "database_reads", {
+ setup: function(db) {db.save({"_id": "test"});},
+ run: function(db) {db.open("test", {"open_revs": "all"});},
+ test: function(before, after) {
+ TEquals(before+1, after, "Reading doc revs increments docs reads.");
+ }
+ });
+
+ runTest("couchdb", "database_writes", {
+ run: function(db) {db.save({"a": "1"});},
+ test: function(before, after) {
+ TEquals(before+1, after, "Saving docs incrememnts doc writes.");
+ }
+ });
+
+ runTest("couchdb", "database_writes", {
+ run: function(db) {
+ CouchDB.request("POST", "/test_suite_db", {
+ headers: {"Content-Type": "application/json"},
+ body: '{"a": "1"}'
+ });
+ },
+ test: function(before, after) {
+ TEquals(before+1, after, "POST'ing new docs increments doc writes.");
+ }
+ });
+
+ runTest("couchdb", "database_writes", {
+ setup: function(db) {db.save({"_id": "test"});},
+ run: function(db) {var doc = db.open("test"); db.save(doc);},
+ test: function(before, after) {
+ TEquals(before+1, after, "Updating docs incrememnts doc writes.");
+ }
+ });
+
+ runTest("couchdb", "database_writes", {
+ setup: function(db) {db.save({"_id": "test"});},
+ run: function(db) {var doc = db.open("test"); db.deleteDoc(doc);},
+ test: function(before, after) {
+ TEquals(before+1, after, "Deleting docs increments doc writes.");
+ }
+ });
+
+ runTest("couchdb", "database_writes", {
+ setup: function(db) {db.save({"_id": "test"});},
+ run: function(db) {
+ CouchDB.request("COPY", "/test_suite_db/test", {
+ headers: {"Destination": "copy_of_test"}
+ });
+ },
+ test: function(before, after) {
+ TEquals(before+1, after, "Copying docs increments doc writes.");
+ }
+ });
+
+ runTest("couchdb", "database_writes", {
+ run: function() {
+ CouchDB.request("PUT", "/test_suite_db/bin_doc2/foo2.txt", {
+ body: "This is no base64 encoded test",
+ headers: {"Content-Type": "text/plain;charset=utf-8"}
+ });
+ },
+ test: function(before, after) {
+ TEquals(before+1, after, "Create with attachment increments doc writes.");
+ }
+ });
+
+ runTest("couchdb", "database_writes", {
+ setup: function(db) {db.save({"_id": "test"});},
+ run: function(db) {
+ var doc = db.open("test");
+ CouchDB.request("PUT", "/test_suite_db/test/foo2.txt?rev=" + doc._rev, {
+ body: "This is no base64 encoded text",
+ headers: {"Content-Type": "text/plainn;charset=utf-8"}
+ });
+ },
+ test: function(before, after) {
+ TEquals(before+1, after, "Adding attachment increments doc writes.");
+ }
+ });
+
+ runTest("httpd", "bulk_requests", {
+ run: function(db) {db.bulkSave(makeDocs(5));},
+ test: function(before, after) {
+ TEquals(before+1, after, "The bulk_requests counter is incremented.");
+ }
+ });
+
+ runTest("httpd", "view_reads", {
+ run: function(db) {doView(db);},
+ test: function(before, after) {
+ TEquals(before+1, after, "Reading a view increments view reads.");
+ }
+ });
+
+ runTest("httpd", "view_reads", {
+ setup: function(db) {db.save({"_id": "test"});},
+ run: function(db) {db.open("test");},
+ test: function(before, after) {
+ TEquals(before, after, "Reading a doc doesn't increment view reads.");
+ }
+ });
+
+ runTest("httpd", "temporary_view_reads", {
+ run: function(db) { db.query(function(doc) { emit(doc._id); }); },
+ test: function(before, after) {
+ TEquals(before+1, after, "Temporary views have their own counter.");
+ }
+ });
+
+ runTest("httpd", "temporary_view_reads", {
+ run: function(db) {doView(db);},
+ test: function(before, after) {
+ TEquals(before, after, "Permanent views don't affect temporary views.");
+ }
+ });
+
+ runTest("httpd", "view_reads", {
+ run: function(db) { db.query(function(doc) { emit(doc._id); }); },
+ test: function(before, after) {
+ TEquals(before, after, "Temporary views don't affect permanent views.");
+ }
+ });
+
+ // Relies on the stats fetches themselves being GET requests.
+ runTest("httpd_request_methods", "GET", {
+ test: function(before, after) {
+ TEquals(before+1, after, "Get requests are incremented properly.");
+ }
+ });
+
+ runTest("httpd_request_methods", "GET", {
+ run: function() {CouchDB.request("POST", "/");},
+ test: function(before, after) {
+ TEquals(before+1, after, "POST requests don't affect GET counter.");
+ }
+ });
+
+ runTest("httpd_request_methods", "POST", {
+ run: function() {CouchDB.request("POST", "/");},
+ test: function(before, after) {
+ TEquals(before+1, after, "POST requests are incremented properly.");
+ }
+ });
+
+ runTest("httpd_status_codes", "404", {
+ run: function() {CouchDB.request("GET", "/nonexistant_db");},
+ test: function(before, after) {
+ TEquals(before+1, after, "Increments 404 counter on db not found.");
+ }
+ });
+
+ runTest("httpd_status_codes", "404", {
+ run: function() {CouchDB.request("GET", "/");},
+ test: function(before, after) {
+ TEquals(before, after, "Getting DB info doesn't increment 404's");
+ }
+ });
+
+ (function() {
+ var aggregates = [
+ "current",
+ "description",
+ "mean",
+ "min",
+ "max",
+ "stddev",
+ "sum"
+ ];
+ var summary = JSON.parse(CouchDB.request("GET", "/_stats", {
+ headers: {"Accept": "application/json"}
+ }).responseText);
+ for(var i in summary) {
+ for(var j in summary[i]) {
+ for(var k in summary[i][j]) {
+ T(aggregates.indexOf(k) >= 0, "Unknown property name: " + k);
+ }
+ for(var k in aggregates) {
+ var mesg = "Missing required property: " + aggregates[k];
+ T(summary[i][j][aggregates[k]] !== undefined, mesg);
+ }
+ }
+ }
+ })();
+};
diff --git a/1.1.x/share/www/script/test/update_documents.js b/1.1.x/share/www/script/test/update_documents.js
new file mode 100644
index 00000000..49d3b68a
--- /dev/null
+++ b/1.1.x/share/www/script/test/update_documents.js
@@ -0,0 +1,168 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+
+couchTests.update_documents = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var designDoc = {
+ _id:"_design/update",
+ language: "javascript",
+ updates: {
+ "hello" : stringFun(function(doc, req) {
+ log(doc);
+ log(req);
+ if (!doc) {
+ if (req.id) {
+ return [
+ // Creates a new document with the PUT docid,
+ { _id : req.id,
+ reqs : [req] },
+ // and returns an HTML response to the client.
+ "<p>New World</p>"];
+ };
+ //
+ return [null, "<p>Empty World</p>"];
+ };
+ // we can update the document inline
+ doc.world = "hello";
+ // we can record aspects of the request or use them in application logic.
+ doc.reqs && doc.reqs.push(req);
+ doc.edited_by = req.userCtx;
+ return [doc, "<p>hello doc</p>"];
+ }),
+ "in-place" : stringFun(function(doc, req) {
+ var field = req.query.field;
+ var value = req.query.value;
+ var message = "set "+field+" to "+value;
+ doc[field] = value;
+ return [doc, message];
+ }),
+ "bump-counter" : stringFun(function(doc, req) {
+ if (!doc.counter) doc.counter = 0;
+ doc.counter += 1;
+ var message = "<h1>bumped it!</h1>";
+ return [doc, message];
+ }),
+ "error" : stringFun(function(doc, req) {
+ superFail.badCrash;
+ }),
+ "xml" : stringFun(function(doc, req) {
+ var xml = new XML('<xml></xml>');
+ xml.title = doc.title;
+ var posted_xml = new XML(req.body);
+ doc.via_xml = posted_xml.foo.toString();
+ var resp = {
+ "headers" : {
+ "Content-Type" : "application/xml"
+ },
+ "body" : xml.toXMLString()
+ };
+
+ return [doc, resp];
+ }),
+ "get-uuid" : stringFun(function(doc, req) {
+ return [null, req.uuid];
+ })
+ }
+ };
+ T(db.save(designDoc).ok);
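+
+ // As exercised below, update handlers return a two-element array of
+ // [docToWrite, response]: a null first element writes nothing, and the
+ // response may be a plain string body or a {headers, body} object.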
+
+ var doc = {"word":"plankton", "name":"Rusty"}
+ var resp = db.save(doc);
+ T(resp.ok);
+ var docid = resp.id;
+
+ // update error
+ var xhr = CouchDB.request("POST", "/test_suite_db/_design/update/_update/");
+ T(xhr.status == 404, 'Should be missing');
+ T(JSON.parse(xhr.responseText).reason == "Invalid path.");
+
+ // hello update world
+ xhr = CouchDB.request("PUT", "/test_suite_db/_design/update/_update/hello/"+docid);
+ T(xhr.status == 201);
+ T(xhr.responseText == "<p>hello doc</p>");
+ T(/charset=utf-8/.test(xhr.getResponseHeader("Content-Type")));
+
+ doc = db.open(docid);
+ T(doc.world == "hello");
+
+ // Fix for COUCHDB-379
+ T(equals(xhr.getResponseHeader("Server").substr(0,7), "CouchDB"));
+
+ // hello update world (no docid)
+ xhr = CouchDB.request("POST", "/test_suite_db/_design/update/_update/hello");
+ T(xhr.status == 200);
+ T(xhr.responseText == "<p>Empty World</p>");
+
+ // no GET allowed
+ xhr = CouchDB.request("GET", "/test_suite_db/_design/update/_update/hello");
+ // T(xhr.status == 405); // TODO allow qs to throw error code as well as error message
+ T(JSON.parse(xhr.responseText).error == "method_not_allowed");
+
+ // hello update world (non-existing docid)
+ xhr = CouchDB.request("GET", "/test_suite_db/nonExistingDoc");
+ T(xhr.status == 404);
+ xhr = CouchDB.request("PUT", "/test_suite_db/_design/update/_update/hello/nonExistingDoc");
+ T(xhr.status == 201);
+ T(xhr.responseText == "<p>New World</p>");
+ xhr = CouchDB.request("GET", "/test_suite_db/nonExistingDoc");
+ T(xhr.status == 200);
+
+ // in place update
+ xhr = CouchDB.request("PUT", "/test_suite_db/_design/update/_update/in-place/"+docid+'?field=title&value=test');
+ T(xhr.status == 201);
+ T(xhr.responseText == "set title to test");
+ doc = db.open(docid);
+ T(doc.title == "test");
+
+ // bump counter
+ xhr = CouchDB.request("PUT", "/test_suite_db/_design/update/_update/bump-counter/"+docid, {
+ headers : {"X-Couch-Full-Commit":"true"}
+ });
+ T(xhr.status == 201);
+ T(xhr.responseText == "<h1>bumped it!</h1>");
+ doc = db.open(docid);
+ T(doc.counter == 1);
+
+ // _update honors full commit if you need it to
+ xhr = CouchDB.request("PUT", "/test_suite_db/_design/update/_update/bump-counter/"+docid, {
+ headers : {"X-Couch-Full-Commit":"true"}
+ });
+
+ var NewRev = xhr.getResponseHeader("X-Couch-Update-NewRev");
+ doc = db.open(docid);
+ T(doc['_rev'] == NewRev);
+
+
+ T(doc.counter == 2);
+
+ // parse xml
+ xhr = CouchDB.request("PUT", "/test_suite_db/_design/update/_update/xml/"+docid, {
+ headers : {"X-Couch-Full-Commit":"true"},
+ "body" : '<xml><foo>bar</foo></xml>'
+ });
+ T(xhr.status == 201);
+ T(xhr.responseText == "<xml>\n <title>test</title>\n</xml>");
+
+ doc = db.open(docid);
+ T(doc.via_xml == "bar");
+
+ // Server provides UUID when POSTing without an ID in the URL
+ xhr = CouchDB.request("POST", "/test_suite_db/_design/update/_update/get-uuid/");
+ T(xhr.status == 200);
+ T(xhr.responseText.length == 32);
+
+};
diff --git a/1.1.x/share/www/script/test/users_db.js b/1.1.x/share/www/script/test/users_db.js
new file mode 100644
index 00000000..1e13e5d7
--- /dev/null
+++ b/1.1.x/share/www/script/test/users_db.js
@@ -0,0 +1,124 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.users_db = function(debug) {
+ // This tests the users db, especially validations
+ // this should also test that you can log into the couch
+
+ var usersDb = new CouchDB("test_suite_users", {"X-Couch-Full-Commit":"false"});
+
+ // test that you can treat "_user" as a db-name
+ // this can complicate people who try to secure the users db with
+ // an http proxy and fail to get both the actual db and the _user path
+ // maybe it's not the right approach...
+ // hard to know what else to do, as we don't let non-admins inspect the config
+ // to determine the actual users db name.
+
+ function testFun() {
+ // test that the validation function is installed
+ var ddoc = usersDb.open("_design/_auth");
+ T(ddoc.validate_doc_update);
+
+ // test that you can login as a user using basic auth
+ var jchrisUserDoc = CouchDB.prepareUserDoc({
+ name: "jchris@apache.org"
+ }, "funnybone");
+ T(usersDb.save(jchrisUserDoc).ok);
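+
+ // CouchDB.prepareUserDoc (couch.js) is assumed here to build the
+ // standard user doc: an _id of "org.couchdb.user:<name>", type "user",
+ // empty roles, and the password hashed into password_sha plus salt
+ // rather than stored in the clear.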
+
+ T(CouchDB.session().userCtx.name == null);
+
+ // test that you can use basic auth against the users db
+ var s = CouchDB.session({
+ headers : {
+ // base64_encode("jchris@apache.org:funnybone")
+ "Authorization" : "Basic amNocmlzQGFwYWNoZS5vcmc6ZnVubnlib25l"
+ }
+ });
+ T(s.userCtx.name == "jchris@apache.org");
+ T(s.info.authenticated == "default");
+ T(s.info.authentication_db == "test_suite_users");
+ TEquals(["oauth", "cookie", "default"], s.info.authentication_handlers);
+ var s = CouchDB.session({
+ headers : {
+ "Authorization" : "Basic Xzpf" // name and pass of _:_
+ }
+ });
+ T(s.userCtx.name == null);
+ T(s.info.authenticated == "default");
+
+
+ // ok, now create a conflicting edit on the jchris doc, and make sure there's no login.
+ var jchrisUser2 = JSON.parse(JSON.stringify(jchrisUserDoc));
+ jchrisUser2.foo = "bar";
+ T(usersDb.save(jchrisUser2).ok);
+ try {
+ usersDb.save(jchrisUserDoc);
+ T(false && "should be an update conflict")
+ } catch(e) {
+ T(true);
+ }
+ // save via bulk with all_or_nothing=true to force a conflicting revision
+ var resp = usersDb.bulkSave([jchrisUserDoc],{all_or_nothing : true});
+
+ var jchrisWithConflict = usersDb.open(jchrisUserDoc._id, {conflicts : true});
+ T(jchrisWithConflict._conflicts.length == 1);
+
+ // no login with conflicted user doc
+ try {
+ var s = CouchDB.session({
+ headers : {
+ "Authorization" : "Basic amNocmlzQGFwYWNoZS5vcmc6ZnVubnlib25l"
+ }
+ });
+ T(false && "this will throw")
+ } catch(e) {
+ T(e.error == "unauthorized")
+ T(/conflict/.test(e.reason))
+ }
+
+ // you can delete a user doc
+ s = CouchDB.session().userCtx;
+ T(s.name == null);
+ T(s.roles.indexOf("_admin") !== -1);
+ T(usersDb.deleteDoc(jchrisWithConflict).ok);
+
+ // you can't change doc from type "user"
+ jchrisUserDoc = usersDb.open(jchrisUserDoc._id);
+ jchrisUserDoc.type = "not user";
+ try {
+ usersDb.save(jchrisUserDoc);
+ T(false && "should only allow us to save doc when type == 'user'");
+ } catch(e) {
+ T(e.reason == "doc.type must be user");
+ }
+ jchrisUserDoc.type = "user";
+
+ // "roles" must be an array
+ jchrisUserDoc.roles = "not an array";
+ try {
+ usersDb.save(jchrisUserDoc);
+ T(false && "should only allow us to save doc when roles is an array");
+ } catch(e) {
+ T(e.reason == "doc.roles must be an array");
+ }
+ jchrisUserDoc.roles = [];
+ };
+
+ usersDb.deleteDb();
+ run_on_modified_server(
+ [{section: "couch_httpd_auth",
+ key: "authentication_db", value: usersDb.name}],
+ testFun
+ );
+ usersDb.deleteDb(); // cleanup
+
+}
diff --git a/1.1.x/share/www/script/test/utf8.js b/1.1.x/share/www/script/test/utf8.js
new file mode 100644
index 00000000..b77845d3
--- /dev/null
+++ b/1.1.x/share/www/script/test/utf8.js
@@ -0,0 +1,41 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.utf8 = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var texts = [];
+
+ texts[0] = "1. Ascii: hello";
+ texts[1] = "2. Russian: На берегу пустынных волн";
+ texts[2] = "3. Math: ∮ E⋅da = Q, n → ∞, ∑ f(i) = ∏ g(i),";
+ texts[3] = "4. Geek: STARGΛ̊TE SG-1";
+ texts[4] = "5. Braille: ⡌⠁⠧⠑ ⠼⠁⠒ ⡍⠜⠇⠑⠹⠰⠎ ⡣⠕⠌";
+
+ // check that we can save and reload with full fidelity
+ for (var i=0; i<texts.length; i++) {
+ T(db.save({_id:i.toString(), text:texts[i]}).ok);
+ }
+
+ for (var i=0; i<texts.length; i++) {
+ T(db.open(i.toString()).text == texts[i]);
+ }
+
+ // check that views and key collation don't blow up
+ var rows = db.query(function(doc) { emit(null, doc.text) }).rows;
+ for (var i=0; i<texts.length; i++) {
+ T(rows[i].value == texts[i]);
+ }
+};
diff --git a/1.1.x/share/www/script/test/uuids.js b/1.1.x/share/www/script/test/uuids.js
new file mode 100644
index 00000000..fc33a105
--- /dev/null
+++ b/1.1.x/share/www/script/test/uuids.js
@@ -0,0 +1,120 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.uuids = function(debug) {
+ var etags = [];
+ var testHashBustingHeaders = function(xhr) {
+ T(xhr.getResponseHeader("Cache-Control").match(/no-cache/));
+ T(xhr.getResponseHeader("Pragma") == "no-cache");
+
+ var newetag = xhr.getResponseHeader("ETag");
+ T(etags.indexOf(newetag) < 0);
+ etags[etags.length] = newetag;
+
+ // The time-based tests below were removed because they break easily
+ // when CouchDB runs on a remote server relative to the browser that
+ // runs the Futon test suite.
+ //
+ //var currentTime = new Date();
+ //var expiresHeader = Date.parse(xhr.getResponseHeader("Expires"));
+ //var dateHeader = Date.parse(xhr.getResponseHeader("Date"));
+
+ //T(expiresHeader < currentTime);
+ //T(currentTime - dateHeader < 3000);
+ };
+
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // a single UUID without an explicit count
+ var xhr = CouchDB.request("GET", "/_uuids");
+ T(xhr.status == 200);
+ var result = JSON.parse(xhr.responseText);
+ T(result.uuids.length == 1);
+ var first = result.uuids[0];
+ testHashBustingHeaders(xhr);
+
+ // a single UUID with an explicit count
+ xhr = CouchDB.request("GET", "/_uuids?count=1");
+ T(xhr.status == 200);
+ result = JSON.parse(xhr.responseText);
+ T(result.uuids.length == 1);
+ var second = result.uuids[0];
+ T(first != second);
+
+ // no collisions with 1,000 UUIDs
+ xhr = CouchDB.request("GET", "/_uuids?count=1000");
+ T(xhr.status == 200);
+ result = JSON.parse(xhr.responseText);
+ T( result.uuids.length == 1000 );
+ var seen = {};
+ for(var i in result.uuids) {
+ var id = result.uuids[i];
+ T(seen[id] === undefined);
+ seen[id] = 1;
+ }
+
+ // ensure we return a 405 on POST
+ xhr = CouchDB.request("POST", "/_uuids?count=1000");
+ T(xhr.status == 405);
+
+ // Test sequential uuids
+ var seq_testfun = function() {
+ xhr = CouchDB.request("GET", "/_uuids?count=1000");
+ T(xhr.status == 200);
+ result = JSON.parse(xhr.responseText);
+ for(var i = 1; i < result.uuids.length; i++) {
+ T(result.uuids[i].length == 32);
+ T(result.uuids[i-1] < result.uuids[i], "Sequential uuids are ordered.");
+ }
+ };
+
+ run_on_modified_server([{
+ "section": "uuids",
+ "key": "algorithm",
+ "value": "sequential",
+ }],
+ seq_testfun
+ );
+
+ // Test utc_random uuids
+ var utc_testfun = function() {
+ xhr = CouchDB.request("GET", "/_uuids?count=1000");
+ T(xhr.status == 200);
+ result = JSON.parse(xhr.responseText);
+ T(result.uuids[1].length == 32);
+
+ // no collisions
+ var seen = {};
+ for(var i in result.uuids) {
+ var id = result.uuids[i];
+ T(seen[id] === undefined);
+ seen[id] = 1;
+ }
+
+ // roughly ordered
+ var u1 = result.uuids[1].substr(0, 13);
+ var u2 = result.uuids[result.uuids.length-1].substr(0, 13);
+ T(u1 < u2, "UTC uuids are only roughly ordered, so this assertion may fail occasionally. Don't sweat it.");
+ };
+
+ run_on_modified_server([{
+ "section": "uuids",
+ "key": "algorithm",
+ "value": "utc_random"
+ }],
+ utc_testfun
+ );
+
+};
diff --git a/1.1.x/share/www/script/test/view_collation.js b/1.1.x/share/www/script/test/view_collation.js
new file mode 100644
index 00000000..b01a5c50
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_collation.js
@@ -0,0 +1,116 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_collation = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // NOTE: the values are already in their correct sort order. Consider this
+ // a specification of the collation of JSON types.
+
+ var values = [];
+
+ // special values sort before all other types
+ values.push(null);
+ values.push(false);
+ values.push(true);
+
+ // then numbers
+ values.push(1);
+ values.push(2);
+ values.push(3.0);
+ values.push(4);
+
+ // then text, case sensitive
+ values.push("a");
+ values.push("A");
+ values.push("aa");
+ values.push("b");
+ values.push("B");
+ values.push("ba");
+ values.push("bb");
+
+ // then arrays. compared element by element until different.
+ // Longer arrays sort after their prefixes
+ values.push(["a"]);
+ values.push(["b"]);
+ values.push(["b","c"]);
+ values.push(["b","c", "a"]);
+ values.push(["b","d"]);
+ values.push(["b","d", "e"]);
+
+ // then objects, compared key by key until different;
+ // larger objects sort after their subset objects.
+ values.push({a:1});
+ values.push({a:2});
+ values.push({b:1});
+ values.push({b:2});
+ values.push({b:2, a:1}); // Member order does matter for collation.
+ // CouchDB preserves member order
+ // but doesn't require that clients will.
+ // (this test might fail if used with a js engine
+ // that doesn't preserve order)
+ values.push({b:2, c:2});
+
+ for (var i=0; i<values.length; i++) {
+ db.save({_id:(i).toString(), foo:values[i]});
+ }
+
+ var queryFun = function(doc) { emit(doc.foo, null); };
+ var rows = db.query(queryFun).rows;
+ for (i=0; i<values.length; i++) {
+ T(equals(rows[i].key, values[i]));
+ }
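+
+ // A rough client-side sketch of the type ranking specified above (string
+ // collation and member-order comparison elided; CouchDB delegates string
+ // ordering to ICU):
+ var typeRank = function(v) {
+ if (v === null) return 0;
+ if (typeof v === "boolean") return v ? 2 : 1;
+ if (typeof v === "number") return 3;
+ if (typeof v === "string") return 4;
+ if (v instanceof Array) return 5;
+ return 6; // objects sort last
+ };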
+
+ // everything has collated correctly. Now to check the descending output
+ rows = db.query(queryFun, null, {descending: true}).rows;
+ for (i=0; i<values.length; i++) {
+ T(equals(rows[i].key, values[values.length - 1 -i]));
+ }
+
+ // now check the key query args
+ for (i=1; i<values.length; i++) {
+ var queryOptions = {key:values[i]};
+ rows = db.query(queryFun, null, queryOptions).rows;
+ T(rows.length == 1 && equals(rows[0].key, values[i]));
+ }
+
+ // test inclusive_end=true (the default)
+ // the inclusive_end=true functionality is limited to endkey currently
+ // if you need inclusive_start=false for startkey, please do implement. ;)
+ var rows = db.query(queryFun, null, {endkey : "b", inclusive_end:true}).rows;
+ T(rows[rows.length-1].key == "b");
+ // descending=true
+ var rows = db.query(queryFun, null, {endkey : "b",
+ descending:true, inclusive_end:true}).rows;
+ T(rows[rows.length-1].key == "b");
+
+ // test inclusive_end=false
+ var rows = db.query(queryFun, null, {endkey : "b", inclusive_end:false}).rows;
+ T(rows[rows.length-1].key == "aa");
+ // descending=true
+ var rows = db.query(queryFun, null, {endkey : "b",
+ descending:true, inclusive_end:false}).rows;
+ T(rows[rows.length-1].key == "B");
+
+ var rows = db.query(queryFun, null, {
+ endkey : "b", endkey_docid: "10",
+ inclusive_end:false}).rows;
+ T(rows[rows.length-1].key == "aa");
+
+ var rows = db.query(queryFun, null, {
+ endkey : "b", endkey_docid: "11",
+ inclusive_end:false}).rows;
+ T(rows[rows.length-1].key == "b");
+};
diff --git a/1.1.x/share/www/script/test/view_collation_raw.js b/1.1.x/share/www/script/test/view_collation_raw.js
new file mode 100644
index 00000000..31624cdb
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_collation_raw.js
@@ -0,0 +1,123 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_collation_raw = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // NOTE: the values are already in their correct sort order. Consider this
+ // a specification of the collation of JSON types.
+
+ var values = [];
+
+ // numbers
+ values.push(1);
+ values.push(2);
+ values.push(3);
+ values.push(4);
+
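+ // then false, null, true (note: raw mode orders these differently from
+ // the default collation, where null sorts before the booleans)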
+ values.push(false);
+ values.push(null);
+ values.push(true);
+
+ // then objects, compared key by key until different;
+ // larger objects sort after their subset objects.
+ values.push({a:1});
+ values.push({a:2});
+ values.push({b:1});
+ values.push({b:2});
+ values.push({b:2, a:1}); // Member order does matter for collation.
+ // CouchDB preserves member order
+ // but doesn't require that clients will.
+ // (this test might fail if used with a js engine
+ // that doesn't preserve order)
+ values.push({b:2, c:2});
+
+ // then arrays. compared element by element until different.
+ // Longer arrays sort after their prefixes
+ values.push(["a"]);
+ values.push(["b"]);
+ values.push(["b","c"]);
+ values.push(["b","c", "a"]);
+ values.push(["b","d"]);
+ values.push(["b","d", "e"]);
+
+
+ // then text, case sensitive
+ values.push("A");
+ values.push("B");
+ values.push("a");
+ values.push("aa");
+ values.push("b");
+ values.push("ba");
+ values.push("bb");
+
+ for (var i=0; i<values.length; i++) {
+ db.save({_id:(i).toString(), foo:values[i]});
+ }
+
+ var designDoc = {
+ _id:"_design/test", // turn off couch.js id escaping?
+ language: "javascript",
+ views: {
+ test: {map: "function(doc) { emit(doc.foo, null); }",
+ options: {collation:"raw"}}
+ }
+ };
+ T(db.save(designDoc).ok);
+ var rows = db.view("test/test").rows;
+ for (i=0; i<values.length; i++) {
+ T(equals(rows[i].key, values[i]));
+ }
+
+ // everything has collated correctly. Now to check the descending output
+ rows = db.view("test/test", {descending: true}).rows;
+ for (i=0; i<values.length; i++) {
+ T(equals(rows[i].key, values[values.length - 1 -i]));
+ }
+
+ // now check the key query args
+ for (i=1; i<values.length; i++) {
+ rows = db.view("test/test", {key:values[i]}).rows;
+ T(rows.length == 1 && equals(rows[0].key, values[i]));
+ }
+
+ // test inclusive_end=true (the default)
+ // the inclusive_end=true functionality is limited to endkey currently
+ // if you need inclusive_start=false for startkey, please do implement. ;)
+ var rows = db.view("test/test", {endkey : "b", inclusive_end:true}).rows;
+ T(rows[rows.length-1].key == "b");
+ // descending=true
+ var rows = db.view("test/test", {endkey : "b",
+ descending:true, inclusive_end:true}).rows;
+ T(rows[rows.length-1].key == "b");
+
+ // test inclusive_end=false
+ var rows = db.view("test/test", {endkey : "b", inclusive_end:false}).rows;
+ T(rows[rows.length-1].key == "aa");
+ // descending=true
+ var rows = db.view("test/test", {endkey : "b",
+ descending:true, inclusive_end:false}).rows;
+ T(rows[rows.length-1].key == "ba");
+
+ var rows = db.view("test/test", {
+ endkey : "b", endkey_docid: "10",
+ inclusive_end:false}).rows;
+ T(rows[rows.length-1].key == "aa");
+
+ var rows = db.view("test/test", {
+ endkey : "b", endkey_docid: "11",
+ inclusive_end:false}).rows;
+ T(rows[rows.length-1].key == "aa");
+};
diff --git a/1.1.x/share/www/script/test/view_compaction.js b/1.1.x/share/www/script/test/view_compaction.js
new file mode 100644
index 00000000..a11fb7bd
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_compaction.js
@@ -0,0 +1,104 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_compaction = function(debug) {
+
+ if (debug) debugger;
+
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit": "true"});
+
+ db.deleteDb();
+ db.createDb();
+
+ var ddoc = {
+ _id: "_design/foo",
+ language: "javascript",
+ views: {
+ view1: {
+ map: "function(doc) { emit(doc._id, doc.value) }"
+ },
+ view2: {
+ map: "function(doc) { emit(doc._id, doc.value); }",
+ reduce: "function(keys, values, rereduce) { return sum(values); }"
+ }
+ }
+ };
+ T(db.save(ddoc).ok);
+
+ var docs = makeDocs(0, 1000);
+ db.bulkSave(docs);
+
+ var resp = db.view('foo/view1', {});
+ T(resp.rows.length === 1000);
+
+ resp = db.view('foo/view2', {});
+ T(resp.rows.length === 1);
+
+ resp = db.designInfo("_design/foo");
+ T(resp.view_index.update_seq === 1001);
+
+
+ // update docs
+ for (var i = 0; i < docs.length; i++) {
+ docs[i].integer = docs[i].integer + 1;
+ }
+ db.bulkSave(docs);
+
+
+ resp = db.view('foo/view1', {});
+ T(resp.rows.length === 1000);
+
+ resp = db.view('foo/view2', {});
+ T(resp.rows.length === 1);
+
+ resp = db.designInfo("_design/foo");
+ T(resp.view_index.update_seq === 2001);
+
+
+ // update docs again...
+ for (var i = 0; i < docs.length; i++) {
+ docs[i].integer = docs[i].integer + 2;
+ }
+ db.bulkSave(docs);
+
+
+ resp = db.view('foo/view1', {});
+ T(resp.rows.length === 1000);
+
+ resp = db.view('foo/view2', {});
+ T(resp.rows.length === 1);
+
+ resp = db.designInfo("_design/foo");
+ T(resp.view_index.update_seq === 3001);
+
+ var disk_size_before_compact = resp.view_index.disk_size;
+
+ // compact view group
+ var xhr = CouchDB.request("POST", "/" + db.name + "/_compact" + "/foo");
+ T(JSON.parse(xhr.responseText).ok === true);
+
+ resp = db.designInfo("_design/foo");
+ while (resp.view_index.compact_running === true) {
+ resp = db.designInfo("_design/foo");
+ }
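+
+ // The loop above simply busy-waits on view_index.compact_running; that is
+ // adequate for a test, though a bounded retry would fail faster if
+ // compaction ever hung.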
+
+
+ resp = db.view('foo/view1', {});
+ T(resp.rows.length === 1000);
+
+ resp = db.view('foo/view2', {});
+ T(resp.rows.length === 1);
+
+ resp = db.designInfo("_design/foo");
+ T(resp.view_index.update_seq === 3001);
+ T(resp.view_index.disk_size < disk_size_before_compact);
+};
\ No newline at end of file
diff --git a/1.1.x/share/www/script/test/view_conflicts.js b/1.1.x/share/www/script/test/view_conflicts.js
new file mode 100644
index 00000000..96f97d56
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_conflicts.js
@@ -0,0 +1,49 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_conflicts = function(debug) {
+ var dbA = new CouchDB("test_suite_db_a", {"X-Couch-Full-Commit":"false"});
+ dbA.deleteDb();
+ dbA.createDb();
+ var dbB = new CouchDB("test_suite_db_b", {"X-Couch-Full-Commit":"false"});
+ dbB.deleteDb();
+ dbB.createDb();
+ if (debug) debugger;
+
+ var docA = {_id: "foo", bar: 42};
+ T(dbA.save(docA).ok);
+ CouchDB.replicate(dbA.name, dbB.name);
+
+ var docB = dbB.open("foo");
+ docB.bar = 43;
+ dbB.save(docB);
+ docA.bar = 41;
+ dbA.save(docA);
+ CouchDB.replicate(dbA.name, dbB.name);
+
+ var doc = dbB.open("foo", {conflicts: true});
+ T(doc._conflicts.length == 1);
+ var conflictRev = doc._conflicts[0];
+ if (doc.bar == 41) { // A won
+ T(conflictRev == docB._rev);
+ } else { // B won
+ T(doc.bar == 43);
+ T(conflictRev == docA._rev);
+ }
+
+ var results = dbB.query(function(doc) {
+ if (doc._conflicts) {
+ emit(doc._id, doc._conflicts);
+ }
+ });
+ T(results.rows[0].value[0] == conflictRev);
+};
diff --git a/1.1.x/share/www/script/test/view_errors.js b/1.1.x/share/www/script/test/view_errors.js
new file mode 100644
index 00000000..e8bd08e4
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_errors.js
@@ -0,0 +1,189 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_errors = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"true"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ run_on_modified_server(
+ [{section: "couchdb",
+ key: "os_process_timeout",
+ value: "500"}],
+ function() {
+ var doc = {integer: 1, string: "1", array: [1, 2, 3]};
+ T(db.save(doc).ok);
+
+ // emitting a key value that is undefined should result in that row
+ // being included in the view results as null
+ var results = db.query(function(doc) {
+ emit(doc.undef, null);
+ });
+ T(results.total_rows == 1);
+ T(results.rows[0].key == null);
+
+ // if a view function throws an exception, its results are not included in
+ // the view index, but the view does not itself raise an error
+ var results = db.query(function(doc) {
+ doc.undef(); // throws an error
+ });
+ T(results.total_rows == 0);
+
+ // if a view function includes an undefined value in the emitted key or
+ // value, it is treated as null
+ var results = db.query(function(doc) {
+ emit([doc._id, doc.undef], null);
+ });
+ T(results.total_rows == 1);
+ T(results.rows[0].key[1] == null);
+
+ // querying a view with invalid params should give a reasonable error message
+ var xhr = CouchDB.request("POST", "/test_suite_db/_temp_view?startkey=foo", {
+ headers: {"Content-Type": "application/json"},
+ body: JSON.stringify({language: "javascript",
+ map : "function(doc){emit(doc.integer)}"
+ })
+ });
+ T(JSON.parse(xhr.responseText).error == "bad_request");
+
+ // content type must be json
+ var xhr = CouchDB.request("POST", "/test_suite_db/_temp_view", {
+ headers: {"Content-Type": "application/x-www-form-urlencoded"},
+ body: JSON.stringify({language: "javascript",
+ map : "function(doc){}"
+ })
+ });
+ T(xhr.status == 415);
+
+ var map = function (doc) {emit(doc.integer, doc.integer);};
+
+ try {
+ db.query(map, null, {group: true});
+ T(0 == 1);
+ } catch(e) {
+ T(e.error == "query_parse_error");
+ }
+
+ var designDoc = {
+ _id:"_design/test",
+ language: "javascript",
+ views: {
+ "no_reduce": {map:"function(doc) {emit(doc._id, null);}"},
+ "with_reduce": {
+ map:"function (doc) {emit(doc.integer, doc.integer)};",
+ reduce:"function (keys, values) { return sum(values); };"}
+ }
+ };
+ T(db.save(designDoc).ok);
+
+ var designDoc2 = {
+ _id:"_design/testbig",
+ language: "javascript",
+ views: {
+ "reduce_too_big" : {
+ map:"function (doc) {emit(doc.integer, doc.integer)};",
+ reduce:"function (keys, values) { var chars = []; for (var i=0; i < 1000; i++) {chars.push('wazzap');};return chars; };"}
+ }
+ };
+ T(db.save(designDoc2).ok);
+
+ try {
+ db.view("test/no_reduce", {group: true});
+ T(0 == 1);
+ } catch(e) {
+ T(db.last_req.status == 400);
+ T(e.error == "query_parse_error");
+ }
+
+ try {
+ db.view("test/no_reduce", {group_level: 1});
+ T(0 == 1);
+ } catch(e) {
+ T(db.last_req.status == 400);
+ T(e.error == "query_parse_error");
+ }
+
+ try {
+ db.view("test/no_reduce", {reduce: true});
+ T(0 == 1);
+ } catch(e) {
+ T(db.last_req.status == 400);
+ T(e.error == "query_parse_error");
+ }
+
+ db.view("test/no_reduce", {reduce: false});
+ TEquals(200, db.last_req.status, "reduce=false for map views (without"
+ + " group or group_level) is allowed");
+
+ try {
+ db.view("test/with_reduce", {group: true, reduce: false});
+ T(0 == 1);
+ } catch(e) {
+ T(db.last_req.status == 400);
+ T(e.error == "query_parse_error");
+ }
+
+ try {
+ db.view("test/with_reduce", {group_level: 1, reduce: false});
+ T(0 == 1);
+ } catch(e) {
+ T(db.last_req.status == 400);
+ T(e.error == "query_parse_error");
+ }
+
+ var designDoc3 = {
+ _id:"_design/infinite",
+ language: "javascript",
+ views: {
+ "infinite_loop" :{map:"function(doc) {while(true){emit(doc,doc);}};"}
+ }
+ };
+ T(db.save(designDoc3).ok);
+
+ try {
+ db.view("infinite/infinite_loop");
+ T(0 == 1);
+ } catch(e) {
+ T(e.error == "os_process_error");
+ }
+
+ // Check error responses for invalid multi-get bodies.
+ var path = "/test_suite_db/_design/test/_view/no_reduce";
+ var xhr = CouchDB.request("POST", path, {body: "[]"});
+ T(xhr.status == 400);
+ var result = JSON.parse(xhr.responseText);
+ T(result.error == "bad_request");
+ T(result.reason == "Request body must be a JSON object");
+ var data = "{\"keys\": 1}";
+ xhr = CouchDB.request("POST", path, {body:data});
+ T(xhr.status == 400);
+ result = JSON.parse(xhr.responseText);
+ T(result.error == "bad_request");
+ T(result.reason == "`keys` member must be a array.");
+
+ // if the reduce output grows too fast, the server throws an overflow error
+ var path = "/test_suite_db/_design/testbig/_view/reduce_too_big";
+ xhr = CouchDB.request("GET", path);
+ T(xhr.status == 500);
+ result = JSON.parse(xhr.responseText);
+ T(result.error == "reduce_overflow_error");
+
+ try {
+ db.query(function() {emit(null, null)}, null, {startkey: 2, endkey:1});
+ T(0 == 1);
+ } catch(e) {
+ T(e.error == "query_parse_error");
+ T(e.reason.match(/no rows can match/i));
+ }
+ });
+};
diff --git a/1.1.x/share/www/script/test/view_include_docs.js b/1.1.x/share/www/script/test/view_include_docs.js
new file mode 100644
index 00000000..944c9103
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_include_docs.js
@@ -0,0 +1,192 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_include_docs = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(0, 100);
+ db.bulkSave(docs);
+
+ var designDoc = {
+ _id:"_design/test",
+ language: "javascript",
+ views: {
+ all_docs: {
+ map: "function(doc) { emit(doc.integer, doc.string) }"
+ },
+ with_prev: {
+ map: "function(doc){if(doc.prev) emit(doc._id,{'_rev':doc.prev}); else emit(doc._id,{'_rev':doc._rev});}"
+ },
+ with_id: {
+ map: "function(doc) {if(doc.link_id) { var value = {'_id':doc.link_id}; if (doc.link_rev) {value._rev = doc.link_rev}; emit(doc._id, value);}};"
+ },
+ summate: {
+ map:"function (doc) {emit(doc.integer, doc.integer)};",
+ reduce:"function (keys, values) { return sum(values); };"
+ }
+ }
+  };
+ T(db.save(designDoc).ok);
+
+ var resp = db.view('test/all_docs', {include_docs: true, limit: 2});
+ T(resp.rows.length == 2);
+ T(resp.rows[0].id == "0");
+ T(resp.rows[0].doc._id == "0");
+ T(resp.rows[1].id == "1");
+ T(resp.rows[1].doc._id == "1");
+
+ resp = db.view('test/all_docs', {include_docs: true}, [29, 74]);
+ T(resp.rows.length == 2);
+ T(resp.rows[0].doc._id == "29");
+ T(resp.rows[1].doc.integer == 74);
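+
+  // Equivalent raw requests (illustrative):
+  //   GET  /test_suite_db/_design/test/_view/all_docs?include_docs=true&limit=2
+  //   POST /test_suite_db/_design/test/_view/all_docs?include_docs=true
+  //        with body {"keys": [29, 74]}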
+
+ resp = db.allDocs({limit: 2, skip: 1, include_docs: true});
+ T(resp.rows.length == 2);
+ T(resp.rows[0].doc.integer == 1);
+ T(resp.rows[1].doc.integer == 10);
+
+ resp = db.allDocs({include_docs: true}, ['not_a_doc']);
+ T(resp.rows.length == 1);
+ T(!resp.rows[0].doc);
+
+ resp = db.allDocs({include_docs: true}, ["1", "foo"]);
+ T(resp.rows.length == 2);
+ T(resp.rows[0].doc.integer == 1);
+ T(!resp.rows[1].doc);
+
+ resp = db.allDocs({include_docs: true, limit: 0});
+ T(resp.rows.length == 0);
+
+  // include_docs is not allowed when the reduce function runs
+ try {
+ resp = db.view('test/summate', {include_docs: true});
+    alert(JSON.stringify(resp)); // debugging aid: show the unexpected response
+ T(0==1);
+ } catch (e) {
+ T(e.error == 'query_parse_error');
+ }
+
+ // Reduce support when reduce=false
+ resp = db.view('test/summate', {reduce: false, include_docs: true});
+ T(resp.rows.length == 100);
+
+ // Not an error with include_docs=false&reduce=true
+ resp = db.view('test/summate', {reduce: true, include_docs: false});
+ T(resp.rows.length == 1);
+ T(resp.rows[0].value == 4950);
+
+ T(db.save({
+ "_id": "link-to-10",
+ "link_id" : "10"
+ }).ok);
+
+ // you can link to another doc from a value.
+ resp = db.view("test/with_id", {key:"link-to-10"});
+ T(resp.rows[0].key == "link-to-10");
+ T(resp.rows[0].value["_id"] == "10");
+
+ resp = db.view("test/with_id", {key:"link-to-10",include_docs: true});
+ T(resp.rows[0].key == "link-to-10");
+ T(resp.rows[0].value["_id"] == "10");
+ T(resp.rows[0].doc._id == "10");
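+
+  // With include_docs=true, a value carrying an `_id` makes the server
+  // join in that linked document, and a `_rev` in the value (when present)
+  // pins which revision is fetched.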
+
+  // Check that an emitted _rev in the value controls which revision include_docs returns
+ resp = db.allDocs({include_docs: true}, ["0"]);
+ var before = resp.rows[0].doc;
+
+ var after = db.open("0");
+ after.integer = 100;
+ after.prev = after._rev;
+  resp = db.save(after);
+ T(resp.ok);
+
+ var after = db.open("0");
+ TEquals(resp.rev, after._rev, "fails with firebug running");
+ T(after._rev != after.prev, "passes");
+ TEquals(100, after.integer, "fails with firebug running");
+
+ // should emit the previous revision
+ resp = db.view("test/with_prev", {include_docs: true}, ["0"]);
+ T(resp.rows[0].doc._id == "0");
+ T(resp.rows[0].doc._rev == before._rev);
+ T(!resp.rows[0].doc.prev);
+ T(resp.rows[0].doc.integer == 0);
+
+ var xhr = CouchDB.request("POST", "/test_suite_db/_compact");
+  T(xhr.status == 202);
+  while (db.info().compact_running) {} // poll until compaction finishes
+
+ resp = db.view("test/with_prev", {include_docs: true}, ["0", "23"]);
+ T(resp.rows.length == 2);
+ T(resp.rows[0].key == "0");
+ T(resp.rows[0].id == "0");
+ T(!resp.rows[0].doc);
+ T(resp.rows[0].doc == null);
+ T(resp.rows[1].doc.integer == 23);
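+
+  // Compaction discards old revision bodies, so the row that points at
+  // doc "0"'s previous _rev can no longer be joined (its doc comes back
+  // null), while doc "23", emitted at its current _rev, still resolves.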
+
+ // COUCHDB-549 - include_docs=true with conflicts=true
+
+ var dbA = new CouchDB("test_suite_db_a", {"X-Couch-Full-Commit":"false"});
+ var dbB = new CouchDB("test_suite_db_b", {"X-Couch-Full-Commit":"false"});
+
+ dbA.deleteDb();
+ dbA.createDb();
+ dbB.deleteDb();
+ dbB.createDb();
+
+ var ddoc = {
+ _id: "_design/mydesign",
+ language : "javascript",
+ views : {
+ myview : {
+ map: (function(doc) {
+ emit(doc.value, 1);
+ }).toString()
+ }
+ }
+ };
+ TEquals(true, dbA.save(ddoc).ok);
+
+ var doc1a = {_id: "foo", value: 1, str: "1"};
+ TEquals(true, dbA.save(doc1a).ok);
+
+ var doc1b = {_id: "foo", value: 1, str: "666"};
+ TEquals(true, dbB.save(doc1b).ok);
+
+ var doc2 = {_id: "bar", value: 2, str: "2"};
+ TEquals(true, dbA.save(doc2).ok);
+
+ TEquals(true, CouchDB.replicate(dbA.name, dbB.name).ok);
+
+ doc1b = dbB.open("foo", {conflicts: true});
+ TEquals(true, doc1b._conflicts instanceof Array);
+ TEquals(1, doc1b._conflicts.length);
+ var conflictRev = doc1b._conflicts[0];
+
+ doc2 = dbB.open("bar", {conflicts: true});
+ TEquals("undefined", typeof doc2._conflicts);
+
+ resp = dbB.view("mydesign/myview", {include_docs: true, conflicts: true});
+
+ TEquals(2, resp.rows.length);
+ TEquals(true, resp.rows[0].doc._conflicts instanceof Array);
+ TEquals(1, resp.rows[0].doc._conflicts.length);
+ TEquals(conflictRev, resp.rows[0].doc._conflicts[0]);
+ TEquals("undefined", typeof resp.rows[1].doc._conflicts);
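+
+  // Equivalent raw request (illustrative):
+  //   GET /test_suite_db_b/_design/mydesign/_view/myview?include_docs=true&conflicts=true
+  // _conflicts is attached only to documents that are actually in conflict.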
+
+ // cleanup
+ dbA.deleteDb();
+ dbB.deleteDb();
+};
diff --git a/1.1.x/share/www/script/test/view_multi_key_all_docs.js b/1.1.x/share/www/script/test/view_multi_key_all_docs.js
new file mode 100644
index 00000000..1113be4d
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_multi_key_all_docs.js
@@ -0,0 +1,91 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_multi_key_all_docs = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(0, 100);
+ db.bulkSave(docs);
+
+ var keys = ["10","15","30","37","50"];
+ var rows = db.allDocs({},keys).rows;
+ T(rows.length == keys.length);
+ for(var i=0; i<rows.length; i++)
+ T(rows[i].id == keys[i]);
+
+ // keys in GET parameters
+ rows = db.allDocs({keys:keys}, null).rows;
+ T(rows.length == keys.length);
+ for(var i=0; i<rows.length; i++)
+ T(rows[i].id == keys[i]);
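+
+  // Both forms address the same endpoint (illustrative):
+  //   POST /test_suite_db/_all_docs               body: {"keys": [...]}
+  //   GET  /test_suite_db/_all_docs?keys=[...]    (URL-encoded JSON array)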
+
+ rows = db.allDocs({limit: 1}, keys).rows;
+ T(rows.length == 1);
+ T(rows[0].id == keys[0]);
+
+ // keys in GET parameters
+ rows = db.allDocs({limit: 1, keys: keys}, null).rows;
+ T(rows.length == 1);
+ T(rows[0].id == keys[0]);
+
+ rows = db.allDocs({skip: 2}, keys).rows;
+ T(rows.length == 3);
+ for(var i=0; i<rows.length; i++)
+ T(rows[i].id == keys[i+2]);
+
+ // keys in GET parameters
+ rows = db.allDocs({skip: 2, keys: keys}, null).rows;
+ T(rows.length == 3);
+ for(var i=0; i<rows.length; i++)
+ T(rows[i].id == keys[i+2]);
+
+ rows = db.allDocs({descending: "true"}, keys).rows;
+ T(rows.length == keys.length);
+ for(var i=0; i<rows.length; i++)
+ T(rows[i].id == keys[keys.length-i-1]);
+
+ // keys in GET parameters
+ rows = db.allDocs({descending: "true", keys: keys}, null).rows;
+ T(rows.length == keys.length);
+ for(var i=0; i<rows.length; i++)
+ T(rows[i].id == keys[keys.length-i-1]);
+
+ rows = db.allDocs({descending: "true", skip: 3, limit:1}, keys).rows;
+ T(rows.length == 1);
+ T(rows[0].id == keys[1]);
+
+ // keys in GET parameters
+ rows = db.allDocs({descending: "true", skip: 3, limit:1, keys: keys}, null).rows;
+ T(rows.length == 1);
+ T(rows[0].id == keys[1]);
+
+  // Check that we get error rows when a key doesn't exist
+ rows = db.allDocs({}, [1, "i_dont_exist", "0"]).rows;
+ T(rows.length == 3);
+ T(rows[0].error == "not_found");
+ T(!rows[0].id);
+ T(rows[1].error == "not_found");
+ T(!rows[1].id);
+ T(rows[2].id == rows[2].key && rows[2].key == "0");
+
+ // keys in GET parameters
+ rows = db.allDocs({keys: [1, "i_dont_exist", "0"]}, null).rows;
+ T(rows.length == 3);
+ T(rows[0].error == "not_found");
+ T(!rows[0].id);
+ T(rows[1].error == "not_found");
+ T(!rows[1].id);
+ T(rows[2].id == rows[2].key && rows[2].key == "0");
+};
diff --git a/1.1.x/share/www/script/test/view_multi_key_design.js b/1.1.x/share/www/script/test/view_multi_key_design.js
new file mode 100644
index 00000000..38396955
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_multi_key_design.js
@@ -0,0 +1,216 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_multi_key_design = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(0, 100);
+ db.bulkSave(docs);
+
+ var designDoc = {
+ _id:"_design/test",
+ language: "javascript",
+ views: {
+ all_docs: {
+ map: "function(doc) { emit(doc.integer, doc.string) }"
+ },
+ multi_emit: {
+ map: "function(doc) {for(var i = 0 ; i < 3 ; i++) { emit(i, doc.integer) ; } }"
+ },
+ summate: {
+ map:"function (doc) {emit(doc.integer, doc.integer)};",
+ reduce:"function (keys, values) { return sum(values); };"
+ }
+ }
+ };
+ T(db.save(designDoc).ok);
+
+ // Test that missing keys work too
+ var keys = [101,30,15,37,50];
+ var reduce = db.view("test/summate",{group:true},keys).rows;
+ T(reduce.length == keys.length-1); // 101 is missing
+ for(var i=0; i<reduce.length; i++) {
+ T(keys.indexOf(reduce[i].key) != -1);
+ T(reduce[i].key == reduce[i].value);
+ }
+
+ // First, the goods:
+ var keys = [10,15,30,37,50];
+ var rows = db.view("test/all_docs",{},keys).rows;
+ for(var i=0; i<rows.length; i++) {
+ T(keys.indexOf(rows[i].key) != -1);
+ T(rows[i].key == rows[i].value);
+ }
+
+ // with GET keys
+ rows = db.view("test/all_docs",{keys:keys},null).rows;
+ for(var i=0;i<rows.length; i++) {
+ T(keys.indexOf(rows[i].key) != -1);
+ T(rows[i].key == rows[i].value);
+ }
+
+ var reduce = db.view("test/summate",{group:true},keys).rows;
+ T(reduce.length == keys.length);
+ for(var i=0; i<reduce.length; i++) {
+ T(keys.indexOf(reduce[i].key) != -1);
+ T(reduce[i].key == reduce[i].value);
+ }
+
+ // with GET keys
+ reduce = db.view("test/summate",{group:true,keys:keys},null).rows;
+ T(reduce.length == keys.length);
+ for(var i=0; i<reduce.length; i++) {
+ T(keys.indexOf(reduce[i].key) != -1);
+ T(reduce[i].key == reduce[i].value);
+ }
+
+ // Test that invalid parameter combinations get rejected
+ var badargs = [{startkey:0}, {endkey:0}, {key: 0}, {group_level: 2}];
+ var getbadargs = [{startkey:0, keys:keys}, {endkey:0, keys:keys},
+ {key:0, keys:keys}, {group_level: 2, keys:keys}];
+ for(var i in badargs)
+ {
+ try {
+ db.view("test/all_docs",badargs[i],keys);
+ T(0==1);
+ } catch (e) {
+ T(e.error == "query_parse_error");
+ }
+
+ try {
+ db.view("test/all_docs",getbadargs[i],null);
+ T(0==1);
+ } catch (e) {
+      T(e.error == "query_parse_error");
+ }
+ }
+
+ try {
+ db.view("test/summate",{},keys);
+ T(0==1);
+ } catch (e) {
+ T(e.error == "query_parse_error");
+ }
+
+ try {
+ db.view("test/summate",{keys:keys},null);
+ T(0==1);
+ } catch (e) {
+ T(e.error == "query_parse_error");
+ }
+
+  // Test that a view with both a map and a reduce function supports keys when reduce=false
+ var resp = db.view("test/summate", {reduce: false}, keys);
+ T(resp.rows.length == 5);
+
+ resp = db.view("test/summate", {reduce: false, keys: keys}, null);
+ T(resp.rows.length == 5);
+
+  // Check that limiting by startkey_docid and endkey_docid is applied
+ // as expected.
+ var curr = db.view("test/multi_emit", {startkey_docid: 21, endkey_docid: 23}, [0, 2]).rows;
+ var exp_key = [ 0, 0, 0, 2, 2, 2] ;
+ var exp_val = [21, 22, 23, 21, 22, 23] ;
+ T(curr.length == 6);
+ for( var i = 0 ; i < 6 ; i++)
+ {
+ T(curr[i].key == exp_key[i]);
+ T(curr[i].value == exp_val[i]);
+ }
+
+ curr = db.view("test/multi_emit", {startkey_docid: 21, endkey_docid: 23, keys: [0, 2]}, null).rows;
+ T(curr.length == 6);
+ for( var i = 0 ; i < 6 ; i++)
+ {
+ T(curr[i].key == exp_key[i]);
+ T(curr[i].value == exp_val[i]);
+ }
+
+ // Check limit works
+ curr = db.view("test/all_docs", {limit: 1}, keys).rows;
+ T(curr.length == 1);
+ T(curr[0].key == 10);
+
+ curr = db.view("test/all_docs", {limit: 1, keys: keys}, null).rows;
+ T(curr.length == 1);
+ T(curr[0].key == 10);
+
+ // Check offset works
+ curr = db.view("test/multi_emit", {skip: 1}, [0]).rows;
+ T(curr.length == 99);
+ T(curr[0].value == 1);
+
+ curr = db.view("test/multi_emit", {skip: 1, keys: [0]}, null).rows;
+ T(curr.length == 99);
+ T(curr[0].value == 1);
+
+  // Check that the descending direction works
+ curr = db.view("test/multi_emit", {descending: "true"}, [1]).rows;
+ T(curr.length == 100);
+ T(curr[0].value == 99);
+ T(curr[99].value == 0);
+
+ curr = db.view("test/multi_emit", {descending: "true", keys: [1]}, null).rows;
+ T(curr.length == 100);
+ T(curr[0].value == 99);
+ T(curr[99].value == 0);
+
+ // Check a couple combinations
+ curr = db.view("test/multi_emit", {descending: "true", skip: 3, limit: 2}, [2]).rows;
+  T(curr.length == 2);
+ T(curr[0].value == 96);
+ T(curr[1].value == 95);
+
+ curr = db.view("test/multi_emit", {descending: "true", skip: 3, limit: 2, keys: [2]}, null).rows;
+  T(curr.length == 2);
+ T(curr[0].value == 96);
+ T(curr[1].value == 95);
+
+ curr = db.view("test/multi_emit", {skip: 2, limit: 3, startkey_docid: "13"}, [0]).rows;
+ T(curr.length == 3);
+ T(curr[0].value == 15);
+ T(curr[1].value == 16);
+ T(curr[2].value == 17);
+
+ curr = db.view("test/multi_emit", {skip: 2, limit: 3, startkey_docid: "13", keys: [0]}, null).rows;
+ T(curr.length == 3);
+ T(curr[0].value == 15);
+ T(curr[1].value == 16);
+ T(curr[2].value == 17);
+
+ curr = db.view("test/multi_emit",
+ {skip: 1, limit: 5, startkey_docid: "25", endkey_docid: "27"}, [1]).rows;
+ T(curr.length == 2);
+ T(curr[0].value == 26);
+ T(curr[1].value == 27);
+
+ curr = db.view("test/multi_emit",
+ {skip: 1, limit: 5, startkey_docid: "25", endkey_docid: "27", keys: [1]}, null).rows;
+ T(curr.length == 2);
+ T(curr[0].value == 26);
+ T(curr[1].value == 27);
+
+ curr = db.view("test/multi_emit",
+ {skip: 1, limit: 5, startkey_docid: "28", endkey_docid: "26", descending: "true"}, [1]).rows;
+ T(curr.length == 2);
+ T(curr[0].value == 27);
+ T(curr[1].value == 26);
+
+ curr = db.view("test/multi_emit",
+ {skip: 1, limit: 5, startkey_docid: "28", endkey_docid: "26", descending: "true", keys: [1]}, null).rows;
+ T(curr.length == 2);
+ T(curr[0].value == 27);
+ T(curr[1].value == 26);
+};
diff --git a/1.1.x/share/www/script/test/view_multi_key_temp.js b/1.1.x/share/www/script/test/view_multi_key_temp.js
new file mode 100644
index 00000000..55eefda5
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_multi_key_temp.js
@@ -0,0 +1,37 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_multi_key_temp = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(0, 100);
+ db.bulkSave(docs);
+
+ var queryFun = function(doc) { emit(doc.integer, doc.integer) };
+ var reduceFun = function (keys, values) { return sum(values); };
+
+ var keys = [10,15,30,37,50];
+ var rows = db.query(queryFun, null, {}, keys).rows;
+ for(var i=0; i<rows.length; i++) {
+ T(keys.indexOf(rows[i].key) != -1);
+ T(rows[i].key == rows[i].value);
+ }
+
+ var reduce = db.query(queryFun, reduceFun, {group:true}, keys).rows;
+ for(var i=0; i<reduce.length; i++) {
+ T(keys.indexOf(reduce[i].key) != -1);
+ T(reduce[i].key == reduce[i].value);
+ }
+};
diff --git a/1.1.x/share/www/script/test/view_offsets.js b/1.1.x/share/www/script/test/view_offsets.js
new file mode 100644
index 00000000..464a1ae2
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_offsets.js
@@ -0,0 +1,108 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_offsets = function(debug) {
+ if (debug) debugger;
+
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+
+ var designDoc = {
+ _id : "_design/test",
+ views : {
+ offset : {
+        map : "function(doc) { emit([doc.letter, doc.number], doc); }"
+ }
+ }
+ };
+ T(db.save(designDoc).ok);
+
+ var docs = [
+ {_id : "a1", letter : "a", number : 1, foo: "bar"},
+ {_id : "a2", letter : "a", number : 2, foo: "bar"},
+ {_id : "a3", letter : "a", number : 3, foo: "bar"},
+ {_id : "b1", letter : "b", number : 1, foo: "bar"},
+ {_id : "b2", letter : "b", number : 2, foo: "bar"},
+ {_id : "b3", letter : "b", number : 3, foo: "bar"},
+ {_id : "b4", letter : "b", number : 4, foo: "bar"},
+ {_id : "b5", letter : "b", number : 5, foo: "bar"},
+ {_id : "c1", letter : "c", number : 1, foo: "bar"},
+ {_id : "c2", letter : "c", number : 2, foo: "bar"},
+    {_id : "c2", letter : "c", number : 2, foo: "bar"}
+ db.bulkSave(docs);
+
+ var check = function(startkey, offset) {
+ var opts = {startkey: startkey, descending: true};
+ T(db.view("test/offset", opts).offset == offset);
+ };
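+
+  // `offset` reports how many index rows precede the first returned row in
+  // the requested direction; with descending=true it is counted from the
+  // end of the index.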
+
+ [
+ [["c", 2], 0],
+ [["c", 1], 1],
+ [["b", 5], 2],
+ [["b", 4], 3],
+ [["b", 3], 4],
+ [["b", 2], 5],
+ [["b", 1], 6],
+ [["a", 3], 7],
+ [["a", 2], 8],
+ [["a", 1], 9]
+ ].forEach(function(row){ check(row[0], row[1]);});
+
+ var runTest = function () {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+
+ var designDoc = {
+ _id : "_design/test",
+ views : {
+ offset : {
+          map : "function(doc) { emit([doc.letter, doc.number], doc);}"
+ }
+ }
+ };
+ T(db.save(designDoc).ok);
+
+ var docs = [
+ {_id : "a1", letter : "a", number : 1, foo : "bar"},
+ {_id : "a2", letter : "a", number : 2, foo : "bar"},
+ {_id : "a3", letter : "a", number : 3, foo : "bar"},
+ {_id : "b1", letter : "b", number : 1, foo : "bar"},
+ {_id : "b2", letter : "b", number : 2, foo : "bar"},
+ {_id : "b3", letter : "b", number : 3, foo : "bar"},
+ {_id : "b4", letter : "b", number : 4, foo : "bar"},
+ {_id : "b5", letter : "b", number : 5, foo : "bar"},
+ {_id : "c1", letter : "c", number : 1, foo : "bar"},
+ {_id : "c2", letter : "c", number : 2, foo : "bar"}
+ ];
+ db.bulkSave(docs);
+
+ var res1 = db.view("test/offset", {
+ startkey: ["b",4], startkey_docid: "b4", endkey: ["b"],
+ limit: 2, descending: true, skip: 1
+    });
+
+ var res2 = db.view("test/offset", {startkey: ["c", 3]});
+ var res3 = db.view("test/offset", {
+ startkey: ["b", 6],
+ endkey: ["b", 7]
+ });
+
+ return res1.offset == 4 && res2.offset == docs.length && res3.offset == 8;
+
+ };
+
+ for(var i = 0; i < 15; i++) T(runTest());
+};
+
diff --git a/1.1.x/share/www/script/test/view_pagination.js b/1.1.x/share/www/script/test/view_pagination.js
new file mode 100644
index 00000000..ed3a7ee1
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_pagination.js
@@ -0,0 +1,147 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_pagination = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(0, 100);
+ db.bulkSave(docs);
+
+ var queryFun = function(doc) { emit(doc.integer, null); };
+ var i;
+
+ // page through the view ascending
+ for (i = 0; i < docs.length; i += 10) {
+ var queryResults = db.query(queryFun, null, {
+ startkey: i,
+ startkey_docid: i,
+ limit: 10
+ });
+ T(queryResults.rows.length == 10);
+ T(queryResults.total_rows == docs.length);
+ T(queryResults.offset == i);
+ var j;
+ for (j = 0; j < 10;j++) {
+ T(queryResults.rows[j].key == i + j);
+ }
+
+ // test aliases start_key and start_key_doc_id
+ queryResults = db.query(queryFun, null, {
+ start_key: i,
+ start_key_doc_id: i,
+ limit: 10
+ });
+ T(queryResults.rows.length == 10);
+ T(queryResults.total_rows == docs.length);
+ T(queryResults.offset == i);
+ for (j = 0; j < 10;j++) {
+ T(queryResults.rows[j].key == i + j);
+ }
+ }
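+
+  // One ascending page is equivalent to (illustrative):
+  //   ?startkey=10&startkey_docid=10&limit=10
+  // startkey_docid breaks ties when several rows share a key; the keys
+  // here are unique, so it merely pins the starting row.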
+
+ // page through the view descending
+ for (i = docs.length - 1; i >= 0; i -= 10) {
+ var queryResults = db.query(queryFun, null, {
+ startkey: i,
+ startkey_docid: i,
+ descending: true,
+ limit: 10
+ });
+ T(queryResults.rows.length == 10);
+ T(queryResults.total_rows == docs.length);
+ T(queryResults.offset == docs.length - i - 1);
+ var j;
+ for (j = 0; j < 10; j++) {
+ T(queryResults.rows[j].key == i - j);
+ }
+ }
+
+  // descending=false is a no-op; CouchDB should just ignore it.
+ for (i = 0; i < docs.length; i += 10) {
+ var queryResults = db.query(queryFun, null, {
+ startkey: i,
+ startkey_docid: i,
+ descending: false,
+ limit: 10
+ });
+ T(queryResults.rows.length == 10);
+ T(queryResults.total_rows == docs.length);
+ T(queryResults.offset == i);
+ var j;
+ for (j = 0; j < 10;j++) {
+ T(queryResults.rows[j].key == i + j);
+ }
+ }
+
+ function testEndkeyDocId(queryResults) {
+ T(queryResults.rows.length == 35);
+ T(queryResults.total_rows == docs.length);
+ T(queryResults.offset == 1);
+ T(queryResults.rows[0].id == "1");
+ T(queryResults.rows[1].id == "10");
+ T(queryResults.rows[2].id == "11");
+ T(queryResults.rows[3].id == "12");
+ T(queryResults.rows[4].id == "13");
+ T(queryResults.rows[5].id == "14");
+ T(queryResults.rows[6].id == "15");
+ T(queryResults.rows[7].id == "16");
+ T(queryResults.rows[8].id == "17");
+ T(queryResults.rows[9].id == "18");
+ T(queryResults.rows[10].id == "19");
+ T(queryResults.rows[11].id == "2");
+ T(queryResults.rows[12].id == "20");
+ T(queryResults.rows[13].id == "21");
+ T(queryResults.rows[14].id == "22");
+ T(queryResults.rows[15].id == "23");
+ T(queryResults.rows[16].id == "24");
+ T(queryResults.rows[17].id == "25");
+ T(queryResults.rows[18].id == "26");
+ T(queryResults.rows[19].id == "27");
+ T(queryResults.rows[20].id == "28");
+ T(queryResults.rows[21].id == "29");
+ T(queryResults.rows[22].id == "3");
+ T(queryResults.rows[23].id == "30");
+ T(queryResults.rows[24].id == "31");
+ T(queryResults.rows[25].id == "32");
+ T(queryResults.rows[26].id == "33");
+ T(queryResults.rows[27].id == "34");
+ T(queryResults.rows[28].id == "35");
+ T(queryResults.rows[29].id == "36");
+ T(queryResults.rows[30].id == "37");
+ T(queryResults.rows[31].id == "38");
+ T(queryResults.rows[32].id == "39");
+ T(queryResults.rows[33].id == "4");
+ T(queryResults.rows[34].id == "40");
+ }
+
+ // test endkey_docid
+ var queryResults = db.query(function(doc) { emit(null, null); }, null, {
+ startkey: null,
+ startkey_docid: 1,
+ endkey: null,
+ endkey_docid: 40
+ });
+ testEndkeyDocId(queryResults);
+
+ // test aliases end_key_doc_id and end_key
+ queryResults = db.query(function(doc) { emit(null, null); }, null, {
+ start_key: null,
+ start_key_doc_id: 1,
+ end_key: null,
+ end_key_doc_id: 40
+ });
+ testEndkeyDocId(queryResults);
+
+ };
diff --git a/1.1.x/share/www/script/test/view_sandboxing.js b/1.1.x/share/www/script/test/view_sandboxing.js
new file mode 100644
index 00000000..02951d9f
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_sandboxing.js
@@ -0,0 +1,140 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_sandboxing = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var doc = {integer: 1, string: "1", array: [1, 2, 3]};
+ T(db.save(doc).ok);
+/*
+ // make sure that attempting to change the document throws an error
+ var results = db.query(function(doc) {
+ doc.integer = 2;
+ emit(null, doc);
+ });
+ T(results.total_rows == 0);
+
+ var results = db.query(function(doc) {
+ doc.array[0] = 0;
+ emit(null, doc);
+ });
+ T(results.total_rows == 0);
+*/
+ // make sure that a view cannot invoke interpreter internals such as the
+ // garbage collector
+ var results = db.query(function(doc) {
+ gc();
+ emit(null, doc);
+ });
+ T(results.total_rows == 0);
+
+  // make sure that a view cannot access the map_funs array defined and
+  // used by the view server
+ var results = db.query(function(doc) { map_funs.push(1); emit(null, doc); });
+ T(results.total_rows == 0);
+
+  // make sure that a view cannot access the map_results array defined and
+  // used by the view server
+ var results = db.query(function(doc) { map_results.push(1); emit(null, doc); });
+ T(results.total_rows == 0);
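+
+  // In each of the cases above the map function throws inside the sandbox,
+  // the offending document is skipped, and the view builds with zero rows;
+  // interpreter and view-server internals stay out of reach.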
+
+  // regression test for COUCHDB-925: altering the 'doc' variable in one
+  // map function must not leak into other map functions
+ var ddoc = {
+ _id: "_design/foobar",
+ language: "javascript",
+ views: {
+ view1: {
+ map:
+ (function(doc) {
+ if (doc.values) {
+ doc.values = [666];
+ }
+ if (doc.tags) {
+ doc.tags.push("qwerty");
+ }
+ if (doc.tokens) {
+ doc.tokens["c"] = 3;
+ }
+ }).toString()
+ },
+ view2: {
+ map:
+ (function(doc) {
+ if (doc.values) {
+ emit(doc._id, doc.values);
+ }
+ if (doc.tags) {
+ emit(doc._id, doc.tags);
+ }
+ if (doc.tokens) {
+ emit(doc._id, doc.tokens);
+ }
+ }).toString()
+ }
+ }
+ };
+ var doc1 = {
+ _id: "doc1",
+ values: [1, 2, 3]
+ };
+ var doc2 = {
+ _id: "doc2",
+ tags: ["foo", "bar"],
+ tokens: {a: 1, b: 2}
+ };
+
+ db.deleteDb();
+ db.createDb();
+ T(db.save(ddoc).ok);
+ T(db.save(doc1).ok);
+ T(db.save(doc2).ok);
+
+ var view1Results = db.view(
+ "foobar/view1", {bypass_cache: Math.round(Math.random() * 1000)});
+ var view2Results = db.view(
+ "foobar/view2", {bypass_cache: Math.round(Math.random() * 1000)});
+
+ TEquals(0, view1Results.rows.length, "view1 has 0 rows");
+ TEquals(3, view2Results.rows.length, "view2 has 3 rows");
+
+ TEquals(doc1._id, view2Results.rows[0].key);
+ TEquals(doc2._id, view2Results.rows[1].key);
+ TEquals(doc2._id, view2Results.rows[2].key);
+
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=449657
+ TEquals(3, view2Results.rows[0].value.length,
+ "Warning: installed SpiderMonkey version doesn't allow sealing of arrays");
+ if (view2Results.rows[0].value.length === 3) {
+ TEquals(1, view2Results.rows[0].value[0]);
+ TEquals(2, view2Results.rows[0].value[1]);
+ TEquals(3, view2Results.rows[0].value[2]);
+ }
+
+ TEquals(1, view2Results.rows[1].value["a"]);
+ TEquals(2, view2Results.rows[1].value["b"]);
+ TEquals('undefined', typeof view2Results.rows[1].value["c"],
+ "doc2.tokens object was not sealed");
+
+ TEquals(2, view2Results.rows[2].value.length,
+ "Warning: installed SpiderMonkey version doesn't allow sealing of arrays");
+ if (view2Results.rows[2].value.length === 2) {
+ TEquals("foo", view2Results.rows[2].value[0]);
+ TEquals("bar", view2Results.rows[2].value[1]);
+ }
+
+ // cleanup
+ db.deleteDb();
+};
diff --git a/1.1.x/share/www/script/test/view_update_seq.js b/1.1.x/share/www/script/test/view_update_seq.js
new file mode 100644
index 00000000..69b8c42d
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_update_seq.js
@@ -0,0 +1,106 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_update_seq = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"true"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ T(db.info().update_seq == 0);
+
+ var resp = db.allDocs({update_seq:true});
+
+ T(resp.rows.length == 0);
+ T(resp.update_seq == 0);
+
+ var designDoc = {
+ _id:"_design/test",
+ language: "javascript",
+ views: {
+ all_docs: {
+ map: "function(doc) { emit(doc.integer, doc.string) }"
+ },
+ summate: {
+ map:"function (doc) {emit(doc.integer, doc.integer)};",
+ reduce:"function (keys, values) { return sum(values); };"
+ }
+ }
+ };
+ T(db.save(designDoc).ok);
+
+ T(db.info().update_seq == 1);
+
+ resp = db.allDocs({update_seq:true});
+
+ T(resp.rows.length == 1);
+ T(resp.update_seq == 1);
+
+ var docs = makeDocs(0, 100);
+ db.bulkSave(docs);
+
+ resp = db.allDocs({limit: 1});
+ T(resp.rows.length == 1);
+ T(!resp.update_seq, "all docs");
+
+ resp = db.allDocs({limit: 1, update_seq:true});
+ T(resp.rows.length == 1);
+ T(resp.update_seq == 101);
+
+ resp = db.view('test/all_docs', {limit: 1, update_seq:true});
+ T(resp.rows.length == 1);
+ T(resp.update_seq == 101);
+
+ resp = db.view('test/all_docs', {limit: 1, update_seq:false});
+ T(resp.rows.length == 1);
+ T(!resp.update_seq, "view");
+
+ resp = db.view('test/summate', {update_seq:true});
+ T(resp.rows.length == 1);
+ T(resp.update_seq == 101);
+
+  db.save({"id":"0"}); // note: "id", not "_id" -- a fresh doc that just bumps update_seq
+ resp = db.view('test/all_docs', {limit: 1,stale: "ok", update_seq:true});
+ T(resp.rows.length == 1);
+ T(resp.update_seq == 101);
+
+ db.save({"id":"00"});
+ resp = db.view('test/all_docs',
+ {limit: 1, stale: "update_after", update_seq: true});
+ T(resp.rows.length == 1);
+ T(resp.update_seq == 101);
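+
+  // stale=ok answers from the existing index without updating it, so the
+  // two saves above are not reflected yet; stale=update_after also answers
+  // from the stale index but schedules an index update afterwards, which
+  // is why update_seq reaches 103 once the updater catches up.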
+
+  // wait 5 seconds so the next assertions pass even on very slow machines
+ var t0 = new Date(), t1;
+ do {
+ CouchDB.request("GET", "/");
+ t1 = new Date();
+ } while ((t1 - t0) < 5000);
+
+ resp = db.view('test/all_docs', {limit: 1, stale: "ok", update_seq: true});
+ T(resp.rows.length == 1);
+ T(resp.update_seq == 103);
+
+ resp = db.view('test/all_docs', {limit: 1, update_seq:true});
+ T(resp.rows.length == 1);
+ T(resp.update_seq == 103);
+
+ resp = db.view('test/all_docs',{update_seq:true},["0","1"]);
+ T(resp.update_seq == 103);
+
+ resp = db.view('test/all_docs',{update_seq:true},["0","1"]);
+ T(resp.update_seq == 103);
+
+ resp = db.view('test/summate',{group:true, update_seq:true},["0","1"]);
+ T(resp.update_seq == 103);
+
+};
diff --git a/1.1.x/share/www/script/test/view_xml.js b/1.1.x/share/www/script/test/view_xml.js
new file mode 100644
index 00000000..3403b47c
--- /dev/null
+++ b/1.1.x/share/www/script/test/view_xml.js
@@ -0,0 +1,39 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.view_xml = function(debug) {
+ var db = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"false"});
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ db.save({content: "<doc><title id='xml'>Testing XML</title></doc>"});
+ db.save({content: "<doc><title id='e4x'>Testing E4X</title></doc>"});
+
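+  // These map functions use E4X (ECMAScript for XML), which the
+  // SpiderMonkey-based view server supports: new XML(...) parses the
+  // string, xml.title.text() reads the element text, and xml.title.@id
+  // reads the id attribute.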
+ var results = db.query(
+ "function(doc) {\n" +
+ " var xml = new XML(doc.content);\n" +
+ " emit(xml.title.text().toXMLString(), null);\n" +
+ "}");
+ T(results.total_rows == 2);
+ T(results.rows[0].key == "Testing E4X");
+ T(results.rows[1].key == "Testing XML");
+
+ var results = db.query(
+ "function(doc) {\n" +
+ " var xml = new XML(doc.content);\n" +
+ " emit(xml.title.@id.toXMLString(), null);\n" +
+ "}");
+ T(results.total_rows == 2);
+ T(results.rows[0].key == "e4x");
+ T(results.rows[1].key == "xml");
+};
diff --git a/1.1.x/share/www/session.html b/1.1.x/share/www/session.html
new file mode 100644
index 00000000..0ebd943d
--- /dev/null
+++ b/1.1.x/share/www/session.html
@@ -0,0 +1,96 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Session</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css?0.11.0" type="text/css">
+ <script src="script/json2.js"></script>
+ <script src="script/sha1.js"></script>
+ <script src="script/jquery.js?1.4.2"></script>
+ <script src="script/jquery.couch.js?0.11.0"></script>
+ <script src="script/jquery.dialog.js?0.11.0"></script>
+ <script src="script/futon.js?0.11.0"></script>
+ <script src="script/futon.browse.js?0.11.0"></script>
+ <script src="script/futon.format.js?0.11.0"></script>
+ <script>
+ $(function() {
+ var ret, reason, q = window.location.search, qps = q.split("&");
+ $.map(qps, function(qp) {
+ var m = qp.match(/return=(.*)/);
+ if (m) {
+ ret = decodeURIComponent(m[1]);
+ }
+ m = qp.match(/reason=(.*)/);
+ if (m) {
+ reason = $.futon.escape(decodeURIComponent(m[1]));
+ }
+ });
+ if (reason) {
+ $("#aboutSession").append('<p>The application says: <em>'+reason+'</em></p>');
+ }
+ if (ret) {
+ $("#aboutSession").append($('<p>Once you are logged in, click this link to return to your application: </p>').append($("<a></a>").attr("href", ret).text(ret)));
+        // TODO: this should look different if the user is already logged
+        // in, e.g. a note saying they are logged in but lack access here.
+ }
+      // show the sidebar's session controls inline, without the sidebar itself
+ $.futon.storage.set("sidebar", "hidden");
+ setTimeout(function() {
+ var ctx = $$("#userCtx").userCtx;
+ $.futon.storage.set("sidebar", "show");
+ if (ctx && ctx.name) {
+          $("#aboutSession").append("<p>It looks like you are logged in; maybe you don't have access to that URL.</p>");
+ }
+ },100);
+ });
+ </script>
+ </head>
+ <body>
+ <div id="wrap">
+ <h1><a href="index.html">Overview</a>
+ <strong>Session</strong></h1>
+ <div id="content">
+ <h2>Establish or Modify Your Session</h2>
+ <div id="loginSignup">
+ <div id="aboutSession"></div>
+ <span id="userCtx">
+ <span class="loggedout">
+ <a href="#" class="signup">Signup</a> or <a href="#" class="login">Login</a>
+ </span>
+ <span class="loggedin">
+ Welcome <a class="name">?</a>!
+ <br/>
+ <a href="#" class="logout">Logout</a>
+ </span>
+ <span class="loggedinadmin">
+ Welcome <a class="name">?</a>!
+ <br/>
+ <a href="#" class="createadmin">Setup more admins</a> or
+ <a href="#" class="logout">Logout</a>
+ </span>
+ <span class="adminparty">
+ Welcome to Admin Party!
+ <br/>
+ Everyone is admin. <a href="#" class="createadmin">Fix this</a>
+ </span>
+ </span>
+ </div>
+ </div>
+
+ </div>
+ </body>
+</html>
diff --git a/1.1.x/share/www/spec/couch_js_class_methods_spec.js b/1.1.x/share/www/spec/couch_js_class_methods_spec.js
new file mode 100644
index 00000000..7eac2348
--- /dev/null
+++ b/1.1.x/share/www/spec/couch_js_class_methods_spec.js
@@ -0,0 +1,401 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Specs for couch.js lines 313-470
+
+describe 'CouchDB class'
+ describe 'session stuff'
+ before
+ useTestUserDb();
+ end
+
+ after
+ useOldUserDb();
+ end
+
+ before_each
+ userDoc = users_db.save(CouchDB.prepareUserDoc({name: "Gaius Baltar", roles: ["president"]}, "secretpass"));
+ end
+
+ after_each
+ users_db.deleteDoc({_id : userDoc.id, _rev : userDoc.rev})
+ end
+
+ describe '.login'
+ it 'should return ok true'
+ CouchDB.login("Gaius Baltar", "secretpass").ok.should.be_true
+ end
+
+ it 'should return the name of the logged in user'
+ CouchDB.login("Gaius Baltar", "secretpass").name.should.eql "Gaius Baltar"
+ end
+
+ it 'should return the roles of the logged in user'
+ CouchDB.login("Gaius Baltar", "secretpass").roles.should.eql ["president"]
+ end
+
+ it 'should post _session'
+ CouchDB.should.receive("request", "once").with_args("POST", "/_session")
+ CouchDB.login("Gaius Baltar", "secretpass");
+ end
+
+ it 'should create a session'
+ CouchDB.login("Gaius Baltar", "secretpass");
+ CouchDB.session().userCtx.name.should.eql "Gaius Baltar"
+ end
+ end
+
+ describe '.logout'
+ before_each
+ CouchDB.login("Gaius Baltar", "secretpass");
+ end
+
+ it 'should return ok true'
+ CouchDB.logout().ok.should.be_true
+ end
+
+ it 'should delete _session'
+ CouchDB.should.receive("request", "once").with_args("DELETE", "/_session")
+ CouchDB.logout();
+ end
+
+ it 'should result in an invalid session'
+ CouchDB.logout();
+ CouchDB.session().name.should.be_null
+ end
+ end
+
+ describe '.session'
+ before_each
+ CouchDB.login("Gaius Baltar", "secretpass");
+ end
+
+ it 'should return ok true'
+ CouchDB.session().ok.should.be_true
+ end
+
+ it 'should return the users name'
+ CouchDB.session().userCtx.name.should.eql "Gaius Baltar"
+ end
+
+ it 'should return the users roles'
+ CouchDB.session().userCtx.roles.should.eql ["president"]
+ end
+
+ it 'should return the name of the authentication db'
+ CouchDB.session().info.authentication_db.should.eql "spec_users_db"
+ end
+
+ it 'should return the active authentication handler'
+ CouchDB.session().info.authenticated.should.eql "cookie"
+ end
+ end
+ end
+
+ describe 'db stuff'
+ before_each
+ db = new CouchDB("spec_db", {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ end
+
+ after_each
+ db.deleteDb();
+ end
+
+ describe '.prepareUserDoc'
+ before_each
+ userDoc = CouchDB.prepareUserDoc({name: "Laura Roslin"}, "secretpass");
+ end
+
+ it 'should return the users name'
+ userDoc.name.should.eql "Laura Roslin"
+ end
+
+ it 'should prefix the id with the CouchDB user_prefix'
+ userDoc._id.should.eql "org.couchdb.user:Laura Roslin"
+ end
+
+ it 'should return the users roles'
+ var userDocWithRoles = CouchDB.prepareUserDoc({name: "William Adama", roles: ["admiral", "commander"]}, "secretpass")
+ userDocWithRoles.roles.should.eql ["admiral", "commander"]
+ end
+
+ it 'should return the hashed password'
+ userDoc.password_sha.length.should.be_at_least 30
+ userDoc.password_sha.should.be_a String
+ end
+ end
+
+ describe '.allDbs'
+ it 'should get _all_dbs'
+ CouchDB.should.receive("request", "once").with_args("GET", "/_all_dbs");
+ CouchDB.allDbs();
+ end
+
+ it 'should return an array that includes a created database'
+ temp_db = new CouchDB("temp_spec_db", {"X-Couch-Full-Commit":"false"});
+ temp_db.createDb();
+ CouchDB.allDbs().should.include("temp_spec_db");
+ temp_db.deleteDb();
+ end
+
+ it 'should return an array that does not include a database that does not exist'
+ CouchDB.allDbs().should.not.include("not_existing_temp_spec_db");
+ end
+ end
+
+ describe '.allDesignDocs'
+ it 'should return the total number of documents'
+ CouchDB.allDesignDocs().spec_db.total_rows.should.eql 0
+ db.save({'type':'battlestar', 'name':'galactica'});
+ CouchDB.allDesignDocs().spec_db.total_rows.should.eql 1
+ end
+
+ it 'should return undefined when the db does not exist'
+ CouchDB.allDesignDocs().non_existing_db.should.be_undefined
+ end
+
+ it 'should return no documents when there are no design documents'
+ CouchDB.allDesignDocs().spec_db.rows.should.eql []
+ end
+
+ it 'should return all design documents'
+ var designDoc = {
+ "views" : {
+ "people" : {
+ "map" : "function(doc) { emit(doc._id, doc); }"
+ }
+ },
+ "_id" : "_design/spec_db"
+ };
+ db.save(designDoc);
+
+ var allDesignDocs = CouchDB.allDesignDocs();
+ allDesignDocs.spec_db.rows[0].id.should.eql "_design/spec_db"
+ allDesignDocs.spec_db.rows[0].key.should.eql "_design/spec_db"
+ allDesignDocs.spec_db.rows[0].value.rev.length.should.be_at_least 30
+ end
+ end
+
+ describe '.getVersion'
+ it 'should get the CouchDB version'
+ CouchDB.should.receive("request", "once").with_args("GET", "/")
+ CouchDB.getVersion();
+ end
+
+ it 'should return the CouchDB version'
+ CouchDB.getVersion().should_match /^\d\d?\.\d\d?\.\d\d?.*/
+ end
+ end
+
+ describe '.replicate'
+ before_each
+ db2 = new CouchDB("spec_db_2", {"X-Couch-Full-Commit":"false"});
+ db2.createDb();
+ host = window.location.protocol + "//" + window.location.host ;
+ end
+
+ after_each
+ db2.deleteDb();
+ end
+
+ it 'should return no_changes true when there are no changes between the dbs'
+ CouchDB.replicate(host + db.uri, host + db2.uri).no_changes.should.be_true
+ end
+
+ it 'should return the session ID'
+ db.save({'type':'battlestar', 'name':'galactica'});
+ CouchDB.replicate(host + db.uri, host + db2.uri).session_id.length.should.be_at_least 30
+ end
+
+ it 'should return source_last_seq'
+ db.save({'type':'battlestar', 'name':'galactica'});
+ db.save({'type':'battlestar', 'name':'pegasus'});
+
+ CouchDB.replicate(host + db.uri, host + db2.uri).source_last_seq.should.eql 2
+ end
+
+ it 'should return the replication history'
+ db.save({'type':'battlestar', 'name':'galactica'});
+ db.save({'type':'battlestar', 'name':'pegasus'});
+
+ var result = CouchDB.replicate(host + db.uri, host + db2.uri);
+ result.history[0].docs_written.should.eql 2
+ result.history[0].start_last_seq.should.eql 0
+ end
+
+ it 'should pass through replication options'
+ db.save({'type':'battlestar', 'name':'galactica'});
+ db2.deleteDb();
+ -{CouchDB.replicate(host + db.uri, host + db2.uri)}.should.throw_error
+ var result = CouchDB.replicate(host + db.uri, host + db2.uri, {"body" : {"create_target":true}});
+
+ result.ok.should.eql true
+ result.history[0].docs_written.should.eql 1
+ db2.info().db_name.should.eql "spec_db_2"
+ end
+ end
+
+ describe '.newXhr'
+    it 'should return an XMLHttpRequest'
+ CouchDB.newXhr().should.have_prop 'readyState'
+ CouchDB.newXhr().should.have_prop 'responseText'
+ CouchDB.newXhr().should.have_prop 'status'
+ end
+ end
+
+ describe '.request'
+ it 'should return a XMLHttpRequest'
+ var req = CouchDB.request("GET", '/');
+ req.should.include "readyState"
+ req.should.include "responseText"
+ req.should.include "statusText"
+ end
+
+ it 'should pass through the options headers'
+ var xhr = CouchDB.newXhr();
+ stub(CouchDB, 'newXhr').and_return(xhr);
+
+ xhr.should.receive("setRequestHeader", "once").with_args("X-Couch-Full-Commit", "true")
+ CouchDB.request("GET", "/", {'headers': {"X-Couch-Full-Commit":"true"}});
+ end
+
+ it 'should pass through the options body'
+ var xhr = CouchDB.newXhr();
+ stub(CouchDB, 'newXhr').and_return(xhr);
+
+ xhr.should.receive("send", "once").with_args({"body_key":"body_value"})
+ CouchDB.request("GET", "/", {'body': {"body_key":"body_value"}});
+ end
+
+ it 'should prepend the urlPrefix to the uri'
+ var oldPrefix = CouchDB.urlPrefix;
+ CouchDB.urlPrefix = "/_utils";
+
+ var xhr = CouchDB.newXhr();
+ stub(CouchDB, 'newXhr').and_return(xhr);
+
+ xhr.should.receive("open", "once").with_args("GET", "/_utils/", false)
+ CouchDB.request("GET", "/", {'headers': {"X-Couch-Full-Commit":"true"}});
+
+ CouchDB.urlPrefix = oldPrefix;
+ end
+ end
+
+ describe '.requestStats'
+ it 'should get the stats for specified module and key'
+ var stats = CouchDB.requestStats('couchdb', 'open_databases', null);
+ stats.description.should.eql 'number of open databases'
+ stats.current.should.be_a Number
+ end
+
+ it 'should add flush true to the request when there is a test argument'
+ CouchDB.should.receive("request", "once").with_args("GET", "/_stats/httpd/requests?flush=true")
+ CouchDB.requestStats('httpd', 'requests', 'test');
+ end
+
+ it 'should still work when there is a test argument'
+ var stats = CouchDB.requestStats('httpd_status_codes', '200', 'test');
+ stats.description.should.eql 'number of HTTP 200 OK responses'
+ stats.sum.should.be_a Number
+ end
+ end
+
+ describe '.newUuids'
+ after_each
+ CouchDB.uuids_cache = [];
+ end
+
+ it 'should return the specified amount of uuids'
+ var uuids = CouchDB.newUuids(45);
+ uuids.should.have_length 45
+ end
+
+ it 'should return an array with uuids'
+ var uuids = CouchDB.newUuids(1);
+ uuids[0].should.be_a String
+ uuids[0].should.have_length 32
+ end
+
+    it 'should leave the uuids_cache with 100 uuids when no buffer size is specified'
+ CouchDB.newUuids(23);
+ CouchDB.uuids_cache.should.have_length 100
+ end
+
+ it 'should leave the uuids_cache with the specified buffer size'
+ CouchDB.newUuids(23, 150);
+ CouchDB.uuids_cache.should.have_length 150
+ end
+
+ it 'should get the uuids from the uuids_cache when there are enough uuids in there'
+ CouchDB.newUuids(10);
+ CouchDB.newUuids(25);
+ CouchDB.uuids_cache.should.have_length 75
+ end
+
+ it 'should create new uuids and add as many as specified to the uuids_cache when there are not enough uuids in the cache'
+ CouchDB.newUuids(10);
+ CouchDB.newUuids(125, 60);
+ CouchDB.uuids_cache.should.have_length 160
+ end
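+
+    // Apparent refill rule, inferred from the expectations above: when the
+    // cache cannot cover a request for `count` uuids, the client fetches
+    // count + bufferSize (default 100) new ones, returns `count`, and
+    // caches the remainder -- e.g. newUuids(125, 60) on a cache of 100
+    // fetches 185 and leaves 160 behind.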
+ end
+
+ describe '.maybeThrowError'
+ it 'should throw an error when the request has status 404'
+ var req = CouchDB.request("GET", "/nonexisting_db");
+ -{CouchDB.maybeThrowError(req)}.should.throw_error
+ end
+
+ it 'should throw an error when the request has status 412'
+ var req = CouchDB.request("PUT", "/spec_db");
+ -{CouchDB.maybeThrowError(req)}.should.throw_error
+ end
+
+ it 'should throw an error when the request has status 405'
+ var req = CouchDB.request("DELETE", "/_utils");
+ -{CouchDB.maybeThrowError(req)}.should.throw_error
+ end
+
+ it 'should throw the responseText of the request'
+ var req = CouchDB.request("GET", "/nonexisting_db");
+ try {
+ CouchDB.maybeThrowError(req)
+ } catch(e) {
+ e.error.should.eql JSON.parse(req.responseText).error
+ e.reason.should.eql JSON.parse(req.responseText).reason
+ }
+ end
+
+ it 'should throw an unknown error when the responseText is invalid json'
+ mock_request().and_return("invalid json...", "application/json", 404, {})
+ try {
+ CouchDB.maybeThrowError(CouchDB.newXhr())
+ } catch(e) {
+ e.error.should.eql "unknown"
+ e.reason.should.eql "invalid json..."
+ }
+ end
+ end
+
+ describe '.params'
+ it 'should turn a json object into a http params string'
+ var params = CouchDB.params({"president":"laura", "cag":"lee"})
+ params.should.eql "president=laura&cag=lee"
+ end
+
+ it 'should return a blank string when the object is empty'
+ var params = CouchDB.params({})
+ params.should.eql ""
+ end
+ end
+ end
+end
\ No newline at end of file
diff --git a/1.1.x/share/www/spec/couch_js_instance_methods_1_spec.js b/1.1.x/share/www/spec/couch_js_instance_methods_1_spec.js
new file mode 100644
index 00000000..7f23bd2c
--- /dev/null
+++ b/1.1.x/share/www/spec/couch_js_instance_methods_1_spec.js
@@ -0,0 +1,311 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Specs for couch.js lines 1-130
+
+describe 'CouchDB instance'
+ before_each
+ db = new CouchDB("spec_db", {"X-Couch-Full-Commit":"false"});
+ end
+
+ describe '.request'
+ before_each
+ db.createDb();
+ end
+
+ after_each
+ db.deleteDb();
+ end
+
+ it 'should return a XMLHttpRequest'
+ var req = db.request("GET", "/spec_db");
+ req.should.include "readyState"
+ req.should.include "responseText"
+ req.should.include "statusText"
+ // in Safari a XMLHttpRequest is actually a XMLHttpRequestConstructor,
+ // otherwise we could just do:
+ // req.should.be_a XMLHttpRequest
+ end
+
+ it 'should set the options the CouchDB instance has got as httpHeaders'
+ CouchDB.should.receive("request", "once").with_args("GET", "/spec_db", {headers: {"X-Couch-Full-Commit": "false"}})
+ db.request("GET", "/spec_db");
+ end
+
+ it 'should pass through the options'
+ CouchDB.should.receive("request", "once").with_args("GET", "/spec_db", {"X-Couch-Persist": "true", headers: {"X-Couch-Full-Commit": "false"}})
+ db.request("GET", "/spec_db", {"X-Couch-Persist":"true"});
+ end
+ end
+
+ describe '.createDb'
+ after_each
+ db.deleteDb();
+ end
+
+ it 'should create the db'
+ db.createDb();
+ db.last_req.status.should.eql 201
+ end
+
+ it 'should return the ok true'
+ db.createDb().should.eql {"ok" : true}
+ end
+
+ it 'should result in a created db'
+ db.createDb();
+ try{
+ db.createDb();
+ } catch(e) {
+ e.error.should.eql "file_exists"
+ }
+ end
+
+    it 'should have created a db with update sequence 0'
+ db.createDb();
+ db.info().update_seq.should.eql 0
+ end
+ end
+
+ describe '.deleteDb'
+ before_each
+ db.createDb();
+ end
+
+ it 'should delete the db'
+ db.deleteDb();
+ db.last_req.status.should.eql 200
+ end
+
+ it 'should return the responseText of the request'
+ db.deleteDb().should.eql {"ok" : true}
+ end
+
+ it 'should result in a deleted db'
+ db.deleteDb();
+ db.deleteDb();
+ db.last_req.status.should.eql 404
+ end
+ end
+
+ describe 'document methods'
+ before_each
+ doc = {"Name" : "Kara Thrace", "Callsign" : "Starbuck"};
+ db.createDb();
+ end
+
+ after_each
+ db.deleteDb();
+ end
+
+ describe '.save'
+ it 'should save the document'
+ db.save(doc);
+ db.last_req.status.should.eql 201
+ end
+
+ it 'should return ok true'
+ db.save(doc).ok.should.be_true
+ end
+
+ it 'should return ID and revision of the document'
+ var response = db.save(doc);
+ response.id.should.be_a String
+ response.id.should.have_length 32
+ response.rev.should.be_a String
+ response.rev.length.should.be_at_least 30
+ end
+
+ it 'should result in a saved document with generated ID'
+ var response = db.save(doc);
+ var saved_doc = db.open(response.id);
+ saved_doc.Name.should.eql "Kara Thrace"
+ saved_doc.Callsign.should.eql "Starbuck"
+ end
+
+ it 'should save the document with the specified ID'
+ doc._id = "123";
+ var response = db.save(doc);
+ response.id.should.eql "123"
+ end
+
+ it 'should pass through the options'
+ doc._id = "123";
+ CouchDB.should.receive("request", "once").with_args("PUT", "/spec_db/123?batch=ok")
+ db.save(doc, {"batch" : "ok"});
+ end
+ end
+
+ describe '.open'
+ before_each
+ doc._id = "123";
+ db.save(doc);
+ end
+
+ it 'should open the document'
+ db.open("123").should.eql doc
+ end
+
+ it 'should return null when there is no document with the given ID'
+ db.open("non_existing").should.be_null
+ end
+
+ it 'should pass through the options'
+ CouchDB.should.receive("request", "once").with_args("GET", "/spec_db/123?revs=true")
+ db.open("123", {"revs" : "true"});
+ end
+ end
+
+ describe '.deleteDoc'
+ before_each
+ doc._id = "123";
+ saved_doc = db.save(doc);
+ delete_response = db.deleteDoc({_id : "123", _rev : saved_doc.rev});
+ delete_last_req = db.last_req;
+ db.open("123");
+ end
+
+ it 'should send a successful request'
+ delete_last_req.status.should.eql 200
+ end
+
+ it 'should result in a deleted document'
+ db.open("123").should.be_null
+ end
+
+ it 'should return ok true, the ID and the revision of the deleted document'
+ delete_response.ok.should.be_true
+ delete_response.id.should.eql "123"
+ delete_response.rev.should.be_a String
+ delete_response.rev.length.should.be_at_least 30
+ end
+
+ it 'should mark the document as deleted'
+ var responseText = db.request("GET", "/spec_db/123").responseText;
+ JSON.parse(responseText).should.eql {"error":"not_found","reason":"deleted"}
+ end
+
+ it 'should record the revision in the deleted document'
+ var responseText = db.request("GET", "/spec_db/123?rev=" + delete_response.rev).responseText;
+ var deleted_doc = JSON.parse(responseText);
+ deleted_doc._rev.should.eql delete_response.rev
+ deleted_doc._id.should.eql delete_response.id
+ deleted_doc._deleted.should.be_true
+ end
+ end
+
+ describe '.deleteDocAttachment'
+ before_each
+ doc._id = "123";
+ doc._attachments = {
+ "friend.txt" : {
+ "content_type": "text\/plain",
+ // base64 encoded
+ "data": "TGVlIEFkYW1hIGlzIGEgZm9ybWVyIENvbG9uaWFsIEZsZWV0IFJlc2VydmUgb2ZmaWNlci4="
+ }
+ };
+ saved_doc = db.save(doc);
+ end
+
+ it 'should be executed on a document with attachment'
+ db.open("123")._attachments.should.include "friend.txt"
+ db.open("123")._attachments["friend.txt"].stub.should.be_true
+ end
+
+ describe 'after delete'
+ before_each
+ delete_response = db.deleteDocAttachment({_id : "123", _rev : saved_doc.rev}, "friend.txt");
+ db.open("123");
+ end
+
+ it 'should send a successful request'
+ db.last_req.status.should.eql 200
+ end
+
+ it 'should leave the document untouched'
+ db.open("123").Callsign.should.eql "Starbuck"
+ end
+
+ it 'should result in a deleted document attachment'
+ db.open("123").should.not.include "_attachments"
+ end
+
+ it 'should record the revision in the document whose attachment has been deleted'
+ var responseText = db.request("GET", "/spec_db/123?rev=" + delete_response.rev).responseText;
+ var deleted_doc = JSON.parse(responseText);
+ deleted_doc._rev.should.eql delete_response.rev
+ deleted_doc._id.should.eql delete_response.id
+ end
+
+ it 'should return ok true, the ID and the revision of the document whose attachment has been deleted'
+ delete_response.ok.should.be_true
+ delete_response.id.should.eql "123"
+ delete_response.should.have_property 'rev'
+ end
+ end
+ end
+
+ describe '.bulkSave'
+ before_each
+ doc = {"Name" : "Kara Thrace", "Callsign" : "Starbuck"};
+ doc2 = {"Name" : "Karl C. Agathon", "Callsign" : "Helo"};
+ doc3 = {"Name" : "Sharon Valerii", "Callsign" : "Boomer"};
+ docs = [doc, doc2, doc3];
+ end
+
+ it 'should save the documents'
+ db.bulkSave(docs);
+ db.last_req.status.should.eql 201
+ end
+
+ it 'should return ID and revision of the documents'
+ var response = db.bulkSave(docs);
+ response[0].id.should.be_a String
+ response[0].id.should.have_length 32
+ response[0].rev.should.be_a String
+ response[0].rev.length.should.be_at_least 30
+ response[1].id.should.be_a String
+ response[1].id.should.have_length 32
+ response[1].rev.should.be_a String
+ response[1].rev.length.should.be_at_least 30
+ response[2].id.should.be_a String
+ response[2].id.should.have_length 32
+ response[2].rev.should.be_a String
+ response[2].rev.length.should.be_at_least 30
+ end
+
+ it 'should result in saved documents'
+ var response = db.bulkSave(docs);
+ db.open(response[0].id).Name.should.eql "Kara Thrace"
+ db.open(response[1].id).Name.should.eql "Karl C. Agathon"
+ db.open(response[2].id).Name.should.eql "Sharon Valerii"
+ end
+
+ it 'should save the document with specified IDs'
+ doc._id = "123";
+ doc2._id = "456";
+ docs = [doc, doc2, doc3];
+ var response = db.bulkSave(docs);
+ response[0].id.should.eql "123"
+ response[1].id.should.eql "456"
+ response[2].id.should.have_length 32
+ end
+
+ it 'should pass through the options'
+ doc._id = "123";
+ docs = [doc];
+ CouchDB.should.receive("request", "once").with_args("POST", "/spec_db/_bulk_docs", {body: '{"docs":[{"Name":"Kara Thrace","Callsign":"Starbuck","_id":"123"}],"batch":"ok"}'})
+ db.bulkSave(docs, {"batch" : "ok"});
+ end
+ end
+ end
+end
\ No newline at end of file
diff --git a/1.1.x/share/www/spec/couch_js_instance_methods_2_spec.js b/1.1.x/share/www/spec/couch_js_instance_methods_2_spec.js
new file mode 100644
index 00000000..76df6368
--- /dev/null
+++ b/1.1.x/share/www/spec/couch_js_instance_methods_2_spec.js
@@ -0,0 +1,246 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Specs for couch.js lines 132-199
+
+describe 'CouchDB instance'
+ before_each
+ db = new CouchDB("spec_db", {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ end
+
+ after_each
+ db.deleteDb();
+ end
+
+ describe '.ensureFullCommit'
+ it 'should return ok true'
+ db.ensureFullCommit().ok.should.be_true
+ end
+
+ it 'should return the instance start time'
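+ // instance_start_time is a 16-digit microseconds-since-epoch string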
+ db.ensureFullCommit().instance_start_time.should.have_length 16
+ end
+
+ it 'should post _ensure_full_commit to the db'
+ db.should.receive("request", "once").with_args("POST", "/spec_db/_ensure_full_commit")
+ db.ensureFullCommit();
+ end
+ end
+
+ describe '.query'
+ before_each
+ db.save({"Name" : "Cally Tyrol", "job" : "deckhand", "_id" : "789"});
+ db.save({"Name" : "Felix Gaeta", "job" : "officer", "_id" : "123"});
+ db.save({"Name" : "Samuel T. Anders", "job" : "pilot", "_id" : "456"});
+ map_function = "function(doc) { emit(doc._id, 1); }";
+ reduce_function = "function(key, values, rereduce) { return sum(values); }";
+ end
+
+ it 'should apply the map function'
+ var result = db.query(map_function);
+
+ result.rows.should.have_length 3
+ result.rows[0].id.should.eql "123"
+ result.rows[0].key.should.eql "123"
+ result.rows[0].value.should.eql 1
+ result.rows[1].id.should.eql "456"
+ result.rows[1].key.should.eql "456"
+ result.rows[1].value.should.eql 1
+ result.rows[2].id.should.eql "789"
+ result.rows[2].key.should.eql "789"
+ result.rows[2].value.should.eql 1
+ end
+
+ it 'should apply the reduce function'
+ var result = db.query(map_function, reduce_function);
+
+ result.rows.should.have_length 1
+ result.rows[0].key.should.be_null
+ result.rows[0].value.should.eql 3
+ end
+
+ it 'should pass through the options'
+ var result = db.query(map_function, null, {"startkey":"456"});
+
+ result.rows.should.have_length 2
+ result.rows[0].id.should.eql "456"
+ result.rows[0].key.should.eql "456"
+ result.rows[0].value.should.eql 1
+ result.rows[1].id.should.eql "789"
+ result.rows[1].key.should.eql "789"
+ result.rows[1].value.should.eql 1
+ end
+
+ it 'should pass through the keys'
+ var result = db.query(map_function, null, {}, ["456", "123"]);
+
+ result.rows.should.have_length 2
+ result.rows[0].id.should.eql "456"
+ result.rows[0].key.should.eql "456"
+ result.rows[0].value.should.eql 1
+ result.rows[1].id.should.eql "123"
+ result.rows[1].key.should.eql "123"
+ result.rows[1].value.should.eql 1
+ end
+
+ it 'should pass through the options and the keys'
+ var result = db.query(map_function, null, {"include_docs":"true"}, ["456"]);
+
+ result.rows.should.have_length 1
+ result.rows[0].id.should.eql "456"
+ result.rows[0].key.should.eql "456"
+ result.rows[0].value.should.eql 1
+ result.rows[0].doc["job"].should.eql "pilot"
+ result.rows[0].doc["_rev"].length.should.be_at_least 30
+ end
+
+ it 'should also apply a view in erlang'
+ // when this test fails, read this: http://wiki.apache.org/couchdb/EnableErlangViews
+ var erlang_map = 'fun({Doc}) -> ' +
+ 'ID = proplists:get_value(<<"_id">>, Doc, null), ' +
+ 'Emit(ID, 1) ' +
+ 'end.';
+ var result = db.query(erlang_map, null, null, null, "erlang");
+
+ result.rows.should.have_length 3
+ result.rows[0].id.should.eql "123"
+ result.rows[0].key.should.eql "123"
+ result.rows[0].value.should.eql 1
+ result.rows[1].id.should.eql "456"
+ result.rows[1].key.should.eql "456"
+ result.rows[1].value.should.eql 1
+ result.rows[2].id.should.eql "789"
+ result.rows[2].key.should.eql "789"
+ result.rows[2].value.should.eql 1
+ end
+ end
+
+ describe '.view'
+ before_each
+ db.save({"Name" : "Cally Tyrol", "job" : "deckhand", "_id" : "789"});
+ db.save({"Name" : "Felix Gaeta", "job" : "officer", "_id" : "123"});
+ db.save({"Name" : "Samuel T. Anders", "job" : "pilot", "_id" : "456"});
+ view = {
+ "views" : {
+ "people" : {
+ "map" : "function(doc) { emit(doc._id, doc.Name); }"
+ }
+ },
+ "_id" : "_design/spec_db"
+ };
+ db.save(view);
+ end
+
+ it 'should apply the view'
+ var result = db.view('spec_db/people');
+
+ result.rows.should.have_length 3
+ result.rows[0].id.should.eql "123"
+ result.rows[0].key.should.eql "123"
+ result.rows[0].value.should.eql "Felix Gaeta"
+ result.rows[1].id.should.eql "456"
+ result.rows[1].key.should.eql "456"
+ result.rows[1].value.should.eql "Samuel T. Anders"
+ result.rows[2].id.should.eql "789"
+ result.rows[2].key.should.eql "789"
+ result.rows[2].value.should.eql "Cally Tyrol"
+ end
+
+ it 'should pass through the options'
+ var result = db.view('spec_db/people', {"skip":"2"});
+
+ result.rows.should.have_length 1
+ result.rows[0].id.should.eql "789"
+ result.rows[0].key.should.eql "789"
+ result.rows[0].value.should.eql "Cally Tyrol"
+ end
+
+ it 'should pass through the keys'
+ var result = db.view('spec_db/people', {}, ["456", "123"]);
+
+ result.rows.should.have_length 2
+ result.rows[0].id.should.eql "456"
+ result.rows[0].key.should.eql "456"
+ result.rows[0].value.should.eql "Samuel T. Anders"
+ result.rows[1].id.should.eql "123"
+ result.rows[1].key.should.eql "123"
+ result.rows[1].value.should.eql "Felix Gaeta"
+ end
+
+ it 'should pass through the options and the keys'
+ var result = db.view('spec_db/people', {"include_docs":"true"}, ["456"]);
+
+ result.rows.should.have_length 1
+ result.rows[0].id.should.eql "456"
+ result.rows[0].key.should.eql "456"
+ result.rows[0].value.should.eql "Samuel T. Anders"
+ result.rows[0].doc["job"].should.eql "pilot"
+ result.rows[0].doc["_rev"].length.should.be_at_least 30
+ end
+
+ it 'should return null when the view doesnt exist'
+ var result = db.view('spec_db/non_existing_view');
+
+ result.should.be_null
+ end
+ end
+
+ describe '.info'
+ before_each
+ result = db.info();
+ end
+
+ it 'should return the name of the database'
+ result.db_name.should.eql "spec_db"
+ end
+
+ it 'should return the number of documents'
+ result.doc_count.should.eql 0
+ end
+
+ it 'should return the start time of the db instance'
+ result.instance_start_time.should.have_length 16
+ end
+ end
+
+ describe '.designInfo'
+ before_each
+ designDoc = {
+ "views" : {
+ "people" : {
+ "map" : "function(doc) { emit(doc._id, doc); }"
+ }
+ },
+ "_id" : "_design/spec_db"
+ };
+ db.save(designDoc);
+ result = db.designInfo("_design/spec_db");
+ end
+
+ it 'should return the design doc name'
+ result.name.should.eql "spec_db"
+ end
+
+ it 'should return the view index language'
+ result.view_index.language.should.eql "javascript"
+ end
+
+ it 'should return the view index update sequence'
+ result.view_index.update_seq.should.eql 0
+ end
+
+ it 'should return the view index signature'
+ result.view_index.signature.should.have_length 32
+ end
+ end
+end
\ No newline at end of file
diff --git a/1.1.x/share/www/spec/couch_js_instance_methods_3_spec.js b/1.1.x/share/www/spec/couch_js_instance_methods_3_spec.js
new file mode 100644
index 00000000..b7464c01
--- /dev/null
+++ b/1.1.x/share/www/spec/couch_js_instance_methods_3_spec.js
@@ -0,0 +1,215 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Specs for couch.js lines 201-265
+
+describe 'CouchDB instance'
+ before_each
+ db = new CouchDB("spec_db", {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ end
+
+ after_each
+ db.deleteDb();
+ end
+
+ describe '.allDocs'
+ it 'should return no docs when there arent any'
+ db.allDocs().total_rows.should.eql 0
+ db.allDocs().rows.should.eql []
+ end
+
+ describe 'with docs'
+ before_each
+ db.save({"Name" : "Felix Gaeta", "_id" : "123"});
+ db.save({"Name" : "Samuel T. Anders", "_id" : "456"});
+ end
+
+ it 'should return all docs'
+ var result = db.allDocs();
+
+ result.total_rows.should.eql 2
+ result.rows.should.have_length 2
+ result.rows[0].id.should.eql "123"
+ result.rows[0].key.should.eql "123"
+ result.rows[0].value.rev.length.should.be_at_least 30
+ result.rows[1].id.should.eql "456"
+ end
+
+ it 'should pass through the options'
+ var result = db.allDocs({"startkey": "123", "limit": "1"});
+
+ result.rows.should.have_length 1
+ result.rows[0].id.should.eql "123"
+ end
+
+ it 'should pass through the keys'
+ var result = db.allDocs({}, ["456"]);
+
+ result.rows.should.have_length 1
+ result.rows[0].id.should.eql "456"
+ result.rows[0].key.should.eql "456"
+ result.rows[0].value.rev.length.should.be_at_least 30
+ end
+
+ it 'should pass through the options and the keys'
+ var result = db.allDocs({"include_docs":"true"}, ["456"]);
+
+ result.rows.should.have_length 1
+ result.rows[0].id.should.eql "456"
+ result.rows[0].key.should.eql "456"
+ result.rows[0].value.rev.length.should.be_at_least 30
+ result.rows[0].doc["Name"].should.eql "Samuel T. Anders"
+ result.rows[0].doc["_rev"].length.should.be_at_least 30
+ end
+
+ end
+ end
+
+ describe '.designDocs'
+ it 'should return nothing when there arent any design docs'
+ db.save({"Name" : "Felix Gaeta", "_id" : "123"});
+ db.designDocs().rows.should.eql []
+ end
+
+ it 'should return all design docs'
+ var designDoc = {
+ "views" : {
+ "people" : {
+ "map" : "function(doc) { emit(doc._id, doc); }"
+ }
+ },
+ "_id" : "_design/spec_db"
+ };
+ db.save(designDoc);
+ db.save({"Name" : "Felix Gaeta", "_id" : "123"});
+
+ var result = db.designDocs();
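+ // total_rows counts every document in the db, while the returned rows
+ // are limited to the _design/ key range, hence 2 vs. 1 below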
+
+ result.total_rows.should.eql 2
+ result.rows.should.have_length 1
+ result.rows[0].id.should.eql "_design/spec_db"
+ result.rows[0].key.should.eql "_design/spec_db"
+ result.rows[0].value.rev.length.should.be_at_least 30
+ end
+ end
+
+ describe '.changes'
+ it 'should return no changes when there arent any'
+ db.changes().last_seq.should.eql 0
+ db.changes().results.should.eql []
+ end
+
+ describe 'with changes'
+ before_each
+ db.save({"Name" : "Felix Gaeta", "_id" : "123"});
+ db.save({"Name" : "Samuel T. Anders", "_id" : "456"});
+ end
+
+ it 'should return changes'
+ var result = db.changes();
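+ // in 1.1.x seq is the db's integer update sequence, so the two saves above yield seq 1 and 2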
+
+ result.last_seq.should.eql 2
+ result.results[0].id.should.eql "123"
+ result.results[0].seq.should.eql 1
+ result.results[0].changes[0].rev.length.should.be_at_least 30
+ result.results[1].id.should.eql "456"
+ result.results[1].seq.should.eql 2
+ result.results[1].changes[0].rev.length.should.be_at_least 30
+ end
+
+ it 'should pass through the options'
+ var result = db.changes({"since":"1"});
+
+ result.last_seq.should.eql 2
+ result.results[0].id.should.eql "456"
+ end
+ end
+ end
+
+ describe '.compact'
+ it 'should return ok true'
+ db.compact().ok.should.be_true
+ end
+
+ it 'should post _compact to the db'
+ db.should.receive("request", "once").with_args("POST", "/spec_db/_compact")
+ db.compact();
+ end
+ end
+
+ describe '.viewCleanup'
+ it 'should return ok true'
+ db.viewCleanup().ok.should.be_true
+ end
+
+ it 'should post _view_cleanup to the db'
+ db.should.receive("request", "once").with_args("POST", "/spec_db/_view_cleanup")
+ db.viewCleanup();
+ end
+ end
+
+ describe '.setDbProperty'
+ it 'should return ok true'
+ db.setDbProperty("_revs_limit", 1500).ok.should.be_true
+ end
+
+ it 'should set a db property'
+ db.setDbProperty("_revs_limit", 1500);
+ db.getDbProperty("_revs_limit").should.eql 1500
+ db.setDbProperty("_revs_limit", 1200);
+ db.getDbProperty("_revs_limit").should.eql 1200
+ end
+ end
+
+ describe '.getDbProperty'
+ it 'should get a db property'
+ db.setDbProperty("_revs_limit", 1200);
+ db.getDbProperty("_revs_limit").should.eql 1200
+ end
+
+ it 'should throw an error when the property doesnt exist'
+ -{ db.getDbProperty("_doesnt_exist") }.should.throw_error
+ end
+ end
+
+ describe '.setSecObj'
+ it 'should return ok true'
+ db.setSecObj({"readers":{"names":["laura"],"roles":["president"]}}).ok.should.be_true
+ end
+
+ it 'should save a well-formed object into the _security object'
+ db.should.receive("request", "once").with_args("PUT", "/spec_db/_security", {body: '{"readers":{"names":["laura"],"roles":["president"]}}'})
+ db.setSecObj({"readers": {"names" : ["laura"], "roles" : ["president"]}})
+ end
+
+ it 'should throw an error when the readers or admins object is malformed'
+ -{ db.setSecObj({"admins":["cylon"]}) }.should.throw_error
+ end
+
+ it 'should save any other object into the _security object'
+ db.setSecObj({"something" : "anything"})
+ db.getSecObj().should.eql {"something" : "anything"}
+ end
+ end
+
+ describe '.getSecObj'
+ it 'should get the security object'
+ db.setSecObj({"admins" : {"names" : ["bill"], "roles" : ["admiral"]}})
+ db.getSecObj().should.eql {"admins" : {"names": ["bill"], "roles": ["admiral"]}}
+ end
+
+ it 'should return an empty object when there is no security object'
+ db.getSecObj().should.eql {}
+ end
+ end
+end
\ No newline at end of file
diff --git a/1.1.x/share/www/spec/custom_helpers.js b/1.1.x/share/www/spec/custom_helpers.js
new file mode 100644
index 00000000..d29ee87b
--- /dev/null
+++ b/1.1.x/share/www/spec/custom_helpers.js
@@ -0,0 +1,51 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
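+// Replaces the global alert() with a stub that records the last message in
+// the alert_msg global so specs can assert on jquery.couch.js error alerts.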
+function stubAlert(){
+ if(typeof(old_alert) == 'undefined'){
+ old_alert = alert;
+ }
+ alert = function(msg){
+ alert_msg = msg;
+ };
+}
+
+function destubAlert(){
+ alert = old_alert;
+}
+
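+// Failure helpers: specs pass these as the callback they do not expect to
+// fire, so any invocation fails the spec with a descriptive message.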
+function errorCallback(status, error, reason){
+ console.log("Unexpected " + status + " error: " + error + " - " + reason)
+ throw("Unexpected " + status + " error: " + error + " - " + reason);
+}
+
+function successCallback(resp){
+ console.log("Unexpected success: expected an error but got a successful response.");
+ throw("Unexpected success: expected an error but got a successful response.");
+}
+
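+// Points couch_httpd_auth/authentication_db at a scratch users database and
+// remembers the previous setting so useOldUserDb() can restore it.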
+function useTestUserDb(){
+ users_db = new CouchDB("spec_users_db");
+ var xhr = CouchDB.request("PUT", "/_config/couch_httpd_auth/authentication_db", {
+ body: JSON.stringify("spec_users_db")
+ });
+ if(typeof(old_auth_db) == 'undefined'){
+ old_auth_db = xhr.responseText.replace(/\n/,'').replace(/"/g,'');
+ }
+}
+
+function useOldUserDb(){
+ CouchDB.request("PUT", "/_config/couch_httpd_auth/authentication_db", {
+ body: JSON.stringify(old_auth_db)
+ });
+ users_db.deleteDb();
+}
\ No newline at end of file
diff --git a/1.1.x/share/www/spec/jquery_couch_js_class_methods_spec.js b/1.1.x/share/www/spec/jquery_couch_js_class_methods_spec.js
new file mode 100644
index 00000000..f2df81b3
--- /dev/null
+++ b/1.1.x/share/www/spec/jquery_couch_js_class_methods_spec.js
@@ -0,0 +1,523 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Specs for jquery_couch.js lines 48-156 and 415-448
+
+describe 'jQuery couchdb'
+ before
+ stubAlert();
+ end
+
+ after
+ destubAlert();
+ end
+
+ describe 'activeTasks'
+ before_each
+ db = $.couch.db("spec_db");
+ db.create();
+ end
+
+ after_each
+ db.drop();
+ end
+
+ it 'should return an empty array when there are no active tasks'
+ $.couch.activeTasks({
+ success: function(resp){
+ resp.should.eql []
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return an active task'
+ // doing a bit of stuff here so compaction has something to do and takes a while
+ var battlestar, civilian;
+ db.saveDoc({"type":"Battlestar", "name":"Galactica"}, {
+ success: function(resp){
+ db.openDoc(resp.id, {
+ success: function(resp2){
+ battlestar = resp2;
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ battlestar.name = "Pegasus";
+ db.saveDoc(battlestar);
+
+ db.saveDoc({"type":"Civillian", "name":"Cloud 9"}, {
+ success: function(resp){
+ db.openDoc(resp.id, {
+ success: function(resp2){
+ civilian = resp2;
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ civillian.name = "Olympic Carrier";
+ db.saveDoc(civillian);
+ db.removeDoc(civillian);
+
+ db.compact({
+ ajaxStart: function(resp){
+ $.couch.activeTasks({
+ success: function(resp2){
+ resp2[0].type.should.eql "Database Compaction"
+ resp2[0].task.should.eql "spec_db"
+ resp2[0].should.have_prop "status"
+ resp2[0].should.include "pid"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ }
+ });
+ end
+ end
+
+ describe 'allDbs'
+ it 'should return an array that includes a created database'
+ temp_db = new CouchDB("temp_spec_db", {"X-Couch-Full-Commit":"false"});
+ temp_db.createDb();
+ $.couch.allDbs({
+ success: function(resp){
+ resp.should.include "temp_spec_db"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ temp_db.deleteDb();
+ end
+
+ it 'should return an array that does not include a database that does not exist'
+ $.couch.allDbs({
+ success: function(resp){
+ resp.should.not.include("not_existing_temp_spec_db");
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+ end
+
+ describe 'config'
+ it 'should get the config settings'
+ $.couch.config({
+ success: function(resp){
+ resp.httpd.port.should.eql window.location.port
+ resp.stats.samples.should.match /\[.*\]/
+ resp.native_query_servers.should.have_prop "erlang"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should get a specific config setting'
+ $.couch.config({
+ success: function(resp){
+ parseInt(resp.max_document_size).should.be_a Number
+ resp.delayed_commits.should.be_a String
+ resp.database_dir.should.be_a String
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ }, "couchdb");
+ end
+
+ it 'should update a config setting'
+ $.couch.config({
+ success: function(resp){
+ resp.should.eql ""
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ }, "test", "colony", "Caprica");
+
+ $.couch.config({
+ success: function(resp){
+ resp.colony.should.eql "Caprica"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ }, "test");
+
+ $.couch.config({}, "test", "colony", null);
+ end
+
+ it 'should delete a config setting'
+ $.couch.config({}, "test", "colony", "Caprica");
+
+ $.couch.config({
+ success: function(resp){
+ resp.should.eql "Caprica"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ }, "test", "colony", null);
+
+ $.couch.config({
+ success: function(resp){
+ resp.should.eql {}
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ }, "test");
+ end
+
+ it 'should alert with an error message prefix'
+ $.couch.config("asdf", "asdf", "asdf");
+ alert_msg.should.match /An error occurred retrieving\/updating the server configuration/
+ end
+ end
+
+ describe 'session'
+ it 'should return information about the session'
+ $.couch.session({
+ success: function(resp){
+ resp.info.should.have_prop 'authentication_db'
+ resp.userCtx.should.include 'name'
+ resp.userCtx.roles.should.be_an Array
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+ end
+
+ describe 'userDb'
+ it 'should return the userDb'
+ var authentication_db;
+ $.couch.session({
+ success: function(resp){
+ authentication_db = resp.info.authentication_db;
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+
+ $.couch.userDb(function(resp){
+ resp.name.should.eql authentication_db
+ });
+ end
+
+ it 'should return a db instance'
+ $.couch.userDb(function(resp){
+ resp.should.respond_to 'allDocs'
+ resp.should.respond_to 'bulkSave'
+ });
+ end
+ end
+
+ describe 'user_db stuff'
+ before
+ useTestUserDb();
+ end
+
+ after
+ useOldUserDb();
+ end
+
+ describe 'signup'
+ it 'should return a saved user'
+ $.couch.signup(
+ {name: "Tom Zarek"}, "secretpass", {
+ success: function(resp){
+ resp.id.should.eql "org.couchdb.user:Tom Zarek"
+ resp.rev.length.should.be_at_least 30
+ resp.ok.should.be_true
+ users_db.deleteDoc({_id : resp.id, _rev : resp.rev})
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should create a userDoc in the user db'
+ $.couch.signup(
+ {name: "Tom Zarek"}, "secretpass", {
+ success: function(resp){
+ var user = users_db.open(resp.id);
+ user.name.should.eql "Tom Zarek"
+ user._id.should.eql "org.couchdb.user:Tom Zarek"
+ user.roles.should.eql []
+ user.password_sha.length.should.be_at_least 30
+ user.password_sha.should.be_a String
+ users_db.deleteDoc({_id : resp.id, _rev : resp.rev})
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should create a userDoc with roles when specified'
+ $.couch.signup(
+ {name: "Tom Zarek", roles: ["vice_president", "activist"]}, "secretpass", {
+ success: function(resp){
+ var user = users_db.open(resp.id);
+ user.roles.should.eql ["vice_president", "activist"]
+ users_db.deleteDoc({_id : resp.id, _rev : resp.rev})
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+ end
+
+ describe 'login'
+ before_each
+ user = {};
+ $.couch.signup({name: "Tom Zarek", roles: ["vice_president", "activist"]}, "secretpass", {
+ success: function(resp){
+ user.id = resp.id;
+ user.rev = resp.rev;
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ after_each
+ users_db.deleteDoc({_id : user.id, _rev : user.rev})
+ end
+
+ it 'should return the logged in user'
+ $.couch.login({
+ name: "Tom Zarek",
+ password: "secretpass",
+ success: function(resp){
+ resp.name.should.eql "Tom Zarek"
+ resp.ok.should.be_true
+ resp.roles.should.eql ["vice_president", "activist"]
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should result in a session for the logged in user'
+ $.couch.login({
+ name: "Tom Zarek",
+ password: "secretpass"
+ });
+ $.couch.session({
+ success: function(resp){
+ resp.info.authentication_db.should.eql "spec_users_db"
+ resp.userCtx.name.should.eql "Tom Zarek"
+ resp.userCtx.roles.should.eql ["vice_president", "activist"]
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return a 401 when the password is wrong'
+ $.couch.login({
+ name: "Tom Zarek",
+ password: "wrongpass",
+ error: function(status, error, reason){
+ status.should.eql 401
+ error.should.eql "unauthorized"
+ reason.should.eql "Name or password is incorrect."
+ },
+ success: function(resp){successCallback(resp)}
+ });
+ end
+
+ it 'should return a 401 when the user doesnt exist in the users db'
+ $.couch.login({
+ name: "Number Three",
+ password: "secretpass",
+ error: function(status, error, reason){
+ status.should.eql 401
+ error.should.eql "unauthorized"
+ reason.should.eql "Name or password is incorrect."
+ },
+ success: function(resp){successCallback(resp)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ $.couch.login("asdf");
+ alert_msg.should.match /An error occurred logging in/
+ end
+ end
+
+ describe 'logout'
+ before_each
+ user = {};
+ $.couch.signup({name: "Tom Zarek", roles: ["vice_president", "activist"]}, "secretpass", {
+ success: function(resp){
+ user.id = resp.id;
+ user.rev = resp.rev;
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ $.couch.login({name: "Tom Zarek", password: "secretpass"});
+ end
+
+ after_each
+ users_db.deleteDoc({_id : user.id, _rev : user.rev})
+ end
+
+ it 'should return ok true'
+ $.couch.logout({
+ success: function(resp){
+ resp.ok.should.be_true
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should result in an empty session'
+ $.couch.logout();
+ $.couch.session({
+ success: function(resp){
+ resp.userCtx.name.should.be_null
+ resp.userCtx.roles.should.not.include ["vice_president"]
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+ end
+ end
+
+ describe 'encodeDocId'
+ it 'should return the encoded docID when it is not a design document'
+ $.couch.encodeDocId("viper").should.eql(encodeURIComponent("viper"))
+ end
+
+ it 'should encode only the name of the design document'
+ $.couch.encodeDocId("_design/raptor").should.eql("_design/" + encodeURIComponent("raptor"))
+ end
+
+ it 'should also work when the name of the design document contains slashes'
+ $.couch.encodeDocId("_design/battlestar/_view/crew").should.eql("_design/" + encodeURIComponent("battlestar/_view/crew"))
+ end
+ end
+
+ describe 'info'
+ it 'should return the CouchDB version'
+ $.couch.info({
+ success: function(resp){
+ resp.couchdb.should.eql "Welcome"
+ resp.version.should.match /^\d\d?\.\d\d?\.\d\d?.*/
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+ end
+
+ describe 'replicate'
+ before_each
+ db = $.couch.db("spec_db");
+ db.create();
+ db2 = $.couch.db("spec_db_2");
+ db2.create();
+ host = window.location.protocol + "//" + window.location.host ;
+ end
+
+ after_each
+ db.drop();
+ db2.drop();
+ end
+
+ it 'should return no_changes true when there are no changes between the dbs'
+ $.couch.replicate(host + db.uri, host + db2.uri, {
+ success: function(resp){
+ resp.ok.should.be_true
+ resp.no_changes.should.be_true
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return the session ID'
+ db.saveDoc({'type':'battlestar', 'name':'galactica'});
+ $.couch.replicate(host + db.uri, host + db2.uri, {
+ success: function(resp){
+ resp.session_id.length.should.be_at_least 30
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return source_last_seq'
+ db.saveDoc({'type':'battlestar', 'name':'galactica'});
+ db.saveDoc({'type':'battlestar', 'name':'pegasus'});
+
+ $.couch.replicate(host + db.uri, host + db2.uri, {
+ success: function(resp){
+ resp.source_last_seq.should.eql 2
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return the replication history'
+ db.saveDoc({'type':'battlestar', 'name':'galactica'});
+ db.saveDoc({'type':'battlestar', 'name':'pegasus'});
+
+ $.couch.replicate(host + db.uri, host + db2.uri, {
+ success: function(resp){
+ resp.history[0].docs_written.should.eql 2
+ resp.history[0].start_last_seq.should.eql 0
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should pass through replication options'
+ db.saveDoc({'type':'battlestar', 'name':'galactica'});
+ db2.drop();
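+ // the first replication fails because the target has just been dropped;
+ // the second passes create_target through, so the target db is recreated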
+ $.couch.replicate(host + db.uri, host + db2.uri, {
+ error: function(status, error, reason){
+ status.should.eql 500
+ reason.should.match /db_not_found/
+ },
+ success: function(resp){successCallback(resp)}
+ });
+
+ $.couch.replicate(host + db.uri, host + db2.uri, {
+ success: function(resp){
+ resp.ok.should.eql true
+ resp.history[0].docs_written.should.eql 1
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ }, {
+ "create_target":true
+ });
+
+ db2.info({
+ success: function(resp){
+ resp.db_name.should.eql "spec_db_2"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ $.couch.replicate("asdf");
+ alert_msg.should.match /Replication failed/
+ end
+ end
+
+ describe 'newUUID'
+ it 'should return a new UUID'
+ var new_uuid = $.couch.newUUID(1);
+ new_uuid.should.be_a String
+ new_uuid.should.have_length 32
+ end
+
+ it 'should fill the uuidCache with the specified number minus 1'
+ // we can't reach the uuidCache from here, so we mock the next request
+ // to test that the next uuid is not coming from the request, but from the cache.
+ $.couch.newUUID(2);
+ mock_request().and_return({'uuids':['a_sample_uuid']})
+ $.couch.newUUID(1).should.not.eql 'a_sample_uuid'
+ $.couch.newUUID(1).should.eql 'a_sample_uuid'
+ end
+
+ it 'should alert with an error message prefix'
+ $.couch.newUUID("asdf");
+ alert_msg.should.match /Failed to retrieve UUID batch/
+ end
+ end
+end
\ No newline at end of file
diff --git a/1.1.x/share/www/spec/jquery_couch_js_instance_methods_1_spec.js b/1.1.x/share/www/spec/jquery_couch_js_instance_methods_1_spec.js
new file mode 100644
index 00000000..8538c856
--- /dev/null
+++ b/1.1.x/share/www/spec/jquery_couch_js_instance_methods_1_spec.js
@@ -0,0 +1,202 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Specs for jquery_couch.js lines 163-209
+
+describe 'jQuery couchdb db'
+ before
+ stubAlert();
+ end
+
+ after
+ destubAlert();
+ end
+
+ before_each
+ db = $.couch.db('spec_db');
+ end
+
+ describe 'constructor'
+ it 'should set the name'
+ db.name.should.eql 'spec_db'
+ end
+
+ it 'should set the uri'
+ db.uri.should.eql '/spec_db/'
+ end
+ end
+
+ describe 'triggering db functions'
+ before_each
+ db.create();
+ end
+
+ after_each
+ db.drop();
+ end
+
+ describe 'compact'
+ it 'should return ok true'
+ db.compact({
+ success: function(resp) {
+ resp.ok.should.be_true
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should trigger _compact'
+ db.compact({
+ success: function(resp, obj) {
+ obj.url.should.eql "/spec_db/_compact"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+ end
+
+ describe 'viewCleanup'
+ it 'should return ok true'
+ db.viewCleanup({
+ success: function(resp) {
+ resp.ok.should.be_true
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should trigger _view_cleanup'
+ db.viewCleanup({
+ success: function(resp, obj) {
+ obj.url.should.eql "/spec_db/_view_cleanup"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+ end
+
+ describe 'compactView'
+ before_each
+ var designDoc = {
+ "views" : {
+ "people" : {
+ "map" : "function(doc) { emit(doc._id, doc); }"
+ }
+ },
+ "_id" : "_design/myview"
+ };
+ db.saveDoc(designDoc);
+ db.saveDoc({"Name" : "Felix Gaeta", "_id" : "123"});
+ end
+
+ it 'should return ok true'
+ db.compactView("myview", {
+ success: function(resp) {
+ resp.ok.should.be_true
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should trigger _compact_view with the groupname'
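+ // view compaction is POSTed to /db/_compact/<design_doc_name>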
+ db.compactView("myview", {
+ success: function(resp, obj) {
+ obj.url.should.eql "/spec_db/_compact/myview"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should raise a 404 error when the design name doesnt exist'
+ db.compactView("non_existing_design_name", {
+ error: function(status, error, reason){
+ status.should.eql 404
+ error.should.eql "not_found"
+ reason.should.eql "missing"
+ },
+ success: function(resp){successCallback(resp)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.compactView("asdf");
+ alert_msg.should.match /The view could not be compacted/
+ end
+ end
+ end
+
+ describe 'create'
+ after_each
+ db.drop();
+ end
+
+ it 'should return ok true'
+ db.create({
+ success: function(resp) {
+ resp.ok.should.be_true
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should result in a created db'
+ db.create();
+ db.create({
+ error: function(status, error, reason){
+ status.should.eql 412
+ error.should.eql "file_exists"
+ reason.should.eql "The database could not be created, the file already exists."
+ },
+ success: function(resp){successCallback(resp)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.create();
+ db.create();
+ alert_msg.should.match /The database could not be created/
+ end
+ end
+
+ describe 'drop'
+ before_each
+ db.create();
+ end
+
+ it 'should return ok true'
+ db.drop({
+ success: function(resp) {
+ resp.ok.should.be_true
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should result in a deleted db'
+ db.drop();
+ db.drop({
+ error: function(status, error, reason){
+ status.should.eql 404
+ error.should.eql "not_found"
+ reason.should.eql "missing"
+ },
+ success: function(resp){successCallback(resp)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.drop();
+ db.drop();
+ alert_msg.should.match /The database could not be deleted/
+ end
+ end
+end
\ No newline at end of file
diff --git a/1.1.x/share/www/spec/jquery_couch_js_instance_methods_2_spec.js b/1.1.x/share/www/spec/jquery_couch_js_instance_methods_2_spec.js
new file mode 100644
index 00000000..8f35affa
--- /dev/null
+++ b/1.1.x/share/www/spec/jquery_couch_js_instance_methods_2_spec.js
@@ -0,0 +1,433 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Specs for jquery_couch.js lines 210-299
+
+describe 'jQuery couchdb db'
+ before
+ stubAlert();
+ end
+
+ after
+ destubAlert();
+ end
+
+ before_each
+ db = $.couch.db('spec_db');
+ db.create();
+ end
+
+ after_each
+ db.drop();
+ end
+
+ describe 'info'
+ before_each
+ result = {};
+ db.info({
+ success: function(resp) { result = resp; },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return the name of the database'
+ result.db_name.should.eql "spec_db"
+ end
+
+ it 'should return the number of documents'
+ result.doc_count.should.eql 0
+ end
+
+ it 'should return the start time of the db instance'
+ result.instance_start_time.should.have_length 16
+ end
+ end
+
+ describe 'allDocs'
+ it 'should return no docs when there arent any'
+ db.allDocs({
+ success: function(resp) {
+ resp.total_rows.should.eql 0
+ resp.rows.should.eql []
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ describe 'with docs'
+ before_each
+ db.saveDoc({"Name" : "Felix Gaeta", "_id" : "123"});
+ db.saveDoc({"Name" : "Samuel T. Anders", "_id" : "456"});
+ end
+
+ it 'should return all docs'
+ db.allDocs({
+ success: function(resp) {
+ resp.total_rows.should.eql 2
+ resp.rows.should.have_length 2
+ resp.rows[0].id.should.eql "123"
+ resp.rows[0].key.should.eql "123"
+ resp.rows[0].value.rev.length.should.be_at_least 30
+ resp.rows[1].id.should.eql "456"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should pass through the options'
+ db.allDocs({
+ "startkey": "123",
+ "limit": "1",
+ success: function(resp) {
+ resp.rows.should.have_length 1
+ resp.rows[0].id.should.eql "123"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+ end
+ end
+
+ describe 'allDesignDocs'
+ it 'should return nothing when there arent any design docs'
+ db.saveDoc({"Name" : "Felix Gaeta", "_id" : "123"});
+ db.allDesignDocs({
+ success: function(resp) {
+ resp.rows.should.eql []
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return all design docs'
+ var designDoc = {
+ "views" : {
+ "people" : {
+ "map" : "function(doc) { emit(doc._id, doc); }"
+ }
+ },
+ "_id" : "_design/spec_db"
+ };
+ db.saveDoc(designDoc);
+ db.saveDoc({"Name" : "Felix Gaeta", "_id" : "123"});
+
+ db.allDesignDocs({
+ success: function(resp) {
+ resp.total_rows.should.eql 2
+ resp.rows.should.have_length 1
+ resp.rows[0].id.should.eql "_design/spec_db"
+ resp.rows[0].key.should.eql "_design/spec_db"
+ resp.rows[0].value.rev.length.should.be_at_least 30
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+ end
+
+ describe 'allApps'
+ it 'should provide a custom function with appName, appPath and design document when there is an attachment with index.html'
+ var designDoc = {"_id" : "_design/with_attachments"};
+
+ designDoc._attachments = {
+ "index.html" : {
+ "content_type": "text\/html",
+ // this is "<html><p>Hi, here is index!</p></html>", base64 encoded
+ "data": "PGh0bWw+PHA+SGksIGhlcmUgaXMgaW5kZXghPC9wPjwvaHRtbD4="
+ }
+ };
+ db.saveDoc(designDoc);
+
+ db.allApps({
+ eachApp: function(appName, appPath, ddoc) {
+ appName.should.eql "with_attachments"
+ appPath.should.eql "/spec_db/_design/with_attachments/index.html"
+ ddoc._id.should.eql "_design/with_attachments"
+ ddoc._attachments["index.html"].content_type.should.eql "text/html"
+ ddoc._attachments["index.html"].length.should.eql "<html><p>Hi, here is index!</p></html>".length
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should provide a custom function with appName, appPath and design document when there is a couchapp with index file'
+ var designDoc = {"_id" : "_design/with_index"};
+ designDoc.couchapp = {
+ "index" : "cylon"
+ };
+ db.saveDoc(designDoc);
+
+ db.allApps({
+ eachApp: function(appName, appPath, ddoc) {
+ appName.should.eql "with_index"
+ appPath.should.eql "/spec_db/_design/with_index/cylon"
+ ddoc._id.should.eql "_design/with_index"
+ ddoc.couchapp.index.should.eql "cylon"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should not call the eachApp function when there is neither index.html in _attachments nor a couchapp index file'
+ var designDoc = {"_id" : "_design/nothing"};
+ db.saveDoc(designDoc);
+
+ var eachApp_called = false;
+ db.allApps({
+ eachApp: function(appName, appPath, ddoc) {
+ eachApp_called = true;
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+
+ eachApp_called.should.be_false
+ end
+
+ it 'should alert with an error message prefix'
+ db.allApps();
+ alert_msg.should.match /Please provide an eachApp function for allApps()/
+ end
+ end
+
+ describe 'openDoc'
+ before_each
+ doc = {"Name" : "Louanne Katraine", "Callsign" : "Kat", "_id" : "123"};
+ db.saveDoc(doc);
+ end
+
+ it 'should open the document'
+ db.openDoc("123", {
+ success: function(resp){
+ resp.should.eql doc
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should raise a 404 error when there is no document with the given ID'
+ db.openDoc("non_existing", {
+ error: function(status, error, reason){
+ status.should.eql 404
+ error.should.eql "not_found"
+ reason.should.eql "missing"
+ },
+ success: function(resp){successCallback(resp)}
+ });
+ end
+
+ it 'should pass through the options'
+ doc.Name = "Sasha";
+ db.saveDoc(doc);
+ db.openDoc("123", {
+ revs: true,
+ success: function(resp){
+ resp._revisions.start.should.eql 2
+ resp._revisions.ids.should.have_length 2
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.openDoc("asdf");
+ alert_msg.should.match /The document could not be retrieved/
+ end
+ end
+
+ describe 'saveDoc'
+ before_each
+ doc = {"Name" : "Kara Thrace", "Callsign" : "Starbuck"};
+ end
+
+ it 'should save the document'
+ db.saveDoc(doc, {
+ success: function(resp, status){
+ status.should.eql 201
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return ok true'
+ db.saveDoc(doc, {
+ success: function(resp, status){
+ resp.ok.should.be_true
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return ID and revision of the document'
+ db.saveDoc(doc, {
+ success: function(resp, status){
+ resp.id.should.be_a String
+ resp.id.should.have_length 32
+ resp.rev.should.be_a String
+ resp.rev.length.should.be_at_least 30
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should result in a saved document with generated ID'
+ db.saveDoc(doc, {
+ success: function(resp, status){
+ db.openDoc(resp.id, {
+ success: function(resp2){
+ resp2.Name.should.eql "Kara Thrace"
+ resp2.Callsign.should.eql "Starbuck"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should save the document with the specified ID'
+ doc._id = "123";
+ db.saveDoc(doc, {
+ success: function(resp, status){
+ resp.id.should.eql "123"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should pass through the options'
+ db.saveDoc(doc, {
+ "batch" : "ok",
+ success: function(resp, status){
+ // when using batch ok, couch sends a 202 status immediately
+ status.should.eql 202
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.saveDoc("asdf");
+ alert_msg.should.match /The document could not be saved/
+ end
+ end
+
+ describe 'bulkSave'
+ before_each
+ doc = {"Name" : "Kara Thrace", "Callsign" : "Starbuck"};
+ doc2 = {"Name" : "Karl C. Agathon", "Callsign" : "Helo"};
+ doc3 = {"Name" : "Sharon Valerii", "Callsign" : "Boomer"};
+ docs = [doc, doc2, doc3];
+ end
+
+ it 'should save all documents'
+ db.bulkSave({"docs": docs});
+ db.allDocs({
+ success: function(resp) {
+ resp.total_rows.should.eql 3
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should result in saved documents'
+ doc3._id = "789";
+ db.bulkSave({"docs": [doc3]});
+
+ db.openDoc("789", {
+ success: function(resp){
+ resp.Name.should.eql "Sharon Valerii"
+ resp.Callsign.should.eql "Boomer"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return ID and revision of the documents'
+ db.bulkSave({"docs": docs},{
+ success: function(resp){
+ resp[0].id.should.be_a String
+ resp[0].id.should.have_length 32
+ resp[0].rev.should.be_a String
+ resp[0].rev.length.should.be_at_least 30
+ resp[1].id.should.be_a String
+ resp[1].id.should.have_length 32
+ resp[1].rev.should.be_a String
+ resp[1].rev.length.should.be_at_least 30
+ resp[2].id.should.be_a String
+ resp[2].id.should.have_length 32
+ resp[2].rev.should.be_a String
+ resp[2].rev.length.should.be_at_least 30
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should save the document with specified IDs'
+ doc._id = "123";
+ doc2._id = "456";
+ docs = [doc, doc2, doc3];
+
+ db.bulkSave({"docs": docs},{
+ success: function(resp){
+ resp[0].id.should.eql "123"
+ resp[1].id.should.eql "456"
+ resp[2].id.should.have_length 32
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should pass through the options'
+ // a lengthy way to test that a conflict can't be created with the
+ // all_or_nothing option set to false, but can be when it's true.
+
+ var old_doc = {"Name" : "Louanne Katraine", "Callsign" : "Kat", "_id" : "123"};
+ db.saveDoc(old_doc, {
+ success: function(resp){
+ old_doc._rev = resp.rev;
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+
+ var new_doc = {"Name" : "Sasha", "Callsign" : "Kat", "_id" : "123"};
+
+ db.bulkSave({"docs": [new_doc], "all_or_nothing": false}, {
+ success: function(resp){
+ resp[0].id.should.eql "123"
+ resp[0].error.should.eql "conflict"
+ resp[0].reason.should.eql "Document update conflict."
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+
+ db.bulkSave({"docs": [new_doc], "all_or_nothing": true}, {
+ success: function(resp){
+ resp[0].id.should.eql "123"
+ resp[0].rev.should.not.eql old_doc._rev
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+
+ db.openDoc("123", {
+ "conflicts": true,
+ success: function(resp){
+ resp._conflicts[0].should.eql old_doc._rev
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.bulkSave("asdf");
+ alert_msg.should.match /The documents could not be saved/
+ end
+ end
+end
\ No newline at end of file
diff --git a/1.1.x/share/www/spec/jquery_couch_js_instance_methods_3_spec.js b/1.1.x/share/www/spec/jquery_couch_js_instance_methods_3_spec.js
new file mode 100644
index 00000000..5d27d817
--- /dev/null
+++ b/1.1.x/share/www/spec/jquery_couch_js_instance_methods_3_spec.js
@@ -0,0 +1,540 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Specs for jquery_couch.js lines 300-411
+
+describe 'jQuery couchdb db'
+ before
+ stubAlert();
+ end
+
+ after
+ destubAlert();
+ end
+
+ before_each
+ db = $.couch.db('spec_db');
+ db.create();
+ end
+
+ after_each
+ db.drop();
+ end
+
+ describe 'removeDoc'
+ before_each
+ doc = {"Name" : "Louanne Katraine", "Callsign" : "Kat", "_id" : "345"};
+ saved_doc = {};
+ db.saveDoc(doc, {
+ success: function(resp){
+ saved_doc = resp;
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should result in a deleted document'
+ db.removeDoc({_id : "345", _rev : saved_doc.rev}, {
+ success: function(resp){
+ db.openDoc("345", {
+ error: function(status, error, reason){
+ status.should.eql 404
+ error.should.eql "not_found"
+ reason.should.eql "deleted"
+ },
+ success: function(resp){successCallback(resp)}
+ });
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return ok true, the ID and the revision of the deleted document'
+ db.removeDoc({_id : "345", _rev : saved_doc.rev}, {
+ success: function(resp){
+ resp.ok.should.be_true
+ resp.id.should.eql "345"
+ resp.rev.should.be_a String
+ resp.rev.length.should.be_at_least 30
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should record the revision in the deleted document'
+ db.removeDoc({_id : "345", _rev : saved_doc.rev}, {
+ success: function(resp){
+ db.openDoc("345", {
+ rev: resp.rev,
+ success: function(resp2){
+ resp2._rev.should.eql resp.rev
+ resp2._id.should.eql resp.id
+ resp2._deleted.should.be_true
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.removeDoc({_id: "asdf"});
+ alert_msg.should.match /The document could not be deleted/
+ end
+ end
+
+ describe 'bulkRemove'
+ before_each
+ doc = {"Name" : "Kara Thrace", "Callsign" : "Starbuck", "_id" : "123"};
+ doc2 = {"Name" : "Karl C. Agathon", "Callsign" : "Helo", "_id" : "456"};
+ doc3 = {"Name" : "Sharon Valerii", "Callsign" : "Boomer", "_id" : "789"};
+ docs = [doc, doc2, doc3];
+
+ db.bulkSave({"docs": docs}, {
+ success: function(resp){
+ for (var i = 0; i < docs.length; i++) {
+ docs[i]._rev = resp[i].rev;
+ }
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should remove all documents specified'
+ db.bulkRemove({"docs": docs});
+ db.allDocs({
+ success: function(resp) {
+ resp.total_rows.should.eql 0
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should not remove documents that should not have been deleted'
+ db.bulkRemove({"docs": [doc3]});
+ db.allDocs({
+ success: function(resp) {
+ resp.total_rows.should.eql 2
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should result in deleted documents'
+ db.bulkRemove({"docs": docs}, {
+ success: function(resp){
+ db.openDoc("123", {
+ error: function(status, error, reason){
+ status.should.eql 404
+ error.should.eql "not_found"
+ reason.should.eql "deleted"
+ },
+ success: function(resp){successCallback(resp)}
+ });
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should return the ID and the revision of the deleted documents'
+ db.bulkRemove({"docs": docs}, {
+ success: function(resp){
+ resp[0].id.should.eql "123"
+ resp[0].rev.should.be_a String
+ resp[0].rev.length.should.be_at_least 30
+ resp[1].id.should.eql "456"
+ resp[1].rev.should.be_a String
+ resp[1].rev.length.should.be_at_least 30
+ resp[2].id.should.eql "789"
+ resp[2].rev.should.be_a String
+ resp[2].rev.length.should.be_at_least 30
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should record the revision in the deleted documents'
+ db.bulkRemove({"docs": docs}, {
+ success: function(resp){
+ db.openDoc("123", {
+ rev: resp[0].rev,
+ success: function(resp2){
+ resp2._rev.should.eql resp[0].rev
+ resp2._id.should.eql resp[0].id
+ resp2._deleted.should.be_true
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.bulkRemove({docs: ["asdf"]});
+ alert_msg.should.match /The documents could not be deleted/
+ end
+ end
+
+ describe 'copyDoc'
+ before_each
+ doc = {"Name" : "Sharon Agathon", "Callsign" : "Athena", "_id" : "123"};
+ db.saveDoc(doc);
+ end
+
+ it 'should result in another document with same data and new id'
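+ // copyDoc issues a COPY request; the Destination header names the target id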
+ db.copyDoc("123", {
+ success: function(resp){
+ resp.id.should.eql "456"
+ resp.rev.length.should.be_at_least 30
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ }, {
+ headers: {"Destination":"456"}
+ });
+
+ db.openDoc("456", {
+ success: function(resp){
+ resp.Name.should.eql "Sharon Agathon"
+ resp.Callsign.should.eql "Athena"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should throw an error when trying to overwrite a document without providing a revision'
+ doc2 = {"Name" : "Louanne Katraine", "Callsign" : "Kat", "_id" : "456"};
+ db.saveDoc(doc2);
+
+ db.copyDoc("123", {
+ error: function(status, error, reason){
+ status.should.eql 409
+ error.should.eql "conflict"
+ reason.should.eql "Document update conflict."
+ },
+ success: function(resp){successCallback(resp)}
+ }, {
+ headers: {"Destination":"456"}
+ });
+ end
+
+ it 'should overwrite a document with the correct revision'
+ doc2 = {"Name" : "Louanne Katraine", "Callsign" : "Kat", "_id" : "456"};
+ var doc2_rev;
+ db.saveDoc(doc2, {
+ success: function(resp){
+ doc2_rev = resp.rev;
+ }
+ });
+
+ db.copyDoc("123", {
+ success: function(resp){
+ resp.id.should.eql "456"
+ resp.rev.length.should.be_at_least 30
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ }, {
+ headers: {"Destination":"456?rev=" + doc2_rev}
+ });
+
+ db.openDoc("456", {
+ success: function(resp){
+ resp.Name.should.eql "Sharon Agathon"
+ resp.Callsign.should.eql "Athena"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.copyDoc("asdf", {}, {});
+ alert_msg.should.match /The document could not be copied/
+ end
+ end
+
+ describe 'query'
+ before_each
+ db.saveDoc({"Name" : "Cally Tyrol", "job" : "deckhand", "_id" : "789"});
+ db.saveDoc({"Name" : "Felix Gaeta", "job" : "officer", "_id" : "123"});
+ db.saveDoc({"Name" : "Samuel T. Anders", "job" : "pilot", "_id" : "456"});
+ map_function = "function(doc) { emit(doc._id, 1); }";
+ reduce_function = "function(key, values, rereduce) { return sum(values); }";
+ end
+
+ it 'should apply the map function'
+ db.query(map_function, null, null, {
+ success: function(resp){
+ resp.rows.should.have_length 3
+ resp.rows[0].id.should.eql "123"
+ resp.rows[0].key.should.eql "123"
+ resp.rows[0].value.should.eql 1
+ resp.rows[1].id.should.eql "456"
+ resp.rows[1].key.should.eql "456"
+ resp.rows[1].value.should.eql 1
+ resp.rows[2].id.should.eql "789"
+ resp.rows[2].key.should.eql "789"
+ resp.rows[2].value.should.eql 1
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should apply the reduce function'
+ db.query(map_function, reduce_function, null, {
+ success: function(resp){
+ resp.rows.should.have_length 1
+ resp.rows[0].key.should.be_null
+ resp.rows[0].value.should.eql 3
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should pass through the options'
+ db.query(map_function, null, null, {
+ "startkey": "456",
+ success: function(resp){
+ resp.rows.should.have_length 2
+ resp.rows[0].id.should.eql "456"
+ resp.rows[0].key.should.eql "456"
+ resp.rows[0].value.should.eql 1
+ resp.rows[1].id.should.eql "789"
+ resp.rows[1].key.should.eql "789"
+ resp.rows[1].value.should.eql 1
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should pass through the keys'
+ // TODO: implement "keys" support for db.query in jquery.couch.js; this
+ // spec describes the intended behavior.
+ console.log("TODO: implement keys support for db.query in jquery.couch.js")
+ db.query(map_function, null, null, {
+ "keys": ["456", "123"],
+ success: function(resp){
+ resp.rows.should.have_length 2
+ resp.rows[0].id.should.eql "456"
+ resp.rows[0].key.should.eql "456"
+ resp.rows[0].value.should.eql 1
+ resp.rows[1].id.should.eql "123"
+ resp.rows[1].key.should.eql "123"
+ resp.rows[1].value.should.eql 1
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should pass through the options and the keys'
+ // TODO: implement "keys" support for db.query in jquery.couch.js; this
+ // spec describes the intended behavior.
+ console.log("TODO: implement keys support for db.query in jquery.couch.js")
+ db.query(map_function, null, null, {
+ "include_docs":"true",
+ "keys": ["456"],
+ success: function(resp){
+ resp.rows.should.have_length 1
+ resp.rows[0].id.should.eql "456"
+ resp.rows[0].key.should.eql "456"
+ resp.rows[0].value.should.eql 1
+ resp.rows[0].doc["job"].should.eql "pilot"
+ resp.rows[0].doc["_rev"].length.should.be_at_least 30
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should apply a query in erlang also'
+ // when this test fails, read this: http://wiki.apache.org/couchdb/EnableErlangViews
+ var erlang_map = 'fun({Doc}) -> ' +
+ 'ID = proplists:get_value(<<"_id">>, Doc, null), ' +
+ 'Emit(ID, 1) ' +
+ 'end.';
+ db.query(erlang_map, null, "erlang", {
+ success: function(resp){
+ resp.rows.should.have_length 3
+ resp.rows[0].id.should.eql "123"
+ resp.rows[0].key.should.eql "123"
+ resp.rows[0].value.should.eql 1
+ resp.rows[1].id.should.eql "456"
+ resp.rows[1].key.should.eql "456"
+ resp.rows[1].value.should.eql 1
+ resp.rows[2].id.should.eql "789"
+ resp.rows[2].key.should.eql "789"
+ resp.rows[2].value.should.eql 1
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.query("asdf");
+ alert_msg.should.match /An error occurred querying the database/
+ end
+ end
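+
+ // For orientation: db.query(map, reduce, language, options) builds a
+ // temporary view and, in the jquery.couch.js exercised here, POSTs it to
+ // /<db>/_temp_view; the language argument ("erlang" above) selects the
+ // view server. Temporary views are re-executed on every request, which is
+ // fine for specs and ad-hoc queries but too slow for production use, e.g.:
+ //
+ //   db.query("function(doc){ emit(doc._id, null); }", null, null, {
+ //     success: function(resp){ /* one row per document */ }
+ //   });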
+
+ describe 'view'
+ before_each
+ db.saveDoc({"Name" : "Cally Tyrol", "job" : "deckhand", "_id" : "789"});
+ db.saveDoc({"Name" : "Felix Gaeta", "job" : "officer", "_id" : "123"});
+ db.saveDoc({"Name" : "Samuel T. Anders", "job" : "pilot", "_id" : "456"});
+ view = {
+ "views" : {
+ "people" : {
+ "map" : "function(doc) { emit(doc._id, doc.Name); }"
+ }
+ },
+ "_id" : "_design/spec_db"
+ };
+ db.saveDoc(view);
+ end
+
+ it 'should apply the view'
+ db.view('spec_db/people', {
+ success: function(resp){
+ resp.rows.should.have_length 3
+ resp.rows[0].id.should.eql "123"
+ resp.rows[0].key.should.eql "123"
+ resp.rows[0].value.should.eql "Felix Gaeta"
+ resp.rows[1].id.should.eql "456"
+ resp.rows[1].key.should.eql "456"
+ resp.rows[1].value.should.eql "Samuel T. Anders"
+ resp.rows[2].id.should.eql "789"
+ resp.rows[2].key.should.eql "789"
+ resp.rows[2].value.should.eql "Cally Tyrol"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should pass through the options'
+ db.view('spec_db/people', {
+ "skip":"2",
+ success: function(resp){
+ resp.rows.should.have_length 1
+ resp.rows[0].id.should.eql "789"
+ resp.rows[0].key.should.eql "789"
+ resp.rows[0].value.should.eql "Cally Tyrol"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should pass through the keys'
+ db.view('spec_db/people', {
+ "keys":["456", "123"],
+ success: function(resp){
+ resp.rows.should.have_length 2
+ resp.rows[0].id.should.eql "456"
+ resp.rows[0].key.should.eql "456"
+ resp.rows[0].value.should.eql "Samuel T. Anders"
+ resp.rows[1].id.should.eql "123"
+ resp.rows[1].key.should.eql "123"
+ resp.rows[1].value.should.eql "Felix Gaeta"
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should pass through the options and the keys'
+ db.view('spec_db/people', {
+ "include_docs":"true",
+ "keys":["456"],
+ success: function(resp){
+ resp.rows.should.have_length 1
+ resp.rows[0].id.should.eql "456"
+ resp.rows[0].key.should.eql "456"
+ resp.rows[0].value.should.eql "Samuel T. Anders"
+ resp.rows[0].doc["job"].should.eql "pilot"
+ resp.rows[0].doc["_rev"].length.should.be_at_least 30
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should throw a 404 when the view does not exist'
+ db.view('spec_db/non_existing_view', {
+ error: function(status, error, reason){
+ status.should.eql 404
+ error.should.eql "not_found"
+ reason.should.eql "missing_named_view"
+ },
+ success: function(resp){successCallback(resp)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.view("asdf");
+ alert_msg.should.match /An error occurred accessing the view/
+ end
+ end
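+
+ // For orientation: db.view("spec_db/people") resolves to
+ // GET /<db>/_design/spec_db/_view/people, and when a "keys" array is
+ // passed jquery.couch.js switches to a POST with {"keys": [...]} as the
+ // request body; that is why keys work for views here while remaining a
+ // TODO for db.query above.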
+
+ describe 'setDbProperty'
+ it 'should return ok true'
+ db.setDbProperty("_revs_limit", 1500, {
+ success: function(resp){
+ resp.ok.should.be_true
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should set a db property'
+ db.setDbProperty("_revs_limit", 1500);
+ db.getDbProperty("_revs_limit", {
+ success: function(resp){
+ resp.should.eql 1500
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ db.setDbProperty("_revs_limit", 1200);
+ db.getDbProperty("_revs_limit", {
+ success: function(resp){
+ resp.should.eql 1200
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.setDbProperty("asdf");
+ alert_msg.should.match /The property could not be updated/
+ end
+ end
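+
+ // For orientation: setDbProperty (and getDbProperty below) map to PUT and
+ // GET on /<db>/<property>; "_revs_limit" above caps how many revision ids
+ // the database remembers per document. A minimal sketch:
+ //
+ //   db.setDbProperty("_revs_limit", 1000);  // PUT /spec_db/_revs_limit
+ //   db.getDbProperty("_revs_limit", {success: function(n){ /* 1000 */ }});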
+
+ describe 'getDbProperty'
+ it 'should get a db property'
+ db.setDbProperty("_revs_limit", 1200);
+ db.getDbProperty("_revs_limit", {
+ success: function(resp){
+ resp.should.eql 1200
+ },
+ error: function(status, error, reason){errorCallback(status, error, reason)}
+ });
+ end
+
+ it 'should throw a 404 when the property does not exist'
+ db.getDbProperty("_doesnt_exist", {
+ error: function(status, error, reason){
+ status.should.eql 404
+ error.should.eql "not_found"
+ reason.should.eql "missing"
+ },
+ success: function(resp){successCallback(resp)}
+ });
+ end
+
+ it 'should alert with an error message prefix'
+ db.getDbProperty("asdf");
+ alert_msg.should.match /The property could not be retrieved/
+ end
+ end
+end
\ No newline at end of file
diff --git a/1.1.x/share/www/spec/run.html b/1.1.x/share/www/spec/run.html
new file mode 100644
index 00000000..e438333d
--- /dev/null
+++ b/1.1.x/share/www/spec/run.html
@@ -0,0 +1,46 @@
+<!--
+Licensed under the Apache License, Version 2.0 (the "License"); you may not
+use this file except in compliance with the License. You may obtain a copy of
+the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations under
+the License.
+-->
+<html>
+ <head>
+ <link type="text/css" rel="stylesheet" href="../script/jspec/jspec.css" />
+ <script src="../script/jquery.js"></script>
+ <script src="../script/sha1.js"></script>
+ <script src="../script/jspec/jspec.js"></script>
+ <script src="../script/jspec/jspec.jquery.js"></script>
+ <script src="../script/jspec/jspec.xhr.js"></script>
+ <script src="./custom_helpers.js"></script>
+ <script src="../script/couch.js"></script>
+ <script src="../script/jquery.couch.js"></script>
+ <script>
+ function runSuites() {
+ JSpec
+ .exec('couch_js_class_methods_spec.js')
+ .exec('couch_js_instance_methods_1_spec.js')
+ .exec('couch_js_instance_methods_2_spec.js')
+ .exec('couch_js_instance_methods_3_spec.js')
+ .exec('jquery_couch_js_class_methods_spec.js')
+ .exec('jquery_couch_js_instance_methods_1_spec.js')
+ .exec('jquery_couch_js_instance_methods_2_spec.js')
+ .exec('jquery_couch_js_instance_methods_3_spec.js')
+ .run({failuresOnly: true})
+ .report()
+ }
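+      // Additional spec files could be chained here with further .exec()
+      // calls before .run(); {failuresOnly: true} limits the report to
+      // failing assertions.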
+ </script>
+ </head>
+ <body class="jspec" onLoad="runSuites();">
+ <div id="jspec-top"><h2 id="jspec-title">JSpec <em><script>document.write(JSpec.version)</script></em></h2></div>
+ <div id="jspec"></div>
+ <div id="jspec-bottom"></div>
+ </body>
+</html>
diff --git a/1.1.x/share/www/status.html b/1.1.x/share/www/status.html
new file mode 100644
index 00000000..2067ab9b
--- /dev/null
+++ b/1.1.x/share/www/status.html
@@ -0,0 +1,109 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Status</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css?0.11.0" type="text/css">
+ <script src="script/json2.js"></script>
+ <script src="script/sha1.js"></script>
+ <script src="script/jquery.js?1.4.2"></script>
+ <script src="script/jquery.couch.js?0.11.0"></script>
+ <script src="script/jquery.dialog.js?0.11.0"></script>
+ <script src="script/futon.js?0.11.0"></script>
+ </head>
+ <body><div id="wrap">
+ <h1>
+ <a href="index.html">Overview</a>
+ <strong>Status</strong>
+ </h1>
+ <div id="content">
+ <div id="interval">
+ <label>Poll interval:
+ <input type="range" min="1" max="30" value="5" size="3">
+ <span class="secs">5</span> second(s)
+ </label>
+ </div>
+ <table id="status" class="listing" cellspacing="0">
+ <caption>Active Tasks</caption>
+ <thead><tr>
+ <th>Type</th>
+ <th>Object</th>
+ <th>PID</th>
+ <th>Status</th>
+ </tr></thead>
+ <tbody class="content"></tbody>
+ </table>
+
+ </div>
+ </div>
+ <script>
+ var refreshTimeout = null;
+
+ $.futon.storage.declare("poll_interval", {defaultValue: 5});
+
+ function refresh() {
+ $.couch.activeTasks({
+ success: function(tasks) {
+ clearTimeout(refreshTimeout);
+ $("#status tbody.content").empty();
+ if (!tasks.length) {
+ $("<tr class='none'><th colspan='4'>No tasks running</th></tr>")
+ .appendTo("#status tbody.content");
+ } else {
+ $.each(tasks, function(idx, task) {
+ $("<tr><th></th><td class='object'></td><td class='pid'></td><td class='status'></td></tr>")
+ .find("th").text(task.type).end()
+ .find("td.object").text(task.task).end()
+ .find("td.pid").text(task.pid).end()
+ .find("td.status").text(task.status).end()
+ .appendTo("#status tbody.content");
+ });
+ }
+ refreshTimeout = setTimeout(refresh,
+ parseInt($("#interval input").val(), 10) * 1000);
+ }
+ });
+ }
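+
+ // Note: refresh() reschedules itself with setTimeout instead of using
+ // setInterval, so a slow /_active_tasks response never stacks overlapping
+ // polls; the clearTimeout above likewise guards against duplicate timers
+ // when refresh() is called directly from updateInterval().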
+
+ function updateInterval(value) {
+ if (isNaN(value)) {
+ value = 5;
+ $("#interval input").val(value);
+ }
+ $("#interval .secs").text(value);
+ refresh();
+ $.futon.storage.set("poll_interval", value);
+ }
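+
+ // The interval slider degrades gracefully: browsers without support for
+ // <input type="range"> expose the element as type "text", so the code
+ // below binds "change" instead of "input" and hides the seconds label.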
+
+ $(function() {
+ var slider = $("#interval input");
+ slider.val(parseInt($.futon.storage.get("poll_interval"), 10));
+ if (slider[0].type == "range") {
+ slider.bind("input", function() {
+ updateInterval(this.value);
+ });
+ $("#interval .secs").text($("#interval input").val());
+ } else {
+ slider.bind("change", function() {
+ updateInterval(this.value);
+ });
+ $("#interval .secs").hide();
+ }
+ refresh();
+ });
+ </script>
+ </body>
+</html>
diff --git a/1.1.x/share/www/style/jquery-ui-1.8.11.custom.css b/1.1.x/share/www/style/jquery-ui-1.8.11.custom.css
new file mode 100644
index 00000000..a6b2f744
--- /dev/null
+++ b/1.1.x/share/www/style/jquery-ui-1.8.11.custom.css
@@ -0,0 +1,347 @@
+/*
+ * jQuery UI CSS Framework 1.8.11
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Theming/API
+ */
+
+/* Layout helpers
+----------------------------------*/
+.ui-helper-hidden { display: none; }
+.ui-helper-hidden-accessible { position: absolute !important; clip: rect(1px 1px 1px 1px); clip: rect(1px,1px,1px,1px); }
+.ui-helper-reset { margin: 0; padding: 0; border: 0; outline: 0; line-height: 1.3; text-decoration: none; font-size: 100%; list-style: none; }
+.ui-helper-clearfix:after { content: "."; display: block; height: 0; clear: both; visibility: hidden; }
+.ui-helper-clearfix { display: inline-block; }
+/* required comment for clearfix to work in Opera \*/
+* html .ui-helper-clearfix { height:1%; }
+.ui-helper-clearfix { display:block; }
+/* end clearfix */
+.ui-helper-zfix { width: 100%; height: 100%; top: 0; left: 0; position: absolute; opacity: 0; filter:Alpha(Opacity=0); }
+
+
+/* Interaction Cues
+----------------------------------*/
+.ui-state-disabled { cursor: default !important; }
+
+
+/* Icons
+----------------------------------*/
+
+/* states and images */
+.ui-icon { display: block; text-indent: -99999px; overflow: hidden; background-repeat: no-repeat; }
+
+
+/* Misc visuals
+----------------------------------*/
+
+/* Overlays */
+.ui-widget-overlay { position: absolute; top: 0; left: 0; width: 100%; height: 100%; }
+
+
+/*
+ * jQuery UI CSS Framework 1.8.11
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Theming/API
+ *
+ * To view and modify this theme, visit http://jqueryui.com/themeroller/?ffDefault=Trebuchet%20MS,%20Tahoma,%20Verdana,%20Arial,%20sans-serif&fwDefault=bold&fsDefault=1.1em&cornerRadius=4px&bgColorHeader=f6a828&bgTextureHeader=12_gloss_wave.png&bgImgOpacityHeader=35&borderColorHeader=e78f08&fcHeader=ffffff&iconColorHeader=ffffff&bgColorContent=eeeeee&bgTextureContent=03_highlight_soft.png&bgImgOpacityContent=100&borderColorContent=dddddd&fcContent=333333&iconColorContent=222222&bgColorDefault=f6f6f6&bgTextureDefault=02_glass.png&bgImgOpacityDefault=100&borderColorDefault=cccccc&fcDefault=1c94c4&iconColorDefault=ef8c08&bgColorHover=fdf5ce&bgTextureHover=02_glass.png&bgImgOpacityHover=100&borderColorHover=fbcb09&fcHover=c77405&iconColorHover=ef8c08&bgColorActive=ffffff&bgTextureActive=02_glass.png&bgImgOpacityActive=65&borderColorActive=fbd850&fcActive=eb8f00&iconColorActive=ef8c08&bgColorHighlight=ffe45c&bgTextureHighlight=03_highlight_soft.png&bgImgOpacityHighlight=75&borderColorHighlight=fed22f&fcHighlight=363636&iconColorHighlight=228ef1&bgColorError=b81900&bgTextureError=08_diagonals_thick.png&bgImgOpacityError=18&borderColorError=cd0a0a&fcError=ffffff&iconColorError=ffd27a&bgColorOverlay=666666&bgTextureOverlay=08_diagonals_thick.png&bgImgOpacityOverlay=20&opacityOverlay=50&bgColorShadow=000000&bgTextureShadow=01_flat.png&bgImgOpacityShadow=10&opacityShadow=20&thicknessShadow=5px&offsetTopShadow=-5px&offsetLeftShadow=-5px&cornerRadiusShadow=5px
+ */
+
+
+/* Component containers
+----------------------------------*/
+.ui-widget { font-family: Trebuchet MS, Tahoma, Verdana, Arial, sans-serif; font-size: 1.1em; }
+.ui-widget .ui-widget { font-size: 1em; }
+.ui-widget input, .ui-widget select, .ui-widget textarea, .ui-widget button { font-family: Trebuchet MS, Tahoma, Verdana, Arial, sans-serif; font-size: 1em; }
+.ui-widget-content { border: 1px solid #dddddd; background: #eeeeee 50% top repeat-x; color: #333333; }
+.ui-widget-content a { color: #333333; }
+.ui-widget-header { border: 1px solid #e78f08; background: #f6a828 50% 50% repeat-x; color: #ffffff; font-weight: bold; }
+.ui-widget-header a { color: #ffffff; }
+
+/* Interaction states
+----------------------------------*/
+.ui-state-default, .ui-widget-content .ui-state-default, .ui-widget-header .ui-state-default { border: 1px solid #cccccc; background: #f6f6f6 50% 50% repeat-x; font-weight: bold; color: #1c94c4; }
+.ui-state-default a, .ui-state-default a:link, .ui-state-default a:visited { color: #1c94c4; text-decoration: none; }
+.ui-state-hover, .ui-widget-content .ui-state-hover, .ui-widget-header .ui-state-hover, .ui-state-focus, .ui-widget-content .ui-state-focus, .ui-widget-header .ui-state-focus { border: 1px solid #fbcb09; background: #fdf5ce 50% 50% repeat-x; font-weight: bold; color: #c77405; }
+.ui-state-hover a, .ui-state-hover a:hover { color: #c77405; text-decoration: none; }
+.ui-state-active, .ui-widget-content .ui-state-active, .ui-widget-header .ui-state-active { border: 1px solid #fbd850; background: #ffffff 50% 50% repeat-x; font-weight: bold; color: #eb8f00; }
+.ui-state-active a, .ui-state-active a:link, .ui-state-active a:visited { color: #eb8f00; text-decoration: none; }
+.ui-widget :active { outline: none; }
+
+/* Interaction Cues
+----------------------------------*/
+.ui-state-highlight, .ui-widget-content .ui-state-highlight, .ui-widget-header .ui-state-highlight {border: 1px solid #fed22f; background: #ffe45c 50% top repeat-x; color: #363636; }
+.ui-state-highlight a, .ui-widget-content .ui-state-highlight a,.ui-widget-header .ui-state-highlight a { color: #363636; }
+.ui-state-error, .ui-widget-content .ui-state-error, .ui-widget-header .ui-state-error {border: 1px solid #cd0a0a; background: #b81900 50% 50% repeat; color: #ffffff; }
+.ui-state-error a, .ui-widget-content .ui-state-error a, .ui-widget-header .ui-state-error a { color: #ffffff; }
+.ui-state-error-text, .ui-widget-content .ui-state-error-text, .ui-widget-header .ui-state-error-text { color: #ffffff; }
+.ui-priority-primary, .ui-widget-content .ui-priority-primary, .ui-widget-header .ui-priority-primary { font-weight: bold; }
+.ui-priority-secondary, .ui-widget-content .ui-priority-secondary, .ui-widget-header .ui-priority-secondary { opacity: .7; filter:Alpha(Opacity=70); font-weight: normal; }
+.ui-state-disabled, .ui-widget-content .ui-state-disabled, .ui-widget-header .ui-state-disabled { opacity: .35; filter:Alpha(Opacity=35); background-image: none; }
+
+/* Icons
+----------------------------------*/
+
+/* states and images */
+.ui-icon { width: 16px; height: 16px; background-image: url(images/ui-icons_222222_256x240.png); }
+.ui-widget-content .ui-icon {background-image: url(images/ui-icons_222222_256x240.png); }
+.ui-widget-header .ui-icon {background-image: url(images/ui-icons_ffffff_256x240.png); }
+.ui-state-default .ui-icon { background-image: url(images/ui-icons_ef8c08_256x240.png); }
+.ui-state-hover .ui-icon, .ui-state-focus .ui-icon {background-image: url(images/ui-icons_ef8c08_256x240.png); }
+.ui-state-active .ui-icon {background-image: url(images/ui-icons_ef8c08_256x240.png); }
+.ui-state-highlight .ui-icon {background-image: url(images/ui-icons_228ef1_256x240.png); }
+.ui-state-error .ui-icon, .ui-state-error-text .ui-icon {background-image: url(images/ui-icons_ffd27a_256x240.png); }
+
+/* positioning */
+.ui-icon-carat-1-n { background-position: 0 0; }
+.ui-icon-carat-1-ne { background-position: -16px 0; }
+.ui-icon-carat-1-e { background-position: -32px 0; }
+.ui-icon-carat-1-se { background-position: -48px 0; }
+.ui-icon-carat-1-s { background-position: -64px 0; }
+.ui-icon-carat-1-sw { background-position: -80px 0; }
+.ui-icon-carat-1-w { background-position: -96px 0; }
+.ui-icon-carat-1-nw { background-position: -112px 0; }
+.ui-icon-carat-2-n-s { background-position: -128px 0; }
+.ui-icon-carat-2-e-w { background-position: -144px 0; }
+.ui-icon-triangle-1-n { background-position: 0 -16px; }
+.ui-icon-triangle-1-ne { background-position: -16px -16px; }
+.ui-icon-triangle-1-e { background-position: -32px -16px; }
+.ui-icon-triangle-1-se { background-position: -48px -16px; }
+.ui-icon-triangle-1-s { background-position: -64px -16px; }
+.ui-icon-triangle-1-sw { background-position: -80px -16px; }
+.ui-icon-triangle-1-w { background-position: -96px -16px; }
+.ui-icon-triangle-1-nw { background-position: -112px -16px; }
+.ui-icon-triangle-2-n-s { background-position: -128px -16px; }
+.ui-icon-triangle-2-e-w { background-position: -144px -16px; }
+.ui-icon-arrow-1-n { background-position: 0 -32px; }
+.ui-icon-arrow-1-ne { background-position: -16px -32px; }
+.ui-icon-arrow-1-e { background-position: -32px -32px; }
+.ui-icon-arrow-1-se { background-position: -48px -32px; }
+.ui-icon-arrow-1-s { background-position: -64px -32px; }
+.ui-icon-arrow-1-sw { background-position: -80px -32px; }
+.ui-icon-arrow-1-w { background-position: -96px -32px; }
+.ui-icon-arrow-1-nw { background-position: -112px -32px; }
+.ui-icon-arrow-2-n-s { background-position: -128px -32px; }
+.ui-icon-arrow-2-ne-sw { background-position: -144px -32px; }
+.ui-icon-arrow-2-e-w { background-position: -160px -32px; }
+.ui-icon-arrow-2-se-nw { background-position: -176px -32px; }
+.ui-icon-arrowstop-1-n { background-position: -192px -32px; }
+.ui-icon-arrowstop-1-e { background-position: -208px -32px; }
+.ui-icon-arrowstop-1-s { background-position: -224px -32px; }
+.ui-icon-arrowstop-1-w { background-position: -240px -32px; }
+.ui-icon-arrowthick-1-n { background-position: 0 -48px; }
+.ui-icon-arrowthick-1-ne { background-position: -16px -48px; }
+.ui-icon-arrowthick-1-e { background-position: -32px -48px; }
+.ui-icon-arrowthick-1-se { background-position: -48px -48px; }
+.ui-icon-arrowthick-1-s { background-position: -64px -48px; }
+.ui-icon-arrowthick-1-sw { background-position: -80px -48px; }
+.ui-icon-arrowthick-1-w { background-position: -96px -48px; }
+.ui-icon-arrowthick-1-nw { background-position: -112px -48px; }
+.ui-icon-arrowthick-2-n-s { background-position: -128px -48px; }
+.ui-icon-arrowthick-2-ne-sw { background-position: -144px -48px; }
+.ui-icon-arrowthick-2-e-w { background-position: -160px -48px; }
+.ui-icon-arrowthick-2-se-nw { background-position: -176px -48px; }
+.ui-icon-arrowthickstop-1-n { background-position: -192px -48px; }
+.ui-icon-arrowthickstop-1-e { background-position: -208px -48px; }
+.ui-icon-arrowthickstop-1-s { background-position: -224px -48px; }
+.ui-icon-arrowthickstop-1-w { background-position: -240px -48px; }
+.ui-icon-arrowreturnthick-1-w { background-position: 0 -64px; }
+.ui-icon-arrowreturnthick-1-n { background-position: -16px -64px; }
+.ui-icon-arrowreturnthick-1-e { background-position: -32px -64px; }
+.ui-icon-arrowreturnthick-1-s { background-position: -48px -64px; }
+.ui-icon-arrowreturn-1-w { background-position: -64px -64px; }
+.ui-icon-arrowreturn-1-n { background-position: -80px -64px; }
+.ui-icon-arrowreturn-1-e { background-position: -96px -64px; }
+.ui-icon-arrowreturn-1-s { background-position: -112px -64px; }
+.ui-icon-arrowrefresh-1-w { background-position: -128px -64px; }
+.ui-icon-arrowrefresh-1-n { background-position: -144px -64px; }
+.ui-icon-arrowrefresh-1-e { background-position: -160px -64px; }
+.ui-icon-arrowrefresh-1-s { background-position: -176px -64px; }
+.ui-icon-arrow-4 { background-position: 0 -80px; }
+.ui-icon-arrow-4-diag { background-position: -16px -80px; }
+.ui-icon-extlink { background-position: -32px -80px; }
+.ui-icon-newwin { background-position: -48px -80px; }
+.ui-icon-refresh { background-position: -64px -80px; }
+.ui-icon-shuffle { background-position: -80px -80px; }
+.ui-icon-transfer-e-w { background-position: -96px -80px; }
+.ui-icon-transferthick-e-w { background-position: -112px -80px; }
+.ui-icon-folder-collapsed { background-position: 0 -96px; }
+.ui-icon-folder-open { background-position: -16px -96px; }
+.ui-icon-document { background-position: -32px -96px; }
+.ui-icon-document-b { background-position: -48px -96px; }
+.ui-icon-note { background-position: -64px -96px; }
+.ui-icon-mail-closed { background-position: -80px -96px; }
+.ui-icon-mail-open { background-position: -96px -96px; }
+.ui-icon-suitcase { background-position: -112px -96px; }
+.ui-icon-comment { background-position: -128px -96px; }
+.ui-icon-person { background-position: -144px -96px; }
+.ui-icon-print { background-position: -160px -96px; }
+.ui-icon-trash { background-position: -176px -96px; }
+.ui-icon-locked { background-position: -192px -96px; }
+.ui-icon-unlocked { background-position: -208px -96px; }
+.ui-icon-bookmark { background-position: -224px -96px; }
+.ui-icon-tag { background-position: -240px -96px; }
+.ui-icon-home { background-position: 0 -112px; }
+.ui-icon-flag { background-position: -16px -112px; }
+.ui-icon-calendar { background-position: -32px -112px; }
+.ui-icon-cart { background-position: -48px -112px; }
+.ui-icon-pencil { background-position: -64px -112px; }
+.ui-icon-clock { background-position: -80px -112px; }
+.ui-icon-disk { background-position: -96px -112px; }
+.ui-icon-calculator { background-position: -112px -112px; }
+.ui-icon-zoomin { background-position: -128px -112px; }
+.ui-icon-zoomout { background-position: -144px -112px; }
+.ui-icon-search { background-position: -160px -112px; }
+.ui-icon-wrench { background-position: -176px -112px; }
+.ui-icon-gear { background-position: -192px -112px; }
+.ui-icon-heart { background-position: -208px -112px; }
+.ui-icon-star { background-position: -224px -112px; }
+.ui-icon-link { background-position: -240px -112px; }
+.ui-icon-cancel { background-position: 0 -128px; }
+.ui-icon-plus { background-position: -16px -128px; }
+.ui-icon-plusthick { background-position: -32px -128px; }
+.ui-icon-minus { background-position: -48px -128px; }
+.ui-icon-minusthick { background-position: -64px -128px; }
+.ui-icon-close { background-position: -80px -128px; }
+.ui-icon-closethick { background-position: -96px -128px; }
+.ui-icon-key { background-position: -112px -128px; }
+.ui-icon-lightbulb { background-position: -128px -128px; }
+.ui-icon-scissors { background-position: -144px -128px; }
+.ui-icon-clipboard { background-position: -160px -128px; }
+.ui-icon-copy { background-position: -176px -128px; }
+.ui-icon-contact { background-position: -192px -128px; }
+.ui-icon-image { background-position: -208px -128px; }
+.ui-icon-video { background-position: -224px -128px; }
+.ui-icon-script { background-position: -240px -128px; }
+.ui-icon-alert { background-position: 0 -144px; }
+.ui-icon-info { background-position: -16px -144px; }
+.ui-icon-notice { background-position: -32px -144px; }
+.ui-icon-help { background-position: -48px -144px; }
+.ui-icon-check { background-position: -64px -144px; }
+.ui-icon-bullet { background-position: -80px -144px; }
+.ui-icon-radio-off { background-position: -96px -144px; }
+.ui-icon-radio-on { background-position: -112px -144px; }
+.ui-icon-pin-w { background-position: -128px -144px; }
+.ui-icon-pin-s { background-position: -144px -144px; }
+.ui-icon-play { background-position: 0 -160px; }
+.ui-icon-pause { background-position: -16px -160px; }
+.ui-icon-seek-next { background-position: -32px -160px; }
+.ui-icon-seek-prev { background-position: -48px -160px; }
+.ui-icon-seek-end { background-position: -64px -160px; }
+.ui-icon-seek-start { background-position: -80px -160px; }
+/* ui-icon-seek-first is deprecated, use ui-icon-seek-start instead */
+.ui-icon-seek-first { background-position: -80px -160px; }
+.ui-icon-stop { background-position: -96px -160px; }
+.ui-icon-eject { background-position: -112px -160px; }
+.ui-icon-volume-off { background-position: -128px -160px; }
+.ui-icon-volume-on { background-position: -144px -160px; }
+.ui-icon-power { background-position: 0 -176px; }
+.ui-icon-signal-diag { background-position: -16px -176px; }
+.ui-icon-signal { background-position: -32px -176px; }
+.ui-icon-battery-0 { background-position: -48px -176px; }
+.ui-icon-battery-1 { background-position: -64px -176px; }
+.ui-icon-battery-2 { background-position: -80px -176px; }
+.ui-icon-battery-3 { background-position: -96px -176px; }
+.ui-icon-circle-plus { background-position: 0 -192px; }
+.ui-icon-circle-minus { background-position: -16px -192px; }
+.ui-icon-circle-close { background-position: -32px -192px; }
+.ui-icon-circle-triangle-e { background-position: -48px -192px; }
+.ui-icon-circle-triangle-s { background-position: -64px -192px; }
+.ui-icon-circle-triangle-w { background-position: -80px -192px; }
+.ui-icon-circle-triangle-n { background-position: -96px -192px; }
+.ui-icon-circle-arrow-e { background-position: -112px -192px; }
+.ui-icon-circle-arrow-s { background-position: -128px -192px; }
+.ui-icon-circle-arrow-w { background-position: -144px -192px; }
+.ui-icon-circle-arrow-n { background-position: -160px -192px; }
+.ui-icon-circle-zoomin { background-position: -176px -192px; }
+.ui-icon-circle-zoomout { background-position: -192px -192px; }
+.ui-icon-circle-check { background-position: -208px -192px; }
+.ui-icon-circlesmall-plus { background-position: 0 -208px; }
+.ui-icon-circlesmall-minus { background-position: -16px -208px; }
+.ui-icon-circlesmall-close { background-position: -32px -208px; }
+.ui-icon-squaresmall-plus { background-position: -48px -208px; }
+.ui-icon-squaresmall-minus { background-position: -64px -208px; }
+.ui-icon-squaresmall-close { background-position: -80px -208px; }
+.ui-icon-grip-dotted-vertical { background-position: 0 -224px; }
+.ui-icon-grip-dotted-horizontal { background-position: -16px -224px; }
+.ui-icon-grip-solid-vertical { background-position: -32px -224px; }
+.ui-icon-grip-solid-horizontal { background-position: -48px -224px; }
+.ui-icon-gripsmall-diagonal-se { background-position: -64px -224px; }
+.ui-icon-grip-diagonal-se { background-position: -80px -224px; }
+
+
+/* Misc visuals
+----------------------------------*/
+
+/* Corner radius */
+.ui-corner-tl { -moz-border-radius-topleft: 4px; -webkit-border-top-left-radius: 4px; border-top-left-radius: 4px; }
+.ui-corner-tr { -moz-border-radius-topright: 4px; -webkit-border-top-right-radius: 4px; border-top-right-radius: 4px; }
+.ui-corner-bl { -moz-border-radius-bottomleft: 4px; -webkit-border-bottom-left-radius: 4px; border-bottom-left-radius: 4px; }
+.ui-corner-br { -moz-border-radius-bottomright: 4px; -webkit-border-bottom-right-radius: 4px; border-bottom-right-radius: 4px; }
+.ui-corner-top { -moz-border-radius-topleft: 4px; -webkit-border-top-left-radius: 4px; border-top-left-radius: 4px; -moz-border-radius-topright: 4px; -webkit-border-top-right-radius: 4px; border-top-right-radius: 4px; }
+.ui-corner-bottom { -moz-border-radius-bottomleft: 4px; -webkit-border-bottom-left-radius: 4px; border-bottom-left-radius: 4px; -moz-border-radius-bottomright: 4px; -webkit-border-bottom-right-radius: 4px; border-bottom-right-radius: 4px; }
+.ui-corner-right { -moz-border-radius-topright: 4px; -webkit-border-top-right-radius: 4px; border-top-right-radius: 4px; -moz-border-radius-bottomright: 4px; -webkit-border-bottom-right-radius: 4px; border-bottom-right-radius: 4px; }
+.ui-corner-left { -moz-border-radius-topleft: 4px; -webkit-border-top-left-radius: 4px; border-top-left-radius: 4px; -moz-border-radius-bottomleft: 4px; -webkit-border-bottom-left-radius: 4px; border-bottom-left-radius: 4px; }
+.ui-corner-all { -moz-border-radius: 4px; -webkit-border-radius: 4px; border-radius: 4px; }
+
+/* Overlays */
+.ui-widget-overlay { background: #666666 50% 50% repeat; opacity: .50;filter:Alpha(Opacity=50); }
+.ui-widget-shadow { margin: -5px 0 0 -5px; padding: 5px; background: #000000 50% 50% repeat-x; opacity: .20; filter:Alpha(Opacity=20); -moz-border-radius: 5px; -webkit-border-radius: 5px; border-radius: 5px; }
+/*
+ * jQuery UI Autocomplete 1.8.11
+ *
+ * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Autocomplete#theming
+ */
+.ui-autocomplete { position: absolute; cursor: default; }
+
+/* workarounds */
+* html .ui-autocomplete { width:1px; } /* without this, the menu expands to 100% in IE6 */
+
+/*
+ * jQuery UI Menu 1.8.11
+ *
+ * Copyright 2010, AUTHORS.txt (http://jqueryui.com/about)
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * http://docs.jquery.com/UI/Menu#theming
+ */
+.ui-menu {
+ list-style:none;
+ padding: 2px;
+ margin: 0;
+ display:block;
+ float: left;
+}
+.ui-menu .ui-menu {
+ margin-top: -3px;
+}
+.ui-menu .ui-menu-item {
+ margin:0;
+ padding: 0;
+ zoom: 1;
+ float: left;
+ clear: left;
+ width: 100%;
+}
+.ui-menu .ui-menu-item a {
+ text-decoration:none;
+ display:block;
+ padding:.2em .4em;
+ line-height:1.5;
+ zoom:1;
+}
+.ui-menu .ui-menu-item a.ui-state-hover,
+.ui-menu .ui-menu-item a.ui-state-active {
+ font-weight: normal;
+ margin: -1px;
+}
diff --git a/1.1.x/share/www/style/layout.css b/1.1.x/share/www/style/layout.css
new file mode 100644
index 00000000..25195e8a
--- /dev/null
+++ b/1.1.x/share/www/style/layout.css
@@ -0,0 +1,618 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+/* General styles */
+
+html, body { color: #000; font: normal 90% Arial,Helvetica,sans-serif;
+ height: 100%; margin: 0; padding: 0; overflow: hidden;
+}
+:link, :visited { color: #ba1e16; text-decoration: none; }
+:link img, :visited img { border: none; }
+
+h1 { background: #333; border-right: 2px solid #111;
+ border-bottom: 1px solid #333; color: #999;
+ font: normal 125% Arial,Helvetica,sans-serif; height: 32px;
+ line-height: 32px; margin: 0; padding: 0 0 0 .5em; position: relative;
+}
+h1 :link, h1 :visited, h1 strong { padding: .4em .5em; }
+h1 :link, h1 :visited {
+ background: url(../image/path.gif) 100% 50% no-repeat;
+ color: #bbb; cursor: pointer; padding-right: 2.2em;
+ text-shadow: #333 2px 2px 1px;
+}
+h1 strong { color: #fff; font-weight: normal; padding-right: 25px; }
+h1 strong a {color:#fff !important;background:none !important;}
+h1 :link.raw, h1 :visited.raw {
+ background: url(../image/rarrow.png) 100% 50% no-repeat; position: absolute;
+ right: 20px; width: 35px; height: 100%; padding: 0; margin: 0;
+}
+body.loading h1 strong {
+ background: url(../image/spinner_33.gif) right center no-repeat;
+}
+
+hr { border: 1px solid #999; border-width: 1px 0 0; }
+dl dt { font-weight: bold; }
+code, tt, pre {
+ font-family: "DejaVu Sans Mono",Menlo,Courier,monospace;
+}
+code.key { color: #333; font-weight: bold; }
+code.string { color: #393; }
+code.number, code.boolean { color: #339; }
+code.null { color: #666; }
+
+button { font-size: 100%; -webkit-appearance: square-button; }
+button[disabled] { color: #999; }
+input, select, textarea { background: #fff; border: 1px solid;
+ border-color: #999 #ddd #ddd #999; color: #000; margin: 0; padding: 1px;
+}
+input.placeholder { color: #999; }
+textarea {
+ font-family: "DejaVu Sans Mono",Menlo,Courier,monospace;
+ font-size: 100%;
+}
+fieldset { border: none; font-size: 95%; margin: 0; padding: .2em 0 0; }
+fieldset legend { color: #666; font-weight: bold; padding: 0; }
+fieldset input, fieldset select { font-size: 95%; }
+fieldset p { margin: .4em; }
+
+p.help { color: #999; font-size: 90%; margin: 0 2em 1em; }
+
+/* Tabular listings */
+
+table.listing { border-collapse: separate; border-spacing: 0;
+ border: 1px solid #a7a7a7; clear: both; width: 100%;
+}
+table.listing caption { display: none; }
+table.listing th, table.listing td { padding: .2em .5em; }
+table.listing thead th { background: #dadada url(../image/thead.gif) repeat-x;
+ border: 1px solid #a7a7a7; border-width: 0 0 1px 1px; color: #333;
+ font-size: 95%; font-weight: normal; text-align: left;
+ text-shadow: #999 2px 1px 2px; white-space: nowrap;
+}
+table.listing thead th:first-child { border-left: none; }
+table.listing thead th.key {
+ background: #a7afb6 url(../image/thead-key.gif) 0 0 repeat-x;
+ padding-top: 2px;
+}
+table.listing thead th.key span {
+ background: url(../image/order-asc.gif) 100% 3px no-repeat; cursor: pointer;
+ padding-right: 20px;
+}
+table.listing thead th.desc span {
+ background-image: url(../image/order-desc.gif);
+}
+table.listing tbody tr th, table.listing tbody tr td { background: #feffea; }
+table.listing tbody tr.odd th, table.listing tbody tr.odd td,
+table.listing tbody.odd tr th, table.listing tbody.odd tr td {
+ background: #fff;
+}
+table.listing tbody th, table.listing tbody td {
+ border-left: 1px solid #d9d9d9; padding: .4em .5em; vertical-align: top;
+}
+table.listing tbody th:first-child, table.listing tbody td:first-child {
+ border-left: none;
+}
+table.listing tbody th { text-align: left; }
+table.listing tbody th :link, table.listing tbody th :visited {
+ display: block;
+}
+table.listing tbody.footer tr td { background: #e9e9e9;
+ border-top: 1px solid #a7a7a7; color: #999; font-size: 90%;
+ line-height: 1.8em;
+}
+table.listing tbody.footer #paging { float: right; }
+table.listing tbody.footer #paging a,
+table.listing tbody.footer #paging label {
+ padding: 0 .5em;
+}
+table.listing tbody.footer #paging label { color: #666; }
+table.listing tbody.footer #paging select { font-size: 90%; padding: 0; }
+
+/* Inline editing */
+
+span.editinline-tools { margin: 2px 2px 0; float: right; margin-right: -45px; }
+span.editinline-tools button { background: transparent 0 0 no-repeat;
+ border: none; cursor: pointer; display: block; float: left; margin: 0 .2em;
+ width: 11px; height: 11px;
+}
+span.editinline-tools button:hover { background-position: 0 -22px; }
+span.editinline-tools button:active { background-position: 0 -44px; }
+span.editinline-tools button.apply {
+ background-image: url(../image/apply.gif);
+}
+span.editinline-tools button.cancel {
+ background-image: url(../image/cancel.gif);
+}
+
+/* Resizer grippies */
+
+div.grippie { background: #e9e9e9 url(../image/grippie.gif) 50% 50% no-repeat;
+ border: 1px solid #aaa; border-top: none; min-height: 10px;
+}
+
+/* Suggest results */
+
+ul.suggest-dropdown { border: 1px solid #999; background-color: #eee;
+ padding: 0; margin: 0; list-style: none; opacity: .85;
+ -moz-box-shadow: 2px 2px 10px #333; -webkit-box-shadow: 2px 2px 10px #333;
+}
+ul.suggest-dropdown li { padding: 2px 5px; white-space: nowrap; color: #101010;
+ text-align: left;
+}
+ul.suggest-dropdown li.selected { cursor: pointer; background: Highlight;
+ color: HighlightText;
+}
+
+/* Logo & Navigation */
+
+#sidebar { background: #fff; position: absolute; top: 0; right: -210px;
+ width: 210px; height: 100%;
+}
+body.fullwidth #sidebar { border-bottom: 1px solid #333; right: 0;
+ width: 26px;
+}
+#sidebar-toggle { background: url(../image/sidebar-toggle.png) 0 0 no-repeat;
+ color: #999; cursor: pointer; display: block; position: absolute; right: 0;
+ top: 0; font-size: 110%; width: 26px; height: 32px; text-indent: -9999px;
+}
+#sidebar-toggle:hover { background-position: -26px 0; }
+#sidebar-toggle:focus { outline: none; }
+#sidebar.hidden #sidebar-toggle { background-position: 0 -32px; }
+#sidebar.hidden #sidebar-toggle:hover { background-position: -26px -32px; }
+
+#logo { margin: 30px 0 0; padding: 0 18px 10px; }
+
+#nav { color: #333; font-size: 110%; font-weight: bold; list-style: none;
+ margin: 0; overflow: auto; overflow-x: hidden; padding: 0; width: 210px;
+}
+#nav ul { list-style: none; margin: 0; padding: 0; }
+#nav li { color: #999; margin: 5px 0 0; padding: 3px 0; }
+#nav li span { padding: 0 20px; }
+#nav li.selected { background: #e9e9e9; }
+#nav li li { font-size: 90%; font-weight: normal; margin: 0;
+ padding: 2px 20px 2px 40px;
+}
+#nav li li:hover { background: #e4e4e4; }
+#nav li.selected li:hover { background: #d7d7d7; }
+#nav li li :link, #nav li li :visited { color: #333; display: block;
+ overflow: hidden; text-decoration: none; text-overflow: ellipsis;
+}
+#nav li li :link:hover, #nav li li :visited:hover { color: #000; }
+#nav li li :link:focus, #nav li li :visited:focus { outline: none; }
+#nav li li.selected { background: #aaa !important; border-top: 1px solid #999;
+ color: #fff; padding-top: 1px;
+}
+#nav li li.selected :link, #nav li li.selected :visited { color: #fff; }
+#nav li li.selected :link:hover, #nav li li.selected :visited:hover {
+ color: #fff;
+}
+#nav li button { background: transparent 0 0 no-repeat; border: none;
+ cursor: pointer; width: 15px; height: 15px; margin-left: -20px;
+ position: absolute; vertical-align: top;
+}
+#nav li li:hover button.remove {
+ background-image: url(../image/delete-mini.png);
+}
+#nav li button.remove:hover { background-position: -15px 0; }
+
+#footer { background: #ddd; border-top: 1px solid #bbb; color: #000;
+ font-size: 80%; opacity: .7; padding: 5px 10px; position: absolute; right: 0;
+ bottom: 0; min-height: 1.3em; width: 190px; text-align: right;
+}
+#footer .couch :link, #footer .couch :visited { color: #000; }
+
+#userCtx span { display:none; }
+
+#wrap { background: #fff url(../image/bg.png) 100% 0 repeat-y;
+ height: 100%; margin-right: 210px; position: relative;
+}
+body.fullwidth #wrap { margin-right: 0; }
+#content { padding: 1em 16px 3em 10px; overflow: auto; overflow-y: scroll;
+ position: absolute; top: 33px; bottom: 0; left: 0; right: 0;
+}
+
+/* Toolbar */
+
+#toolbar { font-size: 90%; line-height: 16px; list-style: none;
+ margin: 0 0 .5em; padding: 5px 5px 5px 3px;
+}
+#toolbar li { display: inline; }
+#toolbar li.current {float:right;}
+#toolbar button { background: transparent 2px 2px no-repeat; border: none;
+ color: #666; margin: 0; padding: 2px 1em 2px 22px; cursor: pointer;
+ font-size: 95%; line-height: 16px;
+}
+#toolbar button:hover { background-position: 2px -30px; color: #000; }
+#toolbar button:active { background-position: 2px -62px; color: #000; }
+#toolbar button.add { background-image: url(../image/add.png); }
+#toolbar button.security { background-image: url(../image/key.png); }
+#toolbar button.compact { background-image: url(../image/compact.png); }
+#toolbar button.delete { background-image: url(../image/delete.png); }
+#toolbar button.load { background-image: url(../image/load.png); }
+#toolbar button.run { background-image: url(../image/run.png); }
+#toolbar button.save { background-image: url(../image/save.png); }
+#toolbar button.share { background-image: url(../image/compact.png); }
+
+/* Dialogs */
+
+#overlay { background: #bbb; cursor: wait; position: fixed; width: 100%;
+ height: 100%; top: 0; left: 0;
+}
+* html #overlay { position: absolute;
+ width: expression(document.body.clientWidth + 'px');
+ height: expression(document.body.clientHeight + 'px');
+}
+#dialog { background: #333 url(../image/progress.gif) 50% 50% no-repeat;
+ color: #f4f4f4; overflow: hidden; opacity: .95; max-width: 33em;
+ padding: 1em 1em 0; -moz-border-radius: 7px;
+ -moz-box-shadow: 4px 4px 6px #333; -webkit-border-radius: 7px;
+ -webkit-box-shadow: 4px 4px 6px #333;
+}
+* html #dialog { width: 33em; }
+#dialog.loading { width: 220px; height: 80px; }
+#dialog.loaded { background-image: none; }
+#dialog h2 { background: #666 98% 50% no-repeat;
+ border-top: 1px solid #555; border-bottom: 1px solid #777; color: #ccc;
+ font-size: 110%; font-weight: bold; margin: 0 -1em; padding: .35em 1em;
+}
+body.loading #dialog h2 {
+ background-image: url(../image/spinner_6b.gif);
+}
+#dialog h3 { color: #ccc; font-size: 100%; font-weight: bold; margin: 0 -2em;
+ padding: .35em 2em 0;
+}
+#dialog fieldset { background: #222; border-top: 1px solid #111;
+ margin: 0 0 1em; padding: .5em 1em 1em;
+ -moz-border-radius-bottomleft: 7px; -moz-border-radius-bottomright: 7px;
+ -webkit-border-bottom-left-radius: 7px;
+ -webkit-border-bottom-right-radius: 7px;
+}
+#dialog p.help { color: #bbb; font-size: 95%; margin: 0 0 1em; }
+#dialog fieldset table { margin-top: 1em; }
+#dialog fieldset th, #dialog fieldset td { padding: .5em;
+ vertical-align: top;
+}
+#dialog fieldset th { color: #999; font-weight: bold;
+ text-align: right;
+}
+#dialog fieldset input { background-color: #e9e9e9; vertical-align: middle; }
+#dialog fieldset input.error { background-color: #f9e4e4; }
+#dialog fieldset div.error { padding-top: .3em; color: #b33; }
+#dialog fieldset.radiogroup { padding-top: 1em; }
+#dialog fieldset.radiogroup label { position: relative; padding-left: 25px; }
+#dialog fieldset.radiogroup input { position: absolute; left: 5px; top: 2px; }
+#dialog fieldset.radiogroup p.help { margin-top: .5em; margin-left: 25px; }
+#dialog fieldset.radiogroup hr { border-color: #333; margin-left: 25px; }
+#dialog .buttons { padding: 0 .5em .5em; text-align: right; }
+#dialog .buttons button { background: #444; border: 1px solid #aaa;
+ color: #ddd; cursor: pointer; font-size: 90%; font-weight: normal;
+ margin: 0 0 0 5px; padding: .2em 2em; -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
+#dialog .buttons button[type=submit] { font-weight: bold; }
+#dialog .buttons button:hover { background: #555; }
+#dialog .buttons button:active { background: #333; color: #fff; }
+
+#dialog fieldset td#progress {
+ background: url(../image/progress.gif) 50% 50% no-repeat;
+ visibility: hidden;
+}
+
+/* Document quick jump */
+
+#jumpto { float: right; padding: 5px 10px 5px 5px; line-height: 16px;
+ font-weight: bold; color: #666; font-size: 90%; }
+
+#jumpto input { font-size: 90%; }
+
+/* View selector */
+
+#switch { color: #666; float: right; font-size: 90%; font-weight: bold;
+ line-height: 16px; padding: 5px;
+}
+#switch select { font-size: 90%; }
+
+/* Stale views checkbox */
+
+#staleviews {
+ color: #666; float: right; font-size: 90%;
+ font-weight: bold; line-height: 16px; padding: 5px;
+}
+
+/* View function editing */
+
+#viewcode { background: #fff; border: 1px solid;
+ border-color: #999 #ddd #ddd #999; margin: 0 0 1em; overflow: hidden;
+}
+#viewcode .top, #viewcode .bottom { background-color: #e9e9e9;
+ border: 1px solid; border-color: #ddd #ddd #e9e9e9 #ddd; color: #333;
+ padding: 0 .5em 2px;
+}
+#viewcode .top { border-bottom: 1px solid #ddd; color: #aaa; font-size: 95%; }
+#viewcode .top span { border: none; color: #666; cursor: pointer;
+ display: block; font-size: 90%; margin: 0; padding: 2px 0 0;
+}
+#viewcode .top span#view-toggle {
+ background: url(../image/twisty.gif) 0 -96px no-repeat; padding-left: 15px;
+}
+#viewcode.collapsed .top span#view-toggle { background-position: 0 4px; }
+#viewcode .top a { float: right; font-size: 90%; line-height: 1.4em;
+ padding: 2px 2px 0 0;
+}
+#viewcode .top a:link, #viewcode .top a:visited { color: #999; }
+#viewcode table { border: none; border-collapse: separate; border-spacing: 0;
+ margin: 0; table-layout: fixed; width: 100%; max-width: 100%;
+}
+#viewcode table td { border: none; padding: 0; }
+#viewcode table td.splitter { background: #e9e9e9; width: 4px; }
+#viewcode table td.map { border-right: 1px solid #ccc; }
+#viewcode table td.reduce { border-left: 1px solid #ccc; }
+#viewcode .code label { font-size: 90%; color: #999; padding: 0 .5em;
+ white-space: nowrap;
+}
+#viewcode .code textarea { border: none; border-top: 1px solid #ccc;
+ color: #333; font-size: 11px; margin: 0; min-height: 50px; overflow: auto;
+ padding: .4em 0 0; resize: none; width: 100%;
+}
+#viewcode .code textarea:focus { background: #e9f4ff; }
+#viewcode .bottom { border-bottom: none; clear: left; padding: 1px 3px; }
+#viewcode .bottom button { font-size: 90%; margin: 0 1em 0 0;
+ padding-left: 2em; padding-right: 2em;
+}
+* html #viewcode .bottom button { padding: 0 .5em; }
+*+html #viewcode .bottom button { padding: 0 .5em; }
+#viewcode .bottom button.revert, #viewcode .bottom button.save,
+#viewcode .bottom button.saveas {
+ float: right; margin: 0 0 0 1em;
+}
+#viewcode .bottom button.save { font-weight: bold; }
+#viewcode .bottom label { color: #666; font-size: 90%; }
+#viewcode .grippie { background-position: 50% 50%; }
+#viewcode.collapsed { background: #e9e9e9; }
+#viewcode.collapsed .top { border-bottom: none; }
+#viewcode.collapsed .top span { background-position: 0 3px; }
+#viewcode.collapsed table, #viewcode.collapsed .bottom { display: none; }
+
+#tempwarn { display: none; font-size: 90%; margin: 0 2em 1.5em; }
+#grouptruenotice { display: none; font-size: 90%; margin: 1ex 2em 1.5em; }
+
+/* Database table */
+
+#databases thead th.size, #databases thead th.count, #databases thead th.seq,
+#databases tbody td.size, #databases tbody td.count, #databases tbody td.seq {
+ text-align: right;
+}
+
+/* Documents table */
+
+#documents thead th { line-height: 150%; width: 50%; }
+#documents thead th label { color: #333; float: right; font-size: 90%;
+ text-shadow: none;
+}
+#documents thead th label.disabled { color: #777; }
+#documents thead th label input { vertical-align: middle; }
+#documents thead th label input[type=range] { width: 7em; }
+#documents thead th label output { width: 4em; display: inline-block; }
+#documents tbody.content td { color: #999;
+ font: normal 11px "DejaVu Sans Mono",Menlo,Courier,monospace;
+}
+#documents tbody.content td.key { color: #333; }
+#documents tbody.content td.key a { display: block; }
+#documents tbody.content td.key a strong { font-weight: normal; }
+#documents tbody.content td.key span.docid { color: #999;
+ font: normal 10px Arial,Helvetica,sans-serif;
+}
+#documents tbody.content td.value { font-size: 10px; }
+
+/* Document display tabs */
+
+#tabs { float: right; list-style: none; margin: -1.4em 0 0; }
+#tabs li { display: inline; font-size: 95%; padding: 0; }
+#tabs li.active { font-weight: bold; }
+#tabs :link, #tabs :visited { background: #dadada; color: #666;
+ border: 1px solid #a7a7a7; float: left; margin: 0 0 0 .5em;
+ padding: .5em 2em .3em; position: relative; top: 1px;
+}
+#tabs .active :link, #tabs .active :visited { background: #e9e9e9;
+ border-bottom-color: #e9e9e9; color: #333;
+}
+#tabs :link:focus, #tabs :visited:focus { outline: none; }
+
+/* Document fields table */
+
+#fields { clear: right; table-layout: fixed; }
+#fields col.field { width: 33%; }
+#fields tbody.content th { padding-left: 25px; padding-right: 48px; }
+#fields tbody.content th button.delete {
+ background: url(../image/delete-mini.png) no-repeat; border: none;
+ cursor: pointer; float: left; margin: .2em 5px 0 -20px; padding: 0;
+ width: 15px; height: 15px;
+}
+#fields tbody.content th button.delete:hover { background-position: -15px 0; }
+#fields tbody.content th b { display: block; padding: 2px 2px 2px 3px; }
+#fields tbody.content th b.editinline-container { padding: 0; }
+#fields tbody.content td { color: #999; padding-left: 14px;
+ padding-right: 48px;
+}
+#fields tbody.content td code { display: block; font-size: 11px;
+ padding: 2px 2px 2px 3px; position: relative;
+}
+#fields tbody.content td code.string { white-space: pre-wrap; }
+#fields tbody.content td code.string:before { color: #ccc; content: "“";
+ position: absolute; left: -4px;
+}
+#fields tbody.content td code.string:after { color: #ccc; content: "”"; }
+
+#fields tbody.content td dl { margin: 0; padding: 0; }
+#fields tbody.content td dt {
+ background: transparent url(../image/toggle-collapse.gif) 0 3px no-repeat;
+ clear: left; color: #333; cursor: pointer; line-height: 1em;
+ margin-left: -12px; padding-left: 14px;
+}
+#fields tbody.content td dd { line-height: 1em; margin: 0;
+ padding: 0 0 0 1em;
+}
+#fields tbody.content td dt.collapsed {
+ background-image: url(../image/toggle-expand.gif);
+}
+#fields tbody.content td dt.inline { background-image: none; cursor: default;
+ float: left; margin-left: 0; padding-left: 2px; padding-right: .5em;
+ padding-top: 2px;
+}
+#fields tbody.content td dd code.string { left: 4px; text-indent: -6px; }
+#fields tbody.content td dd code.string:before { position: static; }
+#fields tbody.content input, #fields tbody.content textarea,
+#fields tbody.source textarea {
+ background: #fff; border: 1px solid; border-color: #999 #ddd #ddd #999;
+ margin: 0; padding: 1px; width: 100%;
+}
+#fields tbody.content th input { font-family: inherit; font-size: inherit;
+ font-weight: bold;
+}
+#fields tbody.content td input, #fields tbody.content td textarea,
+#fields tbody.source textarea {
+ font: normal 11px "DejaVu Sans Mono",Menlo,Courier,monospace;
+}
+#fields tbody.content input.invalid,
+#fields tbody.content textarea.invalid,
+#fields tbody.source textarea.invalid {
+ background: #f9f4f4; border-color: #b66 #ebb #ebb #b66;
+}
+#fields tbody.content div.grippie, #fields tbody.source div.grippie {
+ padding: 0 1px; width: 100%;
+}
+#fields tbody.content div.error, #fields tbody.source div.error {
+ color: #d33;
+}
+
+#fields tbody.content td ul.attachments { list-style: none; margin: 0;
+ padding: 0;
+}
+#fields tbody.content td ul.attachments li {
+ margin-bottom: .3em; min-height: 20px; padding-left: 20px;
+}
+#fields tbody.content td ul.attachments tt { font-size: 11px; }
+#fields tbody.content td ul.attachments li span.info { color: #666;
+ display: block; font-size: 95%;
+}
+#fields tbody.content td ul.attachments li button {
+ background: transparent no-repeat; border: none; cursor: pointer;
+ float: left; margin: 0 2px 0 -20px; padding: 0; width: 15px; height: 15px;
+ vertical-align: middle;
+}
+#fields tbody.content td ul.attachments li button:hover {
+ background-position: -15px 0;
+}
+#fields tbody.content td ul.attachments li button.delete {
+ background-image: url(../image/delete-mini.png);
+}
+#fields tbody.source td pre { color: #999; font-size: 11px; line-height: 1.6em;
+ margin: 0; overflow: auto; white-space: pre-wrap; width: 100%;
+}
+#fields tbody.source td.editinline-container { padding-left: 14px; padding-right: 48px; }
+
+/* Test suite */
+
+#tests { table-layout: fixed; }
+#tests thead th.name { width: 20%; }
+#tests thead th.status { padding-left: 20px; width: 10em; }
+#tests thead th.duration { text-align: right; width: 7em; }
+#tests tbody.content th { cursor: help; padding-left: 25px;
+ white-space: nowrap;
+}
+#tests tbody.content th button.run {
+ background: url(../image/run-mini.png) no-repeat; border: none;
+ cursor: pointer; float: left; margin: .2em 5px 0 -20px; padding: 0;
+ width: 15px; height: 15px;
+}
+#tests tbody.content th button.run:hover { background-position: -15px 0; }
+#tests tbody.content td.duration { text-align: right; width: 6em; }
+#tests tbody.content td.status { background-position: 5px 8px;
+ background-repeat: no-repeat; color: #999; padding-left: 20px;
+}
+#tests tbody.content td.details { width: 50%; }
+#tests tbody.content td.details a { border-bottom: 1px dashed #ccc;
+ color: #999; float: right; font-size: 85%;
+}
+#tests tbody.content td.details ol { color: #999; margin: 0;
+ padding: 0 0 0 1.5em;
+}
+#tests tbody.content td.details ol b { color: #333; font-weight: normal; }
+#tests tbody.content td.details ol code { color: #c00; font-size: 100%; }
+#tests tbody.content td.details ol code.error { white-space: pre; }
+#tests tbody.content td.running {
+ background-image: url(../image/running.png); color: #333;
+}
+#tests tbody.content td.success, span.success {
+ background-image: url(../image/test_success.gif); color: #060;
+}
+#tests tbody.content td.error, #tests tbody.content td.failure, span.failure {
+ background-image: url(../image/test_failure.gif) no-repeat; color: #c00;
+}
+
+/* Configuration */
+
+#config tbody th { background: #e6e6e6; border-right: none;
+ border-top: 1px solid #d9d9d9;
+}
+#config tbody td.name { border-left: 1px solid #d9d9d9; color: #333;
+ font-weight: bold;
+}
+#config tbody td.value { padding: 1px 48px 1px 1px; }
+#config tbody td.value code { display: block; font-size: 11px;
+ padding: 2px 2px 2px 3px;
+}
+#config tbody td.value code.editinline-container { padding: 0; }
+#config tbody td input {
+ background: #fff; border: 1px solid; border-color: #999 #ddd #ddd #999;
+ font: normal 11px "DejaVu Sans Mono",Menlo,Courier,monospace;
+ margin: 0; padding: 1px; width: 100%;
+}
+
+/* Replication */
+
+form#replicator { background: #f4f4f4; border: 1px solid;
+ border-color: #999 #ccc #ccc #999; margin: .5em 1em 1.5em; padding: .5em;
+ -moz-border-radius: 7px; -webkit-border-radius: 7px;
+}
+form#replicator fieldset { float: left; padding: 1px; }
+form#replicator p.swap { float: left; margin: 2em 0 0; padding: 1px 1em; }
+form#replicator p.swap button { background: transparent; border: none;
+ color: #666; cursor: pointer; font-size: 150%;
+}
+form#replicator p.swap button:hover { color: #000; }
+form#replicator p.actions { padding: 1px; clear: left; margin: 0;
+ text-align: right;
+}
+
+/* Active tasks */
+
+#interval { color: #666; float: right; font-size: 90%; font-weight: bold;
+ line-height: 16px; padding: 5px;
+}
+#interval input { vertical-align: top; }
+#interval .secs { display: inline-block; width: 2em; text-align: right; }
+
+#status tr.none th { color: #666; font-weight: normal; }
+#status td.object, #status td.pid {
+ font-family: "DejaVu Sans Mono",Menlo,Courier,monospace;
+ font-size: 11px;
+}
+
+
+/* Session */
+#loginSignup {
+ font-size:200%;
+}
diff --git a/1.1.x/src/Makefile.am b/1.1.x/src/Makefile.am
new file mode 100644
index 00000000..b9529f94
--- /dev/null
+++ b/1.1.x/src/Makefile.am
@@ -0,0 +1,13 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+SUBDIRS = couchdb erlang-oauth etap ibrowse mochiweb
diff --git a/1.1.x/src/couchdb/Makefile.am b/1.1.x/src/couchdb/Makefile.am
new file mode 100644
index 00000000..92f6dcf6
--- /dev/null
+++ b/1.1.x/src/couchdb/Makefile.am
@@ -0,0 +1,209 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+SUBDIRS = priv
+
+# devdocdir = $(localdocdir)/developer/couchdb
+couchlibdir = $(localerlanglibdir)/couch-$(version)
+couchincludedir = $(couchlibdir)/include
+couchebindir = $(couchlibdir)/ebin
+
+couchinclude_DATA = couch_db.hrl couch_js_functions.hrl
+couchebin_DATA = $(compiled_files)
+
+# dist_devdoc_DATA = $(doc_base) $(doc_modules)
+
+CLEANFILES = $(compiled_files) $(doc_base)
+
+# CLEANFILES = $(doc_modules) edoc-info
+
+source_files = \
+ couch.erl \
+ couch_app.erl \
+ couch_auth_cache.erl \
+ couch_btree.erl \
+ couch_changes.erl \
+ couch_config.erl \
+ couch_config_writer.erl \
+ couch_db.erl \
+ couch_db_update_notifier.erl \
+ couch_db_update_notifier_sup.erl \
+ couch_doc.erl \
+ couch_event_sup.erl \
+ couch_external_manager.erl \
+ couch_external_server.erl \
+ couch_file.erl \
+ couch_httpd.erl \
+ couch_httpd_db.erl \
+ couch_httpd_auth.erl \
+ couch_httpd_oauth.erl \
+ couch_httpd_external.erl \
+ couch_httpd_show.erl \
+ couch_httpd_view.erl \
+ couch_httpd_misc_handlers.erl \
+ couch_httpd_proxy.erl \
+ couch_httpd_rewrite.erl \
+ couch_httpd_stats_handlers.erl \
+ couch_httpd_vhost.erl \
+ couch_key_tree.erl \
+ couch_log.erl \
+ couch_native_process.erl \
+ couch_os_daemons.erl \
+ couch_os_process.erl \
+ couch_query_servers.erl \
+ couch_ref_counter.erl \
+ couch_rep.erl \
+ couch_rep_att.erl \
+ couch_rep_changes_feed.erl \
+ couch_rep_httpc.erl \
+ couch_rep_missing_revs.erl \
+ couch_rep_reader.erl \
+ couch_rep_sup.erl \
+ couch_rep_writer.erl \
+ couch_replication_manager.erl \
+ couch_server.erl \
+ couch_server_sup.erl \
+ couch_stats_aggregator.erl \
+ couch_stats_collector.erl \
+ couch_stream.erl \
+ couch_task_status.erl \
+ couch_util.erl \
+ couch_uuids.erl \
+ couch_view.erl \
+ couch_view_compactor.erl \
+ couch_view_updater.erl \
+ couch_view_group.erl \
+ couch_db_updater.erl \
+ couch_work_queue.erl
+
+EXTRA_DIST = $(source_files) couch_db.hrl couch_js_functions.hrl
+
+compiled_files = \
+ couch.app \
+ couch.beam \
+ couch_app.beam \
+ couch_auth_cache.beam \
+ couch_btree.beam \
+ couch_changes.beam \
+ couch_config.beam \
+ couch_config_writer.beam \
+ couch_db.beam \
+ couch_db_update_notifier.beam \
+ couch_db_update_notifier_sup.beam \
+ couch_doc.beam \
+ couch_event_sup.beam \
+ couch_external_manager.beam \
+ couch_external_server.beam \
+ couch_file.beam \
+ couch_httpd.beam \
+ couch_httpd_db.beam \
+ couch_httpd_auth.beam \
+ couch_httpd_oauth.beam \
+ couch_httpd_proxy.beam \
+ couch_httpd_external.beam \
+ couch_httpd_show.beam \
+ couch_httpd_view.beam \
+ couch_httpd_misc_handlers.beam \
+ couch_httpd_rewrite.beam \
+ couch_httpd_stats_handlers.beam \
+ couch_httpd_vhost.beam \
+ couch_key_tree.beam \
+ couch_log.beam \
+ couch_native_process.beam \
+ couch_os_daemons.beam \
+ couch_os_process.beam \
+ couch_query_servers.beam \
+ couch_ref_counter.beam \
+ couch_rep.beam \
+ couch_rep_att.beam \
+ couch_rep_changes_feed.beam \
+ couch_rep_httpc.beam \
+ couch_rep_missing_revs.beam \
+ couch_rep_reader.beam \
+ couch_rep_sup.beam \
+ couch_rep_writer.beam \
+ couch_replication_manager.beam \
+ couch_server.beam \
+ couch_server_sup.beam \
+ couch_stats_aggregator.beam \
+ couch_stats_collector.beam \
+ couch_stream.beam \
+ couch_task_status.beam \
+ couch_util.beam \
+ couch_uuids.beam \
+ couch_view.beam \
+ couch_view_compactor.beam \
+ couch_view_updater.beam \
+ couch_view_group.beam \
+ couch_db_updater.beam \
+ couch_work_queue.beam
+
+# doc_base = \
+# erlang.png \
+# index.html \
+# modules-frame.html \
+# overview-summary.html \
+# packages-frame.html \
+# stylesheet.css
+
+# doc_modules = \
+# couch_btree.html \
+# couch_config.html \
+# couch_config_writer.html \
+# couch_db.html \
+# couch_db_update_notifier.html \
+# couch_db_update_notifier_sup.html \
+# couch_doc.html \
+# couch_event_sup.html \
+# couch_file.html \
+# couch_httpd.html \
+# couch_key_tree.html \
+# couch_log.html \
+# couch_query_servers.html \
+# couch_rep.html \
+# couch_rep_sup.html \
+# couch_server.html \
+# couch_server_sup.html \
+# couch_stream.html \
+# couch_util.html \
+# couch_view.html
+
+if WINDOWS
+couch.app: couch.app.tpl
+ modules=`find . -name "couch*.erl" -exec basename {} .erl \; | tr '\n' ',' | sed "s/,$$//"`; \
+ sed -e "s|%package_name%|@package_name@|g" \
+ -e "s|%version%|@version@|g" \
+ -e "s|@modules@|$$modules|g" \
+ -e "s|%localconfdir%|../etc/couchdb|g" \
+ -e "s|@defaultini@|default.ini|g" \
+ -e "s|@localini@|local.ini|g" > \
+ $@ < $<
+else
+couch.app: couch.app.tpl
+ modules=`{ find . -name "*.erl" -exec basename {} .erl \; | tr '\n' ','; echo ''; } | sed "s/,$$//"`; \
+ sed -e "s|%package_name%|@package_name@|g" \
+ -e "s|%version%|@version@|g" \
+ -e "s|@modules@|$$modules|g" \
+ -e "s|%localconfdir%|@localconfdir@|g" \
+ -e "s|@defaultini@|default.ini|g" \
+ -e "s|@localini@|local.ini|g" > \
+ $@ < $<
+ chmod +x $@
+endif
+
+# $(dist_devdoc_DATA): edoc-info
+
+# $(ERL) -noshell -run edoc_run files [\"$<\"]
+
+%.beam: %.erl couch_db.hrl couch_js_functions.hrl
+ $(ERLC) $(ERLC_FLAGS) ${TEST} $<;
+
diff --git a/1.1.x/src/couchdb/couch.app.tpl.in b/1.1.x/src/couchdb/couch.app.tpl.in
new file mode 100644
index 00000000..36b0b34c
--- /dev/null
+++ b/1.1.x/src/couchdb/couch.app.tpl.in
@@ -0,0 +1,29 @@
+{application, couch, [
+ {description, "@package_name@"},
+ {vsn, "@version@"},
+ {modules, [@modules@]},
+ {registered, [
+ couch_config,
+ couch_db_update,
+ couch_db_update_notifier_sup,
+ couch_external_manager,
+ couch_httpd,
+ couch_log,
+ couch_primary_services,
+ couch_query_servers,
+ couch_rep_sup,
+ couch_secondary_services,
+ couch_server,
+ couch_server_sup,
+ couch_stats_aggregator,
+ couch_stats_collector,
+ couch_task_status,
+ couch_view
+ ]},
+ {mod, {couch_app, [
+ "%localconfdir%/@defaultini@",
+ "%localconfdir%/@localini@"
+ ]}},
+ {applications, [kernel, stdlib]},
+ {included_applications, [crypto, sasl, inets, oauth, ibrowse, mochiweb]}
+]}.
diff --git a/1.1.x/src/couchdb/couch.erl b/1.1.x/src/couchdb/couch.erl
new file mode 100644
index 00000000..956e9489
--- /dev/null
+++ b/1.1.x/src/couchdb/couch.erl
@@ -0,0 +1,39 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch).
+
+-compile(export_all).
+
+start() ->
+ application:start(couch).
+
+stop() ->
+ application:stop(couch).
+
+restart() ->
+ case stop() of
+ ok ->
+ start();
+ {error, {not_started,couch}} ->
+ start();
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+reload() ->
+ case supervisor:terminate_child(couch_server_sup, couch_config) of
+ ok ->
+ supervisor:restart_child(couch_server_sup, couch_config);
+ {error, Reason} ->
+ {error, Reason}
+ end.
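+
+% Editor's sketch (illustrative only, not part of the original module): the
+% lifecycle functions above compose from an attached shell roughly as:
+%
+%   1> couch:start().
+%   ok
+%   2> couch:reload().   % bounce couch_config under couch_server_sup
+%   {ok, Pid}
+%   3> couch:restart().
+%   ok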
diff --git a/1.1.x/src/couchdb/couch_app.erl b/1.1.x/src/couchdb/couch_app.erl
new file mode 100644
index 00000000..232953d9
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_app.erl
@@ -0,0 +1,56 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_app).
+
+-behaviour(application).
+
+-include("couch_db.hrl").
+
+-export([start/2, stop/1]).
+
+start(_Type, DefaultIniFiles) ->
+ IniFiles = get_ini_files(DefaultIniFiles),
+ case start_apps([crypto, public_key, sasl, inets, oauth, ssl, ibrowse, mochiweb]) of
+ ok ->
+ couch_server_sup:start_link(IniFiles);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+stop(_) ->
+ ok.
+
+get_ini_files(Default) ->
+ case init:get_argument(couch_ini) of
+ error ->
+ Default;
+ {ok, [[]]} ->
+ Default;
+ {ok, [Values]} ->
+ Values
+ end.
+
+start_apps([]) ->
+ ok;
+start_apps([App|Rest]) ->
+ case application:start(App) of
+ ok ->
+ start_apps(Rest);
+ {error, {already_started, App}} ->
+ start_apps(Rest);
+ {error, _Reason} when App =:= public_key ->
+ % ignore on R12B5
+ start_apps(Rest);
+ {error, _Reason} ->
+ {error, {app_would_not_start, App}}
+ end.
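+
+% Editor's note (an inference, not asserted by the original code): because
+% {error, {already_started, App}} is treated as success, start_apps/1 is
+% idempotent, so starting an overlapping dependency list twice is harmless:
+%
+%   ok = start_apps([crypto, sasl]),
+%   ok = start_apps([crypto, sasl]).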
diff --git a/1.1.x/src/couchdb/couch_auth_cache.erl b/1.1.x/src/couchdb/couch_auth_cache.erl
new file mode 100644
index 00000000..e0715b88
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_auth_cache.erl
@@ -0,0 +1,419 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_auth_cache).
+-behaviour(gen_server).
+
+% public API
+-export([get_user_creds/1]).
+
+% gen_server API
+-export([start_link/0, init/1, handle_call/3, handle_info/2, handle_cast/2]).
+-export([code_change/3, terminate/2]).
+
+-include("couch_db.hrl").
+-include("couch_js_functions.hrl").
+
+-define(STATE, auth_state_ets).
+-define(BY_USER, auth_by_user_ets).
+-define(BY_ATIME, auth_by_atime_ets).
+
+-record(state, {
+ max_cache_size = 0,
+ cache_size = 0,
+ db_notifier = nil
+}).
+
+
+-spec get_user_creds(UserName::string() | binary()) ->
+ Credentials::list() | nil.
+
+get_user_creds(UserName) when is_list(UserName) ->
+ get_user_creds(?l2b(UserName));
+
+get_user_creds(UserName) ->
+ UserCreds = case couch_config:get("admins", ?b2l(UserName)) of
+ "-hashed-" ++ HashedPwdAndSalt ->
+ % the name is an admin; now check whether there is a user doc
+ % with a matching name, salt, and password_sha
+ [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
+ case get_from_cache(UserName) of
+ nil ->
+ [{<<"roles">>, [<<"_admin">>]},
+ {<<"salt">>, ?l2b(Salt)},
+ {<<"password_sha">>, ?l2b(HashedPwd)}];
+ UserProps when is_list(UserProps) ->
+ DocRoles = couch_util:get_value(<<"roles">>, UserProps),
+ [{<<"roles">>, [<<"_admin">> | DocRoles]},
+ {<<"salt">>, ?l2b(Salt)},
+ {<<"password_sha">>, ?l2b(HashedPwd)}]
+ end;
+ _Else ->
+ get_from_cache(UserName)
+ end,
+ validate_user_creds(UserCreds).
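+
+% Editor's sketch of the "admins" ini entry the clause above parses (hash
+% and salt are made-up values):
+%
+%   [admins]
+%   anna = -hashed-6d3c30241ba0aaa4e16c6ea9,7582aeb1701d
+%
+% "-hashed-" ++ HashedPwdAndSalt then matches, and
+% string:tokens(HashedPwdAndSalt, ",") yields [HashedPwd, Salt].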
+
+
+get_from_cache(UserName) ->
+ exec_if_auth_db(
+ fun(_AuthDb) ->
+ maybe_refresh_cache(),
+ case ets:lookup(?BY_USER, UserName) of
+ [] ->
+ gen_server:call(?MODULE, {fetch, UserName}, infinity);
+ [{UserName, {Credentials, _ATime}}] ->
+ couch_stats_collector:increment({couchdb, auth_cache_hits}),
+ gen_server:cast(?MODULE, {cache_hit, UserName}),
+ Credentials
+ end
+ end,
+ nil
+ ).
+
+
+validate_user_creds(nil) ->
+ nil;
+validate_user_creds(UserCreds) ->
+ case couch_util:get_value(<<"_conflicts">>, UserCreds) of
+ undefined ->
+ ok;
+ _ConflictList ->
+ throw({unauthorized,
+ <<"User document conflicts must be resolved before the document",
+ " is used for authentication purposes.">>
+ })
+ end,
+ UserCreds.
+
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+init(_) ->
+ ?STATE = ets:new(?STATE, [set, protected, named_table]),
+ ?BY_USER = ets:new(?BY_USER, [set, protected, named_table]),
+ ?BY_ATIME = ets:new(?BY_ATIME, [ordered_set, private, named_table]),
+ AuthDbName = couch_config:get("couch_httpd_auth", "authentication_db"),
+ true = ets:insert(?STATE, {auth_db_name, ?l2b(AuthDbName)}),
+ true = ets:insert(?STATE, {auth_db, open_auth_db()}),
+ process_flag(trap_exit, true),
+ ok = couch_config:register(
+ fun("couch_httpd_auth", "auth_cache_size", SizeList) ->
+ Size = list_to_integer(SizeList),
+ ok = gen_server:call(?MODULE, {new_max_cache_size, Size}, infinity)
+ end
+ ),
+ ok = couch_config:register(
+ fun("couch_httpd_auth", "authentication_db", DbName) ->
+ ok = gen_server:call(?MODULE, {new_auth_db, ?l2b(DbName)}, infinity)
+ end
+ ),
+ {ok, Notifier} = couch_db_update_notifier:start_link(fun handle_db_event/1),
+ State = #state{
+ db_notifier = Notifier,
+ max_cache_size = list_to_integer(
+ couch_config:get("couch_httpd_auth", "auth_cache_size", "50")
+ )
+ },
+ {ok, State}.
+
+
+handle_db_event({Event, DbName}) ->
+ [{auth_db_name, AuthDbName}] = ets:lookup(?STATE, auth_db_name),
+ case DbName =:= AuthDbName of
+ true ->
+ case Event of
+ deleted -> gen_server:call(?MODULE, auth_db_deleted, infinity);
+ created -> gen_server:call(?MODULE, auth_db_created, infinity);
+ compacted -> gen_server:call(?MODULE, auth_db_compacted, infinity);
+ _Else -> ok
+ end;
+ false ->
+ ok
+ end.
+
+
+handle_call({new_auth_db, AuthDbName}, _From, State) ->
+ NewState = clear_cache(State),
+ true = ets:insert(?STATE, {auth_db_name, AuthDbName}),
+ true = ets:insert(?STATE, {auth_db, open_auth_db()}),
+ {reply, ok, NewState};
+
+handle_call(auth_db_deleted, _From, State) ->
+ NewState = clear_cache(State),
+ true = ets:insert(?STATE, {auth_db, nil}),
+ {reply, ok, NewState};
+
+handle_call(auth_db_created, _From, State) ->
+ NewState = clear_cache(State),
+ true = ets:insert(?STATE, {auth_db, open_auth_db()}),
+ {reply, ok, NewState};
+
+handle_call(auth_db_compacted, _From, State) ->
+ exec_if_auth_db(
+ fun(AuthDb) ->
+ true = ets:insert(?STATE, {auth_db, reopen_auth_db(AuthDb)})
+ end
+ ),
+ {reply, ok, State};
+
+handle_call({new_max_cache_size, NewSize}, _From, State) ->
+ case NewSize >= State#state.cache_size of
+ true ->
+ ok;
+ false ->
+ lists:foreach(
+ fun(_) ->
+ LruTime = ets:last(?BY_ATIME),
+ [{LruTime, UserName}] = ets:lookup(?BY_ATIME, LruTime),
+ true = ets:delete(?BY_ATIME, LruTime),
+ true = ets:delete(?BY_USER, UserName)
+ end,
+ lists:seq(1, State#state.cache_size - NewSize)
+ )
+ end,
+ NewState = State#state{
+ max_cache_size = NewSize,
+ cache_size = lists:min([NewSize, State#state.cache_size])
+ },
+ {reply, ok, NewState};
+
+handle_call({fetch, UserName}, _From, State) ->
+ {Credentials, NewState} = case ets:lookup(?BY_USER, UserName) of
+ [{UserName, {Creds, ATime}}] ->
+ couch_stats_collector:increment({couchdb, auth_cache_hits}),
+ cache_hit(UserName, Creds, ATime),
+ {Creds, State};
+ [] ->
+ couch_stats_collector:increment({couchdb, auth_cache_misses}),
+ Creds = get_user_props_from_db(UserName),
+ State1 = add_cache_entry(UserName, Creds, erlang:now(), State),
+ {Creds, State1}
+ end,
+ {reply, Credentials, NewState};
+
+handle_call(refresh, _From, State) ->
+ exec_if_auth_db(fun refresh_entries/1),
+ {reply, ok, State}.
+
+
+handle_cast({cache_hit, UserName}, State) ->
+ case ets:lookup(?BY_USER, UserName) of
+ [{UserName, {Credentials, ATime}}] ->
+ cache_hit(UserName, Credentials, ATime);
+ _ ->
+ ok
+ end,
+ {noreply, State}.
+
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+
+terminate(_Reason, #state{db_notifier = Notifier}) ->
+ couch_db_update_notifier:stop(Notifier),
+ exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end),
+ true = ets:delete(?BY_USER),
+ true = ets:delete(?BY_ATIME),
+ true = ets:delete(?STATE).
+
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+clear_cache(State) ->
+ exec_if_auth_db(fun(AuthDb) -> catch couch_db:close(AuthDb) end),
+ true = ets:delete_all_objects(?BY_USER),
+ true = ets:delete_all_objects(?BY_ATIME),
+ State#state{cache_size = 0}.
+
+
+add_cache_entry(UserName, Credentials, ATime, State) ->
+ case State#state.cache_size >= State#state.max_cache_size of
+ true ->
+ free_mru_cache_entry();
+ false ->
+ ok
+ end,
+ true = ets:insert(?BY_ATIME, {ATime, UserName}),
+ true = ets:insert(?BY_USER, {UserName, {Credentials, ATime}}),
+ State#state{cache_size = couch_util:get_value(size, ets:info(?BY_USER))}.
+
+
+free_mru_cache_entry() ->
+ case ets:last(?BY_ATIME) of
+ '$end_of_table' ->
+ ok; % empty cache
+ LruTime ->
+ [{LruTime, UserName}] = ets:lookup(?BY_ATIME, LruTime),
+ true = ets:delete(?BY_ATIME, LruTime),
+ true = ets:delete(?BY_USER, UserName)
+ end.
+
+
+cache_hit(UserName, Credentials, ATime) ->
+ NewATime = erlang:now(),
+ true = ets:delete(?BY_ATIME, ATime),
+ true = ets:insert(?BY_ATIME, {NewATime, UserName}),
+ true = ets:insert(?BY_USER, {UserName, {Credentials, NewATime}}).
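+
+% Editor's note on the cache layout: ?BY_USER maps UserName -> {Creds,
+% ATime} while ?BY_ATIME is an ordered_set keyed by ATime, so a hit must
+% update both tables together, as cache_hit/3 does above. Note that
+% free_mru_cache_entry/0 evicts via ets:last/1, i.e. the *greatest* access
+% time; a least-recently-used policy (which the LruTime binding suggests
+% was intended) would take the other end of the table:
+%
+%   OldestATime = ets:first(?BY_ATIME).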
+
+
+refresh_entries(AuthDb) ->
+ case reopen_auth_db(AuthDb) of
+ nil ->
+ ok;
+ AuthDb2 ->
+ case AuthDb2#db.update_seq > AuthDb#db.update_seq of
+ true ->
+ {ok, _, _} = couch_db:enum_docs_since(
+ AuthDb2,
+ AuthDb#db.update_seq,
+ fun(DocInfo, _, _) -> refresh_entry(AuthDb2, DocInfo) end,
+ AuthDb#db.update_seq,
+ []
+ ),
+ true = ets:insert(?STATE, {auth_db, AuthDb2});
+ false ->
+ ok
+ end
+ end.
+
+
+refresh_entry(Db, #doc_info{high_seq = DocSeq} = DocInfo) ->
+ case is_user_doc(DocInfo) of
+ {true, UserName} ->
+ case ets:lookup(?BY_USER, UserName) of
+ [] ->
+ ok;
+ [{UserName, {_OldCreds, ATime}}] ->
+ {ok, Doc} = couch_db:open_doc(Db, DocInfo, [conflicts, deleted]),
+ NewCreds = user_creds(Doc),
+ true = ets:insert(?BY_USER, {UserName, {NewCreds, ATime}})
+ end;
+ false ->
+ ok
+ end,
+ {ok, DocSeq}.
+
+
+user_creds(#doc{deleted = true}) ->
+ nil;
+user_creds(#doc{} = Doc) ->
+ {Creds} = couch_query_servers:json_doc(Doc),
+ Creds.
+
+
+is_user_doc(#doc_info{id = <<"org.couchdb.user:", UserName/binary>>}) ->
+ {true, UserName};
+is_user_doc(_) ->
+ false.
+
+
+maybe_refresh_cache() ->
+ case cache_needs_refresh() of
+ true ->
+ ok = gen_server:call(?MODULE, refresh, infinity);
+ false ->
+ ok
+ end.
+
+
+cache_needs_refresh() ->
+ exec_if_auth_db(
+ fun(AuthDb) ->
+ case reopen_auth_db(AuthDb) of
+ nil ->
+ false;
+ AuthDb2 ->
+ AuthDb2#db.update_seq > AuthDb#db.update_seq
+ end
+ end,
+ false
+ ).
+
+
+reopen_auth_db(AuthDb) ->
+ case (catch couch_db:reopen(AuthDb)) of
+ {ok, AuthDb2} ->
+ AuthDb2;
+ _ ->
+ nil
+ end.
+
+
+exec_if_auth_db(Fun) ->
+ exec_if_auth_db(Fun, ok).
+
+exec_if_auth_db(Fun, DefRes) ->
+ case ets:lookup(?STATE, auth_db) of
+ [{auth_db, #db{} = AuthDb}] ->
+ Fun(AuthDb);
+ _ ->
+ DefRes
+ end.
+
+
+open_auth_db() ->
+ [{auth_db_name, DbName}] = ets:lookup(?STATE, auth_db_name),
+ {ok, AuthDb} = ensure_users_db_exists(DbName, [sys_db]),
+ AuthDb.
+
+
+get_user_props_from_db(UserName) ->
+ exec_if_auth_db(
+ fun(AuthDb) ->
+ Db = reopen_auth_db(AuthDb),
+ DocId = <<"org.couchdb.user:", UserName/binary>>,
+ try
+ {ok, Doc} = couch_db:open_doc(Db, DocId, [conflicts]),
+ {DocProps} = couch_query_servers:json_doc(Doc),
+ DocProps
+ catch
+ _:_Error ->
+ nil
+ end
+ end,
+ nil
+ ).
+
+ensure_users_db_exists(DbName, Options) ->
+ Options1 = [{user_ctx, #user_ctx{roles=[<<"_admin">>]}} | Options],
+ case couch_db:open(DbName, Options1) of
+ {ok, Db} ->
+ ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
+ {ok, Db};
+ _Error ->
+ {ok, Db} = couch_db:create(DbName, Options1),
+ ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
+ {ok, Db}
+ end.
+
+ensure_auth_ddoc_exists(Db, DDocId) ->
+ case couch_db:open_doc(Db, DDocId) of
+ {not_found, _Reason} ->
+ {ok, AuthDesign} = auth_design_doc(DDocId),
+ {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []);
+ _ ->
+ ok
+ end,
+ ok.
+
+auth_design_doc(DocId) ->
+ DocProps = [
+ {<<"_id">>, DocId},
+ {<<"language">>,<<"javascript">>},
+ {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
+ ],
+ {ok, couch_doc:from_json_obj({DocProps})}.
diff --git a/1.1.x/src/couchdb/couch_btree.erl b/1.1.x/src/couchdb/couch_btree.erl
new file mode 100644
index 00000000..f8c126f3
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_btree.erl
@@ -0,0 +1,679 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_btree).
+
+-export([open/2, open/3, query_modify/4, add/2, add_remove/3]).
+-export([fold/4, full_reduce/1, final_reduce/2, foldl/3, foldl/4]).
+-export([fold_reduce/4, lookup/2, get_state/1, set_options/2]).
+
+-define(CHUNK_THRESHOLD, 16#4ff).
+
+-record(btree,
+ {fd,
+ root,
+ extract_kv = fun({Key, Value}) -> {Key, Value} end,
+ assemble_kv = fun(Key, Value) -> {Key, Value} end,
+ less = fun(A, B) -> A < B end,
+ reduce = nil
+ }).
+
+extract(#btree{extract_kv=Extract}, Value) ->
+ Extract(Value).
+
+assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
+ Assemble(Key, Value).
+
+less(#btree{less=Less}, A, B) ->
+ Less(A, B).
+
+% pass in 'nil' for State when opening a new Btree.
+open(State, Fd) ->
+ {ok, #btree{root=State, fd=Fd}}.
+
+set_options(Bt, []) ->
+ Bt;
+set_options(Bt, [{split, Extract}|Rest]) ->
+ set_options(Bt#btree{extract_kv=Extract}, Rest);
+set_options(Bt, [{join, Assemble}|Rest]) ->
+ set_options(Bt#btree{assemble_kv=Assemble}, Rest);
+set_options(Bt, [{less, Less}|Rest]) ->
+ set_options(Bt#btree{less=Less}, Rest);
+set_options(Bt, [{reduce, Reduce}|Rest]) ->
+ set_options(Bt#btree{reduce=Reduce}, Rest).
+
+open(State, Fd, Options) ->
+ {ok, set_options(#btree{root=State, fd=Fd}, Options)}.
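+
+% Editor's sketch: opening a btree with custom options (Fd is assumed to
+% come from couch_file; the reduce fun here simply counts entries):
+%
+%   {ok, Bt} = couch_btree:open(nil, Fd, [
+%       {reduce, fun(reduce, KVs) -> length(KVs);
+%                   (rereduce, Reds) -> lists:sum(Reds) end}
+%   ]).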
+
+get_state(#btree{root=Root}) ->
+ Root.
+
+final_reduce(#btree{reduce=Reduce}, Val) ->
+ final_reduce(Reduce, Val);
+final_reduce(Reduce, {[], []}) ->
+ Reduce(reduce, []);
+final_reduce(_Bt, {[], [Red]}) ->
+ Red;
+final_reduce(Reduce, {[], Reductions}) ->
+ Reduce(rereduce, Reductions);
+final_reduce(Reduce, {KVs, Reductions}) ->
+ Red = Reduce(reduce, KVs),
+ final_reduce(Reduce, {[], [Red | Reductions]}).
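+
+% Editor's sketch: with the counting reducer above, final_reduce/2 folds
+% leftover KVs and partial reductions down to a single value:
+%
+%   Reduce = fun(reduce, KVs) -> length(KVs);
+%               (rereduce, Reds) -> lists:sum(Reds) end,
+%   3 = couch_btree:final_reduce(Reduce, {[{a, 1}], [2]}).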
+
+fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) ->
+ Dir = couch_util:get_value(dir, Options, fwd),
+ StartKey = couch_util:get_value(start_key, Options),
+ EndKey = case couch_util:get_value(end_key_gt, Options) of
+ undefined -> couch_util:get_value(end_key, Options);
+ LastKey -> LastKey
+ end,
+ KeyGroupFun = couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end),
+ {StartKey2, EndKey2} =
+ case Dir of
+ rev -> {EndKey, StartKey};
+ fwd -> {StartKey, EndKey}
+ end,
+ try
+ {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
+ reduce_stream_node(Bt, Dir, Root, StartKey2, EndKey2, undefined, [], [],
+ KeyGroupFun, Fun, Acc),
+ if GroupedKey2 == undefined ->
+ {ok, Acc2};
+ true ->
+ case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
+ {ok, Acc3} -> {ok, Acc3};
+ {stop, Acc3} -> {ok, Acc3}
+ end
+ end
+ catch
+ throw:{stop, AccDone} -> {ok, AccDone}
+ end.
+
+full_reduce(#btree{root=nil,reduce=Reduce}) ->
+ {ok, Reduce(reduce, [])};
+full_reduce(#btree{root={_P, Red}}) ->
+ {ok, Red}.
+
+% wraps an arity-2 fun in the arity-3 form that the fold functions expect
+convert_fun_arity(Fun) when is_function(Fun, 2) ->
+ fun(KV, _Reds, AccIn) -> Fun(KV, AccIn) end;
+convert_fun_arity(Fun) when is_function(Fun, 3) ->
+ Fun. % Already arity 3
+
+make_key_in_end_range_function(#btree{less=Less}, fwd, Options) ->
+ case couch_util:get_value(end_key_gt, Options) of
+ undefined ->
+ case couch_util:get_value(end_key, Options) of
+ undefined ->
+ fun(_Key) -> true end;
+ LastKey ->
+ fun(Key) -> not Less(LastKey, Key) end
+ end;
+ EndKey ->
+ fun(Key) -> Less(Key, EndKey) end
+ end;
+make_key_in_end_range_function(#btree{less=Less}, rev, Options) ->
+ case couch_util:get_value(end_key_gt, Options) of
+ undefined ->
+ case couch_util:get_value(end_key, Options) of
+ undefined ->
+ fun(_Key) -> true end;
+ LastKey ->
+ fun(Key) -> not Less(Key, LastKey) end
+ end;
+ EndKey ->
+ fun(Key) -> Less(EndKey, Key) end
+ end.
+
+
+foldl(Bt, Fun, Acc) ->
+ fold(Bt, Fun, Acc, []).
+
+foldl(Bt, Fun, Acc, Options) ->
+ fold(Bt, Fun, Acc, Options).
+
+
+fold(#btree{root=nil}, _Fun, Acc, _Options) ->
+ {ok, {[], []}, Acc};
+fold(#btree{root=Root}=Bt, Fun, Acc, Options) ->
+ Dir = couch_util:get_value(dir, Options, fwd),
+ InRange = make_key_in_end_range_function(Bt, Dir, Options),
+ Result =
+ case couch_util:get_value(start_key, Options) of
+ undefined ->
+ stream_node(Bt, [], Bt#btree.root, InRange, Dir,
+ convert_fun_arity(Fun), Acc);
+ StartKey ->
+ stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir,
+ convert_fun_arity(Fun), Acc)
+ end,
+ case Result of
+ {ok, Acc2}->
+ {_P, FullReduction} = Root,
+ {ok, {[], [FullReduction]}, Acc2};
+ {stop, LastReduction, Acc2} ->
+ {ok, LastReduction, Acc2}
+ end.
+
+add(Bt, InsertKeyValues) ->
+ add_remove(Bt, InsertKeyValues, []).
+
+add_remove(Bt, InsertKeyValues, RemoveKeys) ->
+ {ok, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
+ {ok, Bt2}.
+
+query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
+ #btree{root=Root} = Bt,
+ InsertActions = lists:map(
+ fun(KeyValue) ->
+ {Key, Value} = extract(Bt, KeyValue),
+ {insert, Key, Value}
+ end, InsertValues),
+ RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
+ FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
+ SortFun =
+ fun({OpA, A, _}, {OpB, B, _}) ->
+ case A == B of
+ % A and B are equal, sort by op.
+ true -> op_order(OpA) < op_order(OpB);
+ false ->
+ less(Bt, A, B)
+ end
+ end,
+ Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
+ {ok, KeyPointers, QueryResults, Bt2} = modify_node(Bt, Root, Actions, []),
+ {ok, NewRoot, Bt3} = complete_root(Bt2, KeyPointers),
+ {ok, QueryResults, Bt3#btree{root=NewRoot}}.
+
+% for ordering different operations with the same key.
+% fetch < remove < insert
+op_order(fetch) -> 1;
+op_order(remove) -> 2;
+op_order(insert) -> 3.
+
+lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
+ SortedKeys = lists:sort(Less, Keys),
+ {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
+ % We want to return the results in the same order as the keys were input
+ % but we may have changed the order when we sorted. So we need to put the
+ % order back into the results.
+ couch_util:reorder_results(Keys, SortedResults).
+
+lookup(_Bt, nil, Keys) ->
+ {ok, [{Key, not_found} || Key <- Keys]};
+lookup(Bt, {Pointer, _Reds}, Keys) ->
+ {NodeType, NodeList} = get_node(Bt, Pointer),
+ case NodeType of
+ kp_node ->
+ lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
+ kv_node ->
+ lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
+ end.
+
+lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
+ {ok, lists:reverse(Output)};
+lookup_kpnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
+ {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
+lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Output) ->
+ N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), FirstLookupKey),
+ {Key, PointerInfo} = element(N, NodeTuple),
+ SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
+ case lists:splitwith(SplitFun, LookupKeys) of
+ {[], GreaterQueries} ->
+ lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
+ {LessEqQueries, GreaterQueries} ->
+ {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
+ lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
+ end.
+
+
+lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
+ {ok, lists:reverse(Output)};
+lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
+ % keys not found
+ {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
+lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) ->
+ N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey),
+ {Key, Value} = element(N, NodeTuple),
+ case less(Bt, LookupKey, Key) of
+ true ->
+ % LookupKey is less than Key
+ lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
+ false ->
+ case less(Bt, Key, LookupKey) of
+ true ->
+ % LookupKey is greater than Key
+ lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]);
+ false ->
+ % LookupKey is equal to Key
+ lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
+ end
+ end.
+
+
+complete_root(Bt, []) ->
+ {ok, nil, Bt};
+complete_root(Bt, [{_Key, PointerInfo}])->
+ {ok, PointerInfo, Bt};
+complete_root(Bt, KPs) ->
+ {ok, ResultKeyPointers, Bt2} = write_node(Bt, kp_node, KPs),
+ complete_root(Bt2, ResultKeyPointers).
+
+%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
+% It is inaccurate as it does not account for compression when blocks are
+% written. The repeated byte_size(term_to_binary(...)) calls also make it
+% quite inefficient.
+
+chunkify(InList) ->
+ case byte_size(term_to_binary(InList)) of
+ Size when Size > ?CHUNK_THRESHOLD ->
+ NumberOfChunksLikely = ((Size div ?CHUNK_THRESHOLD) + 1),
+ ChunkThreshold = Size div NumberOfChunksLikely,
+ chunkify(InList, ChunkThreshold, [], 0, []);
+ _Else ->
+ [InList]
+ end.
+
+chunkify([], _ChunkThreshold, [], 0, OutputChunks) ->
+ lists:reverse(OutputChunks);
+chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
+ lists:reverse([lists:reverse(OutList) | OutputChunks]);
+chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
+ case byte_size(term_to_binary(InElement)) of
+ Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
+ chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
+ Size ->
+ chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
+ end.
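+
+% Editor's sketch: lists whose external size stays under ?CHUNK_THRESHOLD
+% pass through unsplit, e.g.
+%
+%   [[a, b, c]] = chunkify([a, b, c]).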
+
+modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
+ case RootPointerInfo of
+ nil ->
+ NodeType = kv_node,
+ NodeList = [];
+ {Pointer, _Reds} ->
+ {NodeType, NodeList} = get_node(Bt, Pointer)
+ end,
+ NodeTuple = list_to_tuple(NodeList),
+
+ {ok, NewNodeList, QueryOutput2, Bt2} =
+ case NodeType of
+ kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
+ kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
+ end,
+ case NewNodeList of
+ [] -> % no nodes remain
+ {ok, [], QueryOutput2, Bt2};
+ NodeList -> % nothing changed
+ {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
+ {ok, [{LastKey, RootPointerInfo}], QueryOutput2, Bt2};
+ _Else2 ->
+ {ok, ResultList, Bt3} = write_node(Bt2, NodeType, NewNodeList),
+ {ok, ResultList, QueryOutput2, Bt3}
+ end.
+
+reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) ->
+ [];
+reduce_node(#btree{reduce=R}, kp_node, NodeList) ->
+ R(rereduce, [Red || {_K, {_P, Red}} <- NodeList]);
+reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) ->
+ R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]).
+
+
+get_node(#btree{fd = Fd}, NodePos) ->
+ {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
+ {NodeType, NodeList}.
+
+write_node(Bt, NodeType, NodeList) ->
+ % split up nodes into smaller sizes
+ NodeListList = chunkify(NodeList),
+ % now write out each chunk and return the KeyPointer pairs for those nodes
+ ResultList = [
+ begin
+ {ok, Pointer} = couch_file:append_term(Bt#btree.fd, {NodeType, ANodeList}),
+ {LastKey, _} = lists:last(ANodeList),
+ {LastKey, {Pointer, reduce_node(Bt, NodeType, ANodeList)}}
+ end
+ ||
+ ANodeList <- NodeListList
+ ],
+ {ok, ResultList, Bt}.
+
+modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
+ modify_node(Bt, nil, Actions, QueryOutput);
+modify_kpnode(Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
+ {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
+ tuple_size(NodeTuple), [])), QueryOutput, Bt};
+modify_kpnode(Bt, NodeTuple, LowerBound,
+ [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) ->
+ Sz = tuple_size(NodeTuple),
+ N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey),
+ case N =:= Sz of
+ true ->
+ % perform remaining actions on last node
+ {_, PointerInfo} = element(Sz, NodeTuple),
+ {ok, ChildKPs, QueryOutput2, Bt2} =
+ modify_node(Bt, PointerInfo, Actions, QueryOutput),
+ NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
+ Sz - 1, ChildKPs)),
+ {ok, NodeList, QueryOutput2, Bt2};
+ false ->
+ {NodeKey, PointerInfo} = element(N, NodeTuple),
+ SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
+ not less(Bt, NodeKey, ActionKey)
+ end,
+ {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
+ {ok, ChildKPs, QueryOutput2, Bt2} =
+ modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
+ ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple,
+ LowerBound, N - 1, ResultNode)),
+ modify_kpnode(Bt2, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
+ end.
+
+bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
+ Tail;
+bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
+ bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]).
+
+bounded_tuple_to_list(Tuple, Start, End, Tail) ->
+ bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
+
+bounded_tuple_to_list2(_Tuple, Start, End, Acc, Tail) when Start > End ->
+ lists:reverse(Acc, Tail);
+bounded_tuple_to_list2(Tuple, Start, End, Acc, Tail) ->
+ bounded_tuple_to_list2(Tuple, Start + 1, End, [element(Start, Tuple) | Acc], Tail).
+
+find_first_gteq(_Bt, _Tuple, Start, End, _Key) when Start == End ->
+ End;
+find_first_gteq(Bt, Tuple, Start, End, Key) ->
+ Mid = Start + ((End - Start) div 2),
+ {TupleKey, _} = element(Mid, Tuple),
+ case less(Bt, TupleKey, Key) of
+ true ->
+ find_first_gteq(Bt, Tuple, Mid+1, End, Key);
+ false ->
+ find_first_gteq(Bt, Tuple, Start, Mid, Key)
+ end.
+
+modify_kvnode(Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
+ {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput, Bt};
+modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) ->
+ case ActionType of
+ insert ->
+ modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+ remove ->
+ % just drop the action
+ modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
+ fetch ->
+ % the key/value must not exist in the tree
+ modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
+ end;
+modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) ->
+ N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey),
+ {Key, Value} = element(N, NodeTuple),
+ ResultNode = bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
+ case less(Bt, ActionKey, Key) of
+ true ->
+ case ActionType of
+ insert ->
+ % ActionKey is less than the Key, so insert
+ modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+ remove ->
+ % ActionKey is less than the Key, just drop the action
+ modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
+ fetch ->
+ % ActionKey is less than the Key, the key/value must not exist in the tree
+ modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
+ end;
+ false ->
+ % ActionKey and Key may be equal.
+ case less(Bt, Key, ActionKey) of
+ false ->
+ case ActionType of
+ insert ->
+ modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+ remove ->
+ modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput);
+ fetch ->
+ % ActionKey is equal to the Key, so add to the QueryOutput, but re-process
+ % the node since an identical action key can follow it.
+ modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
+ end;
+ true ->
+ modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput)
+ end
+ end.
+
+
+reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _KeyEnd, GroupedKey, GroupedKVsAcc,
+ GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
+ {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
+reduce_stream_node(Bt, Dir, {P, _R}, KeyStart, KeyEnd, GroupedKey, GroupedKVsAcc,
+ GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
+ case get_node(Bt, P) of
+ {kp_node, NodeList} ->
+ reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, KeyEnd, GroupedKey,
+ GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc);
+ {kv_node, KVs} ->
+ reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, KeyEnd, GroupedKey,
+ GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc)
+ end.
+
+reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, KeyEnd,
+ GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
+ KeyGroupFun, Fun, Acc) ->
+
+ GTEKeyStartKVs =
+ case KeyStart of
+ undefined ->
+ KVs;
+ _ ->
+ lists:dropwhile(fun({Key,_}) -> less(Bt, Key, KeyStart) end, KVs)
+ end,
+ KVs2 =
+ case KeyEnd of
+ undefined ->
+ GTEKeyStartKVs;
+ _ ->
+ lists:takewhile(
+ fun({Key,_}) ->
+ not less(Bt, KeyEnd, Key)
+ end, GTEKeyStartKVs)
+ end,
+ reduce_stream_kv_node2(Bt, adjust_dir(Dir, KVs2), GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
+ KeyGroupFun, Fun, Acc).
+
+
+reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
+ _KeyGroupFun, _Fun, Acc) ->
+ {ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
+reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc,
+ GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
+ case GroupedKey of
+ undefined ->
+ reduce_stream_kv_node2(Bt, RestKVs, Key,
+ [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
+ _ ->
+
+ case KeyGroupFun(GroupedKey, Key) of
+ true ->
+ reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
+ [assemble(Bt,Key,Value)|GroupedKVsAcc], GroupedRedsAcc, KeyGroupFun,
+ Fun, Acc);
+ false ->
+ case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
+ {ok, Acc2} ->
+ reduce_stream_kv_node2(Bt, RestKVs, Key, [assemble(Bt,Key,Value)],
+ [], KeyGroupFun, Fun, Acc2);
+ {stop, Acc2} ->
+ throw({stop, Acc2})
+ end
+ end
+ end.
+
+reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, KeyEnd,
+ GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
+ KeyGroupFun, Fun, Acc) ->
+ Nodes =
+ case KeyStart of
+ undefined ->
+ NodeList;
+ _ ->
+ lists:dropwhile(
+ fun({Key,_}) ->
+ less(Bt, Key, KeyStart)
+ end, NodeList)
+ end,
+ NodesInRange =
+ case KeyEnd of
+ undefined ->
+ Nodes;
+ _ ->
+ {InRange, MaybeInRange} = lists:splitwith(
+ fun({Key,_}) ->
+ less(Bt, Key, KeyEnd)
+ end, Nodes),
+ InRange ++ case MaybeInRange of [] -> []; [FirstMaybe|_] -> [FirstMaybe] end
+ end,
+ reduce_stream_kp_node2(Bt, Dir, adjust_dir(Dir, NodesInRange), KeyStart, KeyEnd,
+ GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc).
+
+
+reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart, KeyEnd,
+ undefined, [], [], KeyGroupFun, Fun, Acc) ->
+ {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
+ reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, KeyEnd, undefined,
+ [], [], KeyGroupFun, Fun, Acc),
+ reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, KeyEnd, GroupedKey2,
+ GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
+reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, KeyEnd,
+ GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
+ {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
+ KeyGroupFun(GroupedKey, Key) end, NodeList),
+ {GroupedNodes, UngroupedNodes} =
+ case Grouped0 of
+ [] ->
+ {Grouped0, Ungrouped0};
+ _ ->
+ [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
+ {RestGrouped, [FirstGrouped | Ungrouped0]}
+ end,
+ GroupedReds = [R || {_, {_,R}} <- GroupedNodes],
+ case UngroupedNodes of
+ [{_Key, NodeInfo}|RestNodes] ->
+ {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
+ reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, KeyEnd, GroupedKey,
+ GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
+ reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, KeyEnd, GroupedKey2,
+ GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
+ [] ->
+ {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
+ end.
+
+adjust_dir(fwd, List) ->
+ List;
+adjust_dir(rev, List) ->
+ lists:reverse(List).
+
+stream_node(Bt, Reds, {Pointer, _Reds}, StartKey, InRange, Dir, Fun, Acc) ->
+ {NodeType, NodeList} = get_node(Bt, Pointer),
+ case NodeType of
+ kp_node ->
+ stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
+ kv_node ->
+ stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
+ end.
+
+stream_node(Bt, Reds, {Pointer, _Reds}, InRange, Dir, Fun, Acc) ->
+ {NodeType, NodeList} = get_node(Bt, Pointer),
+ case NodeType of
+ kp_node ->
+ stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
+ kv_node ->
+ stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
+ end.
+
+stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
+ {ok, Acc};
+stream_kp_node(Bt, Reds, [{_Key, {Pointer, Red}} | Rest], InRange, Dir, Fun, Acc) ->
+ case stream_node(Bt, Reds, {Pointer, Red}, InRange, Dir, Fun, Acc) of
+ {ok, Acc2} ->
+ stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2);
+ {stop, LastReds, Acc2} ->
+ {stop, LastReds, Acc2}
+ end.
+
+drop_nodes(_Bt, Reds, _StartKey, []) ->
+ {Reds, []};
+drop_nodes(Bt, Reds, StartKey, [{NodeKey, {Pointer, Red}} | RestKPs]) ->
+ case less(Bt, NodeKey, StartKey) of
+ true -> drop_nodes(Bt, [Red | Reds], StartKey, RestKPs);
+ false -> {Reds, [{NodeKey, {Pointer, Red}} | RestKPs]}
+ end.
+
+stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
+ {NewReds, NodesToStream} =
+ case Dir of
+ fwd ->
+ % drop all nodes sorting before the key
+ drop_nodes(Bt, Reds, StartKey, KPs);
+ rev ->
+ % keep all nodes sorting before the key, AND the first node to sort after
+ RevKPs = lists:reverse(KPs),
+ case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
+ {_RevsBefore, []} ->
+ % everything sorts before it
+ {Reds, KPs};
+ {RevBefore, [FirstAfter | Drop]} ->
+ {[Red || {_K,{_P,Red}} <- Drop] ++ Reds,
+ [FirstAfter | lists:reverse(RevBefore)]}
+ end
+ end,
+ case NodesToStream of
+ [] ->
+ {ok, Acc};
+ [{_Key, {Pointer, Red}} | Rest] ->
+ case stream_node(Bt, NewReds, {Pointer, Red}, StartKey, InRange, Dir, Fun, Acc) of
+ {ok, Acc2} ->
+ stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
+ {stop, LastReds, Acc2} ->
+ {stop, LastReds, Acc2}
+ end
+ end.
+
+stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
+ DropFun =
+ case Dir of
+ fwd ->
+ fun({Key, _}) -> less(Bt, Key, StartKey) end;
+ rev ->
+ fun({Key, _}) -> less(Bt, StartKey, Key) end
+ end,
+ {LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
+ AssembleLTKVs = [assemble(Bt,K,V) || {K,V} <- LTKVs],
+ stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc).
+
+stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
+ {ok, Acc};
+stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) ->
+ case InRange(K) of
+ false ->
+ {stop, {PrevKVs, Reds}, Acc};
+ true ->
+ AssembledKV = assemble(Bt, K, V),
+ case Fun(AssembledKV, {PrevKVs, Reds}, Acc) of
+ {ok, Acc2} ->
+ stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2);
+ {stop, Acc2} ->
+ {stop, {PrevKVs, Reds}, Acc2}
+ end
+ end.
diff --git a/1.1.x/src/couchdb/couch_changes.erl b/1.1.x/src/couchdb/couch_changes.erl
new file mode 100644
index 00000000..6eb6f7e1
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_changes.erl
@@ -0,0 +1,339 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_changes).
+-include("couch_db.hrl").
+
+-export([handle_changes/3]).
+
+%% @type Req -> #httpd{} | {json_req, JsonObj()}
+handle_changes(#changes_args{style=Style}=Args1, Req, Db) ->
+ #changes_args{feed = Feed} = Args = Args1#changes_args{
+ filter = make_filter_fun(Args1#changes_args.filter, Style, Req, Db)
+ },
+ StartSeq = case Args#changes_args.dir of
+ rev ->
+ couch_db:get_update_seq(Db);
+ fwd ->
+ Args#changes_args.since
+ end,
+ if Feed == "continuous" orelse Feed == "longpoll" ->
+ fun(CallbackAcc) ->
+ {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+ Self = self(),
+ {ok, Notify} = couch_db_update_notifier:start_link(
+ fun({_, DbName}) when DbName == Db#db.name ->
+ Self ! db_updated;
+ (_) ->
+ ok
+ end
+ ),
+ UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
+ {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+ try
+ keep_sending_changes(
+ Args,
+ Callback,
+ UserAcc2,
+ Db,
+ StartSeq,
+ <<"">>,
+ Timeout,
+ TimeoutFun
+ )
+ after
+ couch_db_update_notifier:stop(Notify),
+ get_rest_db_updated(ok) % clean out any remaining update messages
+ end
+ end;
+ true ->
+ fun(CallbackAcc) ->
+ {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+ UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
+ {ok, {_, LastSeq, _Prepend, _, _, UserAcc3, _, _, _, _}} =
+ send_changes(
+ Args#changes_args{feed="normal"},
+ Callback,
+ UserAcc2,
+ Db,
+ StartSeq,
+ <<>>
+ ),
+ end_sending_changes(Callback, UserAcc3, LastSeq, Feed)
+ end
+ end.
+
+get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 3) ->
+ Pair;
+get_callback_acc(Callback) when is_function(Callback, 2) ->
+ {fun(Ev, Data, _) -> Callback(Ev, Data) end, ok}.
+
+%% @type Req -> #httpd{} | {json_req, JsonObj()}
+make_filter_fun([$_ | _] = FilterName, Style, Req, Db) ->
+ builtin_filter_fun(FilterName, Style, Req, Db);
+make_filter_fun(FilterName, Style, Req, Db) ->
+ os_filter_fun(FilterName, Style, Req, Db).
+
+os_filter_fun(FilterName, Style, Req, Db) ->
+ case [list_to_binary(couch_httpd:unquote(Part))
+ || Part <- string:tokens(FilterName, "/")] of
+ [] ->
+ fun(_Db2, #doc_info{revs=Revs}) ->
+ builtin_results(Style, Revs)
+ end;
+ [DName, FName] ->
+ DesignId = <<"_design/", DName/binary>>,
+ DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, []),
+ % validate that the ddoc has the filter fun
+ #doc{body={Props}} = DDoc,
+ couch_util:get_nested_json_value({Props}, [<<"filters">>, FName]),
+ fun(Db2, DocInfo) ->
+ DocInfos =
+ case Style of
+ main_only ->
+ [DocInfo];
+ all_docs ->
+ [DocInfo#doc_info{revs=[Rev]}|| Rev <- DocInfo#doc_info.revs]
+ end,
+ Docs = [Doc || {ok, Doc} <- [
+ couch_db:open_doc(Db2, DocInfo2, [deleted, conflicts])
+ || DocInfo2 <- DocInfos]],
+ {ok, Passes} = couch_query_servers:filter_docs(
+ Req, Db2, DDoc, FName, Docs
+ ),
+ [{[{<<"rev">>, couch_doc:rev_to_str({RevPos,RevId})}]}
+ || {Pass, #doc{revs={RevPos,[RevId|_]}}}
+ <- lists:zip(Passes, Docs), Pass == true]
+ end;
+ _Else ->
+ throw({bad_request,
+ "filter parameter must be of the form `designname/filtername`"})
+ end.
+
+builtin_filter_fun("_doc_ids", Style, {json_req, {Props}}, _Db) ->
+ filter_docids(couch_util:get_value(<<"doc_ids">>, Props), Style);
+builtin_filter_fun("_doc_ids", Style, #httpd{method='POST'}=Req, _Db) ->
+ {Props} = couch_httpd:json_body_obj(Req),
+ DocIds = couch_util:get_value(<<"doc_ids">>, Props, nil),
+ filter_docids(DocIds, Style);
+builtin_filter_fun("_doc_ids", Style, #httpd{method='GET'}=Req, _Db) ->
+ DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
+ filter_docids(DocIds, Style);
+builtin_filter_fun("_design", Style, _Req, _Db) ->
+ filter_designdoc(Style);
+builtin_filter_fun(_FilterName, _Style, _Req, _Db) ->
+ throw({bad_request, "unknown builtin filter name"}).
+
+filter_docids(DocIds, Style) when is_list(DocIds)->
+ fun(_Db, #doc_info{id=DocId, revs=Revs}) ->
+ case lists:member(DocId, DocIds) of
+ true ->
+ builtin_results(Style, Revs);
+ _ -> []
+ end
+ end;
+filter_docids(_, _) ->
+ throw({bad_request, "`doc_ids` filter parameter is not a list."}).
+
+filter_designdoc(Style) ->
+ fun(_Db, #doc_info{id=DocId, revs=Revs}) ->
+ case DocId of
+ <<"_design", _/binary>> ->
+ builtin_results(Style, Revs);
+ _ -> []
+ end
+ end.
+
+builtin_results(Style, [#rev_info{rev=Rev}|_]=Revs) ->
+ case Style of
+ main_only ->
+ [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
+ all_docs ->
+ [{[{<<"rev">>, couch_doc:rev_to_str(R)}]}
+ || #rev_info{rev=R} <- Revs]
+ end.
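+
+% Editor's sketch: the "_doc_ids" clauses above are driven by requests such
+% as (illustrative):
+%
+%   POST /db/_changes?filter=_doc_ids
+%   {"doc_ids": ["doc1", "doc2"]}
+%
+% filter_docids/2 then emits rows only for the listed ids, in the chosen
+% style (main_only or all_docs).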
+
+get_changes_timeout(Args, Callback) ->
+ #changes_args{
+ heartbeat = Heartbeat,
+ timeout = Timeout,
+ feed = ResponseType
+ } = Args,
+ DefaultTimeout = list_to_integer(
+ couch_config:get("httpd", "changes_timeout", "60000")
+ ),
+ case Heartbeat of
+ undefined ->
+ case Timeout of
+ undefined ->
+ {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
+ infinity ->
+ {infinity, fun(UserAcc) -> {stop, UserAcc} end};
+ _ ->
+ {lists:min([DefaultTimeout, Timeout]),
+ fun(UserAcc) -> {stop, UserAcc} end}
+ end;
+ true ->
+ {DefaultTimeout,
+ fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
+ _ ->
+ {lists:min([DefaultTimeout, Heartbeat]),
+ fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
+ end.
+
+start_sending_changes(_Callback, UserAcc, "continuous") ->
+ UserAcc;
+start_sending_changes(Callback, UserAcc, ResponseType) ->
+ Callback(start, ResponseType, UserAcc).
+
+send_changes(Args, Callback, UserAcc, Db, StartSeq, Prepend) ->
+ #changes_args{
+ style = Style,
+ include_docs = IncludeDocs,
+ conflicts = Conflicts,
+ limit = Limit,
+ feed = ResponseType,
+ dir = Dir,
+ filter = FilterFun
+ } = Args,
+ couch_db:changes_since(
+ Db,
+ Style,
+ StartSeq,
+ fun changes_enumerator/2,
+ [{dir, Dir}],
+ {Db, StartSeq, Prepend, FilterFun, Callback, UserAcc, ResponseType,
+ Limit, IncludeDocs, Conflicts}
+ ).
+
+keep_sending_changes(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout,
+ TimeoutFun) ->
+ #changes_args{
+ feed = ResponseType,
+ limit = Limit,
+ db_open_options = DbOptions
+ } = Args,
+ % ?LOG_INFO("send_changes start ~p",[StartSeq]),
+ {ok, {_, EndSeq, Prepend2, _, _, UserAcc2, _, NewLimit, _, _}} = send_changes(
+ Args#changes_args{dir=fwd}, Callback, UserAcc, Db, StartSeq, Prepend
+ ),
+ % ?LOG_INFO("send_changes last ~p",[EndSeq]),
+ couch_db:close(Db),
+ if Limit > NewLimit, ResponseType == "longpoll" ->
+ end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType);
+ true ->
+ case wait_db_updated(Timeout, TimeoutFun, UserAcc2) of
+ {updated, UserAcc3} ->
+ % ?LOG_INFO("wait_db_updated updated ~p",[{Db#db.name, EndSeq}]),
+ DbOptions1 = [{user_ctx, Db#db.user_ctx} | DbOptions],
+ case couch_db:open(Db#db.name, DbOptions1) of
+ {ok, Db2} ->
+ keep_sending_changes(
+ Args#changes_args{limit=NewLimit},
+ Callback,
+ UserAcc3,
+ Db2,
+ EndSeq,
+ Prepend2,
+ Timeout,
+ TimeoutFun
+ );
+ _Else ->
+ end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType)
+ end;
+ {stop, UserAcc3} ->
+ % ?LOG_INFO("wait_db_updated stop ~p",[{Db#db.name, EndSeq}]),
+ end_sending_changes(Callback, UserAcc3, EndSeq, ResponseType)
+ end
+ end.
+
+end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
+ Callback({stop, EndSeq}, ResponseType, UserAcc).
+
+changes_enumerator(DocInfo, {Db, _, _, FilterFun, Callback, UserAcc,
+ "continuous", Limit, IncludeDocs, Conflicts}) ->
+
+ #doc_info{high_seq = Seq} = DocInfo,
+ Results0 = FilterFun(Db, DocInfo),
+ Results = [Result || Result <- Results0, Result /= null],
+ Go = if Limit =< 1 -> stop; true -> ok end,
+ case Results of
+ [] ->
+ {Go, {Db, Seq, nil, FilterFun, Callback, UserAcc, "continuous", Limit,
+ IncludeDocs, Conflicts}
+ };
+ _ ->
+ ChangesRow = changes_row(Db, Results, DocInfo, IncludeDocs, Conflicts),
+ UserAcc2 = Callback({change, ChangesRow, <<>>}, "continuous", UserAcc),
+ {Go, {Db, Seq, nil, FilterFun, Callback, UserAcc2, "continuous",
+ Limit - 1, IncludeDocs, Conflicts}
+ }
+ end;
+changes_enumerator(DocInfo, {Db, _, Prepend, FilterFun, Callback, UserAcc,
+ ResponseType, Limit, IncludeDocs, Conflicts}) ->
+
+ #doc_info{high_seq = Seq} = DocInfo,
+ Results0 = FilterFun(Db, DocInfo),
+ Results = [Result || Result <- Results0, Result /= null],
+ Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
+ case Results of
+ [] ->
+ {Go, {Db, Seq, Prepend, FilterFun, Callback, UserAcc, ResponseType,
+ Limit, IncludeDocs, Conflicts}
+ };
+ _ ->
+ ChangesRow = changes_row(Db, Results, DocInfo, IncludeDocs, Conflicts),
+ UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
+ {Go, {Db, Seq, <<",\n">>, FilterFun, Callback, UserAcc2, ResponseType,
+ Limit - 1, IncludeDocs, Conflicts}
+ }
+ end.
+
+
+changes_row(Db, Results, DocInfo, IncludeDoc, Conflicts) ->
+ #doc_info{
+ id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]
+ } = DocInfo,
+ {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
+ deleted_item(Del) ++ case IncludeDoc of
+ true ->
+ Options = if Conflicts -> [conflicts]; true -> [] end,
+ couch_httpd_view:doc_member(Db, DocInfo, Options);
+ false ->
+ []
+ end}.
+
+deleted_item(true) -> [{<<"deleted">>, true}];
+deleted_item(_) -> [].
+
+% Waits for a db_updated message; if multiple messages have queued up, collects them all.
+wait_db_updated(Timeout, TimeoutFun, UserAcc) ->
+ receive
+ db_updated ->
+ get_rest_db_updated(UserAcc)
+ after Timeout ->
+ {Go, UserAcc2} = TimeoutFun(UserAcc),
+ case Go of
+ ok ->
+ wait_db_updated(Timeout, TimeoutFun, UserAcc2);
+ stop ->
+ {stop, UserAcc2}
+ end
+ end.
+
+get_rest_db_updated(UserAcc) ->
+ receive
+ db_updated ->
+ get_rest_db_updated(UserAcc)
+ after 0 ->
+ {updated, UserAcc}
+ end.
diff --git a/1.1.x/src/couchdb/couch_config.erl b/1.1.x/src/couchdb/couch_config.erl
new file mode 100644
index 00000000..792ff5a0
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_config.erl
@@ -0,0 +1,254 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Reads CouchDB's ini files and is queried for configuration parameters.
+% This module is initialized with a list of ini files that it consecutively
+% reads Key/Value pairs from and saves them in an ets table. If more than one
+% ini file is specified, the last one is used to write back changes made with
+% set/3,4 to that ini file.
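+%
+% A minimal usage sketch (the section, key and value names below are
+% illustrative only):
+%
+%   Port = couch_config:get("httpd", "port", "5984"),
+%   ok = couch_config:set("log", "level", "info", false),
+%   ok = couch_config:register(fun(Sec, Key) ->
+%       io:format("config changed: ~s/~s~n", [Sec, Key])
+%   end).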
+
+-module(couch_config).
+-behaviour(gen_server).
+
+-include("couch_db.hrl").
+
+
+-export([start_link/1, stop/0]).
+-export([all/0, get/1, get/2, get/3, set/3, set/4, delete/2, delete/3]).
+-export([register/1, register/2]).
+-export([parse_ini_file/1]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-record(config, {
+ notify_funs=[],
+ write_filename=undefined
+}).
+
+
+start_link(IniFiles) ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, IniFiles, []).
+
+stop() ->
+ gen_server:cast(?MODULE, stop).
+
+
+all() ->
+ lists:sort(gen_server:call(?MODULE, all, infinity)).
+
+
+get(Section) when is_binary(Section) ->
+ ?MODULE:get(?b2l(Section));
+get(Section) ->
+ Matches = ets:match(?MODULE, {{Section, '$1'}, '$2'}),
+ [{Key, Value} || [Key, Value] <- Matches].
+
+get(Section, Key) ->
+ ?MODULE:get(Section, Key, undefined).
+
+get(Section, Key, Default) when is_binary(Section) and is_binary(Key) ->
+ ?MODULE:get(?b2l(Section), ?b2l(Key), Default);
+get(Section, Key, Default) ->
+ case ets:lookup(?MODULE, {Section, Key}) of
+ [] -> Default;
+ [{_, Match}] -> Match
+ end.
+
+set(Section, Key, Value) ->
+ ?MODULE:set(Section, Key, Value, true).
+
+set(Section, Key, Value, Persist) when is_binary(Section) and is_binary(Key) ->
+ ?MODULE:set(?b2l(Section), ?b2l(Key), Value, Persist);
+set(Section, Key, Value, Persist) ->
+ gen_server:call(?MODULE, {set, Section, Key, Value, Persist}).
+
+
+delete(Section, Key) when is_binary(Section) and is_binary(Key) ->
+ delete(?b2l(Section), ?b2l(Key));
+delete(Section, Key) ->
+ delete(Section, Key, true).
+
+delete(Section, Key, Persist) when is_binary(Section) and is_binary(Key) ->
+ delete(?b2l(Section), ?b2l(Key), Persist);
+delete(Section, Key, Persist) ->
+ gen_server:call(?MODULE, {delete, Section, Key, Persist}).
+
+
+register(Fun) ->
+ ?MODULE:register(Fun, self()).
+
+register(Fun, Pid) ->
+ gen_server:call(?MODULE, {register, Fun, Pid}).
+
+
+init(IniFiles) ->
+ ets:new(?MODULE, [named_table, set, protected]),
+ try
+ lists:map(fun(IniFile) ->
+ {ok, ParsedIniValues} = parse_ini_file(IniFile),
+ ets:insert(?MODULE, ParsedIniValues)
+ end, IniFiles),
+ WriteFile = case IniFiles of
+ [_|_] -> lists:last(IniFiles);
+ _ -> undefined
+ end,
+ {ok, #config{write_filename = WriteFile}}
+ catch _Tag:Error ->
+ {stop, Error}
+ end.
+
+
+terminate(_Reason, _State) ->
+ ok.
+
+
+handle_call(all, _From, Config) ->
+ Resp = lists:sort((ets:tab2list(?MODULE))),
+ {reply, Resp, Config};
+handle_call({set, Sec, Key, Val, Persist}, From, Config) ->
+ Result = case {Persist, Config#config.write_filename} of
+ {true, undefined} ->
+ ok;
+ {true, FileName} ->
+ couch_config_writer:save_to_file({{Sec, Key}, Val}, FileName);
+ _ ->
+ ok
+ end,
+ case Result of
+ ok ->
+ true = ets:insert(?MODULE, {{Sec, Key}, Val}),
+ spawn_link(fun() ->
+ [catch F(Sec, Key, Val, Persist) || {_Pid, F} <- Config#config.notify_funs],
+ gen_server:reply(From, ok)
+ end),
+ {noreply, Config};
+ _Error ->
+ {reply, Result, Config}
+ end;
+handle_call({delete, Sec, Key, Persist}, From, Config) ->
+ true = ets:delete(?MODULE, {Sec,Key}),
+ case {Persist, Config#config.write_filename} of
+ {true, undefined} ->
+ ok;
+ {true, FileName} ->
+ couch_config_writer:save_to_file({{Sec, Key}, ""}, FileName);
+ _ ->
+ ok
+ end,
+ spawn_link(fun() ->
+ [catch F(Sec, Key, deleted, Persist) || {_Pid, F} <- Config#config.notify_funs],
+ gen_server:reply(From, ok)
+ end),
+ {noreply, Config};
+handle_call({register, Fun, Pid}, _From, #config{notify_funs=PidFuns}=Config) ->
+ erlang:monitor(process, Pid),
+ % normalize funs of arity 1, 2 or 3 to arity 4
+ Fun2 =
+ case Fun of
+ _ when is_function(Fun, 1) ->
+ fun(Section, _Key, _Value, _Persist) -> Fun(Section) end;
+ _ when is_function(Fun, 2) ->
+ fun(Section, Key, _Value, _Persist) -> Fun(Section, Key) end;
+ _ when is_function(Fun, 3) ->
+ fun(Section, Key, Value, _Persist) -> Fun(Section, Key, Value) end;
+ _ when is_function(Fun, 4) ->
+ Fun
+ end,
+ {reply, ok, Config#config{notify_funs=[{Pid, Fun2} | PidFuns]}}.
+
+
+handle_cast(stop, State) ->
+ {stop, normal, State};
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info({'DOWN', _, _, DownPid, _}, #config{notify_funs=PidFuns}=Config) ->
+ % remove any funs registered by the downed process
+ FilteredPidFuns = [{Pid,Fun} || {Pid,Fun} <- PidFuns, Pid /= DownPid],
+ {noreply, Config#config{notify_funs=FilteredPidFuns}}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
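+% parse_ini_file/1 accepts the usual ini syntax: [section] headers, ";"
+% comments, and multi-line values (a continuation line starts with a space).
+% An illustrative snippet:
+%
+%   [couchdb]
+%   database_dir = /var/lib/couchdb ; this comment is stripped at parse time
+%   uri_file = /var/run/couchdb/couchdb.uri
+%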
+parse_ini_file(IniFile) ->
+ IniFilename = couch_util:abs_pathname(IniFile),
+ IniBin =
+ case file:read_file(IniFilename) of
+ {ok, IniBin0} ->
+ IniBin0;
+ {error, eacces} ->
+ throw({file_permission_error, IniFile});
+ {error, enoent} ->
+ Fmt = "Couldn't find server configuration file ~s.",
+ Msg = ?l2b(io_lib:format(Fmt, [IniFilename])),
+ ?LOG_ERROR("~s~n", [Msg]),
+ throw({startup_error, Msg})
+ end,
+
+ Lines = re:split(IniBin, "\r\n|\n|\r|\032", [{return, list}]),
+ {_, ParsedIniValues} =
+ lists:foldl(fun(Line, {AccSectionName, AccValues}) ->
+ case string:strip(Line) of
+ "[" ++ Rest ->
+ case re:split(Rest, "\\]", [{return, list}]) of
+ [NewSectionName, ""] ->
+ {NewSectionName, AccValues};
+ _Else -> % end bracket not at end, ignore this line
+ {AccSectionName, AccValues}
+ end;
+ ";" ++ _Comment ->
+ {AccSectionName, AccValues};
+ Line2 ->
+ case re:split(Line2, "\s?=\s?", [{return, list}]) of
+ [Value] ->
+ MultiLineValuePart = case re:run(Line, "^ \\S", []) of
+ {match, _} ->
+ true;
+ _ ->
+ false
+ end,
+ case {MultiLineValuePart, AccValues} of
+ {true, [{{_, ValueName}, PrevValue} | AccValuesRest]} ->
+ % remove comment
+ case re:split(Value, " ;|\t;", [{return, list}]) of
+ [[]] ->
+ % empty line
+ {AccSectionName, AccValues};
+ [LineValue | _Rest] ->
+ E = {{AccSectionName, ValueName},
+ PrevValue ++ " " ++ LineValue},
+ {AccSectionName, [E | AccValuesRest]}
+ end;
+ _ ->
+ {AccSectionName, AccValues}
+ end;
+ [""|_LineValues] -> % line begins with "=", ignore
+ {AccSectionName, AccValues};
+ [ValueName|LineValues] -> % yeehaw, got a line!
+ RemainingLine = couch_util:implode(LineValues, "="),
+ % removes comments
+ case re:split(RemainingLine, " ;|\t;", [{return, list}]) of
+ [[]] ->
+ % empty line means delete this key
+ ets:delete(?MODULE, {AccSectionName, ValueName}),
+ {AccSectionName, AccValues};
+ [LineValue | _Rest] ->
+ {AccSectionName,
+ [{{AccSectionName, ValueName}, LineValue} | AccValues]}
+ end
+ end
+ end
+ end, {"", []}, Lines),
+ {ok, ParsedIniValues}.
+
diff --git a/1.1.x/src/couchdb/couch_config_writer.erl b/1.1.x/src/couchdb/couch_config_writer.erl
new file mode 100644
index 00000000..decd269a
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_config_writer.erl
@@ -0,0 +1,86 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% @doc Saves a Key/Value pair to an ini file. The Key consists of a Section
+%% and Option combination. If that combination is found in the ini file,
+%% the new value replaces the old value. If only the Section is found, the
+%% Option and value combination is appended to the Section. If the Section
+%% does not yet exist in the ini file, it is added and the Option/Value
+%% pair is appended.
+%% @see couch_config
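+%%
+%% For example (path and values illustrative), the call
+%%   couch_config_writer:save_to_file({{"log", "level"}, "debug"},
+%%                                    "/etc/couchdb/local.ini")
+%% rewrites the "level" line under "[log]" if one exists, or else appends
+%%   [log]
+%%   level = debug
+%% to the end of the file.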
+
+-module(couch_config_writer).
+
+-export([save_to_file/2]).
+
+%% @spec save_to_file(
+%% Config::{{Section::string(), Option::string()}, Value::string()},
+%% File::filename()) -> ok
+%% @doc Saves a Section/Key/Value triple to the ini file File::filename()
+save_to_file({{Section, Key}, Value}, File) ->
+ {ok, OldFileContents} = file:read_file(File),
+ Lines = re:split(OldFileContents, "\r\n|\n|\r|\032", [{return, list}]),
+
+ SectionLine = "[" ++ Section ++ "]",
+ {ok, Pattern} = re:compile(["^(", Key, "\\s*=)|\\[[a-zA-Z0-9\_-]*\\]"]),
+
+ NewLines = process_file_lines(Lines, [], SectionLine, Pattern, Key, Value),
+ NewFileContents = reverse_and_add_newline(strip_empty_lines(NewLines), []),
+ case file:write_file(File, NewFileContents) of
+ ok ->
+ ok;
+ {error, eacces} ->
+ {file_permission_error, File};
+ Error ->
+ Error
+ end.
+
+
+process_file_lines([Section|Rest], SeenLines, Section, Pattern, Key, Value) ->
+ process_section_lines(Rest, [Section|SeenLines], Pattern, Key, Value);
+
+process_file_lines([Line|Rest], SeenLines, Section, Pattern, Key, Value) ->
+ process_file_lines(Rest, [Line|SeenLines], Section, Pattern, Key, Value);
+
+process_file_lines([], SeenLines, Section, _Pattern, Key, Value) ->
+ % Section wasn't found. Append it with the option here.
+ [Key ++ " = " ++ Value, Section, "" | strip_empty_lines(SeenLines)].
+
+
+process_section_lines([Line|Rest], SeenLines, Pattern, Key, Value) ->
+ case re:run(Line, Pattern, [{capture, all_but_first}]) of
+ nomatch -> % Found nothing interesting. Move on.
+ process_section_lines(Rest, [Line|SeenLines], Pattern, Key, Value);
+ {match, []} -> % Found another section. Append the option here.
+ lists:reverse(Rest) ++
+ [Line, "", Key ++ " = " ++ Value | strip_empty_lines(SeenLines)];
+ {match, _} -> % Found the option itself. Replace it.
+ lists:reverse(Rest) ++ [Key ++ " = " ++ Value | SeenLines]
+ end;
+
+process_section_lines([], SeenLines, _Pattern, Key, Value) ->
+ % Found end of file within the section. Append the option here.
+ [Key ++ " = " ++ Value | strip_empty_lines(SeenLines)].
+
+
+reverse_and_add_newline([Line|Rest], Content) ->
+ reverse_and_add_newline(Rest, [Line, "\n", Content]);
+
+reverse_and_add_newline([], Content) ->
+ Content.
+
+
+strip_empty_lines(["" | Rest]) ->
+ strip_empty_lines(Rest);
+
+strip_empty_lines(All) ->
+ All.
diff --git a/1.1.x/src/couchdb/couch_db.erl b/1.1.x/src/couchdb/couch_db.erl
new file mode 100644
index 00000000..1e7addaf
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_db.erl
@@ -0,0 +1,1210 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_db).
+-behaviour(gen_server).
+
+-export([open/2,open_int/2,close/1,create/2,start_compact/1,get_db_info/1,get_design_docs/1]).
+-export([open_ref_counted/2,is_idle/1,monitor/1,count_changes_since/2]).
+-export([update_doc/3,update_doc/4,update_docs/4,update_docs/2,update_docs/3,delete_doc/3]).
+-export([get_doc_info/2,open_doc/2,open_doc/3,open_doc_revs/4]).
+-export([set_revs_limit/2,get_revs_limit/1]).
+-export([get_missing_revs/2,name/1,doc_to_tree/1,get_update_seq/1,get_committed_update_seq/1]).
+-export([enum_docs/4,enum_docs_since/5]).
+-export([enum_docs_since_reduce_to_count/1,enum_docs_reduce_to_count/1]).
+-export([increment_update_seq/1,get_purge_seq/1,purge_docs/2,get_last_purged/1]).
+-export([start_link/3,open_doc_int/3,ensure_full_commit/1]).
+-export([set_security/2,get_security/1]).
+-export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
+-export([changes_since/5,changes_since/6,read_doc/2,new_revid/1]).
+-export([check_is_admin/1, check_is_reader/1]).
+-export([reopen/1]).
+
+-include("couch_db.hrl").
+
+
+start_link(DbName, Filepath, Options) ->
+ case open_db_file(Filepath, Options) of
+ {ok, Fd} ->
+ StartResult = gen_server:start_link(couch_db, {DbName, Filepath, Fd, Options}, []),
+ unlink(Fd),
+ StartResult;
+ Else ->
+ Else
+ end.
+
+open_db_file(Filepath, Options) ->
+ case couch_file:open(Filepath, Options) of
+ {ok, Fd} ->
+ {ok, Fd};
+ {error, enoent} ->
+ % Couldn't find the file. Is there a compacted version? This can
+ % happen if we crashed during the compaction file switch.
+ case couch_file:open(Filepath ++ ".compact") of
+ {ok, Fd} ->
+ ?LOG_INFO("Found ~s~s compaction file, using as primary storage.", [Filepath, ".compact"]),
+ ok = file:rename(Filepath ++ ".compact", Filepath),
+ ok = couch_file:sync(Fd),
+ {ok, Fd};
+ {error, enoent} ->
+ {not_found, no_db_file}
+ end;
+ Error ->
+ Error
+ end.
+
+
+create(DbName, Options) ->
+ couch_server:create(DbName, Options).
+
+% This is for opening a database for internal purposes, like the replicator
+% or the view indexer. It never throws a reader error.
+open_int(DbName, Options) ->
+ couch_server:open(DbName, Options).
+
+% This should be called any time an HTTP request opens the database.
+% It ensures that the HTTP userCtx is a valid reader.
+open(DbName, Options) ->
+ case couch_server:open(DbName, Options) of
+ {ok, Db} ->
+ try
+ check_is_reader(Db),
+ {ok, Db}
+ catch
+ throw:Error ->
+ close(Db),
+ throw(Error)
+ end;
+ Else -> Else
+ end.
+
+reopen(#db{main_pid = Pid, fd_ref_counter = OldRefCntr, user_ctx = UserCtx}) ->
+ {ok, #db{fd_ref_counter = NewRefCntr} = NewDb} =
+ gen_server:call(Pid, get_db, infinity),
+ case NewRefCntr =:= OldRefCntr of
+ true ->
+ ok;
+ false ->
+ couch_ref_counter:add(NewRefCntr),
+ catch couch_ref_counter:drop(OldRefCntr)
+ end,
+ {ok, NewDb#db{user_ctx = UserCtx}}.
+
+ensure_full_commit(#db{update_pid=UpdatePid,instance_start_time=StartTime}) ->
+ ok = gen_server:call(UpdatePid, full_commit, infinity),
+ {ok, StartTime}.
+
+close(#db{fd_ref_counter=RefCntr}) ->
+ couch_ref_counter:drop(RefCntr).
+
+open_ref_counted(MainPid, OpenedPid) ->
+ gen_server:call(MainPid, {open_ref_count, OpenedPid}).
+
+is_idle(MainPid) ->
+ gen_server:call(MainPid, is_idle).
+
+monitor(#db{main_pid=MainPid}) ->
+ erlang:monitor(process, MainPid).
+
+start_compact(#db{update_pid=Pid}) ->
+ gen_server:call(Pid, start_compact).
+
+delete_doc(Db, Id, Revisions) ->
+ DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
+ {ok, [Result]} = update_docs(Db, DeletedDocs, []),
+ {ok, Result}.
+
+open_doc(Db, IdOrDocInfo) ->
+ open_doc(Db, IdOrDocInfo, []).
+
+open_doc(Db, Id, Options) ->
+ increment_stat(Db, {couchdb, database_reads}),
+ case open_doc_int(Db, Id, Options) of
+ {ok, #doc{deleted=true}=Doc} ->
+ case lists:member(deleted, Options) of
+ true ->
+ apply_open_options({ok, Doc},Options);
+ false ->
+ {not_found, deleted}
+ end;
+ Else ->
+ apply_open_options(Else,Options)
+ end.
+
+apply_open_options({ok, Doc},Options) ->
+ apply_open_options2(Doc,Options);
+apply_open_options(Else,_Options) ->
+ Else.
+
+apply_open_options2(Doc,[]) ->
+ {ok, Doc};
+apply_open_options2(#doc{atts=Atts,revs=Revs}=Doc,
+ [{atts_since, PossibleAncestors}|Rest]) ->
+ RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors),
+ apply_open_options2(Doc#doc{atts=[A#att{data=
+ if AttPos>RevPos -> Data; true -> stub end}
+ || #att{revpos=AttPos,data=Data}=A <- Atts]}, Rest);
+apply_open_options2(Doc,[_|Rest]) ->
+ apply_open_options2(Doc,Rest).
+
+
+find_ancestor_rev_pos({_, []}, _AttsSinceRevs) ->
+ 0;
+find_ancestor_rev_pos(_DocRevs, []) ->
+ 0;
+find_ancestor_rev_pos({RevPos, [RevId|Rest]}, AttsSinceRevs) ->
+ case lists:member({RevPos, RevId}, AttsSinceRevs) of
+ true ->
+ RevPos;
+ false ->
+ find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
+ end.
+
+open_doc_revs(Db, Id, Revs, Options) ->
+ increment_stat(Db, {couchdb, database_reads}),
+ [{ok, Results}] = open_doc_revs_int(Db, [{Id, Revs}], Options),
+ {ok, [apply_open_options(Result, Options) || Result <- Results]}.
+
+% Returns a list of tuples of the form {Id, MissingRevs, PossibleAncestors}.
+% Ids with no missing revs are omitted from the results.
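+% For example (revs are illustrative):
+%   get_missing_revs(Db, [{<<"docA">>, [{3, RevX}]}])
+% could return
+%   {ok, [{<<"docA">>, [{3, RevX}], [{2, RevY}]}]}
+% if {3, RevX} is not in docA's rev tree but a leaf {2, RevY} is.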
+get_missing_revs(Db, IdRevsList) ->
+ Results = get_full_doc_infos(Db, [Id1 || {Id1, _Revs} <- IdRevsList]),
+ {ok, find_missing(IdRevsList, Results)}.
+
+find_missing([], []) ->
+ [];
+find_missing([{Id, Revs}|RestIdRevs], [{ok, FullInfo} | RestLookupInfo]) ->
+ case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of
+ [] ->
+ find_missing(RestIdRevs, RestLookupInfo);
+ MissingRevs ->
+ #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
+ LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
+ % Find the revs that are possible parents of this rev
+ PossibleAncestors =
+ lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
+ % this leaf is a "possible ancestor" of the missing
+ % revs if this LeafPos is less than any of the missing revs
+ case lists:any(fun({MissingPos, _}) ->
+ LeafPos < MissingPos end, MissingRevs) of
+ true ->
+ [{LeafPos, LeafRevId} | Acc];
+ false ->
+ Acc
+ end
+ end, [], LeafRevs),
+ [{Id, MissingRevs, PossibleAncestors} |
+ find_missing(RestIdRevs, RestLookupInfo)]
+ end;
+find_missing([{Id, Revs}|RestIdRevs], [not_found | RestLookupInfo]) ->
+ [{Id, Revs, []} | find_missing(RestIdRevs, RestLookupInfo)].
+
+get_doc_info(Db, Id) ->
+ case get_full_doc_info(Db, Id) of
+ {ok, DocInfo} ->
+ {ok, couch_doc:to_doc_info(DocInfo)};
+ Else ->
+ Else
+ end.
+
+% returns {ok, DocInfo} or not_found
+get_full_doc_info(Db, Id) ->
+ [Result] = get_full_doc_infos(Db, [Id]),
+ Result.
+
+get_full_doc_infos(Db, Ids) ->
+ couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids).
+
+increment_update_seq(#db{update_pid=UpdatePid}) ->
+ gen_server:call(UpdatePid, increment_update_seq).
+
+purge_docs(#db{update_pid=UpdatePid}, IdsRevs) ->
+ gen_server:call(UpdatePid, {purge_docs, IdsRevs}).
+
+get_committed_update_seq(#db{committed_update_seq=Seq}) ->
+ Seq.
+
+get_update_seq(#db{update_seq=Seq})->
+ Seq.
+
+get_purge_seq(#db{header=#db_header{purge_seq=PurgeSeq}})->
+ PurgeSeq.
+
+get_last_purged(#db{header=#db_header{purged_docs=nil}}) ->
+ {ok, []};
+get_last_purged(#db{fd=Fd, header=#db_header{purged_docs=PurgedPointer}}) ->
+ couch_file:pread_term(Fd, PurgedPointer).
+
+get_db_info(Db) ->
+ #db{fd=Fd,
+ header=#db_header{disk_version=DiskVersion},
+ compactor_pid=Compactor,
+ update_seq=SeqNum,
+ name=Name,
+ fulldocinfo_by_id_btree=FullDocBtree,
+ instance_start_time=StartTime,
+ committed_update_seq=CommittedUpdateSeq} = Db,
+ {ok, Size} = couch_file:bytes(Fd),
+ {ok, {Count, DelCount}} = couch_btree:full_reduce(FullDocBtree),
+ InfoList = [
+ {db_name, Name},
+ {doc_count, Count},
+ {doc_del_count, DelCount},
+ {update_seq, SeqNum},
+ {purge_seq, couch_db:get_purge_seq(Db)},
+ {compact_running, Compactor/=nil},
+ {disk_size, Size},
+ {instance_start_time, StartTime},
+ {disk_format_version, DiskVersion},
+ {committed_update_seq, CommittedUpdateSeq}
+ ],
+ {ok, InfoList}.
+
+get_design_docs(#db{fulldocinfo_by_id_btree=Btree}=Db) ->
+ {ok,_, Docs} = couch_btree:fold(Btree,
+ fun(#full_doc_info{id= <<"_design/",_/binary>>}=FullDocInfo, _Reds, AccDocs) ->
+ {ok, Doc} = couch_db:open_doc_int(Db, FullDocInfo, []),
+ {ok, [Doc | AccDocs]};
+ (_, _Reds, AccDocs) ->
+ {stop, AccDocs}
+ end,
+ [], [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}]),
+ {ok, Docs}.
+
+check_is_admin(#db{user_ctx=#user_ctx{name=Name,roles=Roles}}=Db) ->
+ {Admins} = get_admins(Db),
+ AdminRoles = [<<"_admin">> | couch_util:get_value(<<"roles">>, Admins, [])],
+ AdminNames = couch_util:get_value(<<"names">>, Admins,[]),
+ case AdminRoles -- Roles of
+ AdminRoles -> % same list, not an admin role
+ case AdminNames -- [Name] of
+ AdminNames -> % same names, not an admin
+ throw({unauthorized, <<"You are not a db or server admin.">>});
+ _ ->
+ ok
+ end;
+ _ ->
+ ok
+ end.
+
+check_is_reader(#db{user_ctx=#user_ctx{name=Name,roles=Roles}=UserCtx}=Db) ->
+ case (catch check_is_admin(Db)) of
+ ok -> ok;
+ _ ->
+ {Readers} = get_readers(Db),
+ ReaderRoles = couch_util:get_value(<<"roles">>, Readers,[]),
+ WithAdminRoles = [<<"_admin">> | ReaderRoles],
+ ReaderNames = couch_util:get_value(<<"names">>, Readers,[]),
+ case ReaderRoles ++ ReaderNames of
+ [] -> ok; % no readers == public access
+ _Else ->
+ case WithAdminRoles -- Roles of
+ WithAdminRoles -> % same list, not a reader role
+ case ReaderNames -- [Name] of
+ ReaderNames -> % same names, not a reader
+ ?LOG_DEBUG("Not a reader: UserCtx ~p vs Names ~p Roles ~p",[UserCtx, ReaderNames, WithAdminRoles]),
+ throw({unauthorized, <<"You are not authorized to access this db.">>});
+ _ ->
+ ok
+ end;
+ _ ->
+ ok
+ end
+ end
+ end.
+
+get_admins(#db{security=SecProps}) ->
+ couch_util:get_value(<<"admins">>, SecProps, {[]}).
+
+get_readers(#db{security=SecProps}) ->
+ couch_util:get_value(<<"readers">>, SecProps, {[]}).
+
+get_security(#db{security=SecProps}) ->
+ {SecProps}.
+
+set_security(#db{update_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
+ check_is_admin(Db),
+ ok = validate_security_object(NewSecProps),
+ ok = gen_server:call(Pid, {set_security, NewSecProps}, infinity),
+ {ok, _} = ensure_full_commit(Db),
+ ok;
+set_security(_, _) ->
+ throw(bad_request).
+
+validate_security_object(SecProps) ->
+ Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
+ Readers = couch_util:get_value(<<"readers">>, SecProps, {[]}),
+ ok = validate_names_and_roles(Admins),
+ ok = validate_names_and_roles(Readers),
+ ok.
+
+% validate user input
+validate_names_and_roles({Props}) when is_list(Props) ->
+ case couch_util:get_value(<<"names">>,Props,[]) of
+ Ns when is_list(Ns) ->
+ [throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)],
+ Ns;
+ _ -> throw("names must be a JSON list of strings")
+ end,
+ case couch_util:get_value(<<"roles">>,Props,[]) of
+ Rs when is_list(Rs) ->
+ [throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)],
+ Rs;
+ _ -> throw("roles must be a JSON list of strings")
+ end,
+ ok.
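+
+% A well-formed argument to set_security/2, as validated above (names and
+% roles are illustrative):
+%   {[{<<"admins">>, {[{<<"names">>, [<<"bob">>]},
+%                      {<<"roles">>, [<<"ops">>]}]}},
+%     {<<"readers">>, {[{<<"names">>, []},
+%                       {<<"roles">>, [<<"staff">>]}]}}]}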
+
+get_revs_limit(#db{revs_limit=Limit}) ->
+ Limit.
+
+set_revs_limit(#db{update_pid=Pid}=Db, Limit) when Limit > 0 ->
+ check_is_admin(Db),
+ gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
+set_revs_limit(_Db, _Limit) ->
+ throw(invalid_revs_limit).
+
+name(#db{name=Name}) ->
+ Name.
+
+update_doc(Db, Doc, Options) ->
+ update_doc(Db, Doc, Options, interactive_edit).
+
+update_doc(Db, Doc, Options, UpdateType) ->
+ case update_docs(Db, [Doc], Options, UpdateType) of
+ {ok, [{ok, NewRev}]} ->
+ {ok, NewRev};
+ {ok, [{{_Id, _Rev}, Error}]} ->
+ throw(Error);
+ {ok, [Error]} ->
+ throw(Error);
+ {ok, []} ->
+ % replication success
+ {Pos, [RevId | _]} = Doc#doc.revs,
+ {ok, {Pos, RevId}}
+ end.
+
+update_docs(Db, Docs) ->
+ update_docs(Db, Docs, []).
+
+% group_alike_docs groups the sorted documents into sublist buckets by id.
+% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
+group_alike_docs(Docs) ->
+ Sorted = lists:sort(fun(#doc{id=A},#doc{id=B})-> A < B end, Docs),
+ group_alike_docs(Sorted, []).
+
+group_alike_docs([], Buckets) ->
+ lists:reverse(Buckets);
+group_alike_docs([Doc|Rest], []) ->
+ group_alike_docs(Rest, [[Doc]]);
+group_alike_docs([Doc|Rest], [Bucket|RestBuckets]) ->
+ [#doc{id=BucketId}|_] = Bucket,
+ case Doc#doc.id == BucketId of
+ true ->
+ % add to existing bucket
+ group_alike_docs(Rest, [[Doc|Bucket]|RestBuckets]);
+ false ->
+ % add to new bucket
+ group_alike_docs(Rest, [[Doc]|[Bucket|RestBuckets]])
+ end.
+
+validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}, _GetDiskDocFun) ->
+ catch check_is_admin(Db);
+validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
+ ok;
+validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
+ ok;
+validate_doc_update(Db, Doc, GetDiskDocFun) ->
+ DiskDoc = GetDiskDocFun(),
+ JsonCtx = couch_util:json_user_ctx(Db),
+ SecObj = get_security(Db),
+ try [case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
+ ok -> ok;
+ Error -> throw(Error)
+ end || Fun <- Db#db.validate_doc_funs],
+ ok
+ catch
+ throw:Error ->
+ Error
+ end.
+
+
+prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc,
+ OldFullDocInfo, LeafRevsDict, AllowConflict) ->
+ case Revs of
+ [PrevRev|_] ->
+ case dict:find({RevStart, PrevRev}, LeafRevsDict) of
+ {ok, {Deleted, DiskSp, DiskRevs}} ->
+ case couch_doc:has_stubs(Doc) of
+ true ->
+ DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
+ Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
+ {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
+ false ->
+ LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end,
+ {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
+ end;
+ error when AllowConflict ->
+ couch_doc:merge_stubs(Doc, #doc{}), % will generate error if
+ % there are stubs
+ {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
+ error ->
+ {conflict, Doc}
+ end;
+ [] ->
+ % new doc, and we have existing revs.
+ % reuse existing deleted doc
+ if OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
+ {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
+ true ->
+ {conflict, Doc}
+ end
+ end.
+
+
+
+prep_and_validate_updates(_Db, [], [], _AllowConflict, AccPrepped,
+ AccFatalErrors) ->
+ {AccPrepped, AccFatalErrors};
+prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups],
+ AllowConflict, AccPrepped, AccErrors) ->
+ [#doc{id=Id}|_]=DocBucket,
+ % no existing revs are known,
+ {PreppedBucket, AccErrors3} = lists:foldl(
+ fun(#doc{revs=Revs}=Doc, {AccBucket, AccErrors2}) ->
+ case couch_doc:has_stubs(Doc) of
+ true ->
+ couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
+ false -> ok
+ end,
+ case Revs of
+ {0, []} ->
+ case validate_doc_update(Db, Doc, fun() -> nil end) of
+ ok ->
+ {[Doc | AccBucket], AccErrors2};
+ Error ->
+ {AccBucket, [{{Id, {0, []}}, Error} | AccErrors2]}
+ end;
+ _ ->
+ % old revs specified but none exist, a conflict
+ {AccBucket, [{{Id, Revs}, conflict} | AccErrors2]}
+ end
+ end,
+ {[], AccErrors}, DocBucket),
+
+ prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
+ [PreppedBucket | AccPrepped], AccErrors3);
+prep_and_validate_updates(Db, [DocBucket|RestBuckets],
+ [{ok, #full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo}|RestLookups],
+ AllowConflict, AccPrepped, AccErrors) ->
+ Leafs = couch_key_tree:get_all_leafs(OldRevTree),
+ LeafRevsDict = dict:from_list([{{Start, RevId}, {Deleted, Sp, Revs}} ||
+ {{Deleted, Sp, _Seq}, {Start, [RevId|_]}=Revs} <- Leafs]),
+ {PreppedBucket, AccErrors3} = lists:foldl(
+ fun(Doc, {Docs2Acc, AccErrors2}) ->
+ case prep_and_validate_update(Db, Doc, OldFullDocInfo,
+ LeafRevsDict, AllowConflict) of
+ {ok, Doc2} ->
+ {[Doc2 | Docs2Acc], AccErrors2};
+ {Error, #doc{id=Id,revs=Revs}} ->
+ % Record the error
+ {Docs2Acc, [{{Id, Revs}, Error} |AccErrors2]}
+ end
+ end,
+ {[], AccErrors}, DocBucket),
+ prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
+ [PreppedBucket | AccPrepped], AccErrors3).
+
+
+update_docs(Db, Docs, Options) ->
+ update_docs(Db, Docs, Options, interactive_edit).
+
+
+prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
+ Errors2 = [{{Id, {Pos, Rev}}, Error} ||
+ {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
+ {lists:reverse(AccPrepped), lists:reverse(Errors2)};
+prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
+ case OldInfo of
+ not_found ->
+ {ValidatedBucket, AccErrors3} = lists:foldl(
+ fun(Doc, {AccPrepped2, AccErrors2}) ->
+ case couch_doc:has_stubs(Doc) of
+ true ->
+ couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
+ false -> ok
+ end,
+ case validate_doc_update(Db, Doc, fun() -> nil end) of
+ ok ->
+ {[Doc | AccPrepped2], AccErrors2};
+ Error ->
+ {AccPrepped2, [{Doc, Error} | AccErrors2]}
+ end
+ end,
+ {[], AccErrors}, Bucket),
+ prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
+ {ok, #full_doc_info{rev_tree=OldTree}} ->
+ NewRevTree = lists:foldl(
+ fun(NewDoc, AccTree) ->
+ {NewTree, _} = couch_key_tree:merge(AccTree,
+ couch_db:doc_to_tree(NewDoc), Db#db.revs_limit),
+ NewTree
+ end,
+ OldTree, Bucket),
+ Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
+ LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
+ {ValidatedBucket, AccErrors3} =
+ lists:foldl(
+ fun(#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, {AccValidated, AccErrors2}) ->
+ case dict:find({Pos, RevId}, LeafRevsFullDict) of
+ {ok, {Start, Path}} ->
+ % our unflushed doc is a leaf node. Go back on the path
+ % to find the previous rev that's on disk.
+
+ LoadPrevRevFun = fun() ->
+ make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
+ end,
+
+ case couch_doc:has_stubs(Doc) of
+ true ->
+ DiskDoc = LoadPrevRevFun(),
+ Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
+ GetDiskDocFun = fun() -> DiskDoc end;
+ false ->
+ Doc2 = Doc,
+ GetDiskDocFun = LoadPrevRevFun
+ end,
+
+ case validate_doc_update(Db, Doc2, GetDiskDocFun) of
+ ok ->
+ {[Doc2 | AccValidated], AccErrors2};
+ Error ->
+ {AccValidated, [{Doc, Error} | AccErrors2]}
+ end;
+ _ ->
+ % This doc isn't a leaf, or it already exists in the tree.
+ % Ignore it, but consider it a success.
+ {AccValidated, AccErrors2}
+ end
+ end,
+ {[], AccErrors}, Bucket),
+ prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo,
+ [ValidatedBucket | AccPrepped], AccErrors3)
+ end.
+
+
+
+new_revid(#doc{body=Body,revs={OldStart,OldRevs},
+ atts=Atts,deleted=Deleted}) ->
+ case [{N, T, M} || #att{name=N,type=T,md5=M} <- Atts, M =/= <<>>] of
+ Atts2 when length(Atts) =/= length(Atts2) ->
+ % We must have old style non-md5 attachments
+ ?l2b(integer_to_list(couch_util:rand32()));
+ Atts2 ->
+ OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end,
+ couch_util:md5(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2]))
+ end.
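+
+% Note: for docs whose attachments all carry md5s, the revid is deterministic
+% (an md5 over the deleted flag, parent revision, body and attachment md5s),
+% so the same edit produces the same revision id on any node.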
+
+new_revs([], OutBuckets, IdRevsAcc) ->
+ {lists:reverse(OutBuckets), IdRevsAcc};
+new_revs([Bucket|RestBuckets], OutBuckets, IdRevsAcc) ->
+ {NewBucket, IdRevsAcc3} = lists:mapfoldl(
+ fun(#doc{id=Id,revs={Start, RevIds}}=Doc, IdRevsAcc2)->
+ NewRevId = new_revid(Doc),
+ {Doc#doc{revs={Start+1, [NewRevId | RevIds]}},
+ [{{Id, {Start, RevIds}}, {ok, {Start+1, NewRevId}}} | IdRevsAcc2]}
+ end, IdRevsAcc, Bucket),
+ new_revs(RestBuckets, [NewBucket|OutBuckets], IdRevsAcc3).
+
+check_dup_atts(#doc{atts=Atts}=Doc) ->
+ Atts2 = lists:sort(fun(#att{name=N1}, #att{name=N2}) -> N1 < N2 end, Atts),
+ check_dup_atts2(Atts2),
+ Doc.
+
+check_dup_atts2([#att{name=N}, #att{name=N} | _]) ->
+ throw({bad_request, <<"Duplicate attachments">>});
+check_dup_atts2([_ | Rest]) ->
+ check_dup_atts2(Rest);
+check_dup_atts2(_) ->
+ ok.
+
+
+update_docs(Db, Docs, Options, replicated_changes) ->
+ increment_stat(Db, {couchdb, database_writes}),
+ DocBuckets = group_alike_docs(Docs),
+
+ case (Db#db.validate_doc_funs /= []) orelse
+ lists:any(
+ fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
+ (#doc{atts=Atts}) ->
+ Atts /= []
+ end, Docs) of
+ true ->
+ Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
+ ExistingDocs = get_full_doc_infos(Db, Ids),
+
+ {DocBuckets2, DocErrors} =
+ prep_and_validate_replicated_updates(Db, DocBuckets, ExistingDocs, [], []),
+ DocBuckets3 = [Bucket || [_|_]=Bucket <- DocBuckets2]; % remove empty buckets
+ false ->
+ DocErrors = [],
+ DocBuckets3 = DocBuckets
+ end,
+ DocBuckets4 = [[doc_flush_atts(check_dup_atts(Doc), Db#db.fd)
+ || Doc <- Bucket] || Bucket <- DocBuckets3],
+ {ok, []} = write_and_commit(Db, DocBuckets4, [], [merge_conflicts | Options]),
+ {ok, DocErrors};
+
+update_docs(Db, Docs, Options, interactive_edit) ->
+ increment_stat(Db, {couchdb, database_writes}),
+ AllOrNothing = lists:member(all_or_nothing, Options),
+ % go ahead and generate the new revision ids for the documents.
+ % separate out the NonRep documents from the rest of the documents
+ {Docs2, NonRepDocs} = lists:foldl(
+ fun(#doc{id=Id}=Doc, {DocsAcc, NonRepDocsAcc}) ->
+ case Id of
+ <<?LOCAL_DOC_PREFIX, _/binary>> ->
+ {DocsAcc, [Doc | NonRepDocsAcc]};
+ Id->
+ {[Doc | DocsAcc], NonRepDocsAcc}
+ end
+ end, {[], []}, Docs),
+
+ DocBuckets = group_alike_docs(Docs2),
+
+ case (Db#db.validate_doc_funs /= []) orelse
+ lists:any(
+ fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) ->
+ true;
+ (#doc{atts=Atts}) ->
+ Atts /= []
+ end, Docs2) of
+ true ->
+ % lookup the doc by id and get the most recent
+ Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
+ ExistingDocInfos = get_full_doc_infos(Db, Ids),
+
+ {DocBucketsPrepped, PreCommitFailures} = prep_and_validate_updates(Db,
+ DocBuckets, ExistingDocInfos, AllOrNothing, [], []),
+
+ % strip out any empty buckets
+ DocBuckets2 = [Bucket || [_|_] = Bucket <- DocBucketsPrepped];
+ false ->
+ PreCommitFailures = [],
+ DocBuckets2 = DocBuckets
+ end,
+
+ if (AllOrNothing) and (PreCommitFailures /= []) ->
+ {aborted, lists:map(
+ fun({{Id,{Pos, [RevId|_]}}, Error}) ->
+ {{Id, {Pos, RevId}}, Error};
+ ({{Id,{0, []}}, Error}) ->
+ {{Id, {0, <<>>}}, Error}
+ end, PreCommitFailures)};
+ true ->
+ Options2 = if AllOrNothing -> [merge_conflicts];
+ true -> [] end ++ Options,
+ DocBuckets3 = [[
+ doc_flush_atts(set_new_att_revpos(
+ check_dup_atts(Doc)), Db#db.fd)
+ || Doc <- B] || B <- DocBuckets2],
+ {DocBuckets4, IdRevs} = new_revs(DocBuckets3, [], []),
+
+ {ok, CommitResults} = write_and_commit(Db, DocBuckets4, NonRepDocs, Options2),
+
+ ResultsDict = dict:from_list(IdRevs ++ CommitResults ++ PreCommitFailures),
+ {ok, lists:map(
+ fun(#doc{id=Id,revs={Pos, RevIds}}) ->
+ {ok, Result} = dict:find({Id, {Pos, RevIds}}, ResultsDict),
+ Result
+ end, Docs)}
+ end.
+
+% Returns the first available document on disk. Input list is a full rev path
+% for the doc.
+make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
+ nil;
+make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) ->
+ make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
+make_first_doc_on_disk(Db, Id, Pos, [{_Rev, {IsDel, Sp, _Seq}} |_]=DocPath) ->
+ Revs = [Rev || {Rev, _} <- DocPath],
+ make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).
+
+set_commit_option(Options) ->
+ CommitSettings = {
+ [true || O <- Options, O==full_commit orelse O==delay_commit],
+ couch_config:get("couchdb", "delayed_commits", "false")
+ },
+ case CommitSettings of
+ {[true], _} ->
+ Options; % user requested explicit commit setting, do not change it
+ {_, "true"} ->
+ Options; % delayed commits are enabled, do nothing
+ {_, "false"} ->
+ [full_commit|Options];
+ {_, Else} ->
+ ?LOG_ERROR("[couchdb] delayed_commits setting must be true/false, not ~p",
+ [Else]),
+ [full_commit|Options]
+ end.
+
+collect_results(UpdatePid, MRef, ResultsAcc) ->
+ receive
+ {result, UpdatePid, Result} ->
+ collect_results(UpdatePid, MRef, [Result | ResultsAcc]);
+ {done, UpdatePid} ->
+ {ok, ResultsAcc};
+ {retry, UpdatePid} ->
+ retry;
+ {'DOWN', MRef, _, _, Reason} ->
+ exit(Reason)
+ end.
+
+write_and_commit(#db{update_pid=UpdatePid}=Db, DocBuckets,
+ NonRepDocs, Options0) ->
+ Options = set_commit_option(Options0),
+ MergeConflicts = lists:member(merge_conflicts, Options),
+ FullCommit = lists:member(full_commit, Options),
+ MRef = erlang:monitor(process, UpdatePid),
+ try
+ UpdatePid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts, FullCommit},
+ case collect_results(UpdatePid, MRef, []) of
+ {ok, Results} -> {ok, Results};
+ retry ->
+ % This can happen if the db file we wrote to was swapped out by
+ % compaction. Retry by reopening the db and writing to the current file
+ {ok, Db2} = open_ref_counted(Db#db.main_pid, self()),
+ DocBuckets2 = [[doc_flush_atts(Doc, Db2#db.fd) || Doc <- Bucket] || Bucket <- DocBuckets],
+ % We only retry once
+ close(Db2),
+ UpdatePid ! {update_docs, self(), DocBuckets2, NonRepDocs, MergeConflicts, FullCommit},
+ case collect_results(UpdatePid, MRef, []) of
+ {ok, Results} -> {ok, Results};
+ retry -> throw({update_error, compaction_retry})
+ end
+ end
+ after
+ erlang:demonitor(MRef, [flush])
+ end.
+
+
+set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts}=Doc) ->
+ Doc#doc{atts= lists:map(fun(#att{data={_Fd,_Sp}}=Att) ->
+ % already committed to disk, do not set a new revpos
+ Att;
+ (Att) ->
+ Att#att{revpos=RevPos+1}
+ end, Atts)}.
+
+
+doc_flush_atts(Doc, Fd) ->
+ Doc#doc{atts=[flush_att(Fd, Att) || Att <- Doc#doc.atts]}.
+
+check_md5(_NewSig, <<>>) -> ok;
+check_md5(Sig1, Sig2) when Sig1 == Sig2 -> ok;
+check_md5(_, _) -> throw(md5_mismatch).
+
+flush_att(Fd, #att{data={Fd0, _}}=Att) when Fd0 == Fd ->
+ % already written to our file, nothing to write
+ Att;
+
+flush_att(Fd, #att{data={OtherFd,StreamPointer}, md5=InMd5,
+ disk_len=InDiskLen} = Att) ->
+ {NewStreamData, Len, _IdentityLen, Md5, IdentityMd5} =
+ couch_stream:copy_to_new_stream(OtherFd, StreamPointer, Fd),
+ check_md5(IdentityMd5, InMd5),
+ Att#att{data={Fd, NewStreamData}, md5=Md5, att_len=Len, disk_len=InDiskLen};
+
+flush_att(Fd, #att{data=Data}=Att) when is_binary(Data) ->
+ with_stream(Fd, Att, fun(OutputStream) ->
+ couch_stream:write(OutputStream, Data)
+ end);
+
+flush_att(Fd, #att{data=Fun,att_len=undefined}=Att) when is_function(Fun) ->
+ with_stream(Fd, Att, fun(OutputStream) ->
+ % Fun(MaxChunkSize, WriterFun) must call WriterFun
+ % once for each chunk of the attachment,
+ Fun(4096,
+ % WriterFun({Length, Binary}, State)
+ % WriterFun({0, _Footers}, State)
+ % Called with Length == 0 on the last time.
+ % WriterFun returns NewState.
+ fun({0, Footers}, _) ->
+ F = mochiweb_headers:from_binary(Footers),
+ case mochiweb_headers:get_value("Content-MD5", F) of
+ undefined ->
+ ok;
+ Md5 ->
+ {md5, base64:decode(Md5)}
+ end;
+ ({_Length, Chunk}, _) ->
+ couch_stream:write(OutputStream, Chunk)
+ end, ok)
+ end);
+
+flush_att(Fd, #att{data=Fun,att_len=AttLen}=Att) when is_function(Fun) ->
+ with_stream(Fd, Att, fun(OutputStream) ->
+ write_streamed_attachment(OutputStream, Fun, AttLen)
+ end).
+
+% From RFC 2616 3.6.1 - Chunked Transfer Coding
+%
+% In other words, the origin server is willing to accept
+% the possibility that the trailer fields might be silently
+% discarded along the path to the client.
+%
+% I take this to mean that if "Trailers: Content-MD5\r\n"
+% is present in the request, but there is no Content-MD5
+% trailer, we're free to ignore this inconsistency and
+% pretend that no Content-MD5 exists.
+with_stream(Fd, #att{md5=InMd5,type=Type,encoding=Enc}=Att, Fun) ->
+ {ok, OutputStream} = case (Enc =:= identity) andalso
+ couch_util:compressible_att_type(Type) of
+ true ->
+ CompLevel = list_to_integer(
+ couch_config:get("attachments", "compression_level", "0")
+ ),
+ couch_stream:open(Fd, gzip, [{compression_level, CompLevel}]);
+ _ ->
+ couch_stream:open(Fd)
+ end,
+ ReqMd5 = case Fun(OutputStream) of
+ {md5, FooterMd5} ->
+ case InMd5 of
+ md5_in_footer -> FooterMd5;
+ _ -> InMd5
+ end;
+ _ ->
+ InMd5
+ end,
+ {StreamInfo, Len, IdentityLen, Md5, IdentityMd5} =
+ couch_stream:close(OutputStream),
+ check_md5(IdentityMd5, ReqMd5),
+ {AttLen, DiskLen, NewEnc} = case Enc of
+ identity ->
+ case {Md5, IdentityMd5} of
+ {Same, Same} ->
+ {Len, IdentityLen, identity};
+ _ ->
+ {Len, IdentityLen, gzip}
+ end;
+ gzip ->
+ case {Att#att.att_len, Att#att.disk_len} of
+ {AL, DL} when AL =:= undefined orelse DL =:= undefined ->
+ % Compressed attachment uploaded through the standalone API.
+ {Len, Len, gzip};
+ {AL, DL} ->
+ % This case is used for efficient push-replication, where a
+ % compressed attachment is located in the body of multipart
+ % content-type request.
+ {AL, DL, gzip}
+ end
+ end,
+ Att#att{
+ data={Fd,StreamInfo},
+ att_len=AttLen,
+ disk_len=DiskLen,
+ md5=Md5,
+ encoding=NewEnc
+ }.
+
+
+write_streamed_attachment(_Stream, _F, 0) ->
+ ok;
+write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 ->
+ Bin = F(),
+ ok = couch_stream:write(Stream, Bin),
+ write_streamed_attachment(Stream, F, LenLeft - size(Bin)).
+
+enum_docs_since_reduce_to_count(Reds) ->
+ couch_btree:final_reduce(
+ fun couch_db_updater:btree_by_seq_reduce/2, Reds).
+
+enum_docs_reduce_to_count(Reds) ->
+ {Count, _DelCount} = couch_btree:final_reduce(
+ fun couch_db_updater:btree_by_id_reduce/2, Reds),
+ Count.
+
+changes_since(Db, Style, StartSeq, Fun, Acc) ->
+ changes_since(Db, Style, StartSeq, Fun, [], Acc).
+
+changes_since(Db, Style, StartSeq, Fun, Options, Acc) ->
+ Wrapper = fun(DocInfo, _Offset, Acc2) ->
+ #doc_info{revs=Revs} = DocInfo,
+ DocInfo2 =
+ case Style of
+ main_only ->
+ DocInfo;
+ all_docs ->
+ % remove revs before the seq
+ DocInfo#doc_info{revs=[RevInfo ||
+ #rev_info{seq=RevSeq}=RevInfo <- Revs, StartSeq < RevSeq]}
+ end,
+ Fun(DocInfo2, Acc2)
+ end,
+ {ok, _LastReduction, AccOut} = couch_btree:fold(Db#db.docinfo_by_seq_btree,
+ Wrapper, Acc, [{start_key, StartSeq + 1}] ++ Options),
+ {ok, AccOut}.
+
+count_changes_since(Db, SinceSeq) ->
+ {ok, Changes} =
+ couch_btree:fold_reduce(Db#db.docinfo_by_seq_btree,
+ fun(_SeqStart, PartialReds, 0) ->
+ {ok, couch_btree:final_reduce(Db#db.docinfo_by_seq_btree, PartialReds)}
+ end,
+ 0, [{start_key, SinceSeq + 1}]),
+ Changes.
+
+enum_docs_since(Db, SinceSeq, InFun, Acc, Options) ->
+ {ok, LastReduction, AccOut} = couch_btree:fold(Db#db.docinfo_by_seq_btree, InFun, Acc, [{start_key, SinceSeq + 1} | Options]),
+ {ok, enum_docs_since_reduce_to_count(LastReduction), AccOut}.
+
+enum_docs(Db, InFun, InAcc, Options) ->
+ {ok, LastReduce, OutAcc} = couch_btree:fold(Db#db.fulldocinfo_by_id_btree, InFun, InAcc, Options),
+ {ok, enum_docs_reduce_to_count(LastReduce), OutAcc}.
+
+% server functions
+
+init({DbName, Filepath, Fd, Options}) ->
+ {ok, UpdaterPid} = gen_server:start_link(couch_db_updater, {self(), DbName, Filepath, Fd, Options}, []),
+ {ok, #db{fd_ref_counter=RefCntr}=Db} = gen_server:call(UpdaterPid, get_db),
+ couch_ref_counter:add(RefCntr),
+ case lists:member(sys_db, Options) of
+ true ->
+ ok;
+ false ->
+ couch_stats_collector:track_process_count({couchdb, open_databases})
+ end,
+ process_flag(trap_exit, true),
+ {ok, Db}.
+
+terminate(_Reason, Db) ->
+ couch_util:shutdown_sync(Db#db.update_pid),
+ ok.
+
+handle_call({open_ref_count, OpenerPid}, _, #db{fd_ref_counter=RefCntr}=Db) ->
+ ok = couch_ref_counter:add(RefCntr, OpenerPid),
+ {reply, {ok, Db}, Db};
+handle_call(is_idle, _From, #db{fd_ref_counter=RefCntr, compactor_pid=Compact,
+ waiting_delayed_commit=Delay}=Db) ->
+ % Idle means no referrers. Unless in the middle of a compaction file switch,
+ % there are always at least 2 referrers, couch_db_updater and us.
+ {reply, (Delay == nil) andalso (Compact == nil) andalso (couch_ref_counter:count(RefCntr) == 2), Db};
+handle_call({db_updated, NewDb}, _From, #db{fd_ref_counter=OldRefCntr}) ->
+ #db{fd_ref_counter=NewRefCntr}=NewDb,
+ case NewRefCntr =:= OldRefCntr of
+ true -> ok;
+ false ->
+ couch_ref_counter:add(NewRefCntr),
+ couch_ref_counter:drop(OldRefCntr)
+ end,
+ {reply, ok, NewDb};
+handle_call(get_db, _From, Db) ->
+ {reply, {ok, Db}, Db}.
+
+
+handle_cast(Msg, Db) ->
+ ?LOG_ERROR("Bad cast message received for db ~s: ~p", [Db#db.name, Msg]),
+ exit({error, Msg}).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_info({'EXIT', _Pid, normal}, Db) ->
+ {noreply, Db};
+handle_info({'EXIT', _Pid, Reason}, Server) ->
+ {stop, Reason, Server};
+handle_info(Msg, Db) ->
+ ?LOG_ERROR("Bad message received for db ~s: ~p", [Db#db.name, Msg]),
+ exit({error, Msg}).
+
+
+%%% Internal function %%%
+open_doc_revs_int(Db, IdRevs, Options) ->
+ Ids = [Id || {Id, _Revs} <- IdRevs],
+ LookupResults = get_full_doc_infos(Db, Ids),
+ lists:zipwith(
+ fun({Id, Revs}, Lookup) ->
+ case Lookup of
+ {ok, #full_doc_info{rev_tree=RevTree}} ->
+ {FoundRevs, MissingRevs} =
+ case Revs of
+ all ->
+ {couch_key_tree:get_all_leafs(RevTree), []};
+ _ ->
+ case lists:member(latest, Options) of
+ true ->
+ couch_key_tree:get_key_leafs(RevTree, Revs);
+ false ->
+ couch_key_tree:get(RevTree, Revs)
+ end
+ end,
+ FoundResults =
+ lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) ->
+ case Value of
+ ?REV_MISSING ->
+ % we have the rev in our list but know nothing about it
+ {{not_found, missing}, {Pos, Rev}};
+ {IsDeleted, SummaryPtr, _UpdateSeq} ->
+ {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
+ end
+ end, FoundRevs),
+ Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
+ {ok, Results};
+ not_found when Revs == all ->
+ {ok, []};
+ not_found ->
+ {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
+ end
+ end,
+ IdRevs, LookupResults).
+
+open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, _Options) ->
+ case couch_btree:lookup(Db#db.local_docs_btree, [Id]) of
+ [{ok, {_, {Rev, BodyData}}}] ->
+ {ok, #doc{id=Id, revs={0, [list_to_binary(integer_to_list(Rev))]}, body=BodyData}};
+ [not_found] ->
+ {not_found, missing}
+ end;
+open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) ->
+ #rev_info{deleted=IsDeleted,rev={Pos,RevId},body_sp=Bp} = RevInfo,
+ Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
+ {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}};
+open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
+ #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
+ DocInfo = couch_doc:to_doc_info(FullDocInfo),
+ {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
+ Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
+ {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}};
+open_doc_int(Db, Id, Options) ->
+ case get_full_doc_info(Db, Id) of
+ {ok, FullDocInfo} ->
+ open_doc_int(Db, FullDocInfo, Options);
+ not_found ->
+ {not_found, missing}
+ end.
+
+doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTree, Options) ->
+ case lists:member(revs_info, Options) of
+ false -> [];
+ true ->
+ {[{Pos, RevPath}],[]} =
+ couch_key_tree:get_full_key_paths(RevTree, [Rev]),
+
+ [{revs_info, Pos, lists:map(
+ fun({Rev1, {true, _Sp, _UpdateSeq}}) ->
+ {Rev1, deleted};
+ ({Rev1, {false, _Sp, _UpdateSeq}}) ->
+ {Rev1, available};
+ ({Rev1, ?REV_MISSING}) ->
+ {Rev1, missing}
+ end, RevPath)}]
+ end ++
+ case lists:member(conflicts, Options) of
+ false -> [];
+ true ->
+ case [Rev1 || #rev_info{rev=Rev1,deleted=false} <- RestInfo] of
+ [] -> [];
+ ConflictRevs -> [{conflicts, ConflictRevs}]
+ end
+ end ++
+ case lists:member(deleted_conflicts, Options) of
+ false -> [];
+ true ->
+ case [Rev1 || #rev_info{rev=Rev1,deleted=true} <- RestInfo] of
+ [] -> [];
+ DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
+ end
+ end ++
+ case lists:member(local_seq, Options) of
+ false -> [];
+ true -> [{local_seq, Seq}]
+ end.
+
+read_doc(#db{fd=Fd}, OldStreamPointer) when is_tuple(OldStreamPointer) ->
+ % 09 UPGRADE CODE
+ couch_stream:old_read_term(Fd, OldStreamPointer);
+read_doc(#db{fd=Fd}, Pos) ->
+ couch_file:pread_term(Fd, Pos).
+
+
+doc_to_tree(#doc{revs={Start, RevIds}}=Doc) ->
+ [Tree] = doc_to_tree_simple(Doc, lists:reverse(RevIds)),
+ {Start - length(RevIds) + 1, Tree}.
+
+
+doc_to_tree_simple(Doc, [RevId]) ->
+ [{RevId, Doc, []}];
+doc_to_tree_simple(Doc, [RevId | Rest]) ->
+ [{RevId, ?REV_MISSING, doc_to_tree_simple(Doc, Rest)}].
+
+
+make_doc(#db{fd=Fd}=Db, Id, Deleted, Bp, RevisionPath) ->
+ {BodyData, Atts} =
+ case Bp of
+ nil ->
+ {[], []};
+ _ ->
+ {ok, {BodyData0, Atts0}} = read_doc(Db, Bp),
+ {BodyData0,
+ lists:map(
+ fun({Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
+ #att{name=Name,
+ type=Type,
+ att_len=AttLen,
+ disk_len=DiskLen,
+ md5=Md5,
+ revpos=RevPos,
+ data={Fd,Sp},
+ encoding=
+ case Enc of
+ true ->
+ % 0110 UPGRADE CODE
+ gzip;
+ false ->
+ % 0110 UPGRADE CODE
+ identity;
+ _ ->
+ Enc
+ end
+ };
+ ({Name,Type,Sp,AttLen,RevPos,Md5}) ->
+ #att{name=Name,
+ type=Type,
+ att_len=AttLen,
+ disk_len=AttLen,
+ md5=Md5,
+ revpos=RevPos,
+ data={Fd,Sp}};
+ ({Name,{Type,Sp,AttLen}}) ->
+ #att{name=Name,
+ type=Type,
+ att_len=AttLen,
+ disk_len=AttLen,
+ md5= <<>>,
+ revpos=0,
+ data={Fd,Sp}}
+ end, Atts0)}
+ end,
+ #doc{
+ id = Id,
+ revs = RevisionPath,
+ body = BodyData,
+ atts = Atts,
+ deleted = Deleted
+ }.
+
+
+increment_stat(#db{is_sys_db = true}, _Stat) ->
+ ok;
+increment_stat(#db{}, Stat) ->
+ couch_stats_collector:increment(Stat).
diff --git a/1.1.x/src/couchdb/couch_db.hrl b/1.1.x/src/couchdb/couch_db.hrl
new file mode 100644
index 00000000..003cb688
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_db.hrl
@@ -0,0 +1,278 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-define(LOCAL_DOC_PREFIX, "_local/").
+-define(DESIGN_DOC_PREFIX0, "_design").
+-define(DESIGN_DOC_PREFIX, "_design/").
+
+-define(MIN_STR, <<"">>).
+-define(MAX_STR, <<255>>). % illegal utf string
+
+-define(JSON_ENCODE(V), couch_util:json_encode(V)).
+-define(JSON_DECODE(V), couch_util:json_decode(V)).
+
+-define(b2l(V), binary_to_list(V)).
+-define(l2b(V), list_to_binary(V)).
+
+-define(DEFAULT_ATTACHMENT_CONTENT_TYPE, <<"application/octet-stream">>).
+
+-define(LOG_DEBUG(Format, Args), couch_log:debug(Format, Args)).
+-define(LOG_INFO(Format, Args), couch_log:info(Format, Args)).
+-define(LOG_ERROR(Format, Args), couch_log:error(Format, Args)).
+
+-record(rev_info,
+ {
+ rev,
+ seq = 0,
+ deleted = false,
+ body_sp = nil % stream pointer
+ }).
+
+-record(doc_info,
+ {
+ id = <<"">>,
+ high_seq = 0,
+ revs = [] % rev_info
+ }).
+
+-record(full_doc_info,
+ {id = <<"">>,
+ update_seq = 0,
+ deleted = false,
+ rev_tree = []
+ }).
+
+-record(httpd,
+ {mochi_req,
+ peer,
+ method,
+ requested_path_parts,
+ path_parts,
+ db_url_handlers,
+ user_ctx,
+ req_body = undefined,
+ design_url_handlers,
+ auth,
+ default_fun,
+ url_handlers
+ }).
+
+
+-record(doc,
+ {
+ id = <<"">>,
+ revs = {0, []},
+
+ % the json body object.
+ body = {[]},
+
+ atts = [], % attachments
+
+ deleted = false,
+
+ % key/value tuple of meta information, provided when using special options:
+ % couch_db:open_doc(Db, Id, Options).
+ meta = []
+ }).
+
+
+-record(att,
+ {
+ name,
+ type,
+ att_len,
+ disk_len, % length of the attachment in its identity form
+ % (that is, without a content encoding applied to it)
+ % differs from att_len when encoding /= identity
+ md5= <<>>,
+ revpos=0,
+ data,
+ encoding=identity % currently supported values are:
+ % identity, gzip
+ % additional values to support in the future:
+ % deflate, compress
+ }).
+
+
+-record(user_ctx,
+ {
+ name=null,
+ roles=[],
+ handler
+ }).
+
+% This should be updated any time a header change happens that requires more
+% than filling in new defaults.
+%
+% As long as the changes are limited to new header fields (with inline
+% defaults) added to the end of the record, there is no need to increment
+% the disk revision number.
+%
+% If the disk revision is incremented, then new upgrade logic will need to be
+% added to couch_db_updater:init_db.
+
+-define(LATEST_DISK_VERSION, 5).
+
+-record(db_header,
+ {disk_version = ?LATEST_DISK_VERSION,
+ update_seq = 0,
+ unused = 0,
+ fulldocinfo_by_id_btree_state = nil,
+ docinfo_by_seq_btree_state = nil,
+ local_docs_btree_state = nil,
+ purge_seq = 0,
+ purged_docs = nil,
+ security_ptr = nil,
+ revs_limit = 1000
+ }).
+
+-record(db,
+ {main_pid = nil,
+ update_pid = nil,
+ compactor_pid = nil,
+ instance_start_time, % number of microsecs since jan 1 1970 as a binary string
+ fd,
+ fd_ref_counter,
+ header = #db_header{},
+ committed_update_seq,
+ fulldocinfo_by_id_btree,
+ docinfo_by_seq_btree,
+ local_docs_btree,
+ update_seq,
+ name,
+ filepath,
+ validate_doc_funs = [],
+ security = [],
+ security_ptr = nil,
+ user_ctx = #user_ctx{},
+ waiting_delayed_commit = nil,
+ revs_limit = 1000,
+ fsync_options = [],
+ is_sys_db = false
+ }).
+
+
+-record(view_query_args, {
+ start_key,
+ end_key,
+ start_docid = ?MIN_STR,
+ end_docid = ?MAX_STR,
+
+ direction = fwd,
+ inclusive_end=true, % aka a closed-interval
+
+ limit = 10000000000, % Huge number to simplify logic
+ skip = 0,
+
+ group_level = 0,
+
+ view_type = nil,
+ include_docs = false,
+ conflicts = false,
+ stale = false,
+ multi_get = false,
+ callback = nil,
+ list = nil
+}).
+
+-record(view_fold_helper_funs, {
+ reduce_count,
+ passed_end,
+ start_response,
+ send_row
+}).
+
+-record(reduce_fold_helper_funs, {
+ start_response,
+ send_row
+}).
+
+-record(extern_resp_args, {
+ code = 200,
+ stop = false,
+ data = <<>>,
+ ctype = "application/json",
+ headers = []
+}).
+
+-record(group, {
+ sig=nil,
+ db=nil,
+ fd=nil,
+ name,
+ def_lang,
+ design_options=[],
+ views,
+ lib,
+ id_btree=nil,
+ current_seq=0,
+ purge_seq=0,
+ query_server=nil,
+ waiting_delayed_commit=nil
+ }).
+
+-record(view,
+ {id_num,
+ update_seq=0,
+ purge_seq=0,
+ map_names=[],
+ def,
+ btree=nil,
+ reduce_funs=[],
+ options=[]
+ }).
+
+-record(index_header,
+ {seq=0,
+ purge_seq=0,
+ id_btree_state=nil,
+ view_states=nil
+ }).
+
+-record(http_db, {
+ url,
+ auth = [],
+ resource = "",
+ headers = [
+ {"User-Agent", "CouchDB/"++couch_server:get_version()},
+ {"Accept", "application/json"},
+ {"Accept-Encoding", "gzip"}
+ ],
+ qs = [],
+ method = get,
+ body = nil,
+ options = [
+ {response_format,binary},
+ {inactivity_timeout, 30000}
+ ],
+ retries = 10,
+ pause = 500,
+ conn = nil
+}).
+
+% small value used in revision trees to indicate the revision isn't stored
+-define(REV_MISSING, []).
+
+-record(changes_args, {
+ feed = "normal",
+ dir = fwd,
+ since = 0,
+ limit = 1000000000000000,
+ style = main_only,
+ heartbeat,
+ timeout,
+ filter = "",
+ include_docs = false,
+ conflicts = false,
+ db_open_options = []
+}).
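+% These fields mirror the _changes request parameters: feed ("normal",
+% "longpoll" or "continuous"), since, limit, style (main_only | all_docs),
+% heartbeat and timeout in milliseconds, filter, include_docs and conflicts.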
+
diff --git a/1.1.x/src/couchdb/couch_db_update_notifier.erl b/1.1.x/src/couchdb/couch_db_update_notifier.erl
new file mode 100644
index 00000000..150eb31b
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_db_update_notifier.erl
@@ -0,0 +1,73 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%
+% This causes an OS process to be spawned, and it is notified every time a
+% database is updated.
+%
+% The notifications are in the form of the database name sent as a line of
+% text to the OS process's stdout.
+%
+
+-module(couch_db_update_notifier).
+
+-behaviour(gen_event).
+
+-export([start_link/1, notify/1]).
+-export([init/1, terminate/2, handle_event/2, handle_call/2, handle_info/2, code_change/3,stop/1]).
+
+-include("couch_db.hrl").
+
+start_link(Exec) ->
+ couch_event_sup:start_link(couch_db_update, {couch_db_update_notifier, make_ref()}, Exec).
+
+notify(Event) ->
+ gen_event:notify(couch_db_update, Event).
+
+stop(Pid) ->
+ couch_event_sup:stop(Pid).
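+
+% Illustrative usage (hypothetical db name): besides an executable command,
+% start_link/1 also accepts a fun of arity 1, which handle_event/2 below will
+% apply to every event:
+%
+%   {ok, _Pid} = couch_db_update_notifier:start_link(
+%       fun({Type, DbName}) -> io:format("~p: ~s~n", [Type, DbName]) end),
+%   couch_db_update_notifier:notify({updated, <<"mydb">>}).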
+
+init(Exec) when is_list(Exec) -> % an executable command
+ couch_os_process:start_link(Exec, []);
+init(Else) ->
+ {ok, Else}.
+
+terminate(_Reason, Pid) when is_pid(Pid) ->
+ couch_os_process:stop(Pid),
+ ok;
+terminate(_Reason, _State) ->
+ ok.
+
+handle_event(Event, Fun) when is_function(Fun, 1) ->
+ Fun(Event),
+ {ok, Fun};
+handle_event(Event, {Fun, FunAcc}) ->
+ FunAcc2 = Fun(Event, FunAcc),
+ {ok, {Fun, FunAcc2}};
+handle_event({EventAtom, DbName}, Pid) ->
+ Obj = {[{type, list_to_binary(atom_to_list(EventAtom))}, {db, DbName}]},
+ ok = couch_os_process:send(Pid, Obj),
+ {ok, Pid}.
+
+handle_call(_Request, State) ->
+ {reply, ok, State}.
+
+handle_info({'EXIT', Pid, Reason}, Pid) ->
+ ?LOG_ERROR("Update notification process ~p died: ~p", [Pid, Reason]),
+ remove_handler;
+handle_info({'EXIT', _, _}, Pid) ->
+ %% the db_update event manager traps exits and forwards this message to all
+ %% its handlers. Just ignore it, as it wasn't our os_process that exited.
+ {ok, Pid}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/1.1.x/src/couchdb/couch_db_update_notifier_sup.erl b/1.1.x/src/couchdb/couch_db_update_notifier_sup.erl
new file mode 100644
index 00000000..4d730fc7
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_db_update_notifier_sup.erl
@@ -0,0 +1,63 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%
+% This causes an OS process to be spawned, and it is notified every time a
+% database is updated.
+%
+% The notifications are in the form of the database name sent as a line of
+% text to the OS process's stdout.
+%
+
+-module(couch_db_update_notifier_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0,init/1]).
+
+start_link() ->
+ supervisor:start_link({local, couch_db_update_notifier_sup},
+ couch_db_update_notifier_sup, []).
+
+init([]) ->
+ ok = couch_config:register(
+ fun("update_notification", Key, Value) -> reload_config(Key, Value) end
+ ),
+
+ UpdateNotifierExes = couch_config:get("update_notification"),
+
+ {ok,
+ {{one_for_one, 10, 3600},
+ lists:map(fun({Name, UpdateNotifierExe}) ->
+ {Name,
+ {couch_db_update_notifier, start_link, [UpdateNotifierExe]},
+ permanent,
+ 1000,
+ supervisor,
+ [couch_db_update_notifier]}
+ end, UpdateNotifierExes)}}.
+
+%% @doc When the update_notification configuration changes, terminate the
+%% process for that notifier and start a new one with the updated config.
+reload_config(Id, Exe) ->
+ ChildSpec = {
+ Id,
+ {couch_db_update_notifier, start_link, [Exe]},
+ permanent,
+ 1000,
+ supervisor,
+ [couch_db_update_notifier]
+ },
+ supervisor:terminate_child(couch_db_update_notifier_sup, Id),
+ supervisor:delete_child(couch_db_update_notifier_sup, Id),
+ supervisor:start_child(couch_db_update_notifier_sup, ChildSpec).
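+
+% Hypothetical local.ini snippet (name and path invented): each entry in the
+% [update_notification] section becomes one supervised notifier child:
+%
+%   [update_notification]
+%   index_updater = /usr/local/bin/couchdb-index-notifier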
+
diff --git a/1.1.x/src/couchdb/couch_db_updater.erl b/1.1.x/src/couchdb/couch_db_updater.erl
new file mode 100644
index 00000000..2b317d95
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_db_updater.erl
@@ -0,0 +1,896 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_db_updater).
+-behaviour(gen_server).
+
+-export([btree_by_id_reduce/2,btree_by_seq_reduce/2]).
+-export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
+
+-include("couch_db.hrl").
+
+
+init({MainPid, DbName, Filepath, Fd, Options}) ->
+ process_flag(trap_exit, true),
+ case lists:member(create, Options) of
+ true ->
+ % create a new header and write it to the file
+ Header = #db_header{},
+ ok = couch_file:write_header(Fd, Header),
+ % delete any old compaction files that might be hanging around
+ RootDir = couch_config:get("couchdb", "database_dir", "."),
+ couch_file:delete(RootDir, Filepath ++ ".compact");
+ false ->
+ ok = couch_file:upgrade_old_header(Fd, <<$g, $m, $k, 0>>), % 09 UPGRADE CODE
+ case couch_file:read_header(Fd) of
+ {ok, Header} ->
+ ok;
+ no_valid_header ->
+ % create a new header and write it to the file
+ Header = #db_header{},
+ ok = couch_file:write_header(Fd, Header),
+ % delete any old compaction files that might be hanging around
+ file:delete(Filepath ++ ".compact")
+ end
+ end,
+
+ Db = init_db(DbName, Filepath, Fd, Header),
+ Db2 = refresh_validate_doc_funs(Db),
+ {ok, Db2#db{main_pid = MainPid, is_sys_db = lists:member(sys_db, Options)}}.
+
+
+terminate(_Reason, Db) ->
+ couch_file:close(Db#db.fd),
+ couch_util:shutdown_sync(Db#db.compactor_pid),
+ couch_util:shutdown_sync(Db#db.fd_ref_counter),
+ ok.
+
+handle_call(get_db, _From, Db) ->
+ {reply, {ok, Db}, Db};
+handle_call(full_commit, _From, #db{waiting_delayed_commit=nil}=Db) ->
+ {reply, ok, Db}; % no data waiting, return ok immediately
+handle_call(full_commit, _From, Db) ->
+ {reply, ok, commit_data(Db)}; % commit the data and return ok
+handle_call(increment_update_seq, _From, Db) ->
+ Db2 = commit_data(Db#db{update_seq=Db#db.update_seq+1}),
+ ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+ couch_db_update_notifier:notify({updated, Db#db.name}),
+ {reply, {ok, Db2#db.update_seq}, Db2};
+
+handle_call({set_security, NewSec}, _From, Db) ->
+ {ok, Ptr} = couch_file:append_term(Db#db.fd, NewSec),
+ Db2 = commit_data(Db#db{security=NewSec, security_ptr=Ptr,
+ update_seq=Db#db.update_seq+1}),
+ ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+ {reply, ok, Db2};
+
+handle_call({set_revs_limit, Limit}, _From, Db) ->
+ Db2 = commit_data(Db#db{revs_limit=Limit,
+ update_seq=Db#db.update_seq+1}),
+ ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+ {reply, ok, Db2};
+
+handle_call({purge_docs, _IdRevs}, _From,
+ #db{compactor_pid=Pid}=Db) when Pid /= nil ->
+ {reply, {error, purge_during_compaction}, Db};
+handle_call({purge_docs, IdRevs}, _From, Db) ->
+ #db{
+ fd=Fd,
+ fulldocinfo_by_id_btree = DocInfoByIdBTree,
+ docinfo_by_seq_btree = DocInfoBySeqBTree,
+ update_seq = LastSeq,
+ header = Header = #db_header{purge_seq=PurgeSeq}
+ } = Db,
+ DocLookups = couch_btree:lookup(DocInfoByIdBTree,
+ [Id || {Id, _Revs} <- IdRevs]),
+
+ NewDocInfos = lists:zipwith(
+ fun({_Id, Revs}, {ok, #full_doc_info{rev_tree=Tree}=FullDocInfo}) ->
+ case couch_key_tree:remove_leafs(Tree, Revs) of
+ {_, []=_RemovedRevs} -> % no change
+ nil;
+ {NewTree, RemovedRevs} ->
+ {FullDocInfo#full_doc_info{rev_tree=NewTree},RemovedRevs}
+ end;
+ (_, not_found) ->
+ nil
+ end,
+ IdRevs, DocLookups),
+
+ SeqsToRemove = [Seq
+ || {#full_doc_info{update_seq=Seq},_} <- NewDocInfos],
+
+ FullDocInfoToUpdate = [FullInfo
+ || {#full_doc_info{rev_tree=Tree}=FullInfo,_}
+ <- NewDocInfos, Tree /= []],
+
+ IdRevsPurged = [{Id, Revs}
+ || {#full_doc_info{id=Id}, Revs} <- NewDocInfos],
+
+ {DocInfoToUpdate, NewSeq} = lists:mapfoldl(
+ fun(#full_doc_info{rev_tree=Tree}=FullInfo, SeqAcc) ->
+ Tree2 = couch_key_tree:map_leafs(
+ fun(_RevId, {IsDeleted, BodyPointer, _UpdateSeq}) ->
+ {IsDeleted, BodyPointer, SeqAcc + 1}
+ end, Tree),
+ {couch_doc:to_doc_info(FullInfo#full_doc_info{rev_tree=Tree2}),
+ SeqAcc + 1}
+ end, LastSeq, FullDocInfoToUpdate),
+
+ IdsToRemove = [Id || {#full_doc_info{id=Id,rev_tree=[]},_}
+ <- NewDocInfos],
+
+ {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree,
+ DocInfoToUpdate, SeqsToRemove),
+ {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree,
+ FullDocInfoToUpdate, IdsToRemove),
+ {ok, Pointer} = couch_file:append_term(Fd, IdRevsPurged),
+
+ Db2 = commit_data(
+ Db#db{
+ fulldocinfo_by_id_btree = DocInfoByIdBTree2,
+ docinfo_by_seq_btree = DocInfoBySeqBTree2,
+ update_seq = NewSeq + 1,
+ header=Header#db_header{purge_seq=PurgeSeq+1, purged_docs=Pointer}}),
+
+ ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+ couch_db_update_notifier:notify({updated, Db#db.name}),
+ {reply, {ok, (Db2#db.header)#db_header.purge_seq, IdRevsPurged}, Db2};
+handle_call(start_compact, _From, Db) ->
+ case Db#db.compactor_pid of
+ nil ->
+ ?LOG_INFO("Starting compaction for db \"~s\"", [Db#db.name]),
+ Pid = spawn_link(fun() -> start_copy_compact(Db) end),
+ Db2 = Db#db{compactor_pid=Pid},
+ ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}),
+ {reply, ok, Db2};
+ _ ->
+ % a compaction is already running; this is a no-op
+ {reply, ok, Db}
+ end.
+
+
+
+handle_cast({compact_done, CompactFilepath}, #db{filepath=Filepath}=Db) ->
+ {ok, NewFd} = couch_file:open(CompactFilepath),
+ {ok, NewHeader} = couch_file:read_header(NewFd),
+ #db{update_seq=NewSeq} = NewDb =
+ init_db(Db#db.name, Filepath, NewFd, NewHeader),
+ unlink(NewFd),
+ case Db#db.update_seq == NewSeq of
+ true ->
+ % suck up all the local docs into memory and write them to the new db
+ {ok, _, LocalDocs} = couch_btree:foldl(Db#db.local_docs_btree,
+ fun(Value, _Offset, Acc) -> {ok, [Value | Acc]} end, []),
+ {ok, NewLocalBtree} = couch_btree:add(NewDb#db.local_docs_btree, LocalDocs),
+
+ NewDb2 = commit_data(NewDb#db{
+ local_docs_btree = NewLocalBtree,
+ main_pid = Db#db.main_pid,
+ filepath = Filepath,
+ instance_start_time = Db#db.instance_start_time,
+ revs_limit = Db#db.revs_limit
+ }),
+
+ ?LOG_DEBUG("CouchDB swapping files ~s and ~s.",
+ [Filepath, CompactFilepath]),
+ RootDir = couch_config:get("couchdb", "database_dir", "."),
+ couch_file:delete(RootDir, Filepath),
+ ok = file:rename(CompactFilepath, Filepath),
+ close_db(Db),
+ NewDb3 = refresh_validate_doc_funs(NewDb2),
+ ok = gen_server:call(Db#db.main_pid, {db_updated, NewDb3}, infinity),
+ couch_db_update_notifier:notify({compacted, NewDb3#db.name}),
+ ?LOG_INFO("Compaction for db \"~s\" completed.", [Db#db.name]),
+ {noreply, NewDb3#db{compactor_pid=nil}};
+ false ->
+ ?LOG_INFO("Compaction file still behind main file "
+ "(update seq=~p. compact update seq=~p). Retrying.",
+ [Db#db.update_seq, NewSeq]),
+ close_db(NewDb),
+ Pid = spawn_link(fun() -> start_copy_compact(Db) end),
+ Db2 = Db#db{compactor_pid=Pid},
+ {noreply, Db2}
+ end.
+
+
+handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
+ FullCommit}, Db) ->
+ GroupedDocs2 = [[{Client, D} || D <- DocGroup] || DocGroup <- GroupedDocs],
+ if NonRepDocs == [] ->
+ {GroupedDocs3, Clients, FullCommit2} = collect_updates(GroupedDocs2,
+ [Client], MergeConflicts, FullCommit);
+ true ->
+ GroupedDocs3 = GroupedDocs2,
+ FullCommit2 = FullCommit,
+ Clients = [Client]
+ end,
+ NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs],
+ try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts,
+ FullCommit2) of
+ {ok, Db2} ->
+ ok = gen_server:call(Db#db.main_pid, {db_updated, Db2}),
+ if Db2#db.update_seq /= Db#db.update_seq ->
+ couch_db_update_notifier:notify({updated, Db2#db.name});
+ true -> ok
+ end,
+ [catch(ClientPid ! {done, self()}) || ClientPid <- Clients],
+ {noreply, Db2}
+ catch
+ throw: retry ->
+ [catch(ClientPid ! {retry, self()}) || ClientPid <- Clients],
+ {noreply, Db}
+ end;
+handle_info(delayed_commit, #db{waiting_delayed_commit=nil}=Db) ->
+ % no outstanding delayed commits, ignore
+ {noreply, Db};
+handle_info(delayed_commit, Db) ->
+ case commit_data(Db) of
+ Db ->
+ {noreply, Db};
+ Db2 ->
+ ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+ {noreply, Db2}
+ end;
+handle_info({'EXIT', _Pid, normal}, Db) ->
+ {noreply, Db};
+handle_info({'EXIT', _Pid, Reason}, Db) ->
+ {stop, Reason, Db}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+merge_updates([], RestB, AccOutGroups) ->
+ lists:reverse(AccOutGroups, RestB);
+merge_updates(RestA, [], AccOutGroups) ->
+ lists:reverse(AccOutGroups, RestA);
+merge_updates([[{_, #doc{id=IdA}}|_]=GroupA | RestA],
+ [[{_, #doc{id=IdB}}|_]=GroupB | RestB], AccOutGroups) ->
+ if IdA == IdB ->
+ merge_updates(RestA, RestB, [GroupA ++ GroupB | AccOutGroups]);
+ IdA < IdB ->
+ merge_updates(RestA, [GroupB | RestB], [GroupA | AccOutGroups]);
+ true ->
+ merge_updates([GroupA | RestA], RestB, [GroupB | AccOutGroups])
+ end.
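+
+% For example (ids only, sketch): merging groups for ids [a, c] with groups
+% for ids [b, c] yields groups ordered as [a, b, c], with the two groups for
+% c concatenated into one.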
+
+collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts, FullCommit) ->
+ receive
+ % Only collect updates with the same MergeConflicts flag and without
+ % local docs. It's easier to just avoid multiple _local doc
+ % updaters than to deal with their possible conflicts, and local doc
+ % writes are relatively rare. Can be optimized later if really needed.
+ {update_docs, Client, GroupedDocs, [], MergeConflicts, FullCommit2} ->
+ GroupedDocs2 = [[{Client, Doc} || Doc <- DocGroup]
+ || DocGroup <- GroupedDocs],
+ GroupedDocsAcc2 =
+ merge_updates(GroupedDocsAcc, GroupedDocs2, []),
+ collect_updates(GroupedDocsAcc2, [Client | ClientsAcc],
+ MergeConflicts, (FullCommit or FullCommit2))
+ after 0 ->
+ {GroupedDocsAcc, ClientsAcc, FullCommit}
+ end.
+
+
+btree_by_seq_split(#doc_info{id=Id, high_seq=KeySeq, revs=Revs}) ->
+ {RevInfos, DeletedRevInfos} = lists:foldl(
+ fun(#rev_info{deleted = false, seq = Seq} = Ri, {Acc, AccDel}) ->
+ {[{Ri#rev_info.rev, Seq, Ri#rev_info.body_sp} | Acc], AccDel};
+ (#rev_info{deleted = true, seq = Seq} = Ri, {Acc, AccDel}) ->
+ {Acc, [{Ri#rev_info.rev, Seq, Ri#rev_info.body_sp} | AccDel]}
+ end,
+ {[], []}, Revs),
+ {KeySeq, {Id, lists:reverse(RevInfos), lists:reverse(DeletedRevInfos)}}.
+
+btree_by_seq_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
+ #doc_info{
+ id = Id,
+ high_seq=KeySeq,
+ revs =
+ [#rev_info{rev=Rev,seq=Seq,deleted=false,body_sp = Bp} ||
+ {Rev, Seq, Bp} <- RevInfos] ++
+ [#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp = Bp} ||
+ {Rev, Seq, Bp} <- DeletedRevInfos]};
+btree_by_seq_join(KeySeq,{Id, Rev, Bp, Conflicts, DelConflicts, Deleted}) ->
+ % 09 UPGRADE CODE
+ % this is the 0.9.0 and earlier by_seq record. It's missing the body pointers
+ % and individual seq nums for conflicts that are currently in the index,
+ % meaning the filtered _changes API will not work except on main docs.
+ % Simply compact a 0.9.0 database to upgrade the index.
+ #doc_info{
+ id=Id,
+ high_seq=KeySeq,
+ revs = [#rev_info{rev=Rev,seq=KeySeq,deleted=Deleted,body_sp=Bp}] ++
+ [#rev_info{rev=Rev1,seq=KeySeq,deleted=false} || Rev1 <- Conflicts] ++
+ [#rev_info{rev=Rev2,seq=KeySeq,deleted=true} || Rev2 <- DelConflicts]}.
+
+btree_by_id_split(#full_doc_info{id=Id, update_seq=Seq,
+ deleted=Deleted, rev_tree=Tree}) ->
+ DiskTree =
+ couch_key_tree:map(
+ fun(_RevId, {IsDeleted, BodyPointer, UpdateSeq}) ->
+ {if IsDeleted -> 1; true -> 0 end, BodyPointer, UpdateSeq};
+ (_RevId, ?REV_MISSING) ->
+ ?REV_MISSING
+ end, Tree),
+ {Id, {Seq, if Deleted -> 1; true -> 0 end, DiskTree}}.
+
+btree_by_id_join(Id, {HighSeq, Deleted, DiskTree}) ->
+ Tree =
+ couch_key_tree:map(
+ fun(_RevId, {IsDeleted, BodyPointer, UpdateSeq}) ->
+ {IsDeleted == 1, BodyPointer, UpdateSeq};
+ (_RevId, ?REV_MISSING) ->
+ ?REV_MISSING;
+ (_RevId, {IsDeleted, BodyPointer}) ->
+ % 09 UPGRADE CODE
+ % this is the 0.9.0 and earlier rev info record. It's missing the seq
+ % nums, which means CouchDB will sometimes reexamine unchanged
+ % documents with the _changes API.
+ % This is fixed by compacting the database.
+ {IsDeleted == 1, BodyPointer, HighSeq}
+ end, DiskTree),
+
+ #full_doc_info{id=Id, update_seq=HighSeq, deleted=Deleted==1, rev_tree=Tree}.
+
+btree_by_id_reduce(reduce, FullDocInfos) ->
+ lists:foldl(
+ fun(#full_doc_info{deleted = false}, {NotDeleted, Deleted}) ->
+ {NotDeleted + 1, Deleted};
+ (#full_doc_info{deleted = true}, {NotDeleted, Deleted}) ->
+ {NotDeleted, Deleted + 1}
+ end,
+ {0, 0}, FullDocInfos);
+btree_by_id_reduce(rereduce, [FirstRed | RestReds]) ->
+ lists:foldl(
+ fun({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted}) ->
+ {AccNotDeleted + NotDeleted, AccDeleted + Deleted}
+ end,
+ FirstRed, RestReds).
+
+btree_by_seq_reduce(reduce, DocInfos) ->
+ % count the number of documents
+ length(DocInfos);
+btree_by_seq_reduce(rereduce, Reds) ->
+ lists:sum(Reds).
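+
+% For example, reducing a list of three doc infos gives 3, and rereducing the
+% partial counts [3, 2] gives 5.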
+
+simple_upgrade_record(Old, New) when tuple_size(Old) =:= tuple_size(New) ->
+ Old;
+simple_upgrade_record(Old, New) when tuple_size(Old) < tuple_size(New) ->
+ OldSz = tuple_size(Old),
+ NewValuesTail =
+ lists:sublist(tuple_to_list(New), OldSz + 1, tuple_size(New) - OldSz),
+ list_to_tuple(tuple_to_list(Old) ++ NewValuesTail).
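+
+% For example (illustrative tuples):
+%   simple_upgrade_record({db_header, 1}, {db_header, 1, nil})
+% returns {db_header, 1, nil}: the old record keeps its values and picks up
+% the defaults of any fields appended by the newer record definition.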
+
+
+init_db(DbName, Filepath, Fd, Header0) ->
+ Header1 = simple_upgrade_record(Header0, #db_header{}),
+ Header =
+ case element(2, Header1) of
+ 1 -> Header1#db_header{unused = 0, security_ptr = nil}; % 0.9
+ 2 -> Header1#db_header{unused = 0, security_ptr = nil}; % post 0.9 and pre 0.10
+ 3 -> Header1#db_header{security_ptr = nil}; % post 0.9 and pre 0.10
+ 4 -> Header1#db_header{security_ptr = nil}; % 0.10 and pre 0.11
+ ?LATEST_DISK_VERSION -> Header1;
+ _ -> throw({database_disk_version_error, "Incorrect disk header version"})
+ end,
+
+ {ok, FsyncOptions} = couch_util:parse_term(
+ couch_config:get("couchdb", "fsync_options",
+ "[before_header, after_header, on_file_open]")),
+
+ case lists:member(on_file_open, FsyncOptions) of
+ true -> ok = couch_file:sync(Fd);
+ _ -> ok
+ end,
+
+ {ok, IdBtree} = couch_btree:open(Header#db_header.fulldocinfo_by_id_btree_state, Fd,
+ [{split, fun(X) -> btree_by_id_split(X) end},
+ {join, fun(X,Y) -> btree_by_id_join(X,Y) end},
+ {reduce, fun(X,Y) -> btree_by_id_reduce(X,Y) end}]),
+ {ok, SeqBtree} = couch_btree:open(Header#db_header.docinfo_by_seq_btree_state, Fd,
+ [{split, fun(X) -> btree_by_seq_split(X) end},
+ {join, fun(X,Y) -> btree_by_seq_join(X,Y) end},
+ {reduce, fun(X,Y) -> btree_by_seq_reduce(X,Y) end}]),
+ {ok, LocalDocsBtree} = couch_btree:open(Header#db_header.local_docs_btree_state, Fd),
+ case Header#db_header.security_ptr of
+ nil ->
+ Security = [],
+ SecurityPtr = nil;
+ SecurityPtr ->
+ {ok, Security} = couch_file:pread_term(Fd, SecurityPtr)
+ end,
+ % convert start time tuple to microsecs and store as a binary string
+ {MegaSecs, Secs, MicroSecs} = now(),
+ StartTime = ?l2b(io_lib:format("~p",
+ [(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])),
+ {ok, RefCntr} = couch_ref_counter:start([Fd]),
+ #db{
+ update_pid=self(),
+ fd=Fd,
+ fd_ref_counter = RefCntr,
+ header=Header,
+ fulldocinfo_by_id_btree = IdBtree,
+ docinfo_by_seq_btree = SeqBtree,
+ local_docs_btree = LocalDocsBtree,
+ committed_update_seq = Header#db_header.update_seq,
+ update_seq = Header#db_header.update_seq,
+ name = DbName,
+ filepath = Filepath,
+ security = Security,
+ security_ptr = SecurityPtr,
+ instance_start_time = StartTime,
+ revs_limit = Header#db_header.revs_limit,
+ fsync_options = FsyncOptions
+ }.
+
+
+close_db(#db{fd_ref_counter = RefCntr}) ->
+ couch_ref_counter:drop(RefCntr).
+
+
+refresh_validate_doc_funs(Db) ->
+ {ok, DesignDocs} = couch_db:get_design_docs(Db),
+ ProcessDocFuns = lists:flatmap(
+ fun(DesignDoc) ->
+ case couch_doc:get_validate_doc_fun(DesignDoc) of
+ nil -> [];
+ Fun -> [Fun]
+ end
+ end, DesignDocs),
+ Db#db{validate_doc_funs=ProcessDocFuns}.
+
+% rev tree functions
+
+flush_trees(_Db, [], AccFlushedTrees) ->
+ {ok, lists:reverse(AccFlushedTrees)};
+flush_trees(#db{fd=Fd,header=Header}=Db,
+ [InfoUnflushed | RestUnflushed], AccFlushed) ->
+ #full_doc_info{update_seq=UpdateSeq, rev_tree=Unflushed} = InfoUnflushed,
+ Flushed = couch_key_tree:map(
+ fun(_Rev, Value) ->
+ case Value of
+ #doc{atts=Atts,deleted=IsDeleted}=Doc ->
+ % this node value is actually an unwritten document summary,
+ % so write it to disk.
+ % Make sure the Fd in the written bins is the same Fd we are
+ % using, and convert the bins, removing the Fd.
+ % All bins should have been written to disk already.
+ DiskAtts =
+ case Atts of
+ [] -> [];
+ [#att{data={BinFd, _Sp}} | _ ] when BinFd == Fd ->
+ [{N,T,P,AL,DL,R,M,E}
+ || #att{name=N,type=T,data={_,P},md5=M,revpos=R,
+ att_len=AL,disk_len=DL,encoding=E}
+ <- Atts];
+ _ ->
+ % BinFd is not our Fd, which can happen when a database
+ % file is being switched out during a compaction.
+ ?LOG_DEBUG("File where the attachments are written has"
+ " changed. Possibly retrying.", []),
+ throw(retry)
+ end,
+ {ok, NewSummaryPointer} =
+ case Header#db_header.disk_version < 4 of
+ true ->
+ couch_file:append_term(Fd, {Doc#doc.body, DiskAtts});
+ false ->
+ couch_file:append_term_md5(Fd, {Doc#doc.body, DiskAtts})
+ end,
+ {IsDeleted, NewSummaryPointer, UpdateSeq};
+ _ ->
+ Value
+ end
+ end, Unflushed),
+ flush_trees(Db, RestUnflushed, [InfoUnflushed#full_doc_info{rev_tree=Flushed} | AccFlushed]).
+
+
+send_result(Client, Id, OriginalRevs, NewResult) ->
+ % used to send a result to the client
+ catch(Client ! {result, self(), {{Id, OriginalRevs}, NewResult}}).
+
+merge_rev_trees(_Limit, _Merge, [], [], AccNewInfos, AccRemoveSeqs, AccSeq) ->
+ {ok, lists:reverse(AccNewInfos), AccRemoveSeqs, AccSeq};
+merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList],
+ [OldDocInfo|RestOldInfo], AccNewInfos, AccRemoveSeqs, AccSeq) ->
+ #full_doc_info{id=Id,rev_tree=OldTree,deleted=OldDeleted,update_seq=OldSeq}
+ = OldDocInfo,
+ NewRevTree = lists:foldl(
+ fun({Client, #doc{revs={Pos,[_Rev|PrevRevs]}}=NewDoc}, AccTree) ->
+ if not MergeConflicts ->
+ case couch_key_tree:merge(AccTree, couch_db:doc_to_tree(NewDoc),
+ Limit) of
+ {_NewTree, conflicts} when (not OldDeleted) ->
+ send_result(Client, Id, {Pos-1,PrevRevs}, conflict),
+ AccTree;
+ {NewTree, conflicts} when PrevRevs /= [] ->
+ % If a previous revision was specified, check that it
+ % is a leaf node in the tree
+ Leafs = couch_key_tree:get_all_leafs(AccTree),
+ IsPrevLeaf = lists:any(fun({_, {LeafPos, [LeafRevId|_]}}) ->
+ {LeafPos, LeafRevId} == {Pos-1, hd(PrevRevs)}
+ end, Leafs),
+ if IsPrevLeaf ->
+ NewTree;
+ true ->
+ send_result(Client, Id, {Pos-1,PrevRevs}, conflict),
+ AccTree
+ end;
+ {NewTree, no_conflicts} when AccTree == NewTree ->
+ % the tree didn't change at all,
+ % meaning we are saving a rev that has already
+ % been edited again.
+ if (Pos == 1) and OldDeleted ->
+ % this means we are recreating the document in a
+ % state that already existed before;
+ % record the rev as a subsequent edit of the deletion
+ #doc_info{revs=[#rev_info{rev={OldPos,OldRev}}|_]} =
+ couch_doc:to_doc_info(OldDocInfo),
+ NewRevId = couch_db:new_revid(
+ NewDoc#doc{revs={OldPos, [OldRev]}}),
+ NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}},
+ {NewTree2, _} = couch_key_tree:merge(AccTree,
+ couch_db:doc_to_tree(NewDoc2), Limit),
+ % we changed the rev id; tell the caller that we did
+ send_result(Client, Id, {Pos-1,PrevRevs},
+ {ok, {OldPos + 1, NewRevId}}),
+ NewTree2;
+ true ->
+ send_result(Client, Id, {Pos-1,PrevRevs}, conflict),
+ AccTree
+ end;
+ {NewTree, _} ->
+ NewTree
+ end;
+ true ->
+ {NewTree, _} = couch_key_tree:merge(AccTree,
+ couch_db:doc_to_tree(NewDoc), Limit),
+ NewTree
+ end
+ end,
+ OldTree, NewDocs),
+ if NewRevTree == OldTree ->
+ % nothing changed
+ merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
+ AccNewInfos, AccRemoveSeqs, AccSeq);
+ true ->
+ % we have updated the document, give it a new seq #
+ NewInfo = #full_doc_info{id=Id,update_seq=AccSeq+1,rev_tree=NewRevTree},
+ RemoveSeqs = case OldSeq of
+ 0 -> AccRemoveSeqs;
+ _ -> [OldSeq | AccRemoveSeqs]
+ end,
+ merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
+ [NewInfo|AccNewInfos], RemoveSeqs, AccSeq+1)
+ end.
+
+
+
+new_index_entries([], AccById, AccBySeq) ->
+ {AccById, AccBySeq};
+new_index_entries([FullDocInfo|RestInfos], AccById, AccBySeq) ->
+ #doc_info{revs=[#rev_info{deleted=Deleted}|_]} = DocInfo =
+ couch_doc:to_doc_info(FullDocInfo),
+ new_index_entries(RestInfos,
+ [FullDocInfo#full_doc_info{deleted=Deleted}|AccById],
+ [DocInfo|AccBySeq]).
+
+
+stem_full_doc_infos(#db{revs_limit=Limit}, DocInfos) ->
+ [Info#full_doc_info{rev_tree=couch_key_tree:stem(Tree, Limit)} ||
+ #full_doc_info{rev_tree=Tree}=Info <- DocInfos].
+
+update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
+ #db{
+ fulldocinfo_by_id_btree = DocInfoByIdBTree,
+ docinfo_by_seq_btree = DocInfoBySeqBTree,
+ update_seq = LastSeq,
+ revs_limit = RevsLimit
+ } = Db,
+ Ids = [Id || [{_Client, #doc{id=Id}}|_] <- DocsList],
+ % look up the old documents, if they exist.
+ OldDocLookups = couch_btree:lookup(DocInfoByIdBTree, Ids),
+ OldDocInfos = lists:zipwith(
+ fun(_Id, {ok, FullDocInfo}) ->
+ FullDocInfo;
+ (Id, not_found) ->
+ #full_doc_info{id=Id}
+ end,
+ Ids, OldDocLookups),
+ % Merge the new docs into the revision trees.
+ {ok, NewFullDocInfos, RemoveSeqs, NewSeq} = merge_rev_trees(RevsLimit,
+ MergeConflicts, DocsList, OldDocInfos, [], [], LastSeq),
+
+ % All documents are now ready to write.
+
+ {ok, Db2} = update_local_docs(Db, NonRepDocs),
+
+ % Write out the document summaries (the bodies are stored in the nodes of
+ % the trees, the attachments are already written to disk)
+ {ok, FlushedFullDocInfos} = flush_trees(Db2, NewFullDocInfos, []),
+
+ {IndexFullDocInfos, IndexDocInfos} =
+ new_index_entries(FlushedFullDocInfos, [], []),
+
+ % and the indexes
+ {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree, IndexFullDocInfos, []),
+ {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree, IndexDocInfos, RemoveSeqs),
+
+ Db3 = Db2#db{
+ fulldocinfo_by_id_btree = DocInfoByIdBTree2,
+ docinfo_by_seq_btree = DocInfoBySeqBTree2,
+ update_seq = NewSeq},
+
+ % Check if we just updated any design documents, and update the validation
+ % funs if we did.
+ case lists:any(
+ fun(<<"_design/", _/binary>>) -> true; (_) -> false end, Ids) of
+ false ->
+ Db4 = Db3;
+ true ->
+ Db4 = refresh_validate_doc_funs(Db3)
+ end,
+
+ {ok, commit_data(Db4, not FullCommit)}.
+
+
+update_local_docs(Db, []) ->
+ {ok, Db};
+update_local_docs(#db{local_docs_btree=Btree}=Db, Docs) ->
+ Ids = [Id || {_Client, #doc{id=Id}} <- Docs],
+ OldDocLookups = couch_btree:lookup(Btree, Ids),
+ BtreeEntries = lists:zipwith(
+ fun({Client, #doc{id=Id,deleted=Delete,revs={0,PrevRevs},body=Body}}, OldDocLookup) ->
+ case PrevRevs of
+ [RevStr|_] ->
+ PrevRev = list_to_integer(?b2l(RevStr));
+ [] ->
+ PrevRev = 0
+ end,
+ OldRev =
+ case OldDocLookup of
+ {ok, {_, {OldRev0, _}}} -> OldRev0;
+ not_found -> 0
+ end,
+ case OldRev == PrevRev of
+ true ->
+ case Delete of
+ false ->
+ send_result(Client, Id, {0, PrevRevs}, {ok,
+ {0, ?l2b(integer_to_list(PrevRev + 1))}}),
+ {update, {Id, {PrevRev + 1, Body}}};
+ true ->
+ send_result(Client, Id, {0, PrevRevs},
+ {ok, {0, <<"0">>}}),
+ {remove, Id}
+ end;
+ false ->
+ send_result(Client, Id, {0, PrevRevs}, conflict),
+ ignore
+ end
+ end, Docs, OldDocLookups),
+
+ BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
+ BtreeIdsUpdate = [{Key, Val} || {update, {Key, Val}} <- BtreeEntries],
+
+ {ok, Btree2} =
+ couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),
+
+ {ok, Db#db{local_docs_btree = Btree2}}.
+
+
+commit_data(Db) ->
+ commit_data(Db, false).
+
+db_to_header(Db, Header) ->
+ Header#db_header{
+ update_seq = Db#db.update_seq,
+ docinfo_by_seq_btree_state = couch_btree:get_state(Db#db.docinfo_by_seq_btree),
+ fulldocinfo_by_id_btree_state = couch_btree:get_state(Db#db.fulldocinfo_by_id_btree),
+ local_docs_btree_state = couch_btree:get_state(Db#db.local_docs_btree),
+ security_ptr = Db#db.security_ptr,
+ revs_limit = Db#db.revs_limit}.
+
+commit_data(#db{waiting_delayed_commit=nil} = Db, true) ->
+ Db#db{waiting_delayed_commit=erlang:send_after(1000,self(),delayed_commit)};
+commit_data(Db, true) ->
+ Db;
+commit_data(Db, _) ->
+ #db{
+ fd = Fd,
+ filepath = Filepath,
+ header = OldHeader,
+ fsync_options = FsyncOptions,
+ waiting_delayed_commit = Timer
+ } = Db,
+ if is_reference(Timer) -> erlang:cancel_timer(Timer); true -> ok end,
+ case db_to_header(Db, OldHeader) of
+ OldHeader ->
+ Db#db{waiting_delayed_commit=nil};
+ Header ->
+ case lists:member(before_header, FsyncOptions) of
+ true -> ok = couch_file:sync(Filepath);
+ _ -> ok
+ end,
+
+ ok = couch_file:write_header(Fd, Header),
+
+ case lists:member(after_header, FsyncOptions) of
+ true -> ok = couch_file:sync(Filepath);
+ _ -> ok
+ end,
+
+ Db#db{waiting_delayed_commit=nil,
+ header=Header,
+ committed_update_seq=Db#db.update_seq}
+ end.
+
+
+copy_doc_attachments(#db{fd=SrcFd}=SrcDb, {Pos,_RevId}, SrcSp, DestFd) ->
+ {ok, {BodyData, BinInfos}} = couch_db:read_doc(SrcDb, SrcSp),
+ % copy the bin values
+ NewBinInfos = lists:map(
+ fun({Name, {Type, BinSp, AttLen}}) when is_tuple(BinSp) orelse BinSp == null ->
+ % 09 UPGRADE CODE
+ {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} =
+ couch_stream:old_copy_to_new_stream(SrcFd, BinSp, AttLen, DestFd),
+ {Name, Type, NewBinSp, AttLen, AttLen, Pos, Md5, identity};
+ ({Name, {Type, BinSp, AttLen}}) ->
+ % 09 UPGRADE CODE
+ {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} =
+ couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
+ {Name, Type, NewBinSp, AttLen, AttLen, Pos, Md5, identity};
+ ({Name, Type, BinSp, AttLen, _RevPos, <<>>}) when
+ is_tuple(BinSp) orelse BinSp == null ->
+ % 09 UPGRADE CODE
+ {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} =
+ couch_stream:old_copy_to_new_stream(SrcFd, BinSp, AttLen, DestFd),
+ {Name, Type, NewBinSp, AttLen, AttLen, AttLen, Md5, identity};
+ ({Name, Type, BinSp, AttLen, RevPos, Md5}) ->
+ % 010 UPGRADE CODE
+ {NewBinSp, AttLen, AttLen, Md5, _IdentityMd5} =
+ couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
+ {Name, Type, NewBinSp, AttLen, AttLen, RevPos, Md5, identity};
+ ({Name, Type, BinSp, AttLen, DiskLen, RevPos, Md5, Enc1}) ->
+ {NewBinSp, AttLen, _, Md5, _IdentityMd5} =
+ couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
+ Enc = case Enc1 of
+ true ->
+ % 0110 UPGRADE CODE
+ gzip;
+ false ->
+ % 0110 UPGRADE CODE
+ identity;
+ _ ->
+ Enc1
+ end,
+ {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, Md5, Enc}
+ end, BinInfos),
+ {BodyData, NewBinInfos}.
+
+copy_docs(Db, #db{fd=DestFd}=NewDb, InfoBySeq0, Retry) ->
+ % COUCHDB-968, make sure we prune duplicates during compaction
+ InfoBySeq = lists:usort(fun(#doc_info{id=A}, #doc_info{id=B}) -> A =< B end,
+ InfoBySeq0),
+ Ids = [Id || #doc_info{id=Id} <- InfoBySeq],
+ LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids),
+
+ NewFullDocInfos1 = lists:map(
+ fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) ->
+ Info#full_doc_info{rev_tree=couch_key_tree:map(
+ fun(Rev, {IsDel, Sp, Seq}, leaf) ->
+ DocBody = copy_doc_attachments(Db, Rev, Sp, DestFd),
+ {ok, Pos} = couch_file:append_term_md5(DestFd, DocBody),
+ {IsDel, Pos, Seq};
+ (_, _, branch) ->
+ ?REV_MISSING
+ end, RevTree)}
+ end, LookupResults),
+
+ NewFullDocInfos = stem_full_doc_infos(Db, NewFullDocInfos1),
+ NewDocInfos = [couch_doc:to_doc_info(Info) || Info <- NewFullDocInfos],
+ RemoveSeqs =
+ case Retry of
+ false ->
+ [];
+ true ->
+ % We are retrying a compaction, meaning the documents we are copying may
+ % already exist in our file and must be removed from the by_seq index.
+ Existing = couch_btree:lookup(NewDb#db.fulldocinfo_by_id_btree, Ids),
+ [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
+ end,
+
+ {ok, DocInfoBTree} = couch_btree:add_remove(
+ NewDb#db.docinfo_by_seq_btree, NewDocInfos, RemoveSeqs),
+ {ok, FullDocInfoBTree} = couch_btree:add_remove(
+ NewDb#db.fulldocinfo_by_id_btree, NewFullDocInfos, []),
+ NewDb#db{ fulldocinfo_by_id_btree=FullDocInfoBTree,
+ docinfo_by_seq_btree=DocInfoBTree}.
+
+
+
+copy_compact(Db, NewDb0, Retry) ->
+ FsyncOptions = [Op || Op <- NewDb0#db.fsync_options, Op == before_header],
+ NewDb = NewDb0#db{fsync_options=FsyncOptions},
+ TotalChanges = couch_db:count_changes_since(Db, NewDb#db.update_seq),
+ EnumBySeqFun =
+ fun(#doc_info{high_seq=Seq}=DocInfo, _Offset, {AccNewDb, AccUncopied, TotalCopied}) ->
+ couch_task_status:update("Copied ~p of ~p changes (~p%)",
+ [TotalCopied, TotalChanges, (TotalCopied*100) div TotalChanges]),
+ if TotalCopied rem 1000 =:= 0 ->
+ NewDb2 = copy_docs(Db, AccNewDb, lists:reverse([DocInfo | AccUncopied]), Retry),
+ if TotalCopied rem 10000 =:= 0 ->
+ {ok, {commit_data(NewDb2#db{update_seq=Seq}), [], TotalCopied + 1}};
+ true ->
+ {ok, {NewDb2#db{update_seq=Seq}, [], TotalCopied + 1}}
+ end;
+ true ->
+ {ok, {AccNewDb, [DocInfo | AccUncopied], TotalCopied + 1}}
+ end
+ end,
+
+ couch_task_status:set_update_frequency(500),
+
+ {ok, _, {NewDb2, Uncopied, TotalChanges}} =
+ couch_btree:foldl(Db#db.docinfo_by_seq_btree, EnumBySeqFun,
+ {NewDb, [], 0},
+ [{start_key, NewDb#db.update_seq + 1}]),
+
+ couch_task_status:update("Flushing"),
+
+ NewDb3 = copy_docs(Db, NewDb2, lists:reverse(Uncopied), Retry),
+
+ % copy misc header values
+ if NewDb3#db.security /= Db#db.security ->
+ {ok, Ptr} = couch_file:append_term(NewDb3#db.fd, Db#db.security),
+ NewDb4 = NewDb3#db{security=Db#db.security, security_ptr=Ptr};
+ true ->
+ NewDb4 = NewDb3
+ end,
+
+ commit_data(NewDb4#db{update_seq=Db#db.update_seq}).
+
+start_copy_compact(#db{name=Name,filepath=Filepath,header=#db_header{purge_seq=PurgeSeq}}=Db) ->
+ CompactFile = Filepath ++ ".compact",
+ ?LOG_DEBUG("Compaction process spawned for db \"~s\"", [Name]),
+ case couch_file:open(CompactFile) of
+ {ok, Fd} ->
+ couch_task_status:add_task(<<"Database Compaction">>, <<Name/binary, " retry">>, <<"Starting">>),
+ Retry = true,
+ case couch_file:read_header(Fd) of
+ {ok, Header} ->
+ ok;
+ no_valid_header ->
+ ok = couch_file:write_header(Fd, Header=#db_header{})
+ end;
+ {error, enoent} ->
+ couch_task_status:add_task(<<"Database Compaction">>, Name, <<"Starting">>),
+ {ok, Fd} = couch_file:open(CompactFile, [create]),
+ Retry = false,
+ ok = couch_file:write_header(Fd, Header=#db_header{})
+ end,
+ NewDb = init_db(Name, CompactFile, Fd, Header),
+ NewDb2 = if PurgeSeq > 0 ->
+ {ok, PurgedIdsRevs} = couch_db:get_last_purged(Db),
+ {ok, Pointer} = couch_file:append_term(Fd, PurgedIdsRevs),
+ NewDb#db{header=Header#db_header{purge_seq=PurgeSeq, purged_docs=Pointer}};
+ true ->
+ NewDb
+ end,
+ unlink(Fd),
+
+ NewDb3 = copy_compact(Db, NewDb2, Retry),
+ close_db(NewDb3),
+ gen_server:cast(Db#db.update_pid, {compact_done, CompactFile}).
+
diff --git a/1.1.x/src/couchdb/couch_doc.erl b/1.1.x/src/couchdb/couch_doc.erl
new file mode 100644
index 00000000..e3d66145
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_doc.erl
@@ -0,0 +1,527 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_doc).
+
+-export([to_doc_info/1,to_doc_info_path/1,parse_rev/1,parse_revs/1,rev_to_str/1,revs_to_strs/1]).
+-export([att_foldl/3,range_att_foldl/5,att_foldl_decode/3,get_validate_doc_fun/1]).
+-export([from_json_obj/1,to_json_obj/2,has_stubs/1, merge_stubs/2]).
+-export([validate_docid/1]).
+-export([doc_from_multi_part_stream/2]).
+-export([doc_to_multi_part_stream/5, len_doc_to_multi_part_stream/4]).
+
+-include("couch_db.hrl").
+
+% helpers used by to_json_obj
+to_json_rev(0, []) ->
+ [];
+to_json_rev(Start, [FirstRevId|_]) ->
+ [{<<"_rev">>, ?l2b([integer_to_list(Start),"-",revid_to_str(FirstRevId)])}].
+
+to_json_body(true, {Body}) ->
+ Body ++ [{<<"_deleted">>, true}];
+to_json_body(false, {Body}) ->
+ Body.
+
+to_json_revisions(Options, Start, RevIds) ->
+ case lists:member(revs, Options) of
+ false -> [];
+ true ->
+ [{<<"_revisions">>, {[{<<"start">>, Start},
+ {<<"ids">>, [revid_to_str(R) ||R <- RevIds]}]}}]
+ end.
+
+revid_to_str(RevId) when size(RevId) =:= 16 ->
+ ?l2b(couch_util:to_hex(RevId));
+revid_to_str(RevId) ->
+ RevId.
+
+rev_to_str({Pos, RevId}) ->
+ ?l2b([integer_to_list(Pos),"-",revid_to_str(RevId)]).
+
+
+revs_to_strs([]) ->
+ [];
+revs_to_strs([{Pos, RevId}| Rest]) ->
+ [rev_to_str({Pos, RevId}) | revs_to_strs(Rest)].
+
+to_json_meta(Meta) ->
+ lists:map(
+ fun({revs_info, Start, RevsInfo}) ->
+ {JsonRevsInfo, _Pos} = lists:mapfoldl(
+ fun({RevId, Status}, PosAcc) ->
+ JsonObj = {[{<<"rev">>, rev_to_str({PosAcc, RevId})},
+ {<<"status">>, ?l2b(atom_to_list(Status))}]},
+ {JsonObj, PosAcc - 1}
+ end, Start, RevsInfo),
+ {<<"_revs_info">>, JsonRevsInfo};
+ ({local_seq, Seq}) ->
+ {<<"_local_seq">>, Seq};
+ ({conflicts, Conflicts}) ->
+ {<<"_conflicts">>, revs_to_strs(Conflicts)};
+ ({deleted_conflicts, DConflicts}) ->
+ {<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}
+ end, Meta).
+
+to_json_attachments(Attachments, Options) ->
+ to_json_attachments(
+ Attachments,
+ lists:member(attachments, Options),
+ lists:member(follows, Options),
+ lists:member(att_encoding_info, Options)
+ ).
+
+to_json_attachments([], _OutputData, _DataToFollow, _ShowEncInfo) ->
+ [];
+to_json_attachments(Atts, OutputData, DataToFollow, ShowEncInfo) ->
+ AttProps = lists:map(
+ fun(#att{disk_len=DiskLen, att_len=AttLen, encoding=Enc}=Att) ->
+ {Att#att.name, {[
+ {<<"content_type">>, Att#att.type},
+ {<<"revpos">>, Att#att.revpos}
+ ] ++
+ if not OutputData orelse Att#att.data == stub ->
+ [{<<"length">>, DiskLen}, {<<"stub">>, true}];
+ true ->
+ if DataToFollow ->
+ [{<<"length">>, DiskLen}, {<<"follows">>, true}];
+ true ->
+ AttData = case Enc of
+ gzip ->
+ zlib:gunzip(att_to_bin(Att));
+ identity ->
+ att_to_bin(Att)
+ end,
+ [{<<"data">>, base64:encode(AttData)}]
+ end
+ end ++
+ case {ShowEncInfo, Enc} of
+ {false, _} ->
+ [];
+ {true, identity} ->
+ [];
+ {true, _} ->
+ [
+ {<<"encoding">>, couch_util:to_binary(Enc)},
+ {<<"encoded_length">>, AttLen}
+ ]
+ end
+ }}
+ end, Atts),
+ [{<<"_attachments">>, {AttProps}}].
+
+to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
+ meta=Meta}=Doc,Options)->
+ {[{<<"_id">>, Id}]
+ ++ to_json_rev(Start, RevIds)
+ ++ to_json_body(Del, Body)
+ ++ to_json_revisions(Options, Start, RevIds)
+ ++ to_json_meta(Meta)
+ ++ to_json_attachments(Doc#doc.atts, Options)
+ }.
+
+from_json_obj({Props}) ->
+ transfer_fields(Props, #doc{body=[]});
+
+from_json_obj(_Other) ->
+ throw({bad_request, "Document must be a JSON object"}).
+
+parse_revid(RevId) when size(RevId) =:= 32 ->
+ RevInt = erlang:list_to_integer(?b2l(RevId), 16),
+ <<RevInt:128>>;
+parse_revid(RevId) when length(RevId) =:= 32 ->
+ RevInt = erlang:list_to_integer(RevId, 16),
+ <<RevInt:128>>;
+parse_revid(RevId) when is_binary(RevId) ->
+ RevId;
+parse_revid(RevId) when is_list(RevId) ->
+ ?l2b(RevId).
+
+
+parse_rev(Rev) when is_binary(Rev) ->
+ parse_rev(?b2l(Rev));
+parse_rev(Rev) when is_list(Rev) ->
+ SplitRev = lists:splitwith(fun($-) -> false; (_) -> true end, Rev),
+ case SplitRev of
+ {Pos, [$- | RevId]} -> {list_to_integer(Pos), parse_revid(RevId)};
+ _Else -> throw({bad_request, <<"Invalid rev format">>})
+ end;
+parse_rev(_BadRev) ->
+ throw({bad_request, <<"Invalid rev format">>}).
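+
+% For example (illustrative rev string):
+%   parse_rev(<<"2-deadbeefdeadbeefdeadbeefdeadbeef">>)
+% returns {2, <<16#deadbeefdeadbeefdeadbeefdeadbeef:128>>}, since 32-character
+% hex rev ids are packed into 16-byte binaries by parse_revid/1.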
+
+parse_revs([]) ->
+ [];
+parse_revs([Rev | Rest]) ->
+ [parse_rev(Rev) | parse_revs(Rest)].
+
+
+validate_docid(Id) when is_binary(Id) ->
+ case couch_util:validate_utf8(Id) of
+ false -> throw({bad_request, <<"Document id must be valid UTF-8">>});
+ true -> ok
+ end,
+ case Id of
+ <<"_design/", _/binary>> -> ok;
+ <<"_local/", _/binary>> -> ok;
+ <<"_", _/binary>> ->
+ throw({bad_request, <<"Only reserved document ids may start with underscore.">>});
+ _Else -> ok
+ end;
+validate_docid(Id) ->
+ ?LOG_DEBUG("Document id is not a string: ~p", [Id]),
+ throw({bad_request, <<"Document id must be a string">>}).
+
+transfer_fields([], #doc{body=Fields}=Doc) ->
+ % convert fields back to json object
+ Doc#doc{body={lists:reverse(Fields)}};
+
+transfer_fields([{<<"_id">>, Id} | Rest], Doc) ->
+ validate_docid(Id),
+ transfer_fields(Rest, Doc#doc{id=Id});
+
+transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc) ->
+ {Pos, RevId} = parse_rev(Rev),
+ transfer_fields(Rest,
+ Doc#doc{revs={Pos, [RevId]}});
+
+transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc) ->
+ % we already got the rev from the _revisions
+ transfer_fields(Rest,Doc);
+
+transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) ->
+ Atts = lists:map(fun({Name, {BinProps}}) ->
+ case couch_util:get_value(<<"stub">>, BinProps) of
+ true ->
+ Type = couch_util:get_value(<<"content_type">>, BinProps),
+ RevPos = couch_util:get_value(<<"revpos">>, BinProps, nil),
+ DiskLen = couch_util:get_value(<<"length">>, BinProps),
+ {Enc, EncLen} = att_encoding_info(BinProps),
+ #att{name=Name, data=stub, type=Type, att_len=EncLen,
+ disk_len=DiskLen, encoding=Enc, revpos=RevPos};
+ _ ->
+ Type = couch_util:get_value(<<"content_type">>, BinProps,
+ ?DEFAULT_ATTACHMENT_CONTENT_TYPE),
+ RevPos = couch_util:get_value(<<"revpos">>, BinProps, 0),
+ case couch_util:get_value(<<"follows">>, BinProps) of
+ true ->
+ DiskLen = couch_util:get_value(<<"length">>, BinProps),
+ {Enc, EncLen} = att_encoding_info(BinProps),
+ #att{name=Name, data=follows, type=Type, encoding=Enc,
+ att_len=EncLen, disk_len=DiskLen, revpos=RevPos};
+ _ ->
+ Value = couch_util:get_value(<<"data">>, BinProps),
+ Bin = base64:decode(Value),
+ LenBin = size(Bin),
+ #att{name=Name, data=Bin, type=Type, att_len=LenBin,
+ disk_len=LenBin, revpos=RevPos}
+ end
+ end
+ end, JsonBins),
+ transfer_fields(Rest, Doc#doc{atts=Atts});
+
+transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
+ RevIds = couch_util:get_value(<<"ids">>, Props),
+ Start = couch_util:get_value(<<"start">>, Props),
+ if not is_integer(Start) ->
+ throw({doc_validation, "_revisions.start isn't an integer."});
+ not is_list(RevIds) ->
+ throw({doc_validation, "_revisions.ids isn't a array."});
+ true ->
+ ok
+ end,
+ [throw({doc_validation, "RevId isn't a string"}) ||
+ RevId <- RevIds, not is_binary(RevId)],
+ RevIds2 = [parse_revid(RevId) || RevId <- RevIds],
+ transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}});
+
+transfer_fields([{<<"_deleted">>, B} | Rest], Doc) when is_boolean(B) ->
+ transfer_fields(Rest, Doc#doc{deleted=B});
+
+% ignored fields
+transfer_fields([{<<"_revs_info">>, _} | Rest], Doc) ->
+ transfer_fields(Rest, Doc);
+transfer_fields([{<<"_local_seq">>, _} | Rest], Doc) ->
+ transfer_fields(Rest, Doc);
+transfer_fields([{<<"_conflicts">>, _} | Rest], Doc) ->
+ transfer_fields(Rest, Doc);
+transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc) ->
+ transfer_fields(Rest, Doc);
+
+% special fields for replication documents
+transfer_fields([{<<"_replication_state">>, _} = Field | Rest],
+ #doc{body=Fields} = Doc) ->
+ transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+transfer_fields([{<<"_replication_state_time">>, _} = Field | Rest],
+ #doc{body=Fields} = Doc) ->
+ transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+transfer_fields([{<<"_replication_id">>, _} = Field | Rest],
+ #doc{body=Fields} = Doc) ->
+ transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+
+% unknown special field
+transfer_fields([{<<"_",Name/binary>>, _} | _], _) ->
+ throw({doc_validation,
+ ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
+
+transfer_fields([Field | Rest], #doc{body=Fields}=Doc) ->
+ transfer_fields(Rest, Doc#doc{body=[Field|Fields]}).
+
+att_encoding_info(BinProps) ->
+ DiskLen = couch_util:get_value(<<"length">>, BinProps),
+ case couch_util:get_value(<<"encoding">>, BinProps) of
+ undefined ->
+ {identity, DiskLen};
+ Enc ->
+ EncodedLen = couch_util:get_value(<<"encoded_length">>, BinProps, DiskLen),
+ {list_to_existing_atom(?b2l(Enc)), EncodedLen}
+ end.
+
+to_doc_info(FullDocInfo) ->
+ {DocInfo, _Path} = to_doc_info_path(FullDocInfo),
+ DocInfo.
+
+max_seq([], Max) ->
+ Max;
+max_seq([#rev_info{seq=Seq}|Rest], Max) ->
+ max_seq(Rest, if Max > Seq -> Max; true -> Seq end).
+
+to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree}) ->
+ RevInfosAndPath =
+ [{#rev_info{deleted=Del,body_sp=Bp,seq=Seq,rev={Pos,RevId}}, Path} ||
+ {{Del, Bp, Seq},{Pos, [RevId|_]}=Path} <-
+ couch_key_tree:get_all_leafs(Tree)],
+ SortedRevInfosAndPath = lists:sort(
+ fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA},
+ {#rev_info{deleted=DeletedB,rev=RevB}, _PathB}) ->
+ % sort descending by {not deleted, rev}
+ {not DeletedA, RevA} > {not DeletedB, RevB}
+ end, RevInfosAndPath),
+ [{_RevInfo, WinPath}|_] = SortedRevInfosAndPath,
+ RevInfos = [RevInfo || {RevInfo, _Path} <- SortedRevInfosAndPath],
+ {#doc_info{id=Id, high_seq=max_seq(RevInfos, 0), revs=RevInfos}, WinPath}.
+
+
+
+
+att_foldl(#att{data=Bin}, Fun, Acc) when is_binary(Bin) ->
+ Fun(Bin, Acc);
+att_foldl(#att{data={Fd,Sp},att_len=Len}, Fun, Acc) when is_tuple(Sp) orelse Sp == null ->
+ % 09 UPGRADE CODE
+ couch_stream:old_foldl(Fd, Sp, Len, Fun, Acc);
+att_foldl(#att{data={Fd,Sp},md5=Md5}, Fun, Acc) ->
+ couch_stream:foldl(Fd, Sp, Md5, Fun, Acc);
+att_foldl(#att{data=DataFun,att_len=Len}, Fun, Acc) when is_function(DataFun) ->
+ fold_streamed_data(DataFun, Len, Fun, Acc).
+
+range_att_foldl(#att{data={Fd,Sp}}, From, To, Fun, Acc) ->
+ couch_stream:range_foldl(Fd, Sp, From, To, Fun, Acc).
+
+att_foldl_decode(#att{data={Fd,Sp},md5=Md5,encoding=Enc}, Fun, Acc) ->
+ couch_stream:foldl_decode(Fd, Sp, Md5, Enc, Fun, Acc);
+att_foldl_decode(#att{data=Fun2,att_len=Len, encoding=identity}, Fun, Acc) ->
+ fold_streamed_data(Fun2, Len, Fun, Acc).
+
+att_to_bin(#att{data=Bin}) when is_binary(Bin) ->
+ Bin;
+att_to_bin(#att{data=Iolist}) when is_list(Iolist) ->
+ iolist_to_binary(Iolist);
+att_to_bin(#att{data={_Fd,_Sp}}=Att) ->
+ iolist_to_binary(
+ lists:reverse(att_foldl(
+ Att,
+ fun(Bin,Acc) -> [Bin|Acc] end,
+ []
+ ))
+ );
+att_to_bin(#att{data=DataFun, att_len=Len}) when is_function(DataFun)->
+ iolist_to_binary(
+ lists:reverse(fold_streamed_data(
+ DataFun,
+ Len,
+ fun(Data, Acc) -> [Data | Acc] end,
+ []
+ ))
+ ).
+
+get_validate_doc_fun(#doc{body={Props}}=DDoc) ->
+ case couch_util:get_value(<<"validate_doc_update">>, Props) of
+ undefined ->
+ nil;
+ _Else ->
+ fun(EditDoc, DiskDoc, Ctx, SecObj) ->
+ couch_query_servers:validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj)
+ end
+ end.
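+
+% Illustrative design doc (field body invented); any design doc carrying a
+% validate_doc_update field produces a validation fun here:
+%
+%   {"_id": "_design/auth",
+%    "validate_doc_update":
+%        "function(newDoc, oldDoc, userCtx, secObj) { /* throw to reject */ }"}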
+
+
+has_stubs(#doc{atts=Atts}) ->
+ has_stubs(Atts);
+has_stubs([]) ->
+ false;
+has_stubs([#att{data=stub}|_]) ->
+ true;
+has_stubs([_Att|Rest]) ->
+ has_stubs(Rest).
+
+merge_stubs(#doc{id=Id,atts=MemBins}=StubsDoc, #doc{atts=DiskBins}) ->
+ BinDict = dict:from_list([{Name, Att} || #att{name=Name}=Att <- DiskBins]),
+ MergedBins = lists:map(
+ fun(#att{name=Name, data=stub, revpos=StubRevPos}) ->
+ case dict:find(Name, BinDict) of
+ {ok, #att{revpos=DiskRevPos}=DiskAtt}
+ when DiskRevPos == StubRevPos orelse StubRevPos == nil ->
+ DiskAtt;
+ _ ->
+ throw({missing_stub,
+ <<"id:", Id/binary, ", name:", Name/binary>>})
+ end;
+ (Att) ->
+ Att
+ end, MemBins),
+ StubsDoc#doc{atts= MergedBins}.
+
+fold_streamed_data(_RcvFun, 0, _Fun, Acc) ->
+ Acc;
+fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0->
+ Bin = RcvFun(),
+ ResultAcc = Fun(Bin, Acc),
+ fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc).
+
+len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) ->
+ AttsSize = lists:foldl(fun(#att{data=Data} = Att, AccAttsSize) ->
+ case Data of
+ stub ->
+ AccAttsSize;
+ _ ->
+ AccAttsSize +
+ 4 + % "\r\n\r\n"
+ case SendEncodedAtts of
+ true ->
+ Att#att.att_len;
+ _ ->
+ Att#att.disk_len
+ end +
+ 4 + % "\r\n--"
+ size(Boundary)
+ end
+ end, 0, Atts),
+ if AttsSize == 0 ->
+ {<<"application/json">>, iolist_size(JsonBytes)};
+ true ->
+ {<<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
+ 2 + % "--"
+ size(Boundary) +
+ 36 + % "\r\ncontent-type: application/json\r\n\r\n"
+ iolist_size(JsonBytes) +
+ 4 + % "\r\n--"
+ size(Boundary) +
+ AttsSize +
+ 2 % "--"
+ }
+ end.
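+
+% Worked example (illustrative sizes): with a 16-byte boundary, 100 bytes of
+% JSON and one non-stub attachment of att_len 50 (SendEncodedAtts = true),
+% the attachment contributes 4 + 50 + 4 + 16 = 74 bytes, for a total of
+% 2 + 16 + 36 + 100 + 4 + 16 + 74 + 2 = 250 bytes.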
+
+doc_to_multi_part_stream(Boundary, JsonBytes, Atts, WriteFun,
+ SendEncodedAtts) ->
+ case lists:any(fun(#att{data=Data})-> Data /= stub end, Atts) of
+ true ->
+ WriteFun([<<"--", Boundary/binary,
+ "\r\ncontent-type: application/json\r\n\r\n">>,
+ JsonBytes, <<"\r\n--", Boundary/binary>>]),
+ atts_to_mp(Atts, Boundary, WriteFun, SendEncodedAtts);
+ false ->
+ WriteFun(JsonBytes)
+ end.
+
+atts_to_mp([], _Boundary, WriteFun, _SendEncAtts) ->
+ WriteFun(<<"--">>);
+atts_to_mp([#att{data=stub} | RestAtts], Boundary, WriteFun,
+ SendEncodedAtts) ->
+ atts_to_mp(RestAtts, Boundary, WriteFun, SendEncodedAtts);
+atts_to_mp([Att | RestAtts], Boundary, WriteFun,
+ SendEncodedAtts) ->
+ WriteFun(<<"\r\n\r\n">>),
+ AttFun = case SendEncodedAtts of
+ false ->
+ fun att_foldl_decode/3;
+ true ->
+ fun att_foldl/3
+ end,
+ AttFun(Att, fun(Data, _) -> WriteFun(Data) end, ok),
+ WriteFun(<<"\r\n--", Boundary/binary>>),
+ atts_to_mp(RestAtts, Boundary, WriteFun, SendEncodedAtts).
+
+
+doc_from_multi_part_stream(ContentType, DataFun) ->
+ Self = self(),
+ Parser = spawn_link(fun() ->
+ couch_httpd:parse_multipart_request(ContentType, DataFun,
+ fun(Next)-> mp_parse_doc(Next, []) end),
+ unlink(Self)
+ end),
+ Parser ! {get_doc_bytes, self()},
+ receive
+ {doc_bytes, DocBytes} ->
+ erlang:put(mochiweb_request_recv, true),
+ Doc = from_json_obj(?JSON_DECODE(DocBytes)),
+ % go through the attachments looking for 'follows' in the data,
+ % and replace it with a function that reads the data from the MIME stream.
+ ReadAttachmentDataFun = fun() ->
+ Parser ! {get_bytes, self()},
+ receive {bytes, Bytes} -> Bytes end
+ end,
+ Atts2 = lists:map(
+ fun(#att{data=follows}=A) ->
+ A#att{data=ReadAttachmentDataFun};
+ (A) ->
+ A
+ end, Doc#doc.atts),
+ {ok, Doc#doc{atts=Atts2}}
+ end.
+
+mp_parse_doc({headers, H}, []) ->
+ case couch_util:get_value("content-type", H) of
+ {"application/json", _} ->
+ fun (Next) ->
+ mp_parse_doc(Next, [])
+ end
+ end;
+mp_parse_doc({body, Bytes}, AccBytes) ->
+ fun (Next) ->
+ mp_parse_doc(Next, [Bytes | AccBytes])
+ end;
+mp_parse_doc(body_end, AccBytes) ->
+ receive {get_doc_bytes, From} ->
+ From ! {doc_bytes, lists:reverse(AccBytes)}
+ end,
+ fun (Next) ->
+ mp_parse_atts(Next)
+ end.
+
+mp_parse_atts(eof) ->
+ ok;
+mp_parse_atts({headers, _H}) ->
+ fun (Next) ->
+ mp_parse_atts(Next)
+ end;
+mp_parse_atts({body, Bytes}) ->
+ receive {get_bytes, From} ->
+ From ! {bytes, Bytes}
+ end,
+ fun (Next) ->
+ mp_parse_atts(Next)
+ end;
+mp_parse_atts(body_end) ->
+ fun (Next) ->
+ mp_parse_atts(Next)
+ end.
+
+
diff --git a/1.1.x/src/couchdb/couch_event_sup.erl b/1.1.x/src/couchdb/couch_event_sup.erl
new file mode 100644
index 00000000..07c48790
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_event_sup.erl
@@ -0,0 +1,73 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% The purpose of this module is to allow event handlers to participate in
+%% Erlang supervisor trees. It provides a monitorable process that crashes if
+%% the event handler fails. The process, when shut down, deregisters the event
+%% handler.
+
+-module(couch_event_sup).
+-behaviour(gen_server).
+
+-include("couch_db.hrl").
+
+-export([start_link/3,start_link/4, stop/1]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]).
+
+%
+% Instead of calling
+% ok = gen_event:add_sup_handler(error_logger, my_log, Args)
+%
+% do this:
+% {ok, LinkedPid} = couch_event_sup:start_link(error_logger, my_log, Args)
+%
+% The benefit is the event is now part of the process tree, and can be
+% started, restarted and shutdown consistently like the rest of the server
+% components.
+%
+% And now if the "event" crashes, the supervisor is notified and can restart
+% the event handler.
+%
+% Use this form for a named process:
+% {ok, LinkedPid} = couch_event_sup:start_link({local, my_log}, error_logger, my_log, Args)
+%
+
+start_link(EventMgr, EventHandler, Args) ->
+ gen_server:start_link(couch_event_sup, {EventMgr, EventHandler, Args}, []).
+
+start_link(ServerName, EventMgr, EventHandler, Args) ->
+ gen_server:start_link(ServerName, couch_event_sup, {EventMgr, EventHandler, Args}, []).
+
+stop(Pid) ->
+ gen_server:cast(Pid, stop).
+
+init({EventMgr, EventHandler, Args}) ->
+ case gen_event:add_sup_handler(EventMgr, EventHandler, Args) of
+ ok ->
+ {ok, {EventMgr, EventHandler}};
+ {stop, Error} ->
+ {stop, Error}
+ end.
+
+terminate(_Reason, _State) ->
+ ok.
+
+handle_call(_Whatever, _From, State) ->
+ {ok, State}.
+
+handle_cast(stop, State) ->
+ {stop, normal, State}.
+
+handle_info({gen_event_EXIT, _Handler, Reason}, State) ->
+ {stop, Reason, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/1.1.x/src/couchdb/couch_external_manager.erl b/1.1.x/src/couchdb/couch_external_manager.erl
new file mode 100644
index 00000000..7e401389
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_external_manager.erl
@@ -0,0 +1,101 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_external_manager).
+-behaviour(gen_server).
+
+-export([start_link/0, execute/2, config_change/2]).
+-export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2, handle_info/2]).
+
+-include("couch_db.hrl").
+
+start_link() ->
+ gen_server:start_link({local, couch_external_manager},
+ couch_external_manager, [], []).
+
+execute(UrlName, JsonReq) ->
+ Pid = gen_server:call(couch_external_manager, {get, UrlName}),
+ case Pid of
+ {error, Reason} ->
+ Reason;
+ _ ->
+ couch_external_server:execute(Pid, JsonReq)
+ end.
+
+config_change("external", UrlName) ->
+ gen_server:call(couch_external_manager, {config, UrlName}).
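+
+% Hypothetical default.ini snippet (name and command invented): requests to
+% /db/_external/mykey are dispatched to the configured OS process:
+%
+%   [external]
+%   mykey = /usr/local/bin/my-external-handler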
+
+% gen_server API
+
+init([]) ->
+ process_flag(trap_exit, true),
+ Handlers = ets:new(couch_external_manager_handlers, [set, private]),
+ couch_config:register(fun config_change/2),
+ {ok, Handlers}.
+
+terminate(_Reason, Handlers) ->
+ ets:foldl(fun({_UrlName, Pid}, nil) ->
+ couch_external_server:stop(Pid),
+ nil
+ end, nil, Handlers),
+ ok.
+
+handle_call({get, UrlName}, _From, Handlers) ->
+ case ets:lookup(Handlers, UrlName) of
+ [] ->
+ case couch_config:get("external", UrlName, nil) of
+ nil ->
+ Msg = lists:flatten(
+ io_lib:format("No server configured for ~p.", [UrlName])),
+ {reply, {error, {unknown_external_server, ?l2b(Msg)}}, Handlers};
+ Command ->
+ {ok, NewPid} = couch_external_server:start_link(UrlName, Command),
+ true = ets:insert(Handlers, {UrlName, NewPid}),
+ {reply, NewPid, Handlers}
+ end;
+ [{UrlName, Pid}] ->
+ {reply, Pid, Handlers}
+ end;
+handle_call({config, UrlName}, _From, Handlers) ->
+ % A newly added handler and a handler that had its command
+ % changed are treated exactly the same.
+
+ % Shutdown the old handler.
+ case ets:lookup(Handlers, UrlName) of
+ [{UrlName, Pid}] ->
+ couch_external_server:stop(Pid);
+ [] ->
+ ok
+ end,
+ % Wait for the next request to boot the handler.
+ {reply, ok, Handlers}.
+
+handle_cast(_Whatever, State) ->
+ {noreply, State}.
+
+handle_info({'EXIT', Pid, normal}, Handlers) ->
+ ?LOG_INFO("EXTERNAL: Server ~p terminated normally", [Pid]),
+    % The process terminated normally without us asking; remove Pid from the
+    % handlers table so we don't attempt to reuse it.
+ ets:match_delete(Handlers, {'_', Pid}),
+ {noreply, Handlers};
+
+handle_info({'EXIT', Pid, Reason}, Handlers) ->
+ ?LOG_INFO("EXTERNAL: Server ~p died. (reason: ~p)", [Pid, Reason]),
+ % Remove Pid from the handlers table so we don't try closing
+ % it a second time in terminate/2.
+ ets:match_delete(Handlers, {'_', Pid}),
+ {stop, normal, Handlers}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
diff --git a/1.1.x/src/couchdb/couch_external_server.erl b/1.1.x/src/couchdb/couch_external_server.erl
new file mode 100644
index 00000000..045fcee9
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_external_server.erl
@@ -0,0 +1,69 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_external_server).
+-behaviour(gen_server).
+
+-export([start_link/2, stop/1, execute/2]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
+
+-include("couch_db.hrl").
+
+% External API
+
+start_link(Name, Command) ->
+ gen_server:start_link(couch_external_server, [Name, Command], []).
+
+stop(Pid) ->
+ gen_server:cast(Pid, stop).
+
+execute(Pid, JsonReq) ->
+ gen_server:call(Pid, {execute, JsonReq}, infinity).
+
+% Gen Server Handlers
+
+init([Name, Command]) ->
+ ?LOG_INFO("EXTERNAL: Starting process for: ~s", [Name]),
+ ?LOG_INFO("COMMAND: ~s", [Command]),
+ process_flag(trap_exit, true),
+ Timeout = list_to_integer(couch_config:get("couchdb", "os_process_timeout",
+ "5000")),
+ {ok, Pid} = couch_os_process:start_link(Command, [{timeout, Timeout}]),
+ couch_config:register(fun("couchdb", "os_process_timeout", NewTimeout) ->
+ couch_os_process:set_timeout(Pid, list_to_integer(NewTimeout))
+ end),
+ {ok, {Name, Command, Pid}}.
+
+terminate(_Reason, {_Name, _Command, Pid}) ->
+ couch_os_process:stop(Pid),
+ ok.
+
+handle_call({execute, JsonReq}, _From, {Name, Command, Pid}) ->
+ {reply, couch_os_process:prompt(Pid, JsonReq), {Name, Command, Pid}}.
+
+handle_info({'EXIT', _Pid, normal}, State) ->
+ {noreply, State};
+handle_info({'EXIT', Pid, Reason}, {Name, Command, Pid}) ->
+ ?LOG_INFO("EXTERNAL: Process for ~s exiting. (reason: ~w)", [Name, Reason]),
+ {stop, Reason, {Name, Command, Pid}}.
+
+handle_cast(stop, {Name, Command, Pid}) ->
+ ?LOG_INFO("EXTERNAL: Shutting down ~s", [Name]),
+ exit(Pid, normal),
+ {stop, normal, {Name, Command, Pid}};
+handle_cast(_Whatever, State) ->
+ {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
diff --git a/1.1.x/src/couchdb/couch_file.erl b/1.1.x/src/couchdb/couch_file.erl
new file mode 100644
index 00000000..7b677034
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_file.erl
@@ -0,0 +1,614 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_file).
+-behaviour(gen_server).
+
+-include("couch_db.hrl").
+
+-define(SIZE_BLOCK, 4096).
+
+-record(file, {
+ fd,
+ tail_append_begin = 0, % 09 UPGRADE CODE
+ eof = 0
+ }).
+
+-export([open/1, open/2, close/1, bytes/1, sync/1, append_binary/2,old_pread/3]).
+-export([append_term/2, pread_term/2, pread_iolist/2, write_header/2]).
+-export([pread_binary/2, read_header/1, truncate/2, upgrade_old_header/2]).
+-export([append_term_md5/2,append_binary_md5/2]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
+-export([delete/2,delete/3,init_delete_dir/1]).
+
+%%----------------------------------------------------------------------
+%% Args: Valid Options are [create] and [create,overwrite].
+%% Files are opened in read/write mode.
+%% Returns: On success, {ok, Fd}
+%% or {error, Reason} if the file could not be opened.
+%%----------------------------------------------------------------------
+
+open(Filepath) ->
+ open(Filepath, []).
+
+open(Filepath, Options) ->
+ case gen_server:start_link(couch_file,
+ {Filepath, Options, self(), Ref = make_ref()}, []) of
+ {ok, Fd} ->
+ {ok, Fd};
+ ignore ->
+ % get the error
+ receive
+ {Ref, Pid, Error} ->
+ case process_info(self(), trap_exit) of
+ {trap_exit, true} -> receive {'EXIT', Pid, _} -> ok end;
+ {trap_exit, false} -> ok
+ end,
+ case Error of
+ {error, eacces} -> {file_permission_error, Filepath};
+ _ -> Error
+ end
+ end;
+ Error ->
+ Error
+ end.
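+
+% Usage sketch (the file path is hypothetical): opening with [create, overwrite]
+% truncates an existing non-empty file, while [create] alone fails with
+% file_exists.
+%
+%   {ok, Fd} = couch_file:open("/tmp/example.couch", [create, overwrite]),
+%   ok = couch_file:sync(Fd),
+%   couch_file:close(Fd).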
+
+
+%%----------------------------------------------------------------------
+%% Purpose: To append an Erlang term to the end of the file.
+%% Args: Erlang term to serialize and append to the file.
+%%  Returns: {ok, Pos} where Pos is the file offset to the beginning of the
+%%  serialized term. Use pread_term to read the term back.
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+append_term(Fd, Term) ->
+ append_binary(Fd, term_to_binary(Term)).
+
+append_term_md5(Fd, Term) ->
+ append_binary_md5(Fd, term_to_binary(Term)).
+
+
+%%----------------------------------------------------------------------
+%% Purpose: To append an Erlang binary to the end of the file.
+%% Args:    Erlang binary (or iolist) to append to the file.
+%%  Returns: {ok, Pos} where Pos is the file offset to the beginning of the
+%%  data. Use pread_binary to read the data back.
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+append_binary(Fd, Bin) ->
+ Size = iolist_size(Bin),
+ gen_server:call(Fd, {append_bin,
+ [<<0:1/integer,Size:31/integer>>, Bin]}, infinity).
+
+append_binary_md5(Fd, Bin) ->
+ Size = iolist_size(Bin),
+ gen_server:call(Fd, {append_bin,
+ [<<1:1/integer,Size:31/integer>>, couch_util:md5(Bin), Bin]}, infinity).
+
+
+%%----------------------------------------------------------------------
+%% Purpose: Reads a term from a file that was written with append_term
+%% Args: Pos, the offset into the file where the term is serialized.
+%% Returns: {ok, Term}
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+
+pread_term(Fd, Pos) ->
+ {ok, Bin} = pread_binary(Fd, Pos),
+ {ok, binary_to_term(Bin)}.
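+
+% Round-trip sketch: append_term/2 returns the offset where the term begins,
+% and pread_term/2 reads it back from that offset.
+%
+%   {ok, Pos} = couch_file:append_term(Fd, {doc, <<"id">>, 1}),
+%   {ok, {doc, <<"id">>, 1}} = couch_file:pread_term(Fd, Pos).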
+
+
+%%----------------------------------------------------------------------
+%% Purpose: Reads a binary from a file that was written with append_binary
+%% Args:    Pos, the offset into the file where the data was written.
+%% Returns: {ok, Binary}
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+pread_binary(Fd, Pos) ->
+ {ok, L} = pread_iolist(Fd, Pos),
+ {ok, iolist_to_binary(L)}.
+
+
+pread_iolist(Fd, Pos) ->
+ case gen_server:call(Fd, {pread_iolist, Pos}, infinity) of
+ {ok, IoList, <<>>} ->
+ {ok, IoList};
+ {ok, IoList, Md5} ->
+ case couch_util:md5(IoList) of
+ Md5 ->
+ {ok, IoList};
+ _ ->
+ exit({file_corruption, <<"file corruption">>})
+ end;
+ Error ->
+ Error
+ end.
+
+%%----------------------------------------------------------------------
+%% Purpose: The length of a file, in bytes.
+%% Returns: {ok, Bytes}
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+% length in bytes
+bytes(Fd) ->
+ gen_server:call(Fd, bytes, infinity).
+
+%%----------------------------------------------------------------------
+%% Purpose: Truncate a file to the given position, in bytes.
+%% Returns: ok
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+truncate(Fd, Pos) ->
+ gen_server:call(Fd, {truncate, Pos}, infinity).
+
+%%----------------------------------------------------------------------
+%% Purpose: Ensure all bytes written to the file are flushed to disk.
+%% Returns: ok
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+sync(Filepath) when is_list(Filepath) ->
+ {ok, Fd} = file:open(Filepath, [append, raw]),
+ try file:sync(Fd) after file:close(Fd) end;
+sync(Fd) ->
+ gen_server:call(Fd, sync, infinity).
+
+%%----------------------------------------------------------------------
+%% Purpose: Close the file.
+%% Returns: ok
+%%----------------------------------------------------------------------
+close(Fd) ->
+ couch_util:shutdown_sync(Fd).
+
+
+delete(RootDir, Filepath) ->
+ delete(RootDir, Filepath, true).
+
+
+delete(RootDir, Filepath, Async) ->
+ DelFile = filename:join([RootDir,".delete", ?b2l(couch_uuids:random())]),
+ case file:rename(Filepath, DelFile) of
+ ok ->
+ if (Async) ->
+ spawn(file, delete, [DelFile]),
+ ok;
+ true ->
+ file:delete(DelFile)
+ end;
+ Error ->
+ Error
+ end.
+
+
+init_delete_dir(RootDir) ->
+ Dir = filename:join(RootDir,".delete"),
+    % note: ensure_dir requires an actual filename component, which is the
+ % reason for "foo".
+ filelib:ensure_dir(filename:join(Dir,"foo")),
+ filelib:fold_files(Dir, ".*", true,
+ fun(Filename, _) ->
+ ok = file:delete(Filename)
+ end, ok).
+
+
+% 09 UPGRADE CODE
+old_pread(Fd, Pos, Len) ->
+ {ok, <<RawBin:Len/binary>>, false} = gen_server:call(Fd, {pread, Pos, Len}, infinity),
+ {ok, RawBin}.
+
+% 09 UPGRADE CODE
+upgrade_old_header(Fd, Sig) ->
+ gen_server:call(Fd, {upgrade_old_header, Sig}, infinity).
+
+
+read_header(Fd) ->
+ case gen_server:call(Fd, find_header, infinity) of
+ {ok, Bin} ->
+ {ok, binary_to_term(Bin)};
+ Else ->
+ Else
+ end.
+
+write_header(Fd, Data) ->
+ Bin = term_to_binary(Data),
+ Md5 = couch_util:md5(Bin),
+ % now we assemble the final header binary and write to disk
+ FinalBin = <<Md5/binary, Bin/binary>>,
+ gen_server:call(Fd, {write_header, FinalBin}, infinity).
+
+
+
+
+init_status_error(ReturnPid, Ref, Error) ->
+ ReturnPid ! {Ref, self(), Error},
+ ignore.
+
+% server functions
+
+init({Filepath, Options, ReturnPid, Ref}) ->
+ process_flag(trap_exit, true),
+ case lists:member(create, Options) of
+ true ->
+ filelib:ensure_dir(Filepath),
+ case file:open(Filepath, [read, append, raw, binary]) of
+ {ok, Fd} ->
+ {ok, Length} = file:position(Fd, eof),
+ case Length > 0 of
+ true ->
+ % this means the file already exists and has data.
+            % FYI: We don't differentiate between empty files and non-existent
+ % files here.
+ case lists:member(overwrite, Options) of
+ true ->
+ {ok, 0} = file:position(Fd, 0),
+ ok = file:truncate(Fd),
+ ok = file:sync(Fd),
+ maybe_track_open_os_files(Options),
+ {ok, #file{fd=Fd}};
+ false ->
+ ok = file:close(Fd),
+ init_status_error(ReturnPid, Ref, file_exists)
+ end;
+ false ->
+ maybe_track_open_os_files(Options),
+ {ok, #file{fd=Fd}}
+ end;
+ Error ->
+ init_status_error(ReturnPid, Ref, Error)
+ end;
+ false ->
+ % open in read mode first, so we don't create the file if it doesn't exist.
+ case file:open(Filepath, [read, raw]) of
+ {ok, Fd_Read} ->
+ {ok, Fd} = file:open(Filepath, [read, append, raw, binary]),
+ ok = file:close(Fd_Read),
+ maybe_track_open_os_files(Options),
+ {ok, Length} = file:position(Fd, eof),
+ {ok, #file{fd=Fd, eof=Length}};
+ Error ->
+ init_status_error(ReturnPid, Ref, Error)
+ end
+ end.
+
+maybe_track_open_os_files(FileOptions) ->
+ case lists:member(sys_db, FileOptions) of
+ true ->
+ ok;
+ false ->
+ couch_stats_collector:track_process_count({couchdb, open_os_files})
+ end.
+
+terminate(_Reason, #file{fd = Fd}) ->
+ ok = file:close(Fd).
+
+
+handle_call({pread_iolist, Pos}, _From, File) ->
+ {RawData, NextPos} = try
+        % read ahead up to 8 KiB (two blocks)
+ read_raw_iolist_int(File, Pos, 2 * ?SIZE_BLOCK - (Pos rem ?SIZE_BLOCK))
+ catch
+ _:_ ->
+ read_raw_iolist_int(File, Pos, 4)
+ end,
+ <<Prefix:1/integer, Len:31/integer, RestRawData/binary>> =
+ iolist_to_binary(RawData),
+ case Prefix of
+ 1 ->
+ {Md5, IoList} = extract_md5(
+ maybe_read_more_iolist(RestRawData, 16 + Len, NextPos, File)),
+ {reply, {ok, IoList, Md5}, File};
+ 0 ->
+ IoList = maybe_read_more_iolist(RestRawData, Len, NextPos, File),
+ {reply, {ok, IoList, <<>>}, File}
+ end;
+handle_call({pread, Pos, Bytes}, _From, #file{fd=Fd,tail_append_begin=TailAppendBegin}=File) ->
+ {ok, Bin} = file:pread(Fd, Pos, Bytes),
+ {reply, {ok, Bin, Pos >= TailAppendBegin}, File};
+handle_call(bytes, _From, #file{eof=Length}=File) ->
+ {reply, {ok, Length}, File};
+handle_call(sync, _From, #file{fd=Fd}=File) ->
+ {reply, file:sync(Fd), File};
+handle_call({truncate, Pos}, _From, #file{fd=Fd}=File) ->
+ {ok, Pos} = file:position(Fd, Pos),
+ case file:truncate(Fd) of
+ ok ->
+ {reply, ok, File#file{eof=Pos}};
+ Error ->
+ {reply, Error, File}
+ end;
+handle_call({append_bin, Bin}, _From, #file{fd=Fd, eof=Pos}=File) ->
+ Blocks = make_blocks(Pos rem ?SIZE_BLOCK, Bin),
+ case file:write(Fd, Blocks) of
+ ok ->
+ {reply, {ok, Pos}, File#file{eof=Pos+iolist_size(Blocks)}};
+ Error ->
+ {reply, Error, File}
+ end;
+handle_call({write_header, Bin}, _From, #file{fd=Fd, eof=Pos}=File) ->
+ BinSize = size(Bin),
+ case Pos rem ?SIZE_BLOCK of
+ 0 ->
+ Padding = <<>>;
+ BlockOffset ->
+ Padding = <<0:(8*(?SIZE_BLOCK-BlockOffset))>>
+ end,
+ FinalBin = [Padding, <<1, BinSize:32/integer>> | make_blocks(5, [Bin])],
+ case file:write(Fd, FinalBin) of
+ ok ->
+ {reply, ok, File#file{eof=Pos+iolist_size(FinalBin)}};
+ Error ->
+ {reply, Error, File}
+ end;
+
+
+handle_call({upgrade_old_header, Prefix}, _From, #file{fd=Fd}=File) ->
+ case (catch read_old_header(Fd, Prefix)) of
+ {ok, Header} ->
+ TailAppendBegin = File#file.eof,
+ Bin = term_to_binary(Header),
+ Md5 = couch_util:md5(Bin),
+ % now we assemble the final header binary and write to disk
+ FinalBin = <<Md5/binary, Bin/binary>>,
+ {reply, ok, _} = handle_call({write_header, FinalBin}, ok, File),
+ ok = write_old_header(Fd, <<"upgraded">>, TailAppendBegin),
+ {reply, ok, File#file{tail_append_begin=TailAppendBegin}};
+ _Error ->
+ case (catch read_old_header(Fd, <<"upgraded">>)) of
+ {ok, TailAppendBegin} ->
+ {reply, ok, File#file{tail_append_begin = TailAppendBegin}};
+ _Error2 ->
+ {reply, ok, File}
+ end
+ end;
+
+
+handle_call(find_header, _From, #file{fd=Fd, eof=Pos}=File) ->
+ {reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}.
+
+% 09 UPGRADE CODE
+-define(HEADER_SIZE, 2048). % size of each segment of the doubly written header
+
+% 09 UPGRADE CODE
+read_old_header(Fd, Prefix) ->
+ {ok, Bin} = file:pread(Fd, 0, 2*(?HEADER_SIZE)),
+ <<Bin1:(?HEADER_SIZE)/binary, Bin2:(?HEADER_SIZE)/binary>> = Bin,
+ Result =
+ % read the first header
+ case extract_header(Prefix, Bin1) of
+ {ok, Header1} ->
+ case extract_header(Prefix, Bin2) of
+ {ok, Header2} ->
+ case Header1 == Header2 of
+ true ->
+ % Everything is completely normal!
+ {ok, Header1};
+ false ->
+ % To get here we must have two different header versions with signatures intact.
+ % It's weird but possible (a commit failure right at the 2k boundary). Log it and take the first.
+ ?LOG_INFO("Header version differences.~nPrimary Header: ~p~nSecondary Header: ~p", [Header1, Header2]),
+ {ok, Header1}
+ end;
+ Error ->
+ % error reading second header. It's ok, but log it.
+ ?LOG_INFO("Secondary header corruption (error: ~p). Using primary header.", [Error]),
+ {ok, Header1}
+ end;
+ Error ->
+ % error reading primary header
+ case extract_header(Prefix, Bin2) of
+ {ok, Header2} ->
+ % log corrupt primary header. It's ok since the secondary is still good.
+ ?LOG_INFO("Primary header corruption (error: ~p). Using secondary header.", [Error]),
+ {ok, Header2};
+ _ ->
+ % error reading secondary header too
+ % return the error, no need to log anything as the caller will be responsible for dealing with the error.
+ Error
+ end
+ end,
+ case Result of
+ {ok, {pointer_to_header_data, Ptr}} ->
+ pread_term(Fd, Ptr);
+ _ ->
+ Result
+ end.
+
+% 09 UPGRADE CODE
+extract_header(Prefix, Bin) ->
+ SizeOfPrefix = size(Prefix),
+ SizeOfTermBin = ?HEADER_SIZE -
+ SizeOfPrefix -
+ 16, % md5 sig
+
+ <<HeaderPrefix:SizeOfPrefix/binary, TermBin:SizeOfTermBin/binary, Sig:16/binary>> = Bin,
+
+ % check the header prefix
+ case HeaderPrefix of
+ Prefix ->
+ % check the integrity signature
+ case couch_util:md5(TermBin) == Sig of
+ true ->
+ Header = binary_to_term(TermBin),
+ {ok, Header};
+ false ->
+ header_corrupt
+ end;
+ _ ->
+ unknown_header_type
+ end.
+
+
+% 09 UPGRADE CODE
+write_old_header(Fd, Prefix, Data) ->
+ TermBin = term_to_binary(Data),
+ % the size of all the bytes written to the header, including the md5 signature (16 bytes)
+ FilledSize = byte_size(Prefix) + byte_size(TermBin) + 16,
+ {TermBin2, FilledSize2} =
+ case FilledSize > ?HEADER_SIZE of
+ true ->
+ % too big!
+ {ok, Pos} = append_binary(Fd, TermBin),
+ PtrBin = term_to_binary({pointer_to_header_data, Pos}),
+ {PtrBin, byte_size(Prefix) + byte_size(PtrBin) + 16};
+ false ->
+ {TermBin, FilledSize}
+ end,
+ ok = file:sync(Fd),
+ % pad out the header with zeros, then take the md5 hash
+ PadZeros = <<0:(8*(?HEADER_SIZE - FilledSize2))>>,
+ Sig = couch_util:md5([TermBin2, PadZeros]),
+ % now we assemble the final header binary and write to disk
+ WriteBin = <<Prefix/binary, TermBin2/binary, PadZeros/binary, Sig/binary>>,
+ ?HEADER_SIZE = size(WriteBin), % sanity check
+ DblWriteBin = [WriteBin, WriteBin],
+ ok = file:pwrite(Fd, 0, DblWriteBin),
+ ok = file:sync(Fd).
+
+
+handle_cast(close, Fd) ->
+ {stop,normal,Fd}.
+
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_info({'EXIT', _, normal}, Fd) ->
+ {noreply, Fd};
+handle_info({'EXIT', _, Reason}, Fd) ->
+ {stop, Reason, Fd}.
+
+
+find_header(_Fd, -1) ->
+ no_valid_header;
+find_header(Fd, Block) ->
+ case (catch load_header(Fd, Block)) of
+ {ok, Bin} ->
+ {ok, Bin};
+ _Error ->
+ find_header(Fd, Block -1)
+ end.
+
+load_header(Fd, Block) ->
+ {ok, <<1, HeaderLen:32/integer, RestBlock/binary>>} =
+ file:pread(Fd, Block * ?SIZE_BLOCK, ?SIZE_BLOCK),
+ TotalBytes = calculate_total_read_len(1, HeaderLen),
+ case TotalBytes > byte_size(RestBlock) of
+ false ->
+ <<RawBin:TotalBytes/binary, _/binary>> = RestBlock;
+ true ->
+ {ok, Missing} = file:pread(
+ Fd, (Block * ?SIZE_BLOCK) + 5 + byte_size(RestBlock),
+ TotalBytes - byte_size(RestBlock)),
+ RawBin = <<RestBlock/binary, Missing/binary>>
+ end,
+ <<Md5Sig:16/binary, HeaderBin/binary>> =
+ iolist_to_binary(remove_block_prefixes(1, RawBin)),
+ Md5Sig = couch_util:md5(HeaderBin),
+ {ok, HeaderBin}.
+
+maybe_read_more_iolist(Buffer, DataSize, _, _)
+ when DataSize =< byte_size(Buffer) ->
+ <<Data:DataSize/binary, _/binary>> = Buffer,
+ [Data];
+maybe_read_more_iolist(Buffer, DataSize, NextPos, File) ->
+ {Missing, _} =
+ read_raw_iolist_int(File, NextPos, DataSize - byte_size(Buffer)),
+ [Buffer, Missing].
+
+-spec read_raw_iolist_int(#file{}, Pos::non_neg_integer(), Len::non_neg_integer()) ->
+ {Data::iolist(), CurPos::non_neg_integer()}.
+read_raw_iolist_int(Fd, {Pos, _Size}, Len) -> % 0110 UPGRADE CODE
+ read_raw_iolist_int(Fd, Pos, Len);
+read_raw_iolist_int(#file{fd=Fd, tail_append_begin=TAB}, Pos, Len) ->
+ BlockOffset = Pos rem ?SIZE_BLOCK,
+ TotalBytes = calculate_total_read_len(BlockOffset, Len),
+ {ok, <<RawBin:TotalBytes/binary>>} = file:pread(Fd, Pos, TotalBytes),
+ if Pos >= TAB ->
+ {remove_block_prefixes(BlockOffset, RawBin), Pos + TotalBytes};
+ true ->
+ % 09 UPGRADE CODE
+ <<ReturnBin:Len/binary, _/binary>> = RawBin,
+ {[ReturnBin], Pos + Len}
+ end.
+
+-spec extract_md5(iolist()) -> {binary(), iolist()}.
+extract_md5(FullIoList) ->
+ {Md5List, IoList} = split_iolist(FullIoList, 16, []),
+ {iolist_to_binary(Md5List), IoList}.
+
+calculate_total_read_len(0, FinalLen) ->
+ calculate_total_read_len(1, FinalLen) + 1;
+calculate_total_read_len(BlockOffset, FinalLen) ->
+ case ?SIZE_BLOCK - BlockOffset of
+ BlockLeft when BlockLeft >= FinalLen ->
+ FinalLen;
+ BlockLeft ->
+ FinalLen + ((FinalLen - BlockLeft) div (?SIZE_BLOCK -1)) +
+ if ((FinalLen - BlockLeft) rem (?SIZE_BLOCK -1)) =:= 0 -> 0;
+ true -> 1 end
+ end.
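+
+% Worked example: every ?SIZE_BLOCK (4096) byte block starts with a one-byte
+% prefix, so raw reads must account for the prefixes they cross. Reading 5000
+% data bytes starting at block offset 0 costs 5002 raw bytes: one prefix at
+% offset 0, 4095 data bytes to the block boundary, a second prefix, then the
+% remaining 905 bytes.
+%
+%   5002 = calculate_total_read_len(0, 5000).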
+
+remove_block_prefixes(_BlockOffset, <<>>) ->
+ [];
+remove_block_prefixes(0, <<_BlockPrefix,Rest/binary>>) ->
+ remove_block_prefixes(1, Rest);
+remove_block_prefixes(BlockOffset, Bin) ->
+ BlockBytesAvailable = ?SIZE_BLOCK - BlockOffset,
+ case size(Bin) of
+ Size when Size > BlockBytesAvailable ->
+ <<DataBlock:BlockBytesAvailable/binary,Rest/binary>> = Bin,
+ [DataBlock | remove_block_prefixes(0, Rest)];
+ _Size ->
+ [Bin]
+ end.
+
+make_blocks(_BlockOffset, []) ->
+ [];
+make_blocks(0, IoList) ->
+ [<<0>> | make_blocks(1, IoList)];
+make_blocks(BlockOffset, IoList) ->
+ case split_iolist(IoList, (?SIZE_BLOCK - BlockOffset), []) of
+ {Begin, End} ->
+ [Begin | make_blocks(0, End)];
+ _SplitRemaining ->
+ IoList
+ end.
+
+%% @doc Returns a tuple where the first element contains the leading SplitAt
+%% bytes of the original iolist, and the second element is the tail. If SplitAt
+%% is larger than the byte size of IoList, returns the number of bytes by
+%% which SplitAt exceeds the iolist, i.e. how many bytes are still missing.
+-spec split_iolist(IoList::iolist(), SplitAt::non_neg_integer(), Acc::list()) ->
+ {iolist(), iolist()} | non_neg_integer().
+split_iolist(List, 0, BeginAcc) ->
+ {lists:reverse(BeginAcc), List};
+split_iolist([], SplitAt, _BeginAcc) ->
+ SplitAt;
+split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) when SplitAt > byte_size(Bin) ->
+ split_iolist(Rest, SplitAt - byte_size(Bin), [Bin | BeginAcc]);
+split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) ->
+ <<Begin:SplitAt/binary,End/binary>> = Bin,
+ split_iolist([End | Rest], 0, [Begin | BeginAcc]);
+split_iolist([Sublist| Rest], SplitAt, BeginAcc) when is_list(Sublist) ->
+ case split_iolist(Sublist, SplitAt, BeginAcc) of
+ {Begin, End} ->
+ {Begin, [End | Rest]};
+ SplitRemaining ->
+ split_iolist(Rest, SplitAt - (SplitAt - SplitRemaining), [Sublist | BeginAcc])
+ end;
+split_iolist([Byte | Rest], SplitAt, BeginAcc) when is_integer(Byte) ->
+ split_iolist(Rest, SplitAt - 1, [Byte | BeginAcc]).
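+
+% Behaviour sketch:
+%
+%   {[<<"abcd">>], [<<"ef">>]} = split_iolist([<<"abcdef">>], 4, []),
+%   3 = split_iolist([<<"ab">>], 5, []).   % 3 bytes short of SplitAt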
diff --git a/1.1.x/src/couchdb/couch_httpd.erl b/1.1.x/src/couchdb/couch_httpd.erl
new file mode 100644
index 00000000..73d214e8
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd.erl
@@ -0,0 +1,997 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd).
+-include("couch_db.hrl").
+
+-export([start_link/0, start_link/1, stop/0, handle_request/5]).
+
+-export([header_value/2,header_value/3,qs_value/2,qs_value/3,qs/1,qs_json_value/3]).
+-export([path/1,absolute_uri/2,body_length/1]).
+-export([verify_is_server_admin/1,unquote/1,quote/1,recv/2,recv_chunked/4,error_info/1]).
+-export([make_fun_spec_strs/1]).
+-export([make_arity_1_fun/1, make_arity_2_fun/1, make_arity_3_fun/1]).
+-export([parse_form/1,json_body/1,json_body_obj/1,body/1,doc_etag/1, make_etag/1, etag_respond/3]).
+-export([primary_header_value/2,partition/1,serve_file/3,serve_file/4, server_header/0]).
+-export([start_chunked_response/3,send_chunk/2,log_request/2]).
+-export([start_response_length/4, start_response/3, send/2]).
+-export([start_json_response/2, start_json_response/3, end_json_response/1]).
+-export([send_response/4,send_method_not_allowed/2,send_error/4, send_redirect/2,send_chunked_error/2]).
+-export([send_json/2,send_json/3,send_json/4,last_chunk/1,parse_multipart_request/3]).
+-export([accepted_encodings/1,handle_request_int/5,validate_referer/1,validate_ctype/2]).
+
+start_link() ->
+ start_link(http).
+start_link(http) ->
+ Port = couch_config:get("httpd", "port", "5984"),
+ start_link(?MODULE, [{port, Port}]);
+start_link(https) ->
+ Port = couch_config:get("ssl", "port", "6984"),
+ CertFile = couch_config:get("ssl", "cert_file", nil),
+ KeyFile = couch_config:get("ssl", "key_file", nil),
+ Options = case CertFile /= nil andalso KeyFile /= nil of
+ true ->
+ [{port, Port},
+ {ssl, true},
+ {ssl_opts, [
+ {certfile, CertFile},
+ {keyfile, KeyFile}]}];
+ false ->
+ io:format("SSL enabled but PEM certificates are missing.", []),
+ throw({error, missing_certs})
+ end,
+ start_link(https, Options).
+start_link(Name, Options) ->
+ % read config and register for configuration changes
+
+    % just stop if one of the config settings changes. couch_server_sup
+ % will restart us and then we will pick up the new settings.
+
+ BindAddress = couch_config:get("httpd", "bind_address", any),
+ DefaultSpec = "{couch_httpd_db, handle_request}",
+ DefaultFun = make_arity_1_fun(
+ couch_config:get("httpd", "default_handler", DefaultSpec)
+ ),
+
+ UrlHandlersList = lists:map(
+ fun({UrlKey, SpecStr}) ->
+ {?l2b(UrlKey), make_arity_1_fun(SpecStr)}
+ end, couch_config:get("httpd_global_handlers")),
+
+ DbUrlHandlersList = lists:map(
+ fun({UrlKey, SpecStr}) ->
+ {?l2b(UrlKey), make_arity_2_fun(SpecStr)}
+ end, couch_config:get("httpd_db_handlers")),
+
+ DesignUrlHandlersList = lists:map(
+ fun({UrlKey, SpecStr}) ->
+ {?l2b(UrlKey), make_arity_3_fun(SpecStr)}
+ end, couch_config:get("httpd_design_handlers")),
+
+ UrlHandlers = dict:from_list(UrlHandlersList),
+ DbUrlHandlers = dict:from_list(DbUrlHandlersList),
+ DesignUrlHandlers = dict:from_list(DesignUrlHandlersList),
+ {ok, ServerOptions} = couch_util:parse_term(
+ couch_config:get("httpd", "server_options", "[]")),
+ {ok, SocketOptions} = couch_util:parse_term(
+ couch_config:get("httpd", "socket_options", "[]")),
+ Loop = fun(Req)->
+ case SocketOptions of
+ [] ->
+ ok;
+ _ ->
+ ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
+ end,
+ apply(?MODULE, handle_request, [
+ Req, DefaultFun, UrlHandlers, DbUrlHandlers, DesignUrlHandlers
+ ])
+ end,
+
+ % and off we go
+
+ {ok, Pid} = case mochiweb_http:start(Options ++ ServerOptions ++ [
+ {loop, Loop},
+ {name, Name},
+ {ip, BindAddress}
+ ]) of
+ {ok, MochiPid} -> {ok, MochiPid};
+ {error, Reason} ->
+ io:format("Failure to start Mochiweb: ~s~n",[Reason]),
+ throw({error, Reason})
+ end,
+
+ ok = couch_config:register(
+ fun("httpd", "bind_address") ->
+ ?MODULE:stop();
+ ("httpd", "port") ->
+ ?MODULE:stop();
+ ("httpd", "default_handler") ->
+ ?MODULE:stop();
+ ("httpd", "server_options") ->
+ ?MODULE:stop();
+ ("httpd", "socket_options") ->
+ ?MODULE:stop();
+ ("httpd_global_handlers", _) ->
+ ?MODULE:stop();
+ ("httpd_db_handlers", _) ->
+ ?MODULE:stop();
+ ("vhosts", _) ->
+ ?MODULE:stop();
+ ("ssl", _) ->
+ ?MODULE:stop()
+ end, Pid),
+
+ {ok, Pid}.
+
+% SpecStr is a string like "{my_module, my_fun}"
+% or "{my_module, my_fun, <<"my_arg">>}"
+make_arity_1_fun(SpecStr) ->
+ case couch_util:parse_term(SpecStr) of
+ {ok, {Mod, Fun, SpecArg}} ->
+ fun(Arg) -> Mod:Fun(Arg, SpecArg) end;
+ {ok, {Mod, Fun}} ->
+ fun(Arg) -> Mod:Fun(Arg) end
+ end.
+
+make_arity_2_fun(SpecStr) ->
+ case couch_util:parse_term(SpecStr) of
+ {ok, {Mod, Fun, SpecArg}} ->
+ fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end;
+ {ok, {Mod, Fun}} ->
+ fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end
+ end.
+
+make_arity_3_fun(SpecStr) ->
+ case couch_util:parse_term(SpecStr) of
+ {ok, {Mod, Fun, SpecArg}} ->
+ fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end;
+ {ok, {Mod, Fun}} ->
+ fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end
+ end.
+
+% SpecStr is "{my_module, my_fun}, {my_module2, my_fun2}"
+make_fun_spec_strs(SpecStr) ->
+ re:split(SpecStr, "(?<=})\\s*,\\s*(?={)", [{return, list}]).
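+
+% Sketch of how the spec strings are used (the mod_a/mod_b names below are
+% illustrative):
+%
+%   Fun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
+%   % Fun(Req) now calls couch_httpd_db:handle_request(Req).
+%   ["{mod_a, fun_a}", "{mod_b, fun_b}"] =
+%       make_fun_spec_strs("{mod_a, fun_a}, {mod_b, fun_b}").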
+
+stop() ->
+ mochiweb_http:stop(?MODULE).
+
+
+handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
+ DesignUrlHandlers) ->
+
+ MochiReq1 = couch_httpd_vhost:match_vhost(MochiReq),
+ handle_request_int(MochiReq1, DefaultFun,
+ UrlHandlers, DbUrlHandlers, DesignUrlHandlers).
+
+handle_request_int(MochiReq, DefaultFun,
+ UrlHandlers, DbUrlHandlers, DesignUrlHandlers) ->
+ Begin = now(),
+ AuthenticationSrcs = make_fun_spec_strs(
+ couch_config:get("httpd", "authentication_handlers")),
+ % for the path, use the raw path with the query string and fragment
+ % removed, but URL quoting left intact
+ RawUri = MochiReq:get(raw_path),
+ {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
+
+ Headers = MochiReq:get(headers),
+
+ % get requested path
+ RequestedPath = case MochiReq:get_header_value("x-couchdb-vhost-path") of
+ undefined -> RawUri;
+ P -> P
+ end,
+
+ HandlerKey =
+ case mochiweb_util:partition(Path, "/") of
+ {"", "", ""} ->
+ <<"/">>; % Special case the root url handler
+ {FirstPart, _, _} ->
+ list_to_binary(FirstPart)
+ end,
+ ?LOG_DEBUG("~p ~s ~p from ~p~nHeaders: ~p", [
+ MochiReq:get(method),
+ RawUri,
+ MochiReq:get(version),
+ MochiReq:get(peer),
+ mochiweb_headers:to_list(MochiReq:get(headers))
+ ]),
+
+ Method1 =
+ case MochiReq:get(method) of
+ % already an atom
+ Meth when is_atom(Meth) -> Meth;
+
+        % Non-standard HTTP verbs aren't atoms (COPY, MOVE, etc.) so convert when
+        % possible (if any module already references the atom, it exists).
+ Meth -> couch_util:to_existing_atom(Meth)
+ end,
+ increment_method_stats(Method1),
+
+ % allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-METHOD-OVERRIDE header
+ MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
+ Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "COPY"]) of
+ true ->
+ ?LOG_INFO("MethodOverride: ~s (real method was ~s)", [MethodOverride, Method1]),
+ case Method1 of
+ 'POST' -> couch_util:to_existing_atom(MethodOverride);
+ _ ->
+ % Ignore X-HTTP-Method-Override when the original verb isn't POST.
+ % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
+ % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
+ Method1
+ end;
+ _ -> Method1
+ end,
+
+ % alias HEAD to GET as mochiweb takes care of stripping the body
+ Method = case Method2 of
+ 'HEAD' -> 'GET';
+ Other -> Other
+ end,
+
+ HttpReq = #httpd{
+ mochi_req = MochiReq,
+ peer = MochiReq:get(peer),
+ method = Method,
+ requested_path_parts = [list_to_binary(couch_httpd:unquote(Part))
+ || Part <- string:tokens(RequestedPath, "/")],
+ path_parts = [list_to_binary(couch_httpd:unquote(Part))
+ || Part <- string:tokens(Path, "/")],
+ db_url_handlers = DbUrlHandlers,
+ design_url_handlers = DesignUrlHandlers,
+ default_fun = DefaultFun,
+ url_handlers = UrlHandlers
+ },
+
+ HandlerFun = couch_util:dict_find(HandlerKey, UrlHandlers, DefaultFun),
+
+ {ok, Resp} =
+ try
+ case authenticate_request(HttpReq, AuthenticationSrcs) of
+ #httpd{} = Req ->
+ HandlerFun(Req);
+ Response ->
+ Response
+ end
+ catch
+ throw:{http_head_abort, Resp0} ->
+ {ok, Resp0};
+ throw:{invalid_json, S} ->
+ ?LOG_ERROR("attempted upload of invalid JSON (set log_level to debug to log it)", []),
+ ?LOG_DEBUG("Invalid JSON: ~p",[S]),
+ send_error(HttpReq, {bad_request, io_lib:format("invalid UTF-8 JSON: ~p",[S])});
+ throw:unacceptable_encoding ->
+ ?LOG_ERROR("unsupported encoding method for the response", []),
+ send_error(HttpReq, {not_acceptable, "unsupported encoding"});
+ throw:bad_accept_encoding_value ->
+ ?LOG_ERROR("received invalid Accept-Encoding header", []),
+ send_error(HttpReq, bad_request);
+ exit:normal ->
+ exit(normal);
+ throw:Error ->
+ ?LOG_DEBUG("Minor error in HTTP request: ~p",[Error]),
+ ?LOG_DEBUG("Stacktrace: ~p",[erlang:get_stacktrace()]),
+ send_error(HttpReq, Error);
+ error:badarg ->
+ ?LOG_ERROR("Badarg error in HTTP request",[]),
+ ?LOG_INFO("Stacktrace: ~p",[erlang:get_stacktrace()]),
+ send_error(HttpReq, badarg);
+ error:function_clause ->
+ ?LOG_ERROR("function_clause error in HTTP request",[]),
+ ?LOG_INFO("Stacktrace: ~p",[erlang:get_stacktrace()]),
+ send_error(HttpReq, function_clause);
+ Tag:Error ->
+ ?LOG_ERROR("Uncaught error in HTTP request: ~p",[{Tag, Error}]),
+ ?LOG_INFO("Stacktrace: ~p",[erlang:get_stacktrace()]),
+ send_error(HttpReq, Error)
+ end,
+ RequestTime = round(timer:now_diff(now(), Begin)/1000),
+ couch_stats_collector:record({couchdb, request_time}, RequestTime),
+ couch_stats_collector:increment({httpd, requests}),
+ {ok, Resp}.
+
+% Try the authentication handlers in order until one sets a user_ctx.
+% The auth funs also have the option of returning a response directly.
+% TODO: move this to couch_httpd_auth?
+authenticate_request(#httpd{user_ctx=#user_ctx{}} = Req, _AuthSrcs) ->
+ Req;
+authenticate_request(#httpd{} = Req, []) ->
+ case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
+ "true" ->
+ throw({unauthorized, <<"Authentication required.">>});
+ "false" ->
+ Req#httpd{user_ctx=#user_ctx{}}
+ end;
+authenticate_request(#httpd{} = Req, [AuthSrc|Rest]) ->
+ AuthFun = make_arity_1_fun(AuthSrc),
+ R = case AuthFun(Req) of
+ #httpd{user_ctx=#user_ctx{}=UserCtx}=Req2 ->
+ Req2#httpd{user_ctx=UserCtx#user_ctx{handler=?l2b(AuthSrc)}};
+ Else -> Else
+ end,
+ authenticate_request(R, Rest);
+authenticate_request(Response, _AuthSrcs) ->
+ Response.
+
+increment_method_stats(Method) ->
+ couch_stats_collector:increment({httpd_request_methods, Method}).
+
+validate_referer(Req) ->
+ Host = host_for_request(Req),
+ Referer = header_value(Req, "Referer", fail),
+ case Referer of
+ fail ->
+ throw({bad_request, <<"Referer header required.">>});
+ Referer ->
+ {_,RefererHost,_,_,_} = mochiweb_util:urlsplit(Referer),
+ if
+ RefererHost =:= Host -> ok;
+ true -> throw({bad_request, <<"Referer header must match host.">>})
+ end
+ end.
+
+validate_ctype(Req, Ctype) ->
+ case couch_httpd:header_value(Req, "Content-Type") of
+ undefined ->
+ throw({bad_ctype, "Content-Type must be "++Ctype});
+ ReqCtype ->
+ case re:split(ReqCtype, ";", [{return, list}]) of
+ [Ctype] -> ok;
+ [Ctype, _Rest] -> ok;
+ _Else ->
+ throw({bad_ctype, "Content-Type must be "++Ctype})
+ end
+ end.
+
+% Utilities
+
+partition(Path) ->
+ mochiweb_util:partition(Path, "/").
+
+header_value(#httpd{mochi_req=MochiReq}, Key) ->
+ MochiReq:get_header_value(Key).
+
+header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
+ case MochiReq:get_header_value(Key) of
+ undefined -> Default;
+ Value -> Value
+ end.
+
+primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
+ MochiReq:get_primary_header_value(Key).
+
+accepted_encodings(#httpd{mochi_req=MochiReq}) ->
+ case MochiReq:accepted_encodings(["gzip", "identity"]) of
+ bad_accept_encoding_value ->
+ throw(bad_accept_encoding_value);
+ [] ->
+ throw(unacceptable_encoding);
+ EncList ->
+ EncList
+ end.
+
+serve_file(Req, RelativePath, DocumentRoot) ->
+ serve_file(Req, RelativePath, DocumentRoot, []).
+
+serve_file(#httpd{mochi_req=MochiReq}=Req, RelativePath, DocumentRoot, ExtraHeaders) ->
+ {ok, MochiReq:serve_file(RelativePath, DocumentRoot,
+ server_header() ++ couch_httpd_auth:cookie_auth_header(Req, []) ++ ExtraHeaders)}.
+
+qs_value(Req, Key) ->
+ qs_value(Req, Key, undefined).
+
+qs_value(Req, Key, Default) ->
+ couch_util:get_value(Key, qs(Req), Default).
+
+qs_json_value(Req, Key, Default) ->
+ case qs_value(Req, Key, Default) of
+ Default ->
+ Default;
+ Result ->
+ ?JSON_DECODE(Result)
+ end.
+
+qs(#httpd{mochi_req=MochiReq}) ->
+ MochiReq:parse_qs().
+
+path(#httpd{mochi_req=MochiReq}) ->
+ MochiReq:get(path).
+
+host_for_request(#httpd{mochi_req=MochiReq}) ->
+ XHost = couch_config:get("httpd", "x_forwarded_host", "X-Forwarded-Host"),
+ case MochiReq:get_header_value(XHost) of
+ undefined ->
+ case MochiReq:get_header_value("Host") of
+ undefined ->
+ {ok, {Address, Port}} = inet:sockname(MochiReq:get(socket)),
+ inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
+ Value1 ->
+ Value1
+ end;
+ Value -> Value
+ end.
+
+absolute_uri(#httpd{mochi_req=MochiReq}=Req, Path) ->
+ Host = host_for_request(Req),
+ XSsl = couch_config:get("httpd", "x_forwarded_ssl", "X-Forwarded-Ssl"),
+ Scheme = case MochiReq:get_header_value(XSsl) of
+ "on" -> "https";
+ _ ->
+ XProto = couch_config:get("httpd", "x_forwarded_proto", "X-Forwarded-Proto"),
+ case MochiReq:get_header_value(XProto) of
+ %% Restrict to "https" and "http" schemes only
+ "https" -> "https";
+ _ -> case MochiReq:get(scheme) of
+ https -> "https";
+ http -> "http"
+ end
+ end
+ end,
+ Scheme ++ "://" ++ Host ++ Path.
+
+unquote(UrlEncodedString) ->
+ mochiweb_util:unquote(UrlEncodedString).
+
+quote(UrlDecodedString) ->
+ mochiweb_util:quote_plus(UrlDecodedString).
+
+parse_form(#httpd{mochi_req=MochiReq}) ->
+ mochiweb_multipart:parse_form(MochiReq).
+
+recv(#httpd{mochi_req=MochiReq}, Len) ->
+ MochiReq:recv(Len).
+
+recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
+    % ChunkFun is called once for each chunk, as
+    % ChunkFun({Length, Binary}, State);
+    % it is called with Length == 0 for the final chunk.
+ MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
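+
+% Sketch of a ChunkFun that accumulates the request body (names are
+% illustrative; mochiweb hands the final call a Length of 0):
+%
+%   ChunkFun = fun({0, _Footers}, Acc) -> iolist_to_binary(lists:reverse(Acc));
+%                 ({_Len, Bin}, Acc) -> [Bin | Acc]
+%              end,
+%   Body = recv_chunked(Req, 8192, ChunkFun, []).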
+
+body_length(Req) ->
+ case header_value(Req, "Transfer-Encoding") of
+ undefined ->
+ case header_value(Req, "Content-Length") of
+ undefined -> undefined;
+ Length -> list_to_integer(Length)
+ end;
+ "chunked" -> chunked;
+ Unknown -> {unknown_transfer_encoding, Unknown}
+ end.
+
+body(#httpd{mochi_req=MochiReq, req_body=ReqBody}) ->
+ case ReqBody of
+ undefined ->
+ % Maximum size of document PUT request body (4GB)
+ MaxSize = list_to_integer(
+ couch_config:get("couchdb", "max_document_size", "4294967296")),
+ MochiReq:recv_body(MaxSize);
+ _Else ->
+ ReqBody
+ end.
+
+json_body(Httpd) ->
+ ?JSON_DECODE(body(Httpd)).
+
+json_body_obj(Httpd) ->
+ case json_body(Httpd) of
+ {Props} -> {Props};
+ _Else ->
+ throw({bad_request, "Request body must be a JSON object"})
+ end.
+
+
+
+doc_etag(#doc{revs={Start, [DiskRev|_]}}) ->
+ "\"" ++ ?b2l(couch_doc:rev_to_str({Start, DiskRev})) ++ "\"".
+
+make_etag(Term) ->
+ <<SigInt:128/integer>> = couch_util:md5(term_to_binary(Term)),
+ list_to_binary("\"" ++ lists:flatten(io_lib:format("~.36B",[SigInt])) ++ "\"").
+
+etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
+ etag_match(Req, binary_to_list(CurrentEtag));
+
+etag_match(Req, CurrentEtag) ->
+ EtagsToMatch = string:tokens(
+ couch_httpd:header_value(Req, "If-None-Match", ""), ", "),
+ lists:member(CurrentEtag, EtagsToMatch).
+
+etag_respond(Req, CurrentEtag, RespFun) ->
+ case etag_match(Req, CurrentEtag) of
+ true ->
+ % the client has this in their cache.
+ couch_httpd:send_response(Req, 304, [{"Etag", CurrentEtag}], <<>>);
+ false ->
+ % Run the function.
+ RespFun()
+ end.
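+
+% Usage sketch inside a handler (Sig and UpdateSeq are illustrative values the
+% caller would already have):
+%
+%   Etag = couch_httpd:make_etag({Sig, UpdateSeq}),
+%   couch_httpd:etag_respond(Req, Etag, fun() ->
+%       couch_httpd:send_json(Req, 200, [{"Etag", Etag}], {[{<<"ok">>, true}]})
+%   end).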
+
+verify_is_server_admin(#httpd{user_ctx=UserCtx}) ->
+ verify_is_server_admin(UserCtx);
+verify_is_server_admin(#user_ctx{roles=Roles}) ->
+ case lists:member(<<"_admin">>, Roles) of
+ true -> ok;
+ false -> throw({unauthorized, <<"You are not a server admin.">>})
+ end.
+
+log_request(#httpd{mochi_req=MochiReq,peer=Peer}, Code) ->
+ ?LOG_INFO("~s - - ~p ~s ~B", [
+ Peer,
+ couch_util:to_existing_atom(MochiReq:get(method)),
+ MochiReq:get(raw_path),
+ couch_util:to_integer(Code)
+ ]).
+
+
+start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Length) ->
+ log_request(Req, Code),
+ couch_stats_collector:increment({httpd_status_codes, Code}),
+ Resp = MochiReq:start_response_length({Code, Headers ++ server_header() ++ couch_httpd_auth:cookie_auth_header(Req, Headers), Length}),
+ case MochiReq:get(method) of
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
+ end,
+ {ok, Resp}.
+
+start_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
+ log_request(Req, Code),
+    couch_stats_collector:increment({httpd_status_codes, Code}),
+ CookieHeader = couch_httpd_auth:cookie_auth_header(Req, Headers),
+ Headers2 = Headers ++ server_header() ++ CookieHeader,
+ Resp = MochiReq:start_response({Code, Headers2}),
+ case MochiReq:get(method) of
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
+ end,
+ {ok, Resp}.
+
+send(Resp, Data) ->
+ Resp:send(Data),
+ {ok, Resp}.
+
+no_resp_conn_header([]) ->
+ true;
+no_resp_conn_header([{Hdr, _}|Rest]) ->
+ case string:to_lower(Hdr) of
+ "connection" -> false;
+ _ -> no_resp_conn_header(Rest)
+ end.
+
+http_1_0_keep_alive(Req, Headers) ->
+ KeepOpen = Req:should_close() == false,
+ IsHttp10 = Req:get(version) == {1, 0},
+ NoRespHeader = no_resp_conn_header(Headers),
+ case KeepOpen andalso IsHttp10 andalso NoRespHeader of
+ true -> [{"Connection", "Keep-Alive"} | Headers];
+ false -> Headers
+ end.
+
+start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
+ log_request(Req, Code),
+ couch_stats_collector:increment({httpd_status_codes, Code}),
+ Headers2 = http_1_0_keep_alive(MochiReq, Headers),
+ Resp = MochiReq:respond({Code, Headers2 ++ server_header() ++ couch_httpd_auth:cookie_auth_header(Req, Headers2), chunked}),
+ case MochiReq:get(method) of
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
+ end,
+ {ok, Resp}.
+
+send_chunk(Resp, Data) ->
+ case iolist_size(Data) of
+ 0 -> ok; % do nothing
+ _ -> Resp:write_chunk(Data)
+ end,
+ {ok, Resp}.
+
+last_chunk(Resp) ->
+ Resp:write_chunk([]),
+ {ok, Resp}.
+
+send_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) ->
+ log_request(Req, Code),
+ couch_stats_collector:increment({httpd_status_codes, Code}),
+ Headers2 = http_1_0_keep_alive(MochiReq, Headers),
+ if Code >= 400 ->
+ ?LOG_DEBUG("httpd ~p error response:~n ~s", [Code, Body]);
+ true -> ok
+ end,
+ {ok, MochiReq:respond({Code, Headers2 ++ server_header() ++ couch_httpd_auth:cookie_auth_header(Req, Headers2), Body})}.
+
+send_method_not_allowed(Req, Methods) ->
+ send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>, ?l2b("Only " ++ Methods ++ " allowed")).
+
+send_json(Req, Value) ->
+ send_json(Req, 200, Value).
+
+send_json(Req, Code, Value) ->
+ send_json(Req, Code, [], Value).
+
+send_json(Req, Code, Headers, Value) ->
+ DefaultHeaders = [
+ {"Content-Type", negotiate_content_type(Req)},
+ {"Cache-Control", "must-revalidate"}
+ ],
+ Body = [start_jsonp(Req), ?JSON_ENCODE(Value), end_jsonp(), $\n],
+ send_response(Req, Code, DefaultHeaders ++ Headers, Body).
+
+start_json_response(Req, Code) ->
+ start_json_response(Req, Code, []).
+
+start_json_response(Req, Code, Headers) ->
+ DefaultHeaders = [
+ {"Content-Type", negotiate_content_type(Req)},
+ {"Cache-Control", "must-revalidate"}
+ ],
+    start_jsonp(Req), % Validate the callback before starting the chunked response.
+ {ok, Resp} = start_chunked_response(Req, Code, DefaultHeaders ++ Headers),
+ case start_jsonp(Req) of
+ [] -> ok;
+ Start -> send_chunk(Resp, Start)
+ end,
+ {ok, Resp}.
+
+end_json_response(Resp) ->
+ send_chunk(Resp, end_jsonp() ++ [$\n]),
+ last_chunk(Resp).
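+
+% Sketch of the chunked JSON pattern these functions support (Row is an
+% illustrative value):
+%
+%   {ok, Resp} = start_json_response(Req, 200),
+%   send_chunk(Resp, "{\"rows\":["),
+%   send_chunk(Resp, ?JSON_ENCODE(Row)),
+%   send_chunk(Resp, "]}"),
+%   end_json_response(Resp).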
+
+start_jsonp(Req) ->
+ case get(jsonp) of
+ undefined -> put(jsonp, qs_value(Req, "callback", no_jsonp));
+ _ -> ok
+ end,
+ case get(jsonp) of
+ no_jsonp -> [];
+ [] -> [];
+ CallBack ->
+ try
+ % make sure jsonp is configured on (default off)
+ case couch_config:get("httpd", "allow_jsonp", "false") of
+ "true" ->
+ validate_callback(CallBack),
+ CallBack ++ "(";
+ _Else ->
+ % this could throw an error message, but instead we just ignore the
+ % jsonp parameter
+ % throw({bad_request, <<"JSONP must be configured before using.">>})
+ put(jsonp, no_jsonp),
+ []
+ end
+ catch
+ Error ->
+ put(jsonp, no_jsonp),
+ throw(Error)
+ end
+ end.
+
+end_jsonp() ->
+ Resp = case get(jsonp) of
+ no_jsonp -> [];
+ [] -> [];
+ _ -> ");"
+ end,
+ put(jsonp, undefined),
+ Resp.
+
+validate_callback(CallBack) when is_binary(CallBack) ->
+ validate_callback(binary_to_list(CallBack));
+validate_callback([]) ->
+ ok;
+validate_callback([Char | Rest]) ->
+ case Char of
+ _ when Char >= $a andalso Char =< $z -> ok;
+ _ when Char >= $A andalso Char =< $Z -> ok;
+ _ when Char >= $0 andalso Char =< $9 -> ok;
+ _ when Char == $. -> ok;
+ _ when Char == $_ -> ok;
+ _ when Char == $[ -> ok;
+ _ when Char == $] -> ok;
+ _ ->
+ throw({bad_request, invalid_callback})
+ end,
+ validate_callback(Rest).
+
+
+error_info({Error, Reason}) when is_list(Reason) ->
+ error_info({Error, ?l2b(Reason)});
+error_info(bad_request) ->
+ {400, <<"bad_request">>, <<>>};
+error_info({bad_request, Reason}) ->
+ {400, <<"bad_request">>, Reason};
+error_info({query_parse_error, Reason}) ->
+ {400, <<"query_parse_error">>, Reason};
+% Prior art for md5 mismatch resulting in a 400 is from AWS S3
+error_info(md5_mismatch) ->
+ {400, <<"content_md5_mismatch">>, <<"Possible message corruption.">>};
+error_info(not_found) ->
+ {404, <<"not_found">>, <<"missing">>};
+error_info({not_found, Reason}) ->
+ {404, <<"not_found">>, Reason};
+error_info({not_acceptable, Reason}) ->
+ {406, <<"not_acceptable">>, Reason};
+error_info(conflict) ->
+ {409, <<"conflict">>, <<"Document update conflict.">>};
+error_info({forbidden, Msg}) ->
+ {403, <<"forbidden">>, Msg};
+error_info({unauthorized, Msg}) ->
+ {401, <<"unauthorized">>, Msg};
+error_info(file_exists) ->
+ {412, <<"file_exists">>, <<"The database could not be "
+ "created, the file already exists.">>};
+error_info({bad_ctype, Reason}) ->
+ {415, <<"bad_content_type">>, Reason};
+error_info(requested_range_not_satisfiable) ->
+ {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
+error_info({error, illegal_database_name}) ->
+ {400, <<"illegal_database_name">>, <<"Only lowercase characters (a-z), "
+ "digits (0-9), and any of the characters _, $, (, ), +, -, and / "
+ "are allowed. Must begin with a letter.">>};
+error_info({missing_stub, Reason}) ->
+ {412, <<"missing_stub">>, Reason};
+error_info({Error, Reason}) ->
+ {500, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
+error_info(Error) ->
+ {500, <<"unknown_error">>, couch_util:to_binary(Error)}.
+
+error_headers(#httpd{mochi_req=MochiReq}=Req, Code, ErrorStr, ReasonStr) ->
+ if Code == 401 ->
+ % this is where the basic auth popup is triggered
+ case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
+ undefined ->
+ case couch_config:get("httpd", "WWW-Authenticate", nil) of
+ nil ->
+                % If the client is a browser and the basic auth popup isn't turned on,
+                % redirect to the session page.
+ case ErrorStr of
+ <<"unauthorized">> ->
+ case couch_config:get("couch_httpd_auth", "authentication_redirect", nil) of
+ nil -> {Code, []};
+ AuthRedirect ->
+ case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
+ "true" ->
+                            % always send the browser popup header when require_valid_user is set
+ {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
+ _False ->
+ case MochiReq:accepts_content_type("text/html") of
+ false ->
+ {Code, []};
+ true ->
+ % Redirect to the path the user requested, not
+ % the one that is used internally.
+ UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of
+ undefined ->
+ MochiReq:get(path);
+ VHostPath ->
+ VHostPath
+ end,
+ RedirectLocation = lists:flatten([
+ AuthRedirect,
+ "?return=", couch_util:url_encode(UrlReturnRaw),
+ "&reason=", couch_util:url_encode(ReasonStr)
+ ]),
+ {302, [{"Location", absolute_uri(Req, RedirectLocation)}]}
+ end
+ end
+ end;
+ _Else ->
+ {Code, []}
+ end;
+ Type ->
+ {Code, [{"WWW-Authenticate", Type}]}
+ end;
+ Type ->
+ {Code, [{"WWW-Authenticate", Type}]}
+ end;
+ true ->
+ {Code, []}
+ end.
+
+send_error(_Req, {already_sent, Resp, _Error}) ->
+ {ok, Resp};
+
+send_error(Req, Error) ->
+ {Code, ErrorStr, ReasonStr} = error_info(Error),
+ {Code1, Headers} = error_headers(Req, Code, ErrorStr, ReasonStr),
+ send_error(Req, Code1, Headers, ErrorStr, ReasonStr).
+
+send_error(Req, Code, ErrorStr, ReasonStr) ->
+ send_error(Req, Code, [], ErrorStr, ReasonStr).
+
+send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
+ send_json(Req, Code, Headers,
+ {[{<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}]}).
+
+% give the option for list functions to output html or other raw errors
+send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
+ send_chunk(Resp, Reason),
+ last_chunk(Resp);
+
+send_chunked_error(Resp, Error) ->
+ {Code, ErrorStr, ReasonStr} = error_info(Error),
+ JsonError = {[{<<"code">>, Code},
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}]},
+ send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
+ last_chunk(Resp).
+
+send_redirect(Req, Path) ->
+ Headers = [{"Location", couch_httpd:absolute_uri(Req, Path)}],
+ send_response(Req, 301, Headers, <<>>).
+
+negotiate_content_type(#httpd{mochi_req=MochiReq}) ->
+ %% Determine the appropriate Content-Type header for a JSON response
+ %% depending on the Accept header in the request. A request that explicitly
+ %% lists the correct JSON MIME type will get that type, otherwise the
+ %% response will have the generic MIME type "text/plain"
+ AcceptedTypes = case MochiReq:get_header_value("Accept") of
+ undefined -> [];
+ AcceptHeader -> string:tokens(AcceptHeader, ", ")
+ end,
+ case lists:member("application/json", AcceptedTypes) of
+ true -> "application/json";
+ false -> "text/plain;charset=utf-8"
+ end.
+
+server_header() ->
+ [{"Server", "CouchDB/" ++ couch_server:get_version() ++
+ " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"}].
+
+
+-record(mp, {boundary, buffer, data_fun, callback}).
+
+
+parse_multipart_request(ContentType, DataFun, Callback) ->
+ Boundary0 = iolist_to_binary(get_boundary(ContentType)),
+ Boundary = <<"\r\n--", Boundary0/binary>>,
+ Mp = #mp{boundary= Boundary,
+ buffer= <<>>,
+ data_fun=DataFun,
+ callback=Callback},
+ {Mp2, _NilCallback} = read_until(Mp, <<"--", Boundary0/binary>>,
+ fun(Next)-> nil_callback(Next) end),
+ #mp{buffer=Buffer, data_fun=DataFun2, callback=Callback2} =
+ parse_part_header(Mp2),
+ {Buffer, DataFun2, Callback2}.
+
+nil_callback(_Data)->
+ fun(Next) -> nil_callback(Next) end.
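+
+% The user callback is invoked once per parser event and must return the fun
+% to use for the next event; the per-part sequence is {headers, Headers},
+% {body, Data} (repeated), body_end, and finally eof. A sketch that just logs
+% the events:
+%
+%   log_events(eof) ->
+%       ok;
+%   log_events(Event) ->
+%       ?LOG_DEBUG("multipart event: ~p", [Event]),
+%       fun log_events/1.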
+
+get_boundary({"multipart/" ++ _, Opts}) ->
+ case couch_util:get_value("boundary", Opts) of
+ S when is_list(S) ->
+ S
+ end;
+get_boundary(ContentType) ->
+ {"multipart/" ++ _ , Opts} = mochiweb_util:parse_header(ContentType),
+ get_boundary({"multipart/", Opts}).
+
+
+
+split_header(<<>>) ->
+ [];
+split_header(Line) ->
+ {Name, [$: | Value]} = lists:splitwith(fun (C) -> C =/= $: end,
+ binary_to_list(Line)),
+ [{string:to_lower(string:strip(Name)),
+ mochiweb_util:parse_header(Value)}].
+
+read_until(#mp{data_fun=DataFun, buffer=Buffer}=Mp, Pattern, Callback) ->
+ case find_in_binary(Pattern, Buffer) of
+ not_found ->
+ Callback2 = Callback(Buffer),
+ {Buffer2, DataFun2} = DataFun(),
+ Buffer3 = iolist_to_binary(Buffer2),
+ read_until(Mp#mp{data_fun=DataFun2,buffer=Buffer3}, Pattern, Callback2);
+ {partial, 0} ->
+ {NewData, DataFun2} = DataFun(),
+ read_until(Mp#mp{data_fun=DataFun2,
+ buffer= iolist_to_binary([Buffer,NewData])},
+ Pattern, Callback);
+ {partial, Skip} ->
+ <<DataChunk:Skip/binary, Rest/binary>> = Buffer,
+ Callback2 = Callback(DataChunk),
+ {NewData, DataFun2} = DataFun(),
+ read_until(Mp#mp{data_fun=DataFun2,
+ buffer= iolist_to_binary([Rest | NewData])},
+ Pattern, Callback2);
+ {exact, 0} ->
+ PatternLen = size(Pattern),
+ <<_:PatternLen/binary, Rest/binary>> = Buffer,
+ {Mp#mp{buffer= Rest}, Callback};
+ {exact, Skip} ->
+ PatternLen = size(Pattern),
+ <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer,
+ Callback2 = Callback(DataChunk),
+ {Mp#mp{buffer= Rest}, Callback2}
+ end.
+
+
+parse_part_header(#mp{callback=UserCallBack}=Mp) ->
+ {Mp2, AccCallback} = read_until(Mp, <<"\r\n\r\n">>,
+ fun(Next) -> acc_callback(Next, []) end),
+ HeaderData = AccCallback(get_data),
+
+ Headers =
+ lists:foldl(fun(Line, Acc) ->
+ split_header(Line) ++ Acc
+ end, [], re:split(HeaderData,<<"\r\n">>, [])),
+ NextCallback = UserCallBack({headers, Headers}),
+ parse_part_body(Mp2#mp{callback=NextCallback}).
+
+parse_part_body(#mp{boundary=Prefix, callback=Callback}=Mp) ->
+ {Mp2, WrappedCallback} = read_until(Mp, Prefix,
+ fun(Data) -> body_callback_wrapper(Data, Callback) end),
+ Callback2 = WrappedCallback(get_callback),
+ Callback3 = Callback2(body_end),
+ case check_for_last(Mp2#mp{callback=Callback3}) of
+ {last, #mp{callback=Callback3}=Mp3} ->
+ Mp3#mp{callback=Callback3(eof)};
+ {more, Mp3} ->
+ parse_part_header(Mp3)
+ end.
+
+acc_callback(get_data, Acc)->
+ iolist_to_binary(lists:reverse(Acc));
+acc_callback(Data, Acc)->
+ fun(Next) -> acc_callback(Next, [Data | Acc]) end.
+
+body_callback_wrapper(get_callback, Callback) ->
+ Callback;
+body_callback_wrapper(Data, Callback) ->
+ Callback2 = Callback({body, Data}),
+ fun(Next) -> body_callback_wrapper(Next, Callback2) end.
+
+
+check_for_last(#mp{buffer=Buffer, data_fun=DataFun}=Mp) ->
+ case Buffer of
+ <<"--",_/binary>> -> {last, Mp};
+ <<_, _, _/binary>> -> {more, Mp};
+ _ -> % not long enough
+ {Data, DataFun2} = DataFun(),
+ check_for_last(Mp#mp{buffer= <<Buffer/binary, Data/binary>>,
+ data_fun = DataFun2})
+ end.
+
+find_in_binary(B, Data) when size(B) > 0 ->
+ case size(Data) - size(B) of
+ Last when Last < 0 ->
+ partial_find(B, Data, 0, size(Data));
+ Last ->
+ find_in_binary(B, size(B), Data, 0, Last)
+ end.
+
+find_in_binary(B, BS, D, N, Last) when N =< Last->
+ case D of
+ <<_:N/binary, B:BS/binary, _/binary>> ->
+ {exact, N};
+ _ ->
+ find_in_binary(B, BS, D, 1 + N, Last)
+ end;
+find_in_binary(B, BS, D, N, Last) when N =:= 1 + Last ->
+ partial_find(B, D, N, BS - 1).
+
+partial_find(_B, _D, _N, 0) ->
+ not_found;
+partial_find(B, D, N, K) ->
+ <<B1:K/binary, _/binary>> = B,
+ case D of
+ <<_Skip:N/binary, B1/binary>> ->
+ {partial, N};
+ _ ->
+ partial_find(B, D, 1 + N, K - 1)
+ end.
+
+
diff --git a/1.1.x/src/couchdb/couch_httpd_auth.erl b/1.1.x/src/couchdb/couch_httpd_auth.erl
new file mode 100644
index 00000000..155865e5
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd_auth.erl
@@ -0,0 +1,359 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_auth).
+-include("couch_db.hrl").
+
+-export([default_authentication_handler/1,special_test_authentication_handler/1]).
+-export([cookie_authentication_handler/1]).
+-export([null_authentication_handler/1]).
+-export([proxy_authentification_handler/1]).
+-export([cookie_auth_header/2]).
+-export([handle_session_req/1]).
+
+-import(couch_httpd, [header_value/2, send_json/2,send_json/4, send_method_not_allowed/2]).
+
+special_test_authentication_handler(Req) ->
+ case header_value(Req, "WWW-Authenticate") of
+ "X-Couch-Test-Auth " ++ NamePass ->
+        % NamePass is a colon-separated string: "joe schmoe:a password".
+ [Name, Pass] = re:split(NamePass, ":", [{return, list}]),
+ case {Name, Pass} of
+ {"Jan Lehnardt", "apple"} -> ok;
+ {"Christopher Lenz", "dog food"} -> ok;
+ {"Noah Slater", "biggiesmalls endian"} -> ok;
+ {"Chris Anderson", "mp3"} -> ok;
+ {"Damien Katz", "pecan pie"} -> ok;
+ {_, _} ->
+ throw({unauthorized, <<"Name or password is incorrect.">>})
+ end,
+ Req#httpd{user_ctx=#user_ctx{name=?l2b(Name)}};
+ _ ->
+ % No X-Couch-Test-Auth credentials sent, give admin access so the
+ % previous authentication can be restored after the test
+ Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
+ end.
+
+basic_name_pw(Req) ->
+ AuthorizationHeader = header_value(Req, "Authorization"),
+ case AuthorizationHeader of
+ "Basic " ++ Base64Value ->
+ case string:tokens(?b2l(base64:decode(Base64Value)),":") of
+ ["_", "_"] ->
+ % special name and pass to be logged out
+ nil;
+ [User, Pass] ->
+ {User, Pass};
+ [User | Pass] ->
+ {User, string:join(Pass, ":")};
+ _ ->
+ nil
+ end;
+ _ ->
+ nil
+ end.
+
+default_authentication_handler(Req) ->
+ case basic_name_pw(Req) of
+ {User, Pass} ->
+ case couch_auth_cache:get_user_creds(User) of
+ nil ->
+ throw({unauthorized, <<"Name or password is incorrect.">>});
+ UserProps ->
+ UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<>>),
+ PasswordHash = hash_password(?l2b(Pass), UserSalt),
+ ExpectedHash = couch_util:get_value(<<"password_sha">>, UserProps, nil),
+ case couch_util:verify(ExpectedHash, PasswordHash) of
+ true ->
+ Req#httpd{user_ctx=#user_ctx{
+ name=?l2b(User),
+ roles=couch_util:get_value(<<"roles">>, UserProps, [])
+ }};
+ _Else ->
+ throw({unauthorized, <<"Name or password is incorrect.">>})
+ end
+ end;
+ nil ->
+ case couch_server:has_admins() of
+ true ->
+ Req;
+ false ->
+ case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
+ "true" -> Req;
+ % If no admins, and no user required, then everyone is admin!
+ % Yay, admin party!
+ _ -> Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}
+ end
+ end
+ end.
+
+null_authentication_handler(Req) ->
+ Req#httpd{user_ctx=#user_ctx{roles=[<<"_admin">>]}}.
+
+%% @doc proxy auth handler.
+%
+% This handler creates a userCtx object for a user authenticated remotely.
+% The client passes specific headers to CouchDB and the handler builds the
+% userCtx from them. The header names can be configured in local.ini; by
+% default they are:
+%
+% * X-Auth-CouchDB-UserName : contains the username (x_auth_username in the
+% couch_httpd_auth section)
+% * X-Auth-CouchDB-Roles : contains the user's roles as a comma-separated
+% list (x_auth_roles in the couch_httpd_auth section)
+% * X-Auth-CouchDB-Token : a token authenticating the request (x_auth_token
+% in the couch_httpd_auth section). The token is an HMAC-SHA1 of the
+% username, keyed with the secret from the couch_httpd_auth section of the
+% ini file; the client and the CouchDB node must share that secret. The
+% token is only required when proxy_use_secret in that section is true.
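+%
+% For example (illustrative): a trusted proxy holding the shared secret
+% computes
+%   Token = couch_util:to_hex(crypto:sha_mac(Secret, UserName)),
+% exactly as proxy_auth_user/1 below does, and sends it in the
+% X-Auth-CouchDB-Token header alongside the username header.
+%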
+proxy_authentification_handler(Req) ->
+ case proxy_auth_user(Req) of
+ nil -> Req;
+ Req2 -> Req2
+ end.
+
+proxy_auth_user(Req) ->
+ XHeaderUserName = couch_config:get("couch_httpd_auth", "x_auth_username",
+ "X-Auth-CouchDB-UserName"),
+ XHeaderRoles = couch_config:get("couch_httpd_auth", "x_auth_roles",
+ "X-Auth-CouchDB-Roles"),
+ XHeaderToken = couch_config:get("couch_httpd_auth", "x_auth_token",
+ "X-Auth-CouchDB-Token"),
+ case header_value(Req, XHeaderUserName) of
+ undefined -> nil;
+ UserName ->
+ Roles = case header_value(Req, XHeaderRoles) of
+ undefined -> [];
+ Else ->
+ [?l2b(R) || R <- string:tokens(Else, ",")]
+ end,
+ case couch_config:get("couch_httpd_auth", "proxy_use_secret", "false") of
+ "true" ->
+ case couch_config:get("couch_httpd_auth", "secret", nil) of
+ nil ->
+ Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}};
+ Secret ->
+ ExpectedToken = couch_util:to_hex(crypto:sha_mac(Secret, UserName)),
+ case header_value(Req, XHeaderToken) of
+ Token when Token == ExpectedToken ->
+ Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName),
+ roles=Roles}};
+ _ -> nil
+ end
+ end;
+ _ ->
+ Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}}
+ end
+ end.
+
+
+cookie_authentication_handler(#httpd{mochi_req=MochiReq}=Req) ->
+ case MochiReq:get_cookie_value("AuthSession") of
+ undefined -> Req;
+ [] -> Req;
+ Cookie ->
+ [User, TimeStr | HashParts] = try
+ AuthSession = couch_util:decodeBase64Url(Cookie),
+ [_A, _B | _Cs] = string:tokens(?b2l(AuthSession), ":")
+ catch
+ _:_Error ->
+ Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>,
+ throw({bad_request, Reason})
+ end,
+ % Verify expiry and hash
+ CurrentTime = make_cookie_time(),
+ case couch_config:get("couch_httpd_auth", "secret", nil) of
+ nil ->
+ ?LOG_DEBUG("cookie auth secret is not set",[]),
+ Req;
+ SecretStr ->
+ Secret = ?l2b(SecretStr),
+ case couch_auth_cache:get_user_creds(User) of
+ nil -> Req;
+ UserProps ->
+ UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>),
+ FullSecret = <<Secret/binary, UserSalt/binary>>,
+ ExpectedHash = crypto:sha_mac(FullSecret, User ++ ":" ++ TimeStr),
+ Hash = ?l2b(string:join(HashParts, ":")),
+ Timeout = to_int(couch_config:get("couch_httpd_auth", "timeout", 600)),
+ ?LOG_DEBUG("timeout ~p", [Timeout]),
+ case (catch erlang:list_to_integer(TimeStr, 16)) of
+ TimeStamp when CurrentTime < TimeStamp + Timeout ->
+ case couch_util:verify(ExpectedHash, Hash) of
+ true ->
+ TimeLeft = TimeStamp + Timeout - CurrentTime,
+ ?LOG_DEBUG("Successful cookie auth as: ~p", [User]),
+ Req#httpd{user_ctx=#user_ctx{
+ name=?l2b(User),
+ roles=couch_util:get_value(<<"roles">>, UserProps, [])
+ }, auth={FullSecret, TimeLeft < Timeout*0.9}};
+ _Else ->
+ Req
+ end;
+ _Else ->
+ Req
+ end
+ end
+ end
+ end.
+
+cookie_auth_header(#httpd{user_ctx=#user_ctx{name=null}}, _Headers) -> [];
+cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}=Req, Headers) ->
+ % Note: we only set the AuthSession cookie if:
+ % * a valid AuthSession cookie has been received,
+ % * more than 10% of the session timeout has elapsed (the
+ % auth={Secret, true} match above), so the cookie timestamp is refreshed
+ % well before it expires,
+ % * and an AuthSession cookie hasn't already been set, e.g. by a login or
+ % logout handler; the login and logout handlers set the cookie themselves.
+ CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""),
+ Cookies = mochiweb_cookies:parse_cookie(CookieHeader),
+ AuthSession = couch_util:get_value("AuthSession", Cookies),
+ if AuthSession == undefined ->
+ TimeStamp = make_cookie_time(),
+ [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)];
+ true ->
+ []
+ end;
+cookie_auth_header(_Req, _Headers) -> [].
+
+cookie_auth_cookie(Req, User, Secret, TimeStamp) ->
+ SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
+ Hash = crypto:sha_mac(Secret, SessionData),
+ mochiweb_cookies:cookie("AuthSession",
+ couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)),
+ [{path, "/"}, cookie_scheme(Req)]).
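+% The resulting cookie value is base64url(User ++ ":" ++ HexTimeStamp ++
+% ":" ++ Hash); cookie_authentication_handler/1 above splits on ":" to
+% recover these three parts.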
+
+hash_password(Password, Salt) ->
+ ?l2b(couch_util:to_hex(crypto:sha(<<Password/binary, Salt/binary>>))).
+
+ensure_cookie_auth_secret() ->
+ case couch_config:get("couch_httpd_auth", "secret", nil) of
+ nil ->
+ NewSecret = ?b2l(couch_uuids:random()),
+ couch_config:set("couch_httpd_auth", "secret", NewSecret),
+ NewSecret;
+ Secret -> Secret
+ end.
+
+% session handlers
+% Login handler with user db
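+% Example exchange (illustrative, hypothetical credentials): POST /_session
+% with a body of "name=jan&password=apple" (form-encoded) or the equivalent
+% JSON object sets an AuthSession cookie on success, and returns 401 with
+% the cookie cleared on failure.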
+handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req) ->
+ ReqBody = MochiReq:recv_body(),
+ Form = case MochiReq:get_primary_header_value("content-type") of
+ % the body may be form-encoded or JSON
+ "application/x-www-form-urlencoded" ++ _ ->
+ mochiweb_util:parse_qs(ReqBody);
+ "application/json" ++ _ ->
+ {Pairs} = ?JSON_DECODE(ReqBody),
+ lists:map(fun({Key, Value}) ->
+ {?b2l(Key), ?b2l(Value)}
+ end, Pairs);
+ _ ->
+ []
+ end,
+ UserName = ?l2b(couch_util:get_value("name", Form, "")),
+ Password = ?l2b(couch_util:get_value("password", Form, "")),
+ ?LOG_DEBUG("Attempt Login: ~s",[UserName]),
+ User = case couch_auth_cache:get_user_creds(UserName) of
+ nil -> [];
+ Result -> Result
+ end,
+ UserSalt = couch_util:get_value(<<"salt">>, User, <<>>),
+ PasswordHash = hash_password(Password, UserSalt),
+ ExpectedHash = couch_util:get_value(<<"password_sha">>, User, nil),
+ case couch_util:verify(ExpectedHash, PasswordHash) of
+ true ->
+ % setup the session cookie
+ Secret = ?l2b(ensure_cookie_auth_secret()),
+ CurrentTime = make_cookie_time(),
+ Cookie = cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime),
+ % TODO document the "next" feature in Futon
+ {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
+ nil ->
+ {200, [Cookie]};
+ Redirect ->
+ {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
+ end,
+ send_json(Req#httpd{req_body=ReqBody}, Code, Headers,
+ {[
+ {ok, true},
+ {name, couch_util:get_value(<<"name">>, User, null)},
+ {roles, couch_util:get_value(<<"roles">>, User, [])}
+ ]});
+ _Else ->
+ % clear the session
+ Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}, cookie_scheme(Req)]),
+ send_json(Req, 401, [Cookie], {[{error, <<"unauthorized">>},{reason, <<"Name or password is incorrect.">>}]})
+ end;
+% get user info
+% GET /_session
+handle_session_req(#httpd{method='GET', user_ctx=UserCtx}=Req) ->
+ Name = UserCtx#user_ctx.name,
+ ForceLogin = couch_httpd:qs_value(Req, "basic", "false"),
+ case {Name, ForceLogin} of
+ {null, "true"} ->
+ throw({unauthorized, <<"Please login.">>});
+ {Name, _} ->
+ send_json(Req, {[
+ % TODO: remove this 'ok' member from the response
+ {ok, true},
+ {<<"userCtx">>, {[
+ {name, Name},
+ {roles, UserCtx#user_ctx.roles}
+ ]}},
+ {info, {[
+ {authentication_db, ?l2b(couch_config:get("couch_httpd_auth", "authentication_db"))},
+ {authentication_handlers, [auth_name(H) || H <- couch_httpd:make_fun_spec_strs(
+ couch_config:get("httpd", "authentication_handlers"))]}
+ ] ++ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
+ auth_name(?b2l(Handler))
+ end)}}
+ ]})
+ end;
+% logout by deleting the session
+handle_session_req(#httpd{method='DELETE'}=Req) ->
+ Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}, cookie_scheme(Req)]),
+ {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
+ nil ->
+ {200, [Cookie]};
+ Redirect ->
+ {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
+ end,
+ send_json(Req, Code, Headers, {[{ok, true}]});
+handle_session_req(Req) ->
+ send_method_not_allowed(Req, "GET,HEAD,POST,DELETE").
+
+maybe_value(_Key, undefined, _Fun) -> [];
+maybe_value(Key, Else, Fun) ->
+ [{Key, Fun(Else)}].
+
+auth_name(String) when is_list(String) ->
+ [_,_,_,_,_,Name|_] = re:split(String, "[\\W_]", [{return, list}]),
+ ?l2b(Name).
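+% For example (illustrative):
+%   auth_name("{couch_httpd_auth, cookie_authentication_handler}")
+% returns <<"cookie">>, i.e. the first word of the handler's function name.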
+
+to_int(Value) when is_binary(Value) ->
+ to_int(?b2l(Value));
+to_int(Value) when is_list(Value) ->
+ list_to_integer(Value);
+to_int(Value) when is_integer(Value) ->
+ Value.
+
+make_cookie_time() ->
+ {NowMS, NowS, _} = erlang:now(),
+ NowMS * 1000000 + NowS.
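+% The result is wall-clock seconds since the Unix epoch, matching the hex
+% timestamps embedded in AuthSession cookies.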
+
+cookie_scheme(#httpd{mochi_req=MochiReq}) ->
+ case MochiReq:get(scheme) of
+ http -> {http_only, true};
+ https -> {secure, true}
+ end.
diff --git a/1.1.x/src/couchdb/couch_httpd_db.erl b/1.1.x/src/couchdb/couch_httpd_db.erl
new file mode 100644
index 00000000..0dbebb6e
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd_db.erl
@@ -0,0 +1,1283 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_db).
+-include("couch_db.hrl").
+
+-export([handle_request/1, handle_compact_req/2, handle_design_req/2,
+ db_req/2, couch_doc_open/4,handle_changes_req/2,
+ update_doc_result_to_json/1, update_doc_result_to_json/2,
+ handle_design_info_req/3, handle_view_cleanup_req/2]).
+
+-import(couch_httpd,
+ [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
+ send_response/4,start_json_response/2,start_json_response/3,
+ send_chunk/2,last_chunk/1,end_json_response/1,
+ start_chunked_response/3, absolute_uri/2, send/2,
+ start_response_length/4]).
+
+-record(doc_query_args, {
+ options = [],
+ rev = nil,
+ open_revs = [],
+ update_type = interactive_edit,
+ atts_since = nil
+}).
+
+% Database request handlers
+handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
+ db_url_handlers=DbUrlHandlers}=Req)->
+ case {Method, RestParts} of
+ {'PUT', []} ->
+ create_db_req(Req, DbName);
+ {'DELETE', []} ->
+ % if we get ?rev=... the user is using a faulty script where the
+ % document id is empty by accident. Let them recover safely.
+ case couch_httpd:qs_value(Req, "rev", false) of
+ false -> delete_db_req(Req, DbName);
+ _Rev -> throw({bad_request,
+ "You tried to DELETE a database with a ?=rev parameter. "
+ ++ "Did you mean to DELETE a document instead?"})
+ end;
+ {_, []} ->
+ do_db_req(Req, fun db_req/2);
+ {_, [SecondPart|_]} ->
+ Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2),
+ do_db_req(Req, Handler)
+ end.
+
+handle_changes_req(#httpd{method='POST'}=Req, Db) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ handle_changes_req1(Req, Db);
+handle_changes_req(#httpd{method='GET'}=Req, Db) ->
+ handle_changes_req1(Req, Db);
+handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "GET,HEAD,POST").
+
+handle_changes_req1(Req, Db) ->
+ MakeCallback = fun(Resp) ->
+ fun({change, Change, _}, "continuous") ->
+ send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]);
+ ({change, Change, Prepend}, _) ->
+ send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]);
+ (start, "continuous") ->
+ ok;
+ (start, _) ->
+ send_chunk(Resp, "{\"results\":[\n");
+ ({stop, EndSeq}, "continuous") ->
+ send_chunk(
+ Resp,
+ [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
+ ),
+ end_json_response(Resp);
+ ({stop, EndSeq}, _) ->
+ send_chunk(
+ Resp,
+ io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq])
+ ),
+ end_json_response(Resp);
+ (timeout, _) ->
+ send_chunk(Resp, "\n")
+ end
+ end,
+ ChangesArgs = parse_changes_query(Req),
+ ChangesFun = couch_changes:handle_changes(ChangesArgs, Req, Db),
+ WrapperFun = case ChangesArgs#changes_args.feed of
+ "normal" ->
+ {ok, Info} = couch_db:get_db_info(Db),
+ CurrentEtag = couch_httpd:make_etag(Info),
+ fun(FeedChangesFun) ->
+ couch_httpd:etag_respond(
+ Req,
+ CurrentEtag,
+ fun() ->
+ {ok, Resp} = couch_httpd:start_json_response(
+ Req, 200, [{"Etag", CurrentEtag}]
+ ),
+ FeedChangesFun(MakeCallback(Resp))
+ end
+ )
+ end;
+ _ ->
+ % "longpoll" or "continuous"
+ {ok, Resp} = couch_httpd:start_json_response(Req, 200),
+ fun(FeedChangesFun) ->
+ FeedChangesFun(MakeCallback(Resp))
+ end
+ end,
+ couch_stats_collector:track_process_count(
+ {httpd, clients_requesting_changes}
+ ),
+ WrapperFun(ChangesFun).
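+
+% For illustration (hedged): feed=normal sends a single ETag-guarded body of
+% the form {"results":[...],"last_seq":N}, while feed=continuous emits one
+% JSON object per line and a bare newline as heartbeat.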
+
+
+handle_compact_req(#httpd{method='POST',path_parts=[DbName,_,Id|_]}=Req, Db) ->
+ ok = couch_db:check_is_admin(Db),
+ couch_httpd:validate_ctype(Req, "application/json"),
+ ok = couch_view_compactor:start_compact(DbName, Id),
+ send_json(Req, 202, {[{ok, true}]});
+
+handle_compact_req(#httpd{method='POST'}=Req, Db) ->
+ ok = couch_db:check_is_admin(Db),
+ couch_httpd:validate_ctype(Req, "application/json"),
+ ok = couch_db:start_compact(Db),
+ send_json(Req, 202, {[{ok, true}]});
+
+handle_compact_req(Req, _Db) ->
+ send_method_not_allowed(Req, "POST").
+
+handle_view_cleanup_req(#httpd{method='POST'}=Req, Db) ->
+ % delete unreferenced index files
+ ok = couch_db:check_is_admin(Db),
+ couch_httpd:validate_ctype(Req, "application/json"),
+ ok = couch_view:cleanup_index_files(Db),
+ send_json(Req, 202, {[{ok, true}]});
+
+handle_view_cleanup_req(Req, _Db) ->
+ send_method_not_allowed(Req, "POST").
+
+
+handle_design_req(#httpd{
+ path_parts=[_DbName, _Design, DesignName, <<"_",_/binary>> = Action | _Rest],
+ design_url_handlers = DesignUrlHandlers
+ }=Req, Db) ->
+ % load ddoc
+ DesignId = <<"_design/", DesignName/binary>>,
+ DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, []),
+ Handler = couch_util:dict_find(Action, DesignUrlHandlers, fun(_, _, _) ->
+ throw({not_found, <<"missing handler: ", Action/binary>>})
+ end),
+ Handler(Req, Db, DDoc);
+
+handle_design_req(Req, Db) ->
+ db_req(Req, Db).
+
+handle_design_info_req(#httpd{
+ method='GET',
+ path_parts=[_DbName, _Design, DesignName, _]
+ }=Req, Db, _DDoc) ->
+ DesignId = <<"_design/", DesignName/binary>>,
+ {ok, GroupInfoList} = couch_view:get_group_info(Db, DesignId),
+ send_json(Req, 200, {[
+ {name, DesignName},
+ {view_index, {GroupInfoList}}
+ ]});
+
+handle_design_info_req(Req, _Db, _DDoc) ->
+ send_method_not_allowed(Req, "GET").
+
+create_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
+ ok = couch_httpd:verify_is_server_admin(Req),
+ case couch_server:create(DbName, [{user_ctx, UserCtx}]) of
+ {ok, Db} ->
+ couch_db:close(Db),
+ DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
+ send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]});
+ Error ->
+ throw(Error)
+ end.
+
+delete_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
+ ok = couch_httpd:verify_is_server_admin(Req),
+ case couch_server:delete(DbName, [{user_ctx, UserCtx}]) of
+ ok ->
+ send_json(Req, 200, {[{ok, true}]});
+ Error ->
+ throw(Error)
+ end.
+
+do_db_req(#httpd{user_ctx=UserCtx,path_parts=[DbName|_]}=Req, Fun) ->
+ case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
+ {ok, Db} ->
+ try
+ Fun(Req, Db)
+ after
+ catch couch_db:close(Db)
+ end;
+ Error ->
+ throw(Error)
+ end.
+
+db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
+ {ok, DbInfo} = couch_db:get_db_info(Db),
+ send_json(Req, {DbInfo});
+
+db_req(#httpd{method='POST',path_parts=[DbName]}=Req, Db) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ Doc = couch_doc:from_json_obj(couch_httpd:json_body(Req)),
+ Doc2 = case Doc#doc.id of
+ <<"">> ->
+ Doc#doc{id=couch_uuids:new(), revs={0, []}};
+ _ ->
+ Doc
+ end,
+ DocId = Doc2#doc.id,
+ case couch_httpd:qs_value(Req, "batch") of
+ "ok" ->
+ % async_batching
+ spawn(fun() ->
+ case catch(couch_db:update_doc(Db, Doc2, [])) of
+ {ok, _} -> ok;
+ Error ->
+ ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
+ end
+ end),
+
+ send_json(Req, 202, [], {[
+ {ok, true},
+ {id, DocId}
+ ]});
+ _Normal ->
+ % normal
+ {ok, NewRev} = couch_db:update_doc(Db, Doc2, []),
+ DocUrl = absolute_uri(
+ Req, binary_to_list(<<"/",DbName/binary,"/", DocId/binary>>)),
+ send_json(Req, 201, [{"Location", DocUrl}], {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(NewRev)}
+ ]})
+ end;
+
+
+db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
+ send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ UpdateSeq = couch_db:get_update_seq(Db),
+ CommittedSeq = couch_db:get_committed_update_seq(Db),
+ {ok, StartTime} =
+ case couch_httpd:qs_value(Req, "seq") of
+ undefined ->
+ couch_db:ensure_full_commit(Db);
+ RequiredStr ->
+ RequiredSeq = list_to_integer(RequiredStr),
+ if RequiredSeq > UpdateSeq ->
+ throw({bad_request,
+ "can't do a full commit ahead of current update_seq"});
+ RequiredSeq > CommittedSeq ->
+ couch_db:ensure_full_commit(Db);
+ true ->
+ {ok, Db#db.instance_start_time}
+ end
+ end,
+ send_json(Req, 201, {[
+ {ok, true},
+ {instance_start_time, StartTime}
+ ]});
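+% For example (illustrative): POST /db/_ensure_full_commit?seq=N is a no-op
+% when N is already committed, forces a commit when N is written but not yet
+% durable, and is rejected when N is ahead of the db's update_seq.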
+
+db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
+ couch_stats_collector:increment({httpd, bulk_requests}),
+ couch_httpd:validate_ctype(Req, "application/json"),
+ {JsonProps} = couch_httpd:json_body_obj(Req),
+ DocsArray = couch_util:get_value(<<"docs">>, JsonProps),
+ case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
+ "true" ->
+ Options = [full_commit];
+ "false" ->
+ Options = [delay_commit];
+ _ ->
+ Options = []
+ end,
+ case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
+ true ->
+ Docs = lists:map(
+ fun({ObjProps} = JsonObj) ->
+ Doc = couch_doc:from_json_obj(JsonObj),
+ validate_attachment_names(Doc),
+ Id = case Doc#doc.id of
+ <<>> -> couch_uuids:new();
+ Id0 -> Id0
+ end,
+ case couch_util:get_value(<<"_rev">>, ObjProps) of
+ undefined ->
+ Revs = {0, []};
+ Rev ->
+ {Pos, RevId} = couch_doc:parse_rev(Rev),
+ Revs = {Pos, [RevId]}
+ end,
+ Doc#doc{id=Id,revs=Revs}
+ end,
+ DocsArray),
+ Options2 =
+ case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
+ true -> [all_or_nothing|Options];
+ _ -> Options
+ end,
+ case couch_db:update_docs(Db, Docs, Options2) of
+ {ok, Results} ->
+ % output the results
+ DocResults = lists:zipwith(fun update_doc_result_to_json/2,
+ Docs, Results),
+ send_json(Req, 201, DocResults);
+ {aborted, Errors} ->
+ ErrorsJson =
+ lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 417, ErrorsJson)
+ end;
+ false ->
+ Docs = lists:map(fun(JsonObj) ->
+ Doc = couch_doc:from_json_obj(JsonObj),
+ validate_attachment_names(Doc),
+ Doc
+ end, DocsArray),
+ {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes),
+ ErrorsJson =
+ lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 201, ErrorsJson)
+ end;
+db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ {IdsRevs} = couch_httpd:json_body_obj(Req),
+ IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
+
+ case couch_db:purge_docs(Db, IdsRevs2) of
+ {ok, PurgeSeq, PurgedIdsRevs} ->
+ PurgedIdsRevs2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- PurgedIdsRevs],
+ send_json(Req, 200, {[{<<"purge_seq">>, PurgeSeq}, {<<"purged">>, {PurgedIdsRevs2}}]});
+ Error ->
+ throw(Error)
+ end;
+
+db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='GET',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
+ Keys = couch_httpd:qs_json_value(Req, "keys", nil),
+ all_docs_view(Req, Db, Keys);
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ {Fields} = couch_httpd:json_body_obj(Req),
+ case couch_util:get_value(<<"keys">>, Fields, nil) of
+ nil ->
+ ?LOG_DEBUG("POST to _all_docs with no keys member.", []),
+ all_docs_view(Req, Db, nil);
+ Keys when is_list(Keys) ->
+ all_docs_view(Req, Db, Keys);
+ _ ->
+ throw({bad_request, "`keys` member must be an array."})
+ end;
+
+db_req(#httpd{path_parts=[_,<<"_all_docs">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "GET,HEAD,POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
+ {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
+ JsonDocIdRevs2 = [{Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]} || {Id, RevStrs} <- JsonDocIdRevs],
+ {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
+ Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs, _} <- Results],
+ send_json(Req, {[
+ {missing_revs, {Results2}}
+ ]});
+
+db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
+ {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
+ JsonDocIdRevs2 =
+ [{Id, couch_doc:parse_revs(RevStrs)} || {Id, RevStrs} <- JsonDocIdRevs],
+ {ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
+ Results2 =
+ lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
+ {Id,
+ {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
+ if PossibleAncestors == [] ->
+ [];
+ true ->
+ [{possible_ancestors,
+ couch_doc:revs_to_strs(PossibleAncestors)}]
+ end}}
+ end, Results),
+ send_json(Req, {Results2});
+
+db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>]}=Req, Db) ->
+ SecObj = couch_httpd:json_body(Req),
+ ok = couch_db:set_security(Db, SecObj),
+ send_json(Req, {[{<<"ok">>, true}]});
+
+db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
+ send_json(Req, couch_db:get_security(Db));
+
+db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "PUT,GET");
+
+db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>]}=Req,
+ Db) ->
+ Limit = couch_httpd:json_body(Req),
+ ok = couch_db:set_revs_limit(Db, Limit),
+ send_json(Req, {[{<<"ok">>, true}]});
+
+db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
+ send_json(Req, couch_db:get_revs_limit(Db));
+
+db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "PUT,GET");
+
+% Special case to enable using an unencoded slash in the URL of design docs,
+% as slashes in document IDs must otherwise be URL encoded.
+db_req(#httpd{method='GET',mochi_req=MochiReq, path_parts=[DbName,<<"_design/",_/binary>>|_]}=Req, _Db) ->
+ PathFront = "/" ++ couch_httpd:quote(binary_to_list(DbName)) ++ "/",
+ [_|PathTail] = re:split(MochiReq:get(raw_path), "_design%2F",
+ [{return, list}]),
+ couch_httpd:send_redirect(Req, PathFront ++ "_design/" ++
+ mochiweb_util:join(PathTail, "_design%2F"));
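+% For example (illustrative): GET /db/_design%2Ffoo/_view/bar is redirected
+% to /db/_design/foo/_view/bar.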
+
+db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
+ db_doc_req(Req, Db, <<"_design/",Name/binary>>);
+
+db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
+ db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
+
+
+% Special case to allow for accessing local documents without %2F
+% encoding the docid. Throws out requests that don't have the second
+% path part or that specify an attachment name.
+db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
+ throw({bad_request, <<"Invalid _local document id.">>});
+
+db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
+ throw({bad_request, <<"Invalid _local document id.">>});
+
+db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
+ db_doc_req(Req, Db, <<"_local/", Name/binary>>);
+
+db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
+ throw({bad_request, <<"_local documents do not accept attachments.">>});
+
+db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
+ db_doc_req(Req, Db, DocId);
+
+db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
+ db_attachment_req(Req, Db, DocId, FileNameParts).
+
+all_docs_view(Req, Db, Keys) ->
+ #view_query_args{
+ start_key = StartKey,
+ start_docid = StartDocId,
+ end_key = EndKey,
+ end_docid = EndDocId,
+ limit = Limit,
+ skip = SkipCount,
+ direction = Dir,
+ inclusive_end = Inclusive
+ } = QueryArgs = couch_httpd_view:parse_view_params(Req, Keys, map),
+ {ok, Info} = couch_db:get_db_info(Db),
+ CurrentEtag = couch_httpd:make_etag(Info),
+ couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+
+ TotalRowCount = couch_util:get_value(doc_count, Info),
+ StartId = if is_binary(StartKey) -> StartKey;
+ true -> StartDocId
+ end,
+ EndId = if is_binary(EndKey) -> EndKey;
+ true -> EndDocId
+ end,
+ FoldAccInit = {Limit, SkipCount, undefined, []},
+ UpdateSeq = couch_db:get_update_seq(Db),
+ JsonParams = case couch_httpd:qs_value(Req, "update_seq") of
+ "true" ->
+ [{update_seq, UpdateSeq}];
+ _Else ->
+ []
+ end,
+ case Keys of
+ nil ->
+ FoldlFun = couch_httpd_view:make_view_fold_fun(Req, QueryArgs, CurrentEtag, Db, UpdateSeq,
+ TotalRowCount, #view_fold_helper_funs{
+ reduce_count = fun couch_db:enum_docs_reduce_to_count/1,
+ send_row = fun all_docs_send_json_view_row/6
+ }),
+ AdapterFun = fun(#full_doc_info{id=Id}=FullDocInfo, Offset, Acc) ->
+ case couch_doc:to_doc_info(FullDocInfo) of
+ #doc_info{revs=[#rev_info{deleted=false}|_]} = DocInfo ->
+ FoldlFun({{Id, Id}, DocInfo}, Offset, Acc);
+ #doc_info{revs=[#rev_info{deleted=true}|_]} ->
+ {ok, Acc}
+ end
+ end,
+ {ok, LastOffset, FoldResult} = couch_db:enum_docs(Db,
+ AdapterFun, FoldAccInit, [{start_key, StartId}, {dir, Dir},
+ {if Inclusive -> end_key; true -> end_key_gt end, EndId}]),
+ couch_httpd_view:finish_view_fold(Req, TotalRowCount, LastOffset, FoldResult, JsonParams);
+ _ ->
+ FoldlFun = couch_httpd_view:make_view_fold_fun(Req, QueryArgs, CurrentEtag, Db, UpdateSeq,
+ TotalRowCount, #view_fold_helper_funs{
+ reduce_count = fun(Offset) -> Offset end,
+ send_row = fun all_docs_send_json_view_row/6
+ }),
+ KeyFoldFun = case Dir of
+ fwd ->
+ fun lists:foldl/3;
+ rev ->
+ fun lists:foldr/3
+ end,
+ FoldResult = KeyFoldFun(
+ fun(Key, FoldAcc) ->
+ DocInfo = (catch couch_db:get_doc_info(Db, Key)),
+ Doc = case DocInfo of
+ {ok, #doc_info{id = Id} = Di} ->
+ {{Id, Id}, Di};
+ not_found ->
+ {{Key, error}, not_found};
+ _ ->
+ ?LOG_ERROR("Invalid DocInfo: ~p", [DocInfo]),
+ throw({error, invalid_doc_info})
+ end,
+ {_, FoldAcc2} = FoldlFun(Doc, 0, FoldAcc),
+ FoldAcc2
+ end, FoldAccInit, Keys),
+ couch_httpd_view:finish_view_fold(Req, TotalRowCount, 0, FoldResult, JsonParams)
+ end
+ end).
+
+all_docs_send_json_view_row(Resp, Db, KV, IncludeDocs, Conflicts, RowFront) ->
+ JsonRow = all_docs_view_row_obj(Db, KV, IncludeDocs, Conflicts),
+ send_chunk(Resp, RowFront ++ ?JSON_ENCODE(JsonRow)),
+ {ok, ",\r\n"}.
+
+all_docs_view_row_obj(_Db, {{DocId, error}, Value}, _IncludeDocs, _Conflicts) ->
+ {[{key, DocId}, {error, Value}]};
+all_docs_view_row_obj(Db, {_KeyDocId, DocInfo}, true, Conflicts) ->
+ case DocInfo of
+ #doc_info{revs = [#rev_info{deleted = true} | _]} ->
+ {all_docs_row(DocInfo) ++ [{doc, null}]};
+ _ ->
+ {all_docs_row(DocInfo) ++ couch_httpd_view:doc_member(
+ Db, DocInfo, if Conflicts -> [conflicts]; true -> [] end)}
+ end;
+all_docs_view_row_obj(_Db, {_KeyDocId, DocInfo}, _IncludeDocs, _Conflicts) ->
+ {all_docs_row(DocInfo)}.
+
+all_docs_row(#doc_info{id = Id, revs = [RevInfo | _]}) ->
+ #rev_info{rev = Rev, deleted = Del} = RevInfo,
+ [ {id, Id}, {key, Id},
+ {value, {[{rev, couch_doc:rev_to_str(Rev)}] ++ case Del of
+ true -> [{deleted, true}];
+ false -> []
+ end}} ].
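+
+% An _all_docs row for an existing doc looks like (illustrative):
+%   {"id":"x","key":"x","value":{"rev":"1-abc"}}
+% with "deleted":true added inside value for deleted docs.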
+
+
+db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
+ % check for the existence of the doc to handle the 404 case.
+ couch_doc_open(Db, DocId, nil, []),
+ case couch_httpd:qs_value(Req, "rev") of
+ undefined ->
+ update_doc(Req, Db, DocId,
+ couch_doc_from_req(Req, DocId, {[{<<"_deleted">>,true}]}));
+ Rev ->
+ update_doc(Req, Db, DocId,
+ couch_doc_from_req(Req, DocId,
+ {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}))
+ end;
+
+db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
+ #doc_query_args{
+ rev = Rev,
+ open_revs = Revs,
+ options = Options1,
+ atts_since = AttsSince
+ } = parse_doc_query(Req),
+ Options = case AttsSince of
+ nil ->
+ Options1;
+ RevList when is_list(RevList) ->
+ [{atts_since, RevList}, attachments | Options1]
+ end,
+ case Revs of
+ [] ->
+ Doc = couch_doc_open(Db, DocId, Rev, Options),
+ send_doc(Req, Doc, Options);
+ _ ->
+ {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
+ case MochiReq:accepts_content_type("multipart/mixed") of
+ false ->
+ {ok, Resp} = start_json_response(Req, 200),
+ send_chunk(Resp, "["),
+ % We loop through the docs. The first time through the separator
+ % is whitespace, then a comma on subsequent iterations.
+ lists:foldl(
+ fun(Result, AccSeparator) ->
+ case Result of
+ {ok, Doc} ->
+ JsonDoc = couch_doc:to_json_obj(Doc, Options),
+ Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
+ send_chunk(Resp, AccSeparator ++ Json);
+ {{not_found, missing}, RevId} ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
+ send_chunk(Resp, AccSeparator ++ Json)
+ end,
+ "," % AccSeparator now has a comma
+ end,
+ "", Results),
+ send_chunk(Resp, "]"),
+ end_json_response(Resp);
+ true ->
+ send_docs_multipart(Req, Results, Options)
+ end
+ end;
+
+
+db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
+ couch_httpd:validate_referer(Req),
+ couch_doc:validate_docid(DocId),
+ couch_httpd:validate_ctype(Req, "multipart/form-data"),
+ Form = couch_httpd:parse_form(Req),
+ case couch_util:get_value("_doc", Form) of
+ undefined ->
+ Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)),
+ {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []);
+ Json ->
+ Doc = couch_doc_from_req(Req, DocId, ?JSON_DECODE(Json))
+ end,
+ UpdatedAtts = [
+ #att{name=validate_attachment_name(Name),
+ type=list_to_binary(ContentType),
+ data=Content} ||
+ {Name, {ContentType, _}, Content} <-
+ proplists:get_all_values("_attachments", Form)
+ ],
+ #doc{atts=OldAtts} = Doc,
+ OldAtts2 = lists:flatmap(
+ fun(#att{name=OldName}=Att) ->
+ case [1 || A <- UpdatedAtts, A#att.name == OldName] of
+ [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
+ _ -> [] % the attachment was in the UpdatedAtts, drop it
+ end
+ end, OldAtts),
+ NewDoc = Doc#doc{
+ atts = UpdatedAtts ++ OldAtts2
+ },
+ {ok, NewRev} = couch_db:update_doc(Db, NewDoc, []),
+
+ send_json(Req, 201, [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewRev)) ++ "\""}], {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(NewRev)}
+ ]});
+
+db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
+ #doc_query_args{
+ update_type = UpdateType
+ } = parse_doc_query(Req),
+ couch_doc:validate_docid(DocId),
+
+ Loc = absolute_uri(Req, "/" ++ ?b2l(Db#db.name) ++ "/" ++ ?b2l(DocId)),
+ RespHeaders = [{"Location", Loc}],
+ case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
+ ("multipart/related;" ++ _) = ContentType ->
+ {ok, Doc0} = couch_doc:doc_from_multi_part_stream(ContentType,
+ fun() -> receive_request_data(Req) end),
+ Doc = couch_doc_from_req(Req, DocId, Doc0),
+ update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType);
+ _Else ->
+ case couch_httpd:qs_value(Req, "batch") of
+ "ok" ->
+ % batch
+ Doc = couch_doc_from_req(Req, DocId, couch_httpd:json_body(Req)),
+
+ spawn(fun() ->
+ case catch(couch_db:update_doc(Db, Doc, [])) of
+ {ok, _} -> ok;
+ Error ->
+ ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
+ end
+ end),
+ send_json(Req, 202, [], {[
+ {ok, true},
+ {id, DocId}
+ ]});
+ _Normal ->
+ % normal
+ Body = couch_httpd:json_body(Req),
+ Doc = couch_doc_from_req(Req, DocId, Body),
+ update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType)
+ end
+ end;
+
+db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
+ SourceRev =
+ case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
+ missing_rev -> nil;
+ Rev -> Rev
+ end,
+ {TargetDocId, TargetRevs} = parse_copy_destination_header(Req),
+ % open old doc
+ Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
+ % save new doc
+ {ok, NewTargetRev} = couch_db:update_doc(Db,
+ Doc#doc{id=TargetDocId, revs=TargetRevs}, []),
+ % respond
+ send_json(Req, 201,
+ [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}],
+ update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}));
+
+db_doc_req(Req, _Db, _DocId) ->
+ send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
+
+
+send_doc(Req, Doc, Options) ->
+ case Doc#doc.meta of
+ [] ->
+ DiskEtag = couch_httpd:doc_etag(Doc),
+ % output etag only when we have no meta
+ couch_httpd:etag_respond(Req, DiskEtag, fun() ->
+ send_doc_efficiently(Req, Doc, [{"Etag", DiskEtag}], Options)
+ end);
+ _ ->
+ send_doc_efficiently(Req, Doc, [], Options)
+ end.
+
+
+send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req,
+ #doc{atts = Atts} = Doc, Headers, Options) ->
+ case lists:member(attachments, Options) of
+ true ->
+ case MochiReq:accepts_content_type("multipart/related") of
+ false ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+ true ->
+ Boundary = couch_uuids:random(),
+ JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
+ [attachments, follows|Options])),
+ {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
+ Boundary,JsonBytes, Atts, true),
+ CType = {<<"Content-Type">>, ContentType},
+ {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
+ couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
+ fun(Data) -> couch_httpd:send(Resp, Data) end, true)
+ end;
+ false ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
+ end.
+
+send_docs_multipart(Req, Results, Options1) ->
+ OuterBoundary = couch_uuids:random(),
+ InnerBoundary = couch_uuids:random(),
+ Options = [attachments, follows, att_encoding_info | Options1],
+ CType = {"Content-Type",
+ "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
+ {ok, Resp} = start_chunked_response(Req, 200, [CType]),
+ couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
+ lists:foreach(
+ fun({ok, #doc{atts=Atts}=Doc}) ->
+ JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
+ {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
+ InnerBoundary, JsonBytes, Atts, true),
+ couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
+ ContentType/binary, "\r\n\r\n">>),
+ couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
+ fun(Data) -> couch_httpd:send_chunk(Resp, Data)
+ end, true),
+ couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
+ ({{not_found, missing}, RevId}) ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
+ couch_httpd:send_chunk(Resp,
+ [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
+ Json,
+ <<"\r\n--", OuterBoundary/binary>>])
+ end, Results),
+ couch_httpd:send_chunk(Resp, <<"--">>),
+ couch_httpd:last_chunk(Resp).
+
+send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
+ Boundary = couch_uuids:random(),
+ CType = {"Content-Type",
+ "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
+ {ok, Resp} = start_chunked_response(Req, 206, [CType]),
+ couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
+ lists:foreach(fun({From, To}) ->
+ ContentRange = make_content_range(From, To, Len),
+ couch_httpd:send_chunk(Resp,
+ <<"\r\nContent-Type: ", ContentType/binary, "\r\n",
+ "Content-Range: ", ContentRange/binary, "\r\n",
+ "\r\n">>),
+ couch_doc:range_att_foldl(Att, From, To + 1,
+ fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
+ couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
+ end, Ranges),
+ couch_httpd:send_chunk(Resp, <<"--">>),
+ couch_httpd:last_chunk(Resp),
+ {ok, Resp}.
+
+receive_request_data(Req) ->
+ {couch_httpd:recv(Req, 0), fun() -> receive_request_data(Req) end}.
+
+make_content_range(From, To, Len) ->
+ ?l2b(io_lib:format("bytes ~B-~B/~B", [From, To, Len])).
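+% For example (illustrative): make_content_range(0, 99, 1000) returns
+% <<"bytes 0-99/1000">>.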
+
+update_doc_result_to_json({{Id, Rev}, Error}) ->
+ {_Code, Err, Msg} = couch_httpd:error_info(Error),
+ {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
+ {error, Err}, {reason, Msg}]}.
+
+update_doc_result_to_json(#doc{id=DocId}, Result) ->
+ update_doc_result_to_json(DocId, Result);
+update_doc_result_to_json(DocId, {ok, NewRev}) ->
+ {[{id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
+update_doc_result_to_json(DocId, Error) ->
+ {_Code, ErrorStr, Reason} = couch_httpd:error_info(Error),
+ {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
+
+
+update_doc(Req, Db, DocId, Doc) ->
+ update_doc(Req, Db, DocId, Doc, []).
+
+update_doc(Req, Db, DocId, Doc, Headers) ->
+ update_doc(Req, Db, DocId, Doc, Headers, interactive_edit).
+
+update_doc(Req, Db, DocId, #doc{deleted=Deleted}=Doc, Headers, UpdateType) ->
+ case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
+ "true" ->
+ Options = [full_commit];
+ "false" ->
+ Options = [delay_commit];
+ _ ->
+ Options = []
+ end,
+ {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType),
+ NewRevStr = couch_doc:rev_to_str(NewRev),
+ ResponseHeaders = [{"Etag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers,
+ send_json(Req, if Deleted -> 200; true -> 201 end,
+ ResponseHeaders, {[
+ {ok, true},
+ {id, DocId},
+ {rev, NewRevStr}]}).
+
+couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc) ->
+ validate_attachment_names(Doc),
+ ExplicitDocRev =
+ case Revs of
+ {Start,[RevId|_]} -> {Start, RevId};
+ _ -> undefined
+ end,
+ case extract_header_rev(Req, ExplicitDocRev) of
+ missing_rev ->
+ Revs2 = {0, []};
+ ExplicitDocRev ->
+ Revs2 = Revs;
+ {Pos, Rev} ->
+ Revs2 = {Pos, [Rev]}
+ end,
+ Doc#doc{id=DocId, revs=Revs2};
+couch_doc_from_req(Req, DocId, Json) ->
+ couch_doc_from_req(Req, DocId, couch_doc:from_json_obj(Json)).
+
+
+% Useful for debugging
+% couch_doc_open(Db, DocId) ->
+% couch_doc_open(Db, DocId, nil, []).
+
+couch_doc_open(Db, DocId, Rev, Options) ->
+ case Rev of
+ nil -> % open most recent rev
+ case couch_db:open_doc(Db, DocId, Options) of
+ {ok, Doc} ->
+ Doc;
+ Error ->
+ throw(Error)
+ end;
+ _ -> % open a specific rev (deletions come back as stubs)
+ case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
+ {ok, [{ok, Doc}]} ->
+ Doc;
+ {ok, [{{not_found, missing}, Rev}]} ->
+ throw(not_found);
+ {ok, [Else]} ->
+ throw(Else)
+ end
+ end.
+
+% Attachment request handlers
+
+db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) ->
+ FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts),"/")),
+ #doc_query_args{
+ rev=Rev,
+ options=Options
+ } = parse_doc_query(Req),
+ #doc{
+ atts=Atts
+ } = Doc = couch_doc_open(Db, DocId, Rev, Options),
+ case [A || A <- Atts, A#att.name == FileName] of
+ [] ->
+ throw({not_found, "Document is missing attachment"});
+ [#att{type=Type, encoding=Enc, disk_len=DiskLen, att_len=AttLen}=Att] ->
+ Etag = couch_httpd:doc_etag(Doc),
+ ReqAcceptsAttEnc = lists:member(
+ atom_to_list(Enc),
+ couch_httpd:accepted_encodings(Req)
+ ),
+ Len = case {Enc, ReqAcceptsAttEnc} of
+ {identity, _} ->
+ % stored and served in identity form
+ DiskLen;
+ {_, false} when DiskLen =/= AttLen ->
+ % Stored encoded, but client doesn't accept the encoding we used,
+ % so we need to decode on the fly. DiskLen is the identity length
+ % of the attachment.
+ DiskLen;
+ {_, true} ->
+ % Stored and served encoded. AttLen is the encoded length.
+ AttLen;
+ _ ->
+ % We received an encoded attachment and stored it as such, so we
+ % don't know the identity length. The client doesn't accept the
+ % encoding, and since we cannot serve a correct Content-Length
+ % header we'll fall back to a chunked response.
+ undefined
+ end,
+ Headers = [
+ {"ETag", Etag},
+ {"Cache-Control", "must-revalidate"},
+ {"Content-Type", binary_to_list(Type)}
+ ] ++ case ReqAcceptsAttEnc of
+ true when Enc =/= identity ->
+ % RFC 2616 says that the 'identity' encoding should not be used in
+ % the Content-Encoding header
+ [{"Content-Encoding", atom_to_list(Enc)}];
+ _ ->
+ []
+ end ++ case Enc of
+ identity ->
+ [{"Accept-Ranges", "bytes"}];
+ _ ->
+ [{"Accept-Ranges", "none"}]
+ end,
+ AttFun = case ReqAcceptsAttEnc of
+ false ->
+ fun couch_doc:att_foldl_decode/3;
+ true ->
+ fun couch_doc:att_foldl/3
+ end,
+ couch_httpd:etag_respond(
+ Req,
+ Etag,
+ fun() ->
+ case Len of
+ undefined ->
+ {ok, Resp} = start_chunked_response(Req, 200, Headers),
+ AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
+ last_chunk(Resp);
+ _ ->
+ Ranges = parse_ranges(MochiReq:get(range), Len),
+ case {Enc, Ranges} of
+ {identity, [{From, To}]} ->
+ Headers1 = [{<<"Content-Range">>, make_content_range(From, To, Len)}]
+ ++ Headers,
+ {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1),
+ couch_doc:range_att_foldl(Att, From, To + 1,
+ fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp});
+ {identity, Ranges} when is_list(Ranges) ->
+ send_ranges_multipart(Req, Type, Len, Att, Ranges);
+ _ ->
+ {ok, Resp} = start_response_length(Req, 200, Headers, Len),
+ AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
+ end
+ end
+ end
+ )
+ end;
+
+
+db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileNameParts)
+ when (Method == 'PUT') or (Method == 'DELETE') ->
+ FileName = validate_attachment_name(
+ mochiweb_util:join(
+ lists:map(fun binary_to_list/1,
+ FileNameParts),"/")),
+
+ NewAtt = case Method of
+ 'DELETE' ->
+ [];
+ _ ->
+ [#att{
+ name = FileName,
+ type = case couch_httpd:header_value(Req,"Content-Type") of
+ undefined ->
+ % We could throw an error here or guess by the FileName.
+ % Currently, just giving it a default.
+ <<"application/octet-stream">>;
+ CType ->
+ list_to_binary(CType)
+ end,
+ data = case couch_httpd:body_length(Req) of
+ undefined ->
+ <<"">>;
+ {unknown_transfer_encoding, Unknown} ->
+ exit({unknown_transfer_encoding, Unknown});
+ chunked ->
+ fun(MaxChunkSize, ChunkFun, InitState) ->
+ couch_httpd:recv_chunked(Req, MaxChunkSize,
+ ChunkFun, InitState)
+ end;
+ 0 ->
+ <<"">>;
+ Length when is_integer(Length) ->
+ Expect = case couch_httpd:header_value(Req, "expect") of
+ undefined ->
+ undefined;
+ Value when is_list(Value) ->
+ string:to_lower(Value)
+ end,
+ case Expect of
+ "100-continue" ->
+ MochiReq:start_raw_response({100, gb_trees:empty()});
+ _Else ->
+ ok
+ end,
+
+ fun() -> couch_httpd:recv(Req, 0) end
+ end,
+ att_len = case couch_httpd:header_value(Req,"Content-Length") of
+ undefined ->
+ undefined;
+ Length ->
+ list_to_integer(Length)
+ end,
+ md5 = get_md5_header(Req),
+ encoding = case string:to_lower(string:strip(
+ couch_httpd:header_value(Req,"Content-Encoding","identity")
+ )) of
+ "identity" ->
+ identity;
+ "gzip" ->
+ gzip;
+ _ ->
+ throw({
+ bad_ctype,
+ "Only gzip and identity content-encodings are supported"
+ })
+ end
+ }]
+ end,
+
+ Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
+ missing_rev -> % make the new doc
+ couch_doc:validate_docid(DocId),
+ #doc{id=DocId};
+ Rev ->
+ case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
+ {ok, [{ok, Doc0}]} -> Doc0;
+ {ok, [Error]} -> throw(Error)
+ end
+ end,
+
+ #doc{atts=Atts, revs = {Pos, Revs}} = Doc,
+ DocEdited = Doc#doc{
+ % prune revision list as a workaround for key tree bug (COUCHDB-902)
+ revs = {Pos, case Revs of [] -> []; [Hd|_] -> [Hd] end},
+ atts = NewAtt ++ [A || A <- Atts, A#att.name /= FileName]
+ },
+ {ok, UpdatedRev} = couch_db:update_doc(Db, DocEdited, []),
+ #db{name=DbName} = Db,
+
+ {Status, Headers} = case Method of
+ 'DELETE' ->
+ {200, []};
+ _ ->
+ {201, [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(UpdatedRev)) ++ "\""},
+ {"Location", absolute_uri(Req, "/" ++
+ binary_to_list(DbName) ++ "/" ++
+ binary_to_list(DocId) ++ "/" ++
+ binary_to_list(FileName)
+ )}]}
+ end,
+ send_json(Req,Status, Headers, {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(UpdatedRev)}
+ ]});
+
+db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
+ send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
+
+parse_ranges(undefined, _Len) ->
+ undefined;
+parse_ranges(fail, _Len) ->
+ undefined;
+parse_ranges(Ranges, Len) ->
+ parse_ranges(Ranges, Len, []).
+
+parse_ranges([], _Len, Acc) ->
+ lists:reverse(Acc);
+parse_ranges([{From, To}|_], _Len, _Acc) when is_integer(From) andalso is_integer(To) andalso To < From ->
+ throw(requested_range_not_satisfiable);
+parse_ranges([{From, To}|Rest], Len, Acc) when is_integer(To) andalso To >= Len ->
+ parse_ranges([{From, Len-1}] ++ Rest, Len, Acc);
+parse_ranges([{none, To}|Rest], Len, Acc) ->
+ parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
+parse_ranges([{From, none}|Rest], Len, Acc) ->
+ parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
+parse_ranges([{From,To}|Rest], Len, Acc) ->
+ parse_ranges(Rest, Len, [{From, To}] ++ Acc).
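+% For illustration (hedged examples with Len = 1000):
+%   [{0, 499}]    -> [{0, 499}]   first 500 bytes, unchanged
+%   [{none, 500}] -> [{500, 999}] suffix range: the last 500 bytes
+%   [{500, none}] -> [{500, 999}] from byte 500 to the end
+%   [{500, 1500}] -> [{500, 999}] To is clamped to Len - 1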
+
+get_md5_header(Req) ->
+ ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
+ Length = couch_httpd:body_length(Req),
+ Trailer = couch_httpd:header_value(Req, "Trailer"),
+ case {ContentMD5, Length, Trailer} of
+ _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
+ base64:decode(ContentMD5);
+ {_, chunked, undefined} ->
+ <<>>;
+ {_, chunked, _} ->
+ case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
+ {match, _} ->
+ md5_in_footer;
+ _ ->
+ <<>>
+ end;
+ _ ->
+ <<>>
+ end.
+
+parse_doc_query(Req) ->
+ lists:foldl(fun({Key,Value}, Args) ->
+ case {Key, Value} of
+ {"attachments", "true"} ->
+ Options = [attachments | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"meta", "true"} ->
+ Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"revs", "true"} ->
+ Options = [revs | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"local_seq", "true"} ->
+ Options = [local_seq | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"revs_info", "true"} ->
+ Options = [revs_info | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"conflicts", "true"} ->
+ Options = [conflicts | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"deleted_conflicts", "true"} ->
+ Options = [deleted_conflicts | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"rev", Rev} ->
+ Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
+ {"open_revs", "all"} ->
+ Args#doc_query_args{open_revs=all};
+ {"open_revs", RevsJsonStr} ->
+ JsonArray = ?JSON_DECODE(RevsJsonStr),
+ Args#doc_query_args{open_revs=couch_doc:parse_revs(JsonArray)};
+ {"atts_since", RevsJsonStr} ->
+ JsonArray = ?JSON_DECODE(RevsJsonStr),
+ Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
+ {"new_edits", "false"} ->
+ Args#doc_query_args{update_type=replicated_changes};
+ {"new_edits", "true"} ->
+ Args#doc_query_args{update_type=interactive_edit};
+ {"att_encoding_info", "true"} ->
+ Options = [att_encoding_info | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ _Else -> % unknown key value pair, ignore.
+ Args
+ end
+ end, #doc_query_args{}, couch_httpd:qs(Req)).
+
+parse_changes_query(Req) ->
+ lists:foldl(fun({Key, Value}, Args) ->
+ case {Key, Value} of
+ {"feed", _} ->
+ Args#changes_args{feed=Value};
+ {"descending", "true"} ->
+ Args#changes_args{dir=rev};
+ {"since", _} ->
+ Args#changes_args{since=list_to_integer(Value)};
+ {"limit", _} ->
+ Args#changes_args{limit=list_to_integer(Value)};
+ {"style", _} ->
+ Args#changes_args{style=list_to_existing_atom(Value)};
+ {"heartbeat", "true"} ->
+ Args#changes_args{heartbeat=true};
+ {"heartbeat", _} ->
+ Args#changes_args{heartbeat=list_to_integer(Value)};
+ {"timeout", _} ->
+ Args#changes_args{timeout=list_to_integer(Value)};
+ {"include_docs", "true"} ->
+ Args#changes_args{include_docs=true};
+ {"conflicts", "true"} ->
+ Args#changes_args{conflicts=true};
+ {"filter", _} ->
+ Args#changes_args{filter=Value};
+ _Else -> % unknown key value pair, ignore.
+ Args
+ end
+ end, #changes_args{}, couch_httpd:qs(Req)).
+
+extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
+ extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
+extract_header_rev(Req, ExplicitRev) ->
+ Etag = case couch_httpd:header_value(Req, "If-Match") of
+ undefined -> undefined;
+ Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
+ end,
+ case {ExplicitRev, Etag} of
+ {undefined, undefined} -> missing_rev;
+ {_, undefined} -> ExplicitRev;
+ {undefined, _} -> Etag;
+ _ when ExplicitRev == Etag -> Etag;
+ _ ->
+ throw({bad_request, "Document rev and etag have different values"})
+ end.
+
+
+parse_copy_destination_header(Req) ->
+ case couch_httpd:header_value(Req, "Destination") of
+ undefined ->
+ throw({bad_request, "Destination header in mandatory for COPY."});
+ Destination ->
+ case re:run(Destination, "\\?", [{capture, none}]) of
+ nomatch ->
+ {list_to_binary(Destination), {0, []}};
+ match ->
+ [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
+ [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
+ {Pos, RevId} = couch_doc:parse_rev(Rev),
+ {list_to_binary(DocId), {Pos, [RevId]}}
+ end
+ end.
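+% For example (illustrative): "Destination: target" yields
+% {<<"target">>, {0, []}}, while "Destination: target?rev=1-abc" yields
+% {<<"target">>, {1, [RevId]}} with RevId parsed from "1-abc".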
+
+validate_attachment_names(Doc) ->
+ lists:foreach(fun(#att{name=Name}) ->
+ validate_attachment_name(Name)
+ end, Doc#doc.atts).
+
+validate_attachment_name(Name) when is_list(Name) ->
+ validate_attachment_name(list_to_binary(Name));
+validate_attachment_name(<<"_",_/binary>>) ->
+ throw({bad_request, <<"Attachment name can't start with '_'">>});
+validate_attachment_name(Name) ->
+ case couch_util:validate_utf8(Name) of
+ true -> Name;
+ false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
+ end.
+
diff --git a/1.1.x/src/couchdb/couch_httpd_external.erl b/1.1.x/src/couchdb/couch_httpd_external.erl
new file mode 100644
index 00000000..2e91fb50
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd_external.erl
@@ -0,0 +1,169 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_external).
+
+-export([handle_external_req/2, handle_external_req/3]).
+-export([send_external_response/2, json_req_obj/2, json_req_obj/3]).
+-export([default_or_content_type/2, parse_external_response/1]).
+
+-import(couch_httpd,[send_error/4]).
+
+-include("couch_db.hrl").
+
+% handle_external_req/2
+% for the old type of config usage:
+% _external = {couch_httpd_external, handle_external_req}
+% with urls like
+% /db/_external/action/design/name
+handle_external_req(#httpd{
+ path_parts=[_DbName, _External, UrlName | _Path]
+ }=HttpReq, Db) ->
+ process_external_req(HttpReq, Db, UrlName);
+handle_external_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
+ send_error(Req, 404, <<"external_server_error">>, <<"No server name specified.">>);
+handle_external_req(Req, _) ->
+ send_error(Req, 404, <<"external_server_error">>, <<"Broken assumption">>).
+
+% handle_external_req/3
+% for this type of config usage:
+% _action = {couch_httpd_external, handle_external_req, <<"action">>}
+% with urls like
+% /db/_action/design/name
+handle_external_req(HttpReq, Db, Name) ->
+ process_external_req(HttpReq, Db, Name).
+
+process_external_req(HttpReq, Db, Name) ->
+
+ Response = couch_external_manager:execute(binary_to_list(Name),
+ json_req_obj(HttpReq, Db)),
+
+ case Response of
+ {unknown_external_server, Msg} ->
+ send_error(HttpReq, 404, <<"external_server_error">>, Msg);
+ _ ->
+ send_external_response(HttpReq, Response)
+ end.
+json_req_obj(Req, Db) -> json_req_obj(Req, Db, null).
+json_req_obj(#httpd{mochi_req=Req,
+ method=Method,
+ requested_path_parts=RequestedPath,
+ path_parts=Path,
+ req_body=ReqBody
+ }, Db, DocId) ->
+ Body = case ReqBody of
+ undefined -> Req:recv_body();
+ Else -> Else
+ end,
+ ParsedForm = case Req:get_primary_header_value("content-type") of
+ "application/x-www-form-urlencoded" ++ _ ->
+ case Body of
+ undefined -> [];
+ _ -> mochiweb_util:parse_qs(Body)
+ end;
+ _ ->
+ []
+ end,
+ Headers = Req:get(headers),
+ Hlist = mochiweb_headers:to_list(Headers),
+ {ok, Info} = couch_db:get_db_info(Db),
+
+ % add headers...
+ {[{<<"info">>, {Info}},
+ {<<"id">>, DocId},
+ {<<"uuid">>, couch_uuids:new()},
+ {<<"method">>, Method},
+ {<<"requested_path">>, RequestedPath},
+ {<<"path">>, Path},
+ {<<"query">>, json_query_keys(to_json_terms(Req:parse_qs()))},
+ {<<"headers">>, to_json_terms(Hlist)},
+ {<<"body">>, Body},
+ {<<"peer">>, ?l2b(Req:get(peer))},
+ {<<"form">>, to_json_terms(ParsedForm)},
+ {<<"cookie">>, to_json_terms(Req:parse_cookie())},
+ {<<"userCtx">>, couch_util:json_user_ctx(Db)},
+ {<<"secObj">>, couch_db:get_security(Db)}]}.
+
+to_json_terms(Data) ->
+ to_json_terms(Data, []).
+
+to_json_terms([], Acc) ->
+ {lists:reverse(Acc)};
+to_json_terms([{Key, Value} | Rest], Acc) when is_atom(Key) ->
+ to_json_terms(Rest, [{list_to_binary(atom_to_list(Key)), list_to_binary(Value)} | Acc]);
+to_json_terms([{Key, Value} | Rest], Acc) ->
+ to_json_terms(Rest, [{list_to_binary(Key), list_to_binary(Value)} | Acc]).
+
+json_query_keys({Json}) ->
+ json_query_keys(Json, []).
+json_query_keys([], Acc) ->
+ {lists:reverse(Acc)};
+json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
+ json_query_keys(Rest, [{<<"startkey">>, couch_util:json_decode(Value)}|Acc]);
+json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
+ json_query_keys(Rest, [{<<"endkey">>, couch_util:json_decode(Value)}|Acc]);
+json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
+ json_query_keys(Rest, [{<<"key">>, couch_util:json_decode(Value)}|Acc]);
+json_query_keys([Term | Rest], Acc) ->
+ json_query_keys(Rest, [Term|Acc]).
+
+send_external_response(#httpd{mochi_req=MochiReq}=Req, Response) ->
+ #extern_resp_args{
+ code = Code,
+ data = Data,
+ ctype = CType,
+ headers = Headers
+ } = parse_external_response(Response),
+ couch_httpd:log_request(Req, Code),
+ Resp = MochiReq:respond({Code,
+ default_or_content_type(CType, Headers ++ couch_httpd:server_header()), Data}),
+ {ok, Resp}.
+
+parse_external_response({Response}) ->
+ lists:foldl(fun({Key,Value}, Args) ->
+ case {Key, Value} of
+ {"", _} ->
+ Args;
+ {<<"code">>, Value} ->
+ Args#extern_resp_args{code=Value};
+ {<<"stop">>, true} ->
+ Args#extern_resp_args{stop=true};
+ {<<"json">>, Value} ->
+ Args#extern_resp_args{
+ data=?JSON_ENCODE(Value),
+ ctype="application/json"};
+ {<<"body">>, Value} ->
+ Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
+ {<<"base64">>, Value} ->
+ Args#extern_resp_args{
+ data=base64:decode(Value),
+ ctype="application/binary"
+ };
+ {<<"headers">>, {Headers}} ->
+ NewHeaders = lists:map(fun({Header, HVal}) ->
+ {binary_to_list(Header), binary_to_list(HVal)}
+ end, Headers),
+ Args#extern_resp_args{headers=NewHeaders};
+ _ -> % unknown key
+ Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
+ throw({external_response_error, Msg})
+ end
+ end, #extern_resp_args{}, Response).
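+% A sketch of a response object an external process might send back (values
+% illustrative; the keys are the ones handled above):
+%   {"code": 200, "headers": {"X-Custom": "value"}, "json": {"ok": true}}
+% Typically one of "json", "body" or "base64" supplies the response data.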
+
+default_or_content_type(DefaultContentType, Headers) ->
+ IsContentType = fun({X, _}) -> string:to_lower(X) == "content-type" end,
+ case lists:any(IsContentType, Headers) of
+ false ->
+ [{"Content-Type", DefaultContentType} | Headers];
+ true ->
+ Headers
+ end.
diff --git a/1.1.x/src/couchdb/couch_httpd_misc_handlers.erl b/1.1.x/src/couchdb/couch_httpd_misc_handlers.erl
new file mode 100644
index 00000000..213cbfd4
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd_misc_handlers.erl
@@ -0,0 +1,284 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_misc_handlers).
+
+-export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2,
+ handle_all_dbs_req/1,handle_replicate_req/1,handle_restart_req/1,
+ handle_uuids_req/1,handle_config_req/1,handle_log_req/1,
+ handle_task_status_req/1]).
+
+-export([increment_update_seq_req/2]).
+
+
+-include("couch_db.hrl").
+
+-import(couch_httpd,
+ [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
+ start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
+ start_chunked_response/3, send_error/4]).
+
+% httpd global handlers
+
+handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
+ send_json(Req, {[
+ {couchdb, WelcomeMessage},
+ {version, list_to_binary(couch_server:get_version())}
+ ]});
+handle_welcome_req(Req, _) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
+ {{Year,Month,Day},Time} = erlang:localtime(),
+ OneYearFromNow = {{Year+1,Month,Day},Time},
+ CachingHeaders = [
+ % favicon should expire a year from now
+ {"Cache-Control", "public, max-age=31536000"},
+ {"Expires", httpd_util:rfc1123_date(OneYearFromNow)}
+ ],
+ couch_httpd:serve_file(Req, "favicon.ico", DocumentRoot, CachingHeaders);
+
+handle_favicon_req(Req, _) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
+ "/" ++ UrlPath = couch_httpd:path(Req),
+ case couch_httpd:partition(UrlPath) of
+ {_ActionKey, "/", RelativePath} ->
+ % GET /_utils/path or GET /_utils/
+ couch_httpd:serve_file(Req, RelativePath, DocumentRoot);
+ {_ActionKey, "", _RelativePath} ->
+ % GET /_utils
+ RedirectPath = couch_httpd:path(Req) ++ "/",
+ couch_httpd:send_redirect(Req, RedirectPath)
+ end;
+handle_utils_dir_req(Req, _) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+handle_all_dbs_req(#httpd{method='GET'}=Req) ->
+ {ok, DbNames} = couch_server:all_databases(),
+ send_json(Req, DbNames);
+handle_all_dbs_req(Req) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+
+handle_task_status_req(#httpd{method='GET'}=Req) ->
+ ok = couch_httpd:verify_is_server_admin(Req),
+ % convert the list of prop lists to a list of json objects
+ send_json(Req, [{Props} || Props <- couch_task_status:all()]);
+handle_task_status_req(Req) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+handle_replicate_req(#httpd{method='POST'}=Req) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ PostBody = couch_httpd:json_body_obj(Req),
+ try couch_rep:replicate(PostBody, Req#httpd.user_ctx) of
+ {ok, {continuous, RepId}} ->
+ send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
+ {ok, {cancelled, RepId}} ->
+ send_json(Req, 200, {[{ok, true}, {<<"_local_id">>, RepId}]});
+ {ok, {JsonResults}} ->
+ send_json(Req, {[{ok, true} | JsonResults]});
+ {error, {Type, Details}} ->
+ send_json(Req, 500, {[{error, Type}, {reason, Details}]});
+ {error, not_found} ->
+ send_json(Req, 404, {[{error, not_found}]});
+ {error, Reason} ->
+ try
+ send_json(Req, 500, {[{error, Reason}]})
+ catch
+ exit:{json_encode, _} ->
+ send_json(Req, 500, {[{error, couch_util:to_binary(Reason)}]})
+ end
+ catch
+ throw:{db_not_found, Msg} ->
+ send_json(Req, 404, {[{error, db_not_found}, {reason, Msg}]});
+ throw:{unauthorized, Msg} ->
+ send_json(Req, 404, {[{error, unauthorized}, {reason, Msg}]})
+ end;
+handle_replicate_req(Req) ->
+ send_method_not_allowed(Req, "POST").
+
+
+handle_restart_req(#httpd{method='POST'}=Req) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ ok = couch_httpd:verify_is_server_admin(Req),
+ couch_server_sup:restart_core_server(),
+ send_json(Req, 200, {[{ok, true}]});
+handle_restart_req(Req) ->
+ send_method_not_allowed(Req, "POST").
+
+
+handle_uuids_req(#httpd{method='GET'}=Req) ->
+ Count = list_to_integer(couch_httpd:qs_value(Req, "count", "1")),
+ UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
+ Etag = couch_httpd:make_etag(UUIDs),
+ couch_httpd:etag_respond(Req, Etag, fun() ->
+ CacheBustingHeaders = [
+ {"Date", httpd_util:rfc1123_date()},
+ {"Cache-Control", "no-cache"},
+ % Past date, ON PURPOSE!
+ {"Expires", "Fri, 01 Jan 1990 00:00:00 GMT"},
+ {"Pragma", "no-cache"},
+ {"ETag", Etag}
+ ],
+ send_json(Req, 200, CacheBustingHeaders, {[{<<"uuids">>, UUIDs}]})
+ end);
+handle_uuids_req(Req) ->
+ send_method_not_allowed(Req, "GET").
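+% Illustrative usage: GET /_uuids?count=2 responds with
+%   {"uuids": ["...", "..."]}
+% under the cache-busting headers above, so proxies never serve a stale batch.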
+
+
+% Config request handler
+
+
+% GET /_config/
+% GET /_config
+handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
+ ok = couch_httpd:verify_is_server_admin(Req),
+ Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
+ case dict:is_key(Section, Acc) of
+ true ->
+ dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
+ false ->
+ dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
+ end
+ end, dict:new(), couch_config:all()),
+ KVs = dict:fold(fun(Section, Values, Acc) ->
+ [{list_to_binary(Section), {Values}} | Acc]
+ end, [], Grouped),
+ send_json(Req, 200, {KVs});
+% GET /_config/Section
+handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) ->
+ ok = couch_httpd:verify_is_server_admin(Req),
+ KVs = [{list_to_binary(Key), list_to_binary(Value)}
+ || {Key, Value} <- couch_config:get(Section)],
+ send_json(Req, 200, {KVs});
+% GET /_config/Section/Key
+handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) ->
+ ok = couch_httpd:verify_is_server_admin(Req),
+ case couch_config:get(Section, Key, null) of
+ null ->
+ throw({not_found, unknown_config_value});
+ Value ->
+ send_json(Req, 200, list_to_binary(Value))
+ end;
+% PUT or DELETE /_config/Section/Key
+handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req)
+ when (Method == 'PUT') or (Method == 'DELETE') ->
+ ok = couch_httpd:verify_is_server_admin(Req),
+ Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false",
+ case couch_config:get(<<"httpd">>, <<"config_whitelist">>, null) of
+ null ->
+ % No whitelist; allow all changes.
+ handle_approved_config_req(Req, Persist);
+ WhitelistValue ->
+ % Provide a failsafe to protect against inadvertently locking
+ % oneself out of the config by supplying a syntactically incorrect
+ % Erlang term. To intentionally lock down the whitelist, supply a
+ % well-formed list which does not include the whitelist config
+ % variable itself.
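+ % An illustrative whitelist value, set under the [httpd] section
+ % (tuples or binaries are both accepted, as noted below):
+ %   config_whitelist = [{httpd,config_whitelist}, {log,level}]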
+ FallbackWhitelist = [{<<"httpd">>, <<"config_whitelist">>}],
+
+ Whitelist = case couch_util:parse_term(WhitelistValue) of
+ {ok, Value} when is_list(Value) ->
+ Value;
+ {ok, _NonListValue} ->
+ FallbackWhitelist;
+ {error, _} ->
+ [{WhitelistSection, WhitelistKey}] = FallbackWhitelist,
+ ?LOG_ERROR("Only whitelisting ~s/~s due to error parsing: ~p",
+ [WhitelistSection, WhitelistKey, WhitelistValue]),
+ FallbackWhitelist
+ end,
+
+ IsRequestedKeyVal = fun(Element) ->
+ case Element of
+ {A, B} ->
+ % For readability, tuples may be used instead of binaries
+ % in the whitelist.
+ case {couch_util:to_binary(A), couch_util:to_binary(B)} of
+ {Section, Key} ->
+ true;
+ {Section, <<"*">>} ->
+ true;
+ _Else ->
+ false
+ end;
+ _Else ->
+ false
+ end
+ end,
+
+ case lists:any(IsRequestedKeyVal, Whitelist) of
+ true ->
+ % Allow modifying this whitelisted variable.
+ handle_approved_config_req(Req, Persist);
+ _NotWhitelisted ->
+ % Disallow modifying this non-whitelisted variable.
+ send_error(Req, 400, <<"modification_not_allowed">>,
+ ?l2b("This config variable is read-only"))
+ end
+ end;
+handle_config_req(Req) ->
+ send_method_not_allowed(Req, "GET,PUT,DELETE").
+
+% PUT /_config/Section/Key
+% "value"
+handle_approved_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req, Persist) ->
+ Value = couch_httpd:json_body(Req),
+ OldValue = couch_config:get(Section, Key, ""),
+ case couch_config:set(Section, Key, ?b2l(Value), Persist) of
+ ok ->
+ send_json(Req, 200, list_to_binary(OldValue));
+ Error ->
+ throw(Error)
+ end;
+% DELETE /_config/Section/Key
+handle_approved_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req, Persist) ->
+ case couch_config:get(Section, Key, null) of
+ null ->
+ throw({not_found, unknown_config_value});
+ OldValue ->
+ couch_config:delete(Section, Key, Persist),
+ send_json(Req, 200, list_to_binary(OldValue))
+ end.
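+% Illustrative usage: PUT /_config/log/level with body "debug" returns the
+% previous value (e.g. "info"); DELETE /_config/log/level removes the key
+% and likewise returns the old value. Send X-Couch-Persist: false to keep
+% the change in memory only.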
+
+
+% httpd db handlers
+
+increment_update_seq_req(#httpd{method='POST'}=Req, Db) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ {ok, NewSeq} = couch_db:increment_update_seq(Db),
+ send_json(Req, {[{ok, true},
+ {update_seq, NewSeq}
+ ]});
+increment_update_seq_req(Req, _Db) ->
+ send_method_not_allowed(Req, "POST").
+
+% httpd log handlers
+
+handle_log_req(#httpd{method='GET'}=Req) ->
+ ok = couch_httpd:verify_is_server_admin(Req),
+ Bytes = list_to_integer(couch_httpd:qs_value(Req, "bytes", "1000")),
+ Offset = list_to_integer(couch_httpd:qs_value(Req, "offset", "0")),
+ Chunk = couch_log:read(Bytes, Offset),
+ {ok, Resp} = start_chunked_response(Req, 200, [
+ % send a plaintext response
+ {"Content-Type", "text/plain; charset=utf-8"},
+ {"Content-Length", integer_to_list(length(Chunk))}
+ ]),
+ send_chunk(Resp, Chunk),
+ last_chunk(Resp);
+handle_log_req(Req) ->
+ send_method_not_allowed(Req, "GET").
+
+
diff --git a/1.1.x/src/couchdb/couch_httpd_oauth.erl b/1.1.x/src/couchdb/couch_httpd_oauth.erl
new file mode 100644
index 00000000..05ee10e2
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd_oauth.erl
@@ -0,0 +1,176 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_oauth).
+-include("couch_db.hrl").
+
+-export([oauth_authentication_handler/1, handle_oauth_req/1, consumer_lookup/2]).
+
+% OAuth auth handler using per-node user db
+oauth_authentication_handler(#httpd{mochi_req=MochiReq}=Req) ->
+ serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
+ AccessToken = couch_util:get_value("oauth_token", Params),
+ case couch_config:get("oauth_token_secrets", AccessToken) of
+ undefined ->
+ couch_httpd:send_error(Req, 400, <<"invalid_token">>,
+ <<"Invalid OAuth token.">>);
+ TokenSecret ->
+ ?LOG_DEBUG("OAuth URL is: ~p", [URL]),
+ case oauth:verify(Signature, atom_to_list(MochiReq:get(method)), URL, Params, Consumer, TokenSecret) of
+ true ->
+ set_user_ctx(Req, AccessToken);
+ false ->
+ Req
+ end
+ end
+ end, true).
+
+% Look up the consumer key and get the roles to give the consumer
+set_user_ctx(Req, AccessToken) ->
+ % TODO move to db storage
+ Name = case couch_config:get("oauth_token_users", AccessToken) of
+ undefined -> throw({bad_request, unknown_oauth_token});
+ Value -> ?l2b(Value)
+ end,
+ case couch_auth_cache:get_user_creds(Name) of
+ nil -> Req;
+ User ->
+ Roles = couch_util:get_value(<<"roles">>, User, []),
+ Req#httpd{user_ctx=#user_ctx{name=Name, roles=Roles}}
+ end.
+
+% OAuth request_token
+handle_oauth_req(#httpd{path_parts=[_OAuth, <<"request_token">>], method=Method}=Req) ->
+ serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
+ AccessToken = couch_util:get_value("oauth_token", Params),
+ TokenSecret = couch_config:get("oauth_token_secrets", AccessToken),
+ case oauth:verify(Signature, atom_to_list(Method), URL, Params, Consumer, TokenSecret) of
+ true ->
+ ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
+ false ->
+ invalid_signature(Req)
+ end
+ end, false);
+handle_oauth_req(#httpd{path_parts=[_OAuth, <<"authorize">>]}=Req) ->
+ {ok, serve_oauth_authorize(Req)};
+handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>], method='GET'}=Req) ->
+ serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
+ case oauth:token(Params) of
+ "requestkey" ->
+ case oauth:verify(Signature, "GET", URL, Params, Consumer, "requestsecret") of
+ true ->
+ ok(Req, <<"oauth_token=accesskey&oauth_token_secret=accesssecret">>);
+ false ->
+ invalid_signature(Req)
+ end;
+ _ ->
+ couch_httpd:send_error(Req, 400, <<"invalid_token">>, <<"Invalid OAuth token.">>)
+ end
+ end, false);
+handle_oauth_req(#httpd{path_parts=[_OAuth, <<"access_token">>]}=Req) ->
+ couch_httpd:send_method_not_allowed(Req, "GET").
+
+invalid_signature(Req) ->
+ couch_httpd:send_error(Req, 400, <<"invalid_signature">>, <<"Invalid signature value.">>).
+
+% This needs to be protected, i.e. force the user to log in using HTTP Basic Auth or form-based login.
+serve_oauth_authorize(#httpd{method=Method}=Req) ->
+ case Method of
+ 'GET' ->
+ % Confirm with the User that they want to authenticate the Consumer
+ serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
+ AccessToken = couch_util:get_value("oauth_token", Params),
+ TokenSecret = couch_config:get("oauth_token_secrets", AccessToken),
+ case oauth:verify(Signature, "GET", URL, Params, Consumer, TokenSecret) of
+ true ->
+ ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
+ false ->
+ invalid_signature(Req)
+ end
+ end, false);
+ 'POST' ->
+ % If the User has confirmed, we direct the User back to the Consumer with a verification code
+ serve_oauth(Req, fun(URL, Params, Consumer, Signature) ->
+ AccessToken = couch_util:get_value("oauth_token", Params),
+ TokenSecret = couch_config:get("oauth_token_secrets", AccessToken),
+ case oauth:verify(Signature, "POST", URL, Params, Consumer, TokenSecret) of
+ true ->
+ %redirect(oauth_callback, oauth_token, oauth_verifier),
+ ok(Req, <<"oauth_token=requestkey&oauth_token_secret=requestsecret">>);
+ false ->
+ invalid_signature(Req)
+ end
+ end, false);
+ _ ->
+ couch_httpd:send_method_not_allowed(Req, "GET,POST")
+ end.
+
+serve_oauth(#httpd{mochi_req=MochiReq}=Req, Fun, FailSilently) ->
+ % 1. In the HTTP Authorization header as defined in OAuth HTTP Authorization Scheme.
+ % 2. As the HTTP POST request body with a content-type of application/x-www-form-urlencoded.
+ % 3. Added to the URLs in the query part (as defined by [RFC3986] section 3).
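+ % For example, method 1 might carry parameters like this (placeholder
+ % values):
+ %   Authorization: OAuth oauth_consumer_key="key", oauth_token="token",
+ %     oauth_signature_method="HMAC-SHA1", oauth_signature="...",
+ %     oauth_version="1.0"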
+ AuthHeader = case MochiReq:get_header_value("authorization") of
+ undefined ->
+ "";
+ Else ->
+ [Head | Tail] = re:split(Else, "\\s", [{parts, 2}, {return, list}]),
+ case [string:to_lower(Head) | Tail] of
+ ["oauth", Rest] -> Rest;
+ _ -> ""
+ end
+ end,
+ HeaderParams = oauth_uri:params_from_header_string(AuthHeader),
+ %Realm = couch_util:get_value("realm", HeaderParams),
+ Params = proplists:delete("realm", HeaderParams) ++ MochiReq:parse_qs(),
+ ?LOG_DEBUG("OAuth Params: ~p", [Params]),
+ case couch_util:get_value("oauth_version", Params, "1.0") of
+ "1.0" ->
+ case couch_util:get_value("oauth_consumer_key", Params, undefined) of
+ undefined ->
+ case FailSilently of
+ true -> Req;
+ false -> couch_httpd:send_error(Req, 400, <<"invalid_consumer">>, <<"Invalid consumer.">>)
+ end;
+ ConsumerKey ->
+ SigMethod = couch_util:get_value("oauth_signature_method", Params),
+ case consumer_lookup(ConsumerKey, SigMethod) of
+ none ->
+ couch_httpd:send_error(Req, 400, <<"invalid_consumer">>, <<"Invalid consumer (key or signature method).">>);
+ Consumer ->
+ Signature = couch_util:get_value("oauth_signature", Params),
+ URL = couch_httpd:absolute_uri(Req, MochiReq:get(raw_path)),
+ Fun(URL, proplists:delete("oauth_signature", Params),
+ Consumer, Signature)
+ end
+ end;
+ _ ->
+ couch_httpd:send_error(Req, 400, <<"invalid_oauth_version">>, <<"Invalid OAuth version.">>)
+ end.
+
+consumer_lookup(Key, MethodStr) ->
+ SignatureMethod = case MethodStr of
+ "PLAINTEXT" -> plaintext;
+ "HMAC-SHA1" -> hmac_sha1;
+ %"RSA-SHA1" -> rsa_sha1;
+ _Else -> undefined
+ end,
+ case SignatureMethod of
+ undefined -> none;
+ _SupportedMethod ->
+ case couch_config:get("oauth_consumer_secrets", Key, undefined) of
+ undefined -> none;
+ Secret -> {Key, Secret, SignatureMethod}
+ end
+ end.
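+% Consumers and tokens are looked up in the config sections used above;
+% an illustrative configuration:
+%   [oauth_consumer_secrets]   consumer_key = consumer_secret
+%   [oauth_token_secrets]      token = token_secret
+%   [oauth_token_users]        token = username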
+
+ok(#httpd{mochi_req=MochiReq}, Body) ->
+ {ok, MochiReq:respond({200, [], Body})}.
diff --git a/1.1.x/src/couchdb/couch_httpd_proxy.erl b/1.1.x/src/couchdb/couch_httpd_proxy.erl
new file mode 100644
index 00000000..65e3e432
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd_proxy.erl
@@ -0,0 +1,431 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+-module(couch_httpd_proxy).
+
+-export([handle_proxy_req/2]).
+
+-include("couch_db.hrl").
+-include("../ibrowse/ibrowse.hrl").
+
+-define(TIMEOUT, infinity).
+-define(PKT_SIZE, 4096).
+
+
+handle_proxy_req(Req, ProxyDest) ->
+
+ %% Bug in Mochiweb?
+ %% Reported here: http://github.com/mochi/mochiweb/issues/issue/16
+ erase(mochiweb_request_body_length),
+
+ Method = get_method(Req),
+ Url = get_url(Req, ProxyDest),
+ Version = get_version(Req),
+ Headers = get_headers(Req),
+ Body = get_body(Req),
+ Options = [
+ {http_vsn, Version},
+ {headers_as_is, true},
+ {response_format, binary},
+ {stream_to, {self(), once}}
+ ],
+ case ibrowse:send_req(Url, Headers, Method, Body, Options, ?TIMEOUT) of
+ {ibrowse_req_id, ReqId} ->
+ stream_response(Req, ProxyDest, ReqId);
+ {error, Reason} ->
+ throw({error, Reason})
+ end.
+
+
+get_method(#httpd{mochi_req=MochiReq}) ->
+ case MochiReq:get(method) of
+ Method when is_atom(Method) ->
+ list_to_atom(string:to_lower(atom_to_list(Method)));
+ Method when is_list(Method) ->
+ list_to_atom(string:to_lower(Method));
+ Method when is_binary(Method) ->
+ list_to_atom(string:to_lower(?b2l(Method)))
+ end.
+
+
+get_url(Req, ProxyDest) when is_binary(ProxyDest) ->
+ get_url(Req, ?b2l(ProxyDest));
+get_url(#httpd{mochi_req=MochiReq}=Req, ProxyDest) ->
+ BaseUrl = case mochiweb_util:partition(ProxyDest, "/") of
+ {[], "/", _} -> couch_httpd:absolute_uri(Req, ProxyDest);
+ _ -> ProxyDest
+ end,
+ ProxyPrefix = "/" ++ ?b2l(hd(Req#httpd.path_parts)),
+ RequestedPath = MochiReq:get(raw_path),
+ case mochiweb_util:partition(RequestedPath, ProxyPrefix) of
+ {[], ProxyPrefix, []} ->
+ BaseUrl;
+ {[], ProxyPrefix, [$/ | DestPath]} ->
+ remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
+ {[], ProxyPrefix, DestPath} ->
+ remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
+ _Else ->
+ throw({invalid_url_path, {ProxyPrefix, RequestedPath}})
+ end.
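+% Illustratively, with a proxy handler mounted at /_proxy and ProxyDest
+% "http://127.0.0.1:5984", GET /_proxy/some/path is forwarded to
+% http://127.0.0.1:5984/some/path.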
+
+get_version(#httpd{mochi_req=MochiReq}) ->
+ MochiReq:get(version).
+
+
+get_headers(#httpd{mochi_req=MochiReq}) ->
+ to_ibrowse_headers(mochiweb_headers:to_list(MochiReq:get(headers)), []).
+
+to_ibrowse_headers([], Acc) ->
+ lists:reverse(Acc);
+to_ibrowse_headers([{K, V} | Rest], Acc) when is_atom(K) ->
+ to_ibrowse_headers([{atom_to_list(K), V} | Rest], Acc);
+to_ibrowse_headers([{K, V} | Rest], Acc) when is_list(K) ->
+ case string:to_lower(K) of
+ "content-length" ->
+ to_ibrowse_headers(Rest, [{content_length, V} | Acc]);
+ % This appears to make ibrowse too smart.
+ %"transfer-encoding" ->
+ % to_ibrowse_headers(Rest, [{transfer_encoding, V} | Acc]);
+ _ ->
+ to_ibrowse_headers(Rest, [{K, V} | Acc])
+ end.
+
+get_body(#httpd{method='GET'}) ->
+ fun() -> eof end;
+get_body(#httpd{method='HEAD'}) ->
+ fun() -> eof end;
+get_body(#httpd{method='DELETE'}) ->
+ fun() -> eof end;
+get_body(#httpd{mochi_req=MochiReq}) ->
+ case MochiReq:get(body_length) of
+ undefined ->
+ <<>>;
+ {unknown_transfer_encoding, Unknown} ->
+ exit({unknown_transfer_encoding, Unknown});
+ chunked ->
+ {fun stream_chunked_body/1, {init, MochiReq, 0}};
+ 0 ->
+ <<>>;
+ Length when is_integer(Length) andalso Length > 0 ->
+ {fun stream_length_body/1, {init, MochiReq, Length}};
+ Length ->
+ exit({invalid_body_length, Length})
+ end.
+
+
+remove_trailing_slash(Url) ->
+ rem_slash(lists:reverse(Url)).
+
+rem_slash([]) ->
+ [];
+rem_slash([$\s | RevUrl]) ->
+ rem_slash(RevUrl);
+rem_slash([$\t | RevUrl]) ->
+ rem_slash(RevUrl);
+rem_slash([$\r | RevUrl]) ->
+ rem_slash(RevUrl);
+rem_slash([$\n | RevUrl]) ->
+ rem_slash(RevUrl);
+rem_slash([$/ | RevUrl]) ->
+ rem_slash(RevUrl);
+rem_slash(RevUrl) ->
+ lists:reverse(RevUrl).
+
+
+stream_chunked_body({init, MReq, 0}) ->
+ % First chunk, do expect-continue dance.
+ init_body_stream(MReq),
+ stream_chunked_body({stream, MReq, 0, [], ?PKT_SIZE});
+stream_chunked_body({stream, MReq, 0, Buf, BRem}) ->
+ % Finished a chunk, get next length. If next length
+ % is 0, it's time to try and read trailers.
+ {CRem, Data} = read_chunk_length(MReq),
+ case CRem of
+ 0 ->
+ BodyData = lists:reverse(Buf, Data),
+ {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
+ _ ->
+ stream_chunked_body(
+ {stream, MReq, CRem, [Data | Buf], BRem-size(Data)}
+ )
+ end;
+stream_chunked_body({stream, MReq, CRem, Buf, BRem}) when BRem =< 0 ->
+ % Time to empty our buffers to the upstream socket.
+ BodyData = lists:reverse(Buf),
+ {ok, BodyData, {stream, MReq, CRem, [], ?PKT_SIZE}};
+stream_chunked_body({stream, MReq, CRem, Buf, BRem}) ->
+ % Buffer some more data from the client.
+ Length = lists:min([CRem, BRem]),
+ Socket = MReq:get(socket),
+ NewState = case mochiweb_socket:recv(Socket, Length, ?TIMEOUT) of
+ {ok, Data} when size(Data) == CRem ->
+ case mochiweb_socket:recv(Socket, 2, ?TIMEOUT) of
+ {ok, <<"\r\n">>} ->
+ {stream, MReq, 0, [<<"\r\n">>, Data | Buf], BRem-Length-2};
+ _ ->
+ exit(normal)
+ end;
+ {ok, Data} ->
+ {stream, MReq, CRem-Length, [Data | Buf], BRem-Length};
+ _ ->
+ exit(normal)
+ end,
+ stream_chunked_body(NewState);
+stream_chunked_body({trailers, MReq, Buf, BRem}) when BRem =< 0 ->
+ % Empty our buffers and send data upstream.
+ BodyData = lists:reverse(Buf),
+ {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
+stream_chunked_body({trailers, MReq, Buf, BRem}) ->
+ % Read another trailer into the buffer or stop on an
+ % empty line.
+ Socket = MReq:get(socket),
+ mochiweb_socket:setopts(Socket, [{packet, line}]),
+ case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
+ {ok, <<"\r\n">>} ->
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ BodyData = lists:reverse(Buf, <<"\r\n">>),
+ {ok, BodyData, eof};
+ {ok, Footer} ->
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ NewState = {trailers, MReq, [Footer | Buf], BRem-size(Footer)},
+ stream_chunked_body(NewState);
+ _ ->
+ exit(normal)
+ end;
+stream_chunked_body(eof) ->
+ % Tell ibrowse we're done sending data.
+ eof.
+
+
+stream_length_body({init, MochiReq, Length}) ->
+ % Do the expect-continue dance
+ init_body_stream(MochiReq),
+ stream_length_body({stream, MochiReq, Length});
+stream_length_body({stream, _MochiReq, 0}) ->
+ % Finished streaming.
+ eof;
+stream_length_body({stream, MochiReq, Length}) ->
+ BufLen = lists:min([Length, ?PKT_SIZE]),
+ case MochiReq:recv(BufLen) of
+ <<>> -> eof;
+ Bin -> {ok, Bin, {stream, MochiReq, Length-BufLen}}
+ end.
+
+
+init_body_stream(MochiReq) ->
+ Expect = case MochiReq:get_header_value("expect") of
+ undefined ->
+ undefined;
+ Value when is_list(Value) ->
+ string:to_lower(Value)
+ end,
+ case Expect of
+ "100-continue" ->
+ MochiReq:start_raw_response({100, gb_trees:empty()});
+ _Else ->
+ ok
+ end.
+
+
+read_chunk_length(MochiReq) ->
+ Socket = MochiReq:get(socket),
+ mochiweb_socket:setopts(Socket, [{packet, line}]),
+ case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
+ {ok, Header} ->
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ Splitter = fun(C) ->
+ C =/= $\r andalso C =/= $\n andalso C =/= $\s
+ end,
+ {Hex, _Rest} = lists:splitwith(Splitter, ?b2l(Header)),
+ {mochihex:to_int(Hex), Header};
+ _ ->
+ exit(normal)
+ end.
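+% The chunk-size line read above is the chunk length in hex followed by
+% CRLF, e.g. "1a3\r\n"; the splitter stops at CR, LF or space.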
+
+
+stream_response(Req, ProxyDest, ReqId) ->
+ receive
+ {ibrowse_async_headers, ReqId, "100", _} ->
+ % ibrowse doesn't handle 100 Continue responses which
+ % means we have to discard them so the proxy client
+ % doesn't get confused.
+ ibrowse:stream_next(ReqId),
+ stream_response(Req, ProxyDest, ReqId);
+ {ibrowse_async_headers, ReqId, Status, Headers} ->
+ {Source, Dest} = get_urls(Req, ProxyDest),
+ FixedHeaders = fix_headers(Source, Dest, Headers, []),
+ case body_length(FixedHeaders) of
+ chunked ->
+ {ok, Resp} = couch_httpd:start_chunked_response(
+ Req, list_to_integer(Status), FixedHeaders
+ ),
+ ibrowse:stream_next(ReqId),
+ stream_chunked_response(Req, ReqId, Resp),
+ {ok, Resp};
+ Length when is_integer(Length) ->
+ {ok, Resp} = couch_httpd:start_response_length(
+ Req, list_to_integer(Status), FixedHeaders, Length
+ ),
+ ibrowse:stream_next(ReqId),
+ stream_length_response(Req, ReqId, Resp),
+ {ok, Resp};
+ _ ->
+ {ok, Resp} = couch_httpd:start_response(
+ Req, list_to_integer(Status), FixedHeaders
+ ),
+ ibrowse:stream_next(ReqId),
+ stream_length_response(Req, ReqId, Resp),
+ % XXX: MochiWeb apparently doesn't look at the
+ % response to see if it must force close the
+ % connection. So we help it out here.
+ erlang:put(mochiweb_request_force_close, true),
+ {ok, Resp}
+ end
+ end.
+
+
+stream_chunked_response(Req, ReqId, Resp) ->
+ receive
+ {ibrowse_async_response, ReqId, {error, Reason}} ->
+ throw({error, Reason});
+ {ibrowse_async_response, ReqId, Chunk} ->
+ couch_httpd:send_chunk(Resp, Chunk),
+ ibrowse:stream_next(ReqId),
+ stream_chunked_response(Req, ReqId, Resp);
+ {ibrowse_async_response_end, ReqId} ->
+ couch_httpd:last_chunk(Resp)
+ end.
+
+
+stream_length_response(Req, ReqId, Resp) ->
+ receive
+ {ibrowse_async_response, ReqId, {error, Reason}} ->
+ throw({error, Reason});
+ {ibrowse_async_response, ReqId, Chunk} ->
+ couch_httpd:send(Resp, Chunk),
+ ibrowse:stream_next(ReqId),
+ stream_length_response(Req, ReqId, Resp);
+ {ibrowse_async_response_end, ReqId} ->
+ ok
+ end.
+
+
+get_urls(Req, ProxyDest) ->
+ SourceUrl = couch_httpd:absolute_uri(Req, "/" ++ hd(Req#httpd.path_parts)),
+ Source = parse_url(?b2l(iolist_to_binary(SourceUrl))),
+ case (catch parse_url(ProxyDest)) of
+ Dest when is_record(Dest, url) ->
+ {Source, Dest};
+ _ ->
+ DestUrl = couch_httpd:absolute_uri(Req, ProxyDest),
+ {Source, parse_url(DestUrl)}
+ end.
+
+
+fix_headers(_, _, [], Acc) ->
+ lists:reverse(Acc);
+fix_headers(Source, Dest, [{K, V} | Rest], Acc) ->
+ Fixed = case string:to_lower(K) of
+ "location" -> rewrite_location(Source, Dest, V);
+ "content-location" -> rewrite_location(Source, Dest, V);
+ "uri" -> rewrite_location(Source, Dest, V);
+ "destination" -> rewrite_location(Source, Dest, V);
+ "set-cookie" -> rewrite_cookie(Source, Dest, V);
+ _ -> V
+ end,
+ fix_headers(Source, Dest, Rest, [{K, Fixed} | Acc]).
+
+
+rewrite_location(Source, #url{host=Host, port=Port, protocol=Proto}, Url) ->
+ case (catch parse_url(Url)) of
+ #url{host=Host, port=Port, protocol=Proto} = Location ->
+ DestLoc = #url{
+ protocol=Source#url.protocol,
+ host=Source#url.host,
+ port=Source#url.port,
+ path=join_url_path(Source#url.path, Location#url.path)
+ },
+ url_to_url(DestLoc);
+ #url{} ->
+ Url;
+ _ ->
+ url_to_url(Source#url{path=join_url_path(Source#url.path, Url)})
+ end.
+
+
+rewrite_cookie(_Source, _Dest, Cookie) ->
+ Cookie.
+
+
+parse_url(Url) when is_binary(Url) ->
+ ibrowse_lib:parse_url(?b2l(Url));
+parse_url(Url) when is_list(Url) ->
+ ibrowse_lib:parse_url(?b2l(iolist_to_binary(Url))).
+
+
+join_url_path(Src, Dst) ->
+ Src2 = case lists:reverse(Src) of
+ "/" ++ RestSrc -> lists:reverse(RestSrc);
+ _ -> Src
+ end,
+ Dst2 = case Dst of
+ "/" ++ RestDst -> RestDst;
+ _ -> Dst
+ end,
+ Src2 ++ "/" ++ Dst2.
+
+
+url_to_url(#url{host=Host, port=Port, path=Path, protocol=Proto} = Url) ->
+ LPort = case {Proto, Port} of
+ {http, 80} -> "";
+ {https, 443} -> "";
+ _ -> ":" ++ integer_to_list(Port)
+ end,
+ LPath = case Path of
+ "/" ++ _RestPath -> Path;
+ _ -> "/" ++ Path
+ end,
+ HostPart = case Url#url.host_type of
+ ipv6_address ->
+ "[" ++ Host ++ "]";
+ _ ->
+ Host
+ end,
+ atom_to_list(Proto) ++ "://" ++ HostPart ++ LPort ++ LPath.
+
+
+body_length(Headers) ->
+ case is_chunked(Headers) of
+ true -> chunked;
+ _ -> content_length(Headers)
+ end.
+
+
+is_chunked([]) ->
+ false;
+is_chunked([{K, V} | Rest]) ->
+ case string:to_lower(K) of
+ "transfer-encoding" ->
+ string:to_lower(V) == "chunked";
+ _ ->
+ is_chunked(Rest)
+ end.
+
+content_length([]) ->
+ undefined;
+content_length([{K, V} | Rest]) ->
+ case string:to_lower(K) of
+ "content-length" ->
+ list_to_integer(V);
+ _ ->
+ content_length(Rest)
+ end.
+
diff --git a/1.1.x/src/couchdb/couch_httpd_rewrite.erl b/1.1.x/src/couchdb/couch_httpd_rewrite.erl
new file mode 100644
index 00000000..a8297ae1
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd_rewrite.erl
@@ -0,0 +1,434 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+%
+% bind_path is based on the bind method from Webmachine
+
+
+%% @doc Module for URL rewriting by pattern matching.
+
+-module(couch_httpd_rewrite).
+-export([handle_rewrite_req/3]).
+-include("couch_db.hrl").
+
+-define(SEPARATOR, $\/).
+-define(MATCH_ALL, {bind, <<"*">>}).
+
+
+%% @doc The HTTP rewrite handler. All rewriting is done from
+%% /dbname/_design/ddocname/_rewrite by default.
+%%
+%% Each rule should be in the "rewrites" member of the design doc.
+%% Example of a complete rule:
+%%
+%%  {
+%%      ....
+%%      "rewrites": [
+%%          {
+%%              "from": "",
+%%              "to": "index.html",
+%%              "method": "GET",
+%%              "query": {}
+%%          }
+%%      ]
+%%  }
+%%
+%% from: the path rule used to bind the current URI to the rule; it
+%% uses pattern matching.
+%%
+%% to: the rule used to rewrite the URL. It can contain variables, filled
+%% in from bindings discovered during pattern matching and from query args
+%% (URL args and the "query" member).
+%%
+%% method: binds the request method to the rule; "*" by default.
+%% query: query args you want to define; they can contain dynamic
+%% variables bound from the bindings.
+%%
+%% to and from are paths with patterns. A pattern can be a string starting
+%% with ":" or "*", e.g.:
+%%  /somepath/:var/*
+%%
+%% Such a path is converted into an Erlang list by splitting on "/". Each
+%% variable is converted into an atom, and "*" becomes the '*' atom. The
+%% pattern matching splits the request URL on "/" into a list of tokens. A
+%% string pattern matches an equal token. The star atom ('*' in single
+%% quotes) matches any number of tokens, but may only appear as the last
+%% pathterm in a pathspec. If all tokens are matched and all pathterms are
+%% used, the pathspec matches. This works like webmachine. Each identified
+%% token is reused in the to rule and in the query.
+%%
+%% Matching first checks the request method against the rule; by default
+%% all methods match a rule (method is "*" by default). Then the path is
+%% matched against each rule in turn. If no rule matches, a 404 error is
+%% returned.
+%%
+%% Once a rule is found, the request URL is rewritten using the "to" and
+%% "query" members. Identified tokens replace the corresponding variables;
+%% if '*' appears in the rule, it will contain the remaining part of the
+%% URL, if any.
+%%
+%% Examples:
+%%
+%% Dispatch rule URL TO Tokens
+%%
+%% {"from": "/a/b", /a/b?k=v /some/b?k=v var =:= b
+%% "to": "/some/"} k = v
+%%
+%% {"from": "/a/b", /a/b /some/b?var=b var =:= b
+%% "to": "/some/:var"}
+%%
+%% {"from": "/a", /a /some
+%% "to": "/some/*"}
+%%
+%% {"from": "/a/*", /a/b/c /some/b/c
+%% "to": "/some/*"}
+%%
+%% {"from": "/a", /a /some
+%% "to": "/some/*"}
+%%
+%% {"from": "/a/:foo/*", /a/b/c /some/b/c?foo=b foo =:= b
+%% "to": "/some/:foo/*"}
+%%
+%% {"from": "/a/:foo", /a/b /some/?k=b&foo=b foo =:= b
+%% "to": "/some",
+%% "query": {
+%% "k": ":foo"
+%% }}
+%%
+%% {"from": "/a", /a?foo=b /some/b foo =:= b
+%% "to": "/some/:foo",
+%% }}
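+%%
+%% Note that the rewritten path stays under the design document: the
+%% handler prepends /dbname/_design/ddocname before re-dispatching, so
+%% (illustratively) "to": "index.html" resolves to
+%% /dbname/_design/ddocname/index.html.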
+
+
+
+handle_rewrite_req(#httpd{
+ path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts],
+ method=Method,
+ mochi_req=MochiReq}=Req, _Db, DDoc) ->
+
+ % we are in a design handler
+ DesignId = <<"_design/", DesignName/binary>>,
+ Prefix = <<"/", DbName/binary, "/", DesignId/binary>>,
+ QueryList = couch_httpd:qs(Req),
+ QueryList1 = [{to_binding(K), V} || {K, V} <- QueryList],
+
+ #doc{body={Props}} = DDoc,
+
+ % get rules from ddoc
+ case couch_util:get_value(<<"rewrites">>, Props) of
+ undefined ->
+ couch_httpd:send_error(Req, 404, <<"rewrite_error">>,
+ <<"Invalid path.">>);
+ Rules ->
+ % create dispatch list from rules
+ DispatchList = [make_rule(Rule) || {Rule} <- Rules],
+
+ %% get the raw path by matching the URL against a rule.
+ RawPath = case try_bind_path(DispatchList, couch_util:to_binary(Method), PathParts,
+ QueryList1) of
+ no_dispatch_path ->
+ throw(not_found);
+ {NewPathParts, Bindings} ->
+ Parts = [quote_plus(X) || X <- NewPathParts],
+
+ % build the new path, re-encoding query args and
+ % converting them to JSON where needed
+ Path = lists:append(
+ string:join(Parts, [?SEPARATOR]),
+ case Bindings of
+ [] -> [];
+ _ -> [$?, encode_query(Bindings)]
+ end),
+
+ % if the path is relative, detect it and rewrite the path
+ case mochiweb_util:safe_relative_path(Path) of
+ undefined ->
+ ?b2l(Prefix) ++ "/" ++ Path;
+ P1 ->
+ ?b2l(Prefix) ++ "/" ++ P1
+ end
+
+ end,
+
+ % normalize final path (fix levels "." and "..")
+ RawPath1 = ?b2l(iolist_to_binary(normalize_path(RawPath))),
+
+ ?LOG_DEBUG("rewrite to ~p ~n", [RawPath1]),
+
+ % build a new mochiweb request
+ MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
+ MochiReq:get(method),
+ RawPath1,
+ MochiReq:get(version),
+ MochiReq:get(headers)),
+
+ % cleanup; this forces mochiweb to reparse the raw URI.
+ MochiReq1:cleanup(),
+
+ #httpd{
+ db_url_handlers = DbUrlHandlers,
+ design_url_handlers = DesignUrlHandlers,
+ default_fun = DefaultFun,
+ url_handlers = UrlHandlers
+ } = Req,
+ couch_httpd:handle_request_int(MochiReq1, DefaultFun,
+ UrlHandlers, DbUrlHandlers, DesignUrlHandlers)
+ end.
+
+quote_plus({bind, X}) ->
+ mochiweb_util:quote_plus(X);
+quote_plus(X) ->
+ mochiweb_util:quote_plus(X).
+
+%% @doc Try to find a rule matching the current URL. If none is
+%% found, a 404 not_found error is raised.
+try_bind_path([], _Method, _PathParts, _QueryList) ->
+ no_dispatch_path;
+try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
+ [{PathParts1, Method1}, RedirectPath, QueryArgs] = Dispatch,
+ case bind_method(Method1, Method) of
+ true ->
+ case bind_path(PathParts1, PathParts, []) of
+ {ok, Remaining, Bindings} ->
+ Bindings1 = Bindings ++ QueryList,
+ % parse query args from the rule, filling them
+ % in with binding vars where needed
+ QueryArgs1 = make_query_list(QueryArgs, Bindings1, []),
+ % drop any bindings already present in
+ % QueryArgs1
+ Bindings2 = lists:foldl(fun({K, V}, Acc) ->
+ K1 = to_binding(K),
+ KV = case couch_util:get_value(K1, QueryArgs1) of
+ undefined -> [{K1, V}];
+ _V1 -> []
+ end,
+ Acc ++ KV
+ end, [], Bindings1),
+
+ FinalBindings = Bindings2 ++ QueryArgs1,
+ NewPathParts = make_new_path(RedirectPath, FinalBindings,
+ Remaining, []),
+ {NewPathParts, FinalBindings};
+ fail ->
+ try_bind_path(Rest, Method, PathParts, QueryList)
+ end;
+ false ->
+ try_bind_path(Rest, Method, PathParts, QueryList)
+ end.
+
+%% Dynamically rewrite the query list given as the "query" member in
+%% rewrites. Each value is replaced by a binding or by an argument
+%% passed in the URL.
+make_query_list([], _Bindings, Acc) ->
+ Acc;
+make_query_list([{Key, {Value}}|Rest], Bindings, Acc) ->
+ Value1 = to_json({Value}),
+ make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]);
+make_query_list([{Key, Value}|Rest], Bindings, Acc) when is_binary(Value) ->
+ Value1 = replace_var(Key, Value, Bindings),
+ make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]);
+make_query_list([{Key, Value}|Rest], Bindings, Acc) when is_list(Value) ->
+ Value1 = replace_var(Key, Value, Bindings),
+ make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]);
+make_query_list([{Key, Value}|Rest], Bindings, Acc) ->
+ make_query_list(Rest, Bindings, [{to_binding(Key), Value}|Acc]).
+
+replace_var(Key, Value, Bindings) ->
+ case Value of
+ <<":", Var/binary>> ->
+ get_var(Var, Bindings, Value);
+ <<"*">> ->
+ get_var(Value, Bindings, Value);
+ _ when is_list(Value) ->
+ Value1 = lists:foldr(fun(V, Acc) ->
+ V1 = case V of
+ <<":", VName/binary>> ->
+ case get_var(VName, Bindings, V) of
+ V2 when is_list(V2) ->
+ iolist_to_binary(V2);
+ V2 -> V2
+ end;
+ <<"*">> ->
+ get_var(V, Bindings, V);
+ _ ->
+ V
+ end,
+ [V1|Acc]
+ end, [], Value),
+ to_json(Value1);
+ _ when is_binary(Value) ->
+ Value;
+ _ ->
+ case Key of
+ <<"key">> -> to_json(Value);
+ <<"startkey">> -> to_json(Value);
+ <<"start_key">> -> to_json(Value);
+ <<"endkey">> -> to_json(Value);
+ <<"end_key">> -> to_json(Value);
+ _ ->
+ lists:flatten(?JSON_ENCODE(Value))
+ end
+ end.
+
+
+get_var(VarName, Props, Default) ->
+ VarName1 = to_binding(VarName),
+ couch_util:get_value(VarName1, Props, Default).
+
+%% @doc build the new path from bindings. Bindings are query args
+%% (+ the dynamically rewritten query if needed) and bindings found
+%% in the bind_path step.
+make_new_path([], _Bindings, _Remaining, Acc) ->
+ lists:reverse(Acc);
+make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
+ Acc1 = lists:reverse(Acc) ++ Remaining,
+ Acc1;
+make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
+ Acc1 = lists:reverse(Acc) ++ Remaining,
+ Acc1;
+make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
+ P2 = case couch_util:get_value({bind, P}, Bindings) of
+ undefined -> <<"undefined">>;
+ P1 -> P1
+ end,
+ make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
+make_new_path([P|Rest], Bindings, Remaining, Acc) ->
+ make_new_path(Rest, Bindings, Remaining, [P|Acc]).
+
+
+%% @doc Check whether the request method matches the rule method. If
+%% the method rule is '*', which is the default, all
+%% request methods bind. This allows rules that
+%% depend on the HTTP method.
+bind_method(?MATCH_ALL, _Method ) ->
+ true;
+bind_method({bind, Method}, Method) ->
+ true;
+bind_method(_, _) ->
+ false.
+
+
+%% @doc bind a path. Using the rule's "from" pattern, we try to bind
+%% variables in the current URL by pattern matching
+bind_path([], [], Bindings) ->
+ {ok, [], Bindings};
+bind_path([?MATCH_ALL], [Match|_RestMatch]=Rest, Bindings) ->
+ {ok, Rest, [{?MATCH_ALL, Match}|Bindings]};
+bind_path(_, [], _) ->
+ fail;
+bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
+ bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
+bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
+ bind_path(RestToken, RestMatch, Bindings);
+bind_path(_, _, _) ->
+ fail.
+
+
+%% normalize path.
+normalize_path(Path) ->
+ "/" ++ string:join(normalize_path1(string:tokens(Path,
+ "/"), []), [?SEPARATOR]).
+
+
+normalize_path1([], Acc) ->
+ lists:reverse(Acc);
+normalize_path1([".."|Rest], Acc) ->
+ Acc1 = case Acc of
+ [] -> [".."|Acc];
+ [T|_] when T =:= ".." -> [".."|Acc];
+ [_|R] -> R
+ end,
+ normalize_path1(Rest, Acc1);
+normalize_path1(["."|Rest], Acc) ->
+ normalize_path1(Rest, Acc);
+normalize_path1([Path|Rest], Acc) ->
+ normalize_path1(Rest, [Path|Acc]).
+
+
+%% @doc transform a JSON rule into Erlang terms for pattern matching
+make_rule(Rule) ->
+ Method = case couch_util:get_value(<<"method">>, Rule) of
+ undefined -> ?MATCH_ALL;
+ M -> to_binding(M)
+ end,
+ QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
+ undefined -> [];
+ {Args} -> Args
+ end,
+ FromParts = case couch_util:get_value(<<"from">>, Rule) of
+ undefined -> [?MATCH_ALL];
+ From ->
+ parse_path(From)
+ end,
+ ToParts = case couch_util:get_value(<<"to">>, Rule) of
+ undefined ->
+ throw({error, invalid_rewrite_target});
+ To ->
+ parse_path(To)
+ end,
+ [{FromParts, Method}, ToParts, QueryArgs].
+
+parse_path(Path) ->
+ {ok, SlashRE} = re:compile(<<"\\/">>),
+ path_to_list(re:split(Path, SlashRE), [], 0).
+
+%% @doc convert a path rule (from or to) to an Erlang list.
+%% "*" and path variables starting with ":" are converted
+%% to binding terms.
+path_to_list([], Acc, _DotDotCount) ->
+ lists:reverse(Acc);
+path_to_list([<<>>|R], Acc, DotDotCount) ->
+ path_to_list(R, Acc, DotDotCount);
+path_to_list([<<"*">>|R], Acc, DotDotCount) ->
+ path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
+path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
+ case couch_config:get("httpd", "secure_rewrites", "true") of
+ "false" ->
+ path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
+ _Else ->
+ ?LOG_INFO("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
+ throw({insecure_rewrite_rule, "too many ../.. segments"})
+ end;
+path_to_list([<<"..">>|R], Acc, DotDotCount) ->
+ path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
+path_to_list([P|R], Acc, DotDotCount) ->
+ P1 = case P of
+ <<":", Var/binary>> ->
+ to_binding(Var);
+ _ -> P
+ end,
+ path_to_list(R, [P1|Acc], DotDotCount).
+
+encode_query(Props) ->
+ Props1 = lists:foldl(fun ({{bind, K}, V}, Acc) ->
+ case K of
+ <<"*">> -> Acc;
+ _ ->
+ V1 = case is_list(V) orelse is_binary(V) of
+ true -> V;
+ false ->
+ % probably it's a number
+ quote_plus(V)
+ end,
+ [{K, V1} | Acc]
+ end
+ end, [], Props),
+ lists:flatten(mochiweb_util:urlencode(Props1)).
+
+to_binding({bind, V}) ->
+ {bind, V};
+to_binding(V) when is_list(V) ->
+ to_binding(?l2b(V));
+to_binding(V) ->
+ {bind, V}.
+
+to_json(V) ->
+ iolist_to_binary(?JSON_ENCODE(V)).
diff --git a/1.1.x/src/couchdb/couch_httpd_show.erl b/1.1.x/src/couchdb/couch_httpd_show.erl
new file mode 100644
index 00000000..59f74e1c
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd_show.erl
@@ -0,0 +1,404 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_show).
+
+-export([handle_doc_show_req/3, handle_doc_update_req/3, handle_view_list_req/3,
+ handle_view_list/6, get_fun_key/3]).
+
+-include("couch_db.hrl").
+
+-import(couch_httpd,
+ [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
+ start_json_response/2,send_chunk/2,last_chunk/1,send_chunked_error/2,
+ start_chunked_response/3, send_error/4]).
+
+
+% /db/_design/foo/_show/bar/docid
+% show converts a json doc to a response of any content-type.
+% it looks up the doc and then passes it to the query server.
+% then it sends the response from the query server to the http client.
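+%
+% An illustrative show function in the design doc ("shows" are JavaScript
+% by default; see get_fun_key/3 below):
+%   "shows": {
+%     "bar": "function(doc, req) { return {body: 'Hello ' + doc._id}; }"
+%   }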
+
+maybe_open_doc(Db, DocId) ->
+ case catch couch_httpd_db:couch_doc_open(Db, DocId, nil, [conflicts]) of
+ {not_found, missing} -> nil;
+ {not_found,deleted} -> nil;
+ Doc -> Doc
+ end.
+handle_doc_show_req(#httpd{
+ path_parts=[_, _, _, _, ShowName, DocId]
+ }=Req, Db, DDoc) ->
+
+ % open the doc
+ Doc = maybe_open_doc(Db, DocId),
+
+ % we don't handle revs here b/c they are an internal api
+ % returns 404 if there is no doc with DocId
+ handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
+
+handle_doc_show_req(#httpd{
+ path_parts=[_, _, _, _, ShowName, DocId|Rest]
+ }=Req, Db, DDoc) ->
+
+ DocParts = [DocId|Rest],
+ DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
+
+ % open the doc
+ Doc = maybe_open_doc(Db, DocId1),
+
+ % we don't handle revs here b/c they are an internal api
+ % pass 404 docs to the show function
+ handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
+
+handle_doc_show_req(#httpd{
+ path_parts=[_, _, _, _, ShowName]
+ }=Req, Db, DDoc) ->
+ % with no docid the doc is nil
+ handle_doc_show(Req, Db, DDoc, ShowName, nil);
+
+handle_doc_show_req(Req, _Db, _DDoc) ->
+ send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
+
+handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
+ handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).
+
+handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
+ % get responder for ddoc/showname
+ CurrentEtag = show_etag(Req, Doc, DDoc, []),
+ couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+ JsonReq = couch_httpd_external:json_req_obj(Req, Db, DocId),
+ JsonDoc = couch_query_servers:json_doc(Doc),
+ [<<"resp">>, ExternalResp] =
+ couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName], [JsonDoc, JsonReq]),
+ JsonResp = apply_etag(ExternalResp, CurrentEtag),
+ couch_httpd_external:send_external_response(Req, JsonResp)
+ end).
+
+
+
+show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
+ Accept = couch_httpd:header_value(Req, "Accept"),
+ DocPart = case Doc of
+ nil -> nil;
+ Doc -> couch_httpd:doc_etag(Doc)
+ end,
+ couch_httpd:make_etag({couch_httpd:doc_etag(DDoc), DocPart, Accept, UserCtx#user_ctx.roles, More}).
+
+get_fun_key(DDoc, Type, Name) ->
+ #doc{body={Props}} = DDoc,
+ Lang = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
+ Src = couch_util:get_nested_json_value({Props}, [Type, Name]),
+ {Lang, Src}.
+
+% /db/_design/foo/update/bar/docid
+% updates a doc based on a request
+% handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
+% % anything but GET
+% send_method_not_allowed(Req, "POST,PUT,DELETE,ETC");
+
+handle_doc_update_req(#httpd{
+ path_parts=[_, _, _, _, UpdateName, DocId]
+ }=Req, Db, DDoc) ->
+ Doc = try couch_httpd_db:couch_doc_open(Db, DocId, nil, [conflicts])
+ catch
+ _ -> nil
+ end,
+ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);
+
+handle_doc_update_req(#httpd{
+ path_parts=[_, _, _, _, UpdateName]
+ }=Req, Db, DDoc) ->
+ send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);
+
+handle_doc_update_req(Req, _Db, _DDoc) ->
+ send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).
+
+send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
+ JsonReq = couch_httpd_external:json_req_obj(Req, Db, DocId),
+ JsonDoc = couch_query_servers:json_doc(Doc),
+ {Code, JsonResp1} = case couch_query_servers:ddoc_prompt(DDoc,
+ [<<"updates">>, UpdateName], [JsonDoc, JsonReq]) of
+ [<<"up">>, {NewJsonDoc}, {JsonResp}] ->
+ Options = case couch_httpd:header_value(Req, "X-Couch-Full-Commit",
+ "false") of
+ "true" ->
+ [full_commit];
+ _ ->
+ []
+ end,
+ NewDoc = couch_doc:from_json_obj({NewJsonDoc}),
+ {ok, NewRev} = couch_db:update_doc(Db, NewDoc, Options),
+ NewRevStr = couch_doc:rev_to_str(NewRev),
+ JsonRespWithRev = {[{<<"headers">>,
+ {[{<<"X-Couch-Update-NewRev">>, NewRevStr}]}} | JsonResp]},
+ {201, JsonRespWithRev};
+ [<<"up">>, _Other, JsonResp] ->
+ {200, JsonResp}
+ end,
+
+ JsonResp2 = couch_util:json_apply_field({<<"code">>, Code}, JsonResp1),
+ % todo set location field
+ couch_httpd_external:send_external_response(Req, JsonResp2).
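+% An illustrative update function matching the protocol above:
+%   "updates": {
+%     "bar": "function(doc, req) { doc.updated = true; return [doc, {body: 'ok'}]; }"
+%   }
+% The query server answers ["up", newDoc, response]; when a doc is saved the
+% request yields 201 with an X-Couch-Update-NewRev header, otherwise 200.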
+
+
+% view-list request with view and list from same design doc.
+handle_view_list_req(#httpd{method='GET',
+ path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
+ Keys = couch_httpd:qs_json_value(Req, "keys", nil),
+ handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
+
+% view-list request with view and list from different design docs.
+handle_view_list_req(#httpd{method='GET',
+ path_parts=[_, _, _, _, ListName, ViewDesignName, ViewName]}=Req, Db, DDoc) ->
+ Keys = couch_httpd:qs_json_value(Req, "keys", nil),
+ handle_view_list(Req, Db, DDoc, ListName, {ViewDesignName, ViewName}, Keys);
+
+handle_view_list_req(#httpd{method='GET'}=Req, _Db, _DDoc) ->
+ send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
+
+handle_view_list_req(#httpd{method='POST',
+ path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
+ % {Props2} = couch_httpd:json_body(Req),
+ ReqBody = couch_httpd:body(Req),
+ {Props2} = ?JSON_DECODE(ReqBody),
+ Keys = couch_util:get_value(<<"keys">>, Props2, nil),
+ handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
+
+handle_view_list_req(#httpd{method='POST',
+ path_parts=[_, _, _, _, ListName, ViewDesignName, ViewName]}=Req, Db, DDoc) ->
+ % {Props2} = couch_httpd:json_body(Req),
+ ReqBody = couch_httpd:body(Req),
+ {Props2} = ?JSON_DECODE(ReqBody),
+ Keys = couch_util:get_value(<<"keys">>, Props2, nil),
+ handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName, {ViewDesignName, ViewName}, Keys);
+
+handle_view_list_req(#httpd{method='POST'}=Req, _Db, _DDoc) ->
+ send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
+
+handle_view_list_req(Req, _Db, _DDoc) ->
+ send_method_not_allowed(Req, "GET,POST,HEAD").
+
+handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) ->
+ ViewDesignId = <<"_design/", ViewDesignName/binary>>,
+ {ViewType, View, Group, QueryArgs} = couch_httpd_view:load_view(Req, Db, {ViewDesignId, ViewName}, Keys),
+ Etag = list_etag(Req, Db, Group, View, {couch_httpd:doc_etag(DDoc), Keys}),
+ couch_httpd:etag_respond(Req, Etag, fun() ->
+ output_list(ViewType, Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group)
+ end).
+
+list_etag(#httpd{user_ctx=UserCtx}=Req, Db, Group, View, More) ->
+ Accept = couch_httpd:header_value(Req, "Accept"),
+ couch_httpd_view:view_etag(Db, Group, View, {More, Accept, UserCtx#user_ctx.roles}).
+
+output_list(map, Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group) ->
+ output_map_list(Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group);
+output_list(reduce, Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group) ->
+ output_reduce_list(Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group).
+
+% next step:
+% use with_ddoc_proc/2 to make this simpler
+output_map_list(Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group) ->
+ #view_query_args{
+ limit = Limit,
+ skip = SkipCount
+ } = QueryArgs,
+
+ FoldAccInit = {Limit, SkipCount, undefined, []},
+ {ok, RowCount} = couch_view:get_row_count(View),
+
+
+ couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
+
+ ListFoldHelpers = #view_fold_helper_funs{
+ reduce_count = fun couch_view:reduce_to_count/1,
+ start_response = StartListRespFun = make_map_start_resp_fun(QServer, Db, LName),
+ send_row = make_map_send_row_fun(QServer)
+ },
+ CurrentSeq = Group#group.current_seq,
+
+ {ok, _, FoldResult} = case Keys of
+ nil ->
+ FoldlFun = couch_httpd_view:make_view_fold_fun(Req, QueryArgs, Etag, Db, CurrentSeq, RowCount, ListFoldHelpers),
+ couch_view:fold(View, FoldlFun, FoldAccInit,
+ couch_httpd_view:make_key_options(QueryArgs));
+ Keys ->
+ lists:foldl(
+ fun(Key, {ok, _, FoldAcc}) ->
+ QueryArgs2 = QueryArgs#view_query_args{
+ start_key = Key,
+ end_key = Key
+ },
+ FoldlFun = couch_httpd_view:make_view_fold_fun(Req, QueryArgs2, Etag, Db, CurrentSeq, RowCount, ListFoldHelpers),
+ couch_view:fold(View, FoldlFun, FoldAcc,
+ couch_httpd_view:make_key_options(QueryArgs2))
+ end, {ok, nil, FoldAccInit}, Keys)
+ end,
+ finish_list(Req, QServer, Etag, FoldResult, StartListRespFun, CurrentSeq, RowCount)
+ end).
+
+
+output_reduce_list(Req, Db, DDoc, LName, View, QueryArgs, Etag, Keys, Group) ->
+ #view_query_args{
+ limit = Limit,
+ skip = SkipCount,
+ group_level = GroupLevel
+ } = QueryArgs,
+
+ CurrentSeq = Group#group.current_seq,
+
+ couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
+ StartListRespFun = make_reduce_start_resp_fun(QServer, Db, LName),
+ SendListRowFun = make_reduce_send_row_fun(QServer, Db),
+ {ok, GroupRowsFun, RespFun} = couch_httpd_view:make_reduce_fold_funs(Req,
+ GroupLevel, QueryArgs, Etag, CurrentSeq,
+ #reduce_fold_helper_funs{
+ start_response = StartListRespFun,
+ send_row = SendListRowFun
+ }),
+ FoldAccInit = {Limit, SkipCount, undefined, []},
+ {ok, FoldResult} = case Keys of
+ nil ->
+ couch_view:fold_reduce(View, RespFun, FoldAccInit, [{key_group_fun, GroupRowsFun} |
+ couch_httpd_view:make_key_options(QueryArgs)]);
+ Keys ->
+ lists:foldl(
+ fun(Key, {ok, FoldAcc}) ->
+ couch_view:fold_reduce(View, RespFun, FoldAcc,
+ [{key_group_fun, GroupRowsFun} |
+ couch_httpd_view:make_key_options(
+ QueryArgs#view_query_args{start_key=Key, end_key=Key})]
+ )
+ end, {ok, FoldAccInit}, Keys)
+ end,
+ finish_list(Req, QServer, Etag, FoldResult, StartListRespFun, CurrentSeq, null)
+ end).
+
+
+make_map_start_resp_fun(QueryServer, Db, LName) ->
+ fun(Req, Etag, TotalRows, Offset, _Acc, UpdateSeq) ->
+ Head = {[{<<"total_rows">>, TotalRows}, {<<"offset">>, Offset}, {<<"update_seq">>, UpdateSeq}]},
+ start_list_resp(QueryServer, LName, Req, Db, Head, Etag)
+ end.
+
+make_reduce_start_resp_fun(QueryServer, Db, LName) ->
+ fun(Req2, Etag, _Acc, UpdateSeq) ->
+ start_list_resp(QueryServer, LName, Req2, Db, {[{<<"update_seq">>, UpdateSeq}]}, Etag)
+ end.
+
+start_list_resp(QServer, LName, Req, Db, Head, Etag) ->
+ JsonReq = couch_httpd_external:json_req_obj(Req, Db),
+ [<<"start">>,Chunks,JsonResp] = couch_query_servers:ddoc_proc_prompt(QServer,
+ [<<"lists">>, LName], [Head, JsonReq]),
+ JsonResp2 = apply_etag(JsonResp, Etag),
+ #extern_resp_args{
+ code = Code,
+ ctype = CType,
+ headers = ExtHeaders
+ } = couch_httpd_external:parse_external_response(JsonResp2),
+ JsonHeaders = couch_httpd_external:default_or_content_type(CType, ExtHeaders),
+ {ok, Resp} = start_chunked_response(Req, Code, JsonHeaders),
+ {ok, Resp, ?b2l(?l2b(Chunks))}.
+
+make_map_send_row_fun(QueryServer) ->
+ fun(Resp, Db, Row, IncludeDocs, Conflicts, RowFront) ->
+ send_list_row(
+ Resp, QueryServer, Db, Row, RowFront, IncludeDocs, Conflicts)
+ end.
+
+make_reduce_send_row_fun(QueryServer, Db) ->
+ fun(Resp, Row, RowFront) ->
+ send_list_row(Resp, QueryServer, Db, Row, RowFront, false, false)
+ end.
+
+send_list_row(Resp, QueryServer, Db, Row, RowFront, IncludeDoc, Conflicts) ->
+ try
+ [Go,Chunks] = prompt_list_row(
+ QueryServer, Db, Row, IncludeDoc, Conflicts),
+ Chunk = RowFront ++ ?b2l(?l2b(Chunks)),
+ send_non_empty_chunk(Resp, Chunk),
+ case Go of
+ <<"chunks">> ->
+ {ok, ""};
+ <<"end">> ->
+ {stop, stop}
+ end
+ catch
+ throw:Error ->
+ send_chunked_error(Resp, Error),
+ throw({already_sent, Resp, Error})
+ end.
+
+
+prompt_list_row({Proc, _DDocId}, Db, {{_Key, _DocId}, _} = Kv,
+ IncludeDoc, Conflicts) ->
+ JsonRow = couch_httpd_view:view_row_obj(Db, Kv, IncludeDoc, Conflicts),
+ couch_query_servers:proc_prompt(Proc, [<<"list_row">>, JsonRow]);
+
+prompt_list_row({Proc, _DDocId}, _, {Key, Value}, _IncludeDoc, _Conflicts) ->
+ JsonRow = {[{key, Key}, {value, Value}]},
+ couch_query_servers:proc_prompt(Proc, [<<"list_row">>, JsonRow]).
+
+send_non_empty_chunk(Resp, Chunk) ->
+ case Chunk of
+ [] -> ok;
+ _ -> send_chunk(Resp, Chunk)
+ end.
+
+finish_list(Req, {Proc, _DDocId}, Etag, FoldResult, StartFun, CurrentSeq, TotalRows) ->
+ FoldResult2 = case FoldResult of
+ {Limit, SkipCount, Response, RowAcc} ->
+ {Limit, SkipCount, Response, RowAcc, nil};
+ Else ->
+ Else
+ end,
+ case FoldResult2 of
+ {_, _, undefined, _, _} ->
+ {ok, Resp, BeginBody} =
+ render_head_for_empty_list(StartFun, Req, Etag, CurrentSeq, TotalRows),
+ [<<"end">>, Chunks] = couch_query_servers:proc_prompt(Proc, [<<"list_end">>]),
+ Chunk = BeginBody ++ ?b2l(?l2b(Chunks)),
+ send_non_empty_chunk(Resp, Chunk);
+ {_, _, Resp, stop, _} ->
+ ok;
+ {_, _, Resp, _, _} ->
+ [<<"end">>, Chunks] = couch_query_servers:proc_prompt(Proc, [<<"list_end">>]),
+ send_non_empty_chunk(Resp, ?b2l(?l2b(Chunks)))
+ end,
+ last_chunk(Resp).
+
+
+render_head_for_empty_list(StartListRespFun, Req, Etag, CurrentSeq, null) ->
+ StartListRespFun(Req, Etag, [], CurrentSeq); % for reduce
+render_head_for_empty_list(StartListRespFun, Req, Etag, CurrentSeq, TotalRows) ->
+ StartListRespFun(Req, Etag, TotalRows, null, [], CurrentSeq).
+
+apply_etag({ExternalResponse}, CurrentEtag) ->
+ % Here we embark on the delicate task of replacing or creating the
+ % headers on the JsonResponse object. We need to control the Etag and
+ % Vary headers. If the external function controls the Etag, we'd have to
+ % run it to check for a match, which sort of defeats the purpose.
+ case couch_util:get_value(<<"headers">>, ExternalResponse, nil) of
+ nil ->
+ % no JSON headers
+ % add our Etag and Vary headers to the response
+ {[{<<"headers">>, {[{<<"Etag">>, CurrentEtag}, {<<"Vary">>, <<"Accept">>}]}} | ExternalResponse]};
+ JsonHeaders ->
+ {[case Field of
+ {<<"headers">>, JsonHeaders} -> % add our headers
+ JsonHeadersEtagged = couch_util:json_apply_field({<<"Etag">>, CurrentEtag}, JsonHeaders),
+ JsonHeadersVaried = couch_util:json_apply_field({<<"Vary">>, <<"Accept">>}, JsonHeadersEtagged),
+ {<<"headers">>, JsonHeadersVaried};
+ _ -> % skip non-header fields
+ Field
+ end || Field <- ExternalResponse]}
+ end.
+
diff --git a/1.1.x/src/couchdb/couch_httpd_stats_handlers.erl b/1.1.x/src/couchdb/couch_httpd_stats_handlers.erl
new file mode 100644
index 00000000..41aeaed0
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd_stats_handlers.erl
@@ -0,0 +1,56 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_stats_handlers).
+-include("couch_db.hrl").
+
+-export([handle_stats_req/1]).
+-import(couch_httpd, [
+ send_json/2, send_json/3, send_json/4, send_method_not_allowed/2,
+ start_json_response/2, send_chunk/2, end_json_response/1,
+ start_chunked_response/3, send_error/4
+]).
+
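+% Handles GET /_stats and GET /_stats/Module/Key. The optional `range'
+% query parameter (illustrative: GET /_stats/httpd/requests?range=60)
+% selects the aggregation window handed to couch_stats_aggregator, and
+% `flush=true' forces a fresh sample to be collected before responding.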
+handle_stats_req(#httpd{method='GET', path_parts=[_]}=Req) ->
+ flush(Req),
+ send_json(Req, couch_stats_aggregator:all(range(Req)));
+
+handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod]}) ->
+ throw({bad_request, <<"Stat names must have exactly to parts.">>});
+
+handle_stats_req(#httpd{method='GET', path_parts=[_, Mod, Key]}=Req) ->
+ flush(Req),
+ Stats = couch_stats_aggregator:get_json({list_to_atom(binary_to_list(Mod)),
+ list_to_atom(binary_to_list(Key))}, range(Req)),
+ send_json(Req, {[{Mod, {[{Key, Stats}]}}]});
+
+handle_stats_req(#httpd{method='GET', path_parts=[_, _Mod, _Key | _Extra]}) ->
+ throw({bad_request, <<"Stat names must have exactly two parts.">>});
+
+handle_stats_req(Req) ->
+ send_method_not_allowed(Req, "GET").
+
+range(Req) ->
+ case couch_util:get_value("range", couch_httpd:qs(Req)) of
+ undefined ->
+ 0;
+ Value ->
+ list_to_integer(Value)
+ end.
+
+flush(Req) ->
+ case couch_util:get_value("flush", couch_httpd:qs(Req)) of
+ "true" ->
+ couch_stats_aggregator:collect_sample();
+ _Else ->
+ ok
+ end.
diff --git a/1.1.x/src/couchdb/couch_httpd_vhost.erl b/1.1.x/src/couchdb/couch_httpd_vhost.erl
new file mode 100644
index 00000000..9bfb5951
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd_vhost.erl
@@ -0,0 +1,403 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+
+-module(couch_httpd_vhost).
+-behaviour(gen_server).
+
+-export([start_link/0, init/1, handle_call/3, handle_info/2, handle_cast/2]).
+-export([code_change/3, terminate/2]).
+-export([match_vhost/1, urlsplit_netloc/2]).
+-export([redirect_to_vhost/2]).
+
+-include("couch_db.hrl").
+
+-define(SEPARATOR, $\/).
+-define(MATCH_ALL, {bind, '*'}).
+
+-record(vhosts, {
+ vhost_globals,
+ vhosts = [],
+ vhost_fun
+}).
+
+
+%% @doc The vhost manager.
+%% This gen_server keeps the state of vhosts added to the ini and tries
+%% to match the Host header (or the forwarded host) against rules built
+%% from the vhost list.
+%%
+%% Declaration of vhosts takes place in the configuration file:
+%%
+%% [vhosts]
+%% example.com = /example
+%% *.example.com = /example
+%%
+%% The first line rewrites the request to display the content of the
+%% example database. This rule works only if the Host header is
+%% 'example.com' and won't work for CNAMEs. The second rule, on the
+%% other hand, matches all CNAMEs to the example db, so www.example.com
+%% or db.example.com will work.
+%%
+%% The wildcard ('*') should always come last in the CNAMEs:
+%%
+%% "*.db.example.com = /" will match any CNAME on top of db.example.com
+%% and map it to the root of the machine.
+%%
+%%
+%% Rewriting Hosts to path
+%% -----------------------
+%%
+%% Like in the _rewrite handler, you can match variables and use
+%% them to create the target path. Some examples:
+%%
+%% [vhosts]
+%% *.example.com = /*
+%% :dbname.example.com = /:dbname
+%% :ddocname.:dbname.example.com = /:dbname/_design/:ddocname/_rewrite
+%%
+%% The first rule passes the wildcard as dbname, the second does the
+%% same but uses a variable name, and the third one allows you to use
+%% any app with :ddocname in any db with :dbname.
+%%
+%% You can also change the default function used to handle requests by
+%% changing the setting `redirect_vhost_handler` in the `httpd` section
+%% of the ini:
+%%
+%% [httpd]
+%% redirect_vhost_handler = {Module, Fun}
+%%
+%% The function takes two arguments: the mochiweb request object and the
+%% target path.
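+%%
+%% A minimal sketch of a custom handler (hypothetical, not part of this
+%% module):
+%%
+%%   my_vhost_handler(MochiReq, VhostTarget) ->
+%%       % delegate to the default behaviour
+%%       couch_httpd_vhost:redirect_to_vhost(MochiReq, VhostTarget).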
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%% @doc Try to find a rule matching the current Host header. If a rule
+%% is found, it rewrites the Mochiweb request; otherwise it returns the
+%% current request unchanged.
+match_vhost(MochiReq) ->
+ {ok, MochiReq1} = gen_server:call(couch_httpd_vhost, {match_vhost,
+ MochiReq}),
+
+ MochiReq1.
+
+
+%% --------------------
+%% gen_server functions
+%% --------------------
+
+init(_) ->
+ process_flag(trap_exit, true),
+
+ % init state
+ VHosts = make_vhosts(),
+ VHostGlobals = re:split(
+ couch_config:get("httpd", "vhost_global_handlers", ""),
+ ", ?",
+ [{return, list}]
+ ),
+
+ % Set vhost fun
+ DefaultVHostFun = "{couch_httpd_vhost, redirect_to_vhost}",
+ VHostFun = couch_httpd:make_arity_2_fun(
+ couch_config:get("httpd", "redirect_vhost_handler", DefaultVHostFun)
+ ),
+
+
+ Self = self(),
+ % register for changes in vhosts section
+ ok = couch_config:register(
+ fun("vhosts") ->
+ ok = gen_server:call(Self, vhosts_changed, infinity)
+ end
+ ),
+
+ % register for changes in vhost_global_handlers key
+ ok = couch_config:register(
+ fun("httpd", "vhost_global_handlers") ->
+ ok = gen_server:call(Self, vhosts_global_changed, infinity)
+ end
+ ),
+
+ ok = couch_config:register(
+ fun("httpd", "redirect_vhost_handler") ->
+ ok = gen_server:call(Self, fun_changed, infinity)
+ end
+ ),
+
+ {ok, #vhosts{
+ vhost_globals = VHostGlobals,
+ vhosts = VHosts,
+ vhost_fun = VHostFun}
+ }.
+
+
+handle_call({match_vhost, MochiReq}, _From, State) ->
+ #vhosts{
+ vhost_globals = VHostGlobals,
+ vhosts = VHosts,
+ vhost_fun = Fun
+ } = State,
+
+ {"/" ++ VPath, Query, Fragment} = mochiweb_util:urlsplit_path(MochiReq:get(raw_path)),
+ VPathParts = string:tokens(VPath, "/"),
+
+ XHost = couch_config:get("httpd", "x_forwarded_host", "X-Forwarded-Host"),
+ VHost = case MochiReq:get_header_value(XHost) of
+ undefined ->
+ case MochiReq:get_header_value("Host") of
+ undefined -> [];
+ Value1 -> Value1
+ end;
+ Value -> Value
+ end,
+ {VHostParts, VhostPort} = split_host_port(VHost),
+ FinalMochiReq = case try_bind_vhost(VHosts, lists:reverse(VHostParts),
+ VhostPort, VPathParts) of
+ no_vhost_matched -> MochiReq;
+ {VhostTarget, NewPath} ->
+ case vhost_global(VHostGlobals, MochiReq) of
+ true ->
+ MochiReq;
+ _Else ->
+ NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query,
+ Fragment}),
+ MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
+ MochiReq:get(method),
+ NewPath1,
+ MochiReq:get(version),
+ MochiReq:get(headers)),
+ Fun(MochiReq1, VhostTarget)
+ end
+ end,
+ {reply, {ok, FinalMochiReq}, State};
+
+% update vhosts
+handle_call(vhosts_changed, _From, State) ->
+ {reply, ok, State#vhosts{vhosts= make_vhosts()}};
+
+
+% update vhosts_globals
+handle_call(vhosts_global_changed, _From, State) ->
+ VHostGlobals = re:split(
+ couch_config:get("httpd", "vhost_global_handlers", ""),
+ ", ?",
+ [{return, list}]
+ ),
+ {reply, ok, State#vhosts{vhost_globals=VHostGlobals}};
+% change fun
+handle_call(fun_changed, _From, State) ->
+ DefaultVHostFun = "{couch_httpd_vhosts, redirect_to_vhost}",
+ VHostFun = couch_httpd:make_arity_2_fun(
+ couch_config:get("httpd", "redirect_vhost_handler", DefaultVHostFun)
+ ),
+ {reply, ok, State#vhosts{vhost_fun=VHostFun}}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+
+% default redirect vhost handler
+
+redirect_to_vhost(MochiReq, VhostTarget) ->
+ Path = MochiReq:get(raw_path),
+ Target = VhostTarget ++ Path,
+
+ ?LOG_DEBUG("Vhost Target: '~p'~n", [Target]),
+
+ Headers = mochiweb_headers:enter("x-couchdb-vhost-path", Path,
+ MochiReq:get(headers)),
+
+ % build a new mochiweb request
+ MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
+ MochiReq:get(method),
+ Target,
+ MochiReq:get(version),
+ Headers),
+    % cleanup; this forces mochiweb to reparse the raw uri.
+ MochiReq1:cleanup(),
+
+ MochiReq1.
+
+%% Check if the request path is handled by a global vhost handler; if so,
+%% it will not be rewritten, but will run as a normal couchdb request.
+%% Normally you'd use this for _uuids, _utils and a few of the others you
+%% want to keep available on vhosts. You can also use it to make databases
+%% 'global'.
+vhost_global( VhostGlobals, MochiReq) ->
+ RawUri = MochiReq:get(raw_path),
+ {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
+
+ Front = case couch_httpd:partition(Path) of
+ {"", "", ""} ->
+ "/"; % Special case the root url handler
+ {FirstPart, _, _} ->
+ FirstPart
+ end,
+ [true] == [true||V <- VhostGlobals, V == Front].
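+
+%% Illustrative ini (assumed values):
+%%   [httpd]
+%%   vhost_global_handlers = _utils, _uuids, _session
+%% Requests whose first path segment matches one of these handlers bypass
+%% the vhost rewrite.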
+
+%% bind host
+%% first try to bind the port, then the hostname.
+try_bind_vhost([], _HostParts, _Port, _PathParts) ->
+ no_vhost_matched;
+try_bind_vhost([VhostSpec|Rest], HostParts, Port, PathParts) ->
+ {{VHostParts, VPort, VPath}, Path} = VhostSpec,
+ case bind_port(VPort, Port) of
+ ok ->
+ case bind_vhost(lists:reverse(VHostParts), HostParts, []) of
+ {ok, Bindings, Remainings} ->
+ case bind_path(VPath, PathParts) of
+ {ok, PathParts1} ->
+ Path1 = make_target(Path, Bindings, Remainings, []),
+ {make_path(Path1), make_path(PathParts1)};
+ fail ->
+ try_bind_vhost(Rest, HostParts, Port,
+ PathParts)
+ end;
+ fail -> try_bind_vhost(Rest, HostParts, Port, PathParts)
+ end;
+ fail -> try_bind_vhost(Rest, HostParts, Port, PathParts)
+ end.
+
+%% doc: build a new path from bindings. Bindings are query args
+%% (+ dynamic query rewritten if needed) and bindings found in the
+%% bind_path step.
+%% TODO: merge code with rewrite. But we need to make sure we are
+%% dealing with strings here.
+make_target([], _Bindings, _Remaining, Acc) ->
+ lists:reverse(Acc);
+make_target([?MATCH_ALL], _Bindings, Remaining, Acc) ->
+ Acc1 = lists:reverse(Acc) ++ Remaining,
+ Acc1;
+make_target([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
+ Acc1 = lists:reverse(Acc) ++ Remaining,
+ Acc1;
+make_target([{bind, P}|Rest], Bindings, Remaining, Acc) ->
+ P2 = case couch_util:get_value({bind, P}, Bindings) of
+ undefined -> "undefined";
+ P1 -> P1
+ end,
+ make_target(Rest, Bindings, Remaining, [P2|Acc]);
+make_target([P|Rest], Bindings, Remaining, Acc) ->
+ make_target(Rest, Bindings, Remaining, [P|Acc]).
+
+%% bind port
+bind_port(Port, Port) -> ok;
+bind_port('*', _) -> ok;
+bind_port(_,_) -> fail.
+
+%% bind vhost
+bind_vhost([],[], Bindings) -> {ok, Bindings, []};
+bind_vhost([?MATCH_ALL], [], _Bindings) -> fail;
+bind_vhost([?MATCH_ALL], Rest, Bindings) -> {ok, Bindings, Rest};
+bind_vhost([], _HostParts, _Bindings) -> fail;
+bind_vhost([{bind, Token}|Rest], [Match|RestHost], Bindings) ->
+ bind_vhost(Rest, RestHost, [{{bind, Token}, Match}|Bindings]);
+bind_vhost([Cname|Rest], [Cname|RestHost], Bindings) ->
+ bind_vhost(Rest, RestHost, Bindings);
+bind_vhost(_, _, _) -> fail.
+
+%% bind path
+bind_path([], PathParts) ->
+ {ok, PathParts};
+bind_path(_VPathParts, []) ->
+ fail;
+bind_path([Path|VRest],[Path|Rest]) ->
+ bind_path(VRest, Rest);
+bind_path(_, _) ->
+ fail.
+
+% utilities
+
+
+%% create vhost list from ini
+make_vhosts() ->
+ Vhosts = lists:foldl(fun({Vhost, Path}, Acc) ->
+ [{parse_vhost(Vhost), split_path(Path)}|Acc]
+ end, [], couch_config:get("vhosts")),
+ lists:reverse(lists:usort(Vhosts)).
+
+parse_vhost(Vhost) ->
+ case urlsplit_netloc(Vhost, []) of
+ {[], Path} ->
+ {make_spec("*", []), '*', Path};
+ {HostPort, []} ->
+ {H, P} = split_host_port(HostPort),
+ H1 = make_spec(H, []),
+ {H1, P, []};
+ {HostPort, Path} ->
+ {H, P} = split_host_port(HostPort),
+ H1 = make_spec(H, []),
+ {H1, P, string:tokens(Path, "/")}
+ end.
+
+
+split_host_port(HostAsString) ->
+ case string:rchr(HostAsString, $:) of
+ 0 ->
+ {split_host(HostAsString), '*'};
+ N ->
+ HostPart = string:substr(HostAsString, 1, N-1),
+        % take the substring after the colon before parsing the port;
+        % erlang:list_to_integer/3 does not exist
+        case (catch erlang:list_to_integer(string:substr(HostAsString,
+            N+1, length(HostAsString)))) of
+ {'EXIT', _} ->
+ {split_host(HostAsString), '*'};
+ Port ->
+ {split_host(HostPart), Port}
+ end
+ end.
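+
+% Illustrative: split_host_port("db.example.com:5984") returns
+% {["db", "example", "com"], 5984}; with no port suffix the second
+% element is '*'.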
+
+split_host(HostAsString) ->
+ string:tokens(HostAsString, "\.").
+
+split_path(Path) ->
+ make_spec(string:tokens(Path, "/"), []).
+
+
+make_spec([], Acc) ->
+ lists:reverse(Acc);
+make_spec([""|R], Acc) ->
+ make_spec(R, Acc);
+make_spec(["*"|R], Acc) ->
+ make_spec(R, [?MATCH_ALL|Acc]);
+make_spec([P|R], Acc) ->
+ P1 = parse_var(P),
+ make_spec(R, [P1|Acc]).
+
+
+parse_var(P) ->
+ case P of
+ ":" ++ Var ->
+ {bind, Var};
+ _ -> P
+ end.
+
+
+% mochiweb doesn't export it.
+urlsplit_netloc("", Acc) ->
+ {lists:reverse(Acc), ""};
+urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
+ {lists:reverse(Acc), Rest};
+urlsplit_netloc([C | Rest], Acc) ->
+ urlsplit_netloc(Rest, [C | Acc]).
+
+make_path(Parts) ->
+ "/" ++ string:join(Parts,[?SEPARATOR]).
diff --git a/1.1.x/src/couchdb/couch_httpd_view.erl b/1.1.x/src/couchdb/couch_httpd_view.erl
new file mode 100644
index 00000000..b71fc2c6
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_httpd_view.erl
@@ -0,0 +1,755 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_httpd_view).
+-include("couch_db.hrl").
+
+-export([handle_view_req/3,handle_temp_view_req/2]).
+
+-export([parse_view_params/3]).
+-export([make_view_fold_fun/7, finish_view_fold/4, finish_view_fold/5, view_row_obj/4]).
+-export([view_etag/3, view_etag/4, make_reduce_fold_funs/6]).
+-export([design_doc_view/5, parse_bool_param/1, doc_member/3]).
+-export([make_key_options/1, load_view/4]).
+
+-import(couch_httpd,
+ [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,send_chunk/2,
+ start_json_response/2, start_json_response/3, end_json_response/1,
+ send_chunked_error/2]).
+
+-import(couch_db,[get_update_seq/1]).
+
+design_doc_view(Req, Db, DName, ViewName, Keys) ->
+ DesignId = <<"_design/", DName/binary>>,
+ Stale = get_stale_type(Req),
+ Reduce = get_reduce_type(Req),
+ Result = case couch_view:get_map_view(Db, DesignId, ViewName, Stale) of
+ {ok, View, Group} ->
+ QueryArgs = parse_view_params(Req, Keys, map),
+ output_map_view(Req, View, Group, Db, QueryArgs, Keys);
+ {not_found, Reason} ->
+ case couch_view:get_reduce_view(Db, DesignId, ViewName, Stale) of
+ {ok, ReduceView, Group} ->
+ case Reduce of
+ false ->
+ QueryArgs = parse_view_params(Req, Keys, red_map),
+ MapView = couch_view:extract_map_view(ReduceView),
+ output_map_view(Req, MapView, Group, Db, QueryArgs, Keys);
+ _ ->
+ QueryArgs = parse_view_params(Req, Keys, reduce),
+ output_reduce_view(Req, Db, ReduceView, Group, QueryArgs, Keys)
+ end;
+ _ ->
+ throw({not_found, Reason})
+ end
+ end,
+ couch_stats_collector:increment({httpd, view_reads}),
+ Result.
+
+handle_view_req(#httpd{method='GET',
+ path_parts=[_, _, DName, _, ViewName]}=Req, Db, _DDoc) ->
+ Keys = couch_httpd:qs_json_value(Req, "keys", nil),
+ design_doc_view(Req, Db, DName, ViewName, Keys);
+
+handle_view_req(#httpd{method='POST',
+ path_parts=[_, _, DName, _, ViewName]}=Req, Db, _DDoc) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ {Fields} = couch_httpd:json_body_obj(Req),
+ case couch_util:get_value(<<"keys">>, Fields, nil) of
+ nil ->
+ Fmt = "POST to view ~p/~p in database ~p with no keys member.",
+ ?LOG_DEBUG(Fmt, [DName, ViewName, Db]),
+ design_doc_view(Req, Db, DName, ViewName, nil);
+ Keys when is_list(Keys) ->
+ design_doc_view(Req, Db, DName, ViewName, Keys);
+ _ ->
+ throw({bad_request, "`keys` member must be a array."})
+ end;
+
+handle_view_req(Req, _Db, _DDoc) ->
+ send_method_not_allowed(Req, "GET,POST,HEAD").
+
+handle_temp_view_req(#httpd{method='POST'}=Req, Db) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ ok = couch_db:check_is_admin(Db),
+ couch_stats_collector:increment({httpd, temporary_view_reads}),
+ {Props} = couch_httpd:json_body_obj(Req),
+ Language = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
+ {DesignOptions} = couch_util:get_value(<<"options">>, Props, {[]}),
+ MapSrc = couch_util:get_value(<<"map">>, Props),
+ Keys = couch_util:get_value(<<"keys">>, Props, nil),
+ Reduce = get_reduce_type(Req),
+ case couch_util:get_value(<<"reduce">>, Props, null) of
+ null ->
+ QueryArgs = parse_view_params(Req, Keys, map),
+ {ok, View, Group} = couch_view:get_temp_map_view(Db, Language,
+ DesignOptions, MapSrc),
+ output_map_view(Req, View, Group, Db, QueryArgs, Keys);
+ _ when Reduce =:= false ->
+ QueryArgs = parse_view_params(Req, Keys, red_map),
+ {ok, View, Group} = couch_view:get_temp_map_view(Db, Language,
+ DesignOptions, MapSrc),
+ output_map_view(Req, View, Group, Db, QueryArgs, Keys);
+ RedSrc ->
+ QueryArgs = parse_view_params(Req, Keys, reduce),
+ {ok, View, Group} = couch_view:get_temp_reduce_view(Db, Language,
+ DesignOptions, MapSrc, RedSrc),
+ output_reduce_view(Req, Db, View, Group, QueryArgs, Keys)
+ end;
+
+handle_temp_view_req(Req, _Db) ->
+ send_method_not_allowed(Req, "POST").
+
+output_map_view(Req, View, Group, Db, QueryArgs, nil) ->
+ #view_query_args{
+ limit = Limit,
+ skip = SkipCount
+ } = QueryArgs,
+ CurrentEtag = view_etag(Db, Group, View),
+ couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+ {ok, RowCount} = couch_view:get_row_count(View),
+ FoldlFun = make_view_fold_fun(Req, QueryArgs, CurrentEtag, Db, Group#group.current_seq, RowCount, #view_fold_helper_funs{reduce_count=fun couch_view:reduce_to_count/1}),
+ FoldAccInit = {Limit, SkipCount, undefined, []},
+ {ok, LastReduce, FoldResult} = couch_view:fold(View,
+ FoldlFun, FoldAccInit, make_key_options(QueryArgs)),
+ finish_view_fold(Req, RowCount,
+ couch_view:reduce_to_count(LastReduce), FoldResult)
+ end);
+
+output_map_view(Req, View, Group, Db, QueryArgs, Keys) ->
+ #view_query_args{
+ limit = Limit,
+ skip = SkipCount
+ } = QueryArgs,
+ CurrentEtag = view_etag(Db, Group, View, Keys),
+ couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+ {ok, RowCount} = couch_view:get_row_count(View),
+ FoldAccInit = {Limit, SkipCount, undefined, []},
+ {LastReduce, FoldResult} = lists:foldl(fun(Key, {_, FoldAcc}) ->
+ FoldlFun = make_view_fold_fun(Req, QueryArgs#view_query_args{},
+ CurrentEtag, Db, Group#group.current_seq, RowCount,
+ #view_fold_helper_funs{
+ reduce_count = fun couch_view:reduce_to_count/1
+ }),
+ {ok, LastReduce, FoldResult} = couch_view:fold(View, FoldlFun,
+ FoldAcc, make_key_options(
+ QueryArgs#view_query_args{start_key=Key, end_key=Key})),
+ {LastReduce, FoldResult}
+ end, {{[],[]}, FoldAccInit}, Keys),
+ finish_view_fold(Req, RowCount, couch_view:reduce_to_count(LastReduce),
+ FoldResult, [{update_seq,Group#group.current_seq}])
+ end).
+
+output_reduce_view(Req, Db, View, Group, QueryArgs, nil) ->
+ #view_query_args{
+ limit = Limit,
+ skip = Skip,
+ group_level = GroupLevel
+ } = QueryArgs,
+ CurrentEtag = view_etag(Db, Group, View),
+ couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+ {ok, GroupRowsFun, RespFun} = make_reduce_fold_funs(Req, GroupLevel,
+ QueryArgs, CurrentEtag, Group#group.current_seq,
+ #reduce_fold_helper_funs{}),
+ FoldAccInit = {Limit, Skip, undefined, []},
+ {ok, {_, _, Resp, _}} = couch_view:fold_reduce(View,
+ RespFun, FoldAccInit, [{key_group_fun, GroupRowsFun} |
+ make_key_options(QueryArgs)]),
+ finish_reduce_fold(Req, Resp)
+ end);
+
+output_reduce_view(Req, Db, View, Group, QueryArgs, Keys) ->
+ #view_query_args{
+ limit = Limit,
+ skip = Skip,
+ group_level = GroupLevel
+ } = QueryArgs,
+ CurrentEtag = view_etag(Db, Group, View, Keys),
+ couch_httpd:etag_respond(Req, CurrentEtag, fun() ->
+ {ok, GroupRowsFun, RespFun} = make_reduce_fold_funs(Req, GroupLevel,
+ QueryArgs, CurrentEtag, Group#group.current_seq,
+ #reduce_fold_helper_funs{}),
+ {Resp, _RedAcc3} = lists:foldl(
+ fun(Key, {Resp, RedAcc}) ->
+ % run the reduce once for each key in keys, with limit etc
+ % reapplied for each key
+ FoldAccInit = {Limit, Skip, Resp, RedAcc},
+ {_, {_, _, Resp2, RedAcc2}} = couch_view:fold_reduce(View,
+ RespFun, FoldAccInit, [{key_group_fun, GroupRowsFun} |
+ make_key_options(QueryArgs#view_query_args{
+ start_key=Key, end_key=Key})]),
+ % Switch to comma
+ {Resp2, RedAcc2}
+ end,
+ {undefined, []}, Keys), % Start with no comma
+ finish_reduce_fold(Req, Resp, [{update_seq,Group#group.current_seq}])
+ end).
+
+reverse_key_default(?MIN_STR) -> ?MAX_STR;
+reverse_key_default(?MAX_STR) -> ?MIN_STR;
+reverse_key_default(Key) -> Key.
+
+get_stale_type(Req) ->
+ list_to_existing_atom(couch_httpd:qs_value(Req, "stale", "nil")).
+
+get_reduce_type(Req) ->
+ list_to_existing_atom(couch_httpd:qs_value(Req, "reduce", "true")).
+
+load_view(Req, Db, {ViewDesignId, ViewName}, Keys) ->
+ Stale = get_stale_type(Req),
+ Reduce = get_reduce_type(Req),
+ case couch_view:get_map_view(Db, ViewDesignId, ViewName, Stale) of
+ {ok, View, Group} ->
+ QueryArgs = parse_view_params(Req, Keys, map),
+ {map, View, Group, QueryArgs};
+ {not_found, _Reason} ->
+ case couch_view:get_reduce_view(Db, ViewDesignId, ViewName, Stale) of
+ {ok, ReduceView, Group} ->
+ case Reduce of
+ false ->
+ QueryArgs = parse_view_params(Req, Keys, map_red),
+ MapView = couch_view:extract_map_view(ReduceView),
+ {map, MapView, Group, QueryArgs};
+ _ ->
+ QueryArgs = parse_view_params(Req, Keys, reduce),
+ {reduce, ReduceView, Group, QueryArgs}
+ end;
+ {not_found, Reason} ->
+ throw({not_found, Reason})
+ end
+ end.
+
+% query_parse_error could be removed
+% we wouldn't need to pass the view type, it'd just parse params.
+% I'm not sure what to do about the error handling, but
+% it might simplify things to have a parse_view_params function
+% that doesn't throw().
+parse_view_params(Req, Keys, ViewType) ->
+ QueryList = couch_httpd:qs(Req),
+ QueryParams =
+ lists:foldl(fun({K, V}, Acc) ->
+ parse_view_param(K, V) ++ Acc
+ end, [], QueryList),
+ IsMultiGet = (Keys =/= nil),
+ Args = #view_query_args{
+ view_type=ViewType,
+ multi_get=IsMultiGet
+ },
+ QueryArgs = lists:foldl(fun({K, V}, Args2) ->
+ validate_view_query(K, V, Args2)
+ end, Args, lists:reverse(QueryParams)), % Reverse to match QS order.
+ warn_on_empty_key_range(QueryArgs),
+ GroupLevel = QueryArgs#view_query_args.group_level,
+ case {ViewType, GroupLevel, IsMultiGet} of
+ {reduce, exact, true} ->
+ QueryArgs;
+ {reduce, _, false} ->
+ QueryArgs;
+ {reduce, _, _} ->
+ % we can simplify code if we just drop this error message.
+ Msg = <<"Multi-key fetchs for reduce "
+ "view must include `group=true`">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ QueryArgs
+ end,
+ QueryArgs.
+
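+% Illustrative behaviour of the clauses below: ?key="abc" expands to both
+% start_key and end_key, so it acts like ?startkey="abc"&endkey="abc";
+% parameters without a dedicated clause fall through to {extra, {Key, Value}}.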
+parse_view_param("", _) ->
+ [];
+parse_view_param("key", Value) ->
+ JsonKey = ?JSON_DECODE(Value),
+ [{start_key, JsonKey}, {end_key, JsonKey}];
+% TODO: maybe deprecate startkey_docid
+parse_view_param("startkey_docid", Value) ->
+ [{start_docid, ?l2b(Value)}];
+parse_view_param("start_key_doc_id", Value) ->
+ [{start_docid, ?l2b(Value)}];
+% TODO: maybe deprecate endkey_docid
+parse_view_param("endkey_docid", Value) ->
+ [{end_docid, ?l2b(Value)}];
+parse_view_param("end_key_doc_id", Value) ->
+ [{end_docid, ?l2b(Value)}];
+% TODO: maybe deprecate startkey
+parse_view_param("startkey", Value) ->
+ [{start_key, ?JSON_DECODE(Value)}];
+parse_view_param("start_key", Value) ->
+ [{start_key, ?JSON_DECODE(Value)}];
+% TODO: maybe deprecate endkey
+parse_view_param("endkey", Value) ->
+ [{end_key, ?JSON_DECODE(Value)}];
+parse_view_param("end_key", Value) ->
+ [{end_key, ?JSON_DECODE(Value)}];
+parse_view_param("limit", Value) ->
+ [{limit, parse_positive_int_param(Value)}];
+parse_view_param("count", _Value) ->
+ throw({query_parse_error, <<"Query parameter 'count' is now 'limit'.">>});
+parse_view_param("stale", "ok") ->
+ [{stale, ok}];
+parse_view_param("stale", "update_after") ->
+ [{stale, update_after}];
+parse_view_param("stale", _Value) ->
+ throw({query_parse_error,
+ <<"stale only available as stale=ok or as stale=update_after">>});
+parse_view_param("update", _Value) ->
+ throw({query_parse_error, <<"update=false is now stale=ok">>});
+parse_view_param("descending", Value) ->
+ [{descending, parse_bool_param(Value)}];
+parse_view_param("skip", Value) ->
+ [{skip, parse_int_param(Value)}];
+parse_view_param("group", Value) ->
+ case parse_bool_param(Value) of
+ true -> [{group_level, exact}];
+ false -> [{group_level, 0}]
+ end;
+parse_view_param("group_level", Value) ->
+ [{group_level, parse_positive_int_param(Value)}];
+parse_view_param("inclusive_end", Value) ->
+ [{inclusive_end, parse_bool_param(Value)}];
+parse_view_param("reduce", Value) ->
+ [{reduce, parse_bool_param(Value)}];
+parse_view_param("include_docs", Value) ->
+ [{include_docs, parse_bool_param(Value)}];
+parse_view_param("conflicts", Value) ->
+ [{conflicts, parse_bool_param(Value)}];
+parse_view_param("list", Value) ->
+ [{list, ?l2b(Value)}];
+parse_view_param("callback", _) ->
+ []; % Verified in the JSON response functions
+parse_view_param(Key, Value) ->
+ [{extra, {Key, Value}}].
+
+warn_on_empty_key_range(#view_query_args{start_key=undefined}) ->
+ ok;
+warn_on_empty_key_range(#view_query_args{end_key=undefined}) ->
+ ok;
+warn_on_empty_key_range(#view_query_args{start_key=A, end_key=A}) ->
+ ok;
+warn_on_empty_key_range(#view_query_args{
+ start_key=StartKey, end_key=EndKey, direction=Dir}) ->
+ case {Dir, couch_view:less_json(StartKey, EndKey)} of
+ {fwd, false} ->
+ throw({query_parse_error,
+ <<"No rows can match your key range, reverse your ",
+ "start_key and end_key or set descending=true">>});
+ {rev, true} ->
+ throw({query_parse_error,
+ <<"No rows can match your key range, reverse your ",
+ "start_key and end_key or set descending=false">>});
+ _ -> ok
+ end.
+
+validate_view_query(start_key, Value, Args) ->
+ case Args#view_query_args.multi_get of
+ true ->
+ Msg = <<"Query parameter `start_key` is "
+ "not compatible with multi-get">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ Args#view_query_args{start_key=Value}
+ end;
+validate_view_query(start_docid, Value, Args) ->
+ Args#view_query_args{start_docid=Value};
+validate_view_query(end_key, Value, Args) ->
+ case Args#view_query_args.multi_get of
+ true->
+ Msg = <<"Query parameter `end_key` is "
+ "not compatible with multi-get">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ Args#view_query_args{end_key=Value}
+ end;
+validate_view_query(end_docid, Value, Args) ->
+ Args#view_query_args{end_docid=Value};
+validate_view_query(limit, Value, Args) ->
+ Args#view_query_args{limit=Value};
+validate_view_query(list, Value, Args) ->
+ Args#view_query_args{list=Value};
+validate_view_query(stale, ok, Args) ->
+ Args#view_query_args{stale=ok};
+validate_view_query(stale, update_after, Args) ->
+ Args#view_query_args{stale=update_after};
+validate_view_query(stale, _, Args) ->
+ Args;
+validate_view_query(descending, true, Args) ->
+ case Args#view_query_args.direction of
+ rev -> Args; % Already reversed
+ fwd ->
+ Args#view_query_args{
+ direction = rev,
+ start_docid =
+ reverse_key_default(Args#view_query_args.start_docid),
+ end_docid =
+ reverse_key_default(Args#view_query_args.end_docid)
+ }
+ end;
+validate_view_query(descending, false, Args) ->
+ Args; % Ignore default condition
+validate_view_query(skip, Value, Args) ->
+ Args#view_query_args{skip=Value};
+validate_view_query(group_level, Value, Args) ->
+ case Args#view_query_args.view_type of
+ reduce ->
+ Args#view_query_args{group_level=Value};
+ _ ->
+ Msg = <<"Invalid URL parameter 'group' or "
+ " 'group_level' for non-reduce view.">>,
+ throw({query_parse_error, Msg})
+ end;
+validate_view_query(inclusive_end, Value, Args) ->
+ Args#view_query_args{inclusive_end=Value};
+validate_view_query(reduce, false, Args) ->
+ Args;
+validate_view_query(reduce, _, Args) ->
+ case Args#view_query_args.view_type of
+ map ->
+ Msg = <<"Invalid URL parameter `reduce` for map view.">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ Args
+ end;
+validate_view_query(include_docs, true, Args) ->
+ case Args#view_query_args.view_type of
+ reduce ->
+ Msg = <<"Query parameter `include_docs` "
+ "is invalid for reduce views.">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ Args#view_query_args{include_docs=true}
+ end;
+% Use the view_query_args record's default value
+validate_view_query(include_docs, _Value, Args) ->
+ Args;
+validate_view_query(conflicts, true, Args) ->
+ case Args#view_query_args.view_type of
+ reduce ->
+ Msg = <<"Query parameter `conflicts` "
+ "is invalid for reduce views.">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ Args#view_query_args{conflicts = true}
+ end;
+validate_view_query(extra, _Value, Args) ->
+ Args.
+
+make_view_fold_fun(Req, QueryArgs, Etag, Db, UpdateSeq, TotalViewCount, HelperFuns) ->
+ #view_fold_helper_funs{
+ start_response = StartRespFun,
+ send_row = SendRowFun,
+ reduce_count = ReduceCountFun
+ } = apply_default_helper_funs(HelperFuns),
+
+ #view_query_args{
+ include_docs = IncludeDocs,
+ conflicts = Conflicts
+ } = QueryArgs,
+
+ fun({{Key, DocId}, Value}, OffsetReds,
+ {AccLimit, AccSkip, Resp, RowFunAcc}) ->
+ case {AccLimit, AccSkip, Resp} of
+ {0, _, _} ->
+ % we've done "limit" rows, stop foldling
+ {stop, {0, 0, Resp, RowFunAcc}};
+ {_, AccSkip, _} when AccSkip > 0 ->
+ % just keep skipping
+ {ok, {AccLimit, AccSkip - 1, Resp, RowFunAcc}};
+ {_, _, undefined} ->
+ % rendering the first row, first we start the response
+ Offset = ReduceCountFun(OffsetReds),
+ {ok, Resp2, RowFunAcc0} = StartRespFun(Req, Etag,
+ TotalViewCount, Offset, RowFunAcc, UpdateSeq),
+ {Go, RowFunAcc2} = SendRowFun(Resp2, Db, {{Key, DocId}, Value},
+ IncludeDocs, Conflicts, RowFunAcc0),
+ {Go, {AccLimit - 1, 0, Resp2, RowFunAcc2}};
+ {AccLimit, _, Resp} when (AccLimit > 0) ->
+ % rendering all other rows
+ {Go, RowFunAcc2} = SendRowFun(Resp, Db, {{Key, DocId}, Value},
+ IncludeDocs, Conflicts, RowFunAcc),
+ {Go, {AccLimit - 1, 0, Resp, RowFunAcc2}}
+ end
+ end.
+
+make_reduce_fold_funs(Req, GroupLevel, _QueryArgs, Etag, UpdateSeq, HelperFuns) ->
+ #reduce_fold_helper_funs{
+ start_response = StartRespFun,
+ send_row = SendRowFun
+ } = apply_default_helper_funs(HelperFuns),
+
+ GroupRowsFun =
+ fun({_Key1,_}, {_Key2,_}) when GroupLevel == 0 ->
+ true;
+ ({Key1,_}, {Key2,_})
+ when is_integer(GroupLevel) and is_list(Key1) and is_list(Key2) ->
+ lists:sublist(Key1, GroupLevel) == lists:sublist(Key2, GroupLevel);
+ ({Key1,_}, {Key2,_}) ->
+ Key1 == Key2
+ end,
+
+ RespFun = fun
+ (_Key, _Red, {AccLimit, AccSkip, Resp, RowAcc}) when AccSkip > 0 ->
+ % keep skipping
+ {ok, {AccLimit, AccSkip - 1, Resp, RowAcc}};
+ (_Key, _Red, {0, _AccSkip, Resp, RowAcc}) ->
+ % we've exhausted limit rows, stop
+ {stop, {0, _AccSkip, Resp, RowAcc}};
+
+ (_Key, Red, {AccLimit, 0, undefined, RowAcc0}) when GroupLevel == 0 ->
+ % we haven't started responding yet and group=false
+ {ok, Resp2, RowAcc} = StartRespFun(Req, Etag, RowAcc0, UpdateSeq),
+ {Go, RowAcc2} = SendRowFun(Resp2, {null, Red}, RowAcc),
+ {Go, {AccLimit - 1, 0, Resp2, RowAcc2}};
+ (_Key, Red, {AccLimit, 0, Resp, RowAcc}) when GroupLevel == 0 ->
+ % group=false but we've already started the response
+ {Go, RowAcc2} = SendRowFun(Resp, {null, Red}, RowAcc),
+ {Go, {AccLimit - 1, 0, Resp, RowAcc2}};
+
+ (Key, Red, {AccLimit, 0, undefined, RowAcc0})
+ when is_integer(GroupLevel), is_list(Key) ->
+ % group_level and we haven't responded yet
+ {ok, Resp2, RowAcc} = StartRespFun(Req, Etag, RowAcc0, UpdateSeq),
+ {Go, RowAcc2} = SendRowFun(Resp2,
+ {lists:sublist(Key, GroupLevel), Red}, RowAcc),
+ {Go, {AccLimit - 1, 0, Resp2, RowAcc2}};
+ (Key, Red, {AccLimit, 0, Resp, RowAcc})
+ when is_integer(GroupLevel), is_list(Key) ->
+ % group_level and we've already started the response
+ {Go, RowAcc2} = SendRowFun(Resp,
+ {lists:sublist(Key, GroupLevel), Red}, RowAcc),
+ {Go, {AccLimit - 1, 0, Resp, RowAcc2}};
+
+ (Key, Red, {AccLimit, 0, undefined, RowAcc0}) ->
+ % group=true and we haven't responded yet
+ {ok, Resp2, RowAcc} = StartRespFun(Req, Etag, RowAcc0, UpdateSeq),
+ {Go, RowAcc2} = SendRowFun(Resp2, {Key, Red}, RowAcc),
+ {Go, {AccLimit - 1, 0, Resp2, RowAcc2}};
+ (Key, Red, {AccLimit, 0, Resp, RowAcc}) ->
+ % group=true and we've already started the response
+ {Go, RowAcc2} = SendRowFun(Resp, {Key, Red}, RowAcc),
+ {Go, {AccLimit - 1, 0, Resp, RowAcc2}}
+ end,
+ {ok, GroupRowsFun, RespFun}.
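+
+% Illustrative: with group_level=1 the keys ["a",1] and ["a",2] fall into
+% the same group (lists:sublist/2 of each is ["a"]) and the reduction is
+% emitted as {["a"], Red}.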
+
+apply_default_helper_funs(
+ #view_fold_helper_funs{
+ start_response = StartResp,
+ send_row = SendRow
+ }=Helpers) ->
+ StartResp2 = case StartResp of
+ undefined -> fun json_view_start_resp/6;
+ _ -> StartResp
+ end,
+
+ SendRow2 = case SendRow of
+ undefined -> fun send_json_view_row/6;
+ _ -> SendRow
+ end,
+
+ Helpers#view_fold_helper_funs{
+ start_response = StartResp2,
+ send_row = SendRow2
+ };
+
+
+apply_default_helper_funs(
+ #reduce_fold_helper_funs{
+ start_response = StartResp,
+ send_row = SendRow
+ }=Helpers) ->
+ StartResp2 = case StartResp of
+ undefined -> fun json_reduce_start_resp/4;
+ _ -> StartResp
+ end,
+
+ SendRow2 = case SendRow of
+ undefined -> fun send_json_reduce_row/3;
+ _ -> SendRow
+ end,
+
+ Helpers#reduce_fold_helper_funs{
+ start_response = StartResp2,
+ send_row = SendRow2
+ }.
+
+make_key_options(#view_query_args{direction = Dir}=QueryArgs) ->
+ [{dir,Dir} | make_start_key_option(QueryArgs) ++
+ make_end_key_option(QueryArgs)].
+
+make_start_key_option(
+ #view_query_args{
+ start_key = StartKey,
+ start_docid = StartDocId}) ->
+ if StartKey == undefined ->
+ [];
+ true ->
+ [{start_key, {StartKey, StartDocId}}]
+ end.
+
+make_end_key_option(#view_query_args{end_key = undefined}) ->
+ [];
+make_end_key_option(
+ #view_query_args{end_key = EndKey,
+ end_docid = EndDocId,
+ inclusive_end = true}) ->
+ [{end_key, {EndKey, EndDocId}}];
+make_end_key_option(
+ #view_query_args{
+ end_key = EndKey,
+ end_docid = EndDocId,
+ inclusive_end = false}) ->
+ [{end_key_gt, {EndKey,reverse_key_default(EndDocId)}}].
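+
+% Illustrative (assumed query): startkey="a"&endkey="b" with
+% inclusive_end=false yields
+%   [{dir, fwd}, {start_key, {<<"a">>, StartDocId}},
+%    {end_key_gt, {<<"b">>, reverse_key_default(EndDocId)}}]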
+
+json_view_start_resp(Req, Etag, TotalViewCount, Offset, _Acc, UpdateSeq) ->
+ {ok, Resp} = start_json_response(Req, 200, [{"Etag", Etag}]),
+ BeginBody = case couch_httpd:qs_value(Req, "update_seq") of
+ "true" ->
+ io_lib:format(
+ "{\"total_rows\":~w,\"update_seq\":~w,"
+ "\"offset\":~w,\"rows\":[\r\n",
+ [TotalViewCount, UpdateSeq, Offset]);
+ _Else ->
+ io_lib:format(
+ "{\"total_rows\":~w,\"offset\":~w,\"rows\":[\r\n",
+ [TotalViewCount, Offset])
+ end,
+ {ok, Resp, BeginBody}.
+
+send_json_view_row(Resp, Db, Kv, IncludeDocs, Conflicts, RowFront) ->
+ JsonObj = view_row_obj(Db, Kv, IncludeDocs, Conflicts),
+ send_chunk(Resp, RowFront ++ ?JSON_ENCODE(JsonObj)),
+ {ok, ",\r\n"}.
+
+json_reduce_start_resp(Req, Etag, _Acc0, UpdateSeq) ->
+ {ok, Resp} = start_json_response(Req, 200, [{"Etag", Etag}]),
+ case couch_httpd:qs_value(Req, "update_seq") of
+ "true" ->
+ {ok, Resp, io_lib:format("{\"update_seq\":~w,\"rows\":[\r\n",[UpdateSeq])};
+ _Else ->
+ {ok, Resp, "{\"rows\":[\r\n"}
+ end.
+
+send_json_reduce_row(Resp, {Key, Value}, RowFront) ->
+ send_chunk(Resp, RowFront ++ ?JSON_ENCODE({[{key, Key}, {value, Value}]})),
+ {ok, ",\r\n"}.
+
+view_etag(Db, Group, View) ->
+ view_etag(Db, Group, View, nil).
+
+view_etag(Db, Group, {reduce, _, _, View}, Extra) ->
+ view_etag(Db, Group, View, Extra);
+view_etag(Db, Group, {temp_reduce, View}, Extra) ->
+ view_etag(Db, Group, View, Extra);
+view_etag(_Db, #group{sig=Sig}, #view{update_seq=UpdateSeq, purge_seq=PurgeSeq}, Extra) ->
+ couch_httpd:make_etag({Sig, UpdateSeq, PurgeSeq, Extra}).
+
+% the view row has an error
+view_row_obj(_Db, {{Key, error}, Value}, _IncludeDocs, _Conflicts) ->
+ {[{key, Key}, {error, Value}]};
+% include docs in the view output
+view_row_obj(Db, {{Key, DocId}, {Props}}, true, Conflicts) ->
+ Rev = case couch_util:get_value(<<"_rev">>, Props) of
+ undefined ->
+ nil;
+ Rev0 ->
+ couch_doc:parse_rev(Rev0)
+ end,
+ IncludeId = couch_util:get_value(<<"_id">>, Props, DocId),
+ view_row_with_doc(Db, {{Key, DocId}, {Props}}, {IncludeId, Rev}, Conflicts);
+view_row_obj(Db, {{Key, DocId}, Value}, true, Conflicts) ->
+ view_row_with_doc(Db, {{Key, DocId}, Value}, {DocId, nil}, Conflicts);
+% the normal case for rendering a view row
+view_row_obj(_Db, {{Key, DocId}, Value}, _IncludeDocs, _Conflicts) ->
+ {[{id, DocId}, {key, Key}, {value, Value}]}.
+
+view_row_with_doc(Db, {{Key, DocId}, Value}, IdRev, Conflicts) ->
+ {[{id, DocId}, {key, Key}, {value, Value}] ++
+ doc_member(Db, IdRev, if Conflicts -> [conflicts]; true -> [] end)}.
+
+doc_member(Db, #doc_info{id = Id, revs = [#rev_info{rev = Rev} | _]} = Info,
+ Options) ->
+ ?LOG_DEBUG("Include Doc: ~p ~p", [Id, Rev]),
+ case couch_db:open_doc(Db, Info, [deleted | Options]) of
+ {ok, Doc} ->
+ [{doc, couch_doc:to_json_obj(Doc, [])}];
+ _ ->
+ [{doc, null}]
+ end;
+doc_member(Db, {DocId, Rev}, Options) ->
+ ?LOG_DEBUG("Include Doc: ~p ~p", [DocId, Rev]),
+ case (catch couch_httpd_db:couch_doc_open(Db, DocId, Rev, Options)) of
+ #doc{} = Doc ->
+ JsonDoc = couch_doc:to_json_obj(Doc, []),
+ [{doc, JsonDoc}];
+ _Else ->
+ [{doc, null}]
+ end.
+
+finish_view_fold(Req, TotalRows, Offset, FoldResult) ->
+ finish_view_fold(Req, TotalRows, Offset, FoldResult, []).
+
+finish_view_fold(Req, TotalRows, Offset, FoldResult, Fields) ->
+ case FoldResult of
+ {_, _, undefined, _} ->
+ % nothing found in the view or keys, nothing has been returned
+ % send empty view
+ send_json(Req, 200, {[
+ {total_rows, TotalRows},
+ {offset, Offset},
+ {rows, []}
+ ] ++ Fields});
+ {_, _, Resp, _} ->
+ % end the view
+ send_chunk(Resp, "\r\n]}"),
+ end_json_response(Resp)
+ end.
+
+finish_reduce_fold(Req, Resp) ->
+ finish_reduce_fold(Req, Resp, []).
+
+finish_reduce_fold(Req, Resp, Fields) ->
+ case Resp of
+ undefined ->
+ send_json(Req, 200, {[
+ {rows, []}
+ ] ++ Fields});
+ Resp ->
+ send_chunk(Resp, "\r\n]}"),
+ end_json_response(Resp)
+ end.
+
+parse_bool_param(Val) ->
+ case string:to_lower(Val) of
+ "true" -> true;
+ "false" -> false;
+ _ ->
+ Msg = io_lib:format("Invalid boolean parameter: ~p", [Val]),
+ throw({query_parse_error, ?l2b(Msg)})
+ end.
+
+parse_int_param(Val) ->
+ case (catch list_to_integer(Val)) of
+ IntVal when is_integer(IntVal) ->
+ IntVal;
+ _ ->
+ Msg = io_lib:format("Invalid value for integer parameter: ~p", [Val]),
+ throw({query_parse_error, ?l2b(Msg)})
+ end.
+
+parse_positive_int_param(Val) ->
+ case parse_int_param(Val) of
+ IntVal when IntVal >= 0 ->
+ IntVal;
+ _ ->
+ Fmt = "Invalid value for positive integer parameter: ~p",
+ Msg = io_lib:format(Fmt, [Val]),
+ throw({query_parse_error, ?l2b(Msg)})
+ end.
+
diff --git a/1.1.x/src/couchdb/couch_js_functions.hrl b/1.1.x/src/couchdb/couch_js_functions.hrl
new file mode 100644
index 00000000..0cc49d62
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_js_functions.hrl
@@ -0,0 +1,226 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-define(AUTH_DB_DOC_VALIDATE_FUNCTION, <<"
+ function(newDoc, oldDoc, userCtx) {
+ if (newDoc._deleted === true) {
+ // allow deletes by admins and matching users
+ // without checking the other fields
+ if ((userCtx.roles.indexOf('_admin') !== -1) ||
+ (userCtx.name == oldDoc.name)) {
+ return;
+ } else {
+ throw({forbidden: 'Only admins may delete other user docs.'});
+ }
+ }
+
+ if ((oldDoc && oldDoc.type !== 'user') || newDoc.type !== 'user') {
+ throw({forbidden : 'doc.type must be user'});
+ } // we only allow user docs for now
+
+ if (!newDoc.name) {
+ throw({forbidden: 'doc.name is required'});
+ }
+
+ if (newDoc.roles && !isArray(newDoc.roles)) {
+ throw({forbidden: 'doc.roles must be an array'});
+ }
+
+ if (newDoc._id !== ('org.couchdb.user:' + newDoc.name)) {
+ throw({
+ forbidden: 'Doc ID must be of the form org.couchdb.user:name'
+ });
+ }
+
+ if (oldDoc) { // validate all updates
+ if (oldDoc.name !== newDoc.name) {
+ throw({forbidden: 'Usernames can not be changed.'});
+ }
+ }
+
+ if (newDoc.password_sha && !newDoc.salt) {
+ throw({
+            forbidden: 'Users with password_sha must have a salt. ' +
+                'See /_utils/script/couch.js for example code.'
+ });
+ }
+
+ if (userCtx.roles.indexOf('_admin') === -1) {
+ if (oldDoc) { // validate non-admin updates
+ if (userCtx.name !== newDoc.name) {
+ throw({
+ forbidden: 'You may only update your own user document.'
+ });
+ }
+ // validate role updates
+ var oldRoles = oldDoc.roles.sort();
+ var newRoles = newDoc.roles.sort();
+
+ if (oldRoles.length !== newRoles.length) {
+ throw({forbidden: 'Only _admin may edit roles'});
+ }
+
+ for (var i = 0; i < oldRoles.length; i++) {
+ if (oldRoles[i] !== newRoles[i]) {
+ throw({forbidden: 'Only _admin may edit roles'});
+ }
+ }
+ } else if (newDoc.roles.length > 0) {
+ throw({forbidden: 'Only _admin may set roles'});
+ }
+ }
+
+ // no system roles in users db
+ for (var i = 0; i < newDoc.roles.length; i++) {
+ if (newDoc.roles[i][0] === '_') {
+ throw({
+ forbidden:
+ 'No system roles (starting with underscore) in users db.'
+ });
+ }
+ }
+
+ // no system names as names
+ if (newDoc.name[0] === '_') {
+ throw({forbidden: 'Username may not start with underscore.'});
+ }
+ }
+">>).
+
+
+-define(REP_DB_DOC_VALIDATE_FUN, <<"
+ function(newDoc, oldDoc, userCtx) {
+ function reportError(error_msg) {
+ log('Error writing document `' + newDoc._id +
+ '\\' to the replicator database: ' + error_msg);
+ throw({forbidden: error_msg});
+ }
+
+ function validateEndpoint(endpoint, fieldName) {
+ if ((typeof endpoint !== 'string') &&
+ ((typeof endpoint !== 'object') || (endpoint === null))) {
+
+ reportError('The `' + fieldName + '\\' property must exist' +
+ ' and be either a string or an object.');
+ }
+
+ if (typeof endpoint === 'object') {
+ if ((typeof endpoint.url !== 'string') || !endpoint.url) {
+ reportError('The url property must exist in the `' +
+ fieldName + '\\' field and must be a non-empty string.');
+ }
+
+ if ((typeof endpoint.auth !== 'undefined') &&
+ ((typeof endpoint.auth !== 'object') ||
+ endpoint.auth === null)) {
+
+ reportError('`' + fieldName +
+ '.auth\\' must be a non-null object.');
+ }
+
+ if ((typeof endpoint.headers !== 'undefined') &&
+ ((typeof endpoint.headers !== 'object') ||
+ endpoint.headers === null)) {
+
+ reportError('`' + fieldName +
+ '.headers\\' must be a non-null object.');
+ }
+ }
+ }
+
+ var isReplicator = (userCtx.roles.indexOf('_replicator') >= 0);
+ var isAdmin = (userCtx.roles.indexOf('_admin') >= 0);
+
+ if (oldDoc && !newDoc._deleted && !isReplicator) {
+ reportError('Only the replicator can edit replication documents.');
+ }
+
+ if (!newDoc._deleted) {
+ validateEndpoint(newDoc.source, 'source');
+ validateEndpoint(newDoc.target, 'target');
+
+ if ((typeof newDoc.create_target !== 'undefined') &&
+ (typeof newDoc.create_target !== 'boolean')) {
+
+ reportError('The `create_target\\' field must be a boolean.');
+ }
+
+ if ((typeof newDoc.continuous !== 'undefined') &&
+ (typeof newDoc.continuous !== 'boolean')) {
+
+ reportError('The `continuous\\' field must be a boolean.');
+ }
+
+ if ((typeof newDoc.doc_ids !== 'undefined') &&
+ !isArray(newDoc.doc_ids)) {
+
+ reportError('The `doc_ids\\' field must be an array of strings.');
+ }
+
+ if ((typeof newDoc.filter !== 'undefined') &&
+ ((typeof newDoc.filter !== 'string') || !newDoc.filter)) {
+
+ reportError('The `filter\\' field must be a non-empty string.');
+ }
+
+ if ((typeof newDoc.query_params !== 'undefined') &&
+ ((typeof newDoc.query_params !== 'object') ||
+ newDoc.query_params === null)) {
+
+ reportError('The `query_params\\' field must be an object.');
+ }
+
+ if (newDoc.user_ctx) {
+ if (!isAdmin) {
+ reportError('Delegated replications (use of the ' +
+ '`user_ctx\\' property) can only be triggered by ' +
+ 'administrators.');
+ }
+
+ var user_ctx = newDoc.user_ctx;
+
+ if ((typeof user_ctx !== 'object') || (user_ctx === null)) {
+ reportError('The `user_ctx\\' property must be a ' +
+ 'non-null object.');
+ }
+
+ if (!(user_ctx.name === null ||
+ (typeof user_ctx.name === 'undefined') ||
+ ((typeof user_ctx.name === 'string') &&
+ user_ctx.name.length > 0))) {
+
+ reportError('The `user_ctx.name\\' property must be a ' +
+ 'non-empty string or null.');
+ }
+
+ if (user_ctx.roles && !isArray(user_ctx.roles)) {
+ reportError('The `user_ctx.roles\\' property must be ' +
+ 'an array of strings.');
+ }
+
+ if (user_ctx.roles) {
+ for (var i = 0; i < user_ctx.roles.length; i++) {
+ var role = user_ctx.roles[i];
+
+ if (typeof role !== 'string' || role.length === 0) {
+ reportError('Roles must be non-empty strings.');
+ }
+ if (role[0] === '_') {
+ reportError('System roles (starting with an ' +
+ 'underscore) are not allowed.');
+ }
+ }
+ }
+ }
+ }
+ }
+">>).
diff --git a/1.1.x/src/couchdb/couch_key_tree.erl b/1.1.x/src/couchdb/couch_key_tree.erl
new file mode 100644
index 00000000..bc723cc2
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_key_tree.erl
@@ -0,0 +1,332 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_key_tree).
+
+-export([merge/3, find_missing/2, get_key_leafs/2, get_full_key_paths/2, get/2]).
+-export([map/2, get_all_leafs/1, count_leafs/1, remove_leafs/2,
+ get_all_leafs_full/1,stem/2,map_leafs/2]).
+
+% Tree::term() is really a tree(), but we don't want to require R13B04 yet
+-type branch() :: {Key::term(), Value::term(), Tree::term()}.
+-type path() :: {Start::pos_integer(), branch()}.
+-type tree() :: [branch()]. % sorted by key
+
+% partial trees arranged by how much they are cut off.
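+%
+% Illustrative example (assumed values): the two-revision path
+% 1-rev1 -> 2-rev2 is represented as
+%   {1, {"rev1", Value1, [{"rev2", Value2, []}]}},
+% and stemming it to depth 1 would keep only {2, {"rev2", Value2, []}}.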
+
+-spec merge([path()], path(), pos_integer()) -> {[path()],
+ conflicts | no_conflicts}.
+merge(Paths, Path, Depth) ->
+ {Merged, Conflicts} = merge(Paths, Path),
+ {stem(Merged, Depth), Conflicts}.
+
+-spec merge([path()], path()) -> {[path()], conflicts | no_conflicts}.
+merge(Paths, Path) ->
+ {ok, Merged, HasConflicts} = merge_one(Paths, Path, [], false),
+ if HasConflicts ->
+ Conflicts = conflicts;
+ (length(Merged) =/= length(Paths)) and (length(Merged) =/= 1) ->
+ Conflicts = conflicts;
+ true ->
+ Conflicts = no_conflicts
+ end,
+ {lists:sort(Merged), Conflicts}.
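+
+% Illustrative: merging a path that simply extends an existing branch
+% yields a single merged path and no_conflicts; inserting a sibling key
+% at the same position leaves two paths and reports conflicts.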
+
+-spec merge_one(Original::[path()], Inserted::path(), [path()], boolean()) ->
+ {ok, Merged::[path()], NewConflicts::boolean()}.
+merge_one([], Insert, OutAcc, ConflictsAcc) ->
+ {ok, [Insert | OutAcc], ConflictsAcc};
+merge_one([{Start, Tree}|Rest], {StartInsert, TreeInsert}, Acc, HasConflicts) ->
+ case merge_at([Tree], StartInsert - Start, [TreeInsert]) of
+ {ok, [Merged], Conflicts} ->
+ MergedStart = lists:min([Start, StartInsert]),
+ {ok, Rest ++ [{MergedStart, Merged} | Acc], Conflicts or HasConflicts};
+ no ->
+ AccOut = [{Start, Tree} | Acc],
+ merge_one(Rest, {StartInsert, TreeInsert}, AccOut, HasConflicts)
+ end.
+
+-spec merge_at(tree(), Place::integer(), tree()) ->
+ {ok, Merged::tree(), HasConflicts::boolean()} | no.
+merge_at(_Ours, _Place, []) ->
+ no;
+merge_at([], _Place, _Insert) ->
+ no;
+merge_at([{Key, Value, SubTree}|Sibs], Place, InsertTree) when Place > 0 ->
+ % inserted starts later than committed, need to drill into committed subtree
+ case merge_at(SubTree, Place - 1, InsertTree) of
+ {ok, Merged, Conflicts} ->
+ {ok, [{Key, Value, Merged} | Sibs], Conflicts};
+ no ->
+ case merge_at(Sibs, Place, InsertTree) of
+ {ok, Merged, Conflicts} ->
+ {ok, [{Key, Value, SubTree} | Merged], Conflicts};
+ no ->
+ no
+ end
+ end;
+merge_at(OurTree, Place, [{Key, Value, SubTree}]) when Place < 0 ->
+ % inserted starts earlier than committed, need to drill into insert subtree
+ case merge_at(OurTree, Place + 1, SubTree) of
+ {ok, Merged, Conflicts} ->
+ {ok, [{Key, Value, Merged}], Conflicts};
+ no ->
+ no
+ end;
+merge_at([{Key, Value, SubTree}|Sibs], 0, [{Key, _Value, InsertSubTree}]) ->
+ {Merged, Conflicts} = merge_simple(SubTree, InsertSubTree),
+ {ok, [{Key, Value, Merged} | Sibs], Conflicts};
+merge_at([{OurKey, _, _} | _], 0, [{Key, _, _}]) when OurKey > Key ->
+ % siblings keys are ordered, no point in continuing
+ no;
+merge_at([Tree | Sibs], 0, InsertTree) ->
+ case merge_at(Sibs, 0, InsertTree) of
+ {ok, Merged, Conflicts} ->
+ {ok, [Tree | Merged], Conflicts};
+ no ->
+ no
+ end.
+
+% key tree functions
+
+-spec merge_simple(tree(), tree()) -> {Merged::tree(), NewConflicts::boolean()}.
+merge_simple([], B) ->
+ {B, false};
+merge_simple(A, []) ->
+ {A, false};
+merge_simple([{Key, Value, SubA} | NextA], [{Key, _, SubB} | NextB]) ->
+ {MergedSubTree, Conflict1} = merge_simple(SubA, SubB),
+ {MergedNextTree, Conflict2} = merge_simple(NextA, NextB),
+ {[{Key, Value, MergedSubTree} | MergedNextTree], Conflict1 or Conflict2};
+merge_simple([{A, _, _} = Tree | Next], [{B, _, _} | _] = Insert) when A < B ->
+ {Merged, _} = merge_simple(Next, Insert),
+ {[Tree | Merged], true};
+merge_simple(Ours, [Tree | Next]) ->
+ {Merged, _} = merge_simple(Ours, Next),
+ {[Tree | Merged], true}.
+
+find_missing(_Tree, []) ->
+ [];
+find_missing([], SearchKeys) ->
+    SearchKeys;
+find_missing([{Start, {Key, Value, SubTree}} | RestTree], SearchKeys) ->
+    PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos >= Start],
+    ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos < Start],
+ Missing = find_missing_simple(Start, [{Key, Value, SubTree}], PossibleKeys),
+ find_missing(RestTree, ImpossibleKeys ++ Missing).
+
+find_missing_simple(_Pos, _Tree, []) ->
+ [];
+find_missing_simple(_Pos, [], SearchKeys) ->
+    SearchKeys;
+find_missing_simple(Pos, [{Key, _, SubTree} | RestTree], SearchKeys) ->
+    PossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos >= Pos],
+    ImpossibleKeys = [{KeyPos, KeyValue} || {KeyPos, KeyValue} <- SearchKeys, KeyPos < Pos],
+
+ SrcKeys2 = PossibleKeys -- [{Pos, Key}],
+ SrcKeys3 = find_missing_simple(Pos + 1, SubTree, SrcKeys2),
+ ImpossibleKeys ++ find_missing_simple(Pos, RestTree, SrcKeys3).
+
+
+filter_leafs([], _Keys, FilteredAcc, RemovedKeysAcc) ->
+ {FilteredAcc, RemovedKeysAcc};
+filter_leafs([{Pos, [{LeafKey, _}|_]} = Path |Rest], Keys, FilteredAcc, RemovedKeysAcc) ->
+ FilteredKeys = lists:delete({Pos, LeafKey}, Keys),
+ if FilteredKeys == Keys ->
+ % this leaf is not a key we are looking to remove
+ filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc);
+ true ->
+ % this did match a key, remove both the node and the input key
+ filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc])
+ end.
+
+% Removes any branches from the tree whose leaf node(s) are in the Keys
+remove_leafs(Trees, Keys) ->
+ % flatten each branch in a tree into a tree path
+ Paths = get_all_leafs_full(Trees),
+
+ % filter out any that are in the keys list.
+ {FilteredPaths, RemovedKeys} = filter_leafs(Paths, Keys, [], []),
+
+ % convert paths back to trees
+ NewTree = lists:foldl(
+ fun({PathPos, Path},TreeAcc) ->
+ [SingleTree] = lists:foldl(
+ fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
+ {NewTrees, _} = merge(TreeAcc, {PathPos + 1 - length(Path), SingleTree}),
+ NewTrees
+ end, [], FilteredPaths),
+ {NewTree, RemovedKeys}.
+
+
+% get the leafs in the tree matching the keys. The matching key nodes can be
+% leafs or inner nodes. If an inner node, then the leafs for that node
+% are returned.
+get_key_leafs(Tree, Keys) ->
+ get_key_leafs(Tree, Keys, []).
+
+get_key_leafs(_, [], Acc) ->
+ {Acc, []};
+get_key_leafs([], Keys, Acc) ->
+ {Acc, Keys};
+get_key_leafs([{Pos, Tree}|Rest], Keys, Acc) ->
+ {Gotten, RemainingKeys} = get_key_leafs_simple(Pos, [Tree], Keys, []),
+ get_key_leafs(Rest, RemainingKeys, Gotten ++ Acc).
+
+get_key_leafs_simple(_Pos, _Tree, [], _KeyPathAcc) ->
+ {[], []};
+get_key_leafs_simple(_Pos, [], KeysToGet, _KeyPathAcc) ->
+ {[], KeysToGet};
+get_key_leafs_simple(Pos, [{Key, _Value, SubTree}=Tree | RestTree], KeysToGet, KeyPathAcc) ->
+ case lists:delete({Pos, Key}, KeysToGet) of
+ KeysToGet -> % same list, key not found
+ {LeafsFound, KeysToGet2} = get_key_leafs_simple(Pos + 1, SubTree, KeysToGet, [Key | KeyPathAcc]),
+ {RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet2, KeyPathAcc),
+ {LeafsFound ++ RestLeafsFound, KeysRemaining};
+ KeysToGet2 ->
+ LeafsFound = get_all_leafs_simple(Pos, [Tree], KeyPathAcc),
+ LeafKeysFound = [LeafKeyFound || {LeafKeyFound, _} <- LeafsFound],
+        % don't rebind KeysToGet2: a repeated match here would badmatch
+        % whenever leaf keys were actually removed
+        KeysToGet3 = KeysToGet2 -- LeafKeysFound,
+        {RestLeafsFound, KeysRemaining} = get_key_leafs_simple(Pos, RestTree, KeysToGet3, KeyPathAcc),
+ {LeafsFound ++ RestLeafsFound, KeysRemaining}
+ end.
+
+get(Tree, KeysToGet) ->
+ {KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet),
+ FixedResults = [ {Value, {Pos, [Key0 || {Key0, _} <- Path]}} || {Pos, [{_Key, Value}|_]=Path} <- KeyPaths],
+ {FixedResults, KeysNotFound}.
+
+get_full_key_paths(Tree, Keys) ->
+ get_full_key_paths(Tree, Keys, []).
+
+get_full_key_paths(_, [], Acc) ->
+ {Acc, []};
+get_full_key_paths([], Keys, Acc) ->
+ {Acc, Keys};
+get_full_key_paths([{Pos, Tree}|Rest], Keys, Acc) ->
+ {Gotten, RemainingKeys} = get_full_key_paths(Pos, [Tree], Keys, []),
+ get_full_key_paths(Rest, RemainingKeys, Gotten ++ Acc).
+
+
+get_full_key_paths(_Pos, _Tree, [], _KeyPathAcc) ->
+ {[], []};
+get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) ->
+ {[], KeysToGet};
+get_full_key_paths(Pos, [{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPathAcc) ->
+ KeysToGet2 = KeysToGet -- [{Pos, KeyId}],
+ CurrentNodeResult =
+ case length(KeysToGet2) =:= length(KeysToGet) of
+ true -> % not in the key list.
+ [];
+        false -> % this node is in the key list. return it
+ [{Pos, [{KeyId, Value} | KeyPathAcc]}]
+ end,
+ {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [{KeyId, Value} | KeyPathAcc]),
+ {KeysGotten2, KeysRemaining2} = get_full_key_paths(Pos, RestTree, KeysRemaining, KeyPathAcc),
+ {CurrentNodeResult ++ KeysGotten ++ KeysGotten2, KeysRemaining2}.
+
+get_all_leafs_full(Tree) ->
+ get_all_leafs_full(Tree, []).
+
+get_all_leafs_full([], Acc) ->
+ Acc;
+get_all_leafs_full([{Pos, Tree} | Rest], Acc) ->
+ get_all_leafs_full(Rest, get_all_leafs_full_simple(Pos, [Tree], []) ++ Acc).
+
+get_all_leafs_full_simple(_Pos, [], _KeyPathAcc) ->
+ [];
+get_all_leafs_full_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
+ [{Pos, [{KeyId, Value} | KeyPathAcc]} | get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc)];
+get_all_leafs_full_simple(Pos, [{KeyId, Value, SubTree} | RestTree], KeyPathAcc) ->
+ get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++ get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc).
+
+get_all_leafs(Trees) ->
+ get_all_leafs(Trees, []).
+
+get_all_leafs([], Acc) ->
+ Acc;
+get_all_leafs([{Pos, Tree}|Rest], Acc) ->
+ get_all_leafs(Rest, get_all_leafs_simple(Pos, [Tree], []) ++ Acc).
+
+get_all_leafs_simple(_Pos, [], _KeyPathAcc) ->
+ [];
+get_all_leafs_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
+ [{Value, {Pos, [KeyId | KeyPathAcc]}} | get_all_leafs_simple(Pos, RestTree, KeyPathAcc)];
+get_all_leafs_simple(Pos, [{KeyId, _Value, SubTree} | RestTree], KeyPathAcc) ->
+ get_all_leafs_simple(Pos + 1, SubTree, [KeyId | KeyPathAcc]) ++ get_all_leafs_simple(Pos, RestTree, KeyPathAcc).
+
+
+count_leafs([]) ->
+ 0;
+count_leafs([{_Pos,Tree}|Rest]) ->
+ count_leafs_simple([Tree]) + count_leafs(Rest).
+
+count_leafs_simple([]) ->
+ 0;
+count_leafs_simple([{_Key, _Value, []} | RestTree]) ->
+ 1 + count_leafs_simple(RestTree);
+count_leafs_simple([{_Key, _Value, SubTree} | RestTree]) ->
+ count_leafs_simple(SubTree) + count_leafs_simple(RestTree).
+
+
+map(_Fun, []) ->
+ [];
+map(Fun, [{Pos, Tree}|Rest]) ->
+ case erlang:fun_info(Fun, arity) of
+ {arity, 2} ->
+ [NewTree] = map_simple(fun(A,B,_C) -> Fun(A,B) end, Pos, [Tree]),
+ [{Pos, NewTree} | map(Fun, Rest)];
+ {arity, 3} ->
+ [NewTree] = map_simple(Fun, Pos, [Tree]),
+ [{Pos, NewTree} | map(Fun, Rest)]
+ end.
+
+map_simple(_Fun, _Pos, []) ->
+ [];
+map_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
+ Value2 = Fun({Pos, Key}, Value,
+ if SubTree == [] -> leaf; true -> branch end),
+ [{Key, Value2, map_simple(Fun, Pos + 1, SubTree)} | map_simple(Fun, Pos, RestTree)].
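+
+% Usage sketch: an arity-3 fun is also told whether each node is a leaf
+% or a branch, e.g.
+%   map(fun({_Pos, _Key}, Value, leaf) -> {leaf, Value};
+%          ({_Pos, _Key}, Value, branch) -> {branch, Value}
+%       end, Trees)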
+
+
+map_leafs(_Fun, []) ->
+ [];
+map_leafs(Fun, [{Pos, Tree}|Rest]) ->
+ [NewTree] = map_leafs_simple(Fun, Pos, [Tree]),
+ [{Pos, NewTree} | map_leafs(Fun, Rest)].
+
+map_leafs_simple(_Fun, _Pos, []) ->
+ [];
+map_leafs_simple(Fun, Pos, [{Key, Value, []} | RestTree]) ->
+ Value2 = Fun({Pos, Key}, Value),
+ [{Key, Value2, []} | map_leafs_simple(Fun, Pos, RestTree)];
+map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
+ [{Key, Value, map_leafs_simple(Fun, Pos + 1, SubTree)} | map_leafs_simple(Fun, Pos, RestTree)].
+
+
+stem(Trees, Limit) ->
+ % flatten each branch in a tree into a tree path
+ Paths = get_all_leafs_full(Trees),
+
+ Paths2 = [{Pos, lists:sublist(Path, Limit)} || {Pos, Path} <- Paths],
+
+ % convert paths back to trees
+ lists:foldl(
+ fun({PathPos, Path},TreeAcc) ->
+ [SingleTree] = lists:foldl(
+ fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
+ {NewTrees, _} = merge(TreeAcc, {PathPos + 1 - length(Path), SingleTree}),
+ NewTrees
+ end, [], Paths2).
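+
+% Usage sketch: stem(Trees, 1000) trims every branch to its 1000 most
+% recent revisions (the assumed default of a database's revs_limit).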
+
+% Tests moved to test/etap/06?-*.t
+
diff --git a/1.1.x/src/couchdb/couch_log.erl b/1.1.x/src/couchdb/couch_log.erl
new file mode 100644
index 00000000..b3d3297c
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_log.erl
@@ -0,0 +1,193 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_log).
+-behaviour(gen_event).
+
+-export([start_link/0,stop/0]).
+-export([debug/2, info/2, error/2]).
+-export([debug_on/0,info_on/0,get_level/0,get_level_integer/0, set_level/1]).
+-export([init/1, handle_event/2, terminate/2, code_change/3, handle_info/2, handle_call/2]).
+-export([read/2]).
+
+-define(LEVEL_ERROR, 3).
+-define(LEVEL_INFO, 2).
+-define(LEVEL_DEBUG, 1).
+-define(LEVEL_TMI, 0).
+
+debug(Format, Args) ->
+ case debug_on() of
+ false ->
+ ok;
+ true ->
+ {ConsoleMsg, FileMsg} = get_log_messages(self(), debug, Format, Args),
+ gen_event:sync_notify(error_logger, {couch_debug, ConsoleMsg, FileMsg})
+ end.
+
+info(Format, Args) ->
+ case info_on() of
+ false ->
+ ok;
+ true ->
+ {ConsoleMsg, FileMsg} = get_log_messages(self(), info, Format, Args),
+ gen_event:sync_notify(error_logger, {couch_info, ConsoleMsg, FileMsg})
+ end.
+
+error(Format, Args) ->
+ {ConsoleMsg, FileMsg} = get_log_messages(self(), error, Format, Args),
+ gen_event:sync_notify(error_logger, {couch_error, ConsoleMsg, FileMsg}).
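+
+% Usage sketch: callers normally go through the ?LOG_DEBUG/?LOG_INFO/
+% ?LOG_ERROR macros, which are assumed to expand to these functions, e.g.
+%   couch_log:info("checkpointing view update at seq ~p", [Seq])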
+
+
+level_integer(error) -> ?LEVEL_ERROR;
+level_integer(info) -> ?LEVEL_INFO;
+level_integer(debug) -> ?LEVEL_DEBUG;
+level_integer(tmi) -> ?LEVEL_TMI;
+level_integer(_Else) -> ?LEVEL_ERROR. % anything else default to ERROR level
+
+level_atom(?LEVEL_ERROR) -> error;
+level_atom(?LEVEL_INFO) -> info;
+level_atom(?LEVEL_DEBUG) -> debug;
+level_atom(?LEVEL_TMI) -> tmi.
+
+
+start_link() ->
+ couch_event_sup:start_link({local, couch_log}, error_logger, couch_log, []).
+
+stop() ->
+ couch_event_sup:stop(couch_log).
+
+init([]) ->
+ % read config and register for configuration changes
+
+    % just stop if one of the config settings changes. couch_server_sup
+ % will restart us and then we will pick up the new settings.
+ ok = couch_config:register(
+ fun("log", "file") ->
+ ?MODULE:stop();
+ ("log", "level") ->
+ ?MODULE:stop();
+ ("log", "include_sasl") ->
+ ?MODULE:stop()
+ end),
+
+ Filename = couch_config:get("log", "file", "couchdb.log"),
+ Level = level_integer(list_to_atom(couch_config:get("log", "level", "info"))),
+ Sasl = list_to_atom(couch_config:get("log", "include_sasl", "true")),
+
+ case ets:info(?MODULE) of
+ undefined -> ets:new(?MODULE, [named_table]);
+ _ -> ok
+ end,
+ ets:insert(?MODULE, {level, Level}),
+
+ case file:open(Filename, [append]) of
+ {ok, Fd} ->
+ {ok, {Fd, Level, Sasl}};
+ {error, eacces} ->
+ {stop, {file_permission_error, Filename}};
+ Error ->
+ {stop, Error}
+ end.
+
+debug_on() ->
+ get_level_integer() =< ?LEVEL_DEBUG.
+
+info_on() ->
+ get_level_integer() =< ?LEVEL_INFO.
+
+set_level(LevelAtom) ->
+ set_level_integer(level_integer(LevelAtom)).
+
+get_level() ->
+ level_atom(get_level_integer()).
+
+get_level_integer() ->
+ try
+ ets:lookup_element(?MODULE, level, 2)
+ catch error:badarg ->
+ ?LEVEL_ERROR
+ end.
+
+set_level_integer(Int) ->
+ gen_event:call(error_logger, couch_log, {set_level_integer, Int}).
+
+handle_event({couch_error, ConMsg, FileMsg}, {Fd, _LogLevel, _Sasl}=State) ->
+ log(Fd, ConMsg, FileMsg),
+ {ok, State};
+handle_event({couch_info, ConMsg, FileMsg}, {Fd, LogLevel, _Sasl}=State)
+when LogLevel =< ?LEVEL_INFO ->
+ log(Fd, ConMsg, FileMsg),
+ {ok, State};
+handle_event({couch_debug, ConMsg, FileMsg}, {Fd, LogLevel, _Sasl}=State)
+when LogLevel =< ?LEVEL_DEBUG ->
+ log(Fd, ConMsg, FileMsg),
+ {ok, State};
+handle_event({error_report, _, {Pid, _, _}}=Event, {Fd, _LogLevel, Sasl}=State)
+when Sasl =/= false ->
+ {ConMsg, FileMsg} = get_log_messages(Pid, error, "~p", [Event]),
+ log(Fd, ConMsg, FileMsg),
+ {ok, State};
+handle_event({error, _, {Pid, Format, Args}}, {Fd, _LogLevel, Sasl}=State)
+when Sasl =/= false ->
+ {ConMsg, FileMsg} = get_log_messages(Pid, error, Format, Args),
+ log(Fd, ConMsg, FileMsg),
+ {ok, State};
+handle_event({_, _, {Pid, _, _}}=Event, {Fd, LogLevel, _Sasl}=State)
+when LogLevel =< ?LEVEL_TMI ->
+ % log every remaining event if tmi!
+ log(Fd, Pid, tmi, "~p", [Event]),
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_call({set_level_integer, NewLevel}, {Fd, _LogLevel, Sasl}) ->
+ ets:insert(?MODULE, {level, NewLevel}),
+ {ok, ok, {Fd, NewLevel, Sasl}}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Arg, {Fd, _LoggingLevel, _Sasl}) ->
+ file:close(Fd).
+
+log(Fd, Pid, Level, Format, Args) ->
+ Msg = io_lib:format(Format, Args),
+ ok = io:format("[~s] [~p] ~s~n", [Level, Pid, Msg]), % dump to console too
+ Msg2 = re:replace(lists:flatten(Msg),"\\r\\n|\\r|\\n", "\r\n",
+ [global, {return, list}]),
+ ok = io:format(Fd, "[~s] [~s] [~p] ~s\r~n", [httpd_util:rfc1123_date(), Level, Pid, Msg2]).
+
+log(Fd, ConsoleMsg, FileMsg) ->
+ ok = io:put_chars(ConsoleMsg),
+ ok = io:put_chars(Fd, FileMsg).
+
+get_log_messages(Pid, Level, Format, Args) ->
+ ConsoleMsg = unicode:characters_to_binary(io_lib:format(
+ "[~s] [~p] " ++ Format ++ "~n", [Level, Pid | Args])),
+ FileMsg = ["[", httpd_util:rfc1123_date(), "] ", ConsoleMsg],
+ {ConsoleMsg, iolist_to_binary(FileMsg)}.
+
+read(Bytes, Offset) ->
+ LogFileName = couch_config:get("log", "file"),
+ LogFileSize = filelib:file_size(LogFileName),
+
+ {ok, Fd} = file:open(LogFileName, [read]),
+ Start = lists:max([LogFileSize - Bytes, 0]) + Offset,
+
+ % TODO: truncate chopped first line
+ % TODO: make streaming
+
+ {ok, Chunk} = file:pread(Fd, Start, LogFileSize),
+ Chunk.
diff --git a/1.1.x/src/couchdb/couch_native_process.erl b/1.1.x/src/couchdb/couch_native_process.erl
new file mode 100644
index 00000000..b512f712
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_native_process.erl
@@ -0,0 +1,402 @@
+% Licensed under the Apache License, Version 2.0 (the "License");
+% you may not use this file except in compliance with the License.
+%
+% You may obtain a copy of the License at
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing,
+% software distributed under the License is distributed on an
+% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+% either express or implied.
+%
+% See the License for the specific language governing permissions
+% and limitations under the License.
+%
+% This file drew much inspiration from erlview, which was written by and
+% copyright Michael McDaniel [http://autosys.us], and is also under APL 2.0
+%
+%
+% This module provides the smallest possible native view-server.
+% With this module in-place, you can add the following to your couch INI files:
+% [native_query_servers]
+% erlang={couch_native_process, start_link, []}
+%
+% which then allows the following example map function to be used:
+%
+% fun({Doc}) ->
+%     % Below, we emit a single record - the _id as key, null as value
+%     DocId = couch_util:get_value(<<"_id">>, Doc, null),
+% Emit(DocId, null)
+% end.
+%
+% which should be roughly the same as the javascript:
+% emit(doc._id, null);
+%
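+% A reduce function follows the same shape; per reduce/5 below, reduce
+% funs are called as Fun(Keys, Values, ReReduce). A sketch that counts
+% rows:
+%
+% fun(_Keys, Values, false) -> length(Values);
+%    (_Keys, Values, true) -> lists:sum(Values)
+% end.
+%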
+% This module exposes enough functions that a native erlang server can
+% act as a fully-fledged view server, but no 'helper' functions
+% specifically for simplifying your erlang view code. It is expected that
+% third-party extensions will evolve which offer useful layers on top of
+% this view server to help simplify your view code.
+-module(couch_native_process).
+-behaviour(gen_server).
+
+-export([start_link/0,init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,
+ handle_info/2]).
+-export([set_timeout/2, prompt/2]).
+
+-define(STATE, native_proc_state).
+-record(evstate, {ddocs, funs=[], query_config=[], list_pid=nil, timeout=5000}).
+
+-include("couch_db.hrl").
+
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+% this is a bit messy, see also couch_query_servers handle_info
+% stop(_Pid) ->
+% ok.
+
+set_timeout(Pid, TimeOut) ->
+ gen_server:call(Pid, {set_timeout, TimeOut}).
+
+prompt(Pid, Data) when is_list(Data) ->
+ gen_server:call(Pid, {prompt, Data}).
+
+% gen_server callbacks
+init([]) ->
+ {ok, #evstate{ddocs=dict:new()}}.
+
+handle_call({set_timeout, TimeOut}, _From, State) ->
+ {reply, ok, State#evstate{timeout=TimeOut}};
+
+handle_call({prompt, Data}, _From, State) ->
+ ?LOG_DEBUG("Prompt native qs: ~s",[?JSON_ENCODE(Data)]),
+ {NewState, Resp} = try run(State, to_binary(Data)) of
+ {S, R} -> {S, R}
+ catch
+ throw:{error, Why} ->
+ {State, [<<"error">>, Why, Why]}
+ end,
+
+ case Resp of
+ {error, Reason} ->
+ Msg = io_lib:format("couch native server error: ~p", [Reason]),
+ {reply, [<<"error">>, <<"native_query_server">>, list_to_binary(Msg)], NewState};
+ [<<"error">> | Rest] ->
+ % Msg = io_lib:format("couch native server error: ~p", [Rest]),
+ % TODO: markh? (jan)
+ {reply, [<<"error">> | Rest], NewState};
+ [<<"fatal">> | Rest] ->
+ % Msg = io_lib:format("couch native server error: ~p", [Rest]),
+ % TODO: markh? (jan)
+ {stop, fatal, [<<"error">> | Rest], NewState};
+ Resp ->
+ {reply, Resp, NewState}
+ end.
+
+handle_cast(foo, State) -> {noreply, State}.
+handle_info({'EXIT',_,normal}, State) -> {noreply, State};
+handle_info({'EXIT',_,Reason}, State) ->
+ {stop, Reason, State}.
+terminate(_Reason, _State) -> ok.
+code_change(_OldVersion, State, _Extra) -> {ok, State}.
+
+run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
+ Pid ! {self(), list_row, Row},
+ receive
+ {Pid, chunks, Data} ->
+ {State, [<<"chunks">>, Data]};
+ {Pid, list_end, Data} ->
+ receive
+ {'EXIT', Pid, normal} -> ok
+ after State#evstate.timeout ->
+ throw({timeout, list_cleanup})
+ end,
+ process_flag(trap_exit, erlang:get(do_trap)),
+ {State#evstate{list_pid=nil}, [<<"end">>, Data]}
+ after State#evstate.timeout ->
+ throw({timeout, list_row})
+ end;
+run(#evstate{list_pid=Pid}=State, [<<"list_end">>]) when is_pid(Pid) ->
+ Pid ! {self(), list_end},
+ Resp =
+ receive
+ {Pid, list_end, Data} ->
+ receive
+ {'EXIT', Pid, normal} -> ok
+ after State#evstate.timeout ->
+ throw({timeout, list_cleanup})
+ end,
+ [<<"end">>, Data]
+ after State#evstate.timeout ->
+ throw({timeout, list_end})
+ end,
+ process_flag(trap_exit, erlang:get(do_trap)),
+ {State#evstate{list_pid=nil}, Resp};
+run(#evstate{list_pid=Pid}=State, _Command) when is_pid(Pid) ->
+ {State, [<<"error">>, list_error, list_error]};
+run(#evstate{ddocs=DDocs}, [<<"reset">>]) ->
+ {#evstate{ddocs=DDocs}, true};
+run(#evstate{ddocs=DDocs}, [<<"reset">>, QueryConfig]) ->
+ {#evstate{ddocs=DDocs, query_config=QueryConfig}, true};
+run(#evstate{funs=Funs}=State, [<<"add_fun">> , BinFunc]) ->
+ FunInfo = makefun(State, BinFunc),
+ {State#evstate{funs=Funs ++ [FunInfo]}, true};
+run(State, [<<"map_doc">> , Doc]) ->
+ Resp = lists:map(fun({Sig, Fun}) ->
+ erlang:put(Sig, []),
+ Fun(Doc),
+ lists:reverse(erlang:get(Sig))
+ end, State#evstate.funs),
+ {State, Resp};
+run(State, [<<"reduce">>, Funs, KVs]) ->
+ {Keys, Vals} =
+ lists:foldl(fun([K, V], {KAcc, VAcc}) ->
+ {[K | KAcc], [V | VAcc]}
+ end, {[], []}, KVs),
+ Keys2 = lists:reverse(Keys),
+ Vals2 = lists:reverse(Vals),
+ {State, catch reduce(State, Funs, Keys2, Vals2, false)};
+run(State, [<<"rereduce">>, Funs, Vals]) ->
+ {State, catch reduce(State, Funs, null, Vals, true)};
+run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
+ DDocs2 = store_ddoc(DDocs, DDocId, DDoc),
+ {State#evstate{ddocs=DDocs2}, true};
+run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, DDocId | Rest]) ->
+ DDoc = load_ddoc(DDocs, DDocId),
+ ddoc(State, DDoc, Rest);
+run(_, Unknown) ->
+ ?LOG_ERROR("Native Process: Unknown command: ~p~n", [Unknown]),
+ throw({error, unknown_command}).
+
+ddoc(State, {DDoc}, [FunPath, Args]) ->
+ % load fun from the FunPath
+ BFun = lists:foldl(fun
+ (Key, {Props}) when is_list(Props) ->
+ couch_util:get_value(Key, Props, nil);
+ (_Key, Fun) when is_binary(Fun) ->
+ Fun;
+ (_Key, nil) ->
+ throw({error, not_found});
+ (_Key, _Fun) ->
+ throw({error, malformed_ddoc})
+ end, {DDoc}, FunPath),
+ ddoc(State, makefun(State, BFun, {DDoc}), FunPath, Args).
+
+ddoc(State, {_, Fun}, [<<"validate_doc_update">>], Args) ->
+ {State, (catch apply(Fun, Args))};
+ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) ->
+ Resp = lists:map(fun(Doc) -> (catch Fun(Doc, Req)) =:= true end, Docs),
+ {State, [true, Resp]};
+ddoc(State, {_, Fun}, [<<"shows">>|_], Args) ->
+ Resp = case (catch apply(Fun, Args)) of
+ FunResp when is_list(FunResp) ->
+ FunResp;
+ {FunResp} ->
+ [<<"resp">>, {FunResp}];
+ FunResp ->
+ FunResp
+ end,
+ {State, Resp};
+ddoc(State, {_, Fun}, [<<"updates">>|_], Args) ->
+ Resp = case (catch apply(Fun, Args)) of
+ [JsonDoc, JsonResp] ->
+ [<<"up">>, JsonDoc, JsonResp]
+ end,
+ {State, Resp};
+ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
+ Self = self(),
+ SpawnFun = fun() ->
+ LastChunk = (catch apply(Fun, Args)),
+ case start_list_resp(Self, Sig) of
+ started ->
+ receive
+ {Self, list_row, _Row} -> ignore;
+ {Self, list_end} -> ignore
+ after State#evstate.timeout ->
+ throw({timeout, list_cleanup_pid})
+ end;
+ _ ->
+ ok
+ end,
+ LastChunks =
+ case erlang:get(Sig) of
+ undefined -> [LastChunk];
+ OtherChunks -> [LastChunk | OtherChunks]
+ end,
+ Self ! {self(), list_end, lists:reverse(LastChunks)}
+ end,
+ erlang:put(do_trap, process_flag(trap_exit, true)),
+ Pid = spawn_link(SpawnFun),
+ Resp =
+ receive
+ {Pid, start, Chunks, JsonResp} ->
+ [<<"start">>, Chunks, JsonResp]
+ after State#evstate.timeout ->
+ throw({timeout, list_start})
+ end,
+ {State#evstate{list_pid=Pid}, Resp}.
+
+store_ddoc(DDocs, DDocId, DDoc) ->
+ dict:store(DDocId, DDoc, DDocs).
+load_ddoc(DDocs, DDocId) ->
+ try dict:fetch(DDocId, DDocs) of
+ {DDoc} -> {DDoc}
+ catch
+ _:_Else -> throw({error, ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s",[DDocId]))})
+ end.
+
+bindings(State, Sig) ->
+ bindings(State, Sig, nil).
+bindings(State, Sig, DDoc) ->
+ Self = self(),
+
+ Log = fun(Msg) ->
+ ?LOG_INFO(Msg, [])
+ end,
+
+ Emit = fun(Id, Value) ->
+ Curr = erlang:get(Sig),
+ erlang:put(Sig, [[Id, Value] | Curr])
+ end,
+
+ Start = fun(Headers) ->
+ erlang:put(list_headers, Headers)
+ end,
+
+ Send = fun(Chunk) ->
+ Curr =
+ case erlang:get(Sig) of
+ undefined -> [];
+ Else -> Else
+ end,
+ erlang:put(Sig, [Chunk | Curr])
+ end,
+
+ GetRow = fun() ->
+ case start_list_resp(Self, Sig) of
+ started ->
+ ok;
+ _ ->
+ Chunks =
+ case erlang:get(Sig) of
+ undefined -> [];
+ CurrChunks -> CurrChunks
+ end,
+ Self ! {self(), chunks, lists:reverse(Chunks)}
+ end,
+ erlang:put(Sig, []),
+ receive
+ {Self, list_row, Row} -> Row;
+ {Self, list_end} -> nil
+ after State#evstate.timeout ->
+ throw({timeout, list_pid_getrow})
+ end
+ end,
+
+ FoldRows = fun(Fun, Acc) -> foldrows(GetRow, Fun, Acc) end,
+
+ Bindings = [
+ {'Log', Log},
+ {'Emit', Emit},
+ {'Start', Start},
+ {'Send', Send},
+ {'GetRow', GetRow},
+ {'FoldRows', FoldRows}
+ ],
+ case DDoc of
+ {_Props} ->
+ Bindings ++ [{'DDoc', DDoc}];
+ _Else -> Bindings
+ end.
+
+% thanks to erlview, via:
+% http://erlang.org/pipermail/erlang-questions/2003-November/010544.html
+makefun(State, Source) ->
+ Sig = couch_util:md5(Source),
+ BindFuns = bindings(State, Sig),
+ {Sig, makefun(State, Source, BindFuns)}.
+makefun(State, Source, {DDoc}) ->
+ Sig = couch_util:md5(lists:flatten([Source, term_to_binary(DDoc)])),
+ BindFuns = bindings(State, Sig, {DDoc}),
+ {Sig, makefun(State, Source, BindFuns)};
+makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
+ FunStr = binary_to_list(Source),
+ {ok, Tokens, _} = erl_scan:string(FunStr),
+ Form = case (catch erl_parse:parse_exprs(Tokens)) of
+ {ok, [ParsedForm]} ->
+ ParsedForm;
+ {error, {LineNum, _Mod, [Mesg, Params]}}=Error ->
+ io:format(standard_error, "Syntax error on line: ~p~n", [LineNum]),
+ io:format(standard_error, "~s~p~n", [Mesg, Params]),
+ throw(Error)
+ end,
+ Bindings = lists:foldl(fun({Name, Fun}, Acc) ->
+ erl_eval:add_binding(Name, Fun, Acc)
+ end, erl_eval:new_bindings(), BindFuns),
+ {value, Fun, _} = erl_eval:expr(Form, Bindings),
+ Fun.
+
+reduce(State, BinFuns, Keys, Vals, ReReduce) ->
+ Funs = case is_list(BinFuns) of
+ true ->
+ lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
+ _ ->
+ [makefun(State, BinFuns)]
+ end,
+ Reds = lists:map(fun({_Sig, Fun}) ->
+ Fun(Keys, Vals, ReReduce)
+ end, Funs),
+ [true, Reds].
+
+foldrows(GetRow, ProcRow, Acc) ->
+ case GetRow() of
+ nil ->
+ {ok, Acc};
+ Row ->
+ case (catch ProcRow(Row, Acc)) of
+ {ok, Acc2} ->
+ foldrows(GetRow, ProcRow, Acc2);
+ {stop, Acc2} ->
+ {ok, Acc2}
+ end
+ end.
+
+start_list_resp(Self, Sig) ->
+ case erlang:get(list_started) of
+ undefined ->
+ Headers =
+ case erlang:get(list_headers) of
+ undefined -> {[{<<"headers">>, {[]}}]};
+ CurrHdrs -> CurrHdrs
+ end,
+ Chunks =
+ case erlang:get(Sig) of
+ undefined -> [];
+ CurrChunks -> CurrChunks
+ end,
+ Self ! {self(), start, lists:reverse(Chunks), Headers},
+ erlang:put(list_started, true),
+ erlang:put(Sig, []),
+ started;
+ _ ->
+ ok
+ end.
+
+to_binary({Data}) ->
+ Pred = fun({Key, Value}) ->
+ {to_binary(Key), to_binary(Value)}
+ end,
+ {lists:map(Pred, Data)};
+to_binary(Data) when is_list(Data) ->
+ [to_binary(D) || D <- Data];
+to_binary(null) ->
+ null;
+to_binary(true) ->
+ true;
+to_binary(false) ->
+ false;
+to_binary(Data) when is_atom(Data) ->
+ list_to_binary(atom_to_list(Data));
+to_binary(Data) ->
+ Data.
diff --git a/1.1.x/src/couchdb/couch_os_daemons.erl b/1.1.x/src/couchdb/couch_os_daemons.erl
new file mode 100644
index 00000000..d03f550c
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_os_daemons.erl
@@ -0,0 +1,364 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+-module(couch_os_daemons).
+-behaviour(gen_server).
+
+-export([start_link/0, info/0, info/1, config_change/2]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-include("couch_db.hrl").
+
+-record(daemon, {
+ port,
+ name,
+ cmd,
+ kill,
+ status=running,
+ cfg_patterns=[],
+ errors=[],
+ buf=[]
+}).
+
+-define(PORT_OPTIONS, [stream, {line, 1024}, binary, exit_status, hide]).
+-define(TIMEOUT, 5000).
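+
+% Daemons are configured in the ini files; a sketch (the daemon name and
+% path are hypothetical):
+%   [os_daemons]
+%   my_daemon = /usr/local/bin/my_daemon.sh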
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+info() ->
+ info([]).
+
+info(Options) ->
+ gen_server:call(?MODULE, {daemon_info, Options}).
+
+config_change(Section, Key) ->
+ gen_server:cast(?MODULE, {config_change, Section, Key}).
+
+init(_) ->
+ process_flag(trap_exit, true),
+ ok = couch_config:register(fun couch_os_daemons:config_change/2),
+ Table = ets:new(?MODULE, [protected, set, {keypos, #daemon.port}]),
+ reload_daemons(Table),
+ {ok, Table}.
+
+terminate(_Reason, Table) ->
+ [stop_port(D) || D <- ets:tab2list(Table)],
+ ok.
+
+handle_call({daemon_info, Options}, _From, Table) when is_list(Options) ->
+ case lists:member(table, Options) of
+ true ->
+ {reply, {ok, ets:tab2list(Table)}, Table};
+ _ ->
+ {reply, {ok, Table}, Table}
+ end;
+handle_call(Msg, From, Table) ->
+ ?LOG_ERROR("Unknown call message to ~p from ~p: ~p", [?MODULE, From, Msg]),
+ {stop, error, Table}.
+
+handle_cast({config_change, Sect, Key}, Table) ->
+ restart_daemons(Table, Sect, Key),
+ case Sect of
+ "os_daemons" -> reload_daemons(Table);
+ _ -> ok
+ end,
+ {noreply, Table};
+handle_cast(stop, Table) ->
+ {stop, normal, Table};
+handle_cast(Msg, Table) ->
+ ?LOG_ERROR("Unknown cast message to ~p: ~p", [?MODULE, Msg]),
+ {stop, error, Table}.
+
+handle_info({'EXIT', Port, Reason}, Table) ->
+ case ets:lookup(Table, Port) of
+ [] ->
+ ?LOG_INFO("Port ~p exited after stopping: ~p~n", [Port, Reason]);
+ [#daemon{status=stopping}] ->
+ true = ets:delete(Table, Port);
+ [#daemon{name=Name, status=restarting}=D] ->
+            ?LOG_INFO("Daemon ~p restarting after config change.", [Name]),
+ true = ets:delete(Table, Port),
+ {ok, Port2} = start_port(D#daemon.cmd),
+ true = ets:insert(Table, D#daemon{
+ port=Port2, status=running, kill=undefined, buf=[]
+ });
+ [#daemon{name=Name, status=halted}] ->
+ ?LOG_ERROR("Halted daemon process: ~p", [Name]);
+ [D] ->
+ ?LOG_ERROR("Invalid port state at exit: ~p", [D])
+ end,
+ {noreply, Table};
+handle_info({Port, closed}, Table) ->
+ handle_info({Port, {exit_status, closed}}, Table);
+handle_info({Port, {exit_status, Status}}, Table) ->
+ case ets:lookup(Table, Port) of
+ [] ->
+ ?LOG_ERROR("Unknown port ~p exiting ~p", [Port, Status]),
+ {stop, {error, unknown_port_died, Status}, Table};
+ [#daemon{name=Name, status=restarting}=D] ->
+            ?LOG_INFO("Daemon ~p restarting after config change.", [Name]),
+ true = ets:delete(Table, Port),
+ {ok, Port2} = start_port(D#daemon.cmd),
+ true = ets:insert(Table, D#daemon{
+ port=Port2, status=running, kill=undefined, buf=[]
+ }),
+ {noreply, Table};
+ [#daemon{status=stopping}=D] ->
+ % The configuration changed and this daemon is no
+ % longer needed.
+ ?LOG_DEBUG("Port ~p shut down.", [D#daemon.name]),
+ true = ets:delete(Table, Port),
+ {noreply, Table};
+ [D] ->
+ % Port died for unknown reason. Check to see if it's
+ % died too many times or if we should boot it back up.
+ case should_halt([now() | D#daemon.errors]) of
+ {true, _} ->
+ % Halting the process. We won't try and reboot
+ % until the configuration changes.
+ Fmt = "Daemon ~p halted with exit_status ~p",
+ ?LOG_ERROR(Fmt, [D#daemon.name, Status]),
+ D2 = D#daemon{status=halted, errors=nil, buf=nil},
+ true = ets:insert(Table, D2),
+ {noreply, Table};
+ {false, Errors} ->
+                    % We're guessing it was a random error; this daemon
+                    % has behaved so far, so we'll give it another chance.
+ Fmt = "Daemon ~p is being rebooted after exit_status ~p",
+ ?LOG_INFO(Fmt, [D#daemon.name, Status]),
+ true = ets:delete(Table, Port),
+ {ok, Port2} = start_port(D#daemon.cmd),
+ true = ets:insert(Table, D#daemon{
+ port=Port2, status=running, kill=undefined,
+ errors=Errors, buf=[]
+ }),
+ {noreply, Table}
+ end;
+ _Else ->
+ throw(error)
+ end;
+handle_info({Port, {data, {noeol, Data}}}, Table) ->
+ [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
+ true = ets:insert(Table, D#daemon{buf=[Data | Buf]}),
+ {noreply, Table};
+handle_info({Port, {data, {eol, Data}}}, Table) ->
+ [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
+ Line = lists:reverse(Buf, Data),
+ % The first line echoed back is the kill command
+ % for when we go to get rid of the port. Lines after
+ % that are considered part of the stdio API.
+ case D#daemon.kill of
+ undefined ->
+ true = ets:insert(Table, D#daemon{kill=?b2l(Line), buf=[]});
+ _Else ->
+ D2 = case (catch ?JSON_DECODE(Line)) of
+ {invalid_json, Rejected} ->
+ ?LOG_ERROR("Ignoring OS daemon request: ~p", [Rejected]),
+ D;
+ JSON ->
+ {ok, D3} = handle_port_message(D, JSON),
+ D3
+ end,
+ true = ets:insert(Table, D2#daemon{buf=[]})
+ end,
+ {noreply, Table};
+handle_info({Port, Error}, Table) ->
+    ?LOG_ERROR("Unexpected message from port ~p: ~p", [Port, Error]),
+ stop_port(Port),
+ [D] = ets:lookup(Table, Port),
+ true = ets:insert(Table, D#daemon{status=restarting, buf=nil}),
+ {noreply, Table};
+handle_info(Msg, Table) ->
+ ?LOG_ERROR("Unexpected info message to ~p: ~p", [?MODULE, Msg]),
+ {stop, error, Table}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+% Internal API
+
+%
+% Port management helpers
+%
+
+start_port(Command) ->
+ PrivDir = couch_util:priv_dir(),
+ Spawnkiller = filename:join(PrivDir, "couchspawnkillable"),
+ Port = open_port({spawn, Spawnkiller ++ " " ++ Command}, ?PORT_OPTIONS),
+ {ok, Port}.
+
+
+stop_port(#daemon{port=Port, kill=undefined}=D) ->
+ ?LOG_ERROR("Stopping daemon without a kill command: ~p", [D#daemon.name]),
+ catch port_close(Port);
+stop_port(#daemon{port=Port}=D) ->
+ ?LOG_DEBUG("Stopping daemon: ~p", [D#daemon.name]),
+ os:cmd(D#daemon.kill),
+ catch port_close(Port).
+
+
+handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section]) ->
+ KVs = couch_config:get(Section),
+ Data = lists:map(fun({K, V}) -> {?l2b(K), ?l2b(V)} end, KVs),
+ Json = iolist_to_binary(?JSON_ENCODE({Data})),
+ port_command(Port, <<Json/binary, "\n">>),
+ {ok, Daemon};
+handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section, Key]) ->
+ Value = case couch_config:get(Section, Key, null) of
+ null -> null;
+ String -> ?l2b(String)
+ end,
+ Json = iolist_to_binary(?JSON_ENCODE(Value)),
+ port_command(Port, <<Json/binary, "\n">>),
+ {ok, Daemon};
+handle_port_message(Daemon, [<<"register">>, Sec]) when is_binary(Sec) ->
+ Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [{?b2l(Sec)}]),
+ {ok, Daemon#daemon{cfg_patterns=Patterns}};
+handle_port_message(Daemon, [<<"register">>, Sec, Key])
+ when is_binary(Sec) andalso is_binary(Key) ->
+ Pattern = {?b2l(Sec), ?b2l(Key)},
+ Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [Pattern]),
+ {ok, Daemon#daemon{cfg_patterns=Patterns}};
+handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg]) ->
+ handle_log_message(Name, Msg, <<"info">>),
+ {ok, Daemon};
+handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg, {Opts}]) ->
+ Level = couch_util:get_value(<<"level">>, Opts, <<"info">>),
+ handle_log_message(Name, Msg, Level),
+ {ok, Daemon};
+handle_port_message(#daemon{name=Name}=Daemon, Else) ->
+ ?LOG_ERROR("Daemon ~p made invalid request: ~p", [Name, Else]),
+ {ok, Daemon}.
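+
+% A sketch of the resulting stdio protocol handled above (each line is
+% newline-terminated JSON; the section, key and message are hypothetical):
+%   daemon -> couch:  ["get", "mysection"]
+%   couch  -> daemon: {"somekey": "somevalue"}
+%   daemon -> couch:  ["register", "mysection", "somekey"]
+%   daemon -> couch:  ["log", "daemon started", {"level": "info"}]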
+
+
+handle_log_message(Name, Msg, _Level) when not is_binary(Msg) ->
+ ?LOG_ERROR("Invalid log message from daemon ~p: ~p", [Name, Msg]);
+handle_log_message(Name, Msg, <<"debug">>) ->
+ ?LOG_DEBUG("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
+handle_log_message(Name, Msg, <<"info">>) ->
+ ?LOG_INFO("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
+handle_log_message(Name, Msg, <<"error">>) ->
+    ?LOG_ERROR("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
+handle_log_message(Name, Msg, Level) ->
+ ?LOG_ERROR("Invalid log level from daemon: ~p", [Level]),
+    ?LOG_INFO("Daemon ~p :: ~s", [Name, ?b2l(Msg)]).
+
+%
+% Daemon management helpers
+%
+
+reload_daemons(Table) ->
+ % List of daemons we want to have running.
+ Configured = lists:sort(couch_config:get("os_daemons")),
+
+ % Remove records for daemons that were halted.
+ MSpecHalted = #daemon{name='$1', cmd='$2', status=halted, _='_'},
+ Halted = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecHalted)]),
+ ok = stop_os_daemons(Table, find_to_stop(Configured, Halted, [])),
+
+    % Stop running daemons that are no longer configured,
+    % then start any newly configured daemons.
+ MSpecRunning = #daemon{name='$1', cmd='$2', status=running, _='_'},
+ Running = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecRunning)]),
+ ok = stop_os_daemons(Table, find_to_stop(Configured, Running, [])),
+ ok = boot_os_daemons(Table, find_to_boot(Configured, Running, [])),
+ ok.
+
+
+restart_daemons(Table, Sect, Key) ->
+ restart_daemons(Table, Sect, Key, ets:first(Table)).
+
+restart_daemons(_, _, _, '$end_of_table') ->
+ ok;
+restart_daemons(Table, Sect, Key, Port) ->
+ [D] = ets:lookup(Table, Port),
+ HasSect = lists:member({Sect}, D#daemon.cfg_patterns),
+ HasKey = lists:member({Sect, Key}, D#daemon.cfg_patterns),
+ case HasSect or HasKey of
+ true ->
+ stop_port(D),
+ D2 = D#daemon{status=restarting, buf=nil},
+ true = ets:insert(Table, D2);
+ _ ->
+ ok
+ end,
+ restart_daemons(Table, Sect, Key, ets:next(Table, Port)).
+
+
+stop_os_daemons(_Table, []) ->
+ ok;
+stop_os_daemons(Table, [{Name, Cmd} | Rest]) ->
+ [[Port]] = ets:match(Table, #daemon{port='$1', name=Name, cmd=Cmd, _='_'}),
+ [D] = ets:lookup(Table, Port),
+ case D#daemon.status of
+ halted ->
+ ets:delete(Table, Port);
+ _ ->
+ stop_port(D),
+ D2 = D#daemon{status=stopping, errors=nil, buf=nil},
+ true = ets:insert(Table, D2)
+ end,
+ stop_os_daemons(Table, Rest).
+
+boot_os_daemons(_Table, []) ->
+ ok;
+boot_os_daemons(Table, [{Name, Cmd} | Rest]) ->
+ {ok, Port} = start_port(Cmd),
+ true = ets:insert(Table, #daemon{port=Port, name=Name, cmd=Cmd}),
+ boot_os_daemons(Table, Rest).
+
+% Elements unique to the configured set need to be booted.
+find_to_boot([], _Rest, Acc) ->
+ % Nothing else configured.
+ Acc;
+find_to_boot([D | R1], [D | R2], Acc) ->
+ % Elements are equal, daemon already running.
+ find_to_boot(R1, R2, Acc);
+find_to_boot([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
+ find_to_boot(R1, A2, [D1 | Acc]);
+find_to_boot(A1, [_ | R2], Acc) ->
+ find_to_boot(A1, R2, Acc);
+find_to_boot(Rest, [], Acc) ->
+ % No more candidates for already running. Boot all.
+ Rest ++ Acc.
+
+% Elements unique to the running set need to be killed.
+find_to_stop([], Rest, Acc) ->
+ % The rest haven't been found, so they must all
+ % be ready to die.
+ Rest ++ Acc;
+find_to_stop([D | R1], [D | R2], Acc) ->
+ % Elements are equal, daemon already running.
+ find_to_stop(R1, R2, Acc);
+find_to_stop([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
+ find_to_stop(R1, A2, Acc);
+find_to_stop(A1, [D2 | R2], Acc) ->
+ find_to_stop(A1, R2, [D2 | Acc]);
+find_to_stop(_, [], Acc) ->
+ % No more running daemons to worry about.
+ Acc.
+
+should_halt(Errors) ->
+ RetryTimeCfg = couch_config:get("os_daemon_settings", "retry_time", "5"),
+ RetryTime = list_to_integer(RetryTimeCfg),
+
+ Now = now(),
+ RecentErrors = lists:filter(fun(Time) ->
+ timer:now_diff(Now, Time) =< RetryTime * 1000000
+ end, Errors),
+
+ RetryCfg = couch_config:get("os_daemon_settings", "max_retries", "3"),
+ Retries = list_to_integer(RetryCfg),
+
+ {length(RecentErrors) >= Retries, RecentErrors}.
diff --git a/1.1.x/src/couchdb/couch_os_process.erl b/1.1.x/src/couchdb/couch_os_process.erl
new file mode 100644
index 00000000..5776776b
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_os_process.erl
@@ -0,0 +1,185 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_os_process).
+-behaviour(gen_server).
+
+-export([start_link/1, start_link/2, start_link/3, stop/1]).
+-export([set_timeout/2, prompt/2]).
+-export([send/2, writeline/2, readline/1, writejson/2, readjson/1]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
+
+-include("couch_db.hrl").
+
+-define(PORT_OPTIONS, [stream, {line, 1024}, binary, exit_status, hide]).
+
+-record(os_proc,
+ {command,
+ port,
+ writer,
+ reader,
+ timeout=5000
+ }).
+
+start_link(Command) ->
+ start_link(Command, []).
+start_link(Command, Options) ->
+ start_link(Command, Options, ?PORT_OPTIONS).
+start_link(Command, Options, PortOptions) ->
+ gen_server:start_link(couch_os_process, [Command, Options, PortOptions], []).
+
+stop(Pid) ->
+ gen_server:cast(Pid, stop).
+
+% Read/Write API
+set_timeout(Pid, TimeOut) when is_integer(TimeOut) ->
+ ok = gen_server:call(Pid, {set_timeout, TimeOut}).
+
+% Used by couch_db_update_notifier.erl
+send(Pid, Data) ->
+ gen_server:cast(Pid, {send, Data}).
+
+prompt(Pid, Data) ->
+ case gen_server:call(Pid, {prompt, Data}, infinity) of
+ {ok, Result} ->
+ Result;
+ Error ->
+ ?LOG_ERROR("OS Process Error ~p :: ~p",[Pid,Error]),
+ throw(Error)
+ end.
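+
+% Usage sketch (hypothetical command; see couch_query_servers for the
+% real call sites):
+%   {ok, Pid} = couch_os_process:start_link("/usr/bin/couchjs main.js"),
+%   ok = couch_os_process:set_timeout(Pid, 5000),
+%   true = couch_os_process:prompt(Pid, [<<"reset">>])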
+
+% Utility functions for reading and writing
+% in custom functions
+writeline(OsProc, Data) when is_record(OsProc, os_proc) ->
+ port_command(OsProc#os_proc.port, Data ++ "\n").
+
+readline(#os_proc{} = OsProc) ->
+ readline(OsProc, []).
+readline(#os_proc{port = Port} = OsProc, Acc) ->
+ receive
+ {Port, {data, {noeol, Data}}} ->
+ readline(OsProc, [Data|Acc]);
+ {Port, {data, {eol, Data}}} ->
+ lists:reverse(Acc, Data);
+ {Port, Err} ->
+ catch port_close(Port),
+ throw({os_process_error, Err})
+ after OsProc#os_proc.timeout ->
+ catch port_close(Port),
+ throw({os_process_error, "OS process timed out."})
+ end.
+
+% Standard JSON functions
+writejson(OsProc, Data) when is_record(OsProc, os_proc) ->
+ JsonData = ?JSON_ENCODE(Data),
+ ?LOG_DEBUG("OS Process ~p Input :: ~s", [OsProc#os_proc.port, JsonData]),
+ true = writeline(OsProc, JsonData).
+
+readjson(OsProc) when is_record(OsProc, os_proc) ->
+ Line = readline(OsProc),
+ ?LOG_DEBUG("OS Process ~p Output :: ~s", [OsProc#os_proc.port, Line]),
+ case ?JSON_DECODE(Line) of
+ [<<"log">>, Msg] when is_binary(Msg) ->
+ % we got a message to log. Log it and continue
+ ?LOG_INFO("OS Process ~p Log :: ~s", [OsProc#os_proc.port, Msg]),
+ readjson(OsProc);
+ [<<"error">>, Id, Reason] ->
+ throw({couch_util:to_existing_atom(Id),Reason});
+ [<<"fatal">>, Id, Reason] ->
+ ?LOG_INFO("OS Process ~p Fatal Error :: ~s ~p",[OsProc#os_proc.port, Id, Reason]),
+ throw({couch_util:to_existing_atom(Id),Reason});
+ Result ->
+ Result
+ end.
+
+
+% gen_server API
+init([Command, Options, PortOptions]) ->
+ process_flag(trap_exit, true),
+ PrivDir = couch_util:priv_dir(),
+ Spawnkiller = filename:join(PrivDir, "couchspawnkillable"),
+ BaseProc = #os_proc{
+ command=Command,
+ port=open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
+ writer=fun writejson/2,
+ reader=fun readjson/1
+ },
+ KillCmd = readline(BaseProc),
+ Pid = self(),
+ ?LOG_DEBUG("OS Process Start :: ~p", [BaseProc#os_proc.port]),
+ spawn(fun() ->
+        % this ensures the real os process is killed when this process dies.
+ erlang:monitor(process, Pid),
+ receive _ -> ok end,
+ os:cmd(?b2l(KillCmd))
+ end),
+ OsProc =
+ lists:foldl(fun(Opt, Proc) ->
+ case Opt of
+ {writer, Writer} when is_function(Writer) ->
+ Proc#os_proc{writer=Writer};
+ {reader, Reader} when is_function(Reader) ->
+ Proc#os_proc{reader=Reader};
+ {timeout, TimeOut} when is_integer(TimeOut) ->
+ Proc#os_proc{timeout=TimeOut}
+ end
+ end, BaseProc, Options),
+ {ok, OsProc}.
+
+terminate(_Reason, #os_proc{port=Port}) ->
+ catch port_close(Port),
+ ok.
+
+handle_call({set_timeout, TimeOut}, _From, OsProc) ->
+ {reply, ok, OsProc#os_proc{timeout=TimeOut}};
+handle_call({prompt, Data}, _From, OsProc) ->
+ #os_proc{writer=Writer, reader=Reader} = OsProc,
+ try
+ Writer(OsProc, Data),
+ {reply, {ok, Reader(OsProc)}, OsProc}
+ catch
+ throw:{error, OsError} ->
+ {reply, OsError, OsProc};
+ throw:{fatal, OsError} ->
+ {stop, normal, OsError, OsProc};
+ throw:OtherError ->
+ {stop, normal, OtherError, OsProc}
+ end.
+
+handle_cast({send, Data}, #os_proc{writer=Writer}=OsProc) ->
+ try
+ Writer(OsProc, Data),
+ {noreply, OsProc}
+ catch
+ throw:OsError ->
+ ?LOG_ERROR("Failed sending data: ~p -> ~p", [Data, OsError]),
+ {stop, normal, OsProc}
+ end;
+handle_cast(stop, OsProc) ->
+ {stop, normal, OsProc};
+handle_cast(Msg, OsProc) ->
+ ?LOG_DEBUG("OS Proc: Unknown cast: ~p", [Msg]),
+ {noreply, OsProc}.
+
+handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) ->
+ ?LOG_INFO("OS Process terminated normally", []),
+ {stop, normal, OsProc};
+handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) ->
+ ?LOG_ERROR("OS Process died with status: ~p", [Status]),
+ {stop, {exit_status, Status}, OsProc};
+handle_info(Msg, OsProc) ->
+ ?LOG_DEBUG("OS Proc: Unknown info: ~p", [Msg]),
+ {noreply, OsProc}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
diff --git a/1.1.x/src/couchdb/couch_query_servers.erl b/1.1.x/src/couchdb/couch_query_servers.erl
new file mode 100644
index 00000000..b0e46937
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_query_servers.erl
@@ -0,0 +1,589 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_query_servers).
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]).
+-export([start_doc_map/3, map_docs/2, stop_doc_map/1]).
+-export([reduce/3, rereduce/3,validate_doc_update/5]).
+-export([filter_docs/5]).
+
+-export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
+
+% -export([test/0]).
+
+-include("couch_db.hrl").
+
+-record(proc, {
+ pid,
+ lang,
+ ddoc_keys = [],
+ prompt_fun,
+ set_timeout_fun,
+ stop_fun
+}).
+
+-record(qserver, {
+ langs, % Keyed by language name, value is {Mod,Func,Arg}
+    pid_procs, % Keyed by PID, value is a #proc record.
+ lang_procs, % Keyed by language name, value is a #proc record
+ lang_limits, % Keyed by language name, value is {Lang, Limit, Current}
+ waitlist = [],
+ config
+}).
+
+start_link() ->
+ gen_server:start_link({local, couch_query_servers}, couch_query_servers, [], []).
+
+start_doc_map(Lang, Functions, Lib) ->
+ Proc = get_os_process(Lang),
+ case Lib of
+ {[]} -> ok;
+ Lib ->
+ true = proc_prompt(Proc, [<<"add_lib">>, Lib])
+ end,
+ lists:foreach(fun(FunctionSource) ->
+ true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
+ end, Functions),
+ {ok, Proc}.
+
+map_docs(Proc, Docs) ->
+ % send the documents
+ Results = lists:map(
+ fun(Doc) ->
+ Json = couch_doc:to_json_obj(Doc, []),
+
+ FunsResults = proc_prompt(Proc, [<<"map_doc">>, Json]),
+ % the results are a json array of function map yields like this:
+ % [FunResults1, FunResults2 ...]
+        % where each FunResults is a json array of key value pairs:
+ % [[Key1, Value1], [Key2, Value2]]
+ % Convert the key, value pairs to tuples like
+ % [{Key1, Value1}, {Key2, Value2}]
+ lists:map(
+ fun(FunRs) ->
+ [list_to_tuple(FunResult) || FunResult <- FunRs]
+ end,
+ FunsResults)
+ end,
+ Docs),
+ {ok, Results}.
+
+
+stop_doc_map(nil) ->
+ ok;
+stop_doc_map(Proc) ->
+ ok = ret_os_process(Proc).
+
+group_reductions_results([]) ->
+ [];
+group_reductions_results(List) ->
+ {Heads, Tails} = lists:foldl(
+ fun([H|T], {HAcc,TAcc}) ->
+ {[H|HAcc], [T|TAcc]}
+ end, {[], []}, List),
+ case Tails of
+ [[]|_] -> % no tails left
+ [Heads];
+ _ ->
+ [Heads | group_reductions_results(Tails)]
+ end.
+
+rereduce(_Lang, [], _ReducedValues) ->
+ {ok, []};
+rereduce(Lang, RedSrcs, ReducedValues) ->
+ Grouped = group_reductions_results(ReducedValues),
+ Results = lists:zipwith(
+ fun
+ (<<"_", _/binary>> = FunSrc, Values) ->
+ {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
+ Result;
+ (FunSrc, Values) ->
+ os_rereduce(Lang, [FunSrc], Values)
+ end, RedSrcs, Grouped),
+ {ok, Results}.
+
+reduce(_Lang, [], _KVs) ->
+ {ok, []};
+reduce(Lang, RedSrcs, KVs) ->
+ {OsRedSrcs, BuiltinReds} = lists:partition(fun
+ (<<"_", _/binary>>) -> false;
+ (_OsFun) -> true
+ end, RedSrcs),
+ {ok, OsResults} = os_reduce(Lang, OsRedSrcs, KVs),
+ {ok, BuiltinResults} = builtin_reduce(reduce, BuiltinReds, KVs, []),
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, []).
+
+recombine_reduce_results([], [], [], Acc) ->
+ {ok, lists:reverse(Acc)};
+recombine_reduce_results([<<"_", _/binary>>|RedSrcs], OsResults, [BRes|BuiltinResults], Acc) ->
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes|Acc]);
+recombine_reduce_results([_OsFun|RedSrcs], [OsR|OsResults], BuiltinResults, Acc) ->
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR|Acc]).
+
+os_reduce(_Lang, [], _KVs) ->
+ {ok, []};
+os_reduce(Lang, OsRedSrcs, KVs) ->
+ Proc = get_os_process(Lang),
+ OsResults = try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
+ [true, Reductions] -> Reductions
+ after
+ ok = ret_os_process(Proc)
+ end,
+ {ok, OsResults}.
+
+os_rereduce(Lang, OsRedSrcs, KVs) ->
+ Proc = get_os_process(Lang),
+ try proc_prompt(Proc, [<<"rereduce">>, OsRedSrcs, KVs]) of
+ [true, [Reduction]] -> Reduction
+ after
+ ok = ret_os_process(Proc)
+ end.
+
+
+builtin_reduce(_Re, [], _KVs, Acc) ->
+ {ok, lists:reverse(Acc)};
+builtin_reduce(Re, [<<"_sum",_/binary>>|BuiltinReds], KVs, Acc) ->
+ Sum = builtin_sum_rows(KVs),
+ builtin_reduce(Re, BuiltinReds, KVs, [Sum|Acc]);
+builtin_reduce(reduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+ Count = length(KVs),
+ builtin_reduce(reduce, BuiltinReds, KVs, [Count|Acc]);
+builtin_reduce(rereduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+ Count = builtin_sum_rows(KVs),
+ builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]);
+builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) ->
+ Stats = builtin_stats(Re, KVs),
+ builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]).
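+
+% Builtin reducers are selected by name from a view definition and avoid
+% the round-trip to an external query server. A sketch:
+%   {"views": {"totals": {"map": "...", "reduce": "_sum"}}}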
+
+builtin_sum_rows(KVs) ->
+ lists:foldl(fun
+ ([_Key, Value], Acc) when is_number(Value), is_number(Acc) ->
+ Acc + Value;
+ ([_Key, Value], Acc) when is_list(Value), is_list(Acc) ->
+ sum_terms(Acc, Value);
+ ([_Key, Value], Acc) when is_number(Value), is_list(Acc) ->
+ sum_terms(Acc, [Value]);
+ ([_Key, Value], Acc) when is_list(Value), is_number(Acc) ->
+ sum_terms([Acc], Value);
+ (_Else, _Acc) ->
+ throw({invalid_value, <<"builtin _sum function requires map values to be numbers or lists of numbers">>})
+ end, 0, KVs).
+
+sum_terms([], []) ->
+ [];
+sum_terms([_|_]=Xs, []) ->
+ Xs;
+sum_terms([], [_|_]=Ys) ->
+ Ys;
+sum_terms([X|Xs], [Y|Ys]) when is_number(X), is_number(Y) ->
+ [X+Y | sum_terms(Xs,Ys)];
+sum_terms(_, _) ->
+ throw({invalid_value, <<"builtin _sum function requires map values to be numbers or lists of numbers">>}).
+
+builtin_stats(reduce, [[_,First]|Rest]) when is_number(First) ->
+ Stats = lists:foldl(fun([_K,V], {S,C,Mi,Ma,Sq}) when is_number(V) ->
+ {S+V, C+1, lists:min([Mi, V]), lists:max([Ma, V]), Sq+(V*V)};
+ (_, _) ->
+ throw({invalid_value,
+ <<"builtin _stats function requires map values to be numbers">>})
+ end, {First,1,First,First,First*First}, Rest),
+ {Sum, Cnt, Min, Max, Sqr} = Stats,
+ {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]};
+
+builtin_stats(rereduce, [[_,First]|Rest]) ->
+ {[{sum,Sum0}, {count,Cnt0}, {min,Min0}, {max,Max0}, {sumsqr,Sqr0}]} = First,
+ Stats = lists:foldl(fun([_K,Red], {S,C,Mi,Ma,Sq}) ->
+ {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]} = Red,
+ {Sum+S, Cnt+C, lists:min([Min, Mi]), lists:max([Max, Ma]), Sqr+Sq}
+ end, {Sum0,Cnt0,Min0,Max0,Sqr0}, Rest),
+ {Sum, Cnt, Min, Max, Sqr} = Stats,
+ {[{sum,Sum}, {count,Cnt}, {min,Min}, {max,Max}, {sumsqr,Sqr}]}.
+
+% use the function stored in ddoc.validate_doc_update to test an update.
+validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
+ JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]),
+ JsonDiskDoc = json_doc(DiskDoc),
+ case ddoc_prompt(DDoc, [<<"validate_doc_update">>], [JsonEditDoc, JsonDiskDoc, Ctx, SecObj]) of
+ 1 ->
+ ok;
+ {[{<<"forbidden">>, Message}]} ->
+ throw({forbidden, Message});
+ {[{<<"unauthorized">>, Message}]} ->
+ throw({unauthorized, Message})
+ end.
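+
+% A validate function rejects a write by producing {"forbidden": Msg} or
+% {"unauthorized": Msg}; the query server replies 1 when the update is
+% allowed.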
+
+json_doc(nil) -> null;
+json_doc(Doc) ->
+ couch_doc:to_json_obj(Doc, [revs]).
+
+filter_docs(Req, Db, DDoc, FName, Docs) ->
+ JsonReq = case Req of
+ {json_req, JsonObj} ->
+ JsonObj;
+ #httpd{} = HttpReq ->
+ couch_httpd_external:json_req_obj(HttpReq, Db)
+ end,
+ JsonDocs = [couch_doc:to_json_obj(Doc, [revs]) || Doc <- Docs],
+ [true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName], [JsonDocs, JsonReq]),
+ {ok, Passes}.
+
+ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
+ proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args]).
+
+ddoc_prompt(DDoc, FunPath, Args) ->
+ with_ddoc_proc(DDoc, fun({Proc, DDocId}) ->
+ proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args])
+ end).
+
+with_ddoc_proc(#doc{id=DDocId,revs={Start, [DiskRev|_]}}=DDoc, Fun) ->
+ Rev = couch_doc:rev_to_str({Start, DiskRev}),
+ DDocKey = {DDocId, Rev},
+ Proc = get_ddoc_process(DDoc, DDocKey),
+ try Fun({Proc, DDocId})
+ after
+ ok = ret_os_process(Proc)
+ end.
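+
+% Usage sketch: callers wrap prompts so the proc is always returned to
+% the pool, e.g. (FunPath and Args are hypothetical)
+%   with_ddoc_proc(DDoc, fun({Proc, DDocId}) ->
+%       proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args])
+%   end)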
+
+init([]) ->
+ % read config and register for configuration changes
+
+    % just stop if one of the config settings changes. couch_server_sup
+ % will restart us and then we will pick up the new settings.
+
+ ok = couch_config:register(
+ fun("query_servers" ++ _, _) ->
+ supervisor:terminate_child(couch_secondary_services, query_servers),
+ supervisor:restart_child(couch_secondary_services, query_servers)
+ end),
+ ok = couch_config:register(
+ fun("native_query_servers" ++ _, _) ->
+ supervisor:terminate_child(couch_secondary_services, query_servers),
+            supervisor:restart_child(couch_secondary_services, query_servers)
+ end),
+ ok = couch_config:register(
+ fun("query_server_config" ++ _, _) ->
+ supervisor:terminate_child(couch_secondary_services, query_servers),
+ supervisor:restart_child(couch_secondary_services, query_servers)
+ end),
+
+ Langs = ets:new(couch_query_server_langs, [set, private]),
+ LangLimits = ets:new(couch_query_server_lang_limits, [set, private]),
+ PidProcs = ets:new(couch_query_server_pid_langs, [set, private]),
+ LangProcs = ets:new(couch_query_server_procs, [set, private]),
+
+ ProcTimeout = list_to_integer(couch_config:get(
+ "couchdb", "os_process_timeout", "5000")),
+ ReduceLimit = list_to_atom(
+ couch_config:get("query_server_config","reduce_limit","true")),
+ OsProcLimit = list_to_integer(
+ couch_config:get("query_server_config","os_process_limit","10")),
+
+ % 'query_servers' specifies an OS command-line to execute.
+ lists:foreach(fun({Lang, Command}) ->
+ true = ets:insert(LangLimits, {?l2b(Lang), OsProcLimit, 0}),
+ true = ets:insert(Langs, {?l2b(Lang),
+ couch_os_process, start_link, [Command]})
+ end, couch_config:get("query_servers")),
+ % 'native_query_servers' specifies a {Module, Func, Arg} tuple.
+ lists:foreach(fun({Lang, SpecStr}) ->
+ {ok, {Mod, Fun, SpecArg}} = couch_util:parse_term(SpecStr),
+ true = ets:insert(LangLimits, {?l2b(Lang), 0, 0}), % 0 means no limit
+ true = ets:insert(Langs, {?l2b(Lang),
+ Mod, Fun, SpecArg})
+ end, couch_config:get("native_query_servers")),
+
+
+ process_flag(trap_exit, true),
+ {ok, #qserver{
+ langs = Langs, % Keyed by language name, value is {Mod,Func,Arg}
+        pid_procs = PidProcs, % Keyed by PID, value is a #proc record.
+ lang_procs = LangProcs, % Keyed by language name, value is a #proc record
+ lang_limits = LangLimits, % Keyed by language name, value is {Lang, Limit, Current}
+ config = {[{<<"reduce_limit">>, ReduceLimit},{<<"timeout">>, ProcTimeout}]}
+ }}.
+
+terminate(_Reason, #qserver{pid_procs=PidProcs}) ->
+ [couch_util:shutdown_sync(P) || {P,_} <- ets:tab2list(PidProcs)],
+ ok.
+
+handle_call({get_proc, #doc{body={Props}}=DDoc, DDocKey}, From, Server) ->
+ Lang = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
+ case lang_proc(Lang, Server, fun(Procs) ->
+ % find a proc in the set that has the DDoc
+ proc_with_ddoc(DDoc, DDocKey, Procs)
+ end) of
+ {ok, Proc} ->
+ {reply, {ok, Proc, Server#qserver.config}, Server};
+ wait ->
+ {noreply, add_to_waitlist({DDoc, DDocKey}, From, Server)};
+ Error ->
+ {reply, Error, Server}
+ end;
+handle_call({get_proc, Lang}, From, Server) ->
+ case lang_proc(Lang, Server, fun([P|_Procs]) ->
+ {ok, P}
+ end) of
+ {ok, Proc} ->
+ {reply, {ok, Proc, Server#qserver.config}, Server};
+ wait ->
+ {noreply, add_to_waitlist({Lang}, From, Server)};
+ Error ->
+ {reply, Error, Server}
+ end;
+handle_call({unlink_proc, Pid}, _From, #qserver{pid_procs=PidProcs}=Server) ->
+ rem_value(PidProcs, Pid),
+ unlink(Pid),
+ {reply, ok, Server};
+handle_call({ret_proc, Proc}, _From, #qserver{
+ pid_procs=PidProcs,
+ lang_procs=LangProcs}=Server) ->
+    % Along with the max process limit, we should check here whether
+    % we're over the limit and discard the proc when we are.
+ add_value(PidProcs, Proc#proc.pid, Proc),
+ add_to_list(LangProcs, Proc#proc.lang, Proc),
+ link(Proc#proc.pid),
+ {reply, true, service_waitlist(Server)}.
+
+handle_cast(_Whatever, Server) ->
+ {noreply, Server}.
+
+handle_info({'EXIT', Pid, Status}, #qserver{
+ pid_procs=PidProcs,
+ lang_procs=LangProcs,
+ lang_limits=LangLimits}=Server) ->
+ case ets:lookup(PidProcs, Pid) of
+ [{Pid, Proc}] ->
+ case Status of
+ normal -> ok;
+ _ -> ?LOG_DEBUG("Linked process died abnormally: ~p (reason: ~p)", [Pid, Status])
+ end,
+ rem_value(PidProcs, Pid),
+ catch rem_from_list(LangProcs, Proc#proc.lang, Proc),
+ [{Lang, Lim, Current}] = ets:lookup(LangLimits, Proc#proc.lang),
+ true = ets:insert(LangLimits, {Lang, Lim, Current-1}),
+ {noreply, service_waitlist(Server)};
+ [] ->
+ case Status of
+ normal ->
+ {noreply, Server};
+ _ ->
+ {stop, Status, Server}
+ end
+ end.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+% Private API
+
+add_to_waitlist(Info, From, #qserver{waitlist=Waitlist}=Server) ->
+ Server#qserver{waitlist=[{Info, From}|Waitlist]}.
+
+service_waitlist(#qserver{waitlist=[]}=Server) ->
+ Server;
+service_waitlist(#qserver{waitlist=Waitlist}=Server) ->
+ [Oldest|RevWList] = lists:reverse(Waitlist),
+ case service_waiting(Oldest, Server) of
+ ok ->
+ Server#qserver{waitlist=lists:reverse(RevWList)};
+ wait ->
+ Server#qserver{waitlist=Waitlist}
+ end.
+
+% todo get rid of duplication
+service_waiting({{#doc{body={Props}}=DDoc, DDocKey}, From}, Server) ->
+ Lang = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
+ case lang_proc(Lang, Server, fun(Procs) ->
+ % find a proc in the set that has the DDoc
+ proc_with_ddoc(DDoc, DDocKey, Procs)
+ end) of
+ {ok, Proc} ->
+ gen_server:reply(From, {ok, Proc, Server#qserver.config}),
+ ok;
+ wait -> % this should never happen
+ wait;
+ Error ->
+ gen_server:reply(From, Error),
+ ok
+ end;
+service_waiting({{Lang}, From}, Server) ->
+ case lang_proc(Lang, Server, fun([P|_Procs]) ->
+ {ok, P}
+ end) of
+ {ok, Proc} ->
+ gen_server:reply(From, {ok, Proc, Server#qserver.config}),
+ ok;
+ wait -> % this should never happen
+ wait;
+ Error ->
+ gen_server:reply(From, Error),
+ ok
+ end.
+
+lang_proc(Lang, #qserver{
+ langs=Langs,
+ pid_procs=PidProcs,
+ lang_procs=LangProcs,
+ lang_limits=LangLimits}, PickFun) ->
+ % Note to future self. Add max process limit.
+ case ets:lookup(LangProcs, Lang) of
+ [{Lang, [P|Procs]}] ->
+ {ok, Proc} = PickFun([P|Procs]),
+ rem_from_list(LangProcs, Lang, Proc),
+ {ok, Proc};
+ _ ->
+ case (catch new_process(Langs, LangLimits, Lang)) of
+ {ok, Proc} ->
+ add_value(PidProcs, Proc#proc.pid, Proc),
+ PickFun([Proc]);
+ ErrorOrWait ->
+ ErrorOrWait
+ end
+ end.
+
+new_process(Langs, LangLimits, Lang) ->
+ [{Lang, Lim, Current}] = ets:lookup(LangLimits, Lang),
+ if (Lim == 0) or (Current < Lim) -> % Lim == 0 means no limit
+ % we are below the limit for our language, make a new one
+ case ets:lookup(Langs, Lang) of
+ [{Lang, Mod, Func, Arg}] ->
+ {ok, Pid} = apply(Mod, Func, Arg),
+ true = ets:insert(LangLimits, {Lang, Lim, Current+1}),
+ {ok, #proc{lang=Lang,
+ pid=Pid,
+ % Called via proc_prompt, proc_set_timeout, and proc_stop
+ prompt_fun={Mod, prompt},
+ set_timeout_fun={Mod, set_timeout},
+ stop_fun={Mod, stop}}};
+ _ ->
+ {unknown_query_language, Lang}
+ end;
+ true ->
+ wait
+ end.
+
+proc_with_ddoc(DDoc, DDocKey, LangProcs) ->
+ DDocProcs = lists:filter(fun(#proc{ddoc_keys=Keys}) ->
+ lists:any(fun(Key) ->
+ Key == DDocKey
+ end, Keys)
+ end, LangProcs),
+ case DDocProcs of
+ [DDocProc|_] ->
+ ?LOG_DEBUG("DDocProc found for DDocKey: ~p",[DDocKey]),
+ {ok, DDocProc};
+ [] ->
+ [TeachProc|_] = LangProcs,
+ ?LOG_DEBUG("Teach ddoc to new proc ~p with DDocKey: ~p",[TeachProc, DDocKey]),
+ {ok, SmartProc} = teach_ddoc(DDoc, DDocKey, TeachProc),
+ {ok, SmartProc}
+ end.
+
+proc_prompt(Proc, Args) ->
+ {Mod, Func} = Proc#proc.prompt_fun,
+ apply(Mod, Func, [Proc#proc.pid, Args]).
+
+proc_stop(Proc) ->
+ {Mod, Func} = Proc#proc.stop_fun,
+ apply(Mod, Func, [Proc#proc.pid]).
+
+proc_set_timeout(Proc, Timeout) ->
+ {Mod, Func} = Proc#proc.set_timeout_fun,
+ apply(Mod, Func, [Proc#proc.pid, Timeout]).
+
+teach_ddoc(DDoc, {DDocId, _Rev}=DDocKey, #proc{ddoc_keys=Keys}=Proc) ->
+ % Send the ddoc over the wire. We share the rev so the client
+ % knows when to update its code, but the client only keeps the
+ % latest copy of each ddoc around.
+ true = proc_prompt(Proc, [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]),
+ % we should remove any other ddocs keys for this docid
+ % because the query server overwrites without the rev
+ Keys2 = [{D,R} || {D,R} <- Keys, D /= DDocId],
+ % add ddoc to the proc
+ {ok, Proc#proc{ddoc_keys=[DDocKey|Keys2]}}.
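+
+% Sketch of the key bookkeeping above: if Keys held
+%   [{<<"_design/a">>, R1}, {<<"_design/b">>, R2}]
+% and we teach {<<"_design/a">>, R3}, the stale R1 entry is dropped and
+% the result is [{<<"_design/a">>, R3}, {<<"_design/b">>, R2}].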
+
+get_ddoc_process(#doc{} = DDoc, DDocKey) ->
+ % TODO: remove this case statement
+ case gen_server:call(couch_query_servers, {get_proc, DDoc, DDocKey}) of
+ {ok, Proc, {QueryConfig}} ->
+ % process knows the ddoc
+ case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+ true ->
+ proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+ link(Proc#proc.pid),
+ gen_server:call(couch_query_servers, {unlink_proc, Proc#proc.pid}),
+ Proc;
+ _ ->
+ catch proc_stop(Proc),
+ get_ddoc_process(DDoc, DDocKey)
+ end;
+ Error ->
+ throw(Error)
+ end.
+
+get_os_process(Lang) ->
+ case gen_server:call(couch_query_servers, {get_proc, Lang}) of
+ {ok, Proc, {QueryConfig}} ->
+ case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+ true ->
+ proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+ link(Proc#proc.pid),
+ gen_server:call(couch_query_servers, {unlink_proc, Proc#proc.pid}),
+ Proc;
+ _ ->
+ catch proc_stop(Proc),
+ get_os_process(Lang)
+ end;
+ Error ->
+ throw(Error)
+ end.
+
+ret_os_process(Proc) ->
+ true = gen_server:call(couch_query_servers, {ret_proc, Proc}),
+ catch unlink(Proc#proc.pid),
+ ok.
+
+add_value(Tid, Key, Value) ->
+ true = ets:insert(Tid, {Key, Value}).
+
+rem_value(Tid, Key) ->
+ true = ets:delete(Tid, Key).
+
+add_to_list(Tid, Key, Value) ->
+ case ets:lookup(Tid, Key) of
+ [{Key, Vals}] ->
+ true = ets:insert(Tid, {Key, [Value|Vals]});
+ [] ->
+ true = ets:insert(Tid, {Key, [Value]})
+ end.
+
+rem_from_list(Tid, Key, Value) when is_record(Value, proc) ->
+ Pid = Value#proc.pid,
+ case ets:lookup(Tid, Key) of
+ [{Key, Vals}] ->
+ % make a new values list that doesn't include the Value arg
+ NewValues = [Val || #proc{pid=P}=Val <- Vals, P /= Pid],
+ ets:insert(Tid, {Key, NewValues});
+ [] -> ok
+ end;
+rem_from_list(Tid, Key, Value) ->
+ case ets:lookup(Tid, Key) of
+ [{Key, Vals}] ->
+ % make a new values list that doesn't include the Value arg
+ NewValues = [Val || Val <- Vals, Val /= Value],
+ ets:insert(Tid, {Key, NewValues});
+ [] -> ok
+ end.
diff --git a/1.1.x/src/couchdb/couch_ref_counter.erl b/1.1.x/src/couchdb/couch_ref_counter.erl
new file mode 100644
index 00000000..5a111ab6
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_ref_counter.erl
@@ -0,0 +1,111 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_ref_counter).
+-behaviour(gen_server).
+
+-export([start/1, init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
+-export([drop/1,drop/2,add/1,add/2,count/1]).
+
+start(ChildProcs) ->
+ gen_server:start(couch_ref_counter, {self(), ChildProcs}, []).
+
+
+drop(RefCounterPid) ->
+ drop(RefCounterPid, self()).
+
+drop(RefCounterPid, Pid) ->
+ gen_server:call(RefCounterPid, {drop, Pid}).
+
+
+add(RefCounterPid) ->
+ add(RefCounterPid, self()).
+
+add(RefCounterPid, Pid) ->
+ gen_server:call(RefCounterPid, {add, Pid}).
+
+count(RefCounterPid) ->
+ gen_server:call(RefCounterPid, count).
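+
+% Typical lifecycle, roughly (pids are illustrative):
+%
+%   {ok, RC} = couch_ref_counter:start([ChildPid]),
+%   ok = couch_ref_counter:add(RC, ClientPid),
+%   2 = couch_ref_counter:count(RC),   % starter + client
+%   ok = couch_ref_counter:drop(RC, ClientPid),
+%
+% Once the last referrer drops (or dies), the server stops and shuts the
+% child processes down synchronously in terminate/2.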
+
+% server functions
+
+-record(srv,
+ {
+ referrers=dict:new(), % maps each referring pid to {MonitorRef, RefCount}
+ child_procs=[]
+ }).
+
+init({Pid, ChildProcs}) ->
+ [link(ChildProc) || ChildProc <- ChildProcs],
+ Referrers = dict:from_list([{Pid, {erlang:monitor(process, Pid), 1}}]),
+ {ok, #srv{referrers=Referrers, child_procs=ChildProcs}}.
+
+
+terminate(_Reason, #srv{child_procs=ChildProcs}) ->
+ [couch_util:shutdown_sync(Pid) || Pid <- ChildProcs],
+ ok.
+
+
+handle_call({add, Pid},_From, #srv{referrers=Referrers}=Srv) ->
+ Referrers2 =
+ case dict:find(Pid, Referrers) of
+ error ->
+ dict:store(Pid, {erlang:monitor(process, Pid), 1}, Referrers);
+ {ok, {MonRef, RefCnt}} ->
+ dict:store(Pid, {MonRef, RefCnt + 1}, Referrers)
+ end,
+ {reply, ok, Srv#srv{referrers=Referrers2}};
+handle_call(count, _From, Srv) ->
+ {monitors, Monitors} = process_info(self(), monitors),
+ {reply, length(Monitors), Srv};
+handle_call({drop, Pid}, _From, #srv{referrers=Referrers}=Srv) ->
+ Referrers2 =
+ case dict:find(Pid, Referrers) of
+ {ok, {MonRef, 1}} ->
+ erlang:demonitor(MonRef, [flush]),
+ dict:erase(Pid, Referrers);
+ {ok, {MonRef, Num}} ->
+ dict:store(Pid, {MonRef, Num-1}, Referrers);
+ error ->
+ Referrers
+ end,
+ Srv2 = Srv#srv{referrers=Referrers2},
+ case should_close() of
+ true ->
+ {stop,normal,ok,Srv2};
+ false ->
+ {reply, ok, Srv2}
+ end.
+
+handle_cast(Msg, _Srv) ->
+ exit({unknown_msg,Msg}).
+
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_info({'DOWN', MonRef, _, Pid, _}, #srv{referrers=Referrers}=Srv) ->
+ {ok, {MonRef, _RefCount}} = dict:find(Pid, Referrers),
+ Srv2 = Srv#srv{referrers=dict:erase(Pid, Referrers)},
+ case should_close() of
+ true ->
+ {stop,normal,Srv2};
+ false ->
+ {noreply,Srv2}
+ end.
+
+
+should_close() ->
+ case process_info(self(), monitors) of
+ {monitors, []} -> true;
+ _ -> false
+ end.
diff --git a/1.1.x/src/couchdb/couch_rep.erl b/1.1.x/src/couchdb/couch_rep.erl
new file mode 100644
index 00000000..5c9fbce6
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_rep.erl
@@ -0,0 +1,972 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rep).
+-behaviour(gen_server).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([replicate/2, checkpoint/1]).
+-export([ensure_rep_db_exists/0, make_replication_id/2]).
+-export([start_replication/3, end_replication/1, get_result/4]).
+-export([update_rep_doc/2]).
+
+-include("couch_db.hrl").
+-include("couch_js_functions.hrl").
+-include("../ibrowse/ibrowse.hrl").
+
+-define(REP_ID_VERSION, 2).
+
+-record(state, {
+ changes_feed,
+ missing_revs,
+ reader,
+ writer,
+
+ source,
+ target,
+ continuous,
+ create_target,
+ init_args,
+ checkpoint_scheduled = nil,
+
+ start_seq,
+ history,
+ session_id,
+ source_log,
+ target_log,
+ rep_starttime,
+ src_starttime,
+ tgt_starttime,
+ checkpoint_history = nil,
+
+ listeners = [],
+ complete = false,
+ committed_seq = 0,
+
+ stats = nil,
+ rep_doc = nil,
+ source_db_update_notifier = nil,
+ target_db_update_notifier = nil
+}).
+
+%% convenience function to do a simple replication from the shell
+replicate(Source, Target) when is_list(Source) ->
+ replicate(?l2b(Source), Target);
+replicate(Source, Target) when is_binary(Source), is_list(Target) ->
+ replicate(Source, ?l2b(Target));
+replicate(Source, Target) when is_binary(Source), is_binary(Target) ->
+ replicate({[{<<"source">>, Source}, {<<"target">>, Target}]}, #user_ctx{});
+
+%% function handling POST to _replicate
+replicate({Props}=PostBody, UserCtx) ->
+ RepId = make_replication_id(PostBody, UserCtx),
+ case couch_util:get_value(<<"cancel">>, Props, false) of
+ true ->
+ end_replication(RepId);
+ false ->
+ Server = start_replication(PostBody, RepId, UserCtx),
+ get_result(Server, RepId, PostBody, UserCtx)
+ end.
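+
+% Shell example (illustrative names); both arguments may be local db
+% names or remote URLs, and plain strings are converted to binaries by
+% the clauses above:
+%
+%   couch_rep:replicate("source_db", "http://remote:5984/target_db/").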
+
+end_replication({BaseId, Extension}) ->
+ RepId = BaseId ++ Extension,
+ case supervisor:terminate_child(couch_rep_sup, RepId) of
+ {error, not_found} = R ->
+ R;
+ ok ->
+ ok = supervisor:delete_child(couch_rep_sup, RepId),
+ {ok, {cancelled, ?l2b(BaseId)}}
+ end.
+
+start_replication(RepDoc, {BaseId, Extension}, UserCtx) ->
+ Replicator = {
+ BaseId ++ Extension,
+ {gen_server, start_link,
+ [?MODULE, [BaseId, RepDoc, UserCtx], []]},
+ temporary,
+ 1,
+ worker,
+ [?MODULE]
+ },
+ start_replication_server(Replicator).
+
+checkpoint(Server) ->
+ gen_server:cast(Server, do_checkpoint).
+
+get_result(Server, {BaseId, _Extension}, {Props} = PostBody, UserCtx) ->
+ case couch_util:get_value(<<"continuous">>, Props, false) of
+ true ->
+ {ok, {continuous, ?l2b(BaseId)}};
+ false ->
+ try gen_server:call(Server, get_result, infinity) of
+ retry -> replicate(PostBody, UserCtx);
+ Else -> Else
+ catch
+ exit:{noproc, {gen_server, call, [Server, get_result, infinity]}} ->
+ %% oops, this replication just finished -- restart it.
+ replicate(PostBody, UserCtx);
+ exit:{normal, {gen_server, call, [Server, get_result, infinity]}} ->
+ %% we made the call during terminate
+ replicate(PostBody, UserCtx)
+ end
+ end.
+
+init(InitArgs) ->
+ try
+ do_init(InitArgs)
+ catch
+ throw:Error ->
+ {stop, Error}
+ end.
+
+do_init([RepId, {PostProps} = RepDoc, UserCtx] = InitArgs) ->
+ process_flag(trap_exit, true),
+
+ SourceProps = couch_util:get_value(<<"source">>, PostProps),
+ TargetProps = couch_util:get_value(<<"target">>, PostProps),
+
+ Continuous = couch_util:get_value(<<"continuous">>, PostProps, false),
+ CreateTarget = couch_util:get_value(<<"create_target">>, PostProps, false),
+
+ ProxyParams = parse_proxy_params(
+ couch_util:get_value(<<"proxy">>, PostProps, [])),
+ Source = open_db(SourceProps, UserCtx, ProxyParams),
+ Target = open_db(TargetProps, UserCtx, ProxyParams, CreateTarget),
+
+ SourceInfo = dbinfo(Source),
+ TargetInfo = dbinfo(Target),
+
+ maybe_set_triggered(RepDoc, RepId),
+
+ [SourceLog, TargetLog] = find_replication_logs(
+ [Source, Target], RepId, {PostProps}, UserCtx),
+ {StartSeq, History} = compare_replication_logs(SourceLog, TargetLog),
+
+ {ok, ChangesFeed} =
+ couch_rep_changes_feed:start_link(self(), Source, StartSeq, PostProps),
+ {ok, MissingRevs} =
+ couch_rep_missing_revs:start_link(self(), Target, ChangesFeed, PostProps),
+ {ok, Reader} =
+ couch_rep_reader:start_link(self(), Source, MissingRevs, PostProps),
+ {ok, Writer} =
+ couch_rep_writer:start_link(self(), Target, Reader, PostProps),
+
+ Stats = ets:new(replication_stats, [set, private]),
+ ets:insert(Stats, {total_revs, 0}),
+ ets:insert(Stats, {missing_revs, 0}),
+ ets:insert(Stats, {docs_read, 0}),
+ ets:insert(Stats, {docs_written, 0}),
+ ets:insert(Stats, {doc_write_failures, 0}),
+
+ {ShortId, _} = lists:split(6, RepId),
+ couch_task_status:add_task("Replication", io_lib:format("~s: ~s -> ~s",
+ [ShortId, dbname(Source), dbname(Target)]), "Starting"),
+
+ State = #state{
+ changes_feed = ChangesFeed,
+ missing_revs = MissingRevs,
+ reader = Reader,
+ writer = Writer,
+
+ source = Source,
+ target = Target,
+ continuous = Continuous,
+ create_target = CreateTarget,
+ init_args = InitArgs,
+ stats = Stats,
+ checkpoint_scheduled = nil,
+
+ start_seq = StartSeq,
+ history = History,
+ session_id = couch_uuids:random(),
+ source_log = SourceLog,
+ target_log = TargetLog,
+ rep_starttime = httpd_util:rfc1123_date(),
+ src_starttime = couch_util:get_value(instance_start_time, SourceInfo),
+ tgt_starttime = couch_util:get_value(instance_start_time, TargetInfo),
+ rep_doc = RepDoc,
+ source_db_update_notifier = source_db_update_notifier(Source),
+ target_db_update_notifier = target_db_update_notifier(Target)
+ },
+ {ok, State}.
+
+handle_call(get_result, From, #state{complete=true, listeners=[]} = State) ->
+ {stop, normal, State#state{listeners=[From]}};
+handle_call(get_result, From, State) ->
+ Listeners = State#state.listeners,
+ {noreply, State#state{listeners=[From|Listeners]}};
+
+handle_call(get_source_db, _From, #state{source = Source} = State) ->
+ {reply, {ok, Source}, State};
+
+handle_call(get_target_db, _From, #state{target = Target} = State) ->
+ {reply, {ok, Target}, State}.
+
+handle_cast(reopen_source_db, #state{source = Source} = State) ->
+ {ok, NewSource} = couch_db:reopen(Source),
+ {noreply, State#state{source = NewSource}};
+
+handle_cast(reopen_target_db, #state{target = Target} = State) ->
+ {ok, NewTarget} = couch_db:reopen(Target),
+ {noreply, State#state{target = NewTarget}};
+
+handle_cast(do_checkpoint, State) ->
+ {noreply, do_checkpoint(State)};
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info({missing_revs_checkpoint, SourceSeq}, State) ->
+ couch_task_status:update("MR Processed source update #~p", [SourceSeq]),
+ {noreply, schedule_checkpoint(State#state{committed_seq = SourceSeq})};
+
+handle_info({writer_checkpoint, SourceSeq}, #state{committed_seq=N} = State)
+ when SourceSeq > N ->
+ MissingRevs = State#state.missing_revs,
+ ok = gen_server:cast(MissingRevs, {update_committed_seq, SourceSeq}),
+ couch_task_status:update("W Processed source update #~p", [SourceSeq]),
+ {noreply, schedule_checkpoint(State#state{committed_seq = SourceSeq})};
+handle_info({writer_checkpoint, _}, State) ->
+ {noreply, State};
+
+handle_info({update_stats, Key, N}, State) ->
+ ets:update_counter(State#state.stats, Key, N),
+ {noreply, State};
+
+handle_info({'DOWN', _, _, _, _}, State) ->
+ ?LOG_INFO("replication terminating because local DB is shutting down", []),
+ timer:cancel(State#state.checkpoint_scheduled),
+ {stop, shutdown, State};
+
+handle_info({'EXIT', Writer, normal}, #state{writer=Writer} = State) ->
+ case State#state.listeners of
+ [] ->
+ {noreply, State#state{complete = true}};
+ _Else ->
+ {stop, normal, State}
+ end;
+
+handle_info({'EXIT', _, normal}, State) ->
+ {noreply, State};
+handle_info({'EXIT', _Pid, {Err, Reason}}, State) when Err == source_error;
+ Err == target_error ->
+ ?LOG_INFO("replication terminating due to ~p: ~p", [Err, Reason]),
+ timer:cancel(State#state.checkpoint_scheduled),
+ {stop, shutdown, State};
+handle_info({'EXIT', _Pid, Reason}, State) ->
+ {stop, Reason, State}.
+
+terminate(normal, #state{checkpoint_scheduled=nil} = State) ->
+ do_terminate(State),
+ update_rep_doc(
+ State#state.rep_doc, [{<<"_replication_state">>, <<"completed">>}]);
+
+terminate(normal, State) ->
+ timer:cancel(State#state.checkpoint_scheduled),
+ do_terminate(do_checkpoint(State)),
+ update_rep_doc(
+ State#state.rep_doc, [{<<"_replication_state">>, <<"completed">>}]);
+
+terminate(shutdown, #state{listeners = Listeners} = State) ->
+ % continuous replication stopped
+ [gen_server:reply(L, {ok, stopped}) || L <- Listeners],
+ terminate_cleanup(State);
+
+terminate(Reason, #state{listeners = Listeners} = State) ->
+ [gen_server:reply(L, {error, Reason}) || L <- Listeners],
+ terminate_cleanup(State),
+ update_rep_doc(
+ State#state.rep_doc, [{<<"_replication_state">>, <<"error">>}]).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+% internal funs
+
+start_replication_server(Replicator) ->
+ RepId = element(1, Replicator),
+ case supervisor:start_child(couch_rep_sup, Replicator) of
+ {ok, Pid} ->
+ ?LOG_INFO("starting new replication ~p at ~p", [RepId, Pid]),
+ Pid;
+ {error, already_present} ->
+ case supervisor:restart_child(couch_rep_sup, RepId) of
+ {ok, Pid} ->
+ ?LOG_INFO("starting replication ~p at ~p", [RepId, Pid]),
+ Pid;
+ {error, running} ->
+ %% this error occurs if multiple replicators are racing
+ %% each other to start and somebody else won. Just grab
+ %% the Pid by calling start_child again.
+ {error, {already_started, Pid}} =
+ supervisor:start_child(couch_rep_sup, Replicator),
+ ?LOG_DEBUG("replication ~p already running at ~p", [RepId, Pid]),
+ Pid;
+ {error, {db_not_found, DbUrl}} ->
+ throw({db_not_found, <<"could not open ", DbUrl/binary>>});
+ {error, {unauthorized, DbUrl}} ->
+ throw({unauthorized,
+ <<"unauthorized to access database ", DbUrl/binary>>});
+ {error, {'EXIT', {badarg,
+ [{erlang, apply, [gen_server, start_link, undefined]} | _]}}} ->
+ % Clause to deal with a change in the supervisor module introduced
+ % in R14B02. For more details consult the thread at:
+ % http://erlang.org/pipermail/erlang-bugs/2011-March/002273.html
+ _ = supervisor:delete_child(couch_rep_sup, RepId),
+ start_replication_server(Replicator)
+ end;
+ {error, {already_started, Pid}} ->
+ ?LOG_DEBUG("replication ~p already running at ~p", [RepId, Pid]),
+ Pid;
+ {error, {{db_not_found, DbUrl}, _}} ->
+ throw({db_not_found, <<"could not open ", DbUrl/binary>>});
+ {error, {{unauthorized, DbUrl}, _}} ->
+ throw({unauthorized,
+ <<"unauthorized to access database ", DbUrl/binary>>})
+ end.
+
+compare_replication_logs(SrcDoc, TgtDoc) ->
+ #doc{body={RepRecProps}} = SrcDoc,
+ #doc{body={RepRecPropsTgt}} = TgtDoc,
+ case couch_util:get_value(<<"session_id">>, RepRecProps) ==
+ couch_util:get_value(<<"session_id">>, RepRecPropsTgt) of
+ true ->
+ % if the records have the same session id,
+ % then we have a valid replication history
+ OldSeqNum = couch_util:get_value(<<"source_last_seq">>, RepRecProps, 0),
+ OldHistory = couch_util:get_value(<<"history">>, RepRecProps, []),
+ {OldSeqNum, OldHistory};
+ false ->
+ SourceHistory = couch_util:get_value(<<"history">>, RepRecProps, []),
+ TargetHistory = couch_util:get_value(<<"history">>, RepRecPropsTgt, []),
+ ?LOG_INFO("Replication records differ. "
+ "Scanning histories to find a common ancestor.", []),
+ ?LOG_DEBUG("Record on source:~p~nRecord on target:~p~n",
+ [RepRecProps, RepRecPropsTgt]),
+ compare_rep_history(SourceHistory, TargetHistory)
+ end.
+
+compare_rep_history(S, T) when S =:= [] orelse T =:= [] ->
+ ?LOG_INFO("no common ancestry -- performing full replication", []),
+ {0, []};
+compare_rep_history([{S}|SourceRest], [{T}|TargetRest]=Target) ->
+ SourceId = couch_util:get_value(<<"session_id">>, S),
+ case has_session_id(SourceId, Target) of
+ true ->
+ RecordSeqNum = couch_util:get_value(<<"recorded_seq">>, S, 0),
+ ?LOG_INFO("found a common replication record with source_seq ~p",
+ [RecordSeqNum]),
+ {RecordSeqNum, SourceRest};
+ false ->
+ TargetId = couch_util:get_value(<<"session_id">>, T),
+ case has_session_id(TargetId, SourceRest) of
+ true ->
+ RecordSeqNum = couch_util:get_value(<<"recorded_seq">>, T, 0),
+ ?LOG_INFO("found a common replication record with source_seq ~p",
+ [RecordSeqNum]),
+ {RecordSeqNum, TargetRest};
+ false ->
+ compare_rep_history(SourceRest, TargetRest)
+ end
+ end.
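+
+% Illustrative scan: with source history sessions [s3, s2, s1] and target
+% sessions [s4, s2, s1], neither head appears on the other side, so both
+% lists are advanced in step until s2 is found on both; its recorded_seq
+% becomes the restart point for the replication.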
+
+close_db(#http_db{}) ->
+ ok;
+close_db(Db) ->
+ couch_db:close(Db).
+
+dbname(#http_db{url = Url}) ->
+ couch_util:url_strip_password(Url);
+dbname(#db{name = Name}) ->
+ Name.
+
+dbinfo(#http_db{} = Db) ->
+ {DbProps} = couch_rep_httpc:request(Db),
+ [{couch_util:to_existing_atom(K), V} || {K,V} <- DbProps];
+dbinfo(Db) ->
+ {ok, Info} = couch_db:get_db_info(Db),
+ Info.
+
+do_terminate(State) ->
+ #state{
+ checkpoint_history = CheckpointHistory,
+ committed_seq = NewSeq,
+ listeners = Listeners,
+ source = Source,
+ continuous = Continuous,
+ source_log = #doc{body={OldHistory}}
+ } = State,
+
+ NewRepHistory = case CheckpointHistory of
+ nil ->
+ {[{<<"no_changes">>, true} | OldHistory]};
+ _Else ->
+ CheckpointHistory
+ end,
+
+ %% reply to original requester
+ OtherListeners = case Continuous of
+ true ->
+ []; % continuous replications have no listeners
+ _ ->
+ [Original|Rest] = lists:reverse(Listeners),
+ gen_server:reply(Original, {ok, NewRepHistory}),
+ Rest
+ end,
+
+ %% maybe trigger another replication. If this replicator uses a local
+ %% source Db, changes to that Db since we started will not be included in
+ %% this pass.
+ case up_to_date(Source, NewSeq) of
+ true ->
+ [gen_server:reply(R, {ok, NewRepHistory}) || R <- OtherListeners];
+ false ->
+ [gen_server:reply(R, retry) || R <- OtherListeners]
+ end,
+ couch_task_status:update("Finishing"),
+ terminate_cleanup(State).
+
+terminate_cleanup(State) ->
+ close_db(State#state.source),
+ close_db(State#state.target),
+ stop_db_update_notifier(State#state.source_db_update_notifier),
+ stop_db_update_notifier(State#state.target_db_update_notifier),
+ ets:delete(State#state.stats).
+
+stop_db_update_notifier(nil) ->
+ ok;
+stop_db_update_notifier(Notifier) ->
+ couch_db_update_notifier:stop(Notifier).
+
+has_session_id(_SessionId, []) ->
+ false;
+has_session_id(SessionId, [{Props} | Rest]) ->
+ case couch_util:get_value(<<"session_id">>, Props, nil) of
+ SessionId ->
+ true;
+ _Else ->
+ has_session_id(SessionId, Rest)
+ end.
+
+maybe_append_options(Options, {Props}) ->
+ lists:foldl(fun(Option, Acc) ->
+ Acc ++
+ case couch_util:get_value(Option, Props, false) of
+ true ->
+ "+" ++ ?b2l(Option);
+ false ->
+ ""
+ end
+ end, [], Options).
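+
+% e.g. maybe_append_options([<<"continuous">>, <<"create_target">>],
+% {[{<<"continuous">>, true}]}) yields "+continuous"; only options that
+% are present and true contribute a "+Name" segment.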
+
+make_replication_id(RepProps, UserCtx) ->
+ BaseId = make_replication_id(RepProps, UserCtx, ?REP_ID_VERSION),
+ Extension = maybe_append_options(
+ [<<"continuous">>, <<"create_target">>], RepProps),
+ {BaseId, Extension}.
+
+% Versioned clauses for generating replication ids.
+% If a change is made to how replications are identified,
+% add a new clause and increase ?REP_ID_VERSION at the top.
+make_replication_id({Props}, UserCtx, 2) ->
+ {ok, HostName} = inet:gethostname(),
+ Port = mochiweb_socket_server:get(couch_httpd, port),
+ Src = get_rep_endpoint(UserCtx, couch_util:get_value(<<"source">>, Props)),
+ Tgt = get_rep_endpoint(UserCtx, couch_util:get_value(<<"target">>, Props)),
+ maybe_append_filters({Props}, [HostName, Port, Src, Tgt], UserCtx);
+make_replication_id({Props}, UserCtx, 1) ->
+ {ok, HostName} = inet:gethostname(),
+ Src = get_rep_endpoint(UserCtx, couch_util:get_value(<<"source">>, Props)),
+ Tgt = get_rep_endpoint(UserCtx, couch_util:get_value(<<"target">>, Props)),
+ maybe_append_filters({Props}, [HostName, Src, Tgt], UserCtx).
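+
+% Version 2 ids thus differ from version 1 ids only by including the
+% httpd port in the hashed term: on host "dev" with port 5984 the v2
+% base list is ["dev", 5984, Src, Tgt] before any filters are appended.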
+
+maybe_append_filters({Props}, Base, UserCtx) ->
+ Base2 = Base ++
+ case couch_util:get_value(<<"filter">>, Props) of
+ undefined ->
+ case couch_util:get_value(<<"doc_ids">>, Props) of
+ undefined ->
+ [];
+ DocIds ->
+ [DocIds]
+ end;
+ Filter ->
+ [filter_code(Filter, Props, UserCtx),
+ couch_util:get_value(<<"query_params">>, Props, {[]})]
+ end,
+ couch_util:to_hex(couch_util:md5(term_to_binary(Base2))).
+
+filter_code(Filter, Props, UserCtx) ->
+ {match, [DDocName, FilterName]} =
+ re:run(Filter, "(.*?)/(.*)", [{capture, [1, 2], binary}]),
+ ProxyParams = parse_proxy_params(
+ couch_util:get_value(<<"proxy">>, Props, [])),
+ Source = open_db(
+ couch_util:get_value(<<"source">>, Props), UserCtx, ProxyParams),
+ try
+ {ok, DDoc} = open_doc(Source, <<"_design/", DDocName/binary>>),
+ Code = couch_util:get_nested_json_value(
+ DDoc#doc.body, [<<"filters">>, FilterName]),
+ re:replace(Code, "^\s*(.*?)\s*$", "\\1", [{return, binary}])
+ after
+ close_db(Source)
+ end.
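+
+% e.g. a filter spec of <<"app/important">> loads _design/app from the
+% source db and returns the whitespace-trimmed source of its
+% filters.important function.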
+
+maybe_add_trailing_slash(Url) ->
+ re:replace(Url, "[^/]$", "&/", [{return, list}]).
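+
+% e.g. "http://host:5984/db" becomes "http://host:5984/db/"; a URL that
+% already ends in "/" is returned unchanged.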
+
+get_rep_endpoint(_UserCtx, {Props}) ->
+ Url = maybe_add_trailing_slash(couch_util:get_value(<<"url">>, Props)),
+ {BinHeaders} = couch_util:get_value(<<"headers">>, Props, {[]}),
+ {Auth} = couch_util:get_value(<<"auth">>, Props, {[]}),
+ case couch_util:get_value(<<"oauth">>, Auth) of
+ undefined ->
+ {remote, Url, [{?b2l(K),?b2l(V)} || {K,V} <- BinHeaders]};
+ {OAuth} ->
+ {remote, Url, [{?b2l(K),?b2l(V)} || {K,V} <- BinHeaders], OAuth}
+ end;
+get_rep_endpoint(_UserCtx, <<"http://",_/binary>>=Url) ->
+ {remote, maybe_add_trailing_slash(Url), []};
+get_rep_endpoint(_UserCtx, <<"https://",_/binary>>=Url) ->
+ {remote, maybe_add_trailing_slash(Url), []};
+get_rep_endpoint(UserCtx, <<DbName/binary>>) ->
+ {local, DbName, UserCtx}.
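+
+% The endpoint shapes produced above, for example:
+%   {remote, "http://host:5984/db/", Headers}          % plain remote
+%   {remote, "http://host:5984/db/", Headers, OAuth}   % remote + oauth
+%   {local, <<"mydb">>, UserCtx}                       % local db name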
+
+find_replication_logs(DbList, RepId, RepProps, UserCtx) ->
+ LogId = ?l2b(?LOCAL_DOC_PREFIX ++ RepId),
+ fold_replication_logs(DbList, ?REP_ID_VERSION,
+ LogId, LogId, RepProps, UserCtx, []).
+
+% Accumulate the replication logs.
+% Falls back to older log document ids and migrates them.
+fold_replication_logs([], _Vsn, _LogId, _NewId, _RepProps, _UserCtx, Acc) ->
+ lists:reverse(Acc);
+fold_replication_logs([Db|Rest]=Dbs, Vsn, LogId, NewId,
+ RepProps, UserCtx, Acc) ->
+ case open_replication_log(Db, LogId) of
+ {error, not_found} when Vsn > 1 ->
+ OldRepId = make_replication_id(RepProps, UserCtx, Vsn - 1),
+ fold_replication_logs(Dbs, Vsn - 1,
+ ?l2b(?LOCAL_DOC_PREFIX ++ OldRepId), NewId, RepProps, UserCtx, Acc);
+ {error, not_found} ->
+ fold_replication_logs(Rest, ?REP_ID_VERSION, NewId, NewId,
+ RepProps, UserCtx, [#doc{id=NewId}|Acc]);
+ {ok, Doc} when LogId =:= NewId ->
+ fold_replication_logs(Rest, ?REP_ID_VERSION, NewId, NewId,
+ RepProps, UserCtx, [Doc|Acc]);
+ {ok, Doc} ->
+ MigratedLog = #doc{id=NewId,body=Doc#doc.body},
+ fold_replication_logs(Rest, ?REP_ID_VERSION, NewId, NewId,
+ RepProps, UserCtx, [MigratedLog|Acc])
+ end.
+
+open_replication_log(Db, DocId) ->
+ case open_doc(Db, DocId) of
+ {ok, Doc} ->
+ ?LOG_DEBUG("found a replication log for ~s", [dbname(Db)]),
+ {ok, Doc};
+ _ ->
+ ?LOG_DEBUG("didn't find a replication log for ~s", [dbname(Db)]),
+ {error, not_found}
+ end.
+
+open_doc(#http_db{} = Db, DocId) ->
+ Req = Db#http_db{resource = couch_util:encode_doc_id(DocId)},
+ case couch_rep_httpc:request(Req) of
+ {[{<<"error">>, _}, {<<"reason">>, _}]} ->
+ {error, not_found};
+ Doc ->
+ {ok, couch_doc:from_json_obj(Doc)}
+ end;
+open_doc(Db, DocId) ->
+ couch_db:open_doc(Db, DocId).
+
+open_db(Props, UserCtx, ProxyParams) ->
+ open_db(Props, UserCtx, ProxyParams, false).
+
+open_db({Props}, _UserCtx, ProxyParams, CreateTarget) ->
+ Url = maybe_add_trailing_slash(couch_util:get_value(<<"url">>, Props)),
+ {AuthProps} = couch_util:get_value(<<"auth">>, Props, {[]}),
+ {BinHeaders} = couch_util:get_value(<<"headers">>, Props, {[]}),
+ Headers = [{?b2l(K),?b2l(V)} || {K,V} <- BinHeaders],
+ DefaultHeaders = (#http_db{})#http_db.headers,
+ Db1 = #http_db{
+ url = Url,
+ auth = AuthProps,
+ headers = lists:ukeymerge(1, Headers, DefaultHeaders)
+ },
+ Db = Db1#http_db{
+ options = Db1#http_db.options ++ ProxyParams ++
+ couch_rep_httpc:ssl_options(Db1)
+ },
+ couch_rep_httpc:db_exists(Db, CreateTarget);
+open_db(<<"http://",_/binary>>=Url, _, ProxyParams, CreateTarget) ->
+ open_db({[{<<"url">>,Url}]}, [], ProxyParams, CreateTarget);
+open_db(<<"https://",_/binary>>=Url, _, ProxyParams, CreateTarget) ->
+ open_db({[{<<"url">>,Url}]}, [], ProxyParams, CreateTarget);
+open_db(<<DbName/binary>>, UserCtx, _ProxyParams, CreateTarget) ->
+ try
+ case CreateTarget of
+ true ->
+ ok = couch_httpd:verify_is_server_admin(UserCtx),
+ couch_server:create(DbName, [{user_ctx, UserCtx}]);
+ false ->
+ ok
+ end,
+
+ case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
+ {ok, Db} ->
+ couch_db:monitor(Db),
+ Db;
+ {not_found, no_db_file} ->
+ throw({db_not_found, DbName})
+ end
+ catch throw:{unauthorized, _} ->
+ throw({unauthorized, DbName})
+ end.
+
+schedule_checkpoint(#state{checkpoint_scheduled = nil} = State) ->
+ Server = self(),
+ case timer:apply_after(5000, couch_rep, checkpoint, [Server]) of
+ {ok, TRef} ->
+ State#state{checkpoint_scheduled = TRef};
+ Error ->
+ ?LOG_ERROR("tried to schedule a checkpoint but got ~p", [Error]),
+ State
+ end;
+schedule_checkpoint(State) ->
+ State.
+
+do_checkpoint(State) ->
+ #state{
+ source = Source,
+ target = Target,
+ committed_seq = NewSeqNum,
+ start_seq = StartSeqNum,
+ history = OldHistory,
+ session_id = SessionId,
+ source_log = SourceLog,
+ target_log = TargetLog,
+ rep_starttime = ReplicationStartTime,
+ src_starttime = SrcInstanceStartTime,
+ tgt_starttime = TgtInstanceStartTime,
+ stats = Stats,
+ rep_doc = {RepDoc}
+ } = State,
+ case commit_to_both(Source, Target, NewSeqNum) of
+ {SrcInstanceStartTime, TgtInstanceStartTime} ->
+ ?LOG_INFO("recording a checkpoint for ~s -> ~s at source update_seq ~p",
+ [dbname(Source), dbname(Target), NewSeqNum]),
+ EndTime = ?l2b(httpd_util:rfc1123_date()),
+ StartTime = ?l2b(ReplicationStartTime),
+ DocsRead = ets:lookup_element(Stats, docs_read, 2),
+ DocsWritten = ets:lookup_element(Stats, docs_written, 2),
+ DocWriteFailures = ets:lookup_element(Stats, doc_write_failures, 2),
+ NewHistoryEntry = {[
+ {<<"session_id">>, SessionId},
+ {<<"start_time">>, StartTime},
+ {<<"end_time">>, EndTime},
+ {<<"start_last_seq">>, StartSeqNum},
+ {<<"end_last_seq">>, NewSeqNum},
+ {<<"recorded_seq">>, NewSeqNum},
+ {<<"missing_checked">>, ets:lookup_element(Stats, total_revs, 2)},
+ {<<"missing_found">>, ets:lookup_element(Stats, missing_revs, 2)},
+ {<<"docs_read">>, DocsRead},
+ {<<"docs_written">>, DocsWritten},
+ {<<"doc_write_failures">>, DocWriteFailures}
+ ]},
+ BaseHistory = [
+ {<<"session_id">>, SessionId},
+ {<<"source_last_seq">>, NewSeqNum},
+ {<<"replication_id_version">>, ?REP_ID_VERSION}
+ ] ++ case couch_util:get_value(<<"doc_ids">>, RepDoc) of
+ undefined ->
+ [];
+ DocIds when is_list(DocIds) ->
+ % backwards compatibility with the result of a replication by
+ % doc IDs in versions 0.11.x and 1.0.x
+ [
+ {<<"start_time">>, StartTime},
+ {<<"end_time">>, EndTime},
+ {<<"docs_read">>, DocsRead},
+ {<<"docs_written">>, DocsWritten},
+ {<<"doc_write_failures">>, DocWriteFailures}
+ ]
+ end,
+ % limit history to 50 entries
+ NewRepHistory = {
+ BaseHistory ++
+ [{<<"history">>, lists:sublist([NewHistoryEntry | OldHistory], 50)}]
+ },
+
+ try
+ {SrcRevPos,SrcRevId} =
+ update_local_doc(Source, SourceLog#doc{body=NewRepHistory}),
+ {TgtRevPos,TgtRevId} =
+ update_local_doc(Target, TargetLog#doc{body=NewRepHistory}),
+ State#state{
+ checkpoint_scheduled = nil,
+ checkpoint_history = NewRepHistory,
+ source_log = SourceLog#doc{revs={SrcRevPos, [SrcRevId]}},
+ target_log = TargetLog#doc{revs={TgtRevPos, [TgtRevId]}}
+ }
+ catch throw:conflict ->
+ ?LOG_ERROR("checkpoint failure: conflict (are you replicating to "
+ "yourself?)", []),
+ State
+ end;
+ _Else ->
+ ?LOG_INFO("rebooting ~s -> ~s from last known replication checkpoint",
+ [dbname(Source), dbname(Target)]),
+ #state{
+ changes_feed = CF,
+ missing_revs = MR,
+ reader = Reader,
+ writer = Writer
+ } = State,
+ Pids = [Writer, Reader, MR, CF],
+ [unlink(Pid) || Pid <- Pids],
+ [exit(Pid, shutdown) || Pid <- Pids],
+ close_db(Target),
+ close_db(Source),
+ {ok, NewState} = init(State#state.init_args),
+ NewState#state{listeners=State#state.listeners}
+ end.
+
+commit_to_both(Source, Target, RequiredSeq) ->
+ % commit the src async
+ ParentPid = self(),
+ SrcCommitPid = spawn_link(fun() ->
+ ParentPid ! {self(), ensure_full_commit(Source, RequiredSeq)} end),
+
+ % commit tgt sync
+ TargetStartTime = ensure_full_commit(Target),
+
+ SourceStartTime =
+ receive
+ {SrcCommitPid, Timestamp} ->
+ Timestamp;
+ {'EXIT', SrcCommitPid, {http_request_failed, _}} ->
+ exit(replication_link_failure)
+ end,
+ {SourceStartTime, TargetStartTime}.
+
+ensure_full_commit(#http_db{headers = Headers} = Target) ->
+ Headers1 = [
+ {"Content-Length", 0} |
+ couch_util:proplist_apply_field(
+ {"Content-Type", "application/json"}, Headers)
+ ],
+ Req = Target#http_db{
+ resource = "_ensure_full_commit",
+ method = post,
+ headers = Headers1
+ },
+ {ResultProps} = couch_rep_httpc:request(Req),
+ true = couch_util:get_value(<<"ok">>, ResultProps),
+ couch_util:get_value(<<"instance_start_time">>, ResultProps);
+ensure_full_commit(Target) ->
+ {ok, NewDb} = couch_db:open_int(Target#db.name, []),
+ UpdateSeq = couch_db:get_update_seq(Target),
+ CommitSeq = couch_db:get_committed_update_seq(NewDb),
+ InstanceStartTime = NewDb#db.instance_start_time,
+ couch_db:close(NewDb),
+ if UpdateSeq > CommitSeq ->
+ ?LOG_DEBUG("target needs a full commit: update ~p commit ~p",
+ [UpdateSeq, CommitSeq]),
+ {ok, DbStartTime} = couch_db:ensure_full_commit(Target),
+ DbStartTime;
+ true ->
+ ?LOG_DEBUG("target doesn't need a full commit", []),
+ InstanceStartTime
+ end.
+
+ensure_full_commit(#http_db{headers = Headers} = Source, RequiredSeq) ->
+ Headers1 = [
+ {"Content-Length", 0} |
+ couch_util:proplist_apply_field(
+ {"Content-Type", "application/json"}, Headers)
+ ],
+ Req = Source#http_db{
+ resource = "_ensure_full_commit",
+ method = post,
+ qs = [{seq, RequiredSeq}],
+ headers = Headers1
+ },
+ {ResultProps} = couch_rep_httpc:request(Req),
+ case couch_util:get_value(<<"ok">>, ResultProps) of
+ true ->
+ couch_util:get_value(<<"instance_start_time">>, ResultProps);
+ undefined ->
+ nil
+ end;
+ensure_full_commit(Source, RequiredSeq) ->
+ {ok, NewDb} = couch_db:open_int(Source#db.name, []),
+ CommitSeq = couch_db:get_committed_update_seq(NewDb),
+ InstanceStartTime = NewDb#db.instance_start_time,
+ couch_db:close(NewDb),
+ if RequiredSeq > CommitSeq ->
+ ?LOG_DEBUG("source needs a full commit: required ~p committed ~p",
+ [RequiredSeq, CommitSeq]),
+ {ok, DbStartTime} = couch_db:ensure_full_commit(Source),
+ DbStartTime;
+ true ->
+ ?LOG_DEBUG("source doesn't need a full commit", []),
+ InstanceStartTime
+ end.
+
+update_local_doc(#http_db{} = Db, Doc) ->
+ Req = Db#http_db{
+ resource = couch_util:encode_doc_id(Doc),
+ method = put,
+ body = couch_doc:to_json_obj(Doc, [attachments]),
+ headers = [{"x-couch-full-commit", "false"} | Db#http_db.headers]
+ },
+ {ResponseMembers} = couch_rep_httpc:request(Req),
+ Rev = couch_util:get_value(<<"rev">>, ResponseMembers),
+ couch_doc:parse_rev(Rev);
+update_local_doc(Db, Doc) ->
+ {ok, Result} = couch_db:update_doc(Db, Doc, [delay_commit]),
+ Result.
+
+up_to_date(#http_db{}, _Seq) ->
+ true;
+up_to_date(Source, Seq) ->
+ {ok, NewDb} = couch_db:open_int(Source#db.name, []),
+ T = NewDb#db.update_seq == Seq,
+ couch_db:close(NewDb),
+ T.
+
+parse_proxy_params(ProxyUrl) when is_binary(ProxyUrl) ->
+ parse_proxy_params(?b2l(ProxyUrl));
+parse_proxy_params([]) ->
+ [];
+parse_proxy_params(ProxyUrl) ->
+ #url{
+ host = Host,
+ port = Port,
+ username = User,
+ password = Passwd
+ } = ibrowse_lib:parse_url(ProxyUrl),
+ [{proxy_host, Host}, {proxy_port, Port}] ++
+ case is_list(User) andalso is_list(Passwd) of
+ false ->
+ [];
+ true ->
+ [{proxy_user, User}, {proxy_password, Passwd}]
+ end.
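+
+% e.g. parse_proxy_params("http://user:pass@proxy.local:8080/") would
+% yield [{proxy_host, "proxy.local"}, {proxy_port, 8080},
+% {proxy_user, "user"}, {proxy_password, "pass"}].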
+
+update_rep_doc({Props} = _RepDoc, KVs) ->
+ case couch_util:get_value(<<"_id">>, Props) of
+ undefined ->
+ % replication triggered by POSTing to _replicate/
+ ok;
+ RepDocId ->
+ % replication triggered by adding a Rep Doc to the replicator DB
+ {ok, RepDb} = ensure_rep_db_exists(),
+ case couch_db:open_doc(RepDb, RepDocId, []) of
+ {ok, LatestRepDoc} ->
+ update_rep_doc(RepDb, LatestRepDoc, KVs);
+ _ ->
+ ok
+ end,
+ couch_db:close(RepDb)
+ end.
+
+update_rep_doc(RepDb, #doc{body = {RepDocBody}} = RepDoc, KVs) ->
+ NewRepDocBody = lists:foldl(
+ fun({<<"_replication_state">> = K, _V} = KV, Body) ->
+ Body1 = lists:keystore(K, 1, Body, KV),
+ {Mega, Secs, _} = erlang:now(),
+ UnixTime = Mega * 1000000 + Secs,
+ lists:keystore(
+ <<"_replication_state_time">>, 1,
+ Body1, {<<"_replication_state_time">>, UnixTime});
+ ({K, _V} = KV, Body) ->
+ lists:keystore(K, 1, Body, KV)
+ end,
+ RepDocBody,
+ KVs
+ ),
+ % this update may not succeed if the replication doc was deleted
+ % right before it; that is not an error
+ couch_db:update_doc(
+ RepDb,
+ RepDoc#doc{body = {NewRepDocBody}},
+ []
+ ).
+
+maybe_set_triggered({RepProps} = RepDoc, RepId) ->
+ case couch_util:get_value(<<"_replication_state">>, RepProps) of
+ <<"triggered">> ->
+ ok;
+ _ ->
+ update_rep_doc(
+ RepDoc,
+ [
+ {<<"_replication_state">>, <<"triggered">>},
+ {<<"_replication_id">>, ?l2b(RepId)}
+ ]
+ )
+ end.
+
+ensure_rep_db_exists() ->
+ DbName = ?l2b(couch_config:get("replicator", "db", "_replicator")),
+ Opts = [
+ {user_ctx, #user_ctx{roles=[<<"_admin">>, <<"_replicator">>]}},
+ sys_db
+ ],
+ case couch_db:open(DbName, Opts) of
+ {ok, Db} ->
+ Db;
+ _Error ->
+ {ok, Db} = couch_db:create(DbName, Opts)
+ end,
+ ok = ensure_rep_ddoc_exists(Db, <<"_design/_replicator">>),
+ {ok, Db}.
+
+ensure_rep_ddoc_exists(RepDb, DDocID) ->
+ case couch_db:open_doc(RepDb, DDocID, []) of
+ {ok, _Doc} ->
+ ok;
+ _ ->
+ DDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, DDocID},
+ {<<"language">>, <<"javascript">>},
+ {<<"validate_doc_update">>, ?REP_DB_DOC_VALIDATE_FUN}
+ ]}),
+ {ok, _Rev} = couch_db:update_doc(RepDb, DDoc, [])
+ end,
+ ok.
+
+source_db_update_notifier(#db{name = DbName}) ->
+ Server = self(),
+ {ok, Notifier} = couch_db_update_notifier:start_link(
+ fun({compacted, DbName1}) when DbName1 =:= DbName ->
+ ok = gen_server:cast(Server, reopen_source_db);
+ (_) ->
+ ok
+ end),
+ Notifier;
+source_db_update_notifier(_) ->
+ nil.
+
+target_db_update_notifier(#db{name = DbName}) ->
+ Server = self(),
+ {ok, Notifier} = couch_db_update_notifier:start_link(
+ fun({compacted, DbName1}) when DbName1 =:= DbName ->
+ ok = gen_server:cast(Server, reopen_target_db);
+ (_) ->
+ ok
+ end),
+ Notifier;
+target_db_update_notifier(_) ->
+ nil.
diff --git a/1.1.x/src/couchdb/couch_rep_att.erl b/1.1.x/src/couchdb/couch_rep_att.erl
new file mode 100644
index 00000000..6bb993a8
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_rep_att.erl
@@ -0,0 +1,119 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rep_att).
+
+-export([convert_stub/2, cleanup/0]).
+
+-include("couch_db.hrl").
+
+convert_stub(#att{data=stub, name=Name} = Attachment,
+ {#http_db{} = Db, Id, Rev}) ->
+ {Pos, [RevId|_]} = Rev,
+ Request = Db#http_db{
+ resource = lists:flatten([couch_util:url_encode(Id), "/",
+ couch_util:url_encode(Name)]),
+ qs = [{rev, couch_doc:rev_to_str({Pos,RevId})}]
+ },
+ Ref = make_ref(),
+ RcvFun = fun() -> attachment_receiver(Ref, Request) end,
+ Attachment#att{data=RcvFun}.
+
+cleanup() ->
+ receive
+ {ibrowse_async_response, _, _} ->
+ %% TODO: maybe log; we didn't expect to have data here
+ cleanup();
+ {ibrowse_async_response_end, _} ->
+ cleanup();
+ {ibrowse_async_headers, _, _, _} ->
+ cleanup()
+ after 0 ->
+ erase(),
+ ok
+ end.
+
+% internal funs
+
+attachment_receiver(Ref, Request) ->
+ try case get(Ref) of
+ undefined ->
+ {ReqId, ContentEncoding} = start_http_request(Request),
+ put(Ref, {ReqId, ContentEncoding}),
+ receive_data(Ref, ReqId, ContentEncoding);
+ {ReqId, ContentEncoding} ->
+ receive_data(Ref, ReqId, ContentEncoding)
+ end
+ catch
+ throw:{attachment_request_failed, _} ->
+ case {Request#http_db.retries, Request#http_db.pause} of
+ {0, _} ->
+ ?LOG_INFO("request for ~p failed", [Request#http_db.resource]),
+ throw({attachment_request_failed, max_retries_reached});
+ {N, Pause} when N > 0 ->
+ ?LOG_INFO("request for ~p timed out, retrying in ~p seconds",
+ [Request#http_db.resource, Pause/1000]),
+ timer:sleep(Pause),
+ cleanup(),
+ attachment_receiver(Ref, Request#http_db{retries = N-1})
+ end
+ end.
+
+receive_data(Ref, ReqId, ContentEncoding) ->
+ receive
+ {ibrowse_async_response, ReqId, {chunk_start,_}} ->
+ receive_data(Ref, ReqId, ContentEncoding);
+ {ibrowse_async_response, ReqId, chunk_end} ->
+ receive_data(Ref, ReqId, ContentEncoding);
+ {ibrowse_async_response, ReqId, {error, Err}} ->
+ ?LOG_ERROR("streaming attachment ~p failed with ~p", [ReqId, Err]),
+ throw({attachment_request_failed, Err});
+ {ibrowse_async_response, ReqId, Data} ->
+ % ?LOG_DEBUG("got ~p bytes for ~p", [size(Data), ReqId]),
+ Data;
+ {ibrowse_async_response_end, ReqId} ->
+ ?LOG_ERROR("streaming att. ended but more data requested ~p", [ReqId]),
+ throw({attachment_request_failed, premature_end})
+ after 31000 ->
+ throw({attachment_request_failed, timeout})
+ end.
+
+start_http_request(Req) ->
+ %% set stream_to here because self() has changed
+ Req2 = Req#http_db{options = [{stream_to,self()} | Req#http_db.options]},
+ {ibrowse_req_id, ReqId} = couch_rep_httpc:request(Req2),
+ receive {ibrowse_async_headers, ReqId, Code, Headers} ->
+ case validate_headers(Req2, list_to_integer(Code), Headers) of
+ {ok, ContentEncoding} ->
+ {ReqId, ContentEncoding};
+ {ok, ContentEncoding, NewReqId} ->
+ {NewReqId, ContentEncoding}
+ end
+ after 10000 ->
+ throw({attachment_request_failed, timeout})
+ end.
+
+validate_headers(_Req, 200, Headers) ->
+ MochiHeaders = mochiweb_headers:make(Headers),
+ {ok, mochiweb_headers:get_value("Content-Encoding", MochiHeaders)};
+validate_headers(Req, Code, Headers) when Code > 299, Code < 400 ->
+ NewReq = couch_rep_httpc:redirected_request(Code, Headers, Req),
+ {ibrowse_req_id, ReqId} = couch_rep_httpc:request(NewReq),
+ receive {ibrowse_async_headers, ReqId, NewCode, NewHeaders} ->
+ {ok, Encoding} = validate_headers(NewReq, list_to_integer(NewCode),
+ NewHeaders)
+ end,
+ {ok, Encoding, ReqId};
+validate_headers(Req, Code, _Headers) ->
+ #http_db{url=Url, resource=Resource} = Req,
+ ?LOG_ERROR("got ~p for ~s~s", [Code, Url, Resource]),
+ throw({attachment_request_failed, {bad_code, Code}}).
diff --git a/1.1.x/src/couchdb/couch_rep_changes_feed.erl b/1.1.x/src/couchdb/couch_rep_changes_feed.erl
new file mode 100644
index 00000000..1c298937
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_rep_changes_feed.erl
@@ -0,0 +1,503 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rep_changes_feed).
+-behaviour(gen_server).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([start_link/4, next/1, stop/1]).
+
+-define(BUFFER_SIZE, 1000).
+-define(DOC_IDS_FILTER_NAME, "_doc_ids").
+
+-include("couch_db.hrl").
+-include("../ibrowse/ibrowse.hrl").
+
+-record (state, {
+ changes_from = nil,
+ changes_loop = nil,
+ init_args,
+ last_seq,
+ conn = nil,
+ reqid = nil,
+ complete = false,
+ count = 0,
+ partial_chunk = <<>>,
+ reply_to = nil,
+ rows = queue:new(),
+ doc_ids = nil
+}).
+
+-import(couch_util, [
+ get_value/2,
+ get_value/3
+]).
+
+start_link(Parent, Source, StartSeq, PostProps) ->
+ gen_server:start_link(?MODULE, [Parent, Source, StartSeq, PostProps], []).
+
+next(Server) ->
+ gen_server:call(Server, next_changes, infinity).
+
+stop(Server) ->
+ catch gen_server:call(Server, stop),
+ ok.
+
+init([Parent, #http_db{headers = Headers0} = Source, Since, PostProps]) ->
+ process_flag(trap_exit, true),
+ Feed = case get_value(<<"continuous">>, PostProps, false) of
+ false ->
+ normal;
+ true ->
+ continuous
+ end,
+ BaseQS = [
+ {"style", all_docs},
+ {"heartbeat", 10000},
+ {"since", Since},
+ {"feed", Feed}
+ ],
+ {QS, Method, Body, Headers} = case get_value(<<"doc_ids">>, PostProps) of
+ undefined ->
+ {maybe_add_filter_qs_params(PostProps, BaseQS), get, nil, Headers0};
+ DocIds when is_list(DocIds) ->
+ Headers1 = [{"Content-Type", "application/json"} | Headers0],
+ QS1 = [{"filter", ?l2b(?DOC_IDS_FILTER_NAME)} | BaseQS],
+ {QS1, post, {[{<<"doc_ids">>, DocIds}]}, Headers1}
+ end,
+ Pid = couch_rep_httpc:spawn_link_worker_process(Source),
+ Req = Source#http_db{
+ method = Method,
+ body = Body,
+ resource = "_changes",
+ qs = QS,
+ conn = Pid,
+ options = [{stream_to, {self(), once}}] ++
+ lists:keydelete(inactivity_timeout, 1, Source#http_db.options),
+ headers = Headers -- [{"Accept-Encoding", "gzip"}]
+ },
+ {ibrowse_req_id, ReqId} = couch_rep_httpc:request(Req),
+ Args = [Parent, Req, Since, PostProps],
+ State = #state{
+ conn = Pid,
+ last_seq = Since,
+ reqid = ReqId,
+ init_args = Args,
+ doc_ids = get_value(<<"doc_ids">>, PostProps, nil)
+ },
+
+ receive
+ {ibrowse_async_headers, ReqId, "200", _} ->
+ ibrowse:stream_next(ReqId),
+ {ok, State};
+ {ibrowse_async_headers, ReqId, Code, Hdrs}
+ when Code =:= "301"; Code =:= "302"; Code =:= "303" ->
+ {ReqId2, Req2} = redirect_req(Req, Code, Hdrs),
+ receive
+ {ibrowse_async_headers, ReqId2, "200", _} ->
+ {ok, State#state{
+ conn = Req2#http_db.conn,
+ reqid = ReqId2,
+ init_args = [Parent, Req2, Since, PostProps]}};
+ {ibrowse_async_headers, ReqId2, "405", _} when Method =:= post ->
+ {ReqId3, Req3} = req_no_builtin_doc_ids(Req2, ReqId2),
+ receive
+ {ibrowse_async_headers, ReqId3, "200", _} ->
+ {ok, State#state{
+ conn = Req3#http_db.conn,
+ reqid = ReqId3,
+ init_args = [Parent, Req3, Since, PostProps]}}
+ after 30000 ->
+ {stop, changes_timeout}
+ end
+ after 30000 ->
+ {stop, changes_timeout}
+ end;
+ {ibrowse_async_headers, ReqId, "404", _} ->
+ stop_link_worker(Pid),
+ ?LOG_INFO("source doesn't have _changes, trying _all_docs_by_seq", []),
+ Self = self(),
+ BySeqPid = spawn_link(fun() -> by_seq_loop(Self, Source, Since) end),
+ {ok, State#state{changes_loop = BySeqPid}};
+ {ibrowse_async_headers, ReqId, "405", _} when Method =:= post ->
+ {ReqId2, Req2} = req_no_builtin_doc_ids(Req, ReqId),
+ receive
+ {ibrowse_async_headers, ReqId2, "200", _} ->
+ {ok, State#state{
+ conn = Req2#http_db.conn,
+ reqid = ReqId2,
+ init_args = [Parent, Req2, Since, PostProps]}};
+ {ibrowse_async_headers, ReqId2, Code, Hdrs}
+ when Code =:= "301"; Code =:= "302"; Code =:= "303" ->
+ {ReqId3, Req3} = redirect_req(Req2, Code, Hdrs),
+ receive
+ {ibrowse_async_headers, ReqId3, "200", _} ->
+ {ok, State#state{
+ conn = Req3#http_db.conn,
+ reqid = ReqId3,
+ init_args = [Parent, Req3, Since, PostProps]}}
+ after 30000 ->
+ {stop, changes_timeout}
+ end
+ after 30000 ->
+ {stop, changes_timeout}
+ end;
+ {ibrowse_async_headers, ReqId, Code, _} ->
+ {stop, {changes_error_code, list_to_integer(Code)}}
+ after 10000 ->
+ {stop, changes_timeout}
+ end;
+
+init([_Parent, Source, Since, PostProps] = InitArgs) ->
+ process_flag(trap_exit, true),
+ Server = self(),
+ Filter = case get_value(<<"doc_ids">>, PostProps) of
+ undefined ->
+ ?b2l(get_value(<<"filter">>, PostProps, <<>>));
+ DocIds when is_list(DocIds) ->
+ ?DOC_IDS_FILTER_NAME
+ end,
+ ChangesArgs = #changes_args{
+ style = all_docs,
+ since = Since,
+ filter = Filter,
+ feed = case get_value(<<"continuous">>, PostProps, false) of
+ true ->
+ "continuous";
+ false ->
+ "normal"
+ end,
+ timeout = infinity
+ },
+ ChangesPid = spawn_link(fun() ->
+ ChangesFeedFun = couch_changes:handle_changes(
+ ChangesArgs,
+ {json_req, filter_json_req(Filter, Source, PostProps)},
+ Source
+ ),
+ ChangesFeedFun(fun({change, Change, _}, _) ->
+ gen_server:call(Server, {add_change, Change}, infinity);
+ (_, _) ->
+ ok
+ end)
+ end),
+ {ok, #state{changes_loop=ChangesPid, init_args=InitArgs}}.
+
+maybe_add_filter_qs_params(PostProps, BaseQS) ->
+ case get_value(<<"filter">>, PostProps) of
+ undefined ->
+ BaseQS;
+ FilterName ->
+ {Params} = get_value(<<"query_params">>, PostProps, {[]}),
+ lists:foldr(
+ fun({K, V}, QSAcc) ->
+ Ks = couch_util:to_list(K),
+ case proplists:is_defined(Ks, QSAcc) of
+ true ->
+ QSAcc;
+ false ->
+ [{Ks, V} | QSAcc]
+ end
+ end,
+ [{"filter", FilterName} | BaseQS],
+ Params
+ )
+ end.
+
+filter_json_req([], _Db, _PostProps) ->
+ {[]};
+filter_json_req(?DOC_IDS_FILTER_NAME, _Db, PostProps) ->
+ {[{<<"doc_ids">>, get_value(<<"doc_ids">>, PostProps)}]};
+filter_json_req(FilterName, Db, PostProps) ->
+ {Query} = get_value(<<"query_params">>, PostProps, {[]}),
+ {ok, Info} = couch_db:get_db_info(Db),
+ % simulate a request to db_name/_changes
+ {[
+ {<<"info">>, {Info}},
+ {<<"id">>, null},
+ {<<"method">>, 'GET'},
+ {<<"path">>, [couch_db:name(Db), <<"_changes">>]},
+ {<<"query">>, {[{<<"filter">>, FilterName} | Query]}},
+ {<<"headers">>, []},
+ {<<"body">>, []},
+ {<<"peer">>, <<"replicator">>},
+ {<<"form">>, []},
+ {<<"cookie">>, []},
+ {<<"userCtx">>, couch_util:json_user_ctx(Db)}
+ ]}.
+
+handle_call({add_change, Row}, From, State) ->
+ handle_add_change(Row, From, State);
+
+handle_call(next_changes, From, State) ->
+ handle_next_changes(From, State);
+
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info({ibrowse_async_headers, Id, Code, Hdrs}, #state{reqid=Id}=State) ->
+ handle_headers(list_to_integer(Code), Hdrs, State);
+
+handle_info({ibrowse_async_response, Id, {error, sel_conn_closed}},
+ #state{reqid=Id}=State) ->
+ handle_retry(State);
+
+handle_info({ibrowse_async_response, Id, {error, connection_closed}},
+ #state{reqid=Id}=State) ->
+ handle_retry(State);
+
+handle_info({ibrowse_async_response, Id, {error,E}}, #state{reqid=Id}=State) ->
+ {stop, {error, E}, State};
+
+handle_info({ibrowse_async_response, Id, Chunk}, #state{reqid=Id}=State) ->
+ Messages = [M || M <- re:split(Chunk, ",?\n", [trim]), M =/= <<>>],
+ handle_messages(Messages, State);
+
+handle_info({ibrowse_async_response_end, Id}, #state{reqid=Id} = State) ->
+ handle_feed_completion(State);
+
+handle_info({'EXIT', From, normal}, #state{changes_loop=From} = State) ->
+ handle_feed_completion(State);
+
+handle_info({'EXIT', From, normal}, #state{conn=From, complete=true} = State) ->
+ {noreply, State};
+
+handle_info({'EXIT', From, Reason}, #state{changes_loop=From} = State) ->
+ ?LOG_ERROR("changes_loop died with reason ~p", [Reason]),
+ {stop, changes_loop_died, State};
+
+handle_info({'EXIT', From, Reason}, State) ->
+ ?LOG_ERROR("changes loop, process ~p died with reason ~p", [From, Reason]),
+ {stop, {From, Reason}, State};
+
+handle_info(Msg, #state{init_args = InitArgs} = State) ->
+ case Msg of
+ changes_timeout ->
+ [_, #http_db{url = Url} | _] = InitArgs,
+ ?LOG_ERROR("changes loop timeout, no data received from ~s",
+ [couch_util:url_strip_password(Url)]);
+ _ ->
+ ?LOG_ERROR("changes loop received unexpected message ~p", [Msg])
+ end,
+ {stop, Msg, State}.
+
+terminate(_Reason, State) ->
+ #state{
+ changes_loop = ChangesPid,
+ conn = Conn
+ } = State,
+ if is_pid(ChangesPid) -> exit(ChangesPid, stop); true -> ok end,
+ stop_link_worker(Conn).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%internal funs
+
+handle_add_change(Row, From, #state{reply_to=nil} = State) ->
+ {Rows2, Count2} = queue_changes_row(Row, State),
+ NewState = State#state{count = Count2, rows = Rows2},
+ if Count2 =< ?BUFFER_SIZE ->
+ {reply, ok, NewState};
+ true ->
+ {noreply, NewState#state{changes_from=From}}
+ end;
+handle_add_change(Row, _From, #state{count=0} = State) ->
+ gen_server:reply(State#state.reply_to, [Row]),
+ {reply, ok, State#state{reply_to=nil}}.
+
+handle_next_changes(From, #state{count=0}=State) ->
+ if State#state.complete ->
+ {stop, normal, complete, State};
+ true ->
+ {noreply, State#state{reply_to=From}}
+ end;
+handle_next_changes(_From, State) ->
+ #state{
+ changes_from = ChangesFrom,
+ rows = Rows
+ } = State,
+ NewState = State#state{count=0, changes_from=nil, rows=queue:new()},
+ maybe_stream_next(NewState),
+ if ChangesFrom =/= nil -> gen_server:reply(ChangesFrom, ok); true -> ok end,
+ {reply, queue:to_list(Rows), NewState}.
+
+handle_headers(200, _, State) ->
+ maybe_stream_next(State),
+ {noreply, State};
+handle_headers(Code, Hdrs, #state{init_args = InitArgs} = State)
+ when Code =:= 301 ; Code =:= 302 ; Code =:= 303 ->
+ stop_link_worker(State#state.conn),
+ [Parent, Source, Since, PostProps] = InitArgs,
+ Source2 = couch_rep_httpc:redirected_request(Code, Hdrs, Source),
+ Pid2 = couch_rep_httpc:spawn_link_worker_process(Source2),
+ Source3 = Source2#http_db{conn = Pid2},
+ {ibrowse_req_id, ReqId} = couch_rep_httpc:request(Source3),
+ InitArgs2 = [Parent, Source3, Since, PostProps],
+ {noreply, State#state{conn=Pid2, reqid=ReqId, init_args=InitArgs2}};
+handle_headers(Code, Hdrs, State) ->
+ ?LOG_ERROR("replicator changes feed failed with code ~s and Headers ~n~p",
+ [Code,Hdrs]),
+ {stop, {error, Code}, State}.
+
+handle_messages([], State) ->
+ maybe_stream_next(State),
+ {noreply, State};
+handle_messages([<<"{\"results\":[">>|Rest], State) ->
+ handle_messages(Rest, State);
+handle_messages([<<"]">>, <<"\"last_seq\":", _/binary>>], State) ->
+ handle_feed_completion(State);
+handle_messages([<<"{\"last_seq\":", _/binary>>], State) ->
+ handle_feed_completion(State);
+handle_messages([Chunk|Rest], #state{partial_chunk = Partial} = State) ->
+ NewState = try
+ Row = {Props} = decode_row(<<Partial/binary, Chunk/binary>>),
+ case State of
+ #state{reply_to=nil} ->
+ {Rows2, Count2} = queue_changes_row(Row, State),
+ State#state{
+ last_seq = couch_util:get_value(<<"seq">>, Props),
+ partial_chunk = <<>>,
+ rows = Rows2,
+ count = Count2
+ };
+ #state{count=0, reply_to=From}->
+ gen_server:reply(From, [Row]),
+ State#state{reply_to = nil, partial_chunk = <<>>}
+ end
+ catch
+ throw:{invalid_json, Bad} ->
+ State#state{partial_chunk = Bad}
+ end,
+ handle_messages(Rest, NewState).
+
+handle_feed_completion(#state{reply_to=nil} = State)->
+ {noreply, State#state{complete=true}};
+handle_feed_completion(#state{count=0} = State) ->
+ gen_server:reply(State#state.reply_to, complete),
+ {stop, normal, State}.
+
+handle_retry(State) ->
+ ?LOG_DEBUG("retrying changes feed because our connection closed", []),
+ #state{
+ count = Count,
+ init_args = [_, Source, _, PostProps],
+ last_seq = Since,
+ reply_to = ReplyTo,
+ rows = Rows
+ } = State,
+ case init([nil, Source, Since, PostProps]) of
+ {ok, State1} ->
+ MergedState = State1#state{
+ count = Count,
+ reply_to = ReplyTo,
+ rows = Rows
+ },
+ {noreply, MergedState};
+ _ ->
+ {stop, {error, connection_closed}, State}
+ end.
+
+by_seq_loop(Server, Source, StartSeq) ->
+ Req = Source#http_db{
+ resource = "_all_docs_by_seq",
+ qs = [{limit, 1000}, {startkey, StartSeq}]
+ },
+ {Results} = couch_rep_httpc:request(Req),
+ Rows = couch_util:get_value(<<"rows">>, Results),
+ if Rows =:= [] -> exit(normal); true -> ok end,
+ EndSeq = lists:foldl(fun({RowInfoList}, _) ->
+ Id = couch_util:get_value(<<"id">>, RowInfoList),
+ Seq = couch_util:get_value(<<"key">>, RowInfoList),
+ {RowProps} = couch_util:get_value(<<"value">>, RowInfoList),
+ RawRevs = [
+ couch_util:get_value(<<"rev">>, RowProps),
+ couch_util:get_value(<<"conflicts">>, RowProps, []),
+ couch_util:get_value(<<"deleted_conflicts">>, RowProps, [])
+ ],
+ ParsedRevs = couch_doc:parse_revs(lists:flatten(RawRevs)),
+ Change = {[
+ {<<"seq">>, Seq},
+ {<<"id">>, Id},
+ {<<"changes">>, [{[{<<"rev">>,R}]} || R <- ParsedRevs]}
+ ]},
+ gen_server:call(Server, {add_change, Change}, infinity),
+ Seq
+ end, 0, Rows),
+ by_seq_loop(Server, Source, EndSeq).
+
+decode_row(<<",", Rest/binary>>) ->
+ decode_row(Rest);
+decode_row(Row) ->
+ ?JSON_DECODE(Row).
+
+maybe_stream_next(#state{reqid=nil}) ->
+ ok;
+maybe_stream_next(#state{complete=false, count=N} = S) when N < ?BUFFER_SIZE ->
+ timer:cancel(get(timeout)),
+ {ok, Timeout} = timer:send_after(31000, changes_timeout),
+ put(timeout, Timeout),
+ ibrowse:stream_next(S#state.reqid);
+maybe_stream_next(_) ->
+ timer:cancel(get(timeout)).
+
+stop_link_worker(Conn) when is_pid(Conn) ->
+ unlink(Conn),
+ receive {'EXIT', Conn, _} -> ok after 0 -> ok end,
+ catch ibrowse:stop_worker_process(Conn);
+stop_link_worker(_) ->
+ ok.
+
+redirect_req(#http_db{conn = WorkerPid} = Req, Code, Headers) ->
+ stop_link_worker(WorkerPid),
+ Req2 = couch_rep_httpc:redirected_request(Code, Headers, Req),
+ WorkerPid2 = couch_rep_httpc:spawn_link_worker_process(Req2),
+ Req3 = Req2#http_db{conn = WorkerPid2},
+ {ibrowse_req_id, ReqId} = couch_rep_httpc:request(Req3),
+ {ReqId, Req3}.
+
+req_no_builtin_doc_ids(#http_db{conn = WorkerPid, qs = QS} = Req, ReqId) ->
+ % CouchDB versions prior to 1.1.0 don't have the builtin filter _doc_ids
+ % and don't allow POSTing to /database/_changes
+ purge_req_messages(ReqId),
+ stop_link_worker(WorkerPid),
+ Req2 = Req#http_db{method = get, qs = lists:keydelete("filter", 1, QS)},
+ WorkerPid2 = couch_rep_httpc:spawn_link_worker_process(Req2),
+ Req3 = Req2#http_db{conn = WorkerPid2},
+ {ibrowse_req_id, ReqId2} = couch_rep_httpc:request(Req3),
+ {ReqId2, Req3}.
+
+purge_req_messages(ReqId) ->
+ ibrowse:stream_next(ReqId),
+ receive
+ {ibrowse_async_response, ReqId, {error, _}} ->
+ ok;
+ {ibrowse_async_response, ReqId, _Data} ->
+ purge_req_messages(ReqId);
+ {ibrowse_async_response_end, ReqId} ->
+ ok
+ end.
+
+queue_changes_row(Row, #state{doc_ids = nil, count = Count, rows = Rows}) ->
+ {queue:in(Row, Rows), Count + 1};
+queue_changes_row({RowProps} = Row,
+ #state{doc_ids = Ids, count = Count, rows = Rows}) ->
+ case lists:member(get_value(<<"id">>, RowProps), Ids) of
+ true ->
+ {queue:in(Row, Rows), Count + 1};
+ false ->
+ {Rows, Count}
+ end.
diff --git a/1.1.x/src/couchdb/couch_rep_httpc.erl b/1.1.x/src/couchdb/couch_rep_httpc.erl
new file mode 100644
index 00000000..bbe390a9
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_rep_httpc.erl
@@ -0,0 +1,317 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rep_httpc).
+-include("couch_db.hrl").
+-include("../ibrowse/ibrowse.hrl").
+
+-export([db_exists/1, db_exists/2]).
+-export([full_url/1, request/1, redirected_request/3]).
+-export([spawn_worker_process/1, spawn_link_worker_process/1]).
+-export([ssl_options/1]).
+
+request(#http_db{} = Req) ->
+ do_request(Req).
+
+do_request(#http_db{url=Url} = Req) when is_binary(Url) ->
+ do_request(Req#http_db{url = ?b2l(Url)});
+
+do_request(Req) ->
+ #http_db{
+ auth = Auth,
+ body = B,
+ conn = Conn,
+ headers = Headers0,
+ method = Method,
+ options = Opts,
+ qs = QS
+ } = Req,
+ Url = full_url(Req),
+ Headers = case couch_util:get_value(<<"oauth">>, Auth) of
+ undefined ->
+ Headers0;
+ {OAuthProps} ->
+ [oauth_header(Url, QS, Method, OAuthProps) | Headers0]
+ end,
+ Body = case B of
+ {Fun, InitialState} when is_function(Fun) ->
+ {Fun, InitialState};
+ nil ->
+ [];
+ _Else ->
+ iolist_to_binary(?JSON_ENCODE(B))
+ end,
+ Resp = case Conn of
+ nil ->
+ ibrowse:send_req(Url, Headers, Method, Body, Opts, infinity);
+ _ ->
+ ibrowse:send_req_direct(Conn, Url, Headers, Method, Body, Opts, infinity)
+ end,
+ process_response(Resp, Req).
+
+db_exists(Req) ->
+ db_exists(Req, Req#http_db.url).
+
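+% Note the overloaded second argument: db_exists/2 with a boolean treats it
+% as the CreateDB flag (the canonical URL then defaults to the request's own
+% URL); any other value is taken as the canonical URL itself.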
+db_exists(Req, true) ->
+ db_exists(Req, Req#http_db.url, true);
+
+db_exists(Req, false) ->
+ db_exists(Req, Req#http_db.url, false);
+
+db_exists(Req, CanonicalUrl) ->
+ db_exists(Req, CanonicalUrl, false).
+
+db_exists(Req, CanonicalUrl, CreateDB) ->
+ #http_db{
+ auth = Auth,
+ headers = Headers0,
+ options = Options,
+ url = Url
+ } = Req,
+ HeadersFun = fun(Method) ->
+ case couch_util:get_value(<<"oauth">>, Auth) of
+ undefined ->
+ Headers0;
+ {OAuthProps} ->
+ [oauth_header(Url, [], Method, OAuthProps) | Headers0]
+ end
+ end,
+ case CreateDB of
+ true ->
+ Headers = [{"Content-Length", 0} | HeadersFun(put)],
+ catch ibrowse:send_req(Url, Headers, put, [], Options);
+ _Else -> ok
+ end,
+ case catch ibrowse:send_req(Url, HeadersFun(head), head, [], Options) of
+ {ok, "200", _, _} ->
+ config_http(CanonicalUrl),
+ Req#http_db{url = CanonicalUrl};
+ {ok, "301", RespHeaders, _} ->
+ RedirectUrl = redirect_url(RespHeaders, Req#http_db.url),
+ db_exists(Req#http_db{url = RedirectUrl}, RedirectUrl);
+ {ok, "302", RespHeaders, _} ->
+ RedirectUrl = redirect_url(RespHeaders, Req#http_db.url),
+ db_exists(Req#http_db{url = RedirectUrl}, CanonicalUrl);
+ {ok, "303", RespHeaders, _} ->
+ RedirectUrl = redirect_url(RespHeaders, Req#http_db.url),
+ db_exists(Req#http_db{method = get, url = RedirectUrl}, CanonicalUrl);
+ {ok, "401", _, _} ->
+ throw({unauthorized, ?l2b(Url)});
+ Error ->
+ ?LOG_DEBUG("DB at ~s could not be found because ~p", [Url, Error]),
+ throw({db_not_found, ?l2b(Url)})
+ end.
+
+config_http(Url) ->
+ #url{host = Host, port = Port} = ibrowse_lib:parse_url(Url),
+ ok = ibrowse:set_max_sessions(Host, Port, list_to_integer(
+ couch_config:get("replicator", "max_http_sessions", "20"))),
+ ok = ibrowse:set_max_pipeline_size(Host, Port, list_to_integer(
+ couch_config:get("replicator", "max_http_pipeline_size", "50"))),
+ ok = couch_config:register(
+ fun("replicator", "max_http_sessions", MaxSessions) ->
+ ibrowse:set_max_sessions(Host, Port, list_to_integer(MaxSessions));
+ ("replicator", "max_http_pipeline_size", PipeSize) ->
+ ibrowse:set_max_pipeline_size(Host, Port, list_to_integer(PipeSize))
+ end).
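+% For reference, the settings consumed above map to ini entries like the
+% following (values shown are the defaults used above):
+%
+%   [replicator]
+%   max_http_sessions = 20
+%   max_http_pipeline_size = 50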
+
+redirect_url(RespHeaders, OrigUrl) ->
+ MochiHeaders = mochiweb_headers:make(RespHeaders),
+ RedUrl = mochiweb_headers:get_value("Location", MochiHeaders),
+ #url{
+ host = Host, host_type = HostType, port = Port,
+ path = Path, protocol = Proto
+ } = ibrowse_lib:parse_url(RedUrl),
+ #url{username = User, password = Passwd} = ibrowse_lib:parse_url(OrigUrl),
+ Creds = case is_list(User) andalso is_list(Passwd) of
+ true ->
+ User ++ ":" ++ Passwd ++ "@";
+ false ->
+ []
+ end,
+ HostPart = case HostType of
+ ipv6_address ->
+ "[" ++ Host ++ "]";
+ _ ->
+ Host
+ end,
+ atom_to_list(Proto) ++ "://" ++ Creds ++ HostPart ++ ":" ++
+ integer_to_list(Port) ++ Path.
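+% Illustrative example (hypothetical values): a redirect from
+% http://user:pass@origin/db with "Location: https://mirror:6984/db"
+% yields "https://user:pass@mirror:6984/db", carrying the original
+% credentials over to the redirected URL.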
+
+full_url(#http_db{url=Url} = Req) when is_binary(Url) ->
+ full_url(Req#http_db{url = ?b2l(Url)});
+
+full_url(#http_db{qs=[]} = Req) ->
+ Req#http_db.url ++ Req#http_db.resource;
+
+full_url(Req) ->
+ #http_db{
+ url = Url,
+ resource = Resource,
+ qs = QS
+ } = Req,
+ QStr = lists:map(fun({K,V}) -> io_lib:format("~s=~s",
+ [couch_util:to_list(K), couch_util:to_list(V)]) end, QS),
+ lists:flatten([Url, Resource, "?", string:join(QStr, "&")]).
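+% e.g. (illustrative) full_url(#http_db{url = "http://example.org/db/",
+% resource = "_changes", qs = [{limit, 1000}, {since, 42}]}) yields
+% "http://example.org/db/_changes?limit=1000&since=42".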
+
+process_response({ok, Status, Headers, Body}, Req) ->
+ Code = list_to_integer(Status),
+ if Code =:= 200; Code =:= 201 ->
+ ?JSON_DECODE(maybe_decompress(Headers, Body));
+ Code =:= 301; Code =:= 302 ; Code =:= 303 ->
+ do_request(redirected_request(Code, Headers, Req));
+ Code =:= 409 ->
+ throw(conflict);
+ Code >= 400, Code < 500 ->
+ ?JSON_DECODE(maybe_decompress(Headers, Body));
+ Code =:= 500; Code =:= 502; Code =:= 503 ->
+ #http_db{pause = Pause, retries = Retries} = Req,
+ ?LOG_INFO("retrying couch_rep_httpc request in ~p seconds " ++
+ % "due to remote server error: ~s~s", [Pause/1000, Req#http_db.url,
+ "due to remote server error: ~p Body ~s", [Pause/1000, Code,
+ Body]),
+ timer:sleep(Pause),
+ do_request(Req#http_db{retries = Retries-1, pause = 2*Pause});
+ true ->
+ exit({http_request_failed, ?l2b(["unhandled response code ", Status])})
+ end;
+
+process_response({ibrowse_req_id, Id}, _Req) ->
+ {ibrowse_req_id, Id};
+
+process_response({error, _Reason}, #http_db{url=Url, retries=0}) ->
+ ?LOG_ERROR("couch_rep_httpc request failed after 10 retries: ~s", [Url]),
+ exit({http_request_failed, ?l2b(["failed to replicate ", Url])});
+process_response({error, Reason}, Req) ->
+ #http_db{
+ method = Method,
+ retries = Retries,
+ pause = Pause
+ } = Req,
+ ShortReason = case Reason of
+ sel_conn_closed ->
+ connection_closed;
+ {'EXIT', {noproc, _}} ->
+ noproc;
+ {'EXIT', {normal, _}} ->
+ normal;
+ Else ->
+ Else
+ end,
+ ?LOG_DEBUG("retrying couch_rep_httpc ~p request in ~p seconds due to " ++
+ "{error, ~p}", [Method, Pause/1000, ShortReason]),
+ timer:sleep(Pause),
+ if Reason == worker_is_dead ->
+ C = spawn_link_worker_process(Req),
+ do_request(Req#http_db{retries = Retries-1, pause = 2*Pause, conn=C});
+ true ->
+ do_request(Req#http_db{retries = Retries-1, pause = 2*Pause})
+ end.
+
+redirected_request(Code, Headers, Req) ->
+ RedirectUrl = redirect_url(Headers, Req#http_db.url),
+ {Base, QStr, _} = mochiweb_util:urlsplit_path(RedirectUrl),
+ QS = mochiweb_util:parse_qs(QStr),
+ ReqHeaders = case couch_util:get_value(<<"oauth">>, Req#http_db.auth) of
+ undefined ->
+ Req#http_db.headers;
+ _Else ->
+ lists:keydelete("Authorization", 1, Req#http_db.headers)
+ end,
+ Req#http_db{
+ method = case couch_util:to_integer(Code) of
+ 303 -> get;
+ _ -> Req#http_db.method
+ end,
+ url = Base,
+ resource = "",
+ qs = QS,
+ headers = ReqHeaders
+ }.
+
+spawn_worker_process(Req) ->
+ Url = ibrowse_lib:parse_url(Req#http_db.url),
+ {ok, Pid} = ibrowse_http_client:start(Url),
+ Pid.
+
+spawn_link_worker_process(Req) ->
+ {ok, Pid} = ibrowse:spawn_link_worker_process(Req#http_db.url),
+ Pid.
+
+maybe_decompress(Headers, Body) ->
+ MochiHeaders = mochiweb_headers:make(Headers),
+ case mochiweb_headers:get_value("Content-Encoding", MochiHeaders) of
+ "gzip" ->
+ zlib:gunzip(Body);
+ _ ->
+ Body
+ end.
+
+oauth_header(Url, QS, Action, Props) ->
+ % erlang-oauth doesn't like iolists
+ QSL = [{couch_util:to_list(K), ?b2l(?l2b(couch_util:to_list(V)))} ||
+ {K,V} <- QS],
+ ConsumerKey = ?b2l(couch_util:get_value(<<"consumer_key">>, Props)),
+ Token = ?b2l(couch_util:get_value(<<"token">>, Props)),
+ TokenSecret = ?b2l(couch_util:get_value(<<"token_secret">>, Props)),
+ ConsumerSecret = ?b2l(couch_util:get_value(<<"consumer_secret">>, Props)),
+ SignatureMethodStr = ?b2l(couch_util:get_value(<<"signature_method">>, Props, <<"HMAC-SHA1">>)),
+ SignatureMethodAtom = case SignatureMethodStr of
+ "PLAINTEXT" ->
+ plaintext;
+ "HMAC-SHA1" ->
+ hmac_sha1;
+ "RSA-SHA1" ->
+ rsa_sha1
+ end,
+ Consumer = {ConsumerKey, ConsumerSecret, SignatureMethodAtom},
+ Method = case Action of
+ get -> "GET";
+ post -> "POST";
+ put -> "PUT";
+ head -> "HEAD"
+ end,
+ Params = oauth:signed_params(Method, Url, QSL, Consumer, Token, TokenSecret)
+ -- QSL,
+ {"Authorization", "OAuth " ++ oauth_uri:params_to_header_string(Params)}.
+
+ssl_options(#http_db{url = Url}) ->
+ case ibrowse_lib:parse_url(Url) of
+ #url{protocol = https} ->
+ Depth = list_to_integer(
+ couch_config:get("replicator", "ssl_certificate_max_depth", "3")
+ ),
+ SslOpts = [{depth, Depth} |
+ case couch_config:get("replicator", "verify_ssl_certificates") of
+ "true" ->
+ ssl_verify_options(true);
+ _ ->
+ ssl_verify_options(false)
+ end],
+ [{is_ssl, true}, {ssl_options, SslOpts}];
+ #url{protocol = http} ->
+ []
+ end.
+
+ssl_verify_options(Value) ->
+ ssl_verify_options(Value, erlang:system_info(otp_release)).
+
+ssl_verify_options(true, OTPVersion) when OTPVersion >= "R14" ->
+ CAFile = couch_config:get("replicator", "ssl_trusted_certificates_file"),
+ [{verify, verify_peer}, {cacertfile, CAFile}];
+ssl_verify_options(false, OTPVersion) when OTPVersion >= "R14" ->
+ [{verify, verify_none}];
+ssl_verify_options(true, _OTPVersion) ->
+ CAFile = couch_config:get("replicator", "ssl_trusted_certificates_file"),
+ [{verify, 2}, {cacertfile, CAFile}];
+ssl_verify_options(false, _OTPVersion) ->
+ [{verify, 0}].
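+% On OTP releases older than R14 the ssl application only understood the
+% numeric verification codes, where 0 corresponds to verify_none and 2 to
+% verify_peer, hence the two sets of clauses above.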
diff --git a/1.1.x/src/couchdb/couch_rep_missing_revs.erl b/1.1.x/src/couchdb/couch_rep_missing_revs.erl
new file mode 100644
index 00000000..9809ca5e
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_rep_missing_revs.erl
@@ -0,0 +1,198 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rep_missing_revs).
+-behaviour(gen_server).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([start_link/4, next/1, stop/1]).
+
+-define(BUFFER_SIZE, 1000).
+
+-include("couch_db.hrl").
+
+-record(state, {
+ changes_loop,
+ changes_from = nil,
+ parent,
+ complete = false,
+ count = 0,
+ reply_to = nil,
+ rows = queue:new(),
+ high_source_seq = 0,
+ high_missing_seq = 0,
+ high_committed_seq = 0
+}).
+
+start_link(Parent, Target, ChangesFeed, PostProps) ->
+ gen_server:start_link(?MODULE, [Parent, Target, ChangesFeed, PostProps], []).
+
+next(Server) ->
+ gen_server:call(Server, next_missing_revs, infinity).
+
+stop(Server) ->
+ gen_server:call(Server, stop).
+
+init([Parent, _Target, ChangesFeed, _PostProps]) ->
+ process_flag(trap_exit, true),
+ Self = self(),
+ Pid = spawn_link(fun() -> changes_loop(Self, ChangesFeed, Parent) end),
+ {ok, #state{changes_loop=Pid, parent=Parent}}.
+
+handle_call({add_missing_revs, {HighSeq, Revs}}, From, State) ->
+ State#state.parent ! {update_stats, missing_revs, length(Revs)},
+ handle_add_missing_revs(HighSeq, Revs, From, State);
+
+handle_call(next_missing_revs, From, State) ->
+ handle_next_missing_revs(From, State).
+
+handle_cast({update_committed_seq, N}, State) ->
+ if State#state.high_committed_seq < N ->
+ ?LOG_DEBUG("missing_revs updating committed seq to ~p", [N]);
+ true -> ok end,
+ {noreply, State#state{high_committed_seq=N}}.
+
+handle_info({'EXIT', Pid, Reason}, #state{changes_loop=Pid} = State) ->
+ handle_changes_loop_exit(Reason, State);
+
+handle_info(Msg, State) ->
+ ?LOG_INFO("unexpected message ~p", [Msg]),
+ {noreply, State}.
+
+terminate(_Reason, #state{changes_loop=Pid}) when is_pid(Pid) ->
+ exit(Pid, shutdown),
+ ok;
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%internal funs
+
+handle_add_missing_revs(HighSeq, [], _From, State) ->
+ NewState = State#state{high_source_seq=HighSeq},
+ maybe_checkpoint(NewState),
+ {reply, ok, NewState};
+handle_add_missing_revs(HighSeq, Revs, From, #state{reply_to=nil} = State) ->
+ #state{rows=Rows, count=Count} = State,
+ NewState = State#state{
+ rows = queue:join(Rows, queue:from_list(Revs)),
+ count = Count + length(Revs),
+ high_source_seq = HighSeq,
+ high_missing_seq = HighSeq
+ },
+ if NewState#state.count < ?BUFFER_SIZE ->
+ {reply, ok, NewState};
+ true ->
+ {noreply, NewState#state{changes_from=From}}
+ end;
+handle_add_missing_revs(HighSeq, Revs, _From, #state{count=0} = State) ->
+ gen_server:reply(State#state.reply_to, {HighSeq, Revs}),
+ NewState = State#state{
+ high_source_seq = HighSeq,
+ high_missing_seq = HighSeq,
+ reply_to = nil
+ },
+ {reply, ok, NewState}.
+
+handle_next_missing_revs(From, #state{count=0} = State) ->
+ if State#state.complete ->
+ {stop, normal, complete, State};
+ true ->
+ {noreply, State#state{reply_to=From}}
+ end;
+handle_next_missing_revs(_From, State) ->
+ #state{
+ changes_from = ChangesFrom,
+ high_missing_seq = HighSeq,
+ rows = Rows
+ } = State,
+ if ChangesFrom =/= nil -> gen_server:reply(ChangesFrom, ok); true -> ok end,
+ NewState = State#state{count=0, changes_from=nil, rows=queue:new()},
+ {reply, {HighSeq, queue:to_list(Rows)}, NewState}.
+
+handle_changes_loop_exit(normal, State) ->
+ if State#state.reply_to =/= nil ->
+ gen_server:reply(State#state.reply_to, complete),
+ {stop, normal, State};
+ true ->
+ {noreply, State#state{complete=true, changes_loop=nil}}
+ end;
+handle_changes_loop_exit(Reason, State) ->
+ {stop, Reason, State#state{changes_loop=nil}}.
+
+changes_loop(OurServer, SourceChangesServer, Parent) ->
+ case couch_rep_changes_feed:next(SourceChangesServer) of
+ complete ->
+ exit(normal);
+ Changes ->
+ {ok, Target} = gen_server:call(Parent, get_target_db, infinity),
+ MissingRevs = get_missing_revs(Target, Changes),
+ gen_server:call(OurServer, {add_missing_revs, MissingRevs}, infinity)
+ end,
+ changes_loop(OurServer, SourceChangesServer, Parent).
+
+get_missing_revs(#http_db{}=Target, Changes) ->
+ Transform = fun({Props}) ->
+ C = couch_util:get_value(<<"changes">>, Props),
+ Id = couch_util:get_value(<<"id">>, Props),
+ {Id, [R || {[{<<"rev">>, R}]} <- C]}
+ end,
+ IdRevsList = [Transform(Change) || Change <- Changes],
+ SeqDict = changes_dictionary(Changes),
+ {LastProps} = lists:last(Changes),
+ HighSeq = couch_util:get_value(<<"seq">>, LastProps),
+ Request = Target#http_db{
+ resource = "_missing_revs",
+ method = post,
+ body = {IdRevsList}
+ },
+ {Resp} = couch_rep_httpc:request(Request),
+ case couch_util:get_value(<<"missing_revs">>, Resp) of
+ {MissingRevs} ->
+ X = [{Id, dict:fetch(Id, SeqDict), couch_doc:parse_revs(RevStrs)} ||
+ {Id,RevStrs} <- MissingRevs],
+ {HighSeq, X};
+ _ ->
+ exit({target_error, couch_util:get_value(<<"error">>, Resp)})
+ end;
+
+get_missing_revs(Target, Changes) ->
+ Transform = fun({Props}) ->
+ C = couch_util:get_value(<<"changes">>, Props),
+ Id = couch_util:get_value(<<"id">>, Props),
+ {Id, [couch_doc:parse_rev(R) || {[{<<"rev">>, R}]} <- C]}
+ end,
+ IdRevsList = [Transform(Change) || Change <- Changes],
+ SeqDict = changes_dictionary(Changes),
+ {LastProps} = lists:last(Changes),
+ HighSeq = couch_util:get_value(<<"seq">>, LastProps),
+ {ok, Results} = couch_db:get_missing_revs(Target, IdRevsList),
+ {HighSeq, [{Id, dict:fetch(Id, SeqDict), Revs} || {Id, Revs, _} <- Results]}.
+
+changes_dictionary(ChangeList) ->
+ KVs = [{couch_util:get_value(<<"id">>,C), couch_util:get_value(<<"seq">>,C)}
+ || {C} <- ChangeList],
+ dict:from_list(KVs).
+
+%% save a checkpoint if no revs are missing on target so we don't
+%% rescan metadata unnecessarily
+maybe_checkpoint(#state{high_missing_seq=N, high_committed_seq=N} = State) ->
+ #state{
+ parent = Parent,
+ high_source_seq = SourceSeq
+ } = State,
+ Parent ! {missing_revs_checkpoint, SourceSeq};
+maybe_checkpoint(_State) ->
+ ok.
diff --git a/1.1.x/src/couchdb/couch_rep_reader.erl b/1.1.x/src/couchdb/couch_rep_reader.erl
new file mode 100644
index 00000000..0d344e5c
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_rep_reader.erl
@@ -0,0 +1,283 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rep_reader).
+-behaviour(gen_server).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([start_link/4, next/1]).
+
+-import(couch_util, [encode_doc_id/1]).
+
+-define(BUFFER_SIZE, 1000).
+-define(MAX_CONCURRENT_REQUESTS, 100).
+
+-include("couch_db.hrl").
+
+-record(state, {
+ parent,
+ source,
+ missing_revs,
+ reader_loop,
+ reader_from = [],
+ count = 0,
+ docs = queue:new(),
+ reply_to = nil,
+ complete = false,
+ monitor_count = 0,
+ pending_doc_request = nil,
+ requested_seqs = [],
+ opened_seqs = []
+}).
+
+start_link(Parent, Source, MissingRevs, PostProps) ->
+ gen_server:start_link(?MODULE, [Parent, Source, MissingRevs, PostProps], []).
+
+next(Pid) ->
+ gen_server:call(Pid, next_docs, infinity).
+
+init([Parent, Source, MissingRevs, _PostProps]) ->
+ process_flag(trap_exit, true),
+ Self = self(),
+ ReaderLoop = spawn_link(
+ fun() -> reader_loop(Self, Parent, Source, MissingRevs) end),
+ State = #state{
+ parent = Parent,
+ source = Source,
+ missing_revs = MissingRevs,
+ reader_loop = ReaderLoop
+ },
+ {ok, State}.
+
+handle_call({add_docs, Seq, Docs}, From, State) ->
+ State#state.parent ! {update_stats, docs_read, length(Docs)},
+ handle_add_docs(Seq, lists:flatten(Docs), From, State);
+
+handle_call({add_request_seqs, Seqs}, _From, State) ->
+ SeqList = State#state.requested_seqs,
+ {reply, ok, State#state{requested_seqs = lists:merge(Seqs, SeqList)}};
+
+handle_call(next_docs, From, State) ->
+ handle_next_docs(From, State);
+
+handle_call({open_remote_doc, Id, Seq, Revs}, From, State) ->
+ handle_open_remote_doc(Id, Seq, Revs, From, State).
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info({'DOWN', _, _, _, Reason}, State) ->
+ handle_monitor_down(Reason, State);
+
+handle_info({'EXIT', Loop, complete}, #state{reader_loop=Loop} = State) ->
+ handle_reader_loop_complete(State).
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%internal funs
+
+handle_add_docs(_Seq, [], _From, State) ->
+ {reply, ok, State};
+handle_add_docs(Seq, DocsToAdd, From, #state{reply_to=nil} = State) ->
+ State1 = update_sequence_lists(Seq, State),
+ NewState = State1#state{
+ docs = queue:join(State1#state.docs, queue:from_list(DocsToAdd)),
+ count = State1#state.count + length(DocsToAdd)
+ },
+ if NewState#state.count < ?BUFFER_SIZE ->
+ {reply, ok, NewState};
+ true ->
+ {noreply, NewState#state{reader_from=[From|State#state.reader_from]}}
+ end;
+handle_add_docs(Seq, DocsToAdd, _From, #state{count=0} = State) ->
+ NewState = update_sequence_lists(Seq, State),
+ HighSeq = calculate_new_high_seq(NewState),
+ gen_server:reply(State#state.reply_to, {HighSeq, DocsToAdd}),
+ {reply, ok, NewState#state{reply_to=nil}}.
+
+handle_next_docs(From, #state{count=0} = State) ->
+ if State#state.complete ->
+ {stop, normal, {complete, calculate_new_high_seq(State)}, State};
+ true ->
+ {noreply, State#state{reply_to=From}}
+ end;
+handle_next_docs(_From, State) ->
+ #state{
+ reader_from = ReaderFrom,
+ docs = Docs
+ } = State,
+ [gen_server:reply(F, ok) || F <- ReaderFrom],
+ NewState = State#state{count=0, reader_from=[], docs=queue:new()},
+ {reply, {calculate_new_high_seq(State), queue:to_list(Docs)}, NewState}.
+
+handle_open_remote_doc(Id, Seq, Revs, From, #state{monitor_count=N} = State)
+ when N > ?MAX_CONCURRENT_REQUESTS ->
+ {noreply, State#state{pending_doc_request={From,Id,Seq,Revs}}};
+handle_open_remote_doc(Id, Seq, Revs, _, #state{source=#http_db{}} = State) ->
+ #state{
+ monitor_count = Count,
+ source = Source
+ } = State,
+ {_, _Ref} = spawn_document_request(Source, Id, Seq, Revs),
+ {reply, ok, State#state{monitor_count = Count+1}}.
+
+handle_monitor_down(normal, #state{pending_doc_request=nil, reply_to=nil,
+ monitor_count=1, complete=waiting_on_monitors} = State) ->
+ {noreply, State#state{complete=true, monitor_count=0}};
+handle_monitor_down(normal, #state{pending_doc_request=nil, reply_to=From,
+ monitor_count=1, complete=waiting_on_monitors} = State) ->
+ gen_server:reply(From, {complete, calculate_new_high_seq(State)}),
+ {stop, normal, State#state{complete=true, monitor_count=0}};
+handle_monitor_down(normal, #state{pending_doc_request=nil} = State) ->
+ #state{monitor_count = Count} = State,
+ {noreply, State#state{monitor_count = Count-1}};
+handle_monitor_down(normal, State) ->
+ #state{
+ source = Source,
+ pending_doc_request = {From, Id, Seq, Revs}
+ } = State,
+ gen_server:reply(From, ok),
+ {_, _NewRef} = spawn_document_request(Source, Id, Seq, Revs),
+ {noreply, State#state{pending_doc_request=nil}};
+handle_monitor_down(Reason, State) ->
+ {stop, Reason, State}.
+
+handle_reader_loop_complete(#state{reply_to=nil, monitor_count=0} = State) ->
+ {noreply, State#state{complete = true}};
+handle_reader_loop_complete(#state{monitor_count=0} = State) ->
+ HighSeq = calculate_new_high_seq(State),
+ gen_server:reply(State#state.reply_to, {complete, HighSeq}),
+ {stop, normal, State};
+handle_reader_loop_complete(State) ->
+ {noreply, State#state{complete = waiting_on_monitors}}.
+
+calculate_new_high_seq(#state{requested_seqs=[], opened_seqs=[Open|_]}) ->
+ Open;
+calculate_new_high_seq(#state{requested_seqs=[Req|_], opened_seqs=[Open|_]})
+ when Req < Open ->
+ 0;
+calculate_new_high_seq(#state{opened_seqs=[]}) ->
+ 0;
+calculate_new_high_seq(State) ->
+ hd(State#state.opened_seqs).
+
+split_revlist(Rev, {[CurrentAcc|Rest], BaseLength, Length}) ->
+ case Length+size(Rev)+3 > 8192 of
+ false ->
+ {[[Rev|CurrentAcc] | Rest], BaseLength, Length+size(Rev)+3};
+ true ->
+ {[[Rev],CurrentAcc|Rest], BaseLength, BaseLength}
+ end.
+
+% We store outstanding requested sequences and a subset of already opened
+% sequences in 2 ordered lists. The subset of opened seqs is a) the largest
+% opened seq smaller than the smallest outstanding request seq plus b) all the
+% opened seqs greater than the smallest outstanding request. I believe it's the
+% minimal set of info needed to correctly calculate which seqs have been
+% replicated (because remote docs can be opened out-of-order) -- APK
+update_sequence_lists(Seq, State) ->
+ Requested = lists:delete(Seq, State#state.requested_seqs),
+ AllOpened = lists:merge([Seq], State#state.opened_seqs),
+ Opened = case Requested of
+ [] ->
+ [lists:last(AllOpened)];
+ [EarliestReq|_] ->
+ case lists:splitwith(fun(X) -> X < EarliestReq end, AllOpened) of
+ {[], Greater} ->
+ Greater;
+ {Less, Greater} ->
+ [lists:last(Less) | Greater]
+ end
+ end,
+ State#state{
+ requested_seqs = Requested,
+ opened_seqs = Opened
+ }.
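+% Worked example for update_sequence_lists/2 and calculate_new_high_seq/1
+% (illustrative): with requested_seqs = [3,5,7] and opened_seqs = [2],
+% opening seq 5 gives requested_seqs = [3,7] and opened_seqs = [2,5];
+% calculate_new_high_seq/1 still reports 2 because seq 3 is outstanding.
+% After 3 and 7 are opened the subset collapses to [7] and the reported
+% high seq advances to 7.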
+
+open_doc_revs(#http_db{url = Url} = DbS, DocId, Revs) ->
+ %% all this logic just splits up revision lists that are too long for
+ %% MochiWeb into multiple requests
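+    %% (split_revlist/2 above caps each URL at roughly 8192 bytes, which we
+    %% take to be MochiWeb's request-line limit)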
+ BaseQS = [{revs,true}, {latest,true}, {att_encoding_info,true}],
+ BaseReq = DbS#http_db{resource=encode_doc_id(DocId), qs=BaseQS},
+ BaseLength = length(couch_rep_httpc:full_url(BaseReq) ++ "&open_revs=[]"),
+
+ {RevLists, _, _} = lists:foldl(fun split_revlist/2,
+ {[[]], BaseLength, BaseLength}, couch_doc:revs_to_strs(Revs)),
+
+ Requests = [BaseReq#http_db{
+ qs = [{open_revs, ?JSON_ENCODE(RevList)} | BaseQS]
+ } || RevList <- RevLists],
+ JsonResults = lists:flatten([couch_rep_httpc:request(R) || R <- Requests]),
+
+ Transform =
+ fun({[{<<"ok">>, Json}]}, Acc) ->
+ #doc{id=Id, revs=Rev, atts=Atts} = Doc = couch_doc:from_json_obj(Json),
+ Doc1 = Doc#doc{
+ atts=[couch_rep_att:convert_stub(A, {DbS,Id,Rev}) || A <- Atts]
+ },
+ [Doc1 | Acc];
+ ({ErrorProps}, Acc) ->
+ Err = couch_util:get_value(<<"error">>, ErrorProps,
+ ?JSON_ENCODE({ErrorProps})),
+ ?LOG_ERROR("Replicator: error accessing doc ~s at ~s, reason: ~s",
+ [DocId, couch_util:url_strip_password(Url), Err]),
+ Acc
+ end,
+ lists:reverse(lists:foldl(Transform, [], JsonResults)).
+
+reader_loop(ReaderServer, Parent, Source, MissingRevsServer) ->
+ case couch_rep_missing_revs:next(MissingRevsServer) of
+ complete ->
+ exit(complete);
+ {HighSeq, IdsRevs} ->
+ % to be safe, make sure Results are sorted by source_seq
+ SortedIdsRevs = lists:keysort(2, IdsRevs),
+ RequestSeqs = [S || {_,S,_} <- SortedIdsRevs],
+ gen_server:call(ReaderServer, {add_request_seqs, RequestSeqs}, infinity),
+ case Source of
+ #http_db{} ->
+ [gen_server:call(ReaderServer, {open_remote_doc, Id, Seq, Revs},
+ infinity) || {Id,Seq,Revs} <- SortedIdsRevs],
+ reader_loop(ReaderServer, Parent, Source, MissingRevsServer);
+ _Local ->
+ {ok, Source1} = gen_server:call(Parent, get_source_db, infinity),
+ Source2 = maybe_reopen_db(Source1, HighSeq),
+ lists:foreach(fun({Id,Seq,Revs}) ->
+ {ok, Docs} = couch_db:open_doc_revs(Source2, Id, Revs, [latest]),
+ JustTheDocs = [Doc || {ok, Doc} <- Docs],
+ gen_server:call(ReaderServer, {add_docs, Seq, JustTheDocs},
+ infinity)
+ end, SortedIdsRevs),
+ couch_db:close(Source2),
+ reader_loop(ReaderServer, Parent, Source2, MissingRevsServer)
+ end
+ end.
+
+maybe_reopen_db(#db{update_seq=OldSeq} = Db, HighSeq) when HighSeq > OldSeq ->
+ {ok, NewDb} = couch_db:open(Db#db.name, [{user_ctx, Db#db.user_ctx}]),
+ NewDb;
+maybe_reopen_db(Db, _HighSeq) ->
+ Db.
+
+spawn_document_request(Source, Id, Seq, Revs) ->
+ Server = self(),
+ SpawnFun = fun() ->
+ Results = open_doc_revs(Source, Id, Revs),
+ gen_server:call(Server, {add_docs, Seq, Results}, infinity)
+ end,
+ spawn_monitor(SpawnFun).
diff --git a/1.1.x/src/couchdb/couch_rep_sup.erl b/1.1.x/src/couchdb/couch_rep_sup.erl
new file mode 100644
index 00000000..1318c598
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_rep_sup.erl
@@ -0,0 +1,31 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rep_sup).
+-behaviour(supervisor).
+-export([init/1, start_link/0]).
+
+-include("couch_db.hrl").
+
+start_link() ->
+ supervisor:start_link({local,?MODULE}, ?MODULE, []).
+
+%%=============================================================================
+%% supervisor callbacks
+%%=============================================================================
+
+init([]) ->
+ {ok, {{one_for_one, 3, 10}, []}}.
+
+%%=============================================================================
+%% internal functions
+%%=============================================================================
diff --git a/1.1.x/src/couchdb/couch_rep_writer.erl b/1.1.x/src/couchdb/couch_rep_writer.erl
new file mode 100644
index 00000000..12d6dec5
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_rep_writer.erl
@@ -0,0 +1,165 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rep_writer).
+
+-export([start_link/4]).
+
+-include("couch_db.hrl").
+
+start_link(Parent, _Target, Reader, _PostProps) ->
+ {ok, spawn_link(fun() -> writer_loop(Parent, Reader) end)}.
+
+writer_loop(Parent, Reader) ->
+ case couch_rep_reader:next(Reader) of
+ {complete, FinalSeq} ->
+ Parent ! {writer_checkpoint, FinalSeq},
+ ok;
+ {HighSeq, Docs} ->
+ DocCount = length(Docs),
+ {ok, Target} = gen_server:call(Parent, get_target_db, infinity),
+ try write_docs(Target, Docs) of
+ {ok, []} ->
+ Parent ! {update_stats, docs_written, DocCount};
+ {ok, Errors} ->
+ ErrorCount = length(Errors),
+ Parent ! {update_stats, doc_write_failures, ErrorCount},
+ Parent ! {update_stats, docs_written, DocCount - ErrorCount}
+ catch
+ {attachment_request_failed, Err} ->
+ ?LOG_DEBUG("writer failed to write an attachment ~p", [Err]),
+ exit({attachment_request_failed, Err, Docs})
+ end,
+ Parent ! {writer_checkpoint, HighSeq},
+ couch_rep_att:cleanup(),
+ couch_util:should_flush(),
+ writer_loop(Parent, Reader)
+ end.
+
+write_docs(#http_db{} = Db, Docs) ->
+ {DocsAtts, DocsNoAtts} = lists:partition(
+ fun(#doc{atts=[]}) -> false; (_) -> true end,
+ Docs
+ ),
+ ErrorsJson0 = write_bulk_docs(Db, DocsNoAtts),
+ ErrorsJson = lists:foldl(
+ fun(Doc, Acc) -> write_multi_part_doc(Db, Doc) ++ Acc end,
+ ErrorsJson0,
+ DocsAtts
+ ),
+ {ok, ErrorsJson};
+write_docs(Db, Docs) ->
+ couch_db:update_docs(Db, Docs, [delay_commit], replicated_changes).
+
+write_bulk_docs(_Db, []) ->
+ [];
+write_bulk_docs(#http_db{headers = Headers} = Db, Docs) ->
+ JsonDocs = [
+ couch_doc:to_json_obj(Doc, [revs, att_gzip_length]) || Doc <- Docs
+ ],
+ Request = Db#http_db{
+ resource = "_bulk_docs",
+ method = post,
+ body = {[{new_edits, false}, {docs, JsonDocs}]},
+        headers = couch_util:proplist_apply_field(
+            {"Content-Type", "application/json"},
+            [{"X-Couch-Full-Commit", "false"} | Headers]
+        )
+ },
+ ErrorsJson = case couch_rep_httpc:request(Request) of
+ {FailProps} ->
+ exit({target_error, couch_util:get_value(<<"error">>, FailProps)});
+ List when is_list(List) ->
+ List
+ end,
+ [write_docs_1(V) || V <- ErrorsJson].
+
+write_multi_part_doc(#http_db{headers=Headers} = Db, #doc{atts=Atts} = Doc) ->
+ JsonBytes = ?JSON_ENCODE(
+ couch_doc:to_json_obj(
+ Doc,
+ [follows, att_encoding_info, attachments]
+ )
+ ),
+ Boundary = couch_uuids:random(),
+ {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
+ Boundary, JsonBytes, Atts, true
+ ),
+ StreamerPid = spawn_link(
+ fun() -> streamer_fun(Boundary, JsonBytes, Atts) end
+ ),
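+    % ibrowse calls BodyFun repeatedly, first with the initial state (nil,
+    % see body = {BodyFun, nil} below) and then with whatever state we
+    % return, until it gets eof.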
+ BodyFun = fun(Acc) ->
+ DataQueue = case Acc of
+ nil ->
+ StreamerPid ! {start, self()},
+ receive
+ {queue, Q} ->
+ Q
+ end;
+ Queue ->
+ Queue
+ end,
+ case couch_work_queue:dequeue(DataQueue) of
+ closed ->
+ eof;
+ {ok, Data} ->
+ {ok, iolist_to_binary(Data), DataQueue}
+ end
+ end,
+ Request = Db#http_db{
+ resource = couch_util:encode_doc_id(Doc),
+ method = put,
+ qs = [{new_edits, false}],
+ body = {BodyFun, nil},
+ headers = [
+ {"x-couch-full-commit", "false"},
+ {"Content-Type", ?b2l(ContentType)},
+ {"Content-Length", Len} | Headers
+ ]
+ },
+ Result = case couch_rep_httpc:request(Request) of
+ {[{<<"error">>, Error}, {<<"reason">>, Reason}]} ->
+ {Pos, [RevId | _]} = Doc#doc.revs,
+ ErrId = couch_util:to_existing_atom(Error),
+        [{{Doc#doc.id, couch_doc:rev_to_str({Pos, RevId})}, {ErrId, Reason}}];
+ _ ->
+ []
+ end,
+ StreamerPid ! stop,
+ Result.
+
+streamer_fun(Boundary, JsonBytes, Atts) ->
+ receive
+ stop ->
+ ok;
+ {start, From} ->
+        % Better to use a brand-new queue, to ensure there's no garbage from
+        % a previous (failed) iteration.
+ {ok, DataQueue} = couch_work_queue:new(
+ [{max_size, 1024 * 1024}, {max_items, 1000}]),
+ From ! {queue, DataQueue},
+ couch_doc:doc_to_multi_part_stream(
+ Boundary,
+ JsonBytes,
+ Atts,
+ fun(Data) ->
+ couch_work_queue:queue(DataQueue, Data)
+ end,
+ true
+ ),
+ couch_work_queue:close(DataQueue),
+ streamer_fun(Boundary, JsonBytes, Atts)
+ end.
+
+write_docs_1({Props}) ->
+ Id = couch_util:get_value(<<"id">>, Props),
+ Rev = couch_doc:parse_rev(couch_util:get_value(<<"rev">>, Props)),
+ ErrId = couch_util:to_existing_atom(couch_util:get_value(<<"error">>, Props)),
+ Reason = couch_util:get_value(<<"reason">>, Props),
+ {{Id, Rev}, {ErrId, Reason}}.
diff --git a/1.1.x/src/couchdb/couch_replication_manager.erl b/1.1.x/src/couchdb/couch_replication_manager.erl
new file mode 100644
index 00000000..6101c9c5
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_replication_manager.erl
@@ -0,0 +1,383 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replication_manager).
+-behaviour(gen_server).
+
+-export([start_link/0, init/1, handle_call/3, handle_info/2, handle_cast/2]).
+-export([code_change/3, terminate/2]).
+
+-include("couch_db.hrl").
+
+-define(DOC_ID_TO_REP_ID, rep_doc_id_to_rep_id).
+-define(REP_ID_TO_DOC_ID, rep_id_to_rep_doc_id).
+-define(INITIAL_WAIT, 5).
+
+-record(state, {
+ changes_feed_loop = nil,
+ db_notifier = nil,
+ rep_db_name = nil,
+ rep_start_pids = [],
+ max_retries
+}).
+
+-import(couch_util, [
+ get_value/2,
+ get_value/3
+]).
+
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+init(_) ->
+ process_flag(trap_exit, true),
+ _ = ets:new(?DOC_ID_TO_REP_ID, [named_table, set, protected]),
+ _ = ets:new(?REP_ID_TO_DOC_ID, [named_table, set, private]),
+ Server = self(),
+ ok = couch_config:register(
+ fun("replicator", "db", NewName) ->
+ ok = gen_server:cast(Server, {rep_db_changed, ?l2b(NewName)});
+ ("replicator", "max_replication_retry_count", NewMaxRetries1) ->
+ NewMaxRetries = list_to_integer(NewMaxRetries1),
+ ok = gen_server:cast(Server, {set_max_retries, NewMaxRetries})
+ end
+ ),
+ {Loop, RepDbName} = changes_feed_loop(),
+ {ok, #state{
+ changes_feed_loop = Loop,
+ rep_db_name = RepDbName,
+ db_notifier = db_update_notifier(),
+ max_retries = list_to_integer(
+ couch_config:get("replicator", "max_replication_retry_count", "10"))
+ }}.
+
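+% The two settings watched above live in the [replicator] section of the
+% ini files, e.g. (defaults shown):
+%
+%   [replicator]
+%   db = _replicator
+%   max_replication_retry_count = 10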
+
+handle_call({rep_db_update, Change}, _From, State) ->
+ {reply, ok, process_update(State, Change)};
+
+handle_call({triggered, {BaseId, _}}, _From, State) ->
+ [{BaseId, {DocId, true}}] = ets:lookup(?REP_ID_TO_DOC_ID, BaseId),
+ true = ets:insert(?REP_ID_TO_DOC_ID, {BaseId, {DocId, false}}),
+ {reply, ok, State};
+
+handle_call({restart_failure, {Props} = RepDoc, Error}, _From, State) ->
+ DocId = get_value(<<"_id">>, Props),
+ [{DocId, {{BaseId, _} = RepId, MaxRetries}}] = ets:lookup(
+ ?DOC_ID_TO_REP_ID, DocId),
+ ?LOG_ERROR("Failed to start replication `~s` after ~p attempts using "
+ "the document `~s`. Last error reason was: ~p",
+ [pp_rep_id(RepId), MaxRetries, DocId, Error]),
+ couch_rep:update_rep_doc(
+ RepDoc,
+ [{<<"_replication_state">>, <<"error">>},
+ {<<"_replication_id">>, ?l2b(BaseId)}]),
+ true = ets:delete(?REP_ID_TO_DOC_ID, BaseId),
+ true = ets:delete(?DOC_ID_TO_REP_ID, DocId),
+ {reply, ok, State};
+
+handle_call(Msg, From, State) ->
+ ?LOG_ERROR("Replication manager received unexpected call ~p from ~p",
+ [Msg, From]),
+ {stop, {error, {unexpected_call, Msg}}, State}.
+
+
+handle_cast({rep_db_changed, NewName}, #state{rep_db_name = NewName} = State) ->
+ {noreply, State};
+
+handle_cast({rep_db_changed, _NewName}, State) ->
+ {noreply, restart(State)};
+
+handle_cast({rep_db_created, NewName}, #state{rep_db_name = NewName} = State) ->
+ {noreply, State};
+
+handle_cast({rep_db_created, _NewName}, State) ->
+ {noreply, restart(State)};
+
+handle_cast({set_max_retries, MaxRetries}, State) ->
+ {noreply, State#state{max_retries = MaxRetries}};
+
+handle_cast(Msg, State) ->
+ ?LOG_ERROR("Replication manager received unexpected cast ~p", [Msg]),
+ {stop, {error, {unexpected_cast, Msg}}, State}.
+
+
+handle_info({'EXIT', From, normal}, #state{changes_feed_loop = From} = State) ->
+ % replicator DB deleted
+ {noreply, State#state{changes_feed_loop = nil, rep_db_name = nil}};
+
+handle_info({'EXIT', From, Reason}, #state{db_notifier = From} = State) ->
+ ?LOG_ERROR("Database update notifier died. Reason: ~p", [Reason]),
+ {stop, {db_update_notifier_died, Reason}, State};
+
+handle_info({'EXIT', From, normal}, #state{rep_start_pids = Pids} = State) ->
+ % one of the replication start processes terminated successfully
+ {noreply, State#state{rep_start_pids = Pids -- [From]}};
+
+handle_info(Msg, State) ->
+ ?LOG_ERROR("Replication manager received unexpected message ~p", [Msg]),
+ {stop, {unexpected_msg, Msg}, State}.
+
+
+terminate(_Reason, State) ->
+ #state{
+ rep_start_pids = StartPids,
+ changes_feed_loop = Loop,
+ db_notifier = Notifier
+ } = State,
+ stop_all_replications(),
+ lists:foreach(
+ fun(Pid) ->
+ catch unlink(Pid),
+ catch exit(Pid, stop)
+ end,
+ [Loop | StartPids]),
+ true = ets:delete(?REP_ID_TO_DOC_ID),
+ true = ets:delete(?DOC_ID_TO_REP_ID),
+ couch_db_update_notifier:stop(Notifier).
+
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+changes_feed_loop() ->
+ {ok, RepDb} = couch_rep:ensure_rep_db_exists(),
+ Server = self(),
+ Pid = spawn_link(
+ fun() ->
+ ChangesFeedFun = couch_changes:handle_changes(
+ #changes_args{
+ include_docs = true,
+ feed = "continuous",
+ timeout = infinity,
+ db_open_options = [sys_db]
+ },
+ {json_req, null},
+ RepDb
+ ),
+ ChangesFeedFun(
+ fun({change, Change, _}, _) ->
+ case has_valid_rep_id(Change) of
+ true ->
+ ok = gen_server:call(
+ Server, {rep_db_update, Change}, infinity);
+ false ->
+ ok
+ end;
+ (_, _) ->
+ ok
+ end
+ )
+ end
+ ),
+ couch_db:close(RepDb),
+ {Pid, couch_db:name(RepDb)}.
+
+
+has_valid_rep_id({Change}) ->
+ has_valid_rep_id(get_value(<<"id">>, Change));
+has_valid_rep_id(<<?DESIGN_DOC_PREFIX, _Rest/binary>>) ->
+ false;
+has_valid_rep_id(_Else) ->
+ true.
+
+
+db_update_notifier() ->
+ Server = self(),
+ {ok, Notifier} = couch_db_update_notifier:start_link(
+ fun({created, DbName}) ->
+ case ?l2b(couch_config:get("replicator", "db", "_replicator")) of
+ DbName ->
+ ok = gen_server:cast(Server, {rep_db_created, DbName});
+ _ ->
+ ok
+ end;
+ (_) ->
+ % no need to handle the 'deleted' event - the changes feed loop
+ % dies when the database is deleted
+ ok
+ end
+ ),
+ Notifier.
+
+
+restart(#state{changes_feed_loop = Loop, rep_start_pids = StartPids} = State) ->
+ stop_all_replications(),
+ lists:foreach(
+ fun(Pid) ->
+ catch unlink(Pid),
+ catch exit(Pid, rep_db_changed)
+ end,
+ [Loop | StartPids]),
+ {NewLoop, NewRepDbName} = changes_feed_loop(),
+ State#state{
+ changes_feed_loop = NewLoop,
+ rep_db_name = NewRepDbName,
+ rep_start_pids = []
+ }.
+
+
+process_update(State, {Change}) ->
+ {RepProps} = JsonRepDoc = get_value(doc, Change),
+ DocId = get_value(<<"_id">>, RepProps),
+ case get_value(<<"deleted">>, Change, false) of
+ true ->
+ rep_doc_deleted(DocId),
+ State;
+ false ->
+ case get_value(<<"_replication_state">>, RepProps) of
+ <<"completed">> ->
+ replication_complete(DocId),
+ State;
+ <<"error">> ->
+ stop_replication(DocId),
+ State;
+ <<"triggered">> ->
+ maybe_start_replication(State, DocId, JsonRepDoc);
+ undefined ->
+ maybe_start_replication(State, DocId, JsonRepDoc)
+ end
+ end.
+
+
+rep_user_ctx({RepDoc}) ->
+ case get_value(<<"user_ctx">>, RepDoc) of
+ undefined ->
+ #user_ctx{roles = [<<"_admin">>]};
+ {UserCtx} ->
+ #user_ctx{
+ name = get_value(<<"name">>, UserCtx, null),
+ roles = get_value(<<"roles">>, UserCtx, [])
+ }
+ end.
+
+
+maybe_start_replication(#state{max_retries = MaxRetries} = State,
+ DocId, JsonRepDoc) ->
+ UserCtx = rep_user_ctx(JsonRepDoc),
+ {BaseId, _} = RepId = couch_rep:make_replication_id(JsonRepDoc, UserCtx),
+ case ets:lookup(?REP_ID_TO_DOC_ID, BaseId) of
+ [] ->
+ true = ets:insert(?REP_ID_TO_DOC_ID, {BaseId, {DocId, true}}),
+ true = ets:insert(?DOC_ID_TO_REP_ID, {DocId, {RepId, MaxRetries}}),
+ Server = self(),
+ Pid = spawn_link(fun() ->
+ start_replication(Server, JsonRepDoc, RepId, UserCtx, MaxRetries)
+ end),
+ State#state{rep_start_pids = [Pid | State#state.rep_start_pids]};
+ [{BaseId, {DocId, _}}] ->
+ State;
+ [{BaseId, {OtherDocId, false}}] ->
+ ?LOG_INFO("The replication specified by the document `~s` was already"
+ " triggered by the document `~s`", [DocId, OtherDocId]),
+ maybe_tag_rep_doc(JsonRepDoc, ?l2b(BaseId)),
+ State;
+ [{BaseId, {OtherDocId, true}}] ->
+ ?LOG_INFO("The replication specified by the document `~s` is already"
+ " being triggered by the document `~s`", [DocId, OtherDocId]),
+ maybe_tag_rep_doc(JsonRepDoc, ?l2b(BaseId)),
+ State
+ end.
+
+
+maybe_tag_rep_doc({Props} = JsonRepDoc, RepId) ->
+ case get_value(<<"_replication_id">>, Props) of
+ RepId ->
+ ok;
+ _ ->
+ couch_rep:update_rep_doc(JsonRepDoc, [{<<"_replication_id">>, RepId}])
+ end.
+
+
+start_replication(Server, {RepProps} = RepDoc, RepId, UserCtx, MaxRetries) ->
+ case (catch couch_rep:start_replication(RepDoc, RepId, UserCtx)) of
+ Pid when is_pid(Pid) ->
+ ?LOG_INFO("Document `~s` triggered replication `~s`",
+ [get_value(<<"_id">>, RepProps), pp_rep_id(RepId)]),
+ ok = gen_server:call(Server, {triggered, RepId}, infinity),
+ couch_rep:get_result(Pid, RepId, RepDoc, UserCtx);
+ Error ->
+ keep_retrying(
+ Server, RepId, RepDoc, UserCtx, Error, ?INITIAL_WAIT, MaxRetries)
+ end.
+
+
+keep_retrying(Server, _RepId, RepDoc, _UserCtx, Error, _Wait, 0) ->
+ ok = gen_server:call(Server, {restart_failure, RepDoc, Error}, infinity);
+
+keep_retrying(Server, RepId, RepDoc, UserCtx, Error, Wait, RetriesLeft) ->
+ {RepProps} = RepDoc,
+ DocId = get_value(<<"_id">>, RepProps),
+ ?LOG_ERROR("Error starting replication `~s` (document `~s`): ~p. "
+ "Retrying in ~p seconds", [pp_rep_id(RepId), DocId, Error, Wait]),
+ ok = timer:sleep(Wait * 1000),
+ case (catch couch_rep:start_replication(RepDoc, RepId, UserCtx)) of
+ Pid when is_pid(Pid) ->
+ ok = gen_server:call(Server, {triggered, RepId}, infinity),
+ [{DocId, {RepId, MaxRetries}}] = ets:lookup(?DOC_ID_TO_REP_ID, DocId),
+ ?LOG_INFO("Document `~s` triggered replication `~s` after ~p attempts",
+ [DocId, pp_rep_id(RepId), MaxRetries - RetriesLeft + 1]),
+ couch_rep:get_result(Pid, RepId, RepDoc, UserCtx);
+ NewError ->
+ keep_retrying(
+ Server, RepId, RepDoc, UserCtx, NewError, Wait * 2, RetriesLeft - 1)
+ end.
+
+
+rep_doc_deleted(DocId) ->
+ case stop_replication(DocId) of
+ {ok, RepId} ->
+ ?LOG_INFO("Stopped replication `~s` because replication document `~s`"
+ " was deleted", [pp_rep_id(RepId), DocId]);
+ none ->
+ ok
+ end.
+
+
+replication_complete(DocId) ->
+ case stop_replication(DocId) of
+ {ok, RepId} ->
+ ?LOG_INFO("Replication `~s` finished (triggered by document `~s`)",
+ [pp_rep_id(RepId), DocId]);
+ none ->
+ ok
+ end.
+
+
+stop_replication(DocId) ->
+ case ets:lookup(?DOC_ID_TO_REP_ID, DocId) of
+ [{DocId, {{BaseId, _} = RepId, _MaxRetries}}] ->
+ couch_rep:end_replication(RepId),
+ true = ets:delete(?REP_ID_TO_DOC_ID, BaseId),
+ true = ets:delete(?DOC_ID_TO_REP_ID, DocId),
+ {ok, RepId};
+ [] ->
+ none
+ end.
+
+
+stop_all_replications() ->
+ ?LOG_INFO("Stopping all ongoing replications because the replicator"
+ " database was deleted or changed", []),
+ ets:foldl(
+ fun({_, {RepId, _}}, _) ->
+ couch_rep:end_replication(RepId)
+ end,
+ ok, ?DOC_ID_TO_REP_ID),
+ true = ets:delete_all_objects(?REP_ID_TO_DOC_ID),
+ true = ets:delete_all_objects(?DOC_ID_TO_REP_ID).
+
+
+% pretty-print replication id
+pp_rep_id({Base, Extension}) ->
+ Base ++ Extension.
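+% e.g. (illustrative) pp_rep_id({"a1b2c3", "+continuous"}) yields
+% "a1b2c3+continuous".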
diff --git a/1.1.x/src/couchdb/couch_server.erl b/1.1.x/src/couchdb/couch_server.erl
new file mode 100644
index 00000000..7870d69e
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_server.erl
@@ -0,0 +1,405 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_server).
+-behaviour(gen_server).
+
+-export([open/2,create/2,delete/2,all_databases/0,get_version/0]).
+-export([init/1, handle_call/3,sup_start_link/0]).
+-export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
+-export([dev_start/0,is_admin/2,has_admins/0,get_stats/0]).
+
+-include("couch_db.hrl").
+
+-record(server,{
+ root_dir = [],
+ dbname_regexp,
+ max_dbs_open=100,
+ dbs_open=0,
+ start_time=""
+ }).
+
+dev_start() ->
+ couch:stop(),
+ up_to_date = make:all([load, debug_info]),
+ couch:start().
+
+get_version() ->
+ Apps = application:loaded_applications(),
+ case lists:keysearch(couch, 1, Apps) of
+ {value, {_, _, Vsn}} ->
+ Vsn;
+ false ->
+ "0.0.0"
+ end.
+
+get_stats() ->
+ {ok, #server{start_time=Time,dbs_open=Open}} =
+ gen_server:call(couch_server, get_server),
+ [{start_time, ?l2b(Time)}, {dbs_open, Open}].
+
+sup_start_link() ->
+ gen_server:start_link({local, couch_server}, couch_server, [], []).
+
+open(DbName, Options) ->
+ case gen_server:call(couch_server, {open, DbName, Options}, infinity) of
+ {ok, Db} ->
+ Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
+ {ok, Db#db{user_ctx=Ctx}};
+ Error ->
+ Error
+ end.
+
+create(DbName, Options) ->
+ case gen_server:call(couch_server, {create, DbName, Options}, infinity) of
+ {ok, Db} ->
+ Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
+ {ok, Db#db{user_ctx=Ctx}};
+ Error ->
+ Error
+ end.
+
+delete(DbName, Options) ->
+ gen_server:call(couch_server, {delete, DbName, Options}, infinity).
+
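+% Database names must match the regexp compiled in init/1: a lowercase
+% letter followed by any mix of lowercase letters, digits and _$()+-/.
+% The system databases "_users" and "_replicator" are special-cased below.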
+check_dbname(#server{dbname_regexp=RegExp}, DbName) ->
+ case re:run(DbName, RegExp, [{capture, none}]) of
+ nomatch ->
+ case DbName of
+ "_users" -> ok;
+ "_replicator" -> ok;
+ _Else ->
+ {error, illegal_database_name}
+ end;
+ match ->
+ ok
+ end.
+
+is_admin(User, ClearPwd) ->
+ case couch_config:get("admins", User) of
+ "-hashed-" ++ HashedPwdAndSalt ->
+ [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
+ couch_util:to_hex(crypto:sha(ClearPwd ++ Salt)) == HashedPwd;
+ _Else ->
+ false
+ end.
+
+has_admins() ->
+ couch_config:get("admins") /= [].
+
+get_full_filename(Server, DbName) ->
+ filename:join([Server#server.root_dir, "./" ++ DbName ++ ".couch"]).
+
+hash_admin_passwords() ->
+ hash_admin_passwords(true).
+
+hash_admin_passwords(Persist) ->
+ lists:foreach(
+ fun({_User, "-hashed-" ++ _}) ->
+ ok; % already hashed
+ ({User, ClearPassword}) ->
+ Salt = ?b2l(couch_uuids:random()),
+ Hashed = couch_util:to_hex(crypto:sha(ClearPassword ++ Salt)),
+ couch_config:set("admins",
+ User, "-hashed-" ++ Hashed ++ "," ++ Salt, Persist)
+ end, couch_config:get("admins")).
+
+init([]) ->
+ % read config and register for configuration changes
+
+    % just stop if one of the config settings changes. couch_server_sup
+ % will restart us and then we will pick up the new settings.
+
+ RootDir = couch_config:get("couchdb", "database_dir", "."),
+ MaxDbsOpen = list_to_integer(
+ couch_config:get("couchdb", "max_dbs_open")),
+ Self = self(),
+ ok = couch_config:register(
+ fun("couchdb", "database_dir") ->
+ exit(Self, config_change)
+ end),
+ ok = couch_config:register(
+ fun("couchdb", "max_dbs_open", Max) ->
+ gen_server:call(couch_server,
+ {set_max_dbs_open, list_to_integer(Max)})
+ end),
+ ok = couch_file:init_delete_dir(RootDir),
+ hash_admin_passwords(),
+ ok = couch_config:register(
+ fun("admins", _Key, _Value, Persist) ->
+ % spawn here so couch_config doesn't try to call itself
+ spawn(fun() -> hash_admin_passwords(Persist) end)
+ end, false),
+ {ok, RegExp} = re:compile("^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$"),
+ ets:new(couch_dbs_by_name, [set, private, named_table]),
+ ets:new(couch_dbs_by_pid, [set, private, named_table]),
+ ets:new(couch_dbs_by_lru, [ordered_set, private, named_table]),
+ ets:new(couch_sys_dbs, [set, private, named_table]),
+ process_flag(trap_exit, true),
+ {ok, #server{root_dir=RootDir,
+ dbname_regexp=RegExp,
+ max_dbs_open=MaxDbsOpen,
+ start_time=httpd_util:rfc1123_date()}}.
+
+terminate(_Reason, _Srv) ->
+    % table values are {opened, Pid, LruTime} or {opening, Pid, Froms}, so
+    % match the pid out of the 3-tuple when shutting the databases down
+    lists:foreach(fun({_, {_Status, Pid, _}}) ->
+        couch_util:shutdown_sync(Pid)
+    end, ets:tab2list(couch_dbs_by_name)),
+    ok.
+
+all_databases() ->
+ {ok, #server{root_dir=Root}} = gen_server:call(couch_server, get_server),
+ NormRoot = couch_util:normpath(Root),
+ Filenames =
+ filelib:fold_files(Root, "^[a-z0-9\\_\\$()\\+\\-]*[\\.]couch$", true,
+ fun(Filename, AccIn) ->
+ NormFilename = couch_util:normpath(Filename),
+ case NormFilename -- NormRoot of
+ [$/ | RelativeFilename] -> ok;
+ RelativeFilename -> ok
+ end,
+ [list_to_binary(filename:rootname(RelativeFilename, ".couch")) | AccIn]
+ end, []),
+ {ok, lists:usort(Filenames)}.
+
+
+maybe_close_lru_db(#server{dbs_open=NumOpen, max_dbs_open=MaxOpen}=Server)
+ when NumOpen < MaxOpen ->
+ {ok, Server};
+maybe_close_lru_db(#server{dbs_open=NumOpen}=Server) ->
+ % must free up the lru db.
+ case try_close_lru(now()) of
+ ok ->
+ {ok, Server#server{dbs_open=NumOpen - 1}};
+ Error -> Error
+ end.
+
+try_close_lru(StartTime) ->
+ LruTime = get_lru(),
+ if LruTime > StartTime ->
+ % this means we've looped through all our opened dbs and found them
+ % all in use.
+ {error, all_dbs_active};
+ true ->
+ [{_, DbName}] = ets:lookup(couch_dbs_by_lru, LruTime),
+ [{_, {opened, MainPid, LruTime}}] = ets:lookup(couch_dbs_by_name, DbName),
+ case couch_db:is_idle(MainPid) of
+ true ->
+ ok = shutdown_idle_db(DbName, MainPid, LruTime);
+ false ->
+ % this still has referrers. Go ahead and give it a current lru time
+ % and try the next one in the table.
+ NewLruTime = now(),
+ true = ets:insert(couch_dbs_by_name, {DbName, {opened, MainPid, NewLruTime}}),
+ true = ets:insert(couch_dbs_by_pid, {MainPid, DbName}),
+ true = ets:delete(couch_dbs_by_lru, LruTime),
+ true = ets:insert(couch_dbs_by_lru, {NewLruTime, DbName}),
+ try_close_lru(StartTime)
+ end
+ end.
+
+get_lru() ->
+ get_lru(ets:first(couch_dbs_by_lru)).
+
+get_lru(LruTime) ->
+ [{LruTime, DbName}] = ets:lookup(couch_dbs_by_lru, LruTime),
+ case ets:member(couch_sys_dbs, DbName) of
+ false ->
+ LruTime;
+ true ->
+ [{_, {opened, MainPid, _}}] = ets:lookup(couch_dbs_by_name, DbName),
+ case couch_db:is_idle(MainPid) of
+ true ->
+ NextLru = ets:next(couch_dbs_by_lru, LruTime),
+ ok = shutdown_idle_db(DbName, MainPid, LruTime),
+ get_lru(NextLru);
+ false ->
+ get_lru(ets:next(couch_dbs_by_lru, LruTime))
+ end
+ end.
+
+shutdown_idle_db(DbName, MainPid, LruTime) ->
+ couch_util:shutdown_sync(MainPid),
+ true = ets:delete(couch_dbs_by_lru, LruTime),
+ true = ets:delete(couch_dbs_by_name, DbName),
+ true = ets:delete(couch_dbs_by_pid, MainPid),
+ true = ets:delete(couch_sys_dbs, DbName),
+ ok.
+
+open_async(Server, From, DbName, Filepath, Options) ->
+ Parent = self(),
+ Opener = spawn_link(fun() ->
+ Res = couch_db:start_link(DbName, Filepath, Options),
+ gen_server:call(
+ Parent, {open_result, DbName, Res, Options}, infinity
+ ),
+ unlink(Parent),
+ case Res of
+ {ok, DbReader} ->
+ unlink(DbReader);
+ _ ->
+ ok
+ end
+ end),
+ true = ets:insert(couch_dbs_by_name, {DbName, {opening, Opener, [From]}}),
+ true = ets:insert(couch_dbs_by_pid, {Opener, DbName}),
+ DbsOpen = case lists:member(sys_db, Options) of
+ true ->
+ true = ets:insert(couch_sys_dbs, {DbName, true}),
+ Server#server.dbs_open;
+ false ->
+ Server#server.dbs_open + 1
+ end,
+ Server#server{dbs_open = DbsOpen}.
+
+handle_call({set_max_dbs_open, Max}, _From, Server) ->
+ {reply, ok, Server#server{max_dbs_open=Max}};
+handle_call(get_server, _From, Server) ->
+ {reply, {ok, Server}, Server};
+handle_call({open_result, DbName, {ok, OpenedDbPid}, Options}, _From, Server) ->
+ link(OpenedDbPid),
+ [{DbName, {opening,Opener,Froms}}] = ets:lookup(couch_dbs_by_name, DbName),
+ lists:foreach(fun({FromPid,_}=From) ->
+ gen_server:reply(From,
+ catch couch_db:open_ref_counted(OpenedDbPid, FromPid))
+ end, Froms),
+ LruTime = now(),
+ true = ets:insert(couch_dbs_by_name,
+ {DbName, {opened, OpenedDbPid, LruTime}}),
+ true = ets:delete(couch_dbs_by_pid, Opener),
+ true = ets:insert(couch_dbs_by_pid, {OpenedDbPid, DbName}),
+ true = ets:insert(couch_dbs_by_lru, {LruTime, DbName}),
+ case lists:member(create, Options) of
+ true ->
+ couch_db_update_notifier:notify({created, DbName});
+ false ->
+ ok
+ end,
+ {reply, ok, Server};
+handle_call({open_result, DbName, Error, Options}, _From, Server) ->
+ [{DbName, {opening,Opener,Froms}}] = ets:lookup(couch_dbs_by_name, DbName),
+ lists:foreach(fun(From) ->
+ gen_server:reply(From, Error)
+ end, Froms),
+ true = ets:delete(couch_dbs_by_name, DbName),
+ true = ets:delete(couch_dbs_by_pid, Opener),
+ DbsOpen = case lists:member(sys_db, Options) of
+ true ->
+ true = ets:delete(couch_sys_dbs, DbName),
+ Server#server.dbs_open;
+ false ->
+ Server#server.dbs_open - 1
+ end,
+ {reply, ok, Server#server{dbs_open = DbsOpen}};
+handle_call({open, DbName, Options}, {FromPid,_}=From, Server) ->
+ LruTime = now(),
+ case ets:lookup(couch_dbs_by_name, DbName) of
+ [] ->
+ open_db(DbName, Server, Options, From);
+ [{_, {opening, Opener, Froms}}] ->
+ true = ets:insert(couch_dbs_by_name, {DbName, {opening, Opener, [From|Froms]}}),
+ {noreply, Server};
+ [{_, {opened, MainPid, PrevLruTime}}] ->
+ true = ets:insert(couch_dbs_by_name, {DbName, {opened, MainPid, LruTime}}),
+ true = ets:delete(couch_dbs_by_lru, PrevLruTime),
+ true = ets:insert(couch_dbs_by_lru, {LruTime, DbName}),
+ {reply, couch_db:open_ref_counted(MainPid, FromPid), Server}
+ end;
+handle_call({create, DbName, Options}, From, Server) ->
+ case ets:lookup(couch_dbs_by_name, DbName) of
+ [] ->
+ open_db(DbName, Server, [create | Options], From);
+ [_AlreadyRunningDb] ->
+ {reply, file_exists, Server}
+ end;
+handle_call({delete, DbName, _Options}, _From, Server) ->
+ DbNameList = binary_to_list(DbName),
+ case check_dbname(Server, DbNameList) of
+ ok ->
+ FullFilepath = get_full_filename(Server, DbNameList),
+ UpdateState =
+ case ets:lookup(couch_dbs_by_name, DbName) of
+ [] -> false;
+ [{_, {opening, Pid, Froms}}] ->
+ couch_util:shutdown_sync(Pid),
+ true = ets:delete(couch_dbs_by_name, DbName),
+ true = ets:delete(couch_dbs_by_pid, Pid),
+ [gen_server:reply(F, not_found) || F <- Froms],
+ true;
+ [{_, {opened, Pid, LruTime}}] ->
+ couch_util:shutdown_sync(Pid),
+ true = ets:delete(couch_dbs_by_name, DbName),
+ true = ets:delete(couch_dbs_by_pid, Pid),
+ true = ets:delete(couch_dbs_by_lru, LruTime),
+ true
+ end,
+ Server2 = case UpdateState of
+ true ->
+ DbsOpen = case ets:member(couch_sys_dbs, DbName) of
+ true ->
+ true = ets:delete(couch_sys_dbs, DbName),
+ Server#server.dbs_open;
+ false ->
+ Server#server.dbs_open - 1
+ end,
+ Server#server{dbs_open = DbsOpen};
+ false ->
+ Server
+ end,
+
+ %% Delete any leftover .compact files. If we don't do this a subsequent
+ %% request for this DB will try to open the .compact file and use it.
+ couch_file:delete(Server#server.root_dir, FullFilepath ++ ".compact"),
+
+ case couch_file:delete(Server#server.root_dir, FullFilepath) of
+ ok ->
+ couch_db_update_notifier:notify({deleted, DbName}),
+ {reply, ok, Server2};
+ {error, enoent} ->
+ {reply, not_found, Server2};
+ Else ->
+ {reply, Else, Server2}
+ end;
+ Error ->
+ {reply, Error, Server}
+ end.
+
+handle_cast(Msg, _Server) ->
+ exit({unknown_cast_message, Msg}).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_info({'EXIT', _Pid, config_change}, Server) ->
+    {stop, config_change, Server};
+handle_info(Error, _Server) ->
+ ?LOG_ERROR("Unexpected message, restarting couch_server: ~p", [Error]),
+ exit(kill).
+
+open_db(DbName, Server, Options, From) ->
+ DbNameList = binary_to_list(DbName),
+ case check_dbname(Server, DbNameList) of
+ ok ->
+ Filepath = get_full_filename(Server, DbNameList),
+ case lists:member(sys_db, Options) of
+ true ->
+ {noreply, open_async(Server, From, DbName, Filepath, Options)};
+ false ->
+ case maybe_close_lru_db(Server) of
+ {ok, Server2} ->
+ {noreply, open_async(Server2, From, DbName, Filepath, Options)};
+ CloseError ->
+ {reply, CloseError, Server}
+ end
+ end;
+ Error ->
+ {reply, Error, Server}
+ end.
diff --git a/1.1.x/src/couchdb/couch_server_sup.erl b/1.1.x/src/couchdb/couch_server_sup.erl
new file mode 100644
index 00000000..fafd83ed
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_server_sup.erl
@@ -0,0 +1,220 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_server_sup).
+-behaviour(supervisor).
+
+
+-export([start_link/1,stop/0, couch_config_start_link_wrapper/2,
+ start_primary_services/0,start_secondary_services/0,
+ restart_core_server/0]).
+
+-include("couch_db.hrl").
+
+%% supervisor callbacks
+-export([init/1]).
+
+start_link(IniFiles) ->
+ case whereis(couch_server_sup) of
+ undefined ->
+ start_server(IniFiles);
+ _Else ->
+ {error, already_started}
+ end.
+
+restart_core_server() ->
+ init:restart().
+
+couch_config_start_link_wrapper(IniFiles, FirstConfigPid) ->
+ case is_process_alive(FirstConfigPid) of
+ true ->
+ link(FirstConfigPid),
+ {ok, FirstConfigPid};
+ false -> couch_config:start_link(IniFiles)
+ end.
+
+start_server(IniFiles) ->
+ case init:get_argument(pidfile) of
+ {ok, [PidFile]} ->
+ case file:write_file(PidFile, os:getpid()) of
+ ok -> ok;
+ Error -> io:format("Failed to write PID file ~s, error: ~p", [PidFile, Error])
+ end;
+ _ -> ok
+ end,
+
+ {ok, ConfigPid} = couch_config:start_link(IniFiles),
+
+ LogLevel = couch_config:get("log", "level", "info"),
+ % announce startup
+ io:format("Apache CouchDB ~s (LogLevel=~s) is starting.~n", [
+ couch_server:get_version(),
+ LogLevel
+ ]),
+ case LogLevel of
+ "debug" ->
+ io:format("Configuration Settings ~p:~n", [IniFiles]),
+ [io:format(" [~s] ~s=~p~n", [Module, Variable, Value])
+ || {{Module, Variable}, Value} <- couch_config:all()];
+ _ -> ok
+ end,
+
+ LibDir =
+ case couch_config:get("couchdb", "util_driver_dir", null) of
+ null ->
+ filename:join(couch_util:priv_dir(), "lib");
+ LibDir0 -> LibDir0
+ end,
+
+ ok = couch_util:start_driver(LibDir),
+
+ BaseChildSpecs =
+ {{one_for_all, 10, 3600},
+ [{couch_config,
+ {couch_server_sup, couch_config_start_link_wrapper, [IniFiles, ConfigPid]},
+ permanent,
+ brutal_kill,
+ worker,
+ [couch_config]},
+ {couch_primary_services,
+ {couch_server_sup, start_primary_services, []},
+ permanent,
+ infinity,
+ supervisor,
+ [couch_server_sup]},
+ {couch_secondary_services,
+ {couch_server_sup, start_secondary_services, []},
+ permanent,
+ infinity,
+ supervisor,
+ [couch_server_sup]}
+ ]},
+
+ % ensure these applications are running
+ application:start(ibrowse),
+ application:start(crypto),
+
+ {ok, Pid} = supervisor:start_link(
+ {local, couch_server_sup}, couch_server_sup, BaseChildSpecs),
+
+ % launch the icu bridge
+    % just restart if one of the config settings changes.
+
+ couch_config:register(
+ fun("couchdb", "util_driver_dir") ->
+ ?MODULE:stop();
+ ("daemons", _) ->
+ ?MODULE:stop()
+ end, Pid),
+
+ unlink(ConfigPid),
+
+ Ip = couch_config:get("httpd", "bind_address"),
+ io:format("Apache CouchDB has started. Time to relax.~n"),
+ Uris = [get_uri(Name, Ip) || Name <- [couch_httpd, https]],
+ [begin
+ case Uri of
+ undefined -> ok;
+ Uri -> ?LOG_INFO("Apache CouchDB has started on ~s", [Uri])
+ end
+ end
+ || Uri <- Uris],
+ case couch_config:get("couchdb", "uri_file", null) of
+ null -> ok;
+ UriFile ->
+ Lines = [begin case Uri of
+ undefined -> [];
+ Uri -> io_lib:format("~s~n", [Uri])
+ end end || Uri <- Uris],
+ file:write_file(UriFile, Lines)
+ end,
+
+ {ok, Pid}.
+
+start_primary_services() ->
+ supervisor:start_link({local, couch_primary_services}, couch_server_sup,
+ {{one_for_one, 10, 3600},
+ [{couch_log,
+ {couch_log, start_link, []},
+ permanent,
+ brutal_kill,
+ worker,
+ [couch_log]},
+ {couch_replication_supervisor,
+ {couch_rep_sup, start_link, []},
+ permanent,
+ infinity,
+ supervisor,
+ [couch_rep_sup]},
+ {couch_task_status,
+ {couch_task_status, start_link, []},
+ permanent,
+ brutal_kill,
+ worker,
+ [couch_task_status]},
+ {couch_server,
+ {couch_server, sup_start_link, []},
+ permanent,
+ 1000,
+ worker,
+ [couch_server]},
+ {couch_db_update_event,
+ {gen_event, start_link, [{local, couch_db_update}]},
+ permanent,
+ brutal_kill,
+ worker,
+ dynamic}
+ ]
+ }).
+
+start_secondary_services() ->
+ DaemonChildSpecs = [
+ begin
+ {ok, {Module, Fun, Args}} = couch_util:parse_term(SpecStr),
+
+ {list_to_atom(Name),
+ {Module, Fun, Args},
+ permanent,
+ 1000,
+ worker,
+ [Module]}
+ end
+ || {Name, SpecStr}
+ <- couch_config:get("daemons"), SpecStr /= ""],
+
+ supervisor:start_link({local, couch_secondary_services}, couch_server_sup,
+ {{one_for_one, 10, 3600}, DaemonChildSpecs}).
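+
+% A sketch of what start_secondary_services/0 consumes: each value in the
+% [daemons] ini section must be an {M, F, A} term, parsed above with
+% couch_util:parse_term/1. An illustrative entry (the name is an example):
+%
+%   [daemons]
+%   stats_collector = {couch_stats_collector, start, []}
+%
+% couch_util:parse_term("{couch_stats_collector, start, []}") yields
+% {ok, {couch_stats_collector, start, []}}, which becomes the child's MFA.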
+
+stop() ->
+ catch exit(whereis(couch_server_sup), normal).
+
+init(ChildSpecs) ->
+ {ok, ChildSpecs}.
+
+get_uri(Name, Ip) ->
+ case get_port(Name) of
+ undefined ->
+ undefined;
+ Port ->
+ io_lib:format("~s://~s:~w/", [get_scheme(Name), Ip, Port])
+ end.
+
+get_scheme(couch_httpd) -> "http";
+get_scheme(https) -> "https".
+
+get_port(Name) ->
+ try
+ mochiweb_socket_server:get(Name, port)
+ catch
+ exit:{noproc, _}->
+ undefined
+ end.
diff --git a/1.1.x/src/couchdb/couch_stats_aggregator.erl b/1.1.x/src/couchdb/couch_stats_aggregator.erl
new file mode 100644
index 00000000..6090355d
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_stats_aggregator.erl
@@ -0,0 +1,297 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_stats_aggregator).
+-behaviour(gen_server).
+
+-export([start/0, start/1, stop/0]).
+-export([all/0, all/1, get/1, get/2, get_json/1, get_json/2, collect_sample/0]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-record(aggregate, {
+ description = <<"">>,
+ seconds = 0,
+ count = 0,
+ current = null,
+ sum = null,
+ mean = null,
+ variance = null,
+ stddev = null,
+ min = null,
+ max = null,
+ samples = []
+}).
+
+
+start() ->
+ PrivDir = couch_util:priv_dir(),
+ start(filename:join(PrivDir, "stat_descriptions.cfg")).
+
+start(FileName) ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [FileName], []).
+
+stop() ->
+ gen_server:cast(?MODULE, stop).
+
+all() ->
+ ?MODULE:all(0).
+all(Time) when is_binary(Time) ->
+ ?MODULE:all(list_to_integer(binary_to_list(Time)));
+all(Time) when is_atom(Time) ->
+ ?MODULE:all(list_to_integer(atom_to_list(Time)));
+all(Time) when is_integer(Time) ->
+ Aggs = ets:match(?MODULE, {{'$1', Time}, '$2'}),
+ Stats = lists:map(fun([Key, Agg]) -> {Key, Agg} end, Aggs),
+ case Stats of
+ [] ->
+ {[]};
+ _ ->
+ Ret = lists:foldl(fun({{Mod, Key}, Agg}, Acc) ->
+ CurrKeys = case proplists:lookup(Mod, Acc) of
+ none -> [];
+ {Mod, {Keys}} -> Keys
+ end,
+ NewMod = {[{Key, to_json_term(Agg)} | CurrKeys]},
+ [{Mod, NewMod} | proplists:delete(Mod, Acc)]
+ end, [], Stats),
+ {Ret}
+ end.
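+
+% Illustrative shape of the EJSON returned by all/1, assuming a single
+% {couchdb, open_databases} aggregate (names and numbers are examples only):
+%
+%   {[{couchdb,
+%       {[{open_databases,
+%           {[{description, <<"number of open databases">>},
+%             {current, 1.0}, {sum, 1.0}, {mean, 1.0},
+%             {stddev, null}, {min, 1}, {max, 1}]}}]}}]}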
+
+get(Key) ->
+ ?MODULE:get(Key, 0).
+get(Key, Time) when is_binary(Time) ->
+ ?MODULE:get(Key, list_to_integer(binary_to_list(Time)));
+get(Key, Time) when is_atom(Time) ->
+ ?MODULE:get(Key, list_to_integer(atom_to_list(Time)));
+get(Key, Time) when is_integer(Time) ->
+ case ets:lookup(?MODULE, {make_key(Key), Time}) of
+ [] -> #aggregate{seconds=Time};
+ [{_, Agg}] -> Agg
+ end.
+
+get_json(Key) ->
+ get_json(Key, 0).
+get_json(Key, Time) ->
+ to_json_term(?MODULE:get(Key, Time)).
+
+collect_sample() ->
+ gen_server:call(?MODULE, collect_sample, infinity).
+
+
+init(StatDescsFileName) ->
+ % Create an aggregate entry for each {description, rate} pair.
+ ets:new(?MODULE, [named_table, set, protected]),
+ SampleStr = couch_config:get("stats", "samples", "[0]"),
+ {ok, Samples} = couch_util:parse_term(SampleStr),
+ {ok, Descs} = file:consult(StatDescsFileName),
+ lists:foreach(fun({Sect, Key, Value}) ->
+ lists:foreach(fun(Secs) ->
+ Agg = #aggregate{
+ description=list_to_binary(Value),
+ seconds=Secs
+ },
+ ets:insert(?MODULE, {{{Sect, Key}, Secs}, Agg})
+ end, Samples)
+ end, Descs),
+
+ Self = self(),
+ ok = couch_config:register(
+ fun("stats", _) -> exit(Self, config_change) end
+ ),
+
+ Rate = list_to_integer(couch_config:get("stats", "rate", "1000")),
+ % TODO: Add timer_start to kernel start options.
+ {ok, TRef} = timer:apply_after(Rate, ?MODULE, collect_sample, []),
+ {ok, {TRef, Rate}}.
+
+terminate(_Reason, {TRef, _Rate}) ->
+ timer:cancel(TRef),
+ ok.
+
+handle_call(collect_sample, _, {OldTRef, SampleInterval}) ->
+ timer:cancel(OldTRef),
+ {ok, TRef} = timer:apply_after(SampleInterval, ?MODULE, collect_sample, []),
+ % Gather new stats values to add.
+ Incs = lists:map(fun({Key, Value}) ->
+ {Key, {incremental, Value}}
+ end, couch_stats_collector:all(incremental)),
+ Abs = lists:map(fun({Key, Values}) ->
+ couch_stats_collector:clear(Key),
+ Values2 = case Values of
+ X when is_list(X) -> X;
+ Else -> [Else]
+ end,
+ {_, Mean} = lists:foldl(fun(Val, {Count, Curr}) ->
+ {Count+1, Curr + (Val - Curr) / (Count+1)}
+ end, {0, 0}, Values2),
+ {Key, {absolute, Mean}}
+ end, couch_stats_collector:all(absolute)),
+
+ Values = Incs ++ Abs,
+ Now = erlang:now(),
+ lists:foreach(fun({{Key, Rate}, Agg}) ->
+ NewAgg = case proplists:lookup(Key, Values) of
+ none ->
+ rem_values(Now, Agg);
+ {Key, {Type, Value}} ->
+ NewValue = new_value(Type, Value, Agg#aggregate.current),
+ Agg2 = add_value(Now, NewValue, Agg),
+ rem_values(Now, Agg2)
+ end,
+ ets:insert(?MODULE, {{Key, Rate}, NewAgg})
+ end, ets:tab2list(?MODULE)),
+ {reply, ok, {TRef, SampleInterval}}.
+
+handle_cast(stop, State) ->
+ {stop, normal, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+code_change(_OldVersion, State, _Extra) ->
+ {ok, State}.
+
+
+new_value(incremental, Value, null) ->
+ Value;
+new_value(incremental, Value, Current) ->
+ Value - Current;
+new_value(absolute, Value, _Current) ->
+ Value.
+
+add_value(Time, Value, #aggregate{count=Count, seconds=Secs}=Agg) when Count < 1 ->
+ Samples = case Secs of
+ 0 -> [];
+ _ -> [{Time, Value}]
+ end,
+ Agg#aggregate{
+ count=1,
+ current=Value,
+ sum=Value,
+ mean=Value,
+ variance=0.0,
+ stddev=null,
+ min=Value,
+ max=Value,
+ samples=Samples
+ };
+add_value(Time, Value, Agg) ->
+ #aggregate{
+ count=Count,
+ current=Current,
+ sum=Sum,
+ mean=Mean,
+ variance=Variance,
+ samples=Samples
+ } = Agg,
+
+ NewCount = Count + 1,
+ NewMean = Mean + (Value - Mean) / NewCount,
+ NewVariance = Variance + (Value - Mean) * (Value - NewMean),
+ StdDev = case NewCount > 1 of
+ false -> null;
+ _ -> math:sqrt(NewVariance / (NewCount - 1))
+ end,
+ Agg2 = Agg#aggregate{
+ count=NewCount,
+ current=Current + Value,
+ sum=Sum + Value,
+ mean=NewMean,
+ variance=NewVariance,
+ stddev=StdDev,
+ min=lists:min([Agg#aggregate.min, Value]),
+ max=lists:max([Agg#aggregate.max, Value])
+ },
+ case Agg2#aggregate.seconds of
+ 0 -> Agg2;
+ _ -> Agg2#aggregate{samples=[{Time, Value} | Samples]}
+ end.
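+
+% The update above is Welford's online algorithm. Note that the `variance'
+% field actually holds the running sum of squared deviations (commonly called
+% M2), not the variance itself. For the n-th sample x:
+%   Mean(n) = Mean(n-1) + (x - Mean(n-1)) / n
+%   M2(n)   = M2(n-1) + (x - Mean(n-1)) * (x - Mean(n))
+% and the sample standard deviation is sqrt(M2(n) / (n - 1)) for n > 1.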
+
+rem_values(Time, Agg) ->
+ Seconds = Agg#aggregate.seconds,
+ Samples = Agg#aggregate.samples,
+ Pred = fun({When, _Value}) ->
+ timer:now_diff(Time, When) =< (Seconds * 1000000)
+ end,
+ {Keep, Remove} = lists:splitwith(Pred, Samples),
+ Agg2 = lists:foldl(fun({_, Value}, Acc) ->
+ rem_value(Value, Acc)
+ end, Agg, Remove),
+ Agg2#aggregate{samples=Keep}.
+
+rem_value(_Value, #aggregate{count=Count, seconds=Secs}) when Count =< 1 ->
+ #aggregate{seconds=Secs};
+rem_value(Value, Agg) ->
+ #aggregate{
+ count=Count,
+ sum=Sum,
+ mean=Mean,
+ variance=Variance
+ } = Agg,
+
+ OldMean = (Mean * Count - Value) / (Count - 1),
+ OldVariance = Variance - (Value - OldMean) * (Value - Mean),
+ OldCount = Count - 1,
+ StdDev = case OldCount > 1 of
+ false -> null;
+ _ -> math:sqrt(clamp_value(OldVariance / (OldCount - 1)))
+ end,
+ Agg#aggregate{
+ count=OldCount,
+ sum=Sum-Value,
+ mean=clamp_value(OldMean),
+ variance=clamp_value(OldVariance),
+ stddev=StdDev
+ }.
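+
+% rem_value/2 is the inverse of the Welford update in add_value/3: it backs
+% one sample out of the running mean and M2 so expired samples can drop off
+% the sliding window kept by rem_values/2. clamp_value/1 guards the sqrt
+% against tiny negative values introduced by floating point error.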
+
+to_json_term(Agg) ->
+ {Min, Max} = case Agg#aggregate.seconds > 0 of
+ false ->
+ {Agg#aggregate.min, Agg#aggregate.max};
+ _ ->
+ case length(Agg#aggregate.samples) > 0 of
+ true ->
+ Extract = fun({_Time, Value}) -> Value end,
+ Samples = lists:map(Extract, Agg#aggregate.samples),
+ {lists:min(Samples), lists:max(Samples)};
+ _ ->
+ {null, null}
+ end
+ end,
+ {[
+ {description, Agg#aggregate.description},
+ {current, round_value(Agg#aggregate.sum)},
+ {sum, round_value(Agg#aggregate.sum)},
+ {mean, round_value(Agg#aggregate.mean)},
+ {stddev, round_value(Agg#aggregate.stddev)},
+ {min, Min},
+ {max, Max}
+ ]}.
+
+make_key({Mod, Val}) when is_integer(Val) ->
+ {Mod, list_to_atom(integer_to_list(Val))};
+make_key(Key) ->
+ Key.
+
+round_value(Val) when not is_number(Val) ->
+ Val;
+round_value(Val) when Val == 0 ->
+ Val;
+round_value(Val) ->
+ erlang:round(Val * 1000.0) / 1000.0.
+
+clamp_value(Val) when Val > 0.00000000000001 ->
+ Val;
+clamp_value(_) ->
+ 0.0.
diff --git a/1.1.x/src/couchdb/couch_stats_collector.erl b/1.1.x/src/couchdb/couch_stats_collector.erl
new file mode 100644
index 00000000..f7b9bb48
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_stats_collector.erl
@@ -0,0 +1,136 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% TODO
+% - remove the existence check in increment(), decrement() and record(); have
+%   modules initialize their counters on startup.
+
+-module(couch_stats_collector).
+
+-behaviour(gen_server).
+
+-export([start/0, stop/0]).
+-export([all/0, all/1, get/1, increment/1, decrement/1, record/2, clear/1]).
+-export([track_process_count/1, track_process_count/2]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-define(HIT_TABLE, stats_hit_table).
+-define(ABS_TABLE, stats_abs_table).
+
+start() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+stop() ->
+ gen_server:call(?MODULE, stop).
+
+all() ->
+ ets:tab2list(?HIT_TABLE) ++ abs_to_list().
+
+all(Type) ->
+ case Type of
+ incremental -> ets:tab2list(?HIT_TABLE);
+ absolute -> abs_to_list()
+ end.
+
+get(Key) ->
+ case ets:lookup(?HIT_TABLE, Key) of
+ [] ->
+ case ets:lookup(?ABS_TABLE, Key) of
+ [] ->
+ nil;
+ AbsVals ->
+ lists:map(fun({_, Value}) -> Value end, AbsVals)
+ end;
+ [{_, Counter}] ->
+ Counter
+ end.
+
+increment(Key) ->
+ Key2 = make_key(Key),
+ case catch ets:update_counter(?HIT_TABLE, Key2, 1) of
+ {'EXIT', {badarg, _}} ->
+ catch ets:insert(?HIT_TABLE, {Key2, 1}),
+ ok;
+ _ ->
+ ok
+ end.
+
+decrement(Key) ->
+ Key2 = make_key(Key),
+ case catch ets:update_counter(?HIT_TABLE, Key2, -1) of
+ {'EXIT', {badarg, _}} ->
+ catch ets:insert(?HIT_TABLE, {Key2, -1}),
+ ok;
+ _ -> ok
+ end.
+
+record(Key, Value) ->
+ catch ets:insert(?ABS_TABLE, {make_key(Key), Value}).
+
+clear(Key) ->
+ catch ets:delete(?ABS_TABLE, make_key(Key)).
+
+track_process_count(Stat) ->
+ track_process_count(self(), Stat).
+
+track_process_count(Pid, Stat) ->
+ MonitorFun = fun() ->
+ Ref = erlang:monitor(process, Pid),
+ receive {'DOWN', Ref, _, _, _} -> ok end,
+ couch_stats_collector:decrement(Stat)
+ end,
+ case (catch couch_stats_collector:increment(Stat)) of
+ ok -> spawn(MonitorFun);
+ _ -> ok
+ end.
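+
+% Illustrative use (the stat name is an example): a process that opens a
+% database can call
+%
+%   couch_stats_collector:track_process_count({couchdb, open_databases})
+%
+% which bumps the counter immediately and spawns a monitor that decrements
+% it again when the calling process dies.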
+
+
+init(_) ->
+ ets:new(?HIT_TABLE, [named_table, set, public]),
+ ets:new(?ABS_TABLE, [named_table, duplicate_bag, public]),
+ {ok, nil}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+handle_call(stop, _, State) ->
+ {stop, normal, stopped, State}.
+
+handle_cast(foo, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+code_change(_OldVersion, State, _Extra) ->
+ {ok, State}.
+
+
+make_key({Module, Key}) when is_integer(Key) ->
+ {Module, list_to_atom(integer_to_list(Key))};
+make_key(Key) ->
+ Key.
+
+abs_to_list() ->
+ SortedKVs = lists:sort(ets:tab2list(?ABS_TABLE)),
+ lists:foldl(fun({Key, Val}, Acc) ->
+ case Acc of
+ [] ->
+ [{Key, [Val]}];
+ [{Key, Prev} | Rest] ->
+ [{Key, [Val | Prev]} | Rest];
+ Others ->
+ [{Key, [Val]} | Others]
+ end
+    end, [], SortedKVs).
\ No newline at end of file
diff --git a/1.1.x/src/couchdb/couch_stream.erl b/1.1.x/src/couchdb/couch_stream.erl
new file mode 100644
index 00000000..60af1c2b
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_stream.erl
@@ -0,0 +1,357 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_stream).
+-behaviour(gen_server).
+
+
+-define(FILE_POINTER_BYTES, 8).
+-define(FILE_POINTER_BITS, 8*(?FILE_POINTER_BYTES)).
+
+-define(STREAM_OFFSET_BYTES, 4).
+-define(STREAM_OFFSET_BITS, 8*(?STREAM_OFFSET_BYTES)).
+
+-define(HUGE_CHUNK, 1000000000). % Huge chunk size when reading all in one go
+
+-define(DEFAULT_STREAM_CHUNK, 16#00100000). % 1 meg chunks when streaming data
+
+-export([open/1, open/3, close/1, write/2, foldl/4, foldl/5, range_foldl/6, foldl_decode/6,
+ old_foldl/5,old_copy_to_new_stream/4]).
+-export([copy_to_new_stream/3,old_read_term/2]).
+-export([init/1, terminate/2, handle_call/3]).
+-export([handle_cast/2,code_change/3,handle_info/2]).
+
+-include("couch_db.hrl").
+
+-record(stream,
+ {fd = 0,
+ written_pointers=[],
+ buffer_list = [],
+ buffer_len = 0,
+ max_buffer = 4096,
+ written_len = 0,
+ md5,
+ % md5 of the content without any transformation applied (e.g. compression)
+ % needed for the attachment upload integrity check (ticket 558)
+ identity_md5,
+ identity_len = 0,
+ encoding_fun,
+ end_encoding_fun
+ }).
+
+
+%%% Interface functions %%%
+
+open(Fd) ->
+ open(Fd, identity, []).
+
+open(Fd, Encoding, Options) ->
+ gen_server:start_link(couch_stream, {Fd, Encoding, Options}, []).
+
+close(Pid) ->
+ gen_server:call(Pid, close, infinity).
+
+copy_to_new_stream(Fd, PosList, DestFd) ->
+ {ok, Dest} = open(DestFd),
+ foldl(Fd, PosList,
+ fun(Bin, _) ->
+ ok = write(Dest, Bin)
+ end, ok),
+ close(Dest).
+
+
+% 09 UPGRADE CODE
+old_copy_to_new_stream(Fd, Pos, Len, DestFd) ->
+ {ok, Dest} = open(DestFd),
+ old_foldl(Fd, Pos, Len,
+ fun(Bin, _) ->
+ ok = write(Dest, Bin)
+ end, ok),
+ close(Dest).
+
+% 09 UPGRADE CODE
+old_foldl(_Fd, null, 0, _Fun, Acc) ->
+ Acc;
+old_foldl(Fd, OldPointer, Len, Fun, Acc) when is_tuple(OldPointer)->
+ {ok, Acc2, _} = old_stream_data(Fd, OldPointer, Len, ?DEFAULT_STREAM_CHUNK, Fun, Acc),
+ Acc2.
+
+foldl(_Fd, [], _Fun, Acc) ->
+ Acc;
+foldl(Fd, [Pos|Rest], Fun, Acc) ->
+ {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
+ foldl(Fd, Rest, Fun, Fun(Bin, Acc)).
+
+foldl(Fd, PosList, <<>>, Fun, Acc) ->
+ foldl(Fd, PosList, Fun, Acc);
+foldl(Fd, PosList, Md5, Fun, Acc) ->
+ foldl(Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc).
+
+foldl_decode(Fd, PosList, Md5, Enc, Fun, Acc) ->
+ {DecDataFun, DecEndFun} = case Enc of
+ gzip ->
+ ungzip_init();
+ identity ->
+ identity_enc_dec_funs()
+ end,
+ Result = foldl_decode(
+ DecDataFun, Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc
+ ),
+ DecEndFun(),
+ Result.
+
+foldl(_Fd, [], Md5, Md5Acc, _Fun, Acc) ->
+ Md5 = couch_util:md5_final(Md5Acc),
+ Acc;
+foldl(Fd, [{Pos, _Size}], Md5, Md5Acc, Fun, Acc) -> % 0110 UPGRADE CODE
+ foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc);
+foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
+ {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
+ Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, Bin)),
+ Fun(Bin, Acc);
+foldl(Fd, [{Pos, _Size}|Rest], Md5, Md5Acc, Fun, Acc) ->
+ foldl(Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc);
+foldl(Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
+ {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
+ foldl(Fd, Rest, Md5, couch_util:md5_update(Md5Acc, Bin), Fun, Fun(Bin, Acc)).
+
+range_foldl(Fd, PosList, From, To, Fun, Acc) ->
+ range_foldl(Fd, PosList, From, To, 0, Fun, Acc).
+
+range_foldl(_Fd, _PosList, _From, To, Off, _Fun, Acc) when Off >= To ->
+ Acc;
+range_foldl(Fd, [Pos|Rest], From, To, Off, Fun, Acc) when is_integer(Pos) -> % old-style attachment
+ {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
+ range_foldl(Fd, [{Pos, iolist_size(Bin)}] ++ Rest, From, To, Off, Fun, Acc);
+range_foldl(Fd, [{_Pos, Size}|Rest], From, To, Off, Fun, Acc) when From > Off + Size ->
+ range_foldl(Fd, Rest, From, To, Off + Size, Fun, Acc);
+range_foldl(Fd, [{Pos, Size}|Rest], From, To, Off, Fun, Acc) ->
+ {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
+ Bin1 = if
+ From =< Off andalso To >= Off + Size -> Bin; %% the whole block is covered
+ true ->
+ PrefixLen = clip(From - Off, 0, Size),
+ PostfixLen = clip(Off + Size - To, 0, Size),
+ MatchLen = Size - PrefixLen - PostfixLen,
+ <<_Prefix:PrefixLen/binary,Match:MatchLen/binary,_Postfix:PostfixLen/binary>> = iolist_to_binary(Bin),
+ Match
+ end,
+ range_foldl(Fd, Rest, From, To, Off + Size, Fun, Fun(Bin1, Acc)).
+
+clip(Value, Lo, Hi) ->
+ if
+ Value < Lo -> Lo;
+ Value > Hi -> Hi;
+ true -> Value
+ end.
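+
+% Worked example for the clipping above: for a block of Size = 10 starting
+% at Off = 10 and a requested byte range From = 12, To = 15:
+%   PrefixLen  = clip(12 - 10, 0, 10)      = 2
+%   PostfixLen = clip(10 + 10 - 15, 0, 10) = 5
+%   MatchLen   = 10 - 2 - 5                = 3
+% so exactly bytes 12..14 of the stream are passed to Fun.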
+
+foldl_decode(_DecFun, _Fd, [], Md5, Md5Acc, _Fun, Acc) ->
+ Md5 = couch_util:md5_final(Md5Acc),
+ Acc;
+foldl_decode(DecFun, Fd, [{Pos, _Size}], Md5, Md5Acc, Fun, Acc) ->
+ foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc);
+foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
+ {ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
+ Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, EncBin)),
+ Bin = DecFun(EncBin),
+ Fun(Bin, Acc);
+foldl_decode(DecFun, Fd, [{Pos, _Size}|Rest], Md5, Md5Acc, Fun, Acc) ->
+ foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc);
+foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
+ {ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
+ Bin = DecFun(EncBin),
+ Md5Acc2 = couch_util:md5_update(Md5Acc, EncBin),
+ foldl_decode(DecFun, Fd, Rest, Md5, Md5Acc2, Fun, Fun(Bin, Acc)).
+
+gzip_init(Options) ->
+ case couch_util:get_value(compression_level, Options, 0) of
+ Lvl when Lvl >= 1 andalso Lvl =< 9 ->
+ Z = zlib:open(),
+ % 15 = ?MAX_WBITS (defined in the zlib module)
+ % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1
+ ok = zlib:deflateInit(Z, Lvl, deflated, 16 + 15, 8, default),
+ {
+ fun(Data) ->
+ zlib:deflate(Z, Data)
+ end,
+ fun() ->
+ Last = zlib:deflate(Z, [], finish),
+ ok = zlib:deflateEnd(Z),
+ ok = zlib:close(Z),
+ Last
+ end
+ };
+ _ ->
+ identity_enc_dec_funs()
+ end.
+
+ungzip_init() ->
+ Z = zlib:open(),
+ zlib:inflateInit(Z, 16 + 15),
+ {
+ fun(Data) ->
+ zlib:inflate(Z, Data)
+ end,
+ fun() ->
+ ok = zlib:inflateEnd(Z),
+ ok = zlib:close(Z)
+ end
+ }.
+
+identity_enc_dec_funs() ->
+ {
+ fun(Data) -> Data end,
+ fun() -> [] end
+ }.
+
+write(_Pid, <<>>) ->
+ ok;
+write(Pid, Bin) ->
+ gen_server:call(Pid, {write, Bin}, infinity).
+
+
+init({Fd, Encoding, Options}) ->
+ {EncodingFun, EndEncodingFun} = case Encoding of
+ identity ->
+ identity_enc_dec_funs();
+ gzip ->
+ gzip_init(Options)
+ end,
+ {ok, #stream{
+ fd=Fd,
+ md5=couch_util:md5_init(),
+ identity_md5=couch_util:md5_init(),
+ encoding_fun=EncodingFun,
+ end_encoding_fun=EndEncodingFun
+ }
+ }.
+
+terminate(_Reason, _Stream) ->
+ ok.
+
+handle_call({write, Bin}, _From, Stream) ->
+ BinSize = iolist_size(Bin),
+ #stream{
+ fd = Fd,
+ written_len = WrittenLen,
+ written_pointers = Written,
+ buffer_len = BufferLen,
+ buffer_list = Buffer,
+ max_buffer = Max,
+ md5 = Md5,
+ identity_md5 = IdenMd5,
+ identity_len = IdenLen,
+ encoding_fun = EncodingFun} = Stream,
+ if BinSize + BufferLen > Max ->
+ WriteBin = lists:reverse(Buffer, [Bin]),
+ IdenMd5_2 = couch_util:md5_update(IdenMd5, WriteBin),
+ case EncodingFun(WriteBin) of
+ [] ->
+ % case where the encoder did some internal buffering
+ % (zlib does it for example)
+ WrittenLen2 = WrittenLen,
+ Md5_2 = Md5,
+ Written2 = Written;
+ WriteBin2 ->
+ {ok, Pos} = couch_file:append_binary(Fd, WriteBin2),
+ WrittenLen2 = WrittenLen + iolist_size(WriteBin2),
+ Md5_2 = couch_util:md5_update(Md5, WriteBin2),
+ Written2 = [{Pos, iolist_size(WriteBin2)}|Written]
+ end,
+
+ {reply, ok, Stream#stream{
+ written_len=WrittenLen2,
+ written_pointers=Written2,
+ buffer_list=[],
+ buffer_len=0,
+ md5=Md5_2,
+ identity_md5=IdenMd5_2,
+ identity_len=IdenLen + BinSize}};
+ true ->
+ {reply, ok, Stream#stream{
+ buffer_list=[Bin|Buffer],
+ buffer_len=BufferLen + BinSize,
+ identity_len=IdenLen + BinSize}}
+ end;
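+% Note on the clause above: writes are buffered until a write would push
+% buffer_len past max_buffer (4096 bytes by default); only then is the buffer
+% run through the encoding fun and appended with couch_file:append_binary/2,
+% so many small writes coalesce into few file appends.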
+handle_call(close, _From, Stream) ->
+ #stream{
+ fd = Fd,
+ written_len = WrittenLen,
+ written_pointers = Written,
+ buffer_list = Buffer,
+ md5 = Md5,
+ identity_md5 = IdenMd5,
+ identity_len = IdenLen,
+ encoding_fun = EncodingFun,
+ end_encoding_fun = EndEncodingFun} = Stream,
+
+ WriteBin = lists:reverse(Buffer),
+ IdenMd5Final = couch_util:md5_final(couch_util:md5_update(IdenMd5, WriteBin)),
+ WriteBin2 = EncodingFun(WriteBin) ++ EndEncodingFun(),
+ Md5Final = couch_util:md5_final(couch_util:md5_update(Md5, WriteBin2)),
+ Result = case WriteBin2 of
+ [] ->
+ {lists:reverse(Written), WrittenLen, IdenLen, Md5Final, IdenMd5Final};
+ _ ->
+ {ok, Pos} = couch_file:append_binary(Fd, WriteBin2),
+ StreamInfo = lists:reverse(Written, [{Pos, iolist_size(WriteBin2)}]),
+ StreamLen = WrittenLen + iolist_size(WriteBin2),
+ {StreamInfo, StreamLen, IdenLen, Md5Final, IdenMd5Final}
+ end,
+ {stop, normal, Result, Stream}.
+
+handle_cast(_Msg, State) ->
+ {noreply,State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+
+
+% 09 UPGRADE CODE
+old_read_term(Fd, Sp) ->
+ {ok, <<TermLen:(?STREAM_OFFSET_BITS)>>, Sp2}
+ = old_read(Fd, Sp, ?STREAM_OFFSET_BYTES),
+ {ok, Bin, _Sp3} = old_read(Fd, Sp2, TermLen),
+ {ok, binary_to_term(Bin)}.
+
+old_read(Fd, Sp, Num) ->
+ {ok, RevBin, Sp2} = old_stream_data(Fd, Sp, Num, ?HUGE_CHUNK, fun(Bin, Acc) -> [Bin | Acc] end, []),
+ Bin = list_to_binary(lists:reverse(RevBin)),
+ {ok, Bin, Sp2}.
+
+% 09 UPGRADE CODE
+old_stream_data(_Fd, Sp, 0, _MaxChunk, _Fun, Acc) ->
+ {ok, Acc, Sp};
+old_stream_data(Fd, {Pos, 0}, Num, MaxChunk, Fun, Acc) ->
+ {ok, <<NextPos:(?FILE_POINTER_BITS), NextOffset:(?STREAM_OFFSET_BITS)>>}
+ = couch_file:old_pread(Fd, Pos, ?FILE_POINTER_BYTES + ?STREAM_OFFSET_BYTES),
+ Sp = {NextPos, NextOffset},
+ % Check NextPos is past current Pos (this is always true in a stream)
+ % Guards against potential infinite loops caused by corruption.
+ case NextPos > Pos of
+ true -> ok;
+ false -> throw({error, stream_corruption})
+ end,
+ old_stream_data(Fd, Sp, Num, MaxChunk, Fun, Acc);
+old_stream_data(Fd, {Pos, Offset}, Num, MaxChunk, Fun, Acc) ->
+ ReadAmount = lists:min([MaxChunk, Num, Offset]),
+ {ok, Bin} = couch_file:old_pread(Fd, Pos, ReadAmount),
+ Sp = {Pos + ReadAmount, Offset - ReadAmount},
+ old_stream_data(Fd, Sp, Num - ReadAmount, MaxChunk, Fun, Fun(Bin, Acc)).
+
+
+% Tests moved to tests/etap/050-stream.t
+
diff --git a/1.1.x/src/couchdb/couch_task_status.erl b/1.1.x/src/couchdb/couch_task_status.erl
new file mode 100644
index 00000000..c4487dc4
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_task_status.erl
@@ -0,0 +1,124 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_task_status).
+-behaviour(gen_server).
+
+% This module is used to track the status of long-running tasks.
+% Long-running tasks register via add_task/3 and then update their status via
+% update/1, and the task and its status are added to the tasks list. When the
+% tracked task dies it is automatically removed from tracking. To get the
+% current list of tasks, use the all/0 function.
+
+-export([start_link/0, stop/0]).
+-export([all/0, add_task/3, update/1, update/2, set_update_frequency/1]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-import(couch_util, [to_binary/1]).
+
+-include("couch_db.hrl").
+
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+
+stop() ->
+ gen_server:cast(?MODULE, stop).
+
+
+all() ->
+ gen_server:call(?MODULE, all).
+
+
+add_task(Type, TaskName, StatusText) ->
+ put(task_status_update, {{0, 0, 0}, 0}),
+ Msg = {
+ add_task,
+ to_binary(Type),
+ to_binary(TaskName),
+ to_binary(StatusText)
+ },
+ gen_server:call(?MODULE, Msg).
+
+
+set_update_frequency(Msecs) ->
+ put(task_status_update, {{0, 0, 0}, Msecs * 1000}).
+
+
+update(StatusText) ->
+ update("~s", [StatusText]).
+
+update(Format, Data) ->
+ {LastUpdateTime, Frequency} = get(task_status_update),
+ case timer:now_diff(Now = now(), LastUpdateTime) >= Frequency of
+ true ->
+ put(task_status_update, {Now, Frequency}),
+ Msg = ?l2b(io_lib:format(Format, Data)),
+ gen_server:cast(?MODULE, {update_status, self(), Msg});
+ false ->
+ ok
+ end.
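+
+% Illustrative use from a long-running task (all values are examples):
+%
+%   couch_task_status:add_task(<<"Database Compaction">>, DbName, <<"Starting">>),
+%   couch_task_status:set_update_frequency(500),
+%   couch_task_status:update("Copied ~p of ~p changes", [Done, Total]).
+%
+% Updates arriving faster than the configured frequency are dropped by the
+% timestamp check above, keeping chatty tasks cheap.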
+
+
+init([]) ->
+    % create the ETS table that holds one {Pid, {Type, TaskName, StatusText}}
+    % entry per tracked task
+ ets:new(?MODULE, [ordered_set, protected, named_table]),
+ {ok, nil}.
+
+
+terminate(_Reason,_State) ->
+ ok.
+
+
+handle_call({add_task, Type, TaskName, StatusText}, {From, _}, Server) ->
+ case ets:lookup(?MODULE, From) of
+ [] ->
+ true = ets:insert(?MODULE, {From, {Type, TaskName, StatusText}}),
+ erlang:monitor(process, From),
+ {reply, ok, Server};
+ [_] ->
+ {reply, {add_task_error, already_registered}, Server}
+ end;
+handle_call(all, _, Server) ->
+ All = [
+ [
+ {type, Type},
+ {task, Task},
+ {status, Status},
+ {pid, ?l2b(pid_to_list(Pid))}
+ ]
+ ||
+ {Pid, {Type, Task, Status}} <- ets:tab2list(?MODULE)
+ ],
+ {reply, All, Server}.
+
+
+handle_cast({update_status, Pid, StatusText}, Server) ->
+ [{Pid, {Type, TaskName, _StatusText}}] = ets:lookup(?MODULE, Pid),
+ ?LOG_DEBUG("New task status for ~s: ~s",[TaskName, StatusText]),
+ true = ets:insert(?MODULE, {Pid, {Type, TaskName, StatusText}}),
+ {noreply, Server};
+handle_cast(stop, State) ->
+ {stop, normal, State}.
+
+handle_info({'DOWN', _MonitorRef, _Type, Pid, _Info}, Server) ->
+    %% should we also call erlang:demonitor(_MonitorRef) here?
+ ets:delete(?MODULE, Pid),
+ {noreply, Server}.
+
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
diff --git a/1.1.x/src/couchdb/couch_util.erl b/1.1.x/src/couchdb/couch_util.erl
new file mode 100644
index 00000000..53dfe5e3
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_util.erl
@@ -0,0 +1,478 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_util).
+
+-export([priv_dir/0, start_driver/1, normpath/1]).
+-export([should_flush/0, should_flush/1, to_existing_atom/1]).
+-export([rand32/0, implode/2, collate/2, collate/3]).
+-export([abs_pathname/1,abs_pathname/2, trim/1]).
+-export([encodeBase64Url/1, decodeBase64Url/1]).
+-export([validate_utf8/1, to_hex/1, parse_term/1, dict_find/3]).
+-export([get_nested_json_value/2, json_user_ctx/1]).
+-export([proplist_apply_field/2, json_apply_field/2]).
+-export([to_binary/1, to_integer/1, to_list/1, url_encode/1]).
+-export([json_encode/1, json_decode/1]).
+-export([verify/2,simple_call/2,shutdown_sync/1]).
+-export([compressible_att_type/1]).
+-export([get_value/2, get_value/3]).
+-export([md5/1, md5_init/0, md5_update/2, md5_final/1]).
+-export([reorder_results/2]).
+-export([url_strip_password/1]).
+-export([encode_doc_id/1]).
+
+-include("couch_db.hrl").
+
+% arbitrarily chosen amount of memory to use before flushing to disk
+-define(FLUSH_MAX_MEM, 10000000).
+
+priv_dir() ->
+ case code:priv_dir(couch) of
+ {error, bad_name} ->
+        % small hack: in dev mode the "app" is couchdb. Fixing this requires
+        % renaming src/couch to src/couchdb. Not really worth the hassle.
+        % -Damien
+ code:priv_dir(couchdb);
+ Dir -> Dir
+ end.
+
+start_driver(LibDir) ->
+ case erl_ddll:load_driver(LibDir, "couch_icu_driver") of
+ ok ->
+ ok;
+ {error, already_loaded} ->
+ ok = erl_ddll:reload_driver(LibDir, "couch_icu_driver");
+ {error, Error} ->
+ exit(erl_ddll:format_error(Error))
+ end.
+
+% Normalize a pathname by removing .. and . components.
+normpath(Path) ->
+ normparts(filename:split(Path), []).
+
+normparts([], Acc) ->
+ filename:join(lists:reverse(Acc));
+normparts([".." | RestParts], [_Drop | RestAcc]) ->
+ normparts(RestParts, RestAcc);
+normparts(["." | RestParts], Acc) ->
+ normparts(RestParts, Acc);
+normparts([Part | RestParts], Acc) ->
+ normparts(RestParts, [Part | Acc]).
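+
+% Illustrative results:
+%   normpath("a/b/../c") -> "a/c"
+%   normpath("./x/./y")  -> "x/y"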
+
+% Works like list_to_existing_atom/1, except the input can be a list or a
+% binary, and it returns the original value instead of raising an error when
+% no existing atom matches.
+to_existing_atom(V) when is_list(V) ->
+ try list_to_existing_atom(V) catch _:_ -> V end;
+to_existing_atom(V) when is_binary(V) ->
+ try list_to_existing_atom(?b2l(V)) catch _:_ -> V end;
+to_existing_atom(V) when is_atom(V) ->
+ V.
+
+shutdown_sync(Pid) when not is_pid(Pid)->
+ ok;
+shutdown_sync(Pid) ->
+ MRef = erlang:monitor(process, Pid),
+ try
+ catch unlink(Pid),
+ catch exit(Pid, shutdown),
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ ok
+ end
+ after
+ erlang:demonitor(MRef, [flush])
+ end.
+
+
+simple_call(Pid, Message) ->
+ MRef = erlang:monitor(process, Pid),
+ try
+ Pid ! {self(), Message},
+ receive
+ {Pid, Result} ->
+ Result;
+ {'DOWN', MRef, _, _, Reason} ->
+ exit(Reason)
+ end
+ after
+ erlang:demonitor(MRef, [flush])
+ end.
+
+validate_utf8(Data) when is_list(Data) ->
+ validate_utf8(?l2b(Data));
+validate_utf8(Bin) when is_binary(Bin) ->
+ validate_utf8_fast(Bin, 0).
+
+validate_utf8_fast(B, O) ->
+ case B of
+ <<_:O/binary>> ->
+ true;
+ <<_:O/binary, C1, _/binary>> when
+ C1 < 128 ->
+ validate_utf8_fast(B, 1 + O);
+ <<_:O/binary, C1, C2, _/binary>> when
+ C1 >= 194, C1 =< 223,
+ C2 >= 128, C2 =< 191 ->
+ validate_utf8_fast(B, 2 + O);
+ <<_:O/binary, C1, C2, C3, _/binary>> when
+ C1 >= 224, C1 =< 239,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191 ->
+ validate_utf8_fast(B, 3 + O);
+ <<_:O/binary, C1, C2, C3, C4, _/binary>> when
+ C1 >= 240, C1 =< 244,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191,
+ C4 >= 128, C4 =< 191 ->
+ validate_utf8_fast(B, 4 + O);
+ _ ->
+ false
+ end.
+
+to_hex([]) ->
+ [];
+to_hex(Bin) when is_binary(Bin) ->
+ to_hex(binary_to_list(Bin));
+to_hex([H|T]) ->
+ [to_digit(H div 16), to_digit(H rem 16) | to_hex(T)].
+
+to_digit(N) when N < 10 -> $0 + N;
+to_digit(N) -> $a + N-10.
+
+
+parse_term(Bin) when is_binary(Bin) ->
+ parse_term(binary_to_list(Bin));
+parse_term(List) ->
+ {ok, Tokens, _} = erl_scan:string(List ++ "."),
+ erl_parse:parse_term(Tokens).
+
+get_value(Key, List) ->
+ get_value(Key, List, undefined).
+
+get_value(Key, List, Default) ->
+ case lists:keysearch(Key, 1, List) of
+ {value, {Key,Value}} ->
+ Value;
+ false ->
+ Default
+ end.
+
+get_nested_json_value({Props}, [Key|Keys]) ->
+ case couch_util:get_value(Key, Props, nil) of
+ nil -> throw({not_found, <<"missing json key: ", Key/binary>>});
+ Value -> get_nested_json_value(Value, Keys)
+ end;
+get_nested_json_value(Value, []) ->
+ Value;
+get_nested_json_value(_NotJSONObj, _) ->
+ throw({not_found, json_mismatch}).
+
+proplist_apply_field(H, L) ->
+ {R} = json_apply_field(H, {L}),
+ R.
+
+json_apply_field(H, {L}) ->
+ json_apply_field(H, L, []).
+json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
+ json_apply_field({Key, NewValue}, Headers, Acc);
+json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
+ json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
+json_apply_field({Key, NewValue}, [], Acc) ->
+ {[{Key, NewValue}|Acc]}.
+
+json_user_ctx(#db{name=DbName, user_ctx=Ctx}) ->
+ {[{<<"db">>, DbName},
+ {<<"name">>,Ctx#user_ctx.name},
+ {<<"roles">>,Ctx#user_ctx.roles}]}.
+
+
+% returns a random 32-bit integer
+rand32() ->
+ crypto:rand_uniform(0, 16#100000000).
+
+% given a pathname "../foo/bar/" it gives back the fully qualified
+% absolute pathname.
+abs_pathname(" " ++ Filename) ->
+    % strip leading whitespace
+ abs_pathname(Filename);
+abs_pathname([$/ |_]=Filename) ->
+ Filename;
+abs_pathname(Filename) ->
+ {ok, Cwd} = file:get_cwd(),
+ {Filename2, Args} = separate_cmd_args(Filename, ""),
+ abs_pathname(Filename2, Cwd) ++ Args.
+
+abs_pathname(Filename, Dir) ->
+ Name = filename:absname(Filename, Dir ++ "/"),
+ OutFilename = filename:join(fix_path_list(filename:split(Name), [])),
+    % If the filename is a dir (last char is a slash), put back the trailing slash
+ case string:right(Filename,1) of
+ "/" ->
+ OutFilename ++ "/";
+ "\\" ->
+ OutFilename ++ "/";
+ _Else->
+ OutFilename
+ end.
+
+% if this is an executable with arguments, separate out the arguments
+% "./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
+separate_cmd_args("", CmdAcc) ->
+ {lists:reverse(CmdAcc), ""};
+separate_cmd_args("\\ " ++ Rest, CmdAcc) -> % handle skipped value
+ separate_cmd_args(Rest, " \\" ++ CmdAcc);
+separate_cmd_args(" " ++ Rest, CmdAcc) ->
+ {lists:reverse(CmdAcc), " " ++ Rest};
+separate_cmd_args([Char|Rest], CmdAcc) ->
+ separate_cmd_args(Rest, [Char | CmdAcc]).
+
+% Is a character whitespace?
+is_whitespace($\s) -> true;
+is_whitespace($\t) -> true;
+is_whitespace($\n) -> true;
+is_whitespace($\r) -> true;
+is_whitespace(_Else) -> false.
+
+
+% removes leading and trailing whitespace from a string
+trim(String) ->
+ String2 = lists:dropwhile(fun is_whitespace/1, String),
+ lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).
+
+% takes a hierarchical list of dirs and removes the dots ".", double dots
+% ".." and the corresponding parent dirs.
+fix_path_list([], Acc) ->
+ lists:reverse(Acc);
+fix_path_list([".."|Rest], [_PrevAcc|RestAcc]) ->
+ fix_path_list(Rest, RestAcc);
+fix_path_list(["."|Rest], Acc) ->
+ fix_path_list(Rest, Acc);
+fix_path_list([Dir | Rest], Acc) ->
+ fix_path_list(Rest, [Dir | Acc]).
+
+
+implode(List, Sep) ->
+ implode(List, Sep, []).
+
+implode([], _Sep, Acc) ->
+ lists:flatten(lists:reverse(Acc));
+implode([H], Sep, Acc) ->
+ implode([], Sep, [H|Acc]);
+implode([H|T], Sep, Acc) ->
+ implode(T, Sep, [Sep,H|Acc]).
+
+
+drv_port() ->
+ case get(couch_drv_port) of
+ undefined ->
+ Port = open_port({spawn, "couch_icu_driver"}, []),
+ put(couch_drv_port, Port),
+ Port;
+ Port ->
+ Port
+ end.
+
+collate(A, B) ->
+ collate(A, B, []).
+
+collate(A, B, Options) when is_binary(A), is_binary(B) ->
+ Operation =
+ case lists:member(nocase, Options) of
+ true -> 1; % Case insensitive
+ false -> 0 % Case sensitive
+ end,
+ SizeA = byte_size(A),
+ SizeB = byte_size(B),
+ Bin = <<SizeA:32/native, A/binary, SizeB:32/native, B/binary>>,
+ [Result] = erlang:port_control(drv_port(), Operation, Bin),
+ % Result is 0 for lt, 1 for eq and 2 for gt. Subtract 1 to return the
+ % expected typical -1, 0, 1
+ Result - 1.
+
+should_flush() ->
+ should_flush(?FLUSH_MAX_MEM).
+
+should_flush(MemThreshHold) ->
+ {memory, ProcMem} = process_info(self(), memory),
+ BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
+ 0, element(2,process_info(self(), binary))),
+ if ProcMem+BinMem > 2*MemThreshHold ->
+ garbage_collect(),
+ {memory, ProcMem2} = process_info(self(), memory),
+ BinMem2 = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
+ 0, element(2,process_info(self(), binary))),
+ ProcMem2+BinMem2 > MemThreshHold;
+ true -> false end.
+
+encodeBase64Url(Url) ->
+ Url1 = iolist_to_binary(re:replace(base64:encode(Url), "=+$", "")),
+ Url2 = iolist_to_binary(re:replace(Url1, "/", "_", [global])),
+ iolist_to_binary(re:replace(Url2, "\\+", "-", [global])).
+
+decodeBase64Url(Url64) ->
+ Url1 = re:replace(iolist_to_binary(Url64), "-", "+", [global]),
+ Url2 = iolist_to_binary(
+ re:replace(iolist_to_binary(Url1), "_", "/", [global])
+ ),
+ Padding = ?l2b(lists:duplicate((4 - size(Url2) rem 4) rem 4, $=)),
+ base64:decode(<<Url2/binary, Padding/binary>>).
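+
+% These implement the URL-safe base64 variant: "+" and "/" become "-" and
+% "_", and the trailing "=" padding is stripped (and recomputed on decode),
+% so values can be embedded in URLs. Round trip (illustrative):
+%   decodeBase64Url(encodeBase64Url(<<"foo/bar">>)) =:= <<"foo/bar">>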
+
+dict_find(Key, Dict, DefaultValue) ->
+ case dict:find(Key, Dict) of
+ {ok, Value} ->
+ Value;
+ error ->
+ DefaultValue
+ end.
+
+to_binary(V) when is_binary(V) ->
+ V;
+to_binary(V) when is_list(V) ->
+ try
+ list_to_binary(V)
+ catch
+ _:_ ->
+ list_to_binary(io_lib:format("~p", [V]))
+ end;
+to_binary(V) when is_atom(V) ->
+ list_to_binary(atom_to_list(V));
+to_binary(V) ->
+ list_to_binary(io_lib:format("~p", [V])).
+
+to_integer(V) when is_integer(V) ->
+ V;
+to_integer(V) when is_list(V) ->
+ erlang:list_to_integer(V);
+to_integer(V) when is_binary(V) ->
+ erlang:list_to_integer(binary_to_list(V)).
+
+to_list(V) when is_list(V) ->
+ V;
+to_list(V) when is_binary(V) ->
+ binary_to_list(V);
+to_list(V) when is_atom(V) ->
+ atom_to_list(V);
+to_list(V) ->
+ lists:flatten(io_lib:format("~p", [V])).
+
+url_encode(Bin) when is_binary(Bin) ->
+ url_encode(binary_to_list(Bin));
+url_encode([H|T]) ->
+ if
+ H >= $a, $z >= H ->
+ [H|url_encode(T)];
+ H >= $A, $Z >= H ->
+ [H|url_encode(T)];
+ H >= $0, $9 >= H ->
+ [H|url_encode(T)];
+ H == $_; H == $.; H == $-; H == $: ->
+ [H|url_encode(T)];
+ true ->
+ case lists:flatten(io_lib:format("~.16.0B", [H])) of
+ [X, Y] ->
+ [$%, X, Y | url_encode(T)];
+ [X] ->
+ [$%, $0, X | url_encode(T)]
+ end
+ end;
+url_encode([]) ->
+ [].
+
+json_encode(V) ->
+ Handler =
+ fun({L}) when is_list(L) ->
+ {struct,L};
+ (Bad) ->
+ exit({json_encode, {bad_term, Bad}})
+ end,
+ (mochijson2:encoder([{handler, Handler}]))(V).
+
+json_decode(V) ->
+ try (mochijson2:decoder([{object_hook, fun({struct,L}) -> {L} end}]))(V)
+ catch
+ _Type:_Error ->
+ throw({invalid_json,V})
+ end.
+
+verify([X|RestX], [Y|RestY], Result) ->
+ verify(RestX, RestY, (X bxor Y) bor Result);
+verify([], [], Result) ->
+ Result == 0.
+
+verify(<<X/binary>>, <<Y/binary>>) ->
+ verify(?b2l(X), ?b2l(Y));
+verify(X, Y) when is_list(X) and is_list(Y) ->
+ case length(X) == length(Y) of
+ true ->
+ verify(X, Y, 0);
+ false ->
+ false
+ end;
+verify(_X, _Y) -> false.
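+
+% verify/2 is a constant-time comparison: it always scans the full input,
+% accumulating differences with bxor/bor instead of returning at the first
+% mismatch, so run time reveals nothing about where two secrets diverge.
+% Illustrative call: true = couch_util:verify(<<"sig">>, <<"sig">>).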
+
+compressible_att_type(MimeType) when is_binary(MimeType) ->
+ compressible_att_type(?b2l(MimeType));
+compressible_att_type(MimeType) ->
+ TypeExpList = re:split(
+ couch_config:get("attachments", "compressible_types", ""),
+ "\\s*,\\s*",
+ [{return, list}]
+ ),
+ lists:any(
+ fun(TypeExp) ->
+ Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
+ "(?:\\s*;.*?)?\\s*", $$],
+ re:run(MimeType, Regexp, [caseless]) =/= nomatch
+ end,
+ [T || T <- TypeExpList, T /= []]
+ ).
+
+-spec md5(Data::(iolist() | binary())) -> Digest::binary().
+md5(Data) ->
+ try crypto:md5(Data) catch error:_ -> erlang:md5(Data) end.
+
+-spec md5_init() -> Context::binary().
+md5_init() ->
+ try crypto:md5_init() catch error:_ -> erlang:md5_init() end.
+
+-spec md5_update(Context::binary(), Data::(iolist() | binary())) ->
+ NewContext::binary().
+md5_update(Ctx, D) ->
+ try crypto:md5_update(Ctx,D) catch error:_ -> erlang:md5_update(Ctx,D) end.
+
+-spec md5_final(Context::binary()) -> Digest::binary().
+md5_final(Ctx) ->
+ try crypto:md5_final(Ctx) catch error:_ -> erlang:md5_final(Ctx) end.
+
+% linear search is faster for small lists; length/1 takes about 0.5 ms on a 100k-element list
+reorder_results(Keys, SortedResults) when length(Keys) < 100 ->
+ [couch_util:get_value(Key, SortedResults) || Key <- Keys];
+reorder_results(Keys, SortedResults) ->
+ KeyDict = dict:from_list(SortedResults),
+ [dict:fetch(Key, KeyDict) || Key <- Keys].
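+
+% Illustrative call: reorder_results([b, a], [{a, 1}, {b, 2}]) -> [2, 1].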
+
+url_strip_password(Url) ->
+ re:replace(Url,
+ "http(s)?://([^:]+):[^@]+@(.*)$",
+ "http\\1://\\2:*****@\\3",
+ [{return, list}]).
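+
+% Illustrative call:
+%   url_strip_password("https://admin:secret@localhost:5984/db")
+%     -> "https://admin:*****@localhost:5984/db"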
+
+encode_doc_id(#doc{id = Id}) ->
+ encode_doc_id(Id);
+encode_doc_id(Id) when is_list(Id) ->
+ encode_doc_id(?l2b(Id));
+encode_doc_id(<<"_design/", Rest/binary>>) ->
+ "_design/" ++ url_encode(Rest);
+encode_doc_id(<<"_local/", Rest/binary>>) ->
+ "_local/" ++ url_encode(Rest);
+encode_doc_id(Id) ->
+ url_encode(Id).
diff --git a/1.1.x/src/couchdb/couch_uuids.erl b/1.1.x/src/couchdb/couch_uuids.erl
new file mode 100644
index 00000000..e1851e1d
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_uuids.erl
@@ -0,0 +1,95 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+-module(couch_uuids).
+-include("couch_db.hrl").
+
+-behaviour(gen_server).
+
+-export([start/0, stop/0]).
+-export([new/0, random/0, utc_random/0]).
+
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+start() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+stop() ->
+ gen_server:cast(?MODULE, stop).
+
+new() ->
+ gen_server:call(?MODULE, create).
+
+random() ->
+ list_to_binary(couch_util:to_hex(crypto:rand_bytes(16))).
+
+utc_random() ->
+ Now = {_, _, Micro} = now(),
+ Nowish = calendar:now_to_universal_time(Now),
+ Nowsecs = calendar:datetime_to_gregorian_seconds(Nowish),
+ Then = calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}),
+ Prefix = io_lib:format("~14.16.0b", [(Nowsecs - Then) * 1000000 + Micro]),
+ list_to_binary(Prefix ++ couch_util:to_hex(crypto:rand_bytes(9))).
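+
+% utc_random/0 yields 32 hex characters: a 14-digit hex prefix encoding the
+% microseconds since the Unix epoch followed by 18 random hex digits, so ids
+% sort roughly by creation time. Illustrative shape:
+%   <<"00049e9a2f7b4e21a6f8c3d0e5b17a9c">>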
+
+init([]) ->
+ ok = couch_config:register(
+ fun("uuids", _) -> gen_server:cast(?MODULE, change) end
+ ),
+ {ok, state()}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+handle_call(create, _From, random) ->
+ {reply, random(), random};
+handle_call(create, _From, utc_random) ->
+ {reply, utc_random(), utc_random};
+handle_call(create, _From, {sequential, Pref, Seq}) ->
+ Result = ?l2b(Pref ++ io_lib:format("~6.16.0b", [Seq])),
+ case Seq >= 16#fff000 of
+ true ->
+ {reply, Result, {sequential, new_prefix(), inc()}};
+ _ ->
+ {reply, Result, {sequential, Pref, Seq + inc()}}
+ end.
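+
+% The sequential state above pairs a random 26-hex-digit prefix (13 bytes)
+% with a 6-hex-digit counter bumped by a random 1..16#ffd per uuid; once the
+% counter reaches 16#fff000 a fresh prefix is rolled. Ids are therefore
+% increasing for as long as a prefix lives, which keeps btree inserts
+% localized.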
+
+handle_cast(change, _State) ->
+ {noreply, state()};
+handle_cast(stop, State) ->
+ {stop, normal, State};
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+new_prefix() ->
+ couch_util:to_hex((crypto:rand_bytes(13))).
+
+inc() ->
+ crypto:rand_uniform(1, 16#ffe).
+
+state() ->
+ AlgoStr = couch_config:get("uuids", "algorithm", "random"),
+ case couch_util:to_existing_atom(AlgoStr) of
+ random ->
+ random;
+ utc_random ->
+ utc_random;
+ sequential ->
+ {sequential, new_prefix(), inc()};
+ Unknown ->
+ throw({unknown_uuid_algorithm, Unknown})
+ end.
diff --git a/1.1.x/src/couchdb/couch_view.erl b/1.1.x/src/couchdb/couch_view.erl
new file mode 100644
index 00000000..911f1aa6
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_view.erl
@@ -0,0 +1,460 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_view).
+-behaviour(gen_server).
+
+-export([start_link/0,fold/4,less_json/2,less_json_ids/2,expand_dups/2,
+ detuple_kvs/2,init/1,terminate/2,handle_call/3,handle_cast/2,handle_info/2,
+ code_change/3,get_reduce_view/4,get_temp_reduce_view/5,get_temp_map_view/4,
+ get_map_view/4,get_row_count/1,reduce_to_count/1,fold_reduce/4,
+ extract_map_view/1,get_group_server/2,get_group_info/2,cleanup_index_files/1]).
+
+-include("couch_db.hrl").
+
+
+-record(server,{
+ root_dir = []}).
+
+start_link() ->
+ gen_server:start_link({local, couch_view}, couch_view, [], []).
+
+get_temp_updater(DbName, Language, DesignOptions, MapSrc, RedSrc) ->
+ {ok, Group} =
+ couch_view_group:open_temp_group(DbName, Language, DesignOptions, MapSrc, RedSrc),
+ case gen_server:call(couch_view, {get_group_server, DbName, Group}, infinity) of
+ {ok, Pid} ->
+ Pid;
+ Error ->
+ throw(Error)
+ end.
+
+get_group_server(DbName, GroupId) ->
+ case couch_view_group:open_db_group(DbName, GroupId) of
+ {ok, Group} ->
+ case gen_server:call(couch_view, {get_group_server, DbName, Group}, infinity) of
+ {ok, Pid} ->
+ Pid;
+ Error ->
+ throw(Error)
+ end;
+ Error ->
+ throw(Error)
+ end.
+
+get_group(Db, GroupId, Stale) ->
+ MinUpdateSeq = case Stale of
+ ok -> 0;
+ update_after -> 0;
+ _Else -> couch_db:get_update_seq(Db)
+ end,
+ GroupPid = get_group_server(couch_db:name(Db), GroupId),
+ Result = couch_view_group:request_group(GroupPid, MinUpdateSeq),
+ case Stale of
+ update_after ->
+ % best effort, process might die
+ spawn(fun() ->
+ LastSeq = couch_db:get_update_seq(Db),
+ couch_view_group:request_group(GroupPid, LastSeq)
+ end);
+ _ ->
+ ok
+ end,
+ Result.
+
+get_temp_group(Db, Language, DesignOptions, MapSrc, RedSrc) ->
+ couch_view_group:request_group(
+ get_temp_updater(couch_db:name(Db), Language, DesignOptions, MapSrc, RedSrc),
+ couch_db:get_update_seq(Db)).
+
+get_group_info(Db, GroupId) ->
+ couch_view_group:request_group_info(
+ get_group_server(couch_db:name(Db), GroupId)).
+
+cleanup_index_files(Db) ->
+ % load all ddocs
+ {ok, DesignDocs} = couch_db:get_design_docs(Db),
+
+ % make unique list of group sigs
+ Sigs = lists:map(fun(#doc{id = GroupId}) ->
+ {ok, Info} = get_group_info(Db, GroupId),
+ ?b2l(couch_util:get_value(signature, Info))
+ end, [DD||DD <- DesignDocs, DD#doc.deleted == false]),
+
+ FileList = list_index_files(Db),
+
+ % regex that matches all ddocs
+ RegExp = "("++ string:join(Sigs, "|") ++")",
+
+ % filter out the ones in use
+ DeleteFiles = [FilePath
+ || FilePath <- FileList,
+ re:run(FilePath, RegExp, [{capture, none}]) =:= nomatch],
+ % delete unused files
+ ?LOG_DEBUG("deleting unused view index files: ~p",[DeleteFiles]),
+ RootDir = couch_config:get("couchdb", "view_index_dir"),
+ [couch_file:delete(RootDir,File,false)||File <- DeleteFiles],
+ ok.
+
+list_index_files(Db) ->
+ % call server to fetch the index files
+ RootDir = couch_config:get("couchdb", "view_index_dir"),
+ filelib:wildcard(RootDir ++ "/." ++ ?b2l(couch_db:name(Db)) ++ "_design"++"/*").
+
+
+get_row_count(#view{btree=Bt}) ->
+ {ok, {Count, _Reds}} = couch_btree:full_reduce(Bt),
+ {ok, Count}.
+
+get_temp_reduce_view(Db, Language, DesignOptions, MapSrc, RedSrc) ->
+ {ok, #group{views=[View]}=Group} =
+ get_temp_group(Db, Language, DesignOptions, MapSrc, RedSrc),
+ {ok, {temp_reduce, View}, Group}.
+
+
+get_reduce_view(Db, GroupId, Name, Update) ->
+ case get_group(Db, GroupId, Update) of
+ {ok, #group{views=Views,def_lang=Lang}=Group} ->
+ case get_reduce_view0(Name, Lang, Views) of
+ {ok, View} ->
+ {ok, View, Group};
+ Else ->
+ Else
+ end;
+ Error ->
+ Error
+ end.
+
+get_reduce_view0(_Name, _Lang, []) ->
+ {not_found, missing_named_view};
+get_reduce_view0(Name, Lang, [#view{reduce_funs=RedFuns}=View|Rest]) ->
+ case get_key_pos(Name, RedFuns, 0) of
+ 0 -> get_reduce_view0(Name, Lang, Rest);
+ N -> {ok, {reduce, N, Lang, View}}
+ end.
+
+extract_map_view({reduce, _N, _Lang, View}) ->
+ View.
+
+detuple_kvs([], Acc) ->
+ lists:reverse(Acc);
+detuple_kvs([KV | Rest], Acc) ->
+ {{Key,Id},Value} = KV,
+ NKV = [[Key, Id], Value],
+ detuple_kvs(Rest, [NKV | Acc]).
+
+expand_dups([], Acc) ->
+ lists:reverse(Acc);
+expand_dups([{Key, {dups, Vals}} | Rest], Acc) ->
+ Expanded = [{Key, Val} || Val <- Vals],
+ expand_dups(Rest, Expanded ++ Acc);
+expand_dups([KV | Rest], Acc) ->
+ expand_dups(Rest, [KV | Acc]).
+
+fold_reduce({temp_reduce, #view{btree=Bt}}, Fun, Acc, Options) ->
+ WrapperFun = fun({GroupedKey, _}, PartialReds, Acc0) ->
+ {_, [Red]} = couch_btree:final_reduce(Bt, PartialReds),
+ Fun(GroupedKey, Red, Acc0)
+ end,
+ couch_btree:fold_reduce(Bt, WrapperFun, Acc, Options);
+
+fold_reduce({reduce, NthRed, Lang, #view{btree=Bt, reduce_funs=RedFuns}}, Fun, Acc, Options) ->
+ PreResultPadding = lists:duplicate(NthRed - 1, []),
+ PostResultPadding = lists:duplicate(length(RedFuns) - NthRed, []),
+ {_Name, FunSrc} = lists:nth(NthRed,RedFuns),
+ ReduceFun =
+ fun(reduce, KVs) ->
+ {ok, Reduced} = couch_query_servers:reduce(Lang, [FunSrc], detuple_kvs(expand_dups(KVs, []),[])),
+ {0, PreResultPadding ++ Reduced ++ PostResultPadding};
+ (rereduce, Reds) ->
+ UserReds = [[lists:nth(NthRed, UserRedsList)] || {_, UserRedsList} <- Reds],
+ {ok, Reduced} = couch_query_servers:rereduce(Lang, [FunSrc], UserReds),
+ {0, PreResultPadding ++ Reduced ++ PostResultPadding}
+ end,
+ WrapperFun = fun({GroupedKey, _}, PartialReds, Acc0) ->
+ {_, Reds} = couch_btree:final_reduce(ReduceFun, PartialReds),
+ Fun(GroupedKey, lists:nth(NthRed, Reds), Acc0)
+ end,
+ couch_btree:fold_reduce(Bt, WrapperFun, Acc, Options).
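+
+% The padding above slots the single reduce function's result into position
+% NthRed of the reduction list shared by all of the view's reduce funs, so
+% one btree reduction layout can serve every named reduce function.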
+
+get_key_pos(_Key, [], _N) ->
+ 0;
+get_key_pos(Key, [{Key1,_Value}|_], N) when Key == Key1 ->
+ N + 1;
+get_key_pos(Key, [_|Rest], N) ->
+ get_key_pos(Key, Rest, N+1).
+
+
+get_temp_map_view(Db, Language, DesignOptions, Src) ->
+ {ok, #group{views=[View]}=Group} = get_temp_group(Db, Language, DesignOptions, Src, []),
+ {ok, View, Group}.
+
+get_map_view(Db, GroupId, Name, Stale) ->
+ case get_group(Db, GroupId, Stale) of
+ {ok, #group{views=Views}=Group} ->
+ case get_map_view0(Name, Views) of
+ {ok, View} ->
+ {ok, View, Group};
+ Else ->
+ Else
+ end;
+ Error ->
+ Error
+ end.
+
+get_map_view0(_Name, []) ->
+ {not_found, missing_named_view};
+get_map_view0(Name, [#view{map_names=MapNames}=View|Rest]) ->
+ case lists:member(Name, MapNames) of
+ true -> {ok, View};
+ false -> get_map_view0(Name, Rest)
+ end.
+
+reduce_to_count(Reductions) ->
+ {Count, _} =
+ couch_btree:final_reduce(
+ fun(reduce, KVs) ->
+ Count = lists:sum(
+ [case V of {dups, Vals} -> length(Vals); _ -> 1 end
+ || {_,V} <- KVs]),
+ {Count, []};
+ (rereduce, Reds) ->
+ {lists:sum([Count0 || {Count0, _} <- Reds]), []}
+ end, Reductions),
+ Count.
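+
+% Example (illustrative, values hypothetical): the reduce counts a
+% {dups, Vals} value as length(Vals) rows, so a leaf holding
+%   [{{k1, id1}, {dups, [a, b]}}, {{k2, id2}, v}]
+% reduces to {3, []}, and the rereduce simply sums the per-node counts.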
+
+
+
+fold_fun(_Fun, [], _, Acc) ->
+ {ok, Acc};
+fold_fun(Fun, [KV|Rest], {KVReds, Reds}, Acc) ->
+ case Fun(KV, {KVReds, Reds}, Acc) of
+ {ok, Acc2} ->
+ fold_fun(Fun, Rest, {[KV|KVReds], Reds}, Acc2);
+ {stop, Acc2} ->
+ {stop, Acc2}
+ end.
+
+
+fold(#view{btree=Btree}, Fun, Acc, Options) ->
+ WrapperFun =
+ fun(KV, Reds, Acc2) ->
+ fold_fun(Fun, expand_dups([KV],[]), Reds, Acc2)
+ end,
+ {ok, _LastReduce, _AccResult} = couch_btree:fold(Btree, WrapperFun, Acc, Options).
+
+
+init([]) ->
+ % read configuration settings and register for configuration changes
+ RootDir = couch_config:get("couchdb", "view_index_dir"),
+ Self = self(),
+ ok = couch_config:register(
+ fun("couchdb", "view_index_dir")->
+ exit(Self, config_change)
+ end),
+
+ couch_db_update_notifier:start_link(
+ fun({deleted, DbName}) ->
+ gen_server:cast(couch_view, {reset_indexes, DbName});
+ ({created, DbName}) ->
+ gen_server:cast(couch_view, {reset_indexes, DbName});
+ (_Else) ->
+ ok
+ end),
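+ % Three ets tables track the running group servers (these are the tables
+ % created just below, not new ones):
+ %   couch_groups_by_db:      DbName -> Sig (bag; all groups per database)
+ %   group_servers_by_sig:    {DbName, Sig} -> Pid, or a waiter list while
+ %                            the group server is still being spawned
+ %   couch_groups_by_updater: Pid -> {DbName, Sig} (reverse lookup on 'EXIT')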
+ ets:new(couch_groups_by_db, [bag, private, named_table]),
+ ets:new(group_servers_by_sig, [set, protected, named_table]),
+ ets:new(couch_groups_by_updater, [set, private, named_table]),
+ process_flag(trap_exit, true),
+ ok = couch_file:init_delete_dir(RootDir),
+ {ok, #server{root_dir=RootDir}}.
+
+
+terminate(_Reason, _Srv) ->
+ [couch_util:shutdown_sync(Pid) || {Pid, _} <-
+ ets:tab2list(couch_groups_by_updater)],
+ ok.
+
+
+handle_call({get_group_server, DbName, #group{sig=Sig}=Group}, From,
+ #server{root_dir=Root}=Server) ->
+ case ets:lookup(group_servers_by_sig, {DbName, Sig}) of
+ [] ->
+ spawn_monitor(fun() -> new_group(Root, DbName, Group) end),
+ ets:insert(group_servers_by_sig, {{DbName, Sig}, [From]}),
+ {noreply, Server};
+ [{_, WaitList}] when is_list(WaitList) ->
+ ets:insert(group_servers_by_sig, {{DbName, Sig}, [From | WaitList]}),
+ {noreply, Server};
+ [{_, ExistingPid}] ->
+ {reply, {ok, ExistingPid}, Server}
+ end;
+
+handle_call({reset_indexes, DbName}, _From, #server{root_dir=Root}=Server) ->
+ do_reset_indexes(DbName, Root),
+ {reply, ok, Server}.
+
+handle_cast({reset_indexes, DbName}, #server{root_dir=Root}=Server) ->
+ do_reset_indexes(DbName, Root),
+ {noreply, Server}.
+
+new_group(Root, DbName, #group{name=GroupId, sig=Sig} = Group) ->
+ ?LOG_DEBUG("Spawning new group server for view group ~s in database ~s.",
+ [GroupId, DbName]),
+ case (catch couch_view_group:start_link({Root, DbName, Group})) of
+ {ok, NewPid} ->
+ unlink(NewPid),
+ exit({DbName, Sig, {ok, NewPid}});
+ {error, invalid_view_seq} ->
+ ok = gen_server:call(couch_view, {reset_indexes, DbName}),
+ new_group(Root, DbName, Group);
+ Error ->
+ exit({DbName, Sig, Error})
+ end.
+
+do_reset_indexes(DbName, Root) ->
+ % the db has changed; shut down all its updaters and delete the index files
+ Names = ets:lookup(couch_groups_by_db, DbName),
+ lists:foreach(
+ fun({_DbName, Sig}) ->
+ ?LOG_DEBUG("Killing update process for view group ~s in database ~s.", [Sig, DbName]),
+ [{_, Pid}] = ets:lookup(group_servers_by_sig, {DbName, Sig}),
+ couch_util:shutdown_sync(Pid),
+ delete_from_ets(Pid, DbName, Sig)
+ end, Names),
+ delete_index_dir(Root, DbName),
+ RootDelDir = couch_config:get("couchdb", "view_index_dir"),
+ couch_file:delete(RootDelDir, Root ++ "/." ++ ?b2l(DbName) ++ "_temp").
+
+handle_info({'EXIT', FromPid, Reason}, Server) ->
+ case ets:lookup(couch_groups_by_updater, FromPid) of
+ [] ->
+ if Reason /= normal ->
+ % non-updater linked process died, we propagate the error
+ ?LOG_ERROR("Exit on non-updater process: ~p", [Reason]),
+ exit(Reason);
+ true -> ok
+ end;
+ [{_, {DbName, GroupId}}] ->
+ delete_from_ets(FromPid, DbName, GroupId)
+ end,
+ {noreply, Server};
+
+handle_info({'DOWN', _, _, _, {DbName, Sig, Reply}}, Server) ->
+ [{_, WaitList}] = ets:lookup(group_servers_by_sig, {DbName, Sig}),
+ [gen_server:reply(From, Reply) || From <- WaitList],
+ case Reply of {ok, NewPid} ->
+ link(NewPid),
+ add_to_ets(NewPid, DbName, Sig);
+ _ -> ok end,
+ {noreply, Server}.
+
+add_to_ets(Pid, DbName, Sig) ->
+ true = ets:insert(couch_groups_by_updater, {Pid, {DbName, Sig}}),
+ true = ets:insert(group_servers_by_sig, {{DbName, Sig}, Pid}),
+ true = ets:insert(couch_groups_by_db, {DbName, Sig}).
+
+delete_from_ets(Pid, DbName, Sig) ->
+ true = ets:delete(couch_groups_by_updater, Pid),
+ true = ets:delete(group_servers_by_sig, {DbName, Sig}),
+ true = ets:delete_object(couch_groups_by_db, {DbName, Sig}).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+delete_index_dir(RootDir, DbName) ->
+ nuke_dir(RootDir, RootDir ++ "/." ++ ?b2l(DbName) ++ "_design").
+
+nuke_dir(RootDelDir, Dir) ->
+ case file:list_dir(Dir) of
+ {error, enoent} -> ok; % doesn't exist
+ {ok, Files} ->
+ lists:foreach(
+ fun(File)->
+ Full = Dir ++ "/" ++ File,
+ case couch_file:delete(RootDelDir, Full, false) of
+ ok -> ok;
+ {error, eperm} ->
+ ok = nuke_dir(RootDelDir, Full)
+ end
+ end,
+ Files),
+ ok = file:del_dir(Dir)
+ end.
+
+
+% Keys come back in the btree's native representation: tuples.
+less_json_ids({JsonA, IdA}, {JsonB, IdB}) ->
+ case less_json0(JsonA, JsonB) of
+ 0 ->
+ IdA < IdB;
+ Result ->
+ Result < 0
+ end.
+
+less_json(A,B) ->
+ less_json0(A,B) < 0.
+
+less_json0(A,A) -> 0;
+
+less_json0(A,B) when is_atom(A), is_atom(B) -> atom_sort(A) - atom_sort(B);
+less_json0(A,_) when is_atom(A) -> -1;
+less_json0(_,B) when is_atom(B) -> 1;
+
+less_json0(A,B) when is_number(A), is_number(B) -> A - B;
+less_json0(A,_) when is_number(A) -> -1;
+less_json0(_,B) when is_number(B) -> 1;
+
+less_json0(A,B) when is_binary(A), is_binary(B) -> couch_util:collate(A,B);
+less_json0(A,_) when is_binary(A) -> -1;
+less_json0(_,B) when is_binary(B) -> 1;
+
+less_json0(A,B) when is_list(A), is_list(B) -> less_list(A,B);
+less_json0(A,_) when is_list(A) -> -1;
+less_json0(_,B) when is_list(B) -> 1;
+
+less_json0({A},{B}) when is_list(A), is_list(B) -> less_props(A,B);
+less_json0({A},_) when is_list(A) -> -1;
+less_json0(_,{B}) when is_list(B) -> 1.
+
+atom_sort(null) -> 1;
+atom_sort(false) -> 2;
+atom_sort(true) -> 3.
+
+less_props([], [_|_]) ->
+ -1;
+less_props(_, []) ->
+ 1;
+less_props([{AKey, AValue}|RestA], [{BKey, BValue}|RestB]) ->
+ case couch_util:collate(AKey, BKey) of
+ 0 ->
+ case less_json0(AValue, BValue) of
+ 0 ->
+ less_props(RestA, RestB);
+ Result ->
+ Result
+ end;
+ Result ->
+ Result
+ end.
+
+less_list([], [_|_]) ->
+ -1;
+less_list(_, []) ->
+ 1;
+less_list([A|RestA], [B|RestB]) ->
+ case less_json0(A,B) of
+ 0 ->
+ less_list(RestA, RestB);
+ Result ->
+ Result
+ end.
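+
+% Example (illustrative, values hypothetical): less_json0/2 implements the
+% view collation order across JSON types: null < false < true < numbers <
+% strings (ICU collation via couch_util:collate/2) < arrays < objects. So:
+%
+%   less_json(null, false)           => true
+%   less_json(1, <<"a">>)            => true
+%   less_json([1], {[{<<"a">>, 1}]}) => true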
diff --git a/1.1.x/src/couchdb/couch_view_compactor.erl b/1.1.x/src/couchdb/couch_view_compactor.erl
new file mode 100644
index 00000000..9a47f5f8
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_view_compactor.erl
@@ -0,0 +1,102 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_view_compactor).
+
+-include("couch_db.hrl").
+
+-export([start_compact/2]).
+
+%% @spec start_compact(DbName::binary(), GroupId::binary()) -> ok
+%% @doc Compacts the views. GroupId must not include the _design/ prefix
+start_compact(DbName, GroupId) ->
+ Pid = couch_view:get_group_server(DbName, <<"_design/",GroupId/binary>>),
+ gen_server:cast(Pid, {start_compact, fun compact_group/2}).
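+
+% Example (illustrative): trigger compaction for the view group of a
+% hypothetical design doc _design/myapp in database <<"db">>; the cast
+% returns ok immediately and compaction proceeds in the group server:
+%
+%   ok = couch_view_compactor:start_compact(<<"db">>, <<"myapp">>).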
+
+%%=============================================================================
+%% internal functions
+%%=============================================================================
+
+%% @spec compact_group(Group, EmptyGroup) -> ok
+compact_group(Group, EmptyGroup) ->
+ #group{
+ current_seq = Seq,
+ id_btree = IdBtree,
+ name = GroupId,
+ views = Views
+ } = Group,
+
+ #group{
+ db = Db,
+ id_btree = EmptyIdBtree,
+ views = EmptyViews
+ } = EmptyGroup,
+
+ {ok, {Count, _}} = couch_btree:full_reduce(Db#db.fulldocinfo_by_id_btree),
+
+ <<"_design", ShortName/binary>> = GroupId,
+ DbName = couch_db:name(Db),
+ TaskName = <<DbName/binary, ShortName/binary>>,
+ couch_task_status:add_task(<<"View Group Compaction">>, TaskName, <<"">>),
+
+ Fun = fun({DocId, _ViewIdKeys} = KV, {Bt, Acc, TotalCopied, LastId}) ->
+ if DocId =:= LastId -> % COUCHDB-999
+ Msg = "Duplicates of ~s detected in ~s ~s - rebuild required",
+ exit(io_lib:format(Msg, [DocId, DbName, GroupId]));
+ true -> ok end,
+ if TotalCopied rem 10000 =:= 0 ->
+ couch_task_status:update("Copied ~p of ~p Ids (~p%)",
+ [TotalCopied, Count, (TotalCopied*100) div Count]),
+ {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV|Acc])),
+ {ok, {Bt2, [], TotalCopied+1, DocId}};
+ true ->
+ {ok, {Bt, [KV|Acc], TotalCopied+1, DocId}}
+ end
+ end,
+ {ok, _, {Bt3, Uncopied, _Total, _LastId}} = couch_btree:foldl(IdBtree, Fun,
+ {EmptyIdBtree, [], 0, nil}),
+ {ok, NewIdBtree} = couch_btree:add(Bt3, lists:reverse(Uncopied)),
+
+ NewViews = lists:map(fun({View, EmptyView}) ->
+ compact_view(View, EmptyView)
+ end, lists:zip(Views, EmptyViews)),
+
+ NewGroup = EmptyGroup#group{
+ id_btree=NewIdBtree,
+ views=NewViews,
+ current_seq=Seq
+ },
+
+ Pid = couch_view:get_group_server(DbName, GroupId),
+ gen_server:cast(Pid, {compact_done, NewGroup}).
+
+%% @spec compact_view(View, EmptyView) -> CompactView
+compact_view(View, EmptyView) ->
+ {ok, Count} = couch_view:get_row_count(View),
+
+ %% Key is {Key,DocId}
+ Fun = fun(KV, {Bt, Acc, TotalCopied}) ->
+ if TotalCopied rem 10000 =:= 0 ->
+ couch_task_status:update("View #~p: copied ~p of ~p KVs (~p%)",
+ [View#view.id_num, TotalCopied, Count, (TotalCopied*100) div Count]),
+ {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV|Acc])),
+ {ok, {Bt2, [], TotalCopied + 1}};
+ true ->
+ {ok, {Bt, [KV|Acc], TotalCopied + 1}}
+ end
+ end,
+
+ {ok, _, {Bt3, Uncopied, _Total}} = couch_btree:foldl(View#view.btree, Fun,
+ {EmptyView#view.btree, [], 0}),
+ {ok, NewBt} = couch_btree:add(Bt3, lists:reverse(Uncopied)),
+ EmptyView#view{btree = NewBt}.
+
diff --git a/1.1.x/src/couchdb/couch_view_group.erl b/1.1.x/src/couchdb/couch_view_group.erl
new file mode 100644
index 00000000..6ef1dcb4
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_view_group.erl
@@ -0,0 +1,642 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_view_group).
+-behaviour(gen_server).
+
+%% API
+-export([start_link/1, request_group/2, request_group_info/1]).
+-export([open_db_group/2, open_temp_group/5, design_doc_to_view_group/1, design_root/2]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-include("couch_db.hrl").
+
+-record(group_state, {
+ type,
+ db_name,
+ init_args,
+ group,
+ updater_pid=nil,
+ compactor_pid=nil,
+ waiting_commit=false,
+ waiting_list=[],
+ ref_counter=nil
+}).
+
+% api methods
+request_group(Pid, Seq) ->
+ ?LOG_DEBUG("request_group {Pid, Seq} ~p", [{Pid, Seq}]),
+ case gen_server:call(Pid, {request_group, Seq}, infinity) of
+ {ok, Group, RefCounter} ->
+ couch_ref_counter:add(RefCounter),
+ {ok, Group};
+ Error ->
+ ?LOG_DEBUG("request_group Error ~p", [Error]),
+ throw(Error)
+ end.
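+
+% Example (illustrative): callers typically request the group at the
+% database's current update sequence; given a hypothetical open Db and a
+% group server Pid:
+%
+%   Seq = couch_db:get_update_seq(Db),
+%   {ok, Group} = couch_view_group:request_group(Pid, Seq),
+%   % ... read from Group's btrees; the ref count taken above keeps the
+%   % index file open until the caller drops it or exits.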
+
+request_group_info(Pid) ->
+ case gen_server:call(Pid, request_group_info) of
+ {ok, GroupInfoList} ->
+ {ok, GroupInfoList};
+ Error ->
+ throw(Error)
+ end.
+
+% from template
+start_link(InitArgs) ->
+ case gen_server:start_link(couch_view_group,
+ {InitArgs, self(), Ref = make_ref()}, []) of
+ {ok, Pid} ->
+ {ok, Pid};
+ ignore ->
+ receive
+ {Ref, Pid, Error} ->
+ case process_info(self(), trap_exit) of
+ {trap_exit, true} -> receive {'EXIT', Pid, _} -> ok end;
+ {trap_exit, false} -> ok
+ end,
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+% init creates a closure which spawns the appropriate view_updater.
+init({{_, DbName, _} = InitArgs, ReturnPid, Ref}) ->
+ process_flag(trap_exit, true),
+ try prepare_group(InitArgs, false) of
+ {ok, #group{db=Db, fd=Fd, current_seq=Seq}=Group} ->
+ case Seq > couch_db:get_update_seq(Db) of
+ true ->
+ ReturnPid ! {Ref, self(), {error, invalid_view_seq}},
+ ignore;
+ _ ->
+ couch_db:monitor(Db),
+ couch_db:close(Db),
+ {ok, RefCounter} = couch_ref_counter:start([Fd]),
+ {ok, #group_state{
+ db_name=DbName,
+ init_args=InitArgs,
+ group=Group#group{db=nil},
+ ref_counter=RefCounter}}
+ end;
+ Error ->
+ ReturnPid ! {Ref, self(), Error},
+ ignore
+ catch exit:no_db_file ->
+ ReturnPid ! {Ref, self(), {error, no_db_file}},
+ ignore
+ end.
+
+
+
+
+% There are two sources of messages: couch_view, which requests an up-to-date
+% view group, and the couch_view_updater, which, when spawned, updates the
+% group and sends it back here. We employ a caching mechanism so that between
+% database writes we don't have to spawn a couch_view_updater for every view
+% request.
+
+% The caching mechanism: each request is submitted with a seq_id for the
+% database at the time it was read. We guarantee to return a view from that
+% sequence or newer.
+
+% If the request sequence is higher than our current high_target seq, we set
+% that as the highest sequence. If the updater is not running, we launch it.
+
+handle_call({request_group, RequestSeq}, From,
+ #group_state{
+ db_name=DbName,
+ group=#group{current_seq=Seq}=Group,
+ updater_pid=nil,
+ waiting_list=WaitList
+ }=State) when RequestSeq > Seq ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ Group2 = Group#group{db=Db},
+ Owner = self(),
+ Pid = spawn_link(fun()-> couch_view_updater:update(Owner, Group2) end),
+
+ {noreply, State#group_state{
+ updater_pid=Pid,
+ group=Group2,
+ waiting_list=[{From,RequestSeq}|WaitList]
+ }, infinity};
+
+
+% If the request sequence is less than or equal to the seq_id of a known Group,
+% we respond with that Group.
+handle_call({request_group, RequestSeq}, _From, #group_state{
+ group = #group{current_seq=GroupSeq} = Group,
+ ref_counter = RefCounter
+ } = State) when RequestSeq =< GroupSeq ->
+ {reply, {ok, Group, RefCounter}, State};
+
+% Otherwise: TargetSeq >= RequestSeq > GroupSeq.
+% We've already initiated the appropriate action, so just hold the response
+% until the group catches up to the RequestSeq.
+handle_call({request_group, RequestSeq}, From,
+ #group_state{waiting_list=WaitList}=State) ->
+ {noreply, State#group_state{
+ waiting_list=[{From, RequestSeq}|WaitList]
+ }, infinity};
+
+handle_call(request_group_info, _From, State) ->
+ GroupInfo = get_group_info(State),
+ {reply, {ok, GroupInfo}, State}.
+
+handle_cast({start_compact, CompactFun}, #group_state{compactor_pid=nil}
+ = State) ->
+ #group_state{
+ group = #group{name = GroupId, sig = GroupSig} = Group,
+ init_args = {RootDir, DbName, _}
+ } = State,
+ ?LOG_INFO("View index compaction starting for ~s ~s", [DbName, GroupId]),
+ {ok, Db} = couch_db:open_int(DbName, []),
+ {ok, Fd} = open_index_file(compact, RootDir, DbName, GroupSig),
+ NewGroup = reset_file(Db, Fd, DbName, Group),
+ Pid = spawn_link(fun() -> CompactFun(Group, NewGroup) end),
+ {noreply, State#group_state{compactor_pid = Pid}};
+handle_cast({start_compact, _}, State) ->
+ %% compact already running, this is a no-op
+ {noreply, State};
+
+handle_cast({compact_done, #group{current_seq=NewSeq} = NewGroup},
+ #group_state{group = #group{current_seq=OldSeq}} = State)
+ when NewSeq >= OldSeq ->
+ #group_state{
+ group = #group{name=GroupId, fd=OldFd, sig=GroupSig} = Group,
+ init_args = {RootDir, DbName, _},
+ updater_pid = UpdaterPid,
+ compactor_pid = CompactorPid,
+ ref_counter = RefCounter
+ } = State,
+
+ ?LOG_INFO("View index compaction complete for ~s ~s", [DbName, GroupId]),
+ FileName = index_file_name(RootDir, DbName, GroupSig),
+ CompactName = index_file_name(compact, RootDir, DbName, GroupSig),
+ ok = couch_file:delete(RootDir, FileName),
+ ok = file:rename(CompactName, FileName),
+
+ %% if an updater is running, kill it and start a new one
+ NewUpdaterPid =
+ if is_pid(UpdaterPid) ->
+ unlink(UpdaterPid),
+ exit(UpdaterPid, view_compaction_complete),
+ Owner = self(),
+ spawn_link(fun()-> couch_view_updater:update(Owner, NewGroup) end);
+ true ->
+ nil
+ end,
+
+ %% cleanup old group
+ unlink(CompactorPid),
+ receive {'EXIT', CompactorPid, normal} -> ok after 0 -> ok end,
+ unlink(OldFd),
+ couch_ref_counter:drop(RefCounter),
+ {ok, NewRefCounter} = couch_ref_counter:start([NewGroup#group.fd]),
+ case Group#group.db of
+ nil -> ok;
+ Else -> couch_db:close(Else)
+ end,
+
+ case NewGroup#group.db of
+ nil -> ok;
+ _ -> couch_db:close(NewGroup#group.db)
+ end,
+
+ self() ! delayed_commit,
+ {noreply, State#group_state{
+ group=NewGroup#group{db = nil},
+ ref_counter=NewRefCounter,
+ compactor_pid=nil,
+ updater_pid=NewUpdaterPid
+ }};
+handle_cast({compact_done, NewGroup}, State) ->
+ #group_state{
+ group = #group{name = GroupId, current_seq = CurrentSeq},
+ init_args={_RootDir, DbName, _}
+ } = State,
+ ?LOG_INFO("View index compaction still behind for ~s ~s -- current: ~p " ++
+ "compact: ~p", [DbName, GroupId, CurrentSeq, NewGroup#group.current_seq]),
+ couch_db:close(NewGroup#group.db),
+ Pid = spawn_link(fun() ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ {_,Ref} = erlang:spawn_monitor(fun() ->
+ couch_view_updater:update(nil, NewGroup#group{db = Db})
+ end),
+ receive
+ {'DOWN', Ref, _, _, {new_group, NewGroup2}} ->
+ couch_db:close(Db),
+ #group{name=GroupId} = NewGroup2,
+ Pid2 = couch_view:get_group_server(DbName, GroupId),
+ gen_server:cast(Pid2, {compact_done, NewGroup2#group{db = nil}})
+ end
+ end),
+ {noreply, State#group_state{compactor_pid = Pid}};
+
+handle_cast({partial_update, Pid, NewGroup}, #group_state{updater_pid=Pid}
+ = State) ->
+ #group_state{
+ db_name = DbName,
+ waiting_commit = WaitingCommit
+ } = State,
+ NewSeq = NewGroup#group.current_seq,
+ ?LOG_INFO("checkpointing view update at seq ~p for ~s ~s", [NewSeq,
+ DbName, NewGroup#group.name]),
+ if not WaitingCommit ->
+ erlang:send_after(1000, self(), delayed_commit);
+ true -> ok
+ end,
+ {noreply, State#group_state{group=NewGroup, waiting_commit=true}};
+handle_cast({partial_update, _, _}, State) ->
+ %% message from an old (probably pre-compaction) updater; ignore
+ {noreply, State}.
+
+handle_info(delayed_commit, #group_state{db_name=DbName,group=Group}=State) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ CommittedSeq = couch_db:get_committed_update_seq(Db),
+ couch_db:close(Db),
+ if CommittedSeq >= Group#group.current_seq ->
+ % save the header
+ Header = {Group#group.sig, get_index_header_data(Group)},
+ ok = couch_file:write_header(Group#group.fd, Header),
+ {noreply, State#group_state{waiting_commit=false}};
+ true ->
+ % We can't commit the header because the database seq that's fully
+ % committed to disk is still behind us. If we committed now and the
+ % database lost those changes our view could be forever out of sync
+ % with the database. But if we crash before committing these changes, it's
+ % no big deal; we only lose the incremental changes made since the last
+ % commit.
+ erlang:send_after(1000, self(), delayed_commit),
+ {noreply, State#group_state{waiting_commit=true}}
+ end;
+
+handle_info({'EXIT', FromPid, {new_group, #group{db=Db}=Group}},
+ #group_state{db_name=DbName,
+ updater_pid=UpPid,
+ ref_counter=RefCounter,
+ waiting_list=WaitList,
+ waiting_commit=WaitingCommit}=State) when UpPid == FromPid ->
+ ok = couch_db:close(Db),
+ if not WaitingCommit ->
+ erlang:send_after(1000, self(), delayed_commit);
+ true -> ok
+ end,
+ case reply_with_group(Group, WaitList, [], RefCounter) of
+ [] ->
+ {noreply, State#group_state{waiting_commit=true, waiting_list=[],
+ group=Group#group{db=nil}, updater_pid=nil}};
+ StillWaiting ->
+ % we still have some waiters, reopen the database and reupdate the index
+ {ok, Db2} = couch_db:open_int(DbName, []),
+ Group2 = Group#group{db=Db2},
+ Owner = self(),
+ Pid = spawn_link(fun() -> couch_view_updater:update(Owner, Group2) end),
+ {noreply, State#group_state{waiting_commit=true,
+ waiting_list=StillWaiting, group=Group2, updater_pid=Pid}}
+ end;
+handle_info({'EXIT', _, {new_group, _}}, State) ->
+ %% message from an old (probably pre-compaction) updater; ignore
+ {noreply, State};
+
+handle_info({'EXIT', FromPid, reset},
+ #group_state{
+ init_args=InitArgs,
+ updater_pid=UpPid,
+ group=Group}=State) when UpPid == FromPid ->
+ ok = couch_db:close(Group#group.db),
+ case prepare_group(InitArgs, true) of
+ {ok, ResetGroup} ->
+ Owner = self(),
+ Pid = spawn_link(fun()-> couch_view_updater:update(Owner, ResetGroup) end),
+ {noreply, State#group_state{
+ updater_pid=Pid,
+ group=ResetGroup}};
+ Error ->
+ {stop, normal, reply_all(State, Error)}
+ end;
+handle_info({'EXIT', _, reset}, State) ->
+ %% message from an old (probably pre-compaction) updater; ignore
+ {noreply, State};
+
+handle_info({'EXIT', _FromPid, normal}, State) ->
+ {noreply, State};
+
+handle_info({'EXIT', FromPid, {{nocatch, Reason}, _Trace}}, State) ->
+ ?LOG_DEBUG("Uncaught throw() in linked pid: ~p", [{FromPid, Reason}]),
+ {stop, Reason, State};
+
+handle_info({'EXIT', FromPid, Reason}, State) ->
+ ?LOG_DEBUG("Exit from linked pid: ~p", [{FromPid, Reason}]),
+ {stop, Reason, State};
+
+handle_info({'DOWN',_,_,_,_}, State) ->
+ ?LOG_INFO("Shutting down view group server, monitored db is closing.", []),
+ {stop, normal, reply_all(State, shutdown)}.
+
+
+terminate(Reason, #group_state{updater_pid=Update, compactor_pid=Compact}=S) ->
+ reply_all(S, Reason),
+ couch_util:shutdown_sync(Update),
+ couch_util:shutdown_sync(Compact),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% Local Functions
+
+% reply_with_group/4
+% for each {Pid, Seq} item in the WaitingList:
+% if Seq =< GroupSeq, reply
+reply_with_group(Group=#group{current_seq=GroupSeq}, [{Pid, Seq}|WaitList],
+ StillWaiting, RefCounter) when Seq =< GroupSeq ->
+ gen_server:reply(Pid, {ok, Group, RefCounter}),
+ reply_with_group(Group, WaitList, StillWaiting, RefCounter);
+
+% else
+% put it in the continuing waiting list
+reply_with_group(Group, [{Pid, Seq}|WaitList], StillWaiting, RefCounter) ->
+ reply_with_group(Group, WaitList, [{Pid, Seq}|StillWaiting], RefCounter);
+
+% return the still waiting list
+reply_with_group(_Group, [], StillWaiting, _RefCounter) ->
+ StillWaiting.
+
+reply_all(#group_state{waiting_list=WaitList}=State, Reply) ->
+ [catch gen_server:reply(Pid, Reply) || {Pid, _} <- WaitList],
+ State#group_state{waiting_list=[]}.
+
+prepare_group({RootDir, DbName, #group{sig=Sig}=Group}, ForceReset)->
+ case couch_db:open_int(DbName, []) of
+ {ok, Db} ->
+ case open_index_file(RootDir, DbName, Sig) of
+ {ok, Fd} ->
+ if ForceReset ->
+ % this can happen if we missed a purge
+ {ok, reset_file(Db, Fd, DbName, Group)};
+ true ->
+ % 09 UPGRADE CODE
+ ok = couch_file:upgrade_old_header(Fd, <<$r, $c, $k, 0>>),
+ case (catch couch_file:read_header(Fd)) of
+ {ok, {Sig, HeaderInfo}} ->
+ % sigs match!
+ {ok, init_group(Db, Fd, Group, HeaderInfo)};
+ _ ->
+ % this happens on a new file
+ {ok, reset_file(Db, Fd, DbName, Group)}
+ end
+ end;
+ Error ->
+ catch delete_index_file(RootDir, DbName, Sig),
+ Error
+ end;
+ Else ->
+ Else
+ end.
+
+get_index_header_data(#group{current_seq=Seq, purge_seq=PurgeSeq,
+ id_btree=IdBtree,views=Views}) ->
+ ViewStates = [
+ {couch_btree:get_state(V#view.btree), V#view.update_seq, V#view.purge_seq} || V <- Views
+ ],
+ #index_header{
+ seq=Seq,
+ purge_seq=PurgeSeq,
+ id_btree_state=couch_btree:get_state(IdBtree),
+ view_states=ViewStates
+ }.
+
+hex_sig(GroupSig) ->
+ couch_util:to_hex(?b2l(GroupSig)).
+
+design_root(RootDir, DbName) ->
+ RootDir ++ "/." ++ ?b2l(DbName) ++ "_design/".
+
+index_file_name(RootDir, DbName, GroupSig) ->
+ design_root(RootDir, DbName) ++ hex_sig(GroupSig) ++".view".
+
+index_file_name(compact, RootDir, DbName, GroupSig) ->
+ design_root(RootDir, DbName) ++ hex_sig(GroupSig) ++".compact.view".
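+
+% Example (illustrative): for a hypothetical RootDir "/var/lib/couchdb",
+% database <<"db">> and group signature Sig, the index file lives at
+%
+%   /var/lib/couchdb/.db_design/<hex sig>.view
+%
+% and its in-progress compaction target at .../<hex sig>.compact.view.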
+
+
+open_index_file(RootDir, DbName, GroupSig) ->
+ FileName = index_file_name(RootDir, DbName, GroupSig),
+ case couch_file:open(FileName) of
+ {ok, Fd} -> {ok, Fd};
+ {error, enoent} -> couch_file:open(FileName, [create]);
+ Error -> Error
+ end.
+
+open_index_file(compact, RootDir, DbName, GroupSig) ->
+ FileName = index_file_name(compact, RootDir, DbName, GroupSig),
+ case couch_file:open(FileName) of
+ {ok, Fd} -> {ok, Fd};
+ {error, enoent} -> couch_file:open(FileName, [create]);
+ Error -> Error
+ end.
+
+open_temp_group(DbName, Language, DesignOptions, MapSrc, RedSrc) ->
+ case couch_db:open_int(DbName, []) of
+ {ok, Db} ->
+ View = #view{map_names=[<<"_temp">>],
+ id_num=0,
+ btree=nil,
+ def=MapSrc,
+ reduce_funs= if RedSrc==[] -> []; true -> [{<<"_temp">>, RedSrc}] end,
+ options=DesignOptions},
+ couch_db:close(Db),
+ {ok, set_view_sig(#group{name = <<"_temp">>,lib={[]}, views=[View],
+ def_lang=Language, design_options=DesignOptions})};
+ Error ->
+ Error
+ end.
+
+set_view_sig(#group{
+ views=Views,
+ lib={[]},
+ def_lang=Language,
+ design_options=DesignOptions}=G) ->
+ ViewInfo = [old_view_format(V) || V <- Views],
+ G#group{sig=couch_util:md5(term_to_binary({ViewInfo, Language, DesignOptions}))};
+set_view_sig(#group{
+ views=Views,
+ lib=Lib,
+ def_lang=Language,
+ design_options=DesignOptions}=G) ->
+ ViewInfo = [old_view_format(V) || V <- Views],
+ G#group{sig=couch_util:md5(term_to_binary({ViewInfo, Language, DesignOptions, sort_lib(Lib)}))}.
+
+% Use the old view record format so group sig's don't change
+old_view_format(View) ->
+ {
+ view,
+ View#view.id_num,
+ View#view.map_names,
+ View#view.def,
+ View#view.btree,
+ View#view.reduce_funs,
+ View#view.options
+ }.
+
+sort_lib({Lib}) ->
+ sort_lib(Lib, []).
+sort_lib([], LAcc) ->
+ lists:keysort(1, LAcc);
+sort_lib([{LName, {LObj}}|Rest], LAcc) ->
+ LSorted = sort_lib(LObj, []), % descend into nested object
+ sort_lib(Rest, [{LName, LSorted}|LAcc]);
+sort_lib([{LName, LCode}|Rest], LAcc) ->
+ sort_lib(Rest, [{LName, LCode}|LAcc]).
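+
+% Example (illustrative, values hypothetical): sort_lib/1 normalizes key
+% order (recursing into nested objects) so equivalent lib objects produce
+% the same group signature:
+%
+%   sort_lib({[{<<"b">>, <<"code">>}, {<<"a">>, <<"more">>}]})
+%     => [{<<"a">>, <<"more">>}, {<<"b">>, <<"code">>}]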
+
+open_db_group(DbName, GroupId) ->
+ case couch_db:open_int(DbName, []) of
+ {ok, Db} ->
+ case couch_db:open_doc(Db, GroupId) of
+ {ok, Doc} ->
+ couch_db:close(Db),
+ {ok, design_doc_to_view_group(Doc)};
+ Else ->
+ couch_db:close(Db),
+ Else
+ end;
+ Else ->
+ Else
+ end.
+
+get_group_info(State) ->
+ #group_state{
+ group=Group,
+ updater_pid=UpdaterPid,
+ compactor_pid=CompactorPid,
+ waiting_commit=WaitingCommit,
+ waiting_list=WaitersList
+ } = State,
+ #group{
+ fd = Fd,
+ sig = GroupSig,
+ def_lang = Lang,
+ current_seq=CurrentSeq,
+ purge_seq=PurgeSeq
+ } = Group,
+ {ok, Size} = couch_file:bytes(Fd),
+ [
+ {signature, ?l2b(hex_sig(GroupSig))},
+ {language, Lang},
+ {disk_size, Size},
+ {updater_running, UpdaterPid /= nil},
+ {compact_running, CompactorPid /= nil},
+ {waiting_commit, WaitingCommit},
+ {waiting_clients, length(WaitersList)},
+ {update_seq, CurrentSeq},
+ {purge_seq, PurgeSeq}
+ ].
+
+% maybe move to another module
+design_doc_to_view_group(#doc{id=Id,body={Fields}}) ->
+ Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
+ {DesignOptions} = couch_util:get_value(<<"options">>, Fields, {[]}),
+ {RawViews} = couch_util:get_value(<<"views">>, Fields, {[]}),
+ Lib = couch_util:get_value(<<"lib">>, RawViews, {[]}),
+ % add the views to a dictionary object, with the map source as the key
+ DictBySrc =
+ lists:foldl(
+ fun({Name, {MRFuns}}, DictBySrcAcc) ->
+ case couch_util:get_value(<<"map">>, MRFuns) of
+ undefined -> DictBySrcAcc;
+ MapSrc ->
+ RedSrc = couch_util:get_value(<<"reduce">>, MRFuns, null),
+ {ViewOptions} = couch_util:get_value(<<"options">>, MRFuns, {[]}),
+ View =
+ case dict:find({MapSrc, ViewOptions}, DictBySrcAcc) of
+ {ok, View0} -> View0;
+ error -> #view{def=MapSrc, options=ViewOptions} % create new view object
+ end,
+ View2 =
+ if RedSrc == null ->
+ View#view{map_names=[Name|View#view.map_names]};
+ true ->
+ View#view{reduce_funs=[{Name,RedSrc}|View#view.reduce_funs]}
+ end,
+ dict:store({MapSrc, ViewOptions}, View2, DictBySrcAcc)
+ end
+ end, dict:new(), RawViews),
+ % number the views
+ {Views, _N} = lists:mapfoldl(
+ fun({_Src, View}, N) ->
+ {View#view{id_num=N},N+1}
+ end, 0, lists:sort(dict:to_list(DictBySrc))),
+ set_view_sig(#group{name=Id, lib=Lib, views=Views, def_lang=Language, design_options=DesignOptions}).
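+
+% Example (illustrative, names hypothetical): view entries sharing the same
+% map source and options collapse into a single #view{}; a design doc body
+%
+%   {"views": {"by_id":  {"map": "function(d){emit(d._id,1)}"},
+%              "by_id2": {"map": "function(d){emit(d._id,1)}"}}}
+%
+% yields one #view{map_names=[<<"by_id2">>,<<"by_id">>]}, so the index is
+% computed and stored only once for both names.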
+
+reset_group(#group{views=Views}=Group) ->
+ Views2 = [View#view{btree=nil} || View <- Views],
+ Group#group{db=nil,fd=nil,query_server=nil,current_seq=0,
+ id_btree=nil,views=Views2}.
+
+reset_file(Db, Fd, DbName, #group{sig=Sig,name=Name} = Group) ->
+ ?LOG_DEBUG("Resetting group index \"~s\" in db ~s", [Name, DbName]),
+ ok = couch_file:truncate(Fd, 0),
+ ok = couch_file:write_header(Fd, {Sig, nil}),
+ init_group(Db, Fd, reset_group(Group), nil).
+
+delete_index_file(RootDir, DbName, GroupSig) ->
+ couch_file:delete(RootDir, index_file_name(RootDir, DbName, GroupSig)).
+
+init_group(Db, Fd, #group{views=Views}=Group, nil) ->
+ init_group(Db, Fd, Group,
+ #index_header{seq=0, purge_seq=couch_db:get_purge_seq(Db),
+ id_btree_state=nil, view_states=[{nil, 0, 0} || _ <- Views]});
+init_group(Db, Fd, #group{def_lang=Lang,views=Views}=
+ Group, IndexHeader) ->
+ #index_header{seq=Seq, purge_seq=PurgeSeq,
+ id_btree_state=IdBtreeState, view_states=ViewStates} = IndexHeader,
+ StateUpdate = fun
+ ({_, _, _}=State) -> State;
+ (State) -> {State, 0, 0}
+ end,
+ ViewStates2 = lists:map(StateUpdate, ViewStates),
+ {ok, IdBtree} = couch_btree:open(IdBtreeState, Fd),
+ Views2 = lists:zipwith(
+ fun({BTState, USeq, PSeq}, #view{reduce_funs=RedFuns,options=Options}=View) ->
+ FunSrcs = [FunSrc || {_Name, FunSrc} <- RedFuns],
+ ReduceFun =
+ fun(reduce, KVs) ->
+ KVs2 = couch_view:expand_dups(KVs,[]),
+ KVs3 = couch_view:detuple_kvs(KVs2,[]),
+ {ok, Reduced} = couch_query_servers:reduce(Lang, FunSrcs,
+ KVs3),
+ {length(KVs3), Reduced};
+ (rereduce, Reds) ->
+ Count = lists:sum([Count0 || {Count0, _} <- Reds]),
+ UserReds = [UserRedsList || {_, UserRedsList} <- Reds],
+ {ok, Reduced} = couch_query_servers:rereduce(Lang, FunSrcs,
+ UserReds),
+ {Count, Reduced}
+ end,
+
+ case couch_util:get_value(<<"collation">>, Options, <<"default">>) of
+ <<"default">> ->
+ Less = fun couch_view:less_json_ids/2;
+ <<"raw">> ->
+ Less = fun(A,B) -> A < B end
+ end,
+ {ok, Btree} = couch_btree:open(BTState, Fd,
+ [{less, Less}, {reduce, ReduceFun}]
+ ),
+ View#view{btree=Btree, update_seq=USeq, purge_seq=PSeq}
+ end,
+ ViewStates2, Views),
+ Group#group{db=Db, fd=Fd, current_seq=Seq, purge_seq=PurgeSeq,
+ id_btree=IdBtree, views=Views2}.
diff --git a/1.1.x/src/couchdb/couch_view_updater.erl b/1.1.x/src/couchdb/couch_view_updater.erl
new file mode 100644
index 00000000..8e089fa9
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_view_updater.erl
@@ -0,0 +1,265 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_view_updater).
+
+-export([update/2]).
+
+-include("couch_db.hrl").
+
+-spec update(_, #group{}) -> no_return().
+
+update(Owner, Group) ->
+ #group{
+ db = #db{name=DbName} = Db,
+ name = GroupName,
+ current_seq = Seq,
+ purge_seq = PurgeSeq
+ } = Group,
+ couch_task_status:add_task(<<"View Group Indexer">>, <<DbName/binary," ",GroupName/binary>>, <<"Starting index update">>),
+
+ DbPurgeSeq = couch_db:get_purge_seq(Db),
+ Group2 =
+ if DbPurgeSeq == PurgeSeq ->
+ Group;
+ DbPurgeSeq == PurgeSeq + 1 ->
+ couch_task_status:update(<<"Removing purged entries from view index.">>),
+ purge_index(Group);
+ true ->
+ couch_task_status:update(<<"Resetting view index due to lost purge entries.">>),
+ exit(reset)
+ end,
+ {ok, MapQueue} = couch_work_queue:new(
+ [{max_size, 100000}, {max_items, 500}]),
+ {ok, WriteQueue} = couch_work_queue:new(
+ [{max_size, 100000}, {max_items, 500}]),
+ Self = self(),
+ ViewEmptyKVs = [{View, []} || View <- Group2#group.views],
+ spawn_link(fun() -> do_maps(Group, MapQueue, WriteQueue, ViewEmptyKVs) end),
+ spawn_link(fun() -> do_writes(Self, Owner, Group2, WriteQueue, Seq == 0) end),
+ % compute on all docs modified since we last computed.
+ TotalChanges = couch_db:count_changes_since(Db, Seq),
+ % update status every half second
+ couch_task_status:set_update_frequency(500),
+ #group{ design_options = DesignOptions } = Group,
+ IncludeDesign = couch_util:get_value(<<"include_design">>,
+ DesignOptions, false),
+ LocalSeq = couch_util:get_value(<<"local_seq">>, DesignOptions, false),
+ DocOpts =
+ case LocalSeq of
+ true -> [conflicts, deleted_conflicts, local_seq];
+ _ -> [conflicts, deleted_conflicts]
+ end,
+ {ok, _, _}
+ = couch_db:enum_docs_since(
+ Db,
+ Seq,
+ fun(DocInfo, _, ChangesProcessed) ->
+ couch_task_status:update("Processed ~p of ~p changes (~p%)",
+ [ChangesProcessed, TotalChanges, (ChangesProcessed*100) div TotalChanges]),
+ load_doc(Db, DocInfo, MapQueue, DocOpts, IncludeDesign),
+ {ok, ChangesProcessed+1}
+ end,
+ 0, []),
+ couch_task_status:set_update_frequency(0),
+ couch_task_status:update("Finishing."),
+ couch_work_queue:close(MapQueue),
+ receive {new_group, NewGroup} ->
+ exit({new_group,
+ NewGroup#group{current_seq=couch_db:get_update_seq(Db)}})
+ end.
+
+
+purge_index(#group{db=Db, views=Views, id_btree=IdBtree}=Group) ->
+ {ok, PurgedIdsRevs} = couch_db:get_last_purged(Db),
+ Ids = [Id || {Id, _Revs} <- PurgedIdsRevs],
+ {ok, Lookups, IdBtree2} = couch_btree:query_modify(IdBtree, Ids, [], Ids),
+
+ % now populate the dictionary with all the keys to delete
+ ViewKeysToRemoveDict = lists:foldl(
+ fun({ok,{DocId,ViewNumRowKeys}}, ViewDictAcc) ->
+ lists:foldl(
+ fun({ViewNum, RowKey}, ViewDictAcc2) ->
+ dict:append(ViewNum, {RowKey, DocId}, ViewDictAcc2)
+ end, ViewDictAcc, ViewNumRowKeys);
+ ({not_found, _}, ViewDictAcc) ->
+ ViewDictAcc
+ end, dict:new(), Lookups),
+
+ % Now remove the values from the btrees
+ PurgeSeq = couch_db:get_purge_seq(Db),
+ Views2 = lists:map(
+ fun(#view{id_num=Num,btree=Btree}=View) ->
+ case dict:find(Num, ViewKeysToRemoveDict) of
+ {ok, RemoveKeys} ->
+ {ok, ViewBtree2} = couch_btree:add_remove(Btree, [], RemoveKeys),
+ case ViewBtree2 =/= Btree of
+ true ->
+ View#view{btree=ViewBtree2, purge_seq=PurgeSeq};
+ _ ->
+ View#view{btree=ViewBtree2}
+ end;
+ error -> % no keys to remove in this view
+ View
+ end
+ end, Views),
+ Group#group{id_btree=IdBtree2,
+ views=Views2,
+ purge_seq=PurgeSeq}.
+
+
+load_doc(Db, DocInfo, MapQueue, DocOpts, IncludeDesign) ->
+ #doc_info{id=DocId, high_seq=Seq, revs=[#rev_info{deleted=Deleted}|_]} = DocInfo,
+ case {IncludeDesign, DocId} of
+ {false, <<?DESIGN_DOC_PREFIX, _/binary>>} -> % we skip design docs
+ ok;
+ _ ->
+ if Deleted ->
+ couch_work_queue:queue(MapQueue, {Seq, #doc{id=DocId, deleted=true}});
+ true ->
+ {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts),
+ couch_work_queue:queue(MapQueue, {Seq, Doc})
+ end
+ end.
+
+do_maps(Group, MapQueue, WriteQueue, ViewEmptyKVs) ->
+ case couch_work_queue:dequeue(MapQueue) of
+ closed ->
+ couch_work_queue:close(WriteQueue),
+ couch_query_servers:stop_doc_map(Group#group.query_server);
+ {ok, Queue} ->
+ Docs = [Doc || {_,#doc{deleted=false}=Doc} <- Queue],
+ DelKVs = [{Id, []} || {_, #doc{deleted=true,id=Id}} <- Queue],
+ LastSeq = lists:max([Seq || {Seq, _Doc} <- Queue]),
+ {Group1, Results} = view_compute(Group, Docs),
+ {ViewKVs, DocIdViewIdKeys} = view_insert_query_results(Docs,
+ Results, ViewEmptyKVs, DelKVs),
+ couch_work_queue:queue(WriteQueue, {LastSeq, ViewKVs, DocIdViewIdKeys}),
+ do_maps(Group1, MapQueue, WriteQueue, ViewEmptyKVs)
+ end.
+
+do_writes(Parent, Owner, Group, WriteQueue, InitialBuild) ->
+ case couch_work_queue:dequeue(WriteQueue) of
+ closed ->
+ Parent ! {new_group, Group};
+ {ok, Queue} ->
+ {NewSeq, ViewKeyValues, DocIdViewIdKeys} = lists:foldl(
+ fun({Seq, ViewKVs, DocIdViewIdKeys}, nil) ->
+ {Seq, ViewKVs, DocIdViewIdKeys};
+ ({Seq, ViewKVs, DocIdViewIdKeys}, Acc) ->
+ {Seq2, AccViewKVs, AccDocIdViewIdKeys} = Acc,
+ AccViewKVs2 = lists:zipwith(
+ fun({View, KVsIn}, {_View, KVsAcc}) ->
+ {View, KVsIn ++ KVsAcc}
+ end, ViewKVs, AccViewKVs),
+ {lists:max([Seq, Seq2]),
+ AccViewKVs2, DocIdViewIdKeys ++ AccDocIdViewIdKeys}
+ end, nil, Queue),
+ Group2 = write_changes(Group, ViewKeyValues, DocIdViewIdKeys, NewSeq,
+ InitialBuild),
+ case Owner of
+ nil -> ok;
+ _ -> ok = gen_server:cast(Owner, {partial_update, Parent, Group2})
+ end,
+ do_writes(Parent, Owner, Group2, WriteQueue, InitialBuild)
+ end.
+
+view_insert_query_results([], [], ViewKVs, DocIdViewIdKeysAcc) ->
+ {ViewKVs, DocIdViewIdKeysAcc};
+view_insert_query_results([Doc|RestDocs], [QueryResults | RestResults], ViewKVs, DocIdViewIdKeysAcc) ->
+ {NewViewKVs, NewViewIdKeys} = view_insert_doc_query_results(Doc, QueryResults, ViewKVs, [], []),
+ NewDocIdViewIdKeys = [{Doc#doc.id, NewViewIdKeys} | DocIdViewIdKeysAcc],
+ view_insert_query_results(RestDocs, RestResults, NewViewKVs, NewDocIdViewIdKeys).
+
+
+view_insert_doc_query_results(_Doc, [], [], ViewKVsAcc, ViewIdKeysAcc) ->
+ {lists:reverse(ViewKVsAcc), lists:reverse(ViewIdKeysAcc)};
+view_insert_doc_query_results(#doc{id=DocId}=Doc, [ResultKVs|RestResults], [{View, KVs}|RestViewKVs], ViewKVsAcc, ViewIdKeysAcc) ->
+ % Take any identical keys and combine the values
+ ResultKVs2 = lists:foldl(
+ fun({Key,Value}, [{PrevKey,PrevVal}|AccRest]) ->
+ case Key == PrevKey of
+ true ->
+ case PrevVal of
+ {dups, Dups} ->
+ [{PrevKey, {dups, [Value|Dups]}} | AccRest];
+ _ ->
+ [{PrevKey, {dups, [Value,PrevVal]}} | AccRest]
+ end;
+ false ->
+ [{Key,Value},{PrevKey,PrevVal}|AccRest]
+ end;
+ (KV, []) ->
+ [KV]
+ end, [], lists:sort(ResultKVs)),
+ NewKVs = [{{Key, DocId}, Value} || {Key, Value} <- ResultKVs2],
+ NewViewKVsAcc = [{View, NewKVs ++ KVs} | ViewKVsAcc],
+ NewViewIdKeys = [{View#view.id_num, Key} || {Key, _Value} <- ResultKVs2],
+ NewViewIdKeysAcc = NewViewIdKeys ++ ViewIdKeysAcc,
+ view_insert_doc_query_results(Doc, RestResults, RestViewKVs, NewViewKVsAcc, NewViewIdKeysAcc).
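+
+% Example (illustrative, values hypothetical): duplicate keys emitted by one
+% doc are folded into a single {dups, ...} value and keyed by {Key, DocId};
+% sorted emits [{k, v1}, {k, v2}] for doc <<"d">> become
+%
+%   [{{k, <<"d">>}, {dups, [v2, v1]}}]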
+
+view_compute(Group, []) ->
+ {Group, []};
+view_compute(#group{def_lang=DefLang, lib=Lib, query_server=QueryServerIn}=Group, Docs) ->
+ {ok, QueryServer} =
+ case QueryServerIn of
+ nil -> % doc map not started
+ Definitions = [View#view.def || View <- Group#group.views],
+ couch_query_servers:start_doc_map(DefLang, Definitions, Lib);
+ _ ->
+ {ok, QueryServerIn}
+ end,
+ {ok, Results} = couch_query_servers:map_docs(QueryServer, Docs),
+ {Group#group{query_server=QueryServer}, Results}.
+
+
+
+write_changes(Group, ViewKeyValuesToAdd, DocIdViewIdKeys, NewSeq, InitialBuild) ->
+ #group{id_btree=IdBtree} = Group,
+
+ AddDocIdViewIdKeys = [{DocId, ViewIdKeys} || {DocId, ViewIdKeys} <- DocIdViewIdKeys, ViewIdKeys /= []],
+ if InitialBuild ->
+ RemoveDocIds = [],
+ LookupDocIds = [];
+ true ->
+ RemoveDocIds = [DocId || {DocId, ViewIdKeys} <- DocIdViewIdKeys, ViewIdKeys == []],
+ LookupDocIds = [DocId || {DocId, _ViewIdKeys} <- DocIdViewIdKeys]
+ end,
+ {ok, LookupResults, IdBtree2}
+ = couch_btree:query_modify(IdBtree, LookupDocIds, AddDocIdViewIdKeys, RemoveDocIds),
+ KeysToRemoveByView = lists:foldl(
+ fun(LookupResult, KeysToRemoveByViewAcc) ->
+ case LookupResult of
+ {ok, {DocId, ViewIdKeys}} ->
+ lists:foldl(
+ fun({ViewId, Key}, KeysToRemoveByViewAcc2) ->
+ dict:append(ViewId, {Key, DocId}, KeysToRemoveByViewAcc2)
+ end,
+ KeysToRemoveByViewAcc, ViewIdKeys);
+ {not_found, _} ->
+ KeysToRemoveByViewAcc
+ end
+ end,
+ dict:new(), LookupResults),
+ Views2 = lists:zipwith(fun(View, {_View, AddKeyValues}) ->
+ KeysToRemove = couch_util:dict_find(View#view.id_num, KeysToRemoveByView, []),
+ {ok, ViewBtree2} = couch_btree:add_remove(View#view.btree, AddKeyValues, KeysToRemove),
+ case ViewBtree2 =/= View#view.btree of
+ true ->
+ View#view{btree=ViewBtree2, update_seq=NewSeq};
+ _ ->
+ View#view{btree=ViewBtree2}
+ end
+ end, Group#group.views, ViewKeyValuesToAdd),
+ Group#group{views=Views2, current_seq=NewSeq, id_btree=IdBtree2}.
+
+
diff --git a/1.1.x/src/couchdb/couch_work_queue.erl b/1.1.x/src/couchdb/couch_work_queue.erl
new file mode 100644
index 00000000..13ec7335
--- /dev/null
+++ b/1.1.x/src/couchdb/couch_work_queue.erl
@@ -0,0 +1,155 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_work_queue).
+-behaviour(gen_server).
+
+% public API
+-export([new/1, queue/2, dequeue/1, dequeue/2, close/1]).
+
+% gen_server callbacks
+-export([init/1, terminate/2]).
+-export([handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
+
+-record(q, {
+ queue = queue:new(),
+ blocked = [],
+ max_size,
+ max_items,
+ items = 0,
+ size = 0,
+ work_waiters = [],
+ close_on_dequeue = false,
+ multi_workers = false
+}).
+
+
+new(Options) ->
+ gen_server:start_link(couch_work_queue, Options, []).
+
+
+queue(Wq, Item) ->
+ gen_server:call(Wq, {queue, Item}, infinity).
+
+
+dequeue(Wq) ->
+ dequeue(Wq, all).
+
+
+dequeue(Wq, MaxItems) ->
+ try
+ gen_server:call(Wq, {dequeue, MaxItems}, infinity)
+ catch
+ _:_ -> closed
+ end.
+
+
+close(Wq) ->
+ gen_server:cast(Wq, close).
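+
+% Example (illustrative, names hypothetical): a producer/consumer pair over
+% a bounded queue. queue/2 blocks once max_size or max_items is reached,
+% until a consumer dequeues:
+%
+%   {ok, Q} = couch_work_queue:new([{max_size, 1000}, {max_items, 10}]),
+%   spawn(fun() ->
+%       ok = couch_work_queue:queue(Q, work_item),
+%       couch_work_queue:close(Q)
+%   end),
+%   {ok, [work_item]} = couch_work_queue:dequeue(Q).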
+
+
+init(Options) ->
+ Q = #q{
+ max_size = couch_util:get_value(max_size, Options),
+ max_items = couch_util:get_value(max_items, Options),
+ multi_workers = couch_util:get_value(multi_workers, Options, false)
+ },
+ {ok, Q}.
+
+
+terminate(_Reason, #q{work_waiters=Workers}) ->
+ lists:foreach(fun({W, _}) -> gen_server:reply(W, closed) end, Workers).
+
+
+handle_call({queue, Item}, From, #q{work_waiters = []} = Q0) ->
+ Q = Q0#q{size = Q0#q.size + byte_size(term_to_binary(Item)),
+ items = Q0#q.items + 1,
+ queue = queue:in(Item, Q0#q.queue)},
+ case (Q#q.size >= Q#q.max_size) orelse
+ (Q#q.items >= Q#q.max_items) of
+ true ->
+ {noreply, Q#q{blocked = [From | Q#q.blocked]}};
+ false ->
+ {reply, ok, Q}
+ end;
+
+handle_call({queue, Item}, _From, #q{work_waiters = [{W, _Max} | Rest]} = Q) ->
+ gen_server:reply(W, {ok, [Item]}),
+ {reply, ok, Q#q{work_waiters = Rest}};
+
+handle_call({dequeue, Max}, From, Q) ->
+ #q{work_waiters = Workers, multi_workers = Multi, items = Count} = Q,
+ case {Workers, Multi} of
+ {[_ | _], false} ->
+ exit("Only one caller allowed to wait for this work at a time");
+ {[_ | _], true} ->
+ {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
+ _ ->
+ case Count of
+ 0 ->
+ {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
+ C when C > 0 ->
+ deliver_queue_items(Max, Q)
+ end
+ end.
+
+
+deliver_queue_items(Max, Q) ->
+ #q{
+ queue = Queue,
+ items = Count,
+ close_on_dequeue = Close,
+ blocked = Blocked
+ } = Q,
+ case (Max =:= all) orelse (Max >= Count) of
+ false ->
+ {Items, Queue2, Blocked2} = dequeue_items(Max, Queue, Blocked, []),
+ Q2 = Q#q{items = Count - Max, blocked = Blocked2, queue = Queue2},
+ {reply, {ok, Items}, Q2};
+ true ->
+ lists:foreach(fun(F) -> gen_server:reply(F, ok) end, Blocked),
+ Q2 = Q#q{items = 0, size = 0, blocked = [], queue = queue:new()},
+ case Close of
+ false ->
+ {reply, {ok, queue:to_list(Queue)}, Q2};
+ true ->
+ {stop, normal, {ok, queue:to_list(Queue)}, Q2}
+ end
+ end.
+
+
+dequeue_items(0, Queue, Blocked, DequeuedAcc) ->
+ {lists:reverse(DequeuedAcc), Queue, Blocked};
+
+dequeue_items(NumItems, Queue, Blocked, DequeuedAcc) ->
+ {{value, Item}, Queue2} = queue:out(Queue),
+ case Blocked of
+ [] ->
+ Blocked2 = Blocked;
+ [From | Blocked2] ->
+ gen_server:reply(From, ok)
+ end,
+ dequeue_items(NumItems - 1, Queue2, Blocked2, [Item | DequeuedAcc]).
+
+
+handle_cast(close, #q{items = 0} = Q) ->
+ {stop, normal, Q};
+
+handle_cast(close, Q) ->
+ {noreply, Q#q{close_on_dequeue = true}}.
+
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_info(X, Q) ->
+ {stop, X, Q}.
diff --git a/1.1.x/src/couchdb/priv/Makefile.am b/1.1.x/src/couchdb/priv/Makefile.am
new file mode 100644
index 00000000..b36d828d
--- /dev/null
+++ b/1.1.x/src/couchdb/priv/Makefile.am
@@ -0,0 +1,93 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+couchlibdir = $(localerlanglibdir)/couch-$(version)
+couchprivdir = $(couchlibdir)/priv
+couchprivlibdir = $(couchlibdir)/priv/lib
+
+EXTRA_DIST = \
+ spawnkillable/couchspawnkillable.sh \
+ stat_descriptions.cfg.in
+
+CLEANFILES = stat_descriptions.cfg
+
+ICU_LOCAL_FLAGS = $(ICU_LOCAL_CFLAGS) $(ICU_LOCAL_LDFLAGS)
+if WINDOWS
+ICU_LOCAL_LIBS=-licuuc -licudt -licuin
+else
+ICU_LOCAL_LIBS=-licuuc -licudata -licui18n
+endif
+
+couchprivlib_LTLIBRARIES = couch_icu_driver.la
+couch_icu_driver_la_SOURCES = icu_driver/couch_icu_driver.c
+couch_icu_driver_la_LDFLAGS = -module -avoid-version $(ICU_LOCAL_FLAGS)
+couch_icu_driver_la_CFLAGS = $(ICU_LOCAL_FLAGS)
+couch_icu_driver_la_LIBADD = $(ICU_LOCAL_LIBS)
+
+if WINDOWS
+couch_icu_driver_la_LDFLAGS += -no-undefined
+endif
+
+COUCHJS_SRCS = \
+ couch_js/http.c \
+ couch_js/http.h \
+ couch_js/main.c \
+ couch_js/utf8.c \
+ couch_js/utf8.h
+
+locallibbin_PROGRAMS = couchjs
+couchjs_SOURCES = $(COUCHJS_SRCS)
+couchjs_LDFLAGS = $(CURL_LDFLAGS)
+couchjs_CFLAGS = -D_BSD_SOURCE $(CURL_CFLAGS)
+couchjs_LDADD = $(CURL_LDFLAGS) @JSLIB@
+
+couchpriv_DATA = stat_descriptions.cfg
+couchpriv_PROGRAMS = couchspawnkillable
+
+%.cfg: %.cfg.in
+ cp $< $@
+
+if WINDOWS
+couchspawnkillable_SOURCES = spawnkillable/couchspawnkillable_win.c
+endif
+
+if !WINDOWS
+couchspawnkillable: spawnkillable/couchspawnkillable.sh
+ cp $< $@
+ chmod +x $@
+endif
+
+# libtool and automake have defeated markh. For each of our executables
+# we end up with 2 copies - one directly in the 'target' folder (eg, 'priv')
+# and another - the correct one - in .libs. The former doesn't work but is
+# what gets installed for 'couchspawnkillable' - but the correct one for
+# couchjs.exe *does* get copied. *shrug* So just clobber it with the
+# correct one as the last step. See bug COUCHDB-439
+install-data-hook:
+ if test -f "$(DESTDIR)$(couchprivlibdir)/couch_icu_driver"; then \
+ rm -f "$(DESTDIR)$(couchprivlibdir)/couch_icu_driver.so"; \
+ cd "$(DESTDIR)$(couchprivlibdir)" && \
+ $(LN_S) couch_icu_driver couch_icu_driver.so; \
+ fi
+if WINDOWS
+ $(INSTALL) $(ICU_LOCAL_BIN)/icuuc42.dll $(bindir)
+ $(INSTALL) $(ICU_LOCAL_BIN)/icudt42.dll $(bindir)
+ $(INSTALL) $(ICU_LOCAL_BIN)/icuin42.dll $(bindir)
+ $(INSTALL) $(JS_LIB_BINARY) $(bindir)
+ $(INSTALL) .libs/couchspawnkillable.exe \
+ "$(DESTDIR)$(couchprivdir)/couchspawnkillable.exe"
+endif
+
+uninstall-local:
+ if test -f "$(DESTDIR)$(couchprivlibdir)/couch_erl_driver"; then \
+ rm -f "$(DESTDIR)$(couchprivlibdir)/couch_erl_driver.so"; \
+ fi
diff --git a/1.1.x/src/couchdb/priv/couch_js/http.c b/1.1.x/src/couchdb/priv/couch_js/http.c
new file mode 100644
index 00000000..6c2a8a82
--- /dev/null
+++ b/1.1.x/src/couchdb/priv/couch_js/http.c
@@ -0,0 +1,675 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <jsapi.h>
+#include <curl/curl.h>
+
+#include "utf8.h"
+
+#ifdef XP_WIN
+// Map some of the string function names to things which exist on Windows
+#define strcasecmp _strcmpi
+#define strncasecmp _strnicmp
+#define snprintf _snprintf
+#endif
+
+typedef struct curl_slist CurlHeaders;
+
+typedef struct {
+ int method;
+ char* url;
+ CurlHeaders* req_headers;
+ jsint last_status;
+} HTTPData;
+
+char* METHODS[] = {"GET", "HEAD", "POST", "PUT", "DELETE", "COPY", NULL};
+
+#define GET 0
+#define HEAD 1
+#define POST 2
+#define PUT 3
+#define DELETE 4
+#define COPY 5
+
+static JSBool
+go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t blen);
+
+static JSString*
+str_from_binary(JSContext* cx, char* data, size_t length);
+
+static JSBool
+constructor(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
+{
+ HTTPData* http = NULL;
+ JSBool ret = JS_FALSE;
+
+ http = (HTTPData*) malloc(sizeof(HTTPData));
+ if(!http)
+ {
+ JS_ReportError(cx, "Failed to create CouchHTTP instance.");
+ goto error;
+ }
+
+ http->method = -1;
+ http->url = NULL;
+ http->req_headers = NULL;
+ http->last_status = -1;
+
+ if(!JS_SetPrivate(cx, obj, http))
+ {
+ JS_ReportError(cx, "Failed to set private CouchHTTP data.");
+ goto error;
+ }
+
+ ret = JS_TRUE;
+ goto success;
+
+error:
+ if(http) free(http);
+
+success:
+ return ret;
+}
+
+static void
+destructor(JSContext* cx, JSObject* obj)
+{
+ HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj);
+ if(!http)
+ {
+ fprintf(stderr, "Unable to destroy invalid CouchHTTP instance.\n");
+ }
+ else
+ {
+ if(http->url) free(http->url);
+ if(http->req_headers) curl_slist_free_all(http->req_headers);
+ free(http);
+ }
+}
+
+static JSBool
+open(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
+{
+ HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj);
+ char* method = NULL;
+ char* url = NULL;
+ JSBool ret = JS_FALSE;
+ int methid;
+
+ if(!http)
+ {
+ JS_ReportError(cx, "Invalid CouchHTTP instance.");
+ goto done;
+ }
+
+ if(argv[0] == JSVAL_VOID)
+ {
+ JS_ReportError(cx, "You must specify a method.");
+ goto done;
+ }
+
+ method = enc_string(cx, argv[0], NULL);
+ if(!method)
+ {
+ JS_ReportError(cx, "Failed to encode method.");
+ goto done;
+ }
+
+ for(methid = 0; METHODS[methid] != NULL; methid++)
+ {
+ if(strcasecmp(METHODS[methid], method) == 0) break;
+ }
+
+ if(methid > COPY)
+ {
+ JS_ReportError(cx, "Invalid method specified.");
+ goto done;
+ }
+
+ http->method = methid;
+
+ if(argv[1] == JSVAL_VOID)
+ {
+ JS_ReportError(cx, "You must specify a URL.");
+ goto done;
+ }
+
+ if(http->url)
+ {
+ free(http->url);
+ http->url = NULL;
+ }
+
+ http->url = enc_string(cx, argv[1], NULL);
+ if(!http->url)
+ {
+ JS_ReportError(cx, "Failed to encode URL.");
+ goto done;
+ }
+
+ if(argv[2] != JSVAL_VOID && argv[2] != JSVAL_FALSE)
+ {
+ JS_ReportError(cx, "Synchronous flag must be false if specified.");
+ goto done;
+ }
+
+ if(http->req_headers)
+ {
+ curl_slist_free_all(http->req_headers);
+ http->req_headers = NULL;
+ }
+
+ // Disable Expect: 100-continue
+ http->req_headers = curl_slist_append(http->req_headers, "Expect:");
+
+ ret = JS_TRUE;
+
+done:
+ if(method) free(method);
+ return ret;
+}
+
+static JSBool
+setheader(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
+{
+ HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj);
+ char* keystr = NULL;
+ char* valstr = NULL;
+ char* hdrbuf = NULL;
+ size_t hdrlen = -1;
+ JSBool ret = JS_FALSE;
+
+ if(!http)
+ {
+ JS_ReportError(cx, "Invalid CouchHTTP instance.");
+ goto done;
+ }
+
+ if(argv[0] == JSVAL_VOID)
+ {
+ JS_ReportError(cx, "You must specify a header name.");
+ goto done;
+ }
+
+ keystr = enc_string(cx, argv[0], NULL);
+ if(!keystr)
+ {
+ JS_ReportError(cx, "Failed to encode header name.");
+ goto done;
+ }
+
+ if(argv[1] == JSVAL_VOID)
+ {
+ JS_ReportError(cx, "You must specify a header value.");
+ goto done;
+ }
+
+ valstr = enc_string(cx, argv[1], NULL);
+ if(!valstr)
+ {
+ JS_ReportError(cx, "Failed to encode header value.");
+ goto done;
+ }
+
+ hdrlen = strlen(keystr) + strlen(valstr) + 3;
+ hdrbuf = (char*) malloc(hdrlen * sizeof(char));
+ if(!hdrbuf)
+ {
+ JS_ReportError(cx, "Failed to allocate header buffer.");
+ goto done;
+ }
+
+ snprintf(hdrbuf, hdrlen, "%s: %s", keystr, valstr);
+ http->req_headers = curl_slist_append(http->req_headers, hdrbuf);
+
+ ret = JS_TRUE;
+
+done:
+ if(keystr) free(keystr);
+ if(valstr) free(valstr);
+ if(hdrbuf) free(hdrbuf);
+
+ return ret;
+}
+
+static JSBool
+sendreq(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval)
+{
+ HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj);
+ char* body = NULL;
+ size_t bodylen = 0;
+ JSBool ret = JS_FALSE;
+
+ if(!http)
+ {
+ JS_ReportError(cx, "Invalid CouchHTTP instance.");
+ goto done;
+ }
+
+ if(argv[0] != JSVAL_VOID && argv[0] != JS_GetEmptyStringValue(cx))
+ {
+ body = enc_string(cx, argv[0], &bodylen);
+ if(!body)
+ {
+ JS_ReportError(cx, "Failed to encode body.");
+ goto done;
+ }
+ }
+
+ ret = go(cx, obj, http, body, bodylen);
+
+done:
+ if(body) free(body);
+ return ret;
+}
+
+static JSBool
+status(JSContext* cx, JSObject* obj, jsval idval, jsval* vp)
+{
+ HTTPData* http = (HTTPData*) JS_GetPrivate(cx, obj);
+
+ if(!http)
+ {
+ JS_ReportError(cx, "Invalid CouchHTTP instance.");
+ return JS_FALSE;
+ }
+
+ if(INT_FITS_IN_JSVAL(http->last_status))
+ {
+ *vp = INT_TO_JSVAL(http->last_status);
+ return JS_TRUE;
+ }
+ else
+ {
+ JS_ReportError(cx, "INTERNAL: Invalid last_status");
+ return JS_FALSE;
+ }
+}
+
+JSClass CouchHTTPClass = {
+ "CouchHTTP",
+ JSCLASS_HAS_PRIVATE
+ | JSCLASS_CONSTRUCT_PROTOTYPE
+ | JSCLASS_HAS_RESERVED_SLOTS(2),
+ JS_PropertyStub,
+ JS_PropertyStub,
+ JS_PropertyStub,
+ JS_PropertyStub,
+ JS_EnumerateStub,
+ JS_ResolveStub,
+ JS_ConvertStub,
+ destructor,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+JSPropertySpec CouchHTTPProperties[] = {
+ {"status", 0, JSPROP_READONLY, status, NULL},
+ {0, 0, 0, 0, 0}
+};
+
+JSFunctionSpec CouchHTTPFunctions[] = {
+ {"_open", open, 3, 0, 0},
+ {"_setRequestHeader", setheader, 2, 0, 0},
+ {"_send", sendreq, 1, 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+JSObject*
+install_http(JSContext* cx, JSObject* glbl)
+{
+ JSObject* klass = NULL;
+ HTTPData* http = NULL;
+
+ klass = JS_InitClass(
+ cx,
+ glbl,
+ NULL,
+ &CouchHTTPClass,
+ constructor,
+ 0,
+ CouchHTTPProperties,
+ CouchHTTPFunctions,
+ NULL,
+ NULL
+ );
+
+ if(!klass)
+ {
+ fprintf(stderr, "Failed to initialize CouchHTTP class.\n");
+ return NULL;
+ }
+
+ return klass;
+}
+
+
+// Curl Helpers
+
+typedef struct {
+ HTTPData* http;
+ JSContext* cx;
+ JSObject* resp_headers;
+ char* sendbuf;
+ size_t sendlen;
+ size_t sent;
+ char* recvbuf;
+ size_t recvlen;
+ size_t read;
+} CurlState;
+
+/*
+ * I really hate doing this but this doesn't have to be
+ * uber awesome, it just has to work.
+ */
+CURL* HTTP_HANDLE = NULL;
+char ERRBUF[CURL_ERROR_SIZE];
+
+static size_t send_body(void *ptr, size_t size, size_t nmem, void *data);
+static int seek_body(void *ptr, curl_off_t offset, int origin);
+static size_t recv_body(void *ptr, size_t size, size_t nmem, void *data);
+static size_t recv_header(void *ptr, size_t size, size_t nmem, void *data);
+
+static JSBool
+go(JSContext* cx, JSObject* obj, HTTPData* http, char* body, size_t bodylen)
+{
+ CurlState state;
+ JSString* jsbody;
+ JSBool ret = JS_FALSE;
+ jsval tmp;
+
+ state.cx = cx;
+ state.http = http;
+
+ state.sendbuf = body;
+ state.sendlen = bodylen;
+ state.sent = 0;
+
+    state.recvbuf = NULL;
+    state.recvlen = 0;
+    state.read = 0;
+    state.resp_headers = NULL;
+
+    if(HTTP_HANDLE == NULL)
+    {
+        HTTP_HANDLE = curl_easy_init();
+        if(!HTTP_HANDLE)
+        {
+            // Check before setting options so we never pass a NULL handle.
+            JS_ReportError(cx, "Failed to initialize cURL handle.");
+            goto done;
+        }
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_READFUNCTION, send_body);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_SEEKFUNCTION,
+                            (curl_seek_callback) seek_body);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_HEADERFUNCTION, recv_header);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEFUNCTION, recv_body);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOPROGRESS, 1);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_ERRORBUFFER, ERRBUF);
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_COOKIEFILE, "");
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_USERAGENT,
+                            "CouchHTTP Client - Relax");
+    }
+
+ if(http->method < 0 || http->method > COPY)
+ {
+ JS_ReportError(cx, "INTERNAL: Unknown method.");
+ goto done;
+ }
+
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_CUSTOMREQUEST, METHODS[http->method]);
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOBODY, 0);
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 1);
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_UPLOAD, 0);
+
+ if(http->method == HEAD)
+ {
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_NOBODY, 1);
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 0);
+ }
+ else if(http->method == POST || http->method == PUT)
+ {
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_UPLOAD, 1);
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_FOLLOWLOCATION, 0);
+ }
+
+    if(body && bodylen)
+    {
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_INFILESIZE, (long) bodylen);
+    }
+    else
+    {
+        curl_easy_setopt(HTTP_HANDLE, CURLOPT_INFILESIZE, 0L);
+    }
+
+ //curl_easy_setopt(HTTP_HANDLE, CURLOPT_VERBOSE, 1);
+
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_URL, http->url);
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_HTTPHEADER, http->req_headers);
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_READDATA, &state);
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_SEEKDATA, &state);
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEHEADER, &state);
+ curl_easy_setopt(HTTP_HANDLE, CURLOPT_WRITEDATA, &state);
+
+ if(curl_easy_perform(HTTP_HANDLE) != 0)
+ {
+ JS_ReportError(cx, "Failed to execute HTTP request: %s", ERRBUF);
+ goto done;
+ }
+
+ if(!state.resp_headers)
+ {
+ JS_ReportError(cx, "Failed to recieve HTTP headers.");
+ goto done;
+ }
+
+ tmp = OBJECT_TO_JSVAL(state.resp_headers);
+ if(!JS_DefineProperty(
+ cx,
+ obj,
+ "_headers",
+ tmp,
+ NULL,
+ NULL,
+ JSPROP_READONLY
+ ))
+ {
+ JS_ReportError(cx, "INTERNAL: Failed to set response headers.");
+ goto done;
+ }
+
+    if(state.recvbuf) // NULL here means no response body was received.
+ {
+ state.recvbuf[state.read] = '\0';
+ jsbody = dec_string(cx, state.recvbuf, state.read+1);
+ if(!jsbody)
+ {
+            // If the body cannot be decoded as UTF-8, fall back to
+            // widening each byte into a jschar.
+ jsbody = str_from_binary(cx, state.recvbuf, state.read);
+ if(!jsbody) {
+ if(!JS_IsExceptionPending(cx)) {
+ JS_ReportError(cx, "INTERNAL: Failed to decode body.");
+ }
+ goto done;
+ }
+ }
+ tmp = STRING_TO_JSVAL(jsbody);
+ }
+ else
+ {
+ tmp = JS_GetEmptyStringValue(cx);
+ }
+
+ if(!JS_DefineProperty(
+ cx,
+ obj,
+ "responseText",
+ tmp,
+ NULL,
+ NULL,
+ JSPROP_READONLY
+ ))
+ {
+ JS_ReportError(cx, "INTERNAL: Failed to set responseText.");
+ goto done;
+ }
+
+ ret = JS_TRUE;
+
+done:
+ if(state.recvbuf) JS_free(cx, state.recvbuf);
+ return ret;
+}
+
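+// cURL read callback: copies up to size*nmem bytes of the pending request
+// body into ptr. Returning 0 signals to cURL that the body is complete.
+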
+static size_t
+send_body(void *ptr, size_t size, size_t nmem, void *data)
+{
+ CurlState* state = (CurlState*) data;
+ size_t length = size * nmem;
+ size_t towrite = state->sendlen - state->sent;
+ if(towrite == 0)
+ {
+ return 0;
+ }
+
+ if(length < towrite) towrite = length;
+
+    //fprintf(stderr, "%lu %lu %lu %lu\n", state->sendlen, state->sent, length, towrite);
+
+ memcpy(ptr, state->sendbuf + state->sent, towrite);
+ state->sent += towrite;
+
+ return towrite;
+}
+
+static int
+seek_body(void* ptr, curl_off_t offset, int origin)
+{
+ CurlState* state = (CurlState*) ptr;
+ if(origin != SEEK_SET) return -1;
+
+    state->sent = (size_t) offset;
+    return 0; /* CURL_SEEKFUNC_OK; a non-zero return tells cURL the seek failed */
+}
+
+static size_t
+recv_header(void *ptr, size_t size, size_t nmem, void *data)
+{
+ CurlState* state = (CurlState*) data;
+ char code[4];
+ char* header = (char*) ptr;
+ size_t length = size * nmem;
+ size_t index = 0;
+ JSString* hdr = NULL;
+ jsuint hdrlen;
+ jsval hdrval;
+
+ if(length > 7 && strncasecmp(header, "HTTP/1.", 7) == 0)
+ {
+ if(length < 12)
+ {
+ return CURLE_WRITE_ERROR;
+ }
+
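+        // e.g. in "HTTP/1.1 200 OK\r\n" the status digits occupy
+        // offsets 9 through 11, yielding "200".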
+ memcpy(code, header+9, 3*sizeof(char));
+ code[3] = '\0';
+ state->http->last_status = atoi(code);
+
+ state->resp_headers = JS_NewArrayObject(state->cx, 0, NULL);
+ if(!state->resp_headers)
+ {
+ return CURLE_WRITE_ERROR;
+ }
+
+ return length;
+ }
+
+ // We get a notice at the \r\n\r\n after headers.
+ if(length <= 2)
+ {
+ return length;
+ }
+
+ // Append the new header to our array.
+ hdr = dec_string(state->cx, header, length);
+ if(!hdr)
+ {
+ return CURLE_WRITE_ERROR;
+ }
+
+ if(!JS_GetArrayLength(state->cx, state->resp_headers, &hdrlen))
+ {
+ return CURLE_WRITE_ERROR;
+ }
+
+ hdrval = STRING_TO_JSVAL(hdr);
+ if(!JS_SetElement(state->cx, state->resp_headers, hdrlen, &hdrval))
+ {
+ return CURLE_WRITE_ERROR;
+ }
+
+ return length;
+}
+
+static size_t
+recv_body(void *ptr, size_t size, size_t nmem, void *data)
+{
+ CurlState* state = (CurlState*) data;
+ size_t length = size * nmem;
+ char* tmp = NULL;
+
+ if(!state->recvbuf)
+ {
+ state->recvlen = 4096;
+ state->read = 0;
+ state->recvbuf = JS_malloc(state->cx, state->recvlen);
+ }
+
+ if(!state->recvbuf)
+ {
+ return CURLE_WRITE_ERROR;
+ }
+
+ // +1 so we can add '\0' back up in the go function.
+ while(length+1 > state->recvlen - state->read) state->recvlen *= 2;
+ tmp = JS_realloc(state->cx, state->recvbuf, state->recvlen);
+ if(!tmp) return CURLE_WRITE_ERROR;
+ state->recvbuf = tmp;
+
+ memcpy(state->recvbuf + state->read, ptr, length);
+ state->read += length;
+ return length;
+}
+
+JSString*
+str_from_binary(JSContext* cx, char* data, size_t length)
+{
+ jschar* conv = (jschar*) JS_malloc(cx, length * sizeof(jschar));
+ JSString* ret = NULL;
+ size_t i;
+
+ if(!conv) return NULL;
+
+ for(i = 0; i < length; i++)
+ {
+ conv[i] = (jschar) data[i];
+ }
+
+ ret = JS_NewUCString(cx, conv, length);
+ if(!ret) JS_free(cx, conv);
+
+ return ret;
+}
diff --git a/1.1.x/src/couchdb/priv/couch_js/http.h b/1.1.x/src/couchdb/priv/couch_js/http.h
new file mode 100644
index 00000000..b5f8c70f
--- /dev/null
+++ b/1.1.x/src/couchdb/priv/couch_js/http.h
@@ -0,0 +1,18 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#ifndef COUCH_JS_HTTP_H
+#define COUCH_JS_HTTP_H
+
+JSObject* install_http(JSContext* cx, JSObject* global);
+
+#endif
\ No newline at end of file
diff --git a/1.1.x/src/couchdb/priv/couch_js/main.c b/1.1.x/src/couchdb/priv/couch_js/main.c
new file mode 100644
index 00000000..376aa15b
--- /dev/null
+++ b/1.1.x/src/couchdb/priv/couch_js/main.c
@@ -0,0 +1,338 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <jsapi.h>
+#include "config.h"
+
+#include "utf8.h"
+#include "http.h"
+
+int gExitCode = 0;
+
+#ifdef JS_THREADSAFE
+#define SETUP_REQUEST(cx) \
+ JS_SetContextThread(cx); \
+ JS_BeginRequest(cx);
+#define FINISH_REQUEST(cx) \
+ JS_EndRequest(cx); \
+ JS_ClearContextThread(cx);
+#else
+#define SETUP_REQUEST(cx)
+#define FINISH_REQUEST(cx)
+#endif
+
+static JSBool
+evalcx(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ JSObject *sandbox;
+ JSContext *subcx;
+ const jschar *src;
+ size_t srclen;
+ JSBool ret = JS_FALSE;
+
+ sandbox = NULL;
+ if(!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sandbox))
+ {
+ return JS_FALSE;
+ }
+
+ subcx = JS_NewContext(JS_GetRuntime(cx), 8L * 1024L);
+ if(!subcx)
+ {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ SETUP_REQUEST(subcx);
+
+ src = JS_GetStringChars(str);
+ srclen = JS_GetStringLength(str);
+
+ if(!sandbox)
+ {
+ sandbox = JS_NewObject(subcx, NULL, NULL, NULL);
+ if(!sandbox || !JS_InitStandardClasses(subcx, sandbox)) goto done;
+ }
+
+ if(srclen == 0)
+ {
+ *rval = OBJECT_TO_JSVAL(sandbox);
+ }
+ else
+ {
+ JS_EvaluateUCScript(subcx, sandbox, src, srclen, NULL, 0, rval);
+ }
+
+ ret = JS_TRUE;
+
+done:
+ FINISH_REQUEST(subcx);
+ JS_DestroyContext(subcx);
+ return ret;
+}
+
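+// Illustrative use from a script (hypothetical snippet): evalcx("")
+// returns a fresh sandbox with the standard classes installed, while
+// evalcx("1 + 1", sandbox) evaluates the source inside that sandbox.
+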
+static JSBool
+gc(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JS_GC(cx);
+ return JS_TRUE;
+}
+
+static JSBool
+print(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uintN i;
+ char *bytes;
+
+ for(i = 0; i < argc; i++)
+ {
+ bytes = enc_string(cx, argv[i], NULL);
+ if(!bytes) return JS_FALSE;
+
+ fprintf(stdout, "%s%s", i ? " " : "", bytes);
+ JS_free(cx, bytes);
+ }
+
+ fputc('\n', stdout);
+ fflush(stdout);
+ return JS_TRUE;
+}
+
+static JSBool
+quit(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JS_ConvertArguments(cx, argc, argv, "/ i", &gExitCode);
+ return JS_FALSE;
+}
+
+static char*
+readfp(JSContext* cx, FILE* fp, size_t* buflen)
+{
+ char* bytes = NULL;
+ char* tmp = NULL;
+ size_t used = 0;
+ size_t byteslen = 256;
+ size_t readlen = 0;
+
+ bytes = JS_malloc(cx, byteslen);
+ if(bytes == NULL) return NULL;
+
+    while((readlen = js_fgets(bytes+used, byteslen-used, fp)) > 0)
+ {
+ used += readlen;
+
+ if(bytes[used-1] == '\n')
+ {
+ bytes[used-1] = '\0';
+ break;
+ }
+
+ // Double our buffer and read more.
+ byteslen *= 2;
+ tmp = JS_realloc(cx, bytes, byteslen);
+ if(!tmp)
+ {
+ JS_free(cx, bytes);
+ return NULL;
+ }
+ bytes = tmp;
+ }
+
+ *buflen = used;
+ return bytes;
+}
+
+static JSBool
+readline(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ JSString *str;
+ char* bytes;
+ char* tmp;
+ size_t byteslen;
+
+ /* GC Occasionally */
+ JS_MaybeGC(cx);
+
+ bytes = readfp(cx, stdin, &byteslen);
+ if(!bytes) return JS_FALSE;
+
+ /* Treat the empty string specially */
+ if(byteslen == 0)
+ {
+ *rval = JS_GetEmptyStringValue(cx);
+ JS_free(cx, bytes);
+ return JS_TRUE;
+ }
+
+ /* Shrink the buffer to the real size */
+ tmp = JS_realloc(cx, bytes, byteslen);
+ if(!tmp)
+ {
+ JS_free(cx, bytes);
+ return JS_FALSE;
+ }
+ bytes = tmp;
+
+ str = dec_string(cx, bytes, byteslen);
+ JS_free(cx, bytes);
+
+ if(!str) return JS_FALSE;
+
+ *rval = STRING_TO_JSVAL(str);
+
+ return JS_TRUE;
+}
+
+static JSBool
+seal(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ JSObject *target;
+ JSBool deep = JS_FALSE;
+
+ if (!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep))
+ return JS_FALSE;
+ if (!target)
+ return JS_TRUE;
+ return JS_SealObject(cx, target, deep);
+}
+
+static void
+execute_script(JSContext *cx, JSObject *obj, const char *filename) {
+ FILE *file;
+ JSScript *script;
+ jsval result;
+
+ if(!filename || strcmp(filename, "-") == 0)
+ {
+ file = stdin;
+ }
+ else
+ {
+ file = fopen(filename, "r");
+ if (!file)
+ {
+ fprintf(stderr, "could not open script file %s\n", filename);
+ gExitCode = 1;
+ return;
+ }
+ }
+
+ script = JS_CompileFileHandle(cx, obj, filename, file);
+ if(script)
+ {
+ JS_ExecuteScript(cx, obj, script, &result);
+ JS_DestroyScript(cx, script);
+ }
+}
+
+static void
+printerror(JSContext *cx, const char *mesg, JSErrorReport *report)
+{
+ if(!report || !JSREPORT_IS_WARNING(report->flags))
+ {
+ fprintf(stderr, "%s\n", mesg);
+ }
+}
+
+static JSFunctionSpec global_functions[] = {
+ {"evalcx", evalcx, 0, 0, 0},
+ {"gc", gc, 0, 0, 0},
+ {"print", print, 0, 0, 0},
+ {"quit", quit, 0, 0, 0},
+ {"readline", readline, 0, 0, 0},
+ {"seal", seal, 0, 0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static JSClass global_class = {
+ "GlobalClass",
+ JSCLASS_GLOBAL_FLAGS,
+ JS_PropertyStub,
+ JS_PropertyStub,
+ JS_PropertyStub,
+ JS_PropertyStub,
+ JS_EnumerateStub,
+ JS_ResolveStub,
+ JS_ConvertStub,
+ JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+int
+main(int argc, const char * argv[])
+{
+ JSRuntime* rt = NULL;
+ JSContext* cx = NULL;
+ JSObject* global = NULL;
+ JSFunctionSpec* sp = NULL;
+
+ rt = JS_NewRuntime(64L * 1024L * 1024L);
+ if (!rt) return 1;
+
+ cx = JS_NewContext(rt, 8L * 1024L);
+ if (!cx) return 1;
+
+ JS_SetErrorReporter(cx, printerror);
+ JS_ToggleOptions(cx, JSOPTION_XML);
+
+ SETUP_REQUEST(cx);
+
+ global = JS_NewObject(cx, &global_class, NULL, NULL);
+ if (!global) return 1;
+ if (!JS_InitStandardClasses(cx, global)) return 1;
+
+ for(sp = global_functions; sp->name != NULL; sp++)
+ {
+ if(!JS_DefineFunction(cx, global,
+ sp->name, sp->call, sp->nargs, sp->flags))
+ {
+ fprintf(stderr, "Failed to create function: %s\n", sp->name);
+ return 1;
+ }
+ }
+
+ if(!install_http(cx, global))
+ {
+ return 1;
+ }
+
+ JS_SetGlobalObject(cx, global);
+
+ if(argc > 2)
+ {
+ fprintf(stderr, "incorrect number of arguments\n\n");
+ fprintf(stderr, "usage: %s <scriptfile>\n", argv[0]);
+ return 2;
+ }
+
+    if(argc < 2)
+ {
+ execute_script(cx, global, NULL);
+ }
+ else
+ {
+ execute_script(cx, global, argv[1]);
+ }
+
+ FINISH_REQUEST(cx);
+
+ JS_DestroyContext(cx);
+ JS_DestroyRuntime(rt);
+ JS_ShutDown();
+
+ return gExitCode;
+}
diff --git a/1.1.x/src/couchdb/priv/couch_js/utf8.c b/1.1.x/src/couchdb/priv/couch_js/utf8.c
new file mode 100644
index 00000000..699a6fee
--- /dev/null
+++ b/1.1.x/src/couchdb/priv/couch_js/utf8.c
@@ -0,0 +1,286 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <jsapi.h>
+
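+// enc_char below writes the UTF-8 encoding of one code point into
+// utf8Buffer; for example, U+20AC (the Euro sign) becomes E2 82 AC.
+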
+static int
+enc_char(uint8 *utf8Buffer, uint32 ucs4Char)
+{
+ int utf8Length = 1;
+
+ if (ucs4Char < 0x80)
+ {
+ *utf8Buffer = (uint8)ucs4Char;
+ }
+ else
+ {
+ int i;
+ uint32 a = ucs4Char >> 11;
+ utf8Length = 2;
+ while(a)
+ {
+ a >>= 5;
+ utf8Length++;
+ }
+ i = utf8Length;
+ while(--i)
+ {
+ utf8Buffer[i] = (uint8)((ucs4Char & 0x3F) | 0x80);
+ ucs4Char >>= 6;
+ }
+ *utf8Buffer = (uint8)(0x100 - (1 << (8-utf8Length)) + ucs4Char);
+ }
+
+ return utf8Length;
+}
+
+static JSBool
+enc_charbuf(const jschar* src, size_t srclen, char* dst, size_t* dstlenp)
+{
+ size_t i;
+ size_t utf8Len;
+ size_t dstlen = *dstlenp;
+ size_t origDstlen = dstlen;
+ jschar c;
+ jschar c2;
+ uint32 v;
+ uint8 utf8buf[6];
+
+ if(!dst)
+ {
+ dstlen = origDstlen = (size_t) -1;
+ }
+
+ while(srclen)
+ {
+ c = *src++;
+ srclen--;
+
+ if((c >= 0xDC00) && (c <= 0xDFFF)) goto bad_surrogate;
+
+ if(c < 0xD800 || c > 0xDBFF)
+ {
+ v = c;
+ }
+ else
+ {
+ if(srclen < 1) goto buffer_too_small;
+ c2 = *src++;
+ srclen--;
+ if ((c2 < 0xDC00) || (c2 > 0xDFFF))
+ {
+ c = c2;
+ goto bad_surrogate;
+ }
+ v = ((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000;
+ }
+ if(v < 0x0080)
+ {
+ /* no encoding necessary - performance hack */
+ if(!dstlen) goto buffer_too_small;
+ if(dst) *dst++ = (char) v;
+ utf8Len = 1;
+ }
+ else
+ {
+ utf8Len = enc_char(utf8buf, v);
+ if(utf8Len > dstlen) goto buffer_too_small;
+ if(dst)
+ {
+ for (i = 0; i < utf8Len; i++)
+ {
+ *dst++ = (char) utf8buf[i];
+ }
+ }
+ }
+ dstlen -= utf8Len;
+ }
+
+ *dstlenp = (origDstlen - dstlen);
+ return JS_TRUE;
+
+bad_surrogate:
+ *dstlenp = (origDstlen - dstlen);
+ return JS_FALSE;
+
+buffer_too_small:
+ *dstlenp = (origDstlen - dstlen);
+ return JS_FALSE;
+}
+
+char*
+enc_string(JSContext* cx, jsval arg, size_t* buflen)
+{
+ JSString* str = NULL;
+ jschar* src = NULL;
+ char* bytes = NULL;
+ size_t srclen = 0;
+ size_t byteslen = 0;
+
+ str = JS_ValueToString(cx, arg);
+ if(!str) goto error;
+
+ src = JS_GetStringChars(str);
+ srclen = JS_GetStringLength(str);
+
+ if(!enc_charbuf(src, srclen, NULL, &byteslen)) goto error;
+
+    bytes = JS_malloc(cx, byteslen + 1);
+    if(!bytes) goto error;
+    bytes[byteslen] = 0;
+
+ if(!enc_charbuf(src, srclen, bytes, &byteslen)) goto error;
+
+ if(buflen) *buflen = byteslen;
+ goto success;
+
+error:
+ if(bytes != NULL) JS_free(cx, bytes);
+ bytes = NULL;
+
+success:
+ return bytes;
+}
+
+static uint32
+dec_char(const uint8 *utf8Buffer, int utf8Length)
+{
+ uint32 ucs4Char;
+ uint32 minucs4Char;
+
+ /* from Unicode 3.1, non-shortest form is illegal */
+ static const uint32 minucs4Table[] = {
+        0x00000080, 0x00000800, 0x00010000, 0x00200000, 0x04000000
+ };
+
+ if (utf8Length == 1)
+ {
+ ucs4Char = *utf8Buffer;
+ }
+ else
+ {
+ ucs4Char = *utf8Buffer++ & ((1<<(7-utf8Length))-1);
+ minucs4Char = minucs4Table[utf8Length-2];
+ while(--utf8Length)
+ {
+ ucs4Char = ucs4Char<<6 | (*utf8Buffer++ & 0x3F);
+ }
+ if(ucs4Char < minucs4Char || ucs4Char == 0xFFFE || ucs4Char == 0xFFFF)
+ {
+ ucs4Char = 0xFFFD;
+ }
+ }
+
+ return ucs4Char;
+}
+
+static JSBool
+dec_charbuf(const char *src, size_t srclen, jschar *dst, size_t *dstlenp)
+{
+ uint32 v;
+ size_t offset = 0;
+ size_t j;
+ size_t n;
+ size_t dstlen = *dstlenp;
+ size_t origDstlen = dstlen;
+
+ if(!dst) dstlen = origDstlen = (size_t) -1;
+
+ while(srclen)
+ {
+ v = (uint8) *src;
+ n = 1;
+
+ if(v & 0x80)
+ {
+ while(v & (0x80 >> n))
+ {
+ n++;
+ }
+
+ if(n > srclen) goto buffer_too_small;
+ if(n == 1 || n > 6) goto bad_character;
+
+ for(j = 1; j < n; j++)
+ {
+ if((src[j] & 0xC0) != 0x80) goto bad_character;
+ }
+
+ v = dec_char((const uint8 *) src, n);
+ if(v >= 0x10000)
+ {
+ v -= 0x10000;
+
+                if(v > 0xFFFFF) goto bad_character;
+                if(dstlen < 2) goto buffer_too_small;
+
+ if(dst)
+ {
+ *dst++ = (jschar)((v >> 10) + 0xD800);
+ v = (jschar)((v & 0x3FF) + 0xDC00);
+ }
+ dstlen--;
+ }
+ }
+
+ if(!dstlen) goto buffer_too_small;
+ if(dst) *dst++ = (jschar) v;
+
+ dstlen--;
+ offset += n;
+ src += n;
+ srclen -= n;
+ }
+
+ *dstlenp = (origDstlen - dstlen);
+ return JS_TRUE;
+
+bad_character:
+ *dstlenp = (origDstlen - dstlen);
+ return JS_FALSE;
+
+buffer_too_small:
+ *dstlenp = (origDstlen - dstlen);
+ return JS_FALSE;
+}
+
+JSString*
+dec_string(JSContext* cx, const char* bytes, size_t byteslen)
+{
+ JSString* str = NULL;
+ jschar* chars = NULL;
+ size_t charslen;
+
+ if(!dec_charbuf(bytes, byteslen, NULL, &charslen)) goto error;
+
+ chars = JS_malloc(cx, (charslen + 1) * sizeof(jschar));
+ if(!chars) return NULL;
+ chars[charslen] = 0;
+
+ if(!dec_charbuf(bytes, byteslen, chars, &charslen)) goto error;
+
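+    // Callers include a trailing terminator byte in byteslen, so the final
+    // decoded jschar is dropped from the string's length here.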
+ str = JS_NewUCString(cx, chars, charslen - 1);
+ if(!str) goto error;
+
+ goto success;
+
+error:
+ if(chars != NULL) JS_free(cx, chars);
+ str = NULL;
+
+success:
+ return str;
+}
\ No newline at end of file
diff --git a/1.1.x/src/couchdb/priv/couch_js/utf8.h b/1.1.x/src/couchdb/priv/couch_js/utf8.h
new file mode 100644
index 00000000..00f6b736
--- /dev/null
+++ b/1.1.x/src/couchdb/priv/couch_js/utf8.h
@@ -0,0 +1,19 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#ifndef COUCH_JS_UTF_8_H
+#define COUCH_JS_UTF_8_H
+
+char* enc_string(JSContext* cx, jsval arg, size_t* buflen);
+JSString* dec_string(JSContext* cx, const char* buf, size_t buflen);
+
+#endif
\ No newline at end of file
diff --git a/1.1.x/src/couchdb/priv/icu_driver/couch_icu_driver.c b/1.1.x/src/couchdb/priv/icu_driver/couch_icu_driver.c
new file mode 100644
index 00000000..1afe8eac
--- /dev/null
+++ b/1.1.x/src/couchdb/priv/icu_driver/couch_icu_driver.c
@@ -0,0 +1,177 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+// This file is the C port driver for Erlang. It provides a low overhead
+// means of calling into C code, however coding errors in this module can
+// crash the entire Erlang server.
+
+#ifdef DARWIN
+#define U_HIDE_DRAFT_API 1
+#define U_DISABLE_RENAMING 1
+#endif
+
+#include "erl_driver.h"
+#include "unicode/ucol.h"
+#include "unicode/ucasemap.h"
+#ifndef WIN32
+#include <string.h> // for memcpy
+#endif
+
+typedef struct {
+ ErlDrvPort port;
+ UCollator* collNoCase;
+ UCollator* coll;
+} couch_drv_data;
+
+static void couch_drv_stop(ErlDrvData data)
+{
+ couch_drv_data* pData = (couch_drv_data*)data;
+ if (pData->coll) {
+ ucol_close(pData->coll);
+ }
+ if (pData->collNoCase) {
+ ucol_close(pData->collNoCase);
+ }
+ driver_free((char*)pData);
+}
+
+static ErlDrvData couch_drv_start(ErlDrvPort port, char *buff)
+{
+ UErrorCode status = U_ZERO_ERROR;
+ couch_drv_data* pData = (couch_drv_data*)driver_alloc(sizeof(couch_drv_data));
+
+ if (pData == NULL)
+ return ERL_DRV_ERROR_GENERAL;
+
+ pData->port = port;
+
+ pData->coll = ucol_open("", &status);
+ if (U_FAILURE(status)) {
+ couch_drv_stop((ErlDrvData)pData);
+ return ERL_DRV_ERROR_GENERAL;
+ }
+
+ pData->collNoCase = ucol_open("", &status);
+ if (U_FAILURE(status)) {
+ couch_drv_stop((ErlDrvData)pData);
+ return ERL_DRV_ERROR_GENERAL;
+ }
+
+ ucol_setAttribute(pData->collNoCase, UCOL_STRENGTH, UCOL_PRIMARY, &status);
+ if (U_FAILURE(status)) {
+ couch_drv_stop((ErlDrvData)pData);
+ return ERL_DRV_ERROR_GENERAL;
+ }
+
+ return (ErlDrvData)pData;
+}
+
+static int return_control_result(void* pLocalResult, int localLen, char **ppRetBuf, int returnLen)
+{
+ if (*ppRetBuf == NULL || localLen > returnLen) {
+ *ppRetBuf = (char*)driver_alloc_binary(localLen);
+ if(*ppRetBuf == NULL) {
+ return -1;
+ }
+ }
+ memcpy(*ppRetBuf, pLocalResult, localLen);
+ return localLen;
+}
+
+static int couch_drv_control(ErlDrvData drv_data, unsigned int command, char *pBuf,
+ int bufLen, char **rbuf, int rlen)
+{
+
+ couch_drv_data* pData = (couch_drv_data*)drv_data;
+ switch(command) {
+ case 0: // COLLATE
+ case 1: // COLLATE_NO_CASE:
+ {
+ UErrorCode status = U_ZERO_ERROR;
+ int collResult;
+ char response;
+ UCharIterator iterA;
+ UCharIterator iterB;
+ int32_t length;
+
+ // 2 strings are in the buffer, consecutively
+ // The strings begin first with a 32 bit integer byte length, then the actual
+ // string bytes follow.
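+        // Layout, with lengths in native byte order:
+        //   [int32 len A][bytes of A][int32 len B][bytes of B]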
+
+ // first 32bits are the length
+ memcpy(&length, pBuf, sizeof(length));
+ pBuf += sizeof(length);
+
+ // point the iterator at it.
+ uiter_setUTF8(&iterA, pBuf, length);
+
+ pBuf += length; // now on to string b
+
+ // first 32bits are the length
+ memcpy(&length, pBuf, sizeof(length));
+ pBuf += sizeof(length);
+
+ // point the iterator at it.
+ uiter_setUTF8(&iterB, pBuf, length);
+
+ if (command == 0) // COLLATE
+ collResult = ucol_strcollIter(pData->coll, &iterA, &iterB, &status);
+ else // COLLATE_NO_CASE
+ collResult = ucol_strcollIter(pData->collNoCase, &iterA, &iterB, &status);
+
+ if (collResult < 0)
+ response = 0; //lt
+ else if (collResult > 0)
+ response = 2; //gt
+ else
+ response = 1; //eq
+
+ return return_control_result(&response, sizeof(response), rbuf, rlen);
+ }
+
+ default:
+ return -1;
+ }
+}
+
+ErlDrvEntry couch_driver_entry = {
+ NULL, /* F_PTR init, N/A */
+ couch_drv_start, /* L_PTR start, called when port is opened */
+ couch_drv_stop, /* F_PTR stop, called when port is closed */
+ NULL, /* F_PTR output, called when erlang has sent */
+ NULL, /* F_PTR ready_input, called when input descriptor ready */
+ NULL, /* F_PTR ready_output, called when output descriptor ready */
+ "couch_icu_driver", /* char *driver_name, the argument to open_port */
+ NULL, /* F_PTR finish, called when unloaded */
+ NULL, /* Not used */
+ couch_drv_control, /* F_PTR control, port_command callback */
+ NULL, /* F_PTR timeout, reserved */
+ NULL, /* F_PTR outputv, reserved */
+ NULL, /* F_PTR ready_async */
+ NULL, /* F_PTR flush */
+ NULL, /* F_PTR call */
+ NULL, /* F_PTR event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ ERL_DRV_FLAG_USE_PORT_LOCKING,
+ NULL, /* Reserved -- Used by emulator internally */
+ NULL, /* F_PTR process_exit */
+};
+
+DRIVER_INIT(couch_icu_driver) /* must match name in driver_entry */
+{
+ return &couch_driver_entry;
+}
diff --git a/1.1.x/src/couchdb/priv/spawnkillable/couchspawnkillable.sh b/1.1.x/src/couchdb/priv/spawnkillable/couchspawnkillable.sh
new file mode 100644
index 00000000..f8d042e3
--- /dev/null
+++ b/1.1.x/src/couchdb/priv/spawnkillable/couchspawnkillable.sh
@@ -0,0 +1,20 @@
+#! /bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# The purpose of this script is to echo an OS specific command before launching
+# the actual process. This provides a way for Erlang to hard-kill its external
+# processes.
+
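+# Illustrative exchange (the command is hypothetical): running
+#   couchspawnkillable.sh sort -u
+# first prints "kill -9 <pid>" for this shell's PID; exec then replaces the
+# shell with "sort -u", so the advertised PID refers to the real process.
+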
+echo "kill -9 $$"
+exec "$@"
diff --git a/1.1.x/src/couchdb/priv/spawnkillable/couchspawnkillable_win.c b/1.1.x/src/couchdb/priv/spawnkillable/couchspawnkillable_win.c
new file mode 100644
index 00000000..06782315
--- /dev/null
+++ b/1.1.x/src/couchdb/priv/spawnkillable/couchspawnkillable_win.c
@@ -0,0 +1,145 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do what the two lines of shell script in couchspawnkillable.sh do...
+// * Create a new suspended process with the same (duplicated) standard
+// handles as us.
+// * Write a line to stdout, consisting of the path to ourselves, plus
+// '--kill {pid}' where {pid} is the PID of the newly created process.
+// * Un-suspend the new process.
+// * Wait for the process to terminate.
+// * Terminate with the child's exit-code.
+
+// Later, couch will call us with --kill and the PID, so we dutifully
+// terminate the specified PID.
+
+#include <stdlib.h>
+#include "windows.h"
+
+char *get_child_cmdline(int argc, char **argv)
+{
+ // make a new command-line, but skipping me.
+ // XXX - todo - spaces etc in args???
+ int i;
+ char *p, *cmdline;
+ int nchars = 0;
+ int nthis = 1;
+ for (i=1;i<argc;i++)
+ nchars += strlen(argv[i])+1;
+ cmdline = p = malloc(nchars+1);
+ if (!cmdline)
+ return NULL;
+ for (i=1;i<argc;i++) {
+ nthis = strlen(argv[i]);
+ strncpy(p, argv[i], nthis);
+ p[nthis] = ' ';
+ p += nthis+1;
+ }
+ // Replace the last space we added above with a '\0'
+ cmdline[nchars-1] = '\0';
+ return cmdline;
+}
+
+// create the child process, returning 0, or the exit-code we will
+// terminate with.
+int create_child(int argc, char **argv, PROCESS_INFORMATION *pi)
+{
+ DWORD dwcreate;
+ STARTUPINFO si;
+ char *cmdline;
+ if (argc < 2)
+ return 1;
+ cmdline = get_child_cmdline(argc, argv);
+ if (!cmdline)
+ return 2;
+
+ memset(&si, 0, sizeof(si));
+ si.cb = sizeof(si);
+ // depending on how *our* parent is started, we may or may not have
+ // a valid stderr stream - so although we try and duplicate it, only
+ // failing to duplicate stdin and stdout are considered fatal.
+ if (!DuplicateHandle(GetCurrentProcess(),
+ GetStdHandle(STD_INPUT_HANDLE),
+ GetCurrentProcess(),
+ &si.hStdInput,
+ 0,
+ TRUE, // inheritable
+ DUPLICATE_SAME_ACCESS) ||
+ !DuplicateHandle(GetCurrentProcess(),
+ GetStdHandle(STD_OUTPUT_HANDLE),
+ GetCurrentProcess(),
+ &si.hStdOutput,
+ 0,
+ TRUE, // inheritable
+ DUPLICATE_SAME_ACCESS)) {
+ return 3;
+ }
+ DuplicateHandle(GetCurrentProcess(),
+ GetStdHandle(STD_ERROR_HANDLE),
+ GetCurrentProcess(),
+ &si.hStdError,
+ 0,
+ TRUE, // inheritable
+ DUPLICATE_SAME_ACCESS);
+
+ si.dwFlags = STARTF_USESTDHANDLES;
+ dwcreate = CREATE_SUSPENDED;
+ if (!CreateProcess( NULL, cmdline,
+ NULL,
+ NULL,
+ TRUE, // inherit handles
+ dwcreate,
+ NULL, // environ
+ NULL, // cwd
+ &si,
+ pi))
+ return 4;
+ return 0;
+}
+
+// and here we go...
+int main(int argc, char **argv)
+{
+ char out_buf[1024];
+ int rc;
+ DWORD cbwritten;
+ DWORD exitcode;
+ PROCESS_INFORMATION pi;
+ if (argc==3 && strcmp(argv[1], "--kill")==0) {
+ HANDLE h = OpenProcess(PROCESS_TERMINATE, 0, atoi(argv[2]));
+ if (!h)
+ return 1;
+ if (!TerminateProcess(h, 0))
+ return 2;
+ CloseHandle(h);
+ return 0;
+ }
+ // spawn the new suspended process
+ rc = create_child(argc, argv, &pi);
+ if (rc)
+ return rc;
+ // Write the 'terminate' command, which includes this PID, back to couch.
+ // *sob* - what about spaces etc?
+ sprintf_s(out_buf, sizeof(out_buf), "%s --kill %d\n",
+ argv[0], pi.dwProcessId);
+ WriteFile(GetStdHandle(STD_OUTPUT_HANDLE), out_buf, strlen(out_buf),
+ &cbwritten, NULL);
+ // Let the child process go...
+ ResumeThread(pi.hThread);
+ // Wait for the process to terminate so we can reflect the exit code
+ // back to couch.
+ WaitForSingleObject(pi.hProcess, INFINITE);
+ if (!GetExitCodeProcess(pi.hProcess, &exitcode))
+ return 6;
+ return exitcode;
+}
diff --git a/1.1.x/src/couchdb/priv/stat_descriptions.cfg.in b/1.1.x/src/couchdb/priv/stat_descriptions.cfg.in
new file mode 100644
index 00000000..b80d7684
--- /dev/null
+++ b/1.1.x/src/couchdb/priv/stat_descriptions.cfg.in
@@ -0,0 +1,50 @@
+%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+%% use this file except in compliance with the License. You may obtain a copy of
+%% the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+%% License for the specific language governing permissions and limitations under
+%% the License.
+
+% Style guide for descriptions: Start with a lowercase letter & do not add
+% a trailing full-stop / period
+% Please keep this in alphabetical order
+
+{couchdb, database_writes, "number of times a database was changed"}.
+{couchdb, database_reads, "number of times a document was read from a database"}.
+{couchdb, open_databases, "number of open databases"}.
+{couchdb, open_os_files, "number of file descriptors CouchDB has open"}.
+{couchdb, request_time, "length of a request inside CouchDB without MochiWeb"}.
+{couchdb, auth_cache_hits, "number of authentication cache hits"}.
+{couchdb, auth_cache_misses, "number of authentication cache misses"}.
+
+{httpd, bulk_requests, "number of bulk requests"}.
+{httpd, requests, "number of HTTP requests"}.
+{httpd, temporary_view_reads, "number of temporary view reads"}.
+{httpd, view_reads, "number of view reads"}.
+{httpd, clients_requesting_changes, "number of clients for continuous _changes"}.
+
+{httpd_request_methods, 'COPY', "number of HTTP COPY requests"}.
+{httpd_request_methods, 'DELETE', "number of HTTP DELETE requests"}.
+{httpd_request_methods, 'GET', "number of HTTP GET requests"}.
+{httpd_request_methods, 'HEAD', "number of HTTP HEAD requests"}.
+{httpd_request_methods, 'POST', "number of HTTP POST requests"}.
+{httpd_request_methods, 'PUT', "number of HTTP PUT requests"}.
+
+{httpd_status_codes, '200', "number of HTTP 200 OK responses"}.
+{httpd_status_codes, '201', "number of HTTP 201 Created responses"}.
+{httpd_status_codes, '202', "number of HTTP 202 Accepted responses"}.
+{httpd_status_codes, '301', "number of HTTP 301 Moved Permanently responses"}.
+{httpd_status_codes, '304', "number of HTTP 304 Not Modified responses"}.
+{httpd_status_codes, '400', "number of HTTP 400 Bad Request responses"}.
+{httpd_status_codes, '401', "number of HTTP 401 Unauthorized responses"}.
+{httpd_status_codes, '403', "number of HTTP 403 Forbidden responses"}.
+{httpd_status_codes, '404', "number of HTTP 404 Not Found responses"}.
+{httpd_status_codes, '405', "number of HTTP 405 Method Not Allowed responses"}.
+{httpd_status_codes, '409', "number of HTTP 409 Conflict responses"}.
+{httpd_status_codes, '412', "number of HTTP 412 Precondition Failed responses"}.
+{httpd_status_codes, '500', "number of HTTP 500 Internal Server Error responses"}.
diff --git a/1.1.x/src/erlang-oauth/Makefile.am b/1.1.x/src/erlang-oauth/Makefile.am
new file mode 100644
index 00000000..48b76482
--- /dev/null
+++ b/1.1.x/src/erlang-oauth/Makefile.am
@@ -0,0 +1,50 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+oauthebindir = $(localerlanglibdir)/erlang-oauth/ebin
+
+oauth_file_collection = \
+ oauth.app.in \
+ oauth.erl \
+ oauth_hmac_sha1.erl \
+ oauth_http.erl \
+ oauth_plaintext.erl \
+ oauth_rsa_sha1.erl \
+ oauth_unix.erl \
+ oauth_uri.erl
+
+# Removed oauth_rsa_sha1.beam until we require R12B5 or
+# we add a ./configure option to enable it.
+
+oauthebin_make_generated_file_list = \
+ oauth.app \
+ oauth.beam \
+ oauth_hmac_sha1.beam \
+ oauth_http.beam \
+ oauth_plaintext.beam \
+ oauth_unix.beam \
+ oauth_uri.beam
+
+oauthebin_DATA = \
+ $(oauthebin_make_generated_file_list)
+
+EXTRA_DIST = \
+ $(oauth_file_collection)
+
+CLEANFILES = \
+ $(oauthebin_make_generated_file_list)
+
+%.app: %.app.in
+ cp $< $@
+
+%.beam: %.erl
+ $(ERLC) $(ERLC_FLAGS) $<
diff --git a/1.1.x/src/erlang-oauth/oauth.app.in b/1.1.x/src/erlang-oauth/oauth.app.in
new file mode 100644
index 00000000..6357b9b0
--- /dev/null
+++ b/1.1.x/src/erlang-oauth/oauth.app.in
@@ -0,0 +1,20 @@
+{application, oauth, [
+ {description, "Erlang OAuth implementation"},
+ {vsn, "dev"},
+ {modules, [
+ oauth,
+ oauth_hmac_sha1,
+ oauth_http,
+ oauth_plaintext,
+ oauth_rsa_sha1,
+ oauth_unix,
+ oauth_uri
+ ]},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib,
+ crypto,
+ inets
+ ]}
+]}.
diff --git a/1.1.x/src/erlang-oauth/oauth.erl b/1.1.x/src/erlang-oauth/oauth.erl
new file mode 100644
index 00000000..866655c9
--- /dev/null
+++ b/1.1.x/src/erlang-oauth/oauth.erl
@@ -0,0 +1,107 @@
+-module(oauth).
+
+-export(
+ [ get/5
+ , header/1
+ , post/5
+ , signature/5
+ , signature_base_string/3
+ , signed_params/6
+ , token/1
+ , token_secret/1
+ , uri/2
+ , verify/6
+ ]).
+
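+%% Illustrative call (the key, secret and URL are hypothetical):
+%%   Consumer = {"consumer_key", "consumer_secret", hmac_sha1},
+%%   oauth:get("http://example.org/api", [], Consumer, "", "").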
+
+get(URL, ExtraParams, Consumer, Token, TokenSecret) ->
+ SignedParams = signed_params("GET", URL, ExtraParams, Consumer, Token, TokenSecret),
+ oauth_http:get(uri(URL, SignedParams)).
+
+post(URL, ExtraParams, Consumer, Token, TokenSecret) ->
+ SignedParams = signed_params("POST", URL, ExtraParams, Consumer, Token, TokenSecret),
+ oauth_http:post(URL, oauth_uri:params_to_string(SignedParams)).
+
+uri(Base, []) ->
+ Base;
+uri(Base, Params) ->
+ lists:concat([Base, "?", oauth_uri:params_to_string(Params)]).
+
+header(Params) ->
+ {"Authorization", "OAuth " ++ oauth_uri:params_to_header_string(Params)}.
+
+token(Params) ->
+ proplists:get_value("oauth_token", Params).
+
+token_secret(Params) ->
+ proplists:get_value("oauth_token_secret", Params).
+
+verify(Signature, HttpMethod, URL, Params, Consumer, TokenSecret) ->
+ case signature_method(Consumer) of
+ plaintext ->
+ oauth_plaintext:verify(Signature, consumer_secret(Consumer), TokenSecret);
+ hmac_sha1 ->
+ BaseString = signature_base_string(HttpMethod, URL, Params),
+ oauth_hmac_sha1:verify(Signature, BaseString, consumer_secret(Consumer), TokenSecret);
+ rsa_sha1 ->
+ BaseString = signature_base_string(HttpMethod, URL, Params),
+ oauth_rsa_sha1:verify(Signature, BaseString, consumer_secret(Consumer))
+ end.
+
+signed_params(HttpMethod, URL, ExtraParams, Consumer, Token, TokenSecret) ->
+ Params = token_param(Token, params(Consumer, ExtraParams)),
+ [{"oauth_signature", signature(HttpMethod, URL, Params, Consumer, TokenSecret)}|Params].
+
+signature(HttpMethod, URL, Params, Consumer, TokenSecret) ->
+ case signature_method(Consumer) of
+ plaintext ->
+ oauth_plaintext:signature(consumer_secret(Consumer), TokenSecret);
+ hmac_sha1 ->
+ BaseString = signature_base_string(HttpMethod, URL, Params),
+ oauth_hmac_sha1:signature(BaseString, consumer_secret(Consumer), TokenSecret);
+ rsa_sha1 ->
+ BaseString = signature_base_string(HttpMethod, URL, Params),
+ oauth_rsa_sha1:signature(BaseString, consumer_secret(Consumer))
+ end.
+
+signature_base_string(HttpMethod, URL, Params) ->
+ NormalizedURL = oauth_uri:normalize(URL),
+ NormalizedParams = oauth_uri:params_to_string(lists:sort(Params)),
+ oauth_uri:calate("&", [HttpMethod, NormalizedURL, NormalizedParams]).
+
+token_param("", Params) ->
+ Params;
+token_param(Token, Params) ->
+ [{"oauth_token", Token}|Params].
+
+params(Consumer, Params) ->
+ Nonce = base64:encode_to_string(crypto:rand_bytes(32)), % cf. ruby-oauth
+ params(Consumer, oauth_unix:timestamp(), Nonce, Params).
+
+params(Consumer, Timestamp, Nonce, Params) ->
+ [ {"oauth_version", "1.0"}
+ , {"oauth_nonce", Nonce}
+ , {"oauth_timestamp", integer_to_list(Timestamp)}
+ , {"oauth_signature_method", signature_method_string(Consumer)}
+ , {"oauth_consumer_key", consumer_key(Consumer)}
+ | Params
+ ].
+
+signature_method_string(Consumer) ->
+ case signature_method(Consumer) of
+ plaintext ->
+ "PLAINTEXT";
+ hmac_sha1 ->
+ "HMAC-SHA1";
+ rsa_sha1 ->
+ "RSA-SHA1"
+ end.
+
+signature_method(_Consumer={_, _, Method}) ->
+ Method.
+
+consumer_secret(_Consumer={_, Secret, _}) ->
+ Secret.
+
+consumer_key(_Consumer={Key, _, _}) ->
+ Key.
diff --git a/1.1.x/src/erlang-oauth/oauth_hmac_sha1.erl b/1.1.x/src/erlang-oauth/oauth_hmac_sha1.erl
new file mode 100644
index 00000000..79d59f37
--- /dev/null
+++ b/1.1.x/src/erlang-oauth/oauth_hmac_sha1.erl
@@ -0,0 +1,11 @@
+-module(oauth_hmac_sha1).
+
+-export([signature/3, verify/4]).
+
+
+signature(BaseString, CS, TS) ->
+ Key = oauth_uri:calate("&", [CS, TS]),
+ base64:encode_to_string(crypto:sha_mac(Key, BaseString)).
+
+verify(Signature, BaseString, CS, TS) ->
+ couch_util:verify(signature(BaseString, CS, TS), Signature).
diff --git a/1.1.x/src/erlang-oauth/oauth_http.erl b/1.1.x/src/erlang-oauth/oauth_http.erl
new file mode 100644
index 00000000..bf5a4bac
--- /dev/null
+++ b/1.1.x/src/erlang-oauth/oauth_http.erl
@@ -0,0 +1,22 @@
+-module(oauth_http).
+
+-export([get/1, post/2, response_params/1, response_body/1, response_code/1]).
+
+
+get(URL) ->
+ request(get, {URL, []}).
+
+post(URL, Data) ->
+ request(post, {URL, [], "application/x-www-form-urlencoded", Data}).
+
+request(Method, Request) ->
+ http:request(Method, Request, [{autoredirect, false}], []).
+
+response_params(Response) ->
+ oauth_uri:params_from_string(response_body(Response)).
+
+response_body({{_, _, _}, _, Body}) ->
+ Body.
+
+response_code({{_, Code, _}, _, _}) ->
+ Code.
diff --git a/1.1.x/src/erlang-oauth/oauth_plaintext.erl b/1.1.x/src/erlang-oauth/oauth_plaintext.erl
new file mode 100644
index 00000000..41a1e9b2
--- /dev/null
+++ b/1.1.x/src/erlang-oauth/oauth_plaintext.erl
@@ -0,0 +1,10 @@
+-module(oauth_plaintext).
+
+-export([signature/2, verify/3]).
+
+
+signature(CS, TS) ->
+ oauth_uri:calate("&", [CS, TS]).
+
+verify(Signature, CS, TS) ->
+ couch_util:verify(signature(CS, TS), Signature).
diff --git a/1.1.x/src/erlang-oauth/oauth_rsa_sha1.erl b/1.1.x/src/erlang-oauth/oauth_rsa_sha1.erl
new file mode 100644
index 00000000..6f4828e0
--- /dev/null
+++ b/1.1.x/src/erlang-oauth/oauth_rsa_sha1.erl
@@ -0,0 +1,30 @@
+-module(oauth_rsa_sha1).
+
+-export([signature/2, verify/3]).
+
+-include_lib("public_key/include/public_key.hrl").
+
+
+signature(BaseString, PrivateKeyPath) ->
+ {ok, [Info]} = public_key:pem_to_der(PrivateKeyPath),
+ {ok, PrivateKey} = public_key:decode_private_key(Info),
+ base64:encode_to_string(public_key:sign(list_to_binary(BaseString), PrivateKey)).
+
+verify(Signature, BaseString, PublicKey) ->
+ public_key:verify_signature(to_binary(BaseString), sha, base64:decode(Signature), public_key(PublicKey)).
+
+to_binary(Term) when is_list(Term) ->
+ list_to_binary(Term);
+to_binary(Term) when is_binary(Term) ->
+ Term.
+
+public_key(Path) when is_list(Path) ->
+ {ok, [{cert, DerCert, not_encrypted}]} = public_key:pem_to_der(Path),
+ {ok, Cert} = public_key:pkix_decode_cert(DerCert, otp),
+ public_key(Cert);
+public_key(#'OTPCertificate'{tbsCertificate=Cert}) ->
+ public_key(Cert);
+public_key(#'OTPTBSCertificate'{subjectPublicKeyInfo=Info}) ->
+ public_key(Info);
+public_key(#'OTPSubjectPublicKeyInfo'{subjectPublicKey=Key}) ->
+ Key.
diff --git a/1.1.x/src/erlang-oauth/oauth_unix.erl b/1.1.x/src/erlang-oauth/oauth_unix.erl
new file mode 100644
index 00000000..73ca3143
--- /dev/null
+++ b/1.1.x/src/erlang-oauth/oauth_unix.erl
@@ -0,0 +1,16 @@
+-module(oauth_unix).
+
+-export([timestamp/0]).
+
+
+timestamp() ->
+ timestamp(calendar:universal_time()).
+
+timestamp(DateTime) ->
+ seconds(DateTime) - epoch().
+
+epoch() ->
+ seconds({{1970,1,1},{00,00,00}}).
+
+seconds(DateTime) ->
+ calendar:datetime_to_gregorian_seconds(DateTime).
diff --git a/1.1.x/src/erlang-oauth/oauth_uri.erl b/1.1.x/src/erlang-oauth/oauth_uri.erl
new file mode 100644
index 00000000..3bdc9076
--- /dev/null
+++ b/1.1.x/src/erlang-oauth/oauth_uri.erl
@@ -0,0 +1,88 @@
+-module(oauth_uri).
+
+-export([normalize/1, calate/2, encode/1]).
+-export([params_from_string/1, params_to_string/1,
+ params_from_header_string/1, params_to_header_string/1]).
+
+-import(lists, [concat/1]).
+
+-define(is_uppercase_alpha(C), C >= $A, C =< $Z).
+-define(is_lowercase_alpha(C), C >= $a, C =< $z).
+-define(is_alpha(C), ?is_uppercase_alpha(C); ?is_lowercase_alpha(C)).
+-define(is_digit(C), C >= $0, C =< $9).
+-define(is_alphanumeric(C), ?is_alpha(C); ?is_digit(C)).
+-define(is_unreserved(C), ?is_alphanumeric(C); C =:= $-; C =:= $_; C =:= $.; C =:= $~).
+-define(is_hex(C), ?is_digit(C); C >= $A, C =< $F).
+
+
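+%% Illustrative normalization (hypothetical URI): default ports are dropped
+%% and the host is lowercased, so normalize("http://Example.COM:80/a?b=1")
+%% yields "http://example.com/a" (the query string is not included).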
+normalize(URI) ->
+ case http_uri:parse(URI) of
+ {Scheme, UserInfo, Host, Port, Path, _Query} ->
+ normalize(Scheme, UserInfo, string:to_lower(Host), Port, [Path]);
+ Else ->
+ Else
+ end.
+
+normalize(http, UserInfo, Host, 80, Acc) ->
+ normalize(http, UserInfo, [Host|Acc]);
+normalize(https, UserInfo, Host, 443, Acc) ->
+ normalize(https, UserInfo, [Host|Acc]);
+normalize(Scheme, UserInfo, Host, Port, Acc) ->
+ normalize(Scheme, UserInfo, [Host, ":", Port|Acc]).
+
+normalize(Scheme, [], Acc) ->
+ concat([Scheme, "://"|Acc]);
+normalize(Scheme, UserInfo, Acc) ->
+ concat([Scheme, "://", UserInfo, "@"|Acc]).
+
+params_to_header_string(Params) ->
+ intercalate(", ", [concat([encode(K), "=\"", encode(V), "\""]) || {K, V} <- Params]).
+
+params_from_header_string(String) ->
+ [param_from_header_string(Param) || Param <- re:split(String, ",\\s*", [{return, list}]), Param =/= ""].
+
+param_from_header_string(Param) ->
+ [Key, QuotedValue] = string:tokens(Param, "="),
+ Value = string:substr(QuotedValue, 2, length(QuotedValue) - 2),
+ {decode(Key), decode(Value)}.
+
+params_from_string(Params) ->
+ [param_from_string(Param) || Param <- string:tokens(Params, "&")].
+
+param_from_string(Param) ->
+ list_to_tuple([decode(Value) || Value <- string:tokens(Param, "=")]).
+
+params_to_string(Params) ->
+ intercalate("&", [calate("=", [K, V]) || {K, V} <- Params]).
+
+calate(Sep, Xs) ->
+ intercalate(Sep, [encode(X) || X <- Xs]).
+
+intercalate(Sep, Xs) ->
+ concat(intersperse(Sep, Xs)).
+
+intersperse(_, []) -> [];
+intersperse(_, [X]) -> [X];
+intersperse(Sep, [X|Xs]) ->
+ [X, Sep|intersperse(Sep, Xs)].
+
+decode(Chars) ->
+ decode(Chars, []).
+
+decode([], Decoded) ->
+ lists:reverse(Decoded);
+decode([$%,A,B|Etc], Decoded) when ?is_hex(A), ?is_hex(B) ->
+ decode(Etc, [erlang:list_to_integer([A,B], 16)|Decoded]);
+decode([C|Etc], Decoded) when ?is_unreserved(C) ->
+ decode(Etc, [C|Decoded]).
+
+encode(Chars) ->
+ encode(Chars, []).
+
+encode([], Encoded) ->
+ lists:flatten(lists:reverse(Encoded));
+encode([C|Etc], Encoded) when ?is_unreserved(C) ->
+ encode(Etc, [C|Encoded]);
+encode([C|Etc], Encoded) ->
+ Value = io_lib:format("%~2.2.0s", [erlang:integer_to_list(C, 16)]),
+ encode(Etc, [Value|Encoded]).
diff --git a/1.1.x/src/etap/Makefile.am b/1.1.x/src/etap/Makefile.am
new file mode 100644
index 00000000..732347bf
--- /dev/null
+++ b/1.1.x/src/etap/Makefile.am
@@ -0,0 +1,44 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+etapebindir = $(localerlanglibdir)/etap/ebin
+
+etap_file_collection = \
+ etap.erl \
+ etap_application.erl \
+ etap_can.erl \
+ etap_exception.erl \
+ etap_process.erl \
+ etap_report.erl \
+ etap_request.erl \
+ etap_string.erl \
+ etap_web.erl
+
+etapebin_make_generated_file_list = \
+ etap.beam \
+ etap_application.beam \
+ etap_can.beam \
+ etap_exception.beam \
+ etap_process.beam \
+ etap_report.beam \
+ etap_request.beam \
+ etap_string.beam \
+ etap_web.beam
+
+etapebin_DATA = $(etapebin_make_generated_file_list)
+
+EXTRA_DIST = $(etap_file_collection)
+
+CLEANFILES = $(etapebin_make_generated_file_list)
+
+%.beam: %.erl
+ $(ERLC) $(ERLC_FLAGS) $<
diff --git a/1.1.x/src/etap/etap.erl b/1.1.x/src/etap/etap.erl
new file mode 100644
index 00000000..5ad5dba3
--- /dev/null
+++ b/1.1.x/src/etap/etap.erl
@@ -0,0 +1,416 @@
+%% Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
+%%
+%% Permission is hereby granted, free of charge, to any person
+%% obtaining a copy of this software and associated documentation
+%% files (the "Software"), to deal in the Software without
+%% restriction, including without limitation the rights to use,
+%% copy, modify, merge, publish, distribute, sublicense, and/or sell
+%% copies of the Software, and to permit persons to whom the
+%% Software is furnished to do so, subject to the following
+%% conditions:
+%%
+%% The above copyright notice and this permission notice shall be
+%% included in all copies or substantial portions of the Software.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+%% OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+%% HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+%% WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+%% OTHER DEALINGS IN THE SOFTWARE.
+%%
+%% @author Nick Gerakines <nick@gerakines.net> [http://socklabs.com/]
+%% @author Jeremy Wall <jeremy@marzhillstudios.com>
+%% @version 0.3.4
+%% @copyright 2007-2008 Jeremy Wall, 2008-2009 Nick Gerakines
+%% @reference http://testanything.org/wiki/index.php/Main_Page
+%% @reference http://en.wikipedia.org/wiki/Test_Anything_Protocol
+%% @todo Finish implementing the skip directive.
+%% @todo Document the messages handled by this receive loop.
+%% @todo Explain in documentation why we use a process to handle test input.
+%% @doc etap is a TAP testing module for Erlang components and applications.
+%% This module allows developers to test their software using the TAP method.
+%%
+%% <blockquote cite="http://en.wikipedia.org/wiki/Test_Anything_Protocol"><p>
+%% TAP, the Test Anything Protocol, is a simple text-based interface between
+%% testing modules in a test harness. TAP started life as part of the test
+%% harness for Perl but now has implementations in C/C++, Python, PHP, Perl
+%% and probably others by the time you read this.
+%% </p></blockquote>
+%%
+%% The testing process begins by defining a plan using etap:plan/1, running
+%% a number of etap tests and then calling etap:end_tests/0. Please refer to
+%% the Erlang modules in the t directory of this project for example tests.
+-module(etap).
+-export([
+ ensure_test_server/0, start_etap_server/0, test_server/1,
+ diag/1, diag/2, plan/1, end_tests/0, not_ok/2, ok/2, is/3, isnt/3,
+ any/3, none/3, fun_is/3, is_greater/3, skip/1, skip/2,
+ ensure_coverage_starts/0, ensure_coverage_ends/0, coverage_report/0,
+ datetime/1, skip/3, bail/0, bail/1
+]).
+-record(test_state, {planned = 0, count = 0, pass = 0, fail = 0, skip = 0, skip_reason = ""}).
+-vsn("0.3.4").
+
+%% @spec plan(N) -> Result
+%% N = unknown | skip | {skip, string()} | integer()
+%% Result = ok
+%% @doc Create a test plan and bootstrap the test server.
+plan(unknown) ->
+ ensure_coverage_starts(),
+ ensure_test_server(),
+ etap_server ! {self(), plan, unknown},
+ ok;
+plan(skip) ->
+ io:format("1..0 # skip~n");
+plan({skip, Reason}) ->
+ io:format("1..0 # skip ~s~n", [Reason]);
+plan(N) when is_integer(N), N > 0 ->
+ ensure_coverage_starts(),
+ ensure_test_server(),
+ etap_server ! {self(), plan, N},
+ ok.
+
+%% @spec end_tests() -> ok
+%% @doc End the current test plan and output test results.
+%% @todo This should probably be done in the test_server process.
+end_tests() ->
+ ensure_coverage_ends(),
+ etap_server ! {self(), state},
+ State = receive X -> X end,
+ if
+ State#test_state.planned == -1 ->
+ io:format("1..~p~n", [State#test_state.count]);
+ true ->
+ ok
+ end,
+ case whereis(etap_server) of
+ undefined -> ok;
+ _ -> etap_server ! done, ok
+ end.
+
+%% @private
+ensure_coverage_starts() ->
+ case os:getenv("COVER") of
+ false -> ok;
+ _ ->
+ BeamDir = case os:getenv("COVER_BIN") of false -> "ebin"; X -> X end,
+ cover:compile_beam_directory(BeamDir)
+ end.
+
+%% @private
+%% @doc Attempts to write out any collected coverage data to the cover/
+%% directory. This function should not be called externally, but it could be.
+ensure_coverage_ends() ->
+ case os:getenv("COVER") of
+ false -> ok;
+ _ ->
+ filelib:ensure_dir("cover/"),
+ Name = lists:flatten([
+ io_lib:format("~.16b", [X]) || X <- binary_to_list(erlang:md5(
+ term_to_binary({make_ref(), now()})
+ ))
+ ]),
+ cover:export("cover/" ++ Name ++ ".coverdata")
+ end.
+
+%% @spec coverage_report() -> ok
+%% @doc Use the cover module's coverage report builder to create code coverage
+%% reports from recently created coverdata files.
+coverage_report() ->
+ [cover:import(File) || File <- filelib:wildcard("cover/*.coverdata")],
+ lists:foreach(
+ fun(Mod) ->
+ cover:analyse_to_file(Mod, atom_to_list(Mod) ++ "_coverage.txt", [])
+ end,
+ cover:imported_modules()
+ ),
+ ok.
+
+bail() ->
+ bail("").
+
+bail(Reason) ->
+ etap_server ! {self(), diag, "Bail out! " ++ Reason},
+ ensure_coverage_ends(),
+    etap_server ! done,
+    ok.
+
+
+%% @spec diag(S) -> ok
+%% S = string()
+%% @doc Print a debug/status message related to the test suite.
+diag(S) -> etap_server ! {self(), diag, "# " ++ S}, ok.
+
+%% @spec diag(Format, Data) -> ok
+%% Format = atom() | string() | binary()
+%% Data = [term()]
+%% UnicodeList = [Unicode]
+%% Unicode = int()
+%% @doc Print a debug/status message related to the test suite.
+%% Function arguments are passed through io_lib:format/2.
+diag(Format, Data) -> diag(io_lib:format(Format, Data)).
+
+%% @spec ok(Expr, Desc) -> Result
+%% Expr = true | false
+%% Desc = string()
+%% Result = true | false
+%% @doc Assert that a statement is true.
+ok(Expr, Desc) -> mk_tap(Expr == true, Desc).
+
+%% @spec not_ok(Expr, Desc) -> Result
+%% Expr = true | false
+%% Desc = string()
+%% Result = true | false
+%% @doc Assert that a statement is false.
+not_ok(Expr, Desc) -> mk_tap(Expr == false, Desc).
+
+%% @spec is(Got, Expected, Desc) -> Result
+%% Got = any()
+%% Expected = any()
+%% Desc = string()
+%% Result = true | false
+%% @doc Assert that two values are the same.
+is(Got, Expected, Desc) ->
+ case mk_tap(Got == Expected, Desc) of
+ false ->
+ etap_server ! {self(), diag, " ---"},
+ etap_server ! {self(), diag, io_lib:format(" description: ~p", [Desc])},
+ etap_server ! {self(), diag, io_lib:format(" found: ~p", [Got])},
+ etap_server ! {self(), diag, io_lib:format(" wanted: ~p", [Expected])},
+ etap_server ! {self(), diag, " ..."},
+ false;
+ true -> true
+ end.
+
+%% @spec isnt(Got, Expected, Desc) -> Result
+%% Got = any()
+%% Expected = any()
+%% Desc = string()
+%% Result = true | false
+%% @doc Assert that two values are not the same.
+isnt(Got, Expected, Desc) -> mk_tap(Got /= Expected, Desc).
+
+%% @spec is_greater(ValueA, ValueB, Desc) -> Result
+%% ValueA = number()
+%% ValueB = number()
+%% Desc = string()
+%% Result = true | false
+%% @doc Assert that an integer is greater than another.
+is_greater(ValueA, ValueB, Desc) when is_integer(ValueA), is_integer(ValueB) ->
+ mk_tap(ValueA > ValueB, Desc).
+
+%% @spec any(Got, Items, Desc) -> Result
+%% Got = any()
+%% Items = [any()]
+%% Desc = string()
+%% Result = true | false
+%% @doc Assert that an item is in a list.
+any(Got, Items, Desc) ->
+ is(lists:member(Got, Items), true, Desc).
+
+%% @spec none(Got, Items, Desc) -> Result
+%% Got = any()
+%% Items = [any()]
+%% Desc = string()
+%% Result = true | false
+%% @doc Assert that an item is not in a list.
+none(Got, Items, Desc) ->
+ is(lists:member(Got, Items), false, Desc).
+
+%% @spec fun_is(Fun, Expected, Desc) -> Result
+%% Fun = function()
+%% Expected = any()
+%% Desc = string()
+%% Result = true | false
+%% @doc Use an anonymous function to assert a pattern match.
+fun_is(Fun, Expected, Desc) when is_function(Fun) ->
+ is(Fun(Expected), true, Desc).
+
+%% @equiv skip(TestFun, "")
+skip(TestFun) when is_function(TestFun) ->
+ skip(TestFun, "").
+
+%% @spec skip(TestFun, Reason) -> ok
+%% TestFun = function()
+%% Reason = string()
+%% @doc Skip a test.
+skip(TestFun, Reason) when is_function(TestFun), is_list(Reason) ->
+ begin_skip(Reason),
+ catch TestFun(),
+ end_skip(),
+ ok.
+
+%% @spec skip(Q, TestFun, Reason) -> ok
+%% Q = true | false | function()
+%% TestFun = function()
+%% Reason = string()
+%% @doc Skips a test conditionally. The first argument to this function can
+%% either be the 'true' or 'false' atoms or a function that returns 'true' or
+%% 'false'.
+skip(QFun, TestFun, Reason) when is_function(QFun), is_function(TestFun), is_list(Reason) ->
+ case QFun() of
+ true -> begin_skip(Reason), TestFun(), end_skip();
+ _ -> TestFun()
+ end,
+ ok;
+
+skip(Q, TestFun, Reason) when is_function(TestFun), is_list(Reason), Q == true ->
+ begin_skip(Reason),
+ TestFun(),
+ end_skip(),
+ ok;
+
+skip(_, TestFun, Reason) when is_function(TestFun), is_list(Reason) ->
+ TestFun(),
+ ok.
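+
+%% A conditional-skip sketch (illustrative; the release check is just an
+%% example condition, and plan/1 comes from earlier in this module):
+%%
+%%   etap:plan(1),
+%%   etap:skip(
+%%       fun() -> erlang:system_info(otp_release) < "R13" end,
+%%       fun() -> etap:ok(true, "feature under test") end,
+%%       "not supported before R13").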
+
+%% @private
+begin_skip(Reason) ->
+ etap_server ! {self(), begin_skip, Reason}.
+
+%% @private
+end_skip() ->
+ etap_server ! {self(), end_skip}.
+
+% ---
+% Internal / Private functions
+
+%% @private
+%% @doc Start the etap_server process if it is not running already.
+ensure_test_server() ->
+ case whereis(etap_server) of
+ undefined ->
+ proc_lib:start(?MODULE, start_etap_server,[]);
+ _ ->
+ diag("The test server is already running.")
+ end.
+
+%% @private
+%% @doc Start the etap_server loop and register itself as the etap_server
+%% process.
+start_etap_server() ->
+ catch register(etap_server, self()),
+ proc_lib:init_ack(ok),
+ etap:test_server(#test_state{
+ planned = 0,
+ count = 0,
+ pass = 0,
+ fail = 0,
+ skip = 0,
+ skip_reason = ""
+ }).
+
+
+%% @private
+%% @doc The main etap_server receive/run loop. It responds to messages
+%% pertaining to the planning, passing, failing and skipping of tests.
+%% It is also used to initiate the testing process with the {_, plan, _}
+%% message that clears the current test state.
+test_server(State) ->
+ NewState = receive
+ {_From, plan, unknown} ->
+ io:format("# Current time local ~s~n", [datetime(erlang:localtime())]),
+ io:format("# Using etap version ~p~n", [ proplists:get_value(vsn, proplists:get_value(attributes, etap:module_info())) ]),
+ State#test_state{
+ planned = -1,
+ count = 0,
+ pass = 0,
+ fail = 0,
+ skip = 0,
+ skip_reason = ""
+ };
+ {_From, plan, N} ->
+ io:format("# Current time local ~s~n", [datetime(erlang:localtime())]),
+ io:format("# Using etap version ~p~n", [ proplists:get_value(vsn, proplists:get_value(attributes, etap:module_info())) ]),
+ io:format("1..~p~n", [N]),
+ State#test_state{
+ planned = N,
+ count = 0,
+ pass = 0,
+ fail = 0,
+ skip = 0,
+ skip_reason = ""
+ };
+ {_From, begin_skip, Reason} ->
+ State#test_state{
+ skip = 1,
+ skip_reason = Reason
+ };
+ {_From, end_skip} ->
+ State#test_state{
+ skip = 0,
+ skip_reason = ""
+ };
+ {_From, pass, Desc} ->
+ FullMessage = skip_diag(
+ " - " ++ Desc,
+ State#test_state.skip,
+ State#test_state.skip_reason
+ ),
+ io:format("ok ~p ~s~n", [State#test_state.count + 1, FullMessage]),
+ State#test_state{
+ count = State#test_state.count + 1,
+ pass = State#test_state.pass + 1
+ };
+
+ {_From, fail, Desc} ->
+ FullMessage = skip_diag(
+ " - " ++ Desc,
+ State#test_state.skip,
+ State#test_state.skip_reason
+ ),
+ io:format("not ok ~p ~s~n", [State#test_state.count + 1, FullMessage]),
+ State#test_state{
+ count = State#test_state.count + 1,
+ fail = State#test_state.fail + 1
+ };
+ {From, state} ->
+ From ! State,
+ State;
+ {_From, diag, Message} ->
+ io:format("~s~n", [Message]),
+ State;
+ {From, count} ->
+ From ! State#test_state.count,
+ State;
+ {From, is_skip} ->
+ From ! State#test_state.skip,
+ State;
+ done ->
+ exit(normal)
+ end,
+ test_server(NewState).
+
+%% @private
+%% @doc Process the result of a test and send it to the etap_server process.
+mk_tap(Result, Desc) ->
+ IsSkip = lib:sendw(etap_server, is_skip),
+ case [IsSkip, Result] of
+ [_, true] ->
+ etap_server ! {self(), pass, Desc},
+ true;
+ [1, _] ->
+ etap_server ! {self(), pass, Desc},
+ true;
+ _ ->
+ etap_server ! {self(), fail, Desc},
+ false
+ end.
+
+%% @private
+%% @doc Format a date/time string.
+datetime(DateTime) ->
+ {{Year, Month, Day}, {Hour, Min, Sec}} = DateTime,
+ io_lib:format("~4.10.0B-~2.10.0B-~2.10.0B ~2.10.0B:~2.10.0B:~2.10.0B", [Year, Month, Day, Hour, Min, Sec]).
+
+%% @private
+%% @doc Craft an output message taking skip/todo into consideration.
+skip_diag(Message, 0, _) ->
+ Message;
+skip_diag(_Message, 1, "") ->
+ " # SKIP";
+skip_diag(_Message, 1, Reason) ->
+ " # SKIP : " ++ Reason.
diff --git a/1.1.x/src/etap/etap_application.erl b/1.1.x/src/etap/etap_application.erl
new file mode 100644
index 00000000..98b52751
--- /dev/null
+++ b/1.1.x/src/etap/etap_application.erl
@@ -0,0 +1,72 @@
+%% Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
+%%
+%% Permission is hereby granted, free of charge, to any person
+%% obtaining a copy of this software and associated documentation
+%% files (the "Software"), to deal in the Software without
+%% restriction, including without limitation the rights to use,
+%% copy, modify, merge, publish, distribute, sublicense, and/or sell
+%% copies of the Software, and to permit persons to whom the
+%% Software is furnished to do so, subject to the following
+%% conditions:
+%%
+%% The above copyright notice and this permission notice shall be
+%% included in all copies or substantial portions of the Software.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+%% OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+%% HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+%% WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+%% OTHER DEALINGS IN THE SOFTWARE.
+%%
+%% @author Nick Gerakines <nick@gerakines.net> [http://socklabs.com/]
+%% @copyright 2008 Nick Gerakines
+%% @reference http://testanything.org/wiki/index.php/Main_Page
+%% @reference http://en.wikipedia.org/wiki/Test_Anything_Protocol
+%% @todo Explain in documentation why we use a process to handle test input.
+%% @todo Add test to verify the number of members in a pg2 group.
+%% @doc Provide test functionality for applications and related behaviours.
+-module(etap_application).
+-export([
+ start_ok/2, ensure_loaded/3, load_ok/2,
+ pg2_group_exists/2, pg2_group_doesntexist/2
+]).
+
+%% @spec load_ok(atom(), string()) -> true | false
+%% @doc Assert that an application can be loaded successfully.
+load_ok(AppName, Desc) ->
+ etap:ok(application:load(AppName) == ok, Desc).
+
+%% @spec start_ok(atom(), string()) -> true | false
+%% @doc Assert that an application can be started successfully.
+start_ok(AppName, Desc) ->
+ etap:ok(application:start(AppName) == ok, Desc).
+
+%% @spec ensure_loaded(atom(), string(), string()) -> true | false
+%% @doc Assert that an application has been loaded successfully.
+ensure_loaded(AppName, AppVsn, Desc) ->
+    etap:ok(
+        lists:any(
+            fun({Name, _, Vsn}) -> {Name, Vsn} == {AppName, AppVsn};
+               (_) -> false
+            end,
+            application:loaded_applications()
+        ),
+        Desc
+    ).
+
+%% @spec pg2_group_exists(string(), string()) -> true | false
+%% @doc Assert that a pg2 group exists.
+pg2_group_exists(GroupName, Desc) ->
+    etap:any(GroupName, pg2:which_groups(), Desc).
+
+%% @spec pg2_group_doesntexist(string(), string()) -> true | false
+%% @doc Assert that a pg2 group does not exist.
+pg2_group_doesntexist(GroupName, Desc) ->
+    etap:none(GroupName, pg2:which_groups(), Desc).
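+
+%% A usage sketch (illustrative; sasl is just an example OTP application,
+%% and load_ok only passes on a node where it is not already loaded):
+%%
+%%   etap:plan(2),
+%%   etap_application:load_ok(sasl, "sasl loads"),
+%%   etap_application:start_ok(sasl, "sasl starts").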
diff --git a/1.1.x/src/etap/etap_can.erl b/1.1.x/src/etap/etap_can.erl
new file mode 100644
index 00000000..552b7174
--- /dev/null
+++ b/1.1.x/src/etap/etap_can.erl
@@ -0,0 +1,79 @@
+%% Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
+%%
+%% Permission is hereby granted, free of charge, to any person
+%% obtaining a copy of this software and associated documentation
+%% files (the "Software"), to deal in the Software without
+%% restriction, including without limitation the rights to use,
+%% copy, modify, merge, publish, distribute, sublicense, and/or sell
+%% copies of the Software, and to permit persons to whom the
+%% Software is furnished to do so, subject to the following
+%% conditions:
+%%
+%% The above copyright notice and this permission notice shall be
+%% included in all copies or substantial portions of the Software.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+%% OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+%% HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+%% WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+%% OTHER DEALINGS IN THE SOFTWARE.
+%%
+%% @reference http://testanything.org/wiki/index.php/Main_Page
+%% @reference http://en.wikipedia.org/wiki/Test_Anything_Protocol
+%% @doc Provide test functionality for modules.
+-module(etap_can).
+
+-export([
+ loaded_ok/2, can_ok/2, can_ok/3,
+ has_attrib/2, is_attrib/3, is_behaviour/2
+]).
+
+%% @spec loaded_ok(atom(), string()) -> true | false
+%% @doc Assert that a module has been loaded successfully.
+loaded_ok(M, Desc) when is_atom(M) ->
+ etap:fun_is(fun({module, _}) -> true; (_) -> false end, code:load_file(M), Desc).
+
+%% @spec can_ok(atom(), atom()) -> true | false
+%% @doc Assert that a module exports a given function.
+can_ok(M, F) when is_atom(M), is_atom(F) ->
+ Matches = [X || {X, _} <- M:module_info(exports), X == F],
+    etap:ok(length(Matches) > 0, lists:concat([M, " can ", F])).
+
+%% @spec can_ok(atom(), atom(), integer()) -> true | false
+%% @doc Assert that a module exports a given function with a given arity.
+can_ok(M, F, A) when is_atom(M), is_atom(F), is_number(A) ->
+    Matches = [X || X <- M:module_info(exports), X == {F, A}],
+    etap:ok(length(Matches) > 0, lists:concat([M, " can ", F, "/", A])).
+
+%% @spec has_attrib(M, A) -> true | false
+%% M = atom()
+%% A = atom()
+%% @doc Asserts that a module has a given attribute.
+has_attrib(M, A) when is_atom(M), is_atom(A) ->
+ etap:isnt(
+ proplists:get_value(A, M:module_info(attributes), 'asdlkjasdlkads'),
+ 'asdlkjasdlkads',
+ lists:concat([M, " has attribute ", A])
+ ).
+
+%% @spec is_attrib(M, A, V) -> true | false
+%% M = atom()
+%% A = atom()
+%% V = any()
+%% @doc Asserts that a module has a given attribute with a given value.
+is_attrib(M, A, V) when is_atom(M) andalso is_atom(A) ->
+ etap:is(
+ proplists:get_value(A, M:module_info(attributes)),
+ [V],
+ lists:concat([M, "'s ", A, " is ", V])
+ ).
+
+%% @spec is_behaviour(M, B) -> true | false
+%% M = atom()
+%% B = atom()
+%% @doc Asserts that a given module has a specific behaviour.
+is_behaviour(M, B) when is_atom(M) andalso is_atom(B) ->
+ is_attrib(M, behaviour, B).
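+
+%% A usage sketch (illustrative; my_module is a hypothetical module on the
+%% code path):
+%%
+%%   etap:plan(3),
+%%   etap_can:loaded_ok(my_module, "my_module loads"),
+%%   etap_can:can_ok(my_module, start, "my_module exports start"),
+%%   etap_can:can_ok(my_module, start, 0, "my_module exports start/0").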
diff --git a/1.1.x/src/etap/etap_exception.erl b/1.1.x/src/etap/etap_exception.erl
new file mode 100644
index 00000000..ba660727
--- /dev/null
+++ b/1.1.x/src/etap/etap_exception.erl
@@ -0,0 +1,66 @@
+%% Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
+%%
+%% Permission is hereby granted, free of charge, to any person
+%% obtaining a copy of this software and associated documentation
+%% files (the "Software"), to deal in the Software without
+%% restriction, including without limitation the rights to use,
+%% copy, modify, merge, publish, distribute, sublicense, and/or sell
+%% copies of the Software, and to permit persons to whom the
+%% Software is furnished to do so, subject to the following
+%% conditions:
+%%
+%% The above copyright notice and this permission notice shall be
+%% included in all copies or substantial portions of the Software.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+%% OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+%% HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+%% WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+%% OTHER DEALINGS IN THE SOFTWARE.
+%%
+%% @reference http://testanything.org/wiki/index.php/Main_Page
+%% @reference http://en.wikipedia.org/wiki/Test_Anything_Protocol
+%% @doc Adds exception-based testing to the etap suite.
+-module(etap_exception).
+
+-export([dies_ok/2, lives_ok/2, throws_ok/3]).
+
+% ---
+% External / Public functions
+
+%% @doc Assert that an exception is raised when running a given function.
+dies_ok(F, Desc) ->
+ case (catch F()) of
+ {'EXIT', _} -> etap:ok(true, Desc);
+ _ -> etap:ok(false, Desc)
+ end.
+
+%% @doc Assert that an exception is not raised when running a given function.
+lives_ok(F, Desc) ->
+ etap:is(try_this(F), success, Desc).
+
+%% @doc Assert that the exception thrown by a function matches the given exception.
+throws_ok(F, Exception, Desc) ->
+ try F() of
+ _ -> etap:ok(nok, Desc)
+ catch
+ _:E ->
+ etap:is(E, Exception, Desc)
+ end.
+
+% ---
+% Internal / Private functions
+
+%% @private
+%% @doc Run a function and catch any exceptions.
+try_this(F) when is_function(F, 0) ->
+ try F() of
+ _ -> success
+ catch
+ throw:E -> {throw, E};
+ error:E -> {error, E};
+ exit:E -> {exit, E}
+ end.
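+
+%% A usage sketch of the three assertions (illustrative only):
+%%
+%%   etap:plan(3),
+%%   etap_exception:dies_ok(fun() -> 1 = 2 end, "bad match dies"),
+%%   etap_exception:lives_ok(fun() -> 1 + 1 end, "addition lives"),
+%%   etap_exception:throws_ok(fun() -> throw(oops) end, oops, "throws oops").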
diff --git a/1.1.x/src/etap/etap_process.erl b/1.1.x/src/etap/etap_process.erl
new file mode 100644
index 00000000..69f5ba00
--- /dev/null
+++ b/1.1.x/src/etap/etap_process.erl
@@ -0,0 +1,42 @@
+%% Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
+%%
+%% Permission is hereby granted, free of charge, to any person
+%% obtaining a copy of this software and associated documentation
+%% files (the "Software"), to deal in the Software without
+%% restriction, including without limitation the rights to use,
+%% copy, modify, merge, publish, distribute, sublicense, and/or sell
+%% copies of the Software, and to permit persons to whom the
+%% Software is furnished to do so, subject to the following
+%% conditions:
+%%
+%% The above copyright notice and this permission notice shall be
+%% included in all copies or substantial portions of the Software.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+%% OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+%% HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+%% WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+%% OTHER DEALINGS IN THE SOFTWARE.
+%%
+%% @doc Adds process/pid testing to the etap suite.
+-module(etap_process).
+
+-export([is_pid/2, is_alive/2, is_mfa/3]).
+
+% ---
+% External / Public functions
+
+%% @doc Assert that a given variable is a pid.
+is_pid(Pid, Desc) when is_pid(Pid) -> etap:ok(true, Desc);
+is_pid(_, Desc) -> etap:ok(false, Desc).
+
+%% @doc Assert that a given process/pid is alive.
+is_alive(Pid, Desc) ->
+ etap:ok(erlang:is_process_alive(Pid), Desc).
+
+%% @doc Assert that the current function of a pid is a given {M, F, A} tuple.
+is_mfa(Pid, MFA, Desc) ->
+ etap:is({current_function, MFA}, erlang:process_info(Pid, current_function), Desc).
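+
+%% A usage sketch against a freshly spawned process (illustrative only):
+%%
+%%   etap:plan(2),
+%%   Pid = spawn(fun() -> receive stop -> ok end end),
+%%   etap_process:is_pid(Pid, "spawn returns a pid"),
+%%   etap_process:is_alive(Pid, "process is alive"),
+%%   Pid ! stop.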
diff --git a/1.1.x/src/etap/etap_report.erl b/1.1.x/src/etap/etap_report.erl
new file mode 100644
index 00000000..6d692fb6
--- /dev/null
+++ b/1.1.x/src/etap/etap_report.erl
@@ -0,0 +1,343 @@
+%% Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
+%%
+%% Permission is hereby granted, free of charge, to any person
+%% obtaining a copy of this software and associated documentation
+%% files (the "Software"), to deal in the Software without
+%% restriction, including without limitation the rights to use,
+%% copy, modify, merge, publish, distribute, sublicense, and/or sell
+%% copies of the Software, and to permit persons to whom the
+%% Software is furnished to do so, subject to the following
+%% conditions:
+%%
+%% The above copyright notice and this permission notice shall be
+%% included in all copies or substantial portions of the Software.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+%% OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+%% HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+%% WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+%% OTHER DEALINGS IN THE SOFTWARE.
+%%
+%% @doc A module for creating nice looking code coverage reports.
+-module(etap_report).
+-export([create/0]).
+
+%% @spec create() -> ok
+%% @doc Create html code coverage reports for each module that code coverage
+%% data exists for.
+create() ->
+ [cover:import(File) || File <- filelib:wildcard("cover/*.coverdata")],
+ Modules = lists:foldl(
+ fun(Module, Acc) ->
+ [{Module, file_report(Module)} | Acc]
+ end,
+ [],
+ cover:imported_modules()
+ ),
+ index(Modules).
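+
+%% A usage sketch (illustrative; it assumes coverage data was previously
+%% exported to cover/*.coverdata, e.g. via cover:export/1); this writes
+%% cover/index.html plus one <Module>_report.html per covered module:
+%%
+%%   ok = etap_report:create().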
+
+%% @private
+index(Modules) ->
+ {ok, IndexFD} = file:open("cover/index.html", [write]),
+ io:format(IndexFD, "<html><head><style>
+ table.percent_graph { height: 12px; border:1px solid #E2E6EF; empty-cells: show; }
+ table.percent_graph td.covered { height: 10px; background: #00f000; }
+ table.percent_graph td.uncovered { height: 10px; background: #e00000; }
+ .odd { background-color: #ddd; }
+ .even { background-color: #fff; }
+ </style></head>", []),
+ io:format(IndexFD, "<body>", []),
+ lists:foldl(
+ fun({Module, {Good, Bad, Source}}, LastRow) ->
+ case {Good + Bad, Source} of
+ {0, _} -> LastRow;
+ {_, none} -> LastRow;
+ _ ->
+ CovPer = round((Good / (Good + Bad)) * 100),
+ UnCovPer = round((Bad / (Good + Bad)) * 100),
+ RowClass = case LastRow of 1 -> "odd"; _ -> "even" end,
+ io:format(IndexFD, "<div class=\"~s\">", [RowClass]),
+ io:format(IndexFD, "<a href=\"~s\">~s</a>", [atom_to_list(Module) ++ "_report.html", atom_to_list(Module)]),
+ io:format(IndexFD, "
+ <table cellspacing='0' cellpadding='0' align='right'>
+ <tr>
+ <td><tt>~p%</tt>&nbsp;</td><td>
+ <table cellspacing='0' class='percent_graph' cellpadding='0' width='100'>
+ <tr><td class='covered' width='~p' /><td class='uncovered' width='~p' /></tr>
+ </table>
+ </td>
+ </tr>
+ </table>
+ ", [CovPer, CovPer, UnCovPer]),
+ io:format(IndexFD, "</div>", []),
+ case LastRow of
+ 1 -> 0;
+ 0 -> 1
+ end
+ end
+ end,
+ 0,
+ lists:sort(Modules)
+ ),
+ {TotalGood, TotalBad} = lists:foldl(
+ fun({_, {Good, Bad, Source}}, {TGood, TBad}) ->
+ case Source of none -> {TGood, TBad}; _ -> {TGood + Good, TBad + Bad} end
+ end,
+ {0, 0},
+ Modules
+ ),
+ io:format(IndexFD, "<p>Generated on ~s.</p>~n", [etap:datetime({date(), time()})]),
+ case TotalGood + TotalBad of
+ 0 -> ok;
+ _ ->
+ TotalCovPer = round((TotalGood / (TotalGood + TotalBad)) * 100),
+ TotalUnCovPer = round((TotalBad / (TotalGood + TotalBad)) * 100),
+ io:format(IndexFD, "<div>", []),
+ io:format(IndexFD, "Total
+ <table cellspacing='0' cellpadding='0' align='right'>
+ <tr>
+ <td><tt>~p%</tt>&nbsp;</td><td>
+ <table cellspacing='0' class='percent_graph' cellpadding='0' width='100'>
+ <tr><td class='covered' width='~p' /><td class='uncovered' width='~p' /></tr>
+ </table>
+ </td>
+ </tr>
+ </table>
+ ", [TotalCovPer, TotalCovPer, TotalUnCovPer]),
+ io:format(IndexFD, "</div>", [])
+ end,
+ io:format(IndexFD, "</body></html>", []),
+ file:close(IndexFD),
+ ok.
+
+%% @private
+file_report(Module) ->
+ {ok, Data} = cover:analyse(Module, calls, line),
+ Source = find_source(Module),
+ {Good, Bad} = collect_coverage(Data, {0, 0}),
+ case {Source, Good + Bad} of
+ {none, _} -> ok;
+ {_, 0} -> ok;
+ _ ->
+ {ok, SourceFD} = file:open(Source, [read]),
+ {ok, WriteFD} = file:open("cover/" ++ atom_to_list(Module) ++ "_report.html", [write]),
+ io:format(WriteFD, "~s", [header(Module, Good, Bad)]),
+ output_lines(Data, WriteFD, SourceFD, 1),
+ io:format(WriteFD, "~s", [footer()]),
+ file:close(WriteFD),
+ file:close(SourceFD),
+ ok
+ end,
+ {Good, Bad, Source}.
+
+%% @private
+collect_coverage([], Acc) -> Acc;
+collect_coverage([{{_, _}, 0} | Data], {Good, Bad}) ->
+ collect_coverage(Data, {Good, Bad + 1});
+collect_coverage([_ | Data], {Good, Bad}) ->
+ collect_coverage(Data, {Good + 1, Bad}).
+
+%% @private
+output_lines(Data, WriteFD, SourceFD, LineNumber) ->
+ {Match, NextData} = datas_match(Data, LineNumber),
+ case io:get_line(SourceFD, '') of
+ eof -> ok;
+ Line = "%% @todo" ++ _ ->
+ io:format(WriteFD, "~s", [out_line(LineNumber, highlight, Line)]),
+ output_lines(NextData, WriteFD, SourceFD, LineNumber + 1);
+ Line = "% " ++ _ ->
+ io:format(WriteFD, "~s", [out_line(LineNumber, none, Line)]),
+ output_lines(NextData, WriteFD, SourceFD, LineNumber + 1);
+ Line ->
+ case Match of
+ {true, CC} ->
+ io:format(WriteFD, "~s", [out_line(LineNumber, CC, Line)]),
+ output_lines(NextData, WriteFD, SourceFD, LineNumber + 1);
+ false ->
+ io:format(WriteFD, "~s", [out_line(LineNumber, none, Line)]),
+ output_lines(NextData, WriteFD, SourceFD, LineNumber + 1)
+ end
+ end.
+
+%% @private
+out_line(Number, none, Line) ->
+ PadNu = string:right(integer_to_list(Number), 5, $.),
+ io_lib:format("<span class=\"marked\"><a name=\"line~p\"></a>~s ~s</span>", [Number, PadNu, Line]);
+out_line(Number, highlight, Line) ->
+ PadNu = string:right(integer_to_list(Number), 5, $.),
+ io_lib:format("<span class=\"highlight\"><a name=\"line~p\"></a>~s ~s</span>", [Number, PadNu, Line]);
+out_line(Number, 0, Line) ->
+ PadNu = string:right(integer_to_list(Number), 5, $.),
+ io_lib:format("<span class=\"uncovered\"><a name=\"line~p\"></a>~s ~s</span>", [Number, PadNu, Line]);
+out_line(Number, _, Line) ->
+ PadNu = string:right(integer_to_list(Number), 5, $.),
+ io_lib:format("<span class=\"covered\"><a name=\"line~p\"></a>~s ~s</span>", [Number, PadNu, Line]).
+
+%% @private
+datas_match([], _) -> {false, []};
+datas_match([{{_, Line}, CC} | Datas], LineNumber) when Line == LineNumber -> {{true, CC}, Datas};
+datas_match(Data, _) -> {false, Data}.
+
+%% @private
+find_source(Module) when is_atom(Module) ->
+ Root = filename:rootname(Module),
+ Dir = filename:dirname(Root),
+ XDir = case os:getenv("SRC") of false -> "src"; X -> X end,
+ find_source([
+ filename:join([Dir, Root ++ ".erl"]),
+ filename:join([Dir, "..", "src", Root ++ ".erl"]),
+ filename:join([Dir, "src", Root ++ ".erl"]),
+ filename:join([Dir, "elibs", Root ++ ".erl"]),
+ filename:join([Dir, "..", "elibs", Root ++ ".erl"]),
+ filename:join([Dir, XDir, Root ++ ".erl"])
+ ]);
+find_source([]) -> none;
+find_source([Test | Tests]) ->
+ case filelib:is_file(Test) of
+ true -> Test;
+ false -> find_source(Tests)
+ end.
+
+%% @private
+header(Module, Good, Bad) ->
+    CovPer = round((Good / (Good + Bad)) * 100),
+    UnCovPer = round((Bad / (Good + Bad)) * 100),
+ io_lib:format("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">
+ <html lang='en' xml:lang='en' xmlns='http://www.w3.org/1999/xhtml'>
+ <head>
+ <title>~s - C0 code coverage information</title>
+ <style type='text/css'>body { background-color: rgb(240, 240, 245); }</style>
+ <style type='text/css'>span.marked0 {
+ background-color: rgb(185, 210, 200);
+ display: block;
+ }
+ span.marked { display: block; background-color: #ffffff; }
+ span.highlight { display: block; background-color: #fff9d7; }
+ span.covered { display: block; background-color: #f7f7f7 ; }
+ span.uncovered { display: block; background-color: #ffebe8 ; }
+ span.overview {
+ border-bottom: 1px solid #E2E6EF;
+ }
+ div.overview {
+ border-bottom: 1px solid #E2E6EF;
+ }
+ body {
+ font-family: verdana, arial, helvetica;
+ }
+ div.footer {
+ font-size: 68%;
+ margin-top: 1.5em;
+ }
+ h1, h2, h3, h4, h5, h6 {
+ margin-bottom: 0.5em;
+ }
+ h5 {
+ margin-top: 0.5em;
+ }
+ .hidden {
+ display: none;
+ }
+ div.separator {
+ height: 10px;
+ }
+ table.percent_graph {
+ height: 12px;
+ border: 1px solid #E2E6EF;
+ empty-cells: show;
+ }
+ table.percent_graph td.covered {
+ height: 10px;
+ background: #00f000;
+ }
+ table.percent_graph td.uncovered {
+ height: 10px;
+ background: #e00000;
+ }
+ table.percent_graph td.NA {
+ height: 10px;
+ background: #eaeaea;
+ }
+ table.report {
+ border-collapse: collapse;
+ width: 100%;
+ }
+ table.report td.heading {
+ background: #dcecff;
+ border: 1px solid #E2E6EF;
+ font-weight: bold;
+ text-align: center;
+ }
+ table.report td.heading:hover {
+ background: #c0ffc0;
+ }
+ table.report td.text {
+ border: 1px solid #E2E6EF;
+ }
+ table.report td.value {
+ text-align: right;
+ border: 1px solid #E2E6EF;
+ }
+ table.report tr.light {
+ background-color: rgb(240, 240, 245);
+ }
+ table.report tr.dark {
+ background-color: rgb(230, 230, 235);
+ }
+ </style>
+ </head>
+ <body>
+ <h3>C0 code coverage information</h3>
+ <p>Generated on ~s with <a href='http://github.com/ngerakines/etap'>etap 0.3.4</a>.
+ </p>
+ <table class='report'>
+ <thead>
+ <tr>
+ <td class='heading'>Name</td>
+ <td class='heading'>Total lines</td>
+ <td class='heading'>Lines of code</td>
+ <td class='heading'>Total coverage</td>
+ <td class='heading'>Code coverage</td>
+ </tr>
+ </thead>
+ <tbody>
+ <tr class='light'>
+
+ <td>
+ <a href='~s'>~s</a>
+ </td>
+ <td class='value'>
+ <tt>??</tt>
+ </td>
+ <td class='value'>
+ <tt>??</tt>
+ </td>
+ <td class='value'>
+ <tt>??</tt>
+ </td>
+ <td>
+ <table cellspacing='0' cellpadding='0' align='right'>
+ <tr>
+ <td><tt>~p%</tt>&nbsp;</td><td>
+ <table cellspacing='0' class='percent_graph' cellpadding='0' width='100'>
+ <tr><td class='covered' width='~p' /><td class='uncovered' width='~p' /></tr>
+ </table>
+ </td>
+ </tr>
+ </table>
+ </td>
+ </tr>
+ </tbody>
+ </table><pre>", [Module, etap:datetime({date(), time()}), atom_to_list(Module) ++ "_report.html", Module, CovPer, CovPer, UnCovPer]).
+
+%% @private
+footer() ->
+ "</pre><hr /><p>Generated using <a href='http://github.com/ngerakines/etap'>etap 0.3.4</a>.</p>
+ </body>
+ </html>
+ ".
diff --git a/1.1.x/src/etap/etap_request.erl b/1.1.x/src/etap/etap_request.erl
new file mode 100644
index 00000000..9fd23aca
--- /dev/null
+++ b/1.1.x/src/etap/etap_request.erl
@@ -0,0 +1,89 @@
+%% Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
+%%
+%% Permission is hereby granted, free of charge, to any person
+%% obtaining a copy of this software and associated documentation
+%% files (the "Software"), to deal in the Software without
+%% restriction, including without limitation the rights to use,
+%% copy, modify, merge, publish, distribute, sublicense, and/or sell
+%% copies of the Software, and to permit persons to whom the
+%% Software is furnished to do so, subject to the following
+%% conditions:
+%%
+%% The above copyright notice and this permission notice shall be
+%% included in all copies or substantial portions of the Software.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+%% OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+%% HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+%% WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+%% OTHER DEALINGS IN THE SOFTWARE.
+%%
+%% @doc Provides test functionality against a specific web request. Many of
+%% the exported methods can be used to build your own more complex tests.
+-module(etap_request, [Method, Url, InHeaders, InBody, Status, OutHeaders, OutBody]).
+
+-export([status_is/2]).
+
+-export([
+ method/0, url/0, status/0, status_code/0, status_line/0, rheaders/0,
+ has_rheader/1, rheader/1, rbody/0, header_is/3, body_is/2,
+ body_has_string/2
+]).
+
+% ---
+% Tests
+
+%% @doc Assert that the response status code is the given status code.
+status_is(Code, Desc) ->
+ etap:is(status_code(), Code, Desc).
+
+header_is(Name, Value, Desc) ->
+ etap:is(rheader(Name), Value, Desc).
+
+body_is(Value, Desc) ->
+ etap:is(rbody(), Value, Desc).
+
+body_has_string(String, Desc) when is_list(OutBody), is_list(String) ->
+ etap_string:contains_ok(OutBody, String, Desc).
+
+% ---
+% Accessor functions
+
+%% @doc Access a request's method.
+method() -> Method.
+
+%% @doc Access a request's URL.
+url() -> Url.
+
+%% @doc Access a request's status.
+status() -> Status.
+
+%% @doc Access a request's status code.
+status_code() ->
+ {_, Code, _} = Status,
+ Code.
+
+%% @doc Access a request's status line.
+status_line() ->
+ {_, _, Line} = Status,
+ Line.
+
+%% @doc Access the response headers.
+rheaders() -> OutHeaders.
+
+%% @doc Determine whether a specific response header exists.
+has_rheader(Key) ->
+ lists:keymember(Key, 1, OutHeaders).
+
+%% @doc Return a specific response header.
+rheader(Key) ->
+ case lists:keysearch(Key, 1, OutHeaders) of
+ false -> undefined;
+ {value, {Key, Value}} -> Value
+ end.
+
+%% @doc Access the response body.
+rbody() -> OutBody.
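+
+%% A usage sketch; instances are normally built by etap_web:build_request/4
+%% rather than by hand, and the values below are made up:
+%%
+%%   Req = etap_request:new(get, "http://localhost:5984/", [], [],
+%%                          {"HTTP/1.1", 200, "OK"}, [], "{\"ok\":true}"),
+%%   Req:status_is(200, "fetch succeeded").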
diff --git a/1.1.x/src/etap/etap_string.erl b/1.1.x/src/etap/etap_string.erl
new file mode 100644
index 00000000..67aa3d54
--- /dev/null
+++ b/1.1.x/src/etap/etap_string.erl
@@ -0,0 +1,47 @@
+%% Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
+%%
+%% Permission is hereby granted, free of charge, to any person
+%% obtaining a copy of this software and associated documentation
+%% files (the "Software"), to deal in the Software without
+%% restriction, including without limitation the rights to use,
+%% copy, modify, merge, publish, distribute, sublicense, and/or sell
+%% copies of the Software, and to permit persons to whom the
+%% Software is furnished to do so, subject to the following
+%% conditions:
+%%
+%% The above copyright notice and this permission notice shall be
+%% included in all copies or substantial portions of the Software.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+%% OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+%% HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+%% WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+%% OTHER DEALINGS IN THE SOFTWARE.
+%%
+%% @author Nick Gerakines <nick@gerakines.net> [http://socklabs.com/]
+%% @copyright 2008 Nick Gerakines
+%% @doc Provide testing functionality for strings.
+-module(etap_string).
+
+-export([contains_ok/3, is_before/4]).
+
+%% @spec contains_ok(string(), string(), string()) -> true | false
+%% @doc Assert that a string is contained in another string.
+contains_ok(Source, String, Desc) ->
+ etap:isnt(
+ string:str(Source, String),
+ 0,
+ Desc
+ ).
+
+%% @spec is_before(string(), string(), string(), string()) -> true | false
+%% @doc Assert that a string comes before another string within a larger body.
+is_before(Source, StringA, StringB, Desc) ->
+ etap:is_greater(
+ string:str(Source, StringB),
+ string:str(Source, StringA),
+ Desc
+ ).
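+
+%% A usage sketch (illustrative only):
+%%
+%%   etap:plan(2),
+%%   etap_string:contains_ok("hello world", "world", "contains world"),
+%%   etap_string:is_before("hello world", "hello", "world", "hello comes first").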
diff --git a/1.1.x/src/etap/etap_web.erl b/1.1.x/src/etap/etap_web.erl
new file mode 100644
index 00000000..fb7aee16
--- /dev/null
+++ b/1.1.x/src/etap/etap_web.erl
@@ -0,0 +1,65 @@
+%% Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
+%%
+%% Permission is hereby granted, free of charge, to any person
+%% obtaining a copy of this software and associated documentation
+%% files (the "Software"), to deal in the Software without
+%% restriction, including without limitation the rights to use,
+%% copy, modify, merge, publish, distribute, sublicense, and/or sell
+%% copies of the Software, and to permit persons to whom the
+%% Software is furnished to do so, subject to the following
+%% conditions:
+%%
+%% The above copyright notice and this permission notice shall be
+%% included in all copies or substantial portions of the Software.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+%% OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+%% HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+%% WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+%% OTHER DEALINGS IN THE SOFTWARE.
+%%
+%% @author Nick Gerakines <nick@gerakines.net> [http://socklabs.com/]
+%% @copyright 2008 Nick Gerakines
+%% @todo Support cookies.
+%% @doc Provide testing functionality for web requests.
+-module(etap_web).
+
+-export([simple_200/2, simple_404/2, build_request/4]).
+
+%% @doc Fetch a url and verify that it returned a 200 status.
+simple_200(Url, Desc) ->
+ Request = build_request(get, Url, [], []),
+ Request:status_is(200, Desc).
+
+%% @doc Fetch a url and verify that it returned a 404 status.
+simple_404(Url, Desc) ->
+ Request = build_request(get, Url, [], []),
+ Request:status_is(404, Desc).
+
+%% @doc Create and return a request structure.
+build_request(Method, Url, Headers, Body)
+        when Method == options; Method == get; Method == head;
+             Method == delete; Method == trace ->
+ try http:request(Method, {Url, Headers}, [{autoredirect, false}], []) of
+ {ok, {OutStatus, OutHeaders, OutBody}} ->
+ etap_request:new(Method, Url, Headers, Body, OutStatus, OutHeaders, OutBody);
+ _ -> error
+ catch
+ _:_ -> error
+ end;
+
+%% POST/PUT clause: picks any supplied Content-Type out of the headers.
+build_request(Method, Url, Headers, Body) when Method == post; Method == put ->
+ ContentType = case lists:keysearch("Content-Type", 1, Headers) of
+ {value, {"Content-Type", X}} -> X;
+ _ -> []
+ end,
+ try http:request(Method, {Url, Headers, ContentType, Body}, [{autoredirect, false}], []) of
+ {ok, {OutStatus, OutHeaders, OutBody}} ->
+ etap_request:new(Method, Url, Headers, Body, OutStatus, OutHeaders, OutBody);
+ _ -> error
+ catch
+ _:_ -> error
+ end.
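+
+%% A usage sketch (illustrative; it requires a reachable HTTP server, and
+%% the CouchDB-style URL below is made up):
+%%
+%%   etap:plan(1),
+%%   etap_web:simple_200("http://localhost:5984/", "root responds with 200").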
diff --git a/1.1.x/src/ibrowse/Makefile.am b/1.1.x/src/ibrowse/Makefile.am
new file mode 100644
index 00000000..869bd107
--- /dev/null
+++ b/1.1.x/src/ibrowse/Makefile.am
@@ -0,0 +1,49 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+ibrowseebindir = $(localerlanglibdir)/ibrowse-2.2.0/ebin
+
+ibrowse_file_collection = \
+ ibrowse.app.in \
+ ibrowse.erl \
+ ibrowse_app.erl \
+ ibrowse_http_client.erl \
+ ibrowse_lb.erl \
+ ibrowse_lib.erl \
+ ibrowse_sup.erl \
+ ibrowse_test.erl
+
+ibrowseebin_make_generated_file_list = \
+ ibrowse.app \
+ ibrowse.beam \
+ ibrowse_app.beam \
+ ibrowse_http_client.beam \
+ ibrowse_lb.beam \
+ ibrowse_lib.beam \
+ ibrowse_sup.beam \
+ ibrowse_test.beam
+
+ibrowseebin_DATA = \
+ $(ibrowseebin_make_generated_file_list)
+
+EXTRA_DIST = \
+ $(ibrowse_file_collection) \
+ ibrowse.hrl
+
+CLEANFILES = \
+ $(ibrowseebin_make_generated_file_list)
+
+%.app: %.app.in
+ cp $< $@
+
+%.beam: %.erl
+ $(ERLC) $(ERLC_FLAGS) $<
diff --git a/1.1.x/src/ibrowse/ibrowse.app.in b/1.1.x/src/ibrowse/ibrowse.app.in
new file mode 100644
index 00000000..af46d8a5
--- /dev/null
+++ b/1.1.x/src/ibrowse/ibrowse.app.in
@@ -0,0 +1,13 @@
+{application, ibrowse,
+ [{description, "HTTP client application"},
+ {vsn, "2.2.0"},
+ {modules, [ ibrowse,
+ ibrowse_http_client,
+ ibrowse_app,
+ ibrowse_sup,
+ ibrowse_lib,
+ ibrowse_lb ]},
+ {registered, []},
+ {applications, [kernel,stdlib,sasl]},
+ {env, []},
+ {mod, {ibrowse_app, []}}]}.
diff --git a/1.1.x/src/ibrowse/ibrowse.erl b/1.1.x/src/ibrowse/ibrowse.erl
new file mode 100644
index 00000000..f70f92f1
--- /dev/null
+++ b/1.1.x/src/ibrowse/ibrowse.erl
@@ -0,0 +1,863 @@
+%%%-------------------------------------------------------------------
+%%% File : ibrowse.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description : Load balancer process for HTTP client connections.
+%%%
+%%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+%% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
+%% @copyright 2005-2011 Chandrashekhar Mullaparthi
+%% @version 2.1.3
+%% @doc The ibrowse application implements an HTTP 1.1 client in Erlang. This
+%% module implements the API of the HTTP client. There is one named
+%% process called 'ibrowse' which assists in load balancing and maintaining
+%% configuration. There is one load balancing process per unique webserver.
+%% There is one process to handle one TCP connection to a webserver
+%% (implemented in the module ibrowse_http_client). Multiple connections to a
+%% webserver are set up based on the settings for each webserver. The
+%% ibrowse process also determines which connection to pipeline a
+%% certain request on. The functions to call are send_req/3,
+%% send_req/4, send_req/5, send_req/6.
+%%
+%% <p>Here are a few sample invocations.</p>
+%%
+%% <code>
+%% ibrowse:send_req("http://intranet/messenger/", [], get).
+%% <br/><br/>
+%%
+%% ibrowse:send_req("http://www.google.com/", [], get, [],
+%% [{proxy_user, "XXXXX"},
+%% {proxy_password, "XXXXX"},
+%% {proxy_host, "proxy"},
+%% {proxy_port, 8080}], 1000).
+%% <br/><br/>
+%%
+%%ibrowse:send_req("http://www.erlang.org/download/otp_src_R10B-3.tar.gz", [], get, [],
+%% [{proxy_user, "XXXXX"},
+%% {proxy_password, "XXXXX"},
+%% {proxy_host, "proxy"},
+%% {proxy_port, 8080},
+%% {save_response_to_file, true}], 1000).
+%% <br/><br/>
+%%
+%% ibrowse:send_req("http://www.erlang.org", [], head).
+%%
+%% <br/><br/>
+%% ibrowse:send_req("http://www.sun.com", [], options).
+%%
+%% <br/><br/>
+%% ibrowse:send_req("http://www.bbc.co.uk", [], trace).
+%%
+%% <br/><br/>
+%% ibrowse:send_req("http://www.google.com", [], get, [],
+%% [{stream_to, self()}]).
+%% </code>
+%%
+
+-module(ibrowse).
+-behaviour(gen_server).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+-export([start_link/0, start/0, stop/0]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% API interface
+-export([
+ rescan_config/0,
+ rescan_config/1,
+ get_config_value/1,
+ get_config_value/2,
+ spawn_worker_process/1,
+ spawn_worker_process/2,
+ spawn_link_worker_process/1,
+ spawn_link_worker_process/2,
+ stop_worker_process/1,
+ send_req/3,
+ send_req/4,
+ send_req/5,
+ send_req/6,
+ send_req_direct/4,
+ send_req_direct/5,
+ send_req_direct/6,
+ send_req_direct/7,
+ stream_next/1,
+ stream_close/1,
+ set_max_sessions/3,
+ set_max_pipeline_size/3,
+ set_dest/3,
+ trace_on/0,
+ trace_off/0,
+ trace_on/2,
+ trace_off/2,
+ all_trace_off/0,
+ show_dest_status/0,
+ show_dest_status/2
+ ]).
+
+-ifdef(debug).
+-compile(export_all).
+-endif.
+
+-import(ibrowse_lib, [
+ parse_url/1,
+ get_value/3,
+ do_trace/2
+ ]).
+
+-record(state, {trace = false}).
+
+-include("ibrowse.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+
+-define(DEF_MAX_SESSIONS,10).
+-define(DEF_MAX_PIPELINE_SIZE,10).
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link/0
+%% Description: Starts the server
+%%--------------------------------------------------------------------
+%% @doc Starts the ibrowse process linked to the calling process. Usually invoked by the supervisor ibrowse_sup
+%% @spec start_link() -> {ok, pid()}
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%% @doc Starts the ibrowse process without linking. Useful when testing using the shell
+start() ->
+ gen_server:start({local, ?MODULE}, ?MODULE, [], [{debug, []}]).
+
+%% @doc Stop the ibrowse process. Useful when testing using the shell.
+stop() ->
+ catch gen_server:call(ibrowse, stop).
+
+%% @doc This is the basic function to send an HTTP request.
+%% The Status return value indicates the HTTP status code returned by the webserver.
+%% @spec send_req(Url::string(), Headers::headerList(), Method::method()) -> response()
+%% headerList() = [{header(), value()}]
+%% header() = atom() | string()
+%% value() = term()
+%% method() = get | post | head | options | put | delete | trace | mkcol | propfind | proppatch | lock | unlock | move | copy
+%% Status = string()
+%% ResponseHeaders = [respHeader()]
+%% respHeader() = {headerName(), headerValue()}
+%% headerName() = string()
+%% headerValue() = string()
+%% response() = {ok, Status, ResponseHeaders, ResponseBody} | {ibrowse_req_id, req_id() } | {error, Reason}
+%% req_id() = term()
+%% ResponseBody = string() | {file, Filename}
+%% Reason = term()
+send_req(Url, Headers, Method) ->
+ send_req(Url, Headers, Method, [], []).
+
+%% @doc Same as send_req/3.
+%% If a list is specified for the body it has to be a flat list. The body can also be a fun/0 or a fun/1. <br/>
+%% If fun/0, the connection handling process will repeatedly call the fun until it returns an error or eof. <pre>Fun() = {ok, Data} | eof</pre><br/>
+%% If fun/1, the connection handling process will repeatedly call the fun with the supplied state until it returns an error or eof. <pre>Fun(State) = {ok, Data} | {ok, Data, NewState} | eof</pre>
+%% @spec send_req(Url, Headers, Method::method(), Body::body()) -> response()
+%% body() = [] | string() | binary() | fun_arity_0() | {fun_arity_1(), initial_state()}
+%% initial_state() = term()
+send_req(Url, Headers, Method, Body) ->
+ send_req(Url, Headers, Method, Body, []).
+
+%% @doc Same as send_req/4.
+%% For a description of SSL Options, look in the <a href="http://www.erlang.org/doc/apps/ssl/index.html">ssl</a> manpage. If the
+%% HTTP Version to use is not specified, the default is 1.1.
+%% <br/>
+%% <ul>
+%% <li>The <code>host_header</code> option is useful in the case where ibrowse is
+%% connecting to a component such as <a
+%% href="http://www.stunnel.org">stunnel</a> which then sets up a
+%% secure connection to a webserver. In this case, the URL supplied to
+%% ibrowse must have the stunnel host/port details, but that won't
+%% make sense to the destination webserver. This option can then be
+%% used to specify what should go in the <code>Host</code> header in
+%% the request.</li>
+%% <li>The <code>stream_to</code> option can be used to have the HTTP
+%% response streamed to a process as messages as data arrives on the
+%% socket. If the calling process wishes to control the rate at which
+%% data is received from the server, the option <code>{stream_to,
+%% {process(), once}}</code> can be specified. The calling process
+%% will have to invoke <code>ibrowse:stream_next(Request_id)</code> to
+%% receive the next packet.</li>
+%%
+%% <li>When both the options <code>save_response_to_file</code> and <code>stream_to</code>
+%% are specified, the former takes precedence.</li>
+%%
+%% <li>For the <code>save_response_to_file</code> option, the response body is saved to
+%% file only if the status code is in the 200-299 range. If not, the response body is returned
+%% as a string.</li>
+%% <li>Whenever an error occurs in the processing of a request, ibrowse will return as much
+%% information as it has, such as HTTP Status Code and HTTP Headers. When this happens, the response
+%% is of the form <code>{error, {Reason, {stat_code, StatusCode}, HTTP_headers}}</code></li>
+%%
+%% <li>The <code>inactivity_timeout</code> option is useful when
+%% dealing with large response bodies and/or slow links. In these
+%% cases, it might be hard to estimate how long a request will take to
+%% complete. In such cases, the client might want to timeout if no
+%% data has been received on the link for a certain time interval.
+%%
+%% This value is also used to close connections which are not in use for
+%% the specified timeout value.
+%% </li>
+%%
+%% <li>
+%% The <code>connect_timeout</code> option is to specify how long the
+%% client process should wait for connection establishment. This is
+%% useful in scenarios where connections to servers are usually setup
+%% very fast, but responses might take much longer compared to
+%% connection setup. In such cases, it is better for the calling
+%% process to timeout faster if there is a problem (DNS lookup
+%% delays/failures, network routing issues, etc). The total timeout
+%% value specified for the request will be enforced. To illustrate using
+%% an example:
+%% <code>
+%% ibrowse:send_req("http://www.example.com/cgi-bin/request", [], get, [], [{connect_timeout, 100}], 1000).
+%% </code>
+%% In the above invocation, if the connection isn't established within
+%% 100 milliseconds, the request will fail with
+%% <code>{error, conn_failed}</code>.<br/>
+%% If connection setup succeeds, the total time allowed for the
+%% request to complete will be 1000 milliseconds minus the time taken
+%% for connection setup.
+%% </li>
+%%
+%% <li> The <code>socket_options</code> option can be used to set
+%% specific options on the socket. The <code>{active, true | false | once}</code>
+%% and <code>{packet_type, Packet_type}</code> will be filtered out by ibrowse. </li>
+%%
+%% <li> The <code>headers_as_is</code> option is to enable the caller
+%% to send headers exactly as specified in the request without ibrowse
+%% adding some of its own. Required for some picky servers apparently. </li>
+%%
+%% <li>The <code>give_raw_headers</code> option is to enable the
+%% caller to get access to the raw status line and raw unparsed
+%% headers. Not quite sure why someone would want this, but one of my
+%% users asked for it, so here it is. </li>
+%%
+%% <li> The <code>preserve_chunked_encoding</code> option enables the caller
+%% to receive the raw data stream when the Transfer-Encoding of the server
+%% response is Chunked.
+%% </li>
+%% </ul>
+%%
+%% @spec send_req(Url::string(), Headers::headerList(), Method::method(), Body::body(), Options::optionList()) -> response()
+%% optionList() = [option()]
+%% option() = {max_sessions, integer()} |
+%% {response_format,response_format()}|
+%% {stream_chunk_size, integer()} |
+%% {max_pipeline_size, integer()} |
+%% {trace, boolean()} |
+%% {is_ssl, boolean()} |
+%% {ssl_options, [SSLOpt]} |
+%% {pool_name, atom()} |
+%% {proxy_host, string()} |
+%% {proxy_port, integer()} |
+%% {proxy_user, string()} |
+%% {proxy_password, string()} |
+%% {use_absolute_uri, boolean()} |
+%% {basic_auth, {username(), password()}} |
+%% {cookie, string()} |
+%% {content_length, integer()} |
+%% {content_type, string()} |
+%% {save_response_to_file, srtf()} |
+%% {stream_to, stream_to()} |
+%% {http_vsn, {MajorVsn, MinorVsn}} |
+%% {host_header, string()} |
+%% {inactivity_timeout, integer()} |
+%% {connect_timeout, integer()} |
+%% {socket_options, Sock_opts} |
+%% {transfer_encoding, {chunked, ChunkSize}} |
+%% {headers_as_is, boolean()} |
+%% {give_raw_headers, boolean()} |
+%% {preserve_chunked_encoding,boolean()}
+%%
+%% stream_to() = process() | {process(), once}
+%% process() = pid() | atom()
+%% username() = string()
+%% password() = string()
+%% SSLOpt = term()
+%% Sock_opts = [Sock_opt]
+%% Sock_opt = term()
+%% ChunkSize = integer()
+%% srtf() = boolean() | filename()
+%% filename() = string()
+%% response_format() = list | binary
+send_req(Url, Headers, Method, Body, Options) ->
+ send_req(Url, Headers, Method, Body, Options, 30000).
+
+%% @doc Same as send_req/5.
+%% All timeout values are in milliseconds.
+%% @spec send_req(Url, Headers::headerList(), Method::method(), Body::body(), Options::optionList(), Timeout) -> response()
+%% Timeout = integer() | infinity
+send_req(Url, Headers, Method, Body, Options, Timeout) ->
+ case catch parse_url(Url) of
+ #url{host = Host,
+ port = Port,
+ protocol = Protocol} = Parsed_url ->
+ Lb_pid = case ets:lookup(ibrowse_lb, {Host, Port}) of
+ [] ->
+ get_lb_pid(Parsed_url);
+ [#lb_pid{pid = Lb_pid_1}] ->
+ Lb_pid_1
+ end,
+ Max_sessions = get_max_sessions(Host, Port, Options),
+ Max_pipeline_size = get_max_pipeline_size(Host, Port, Options),
+ Options_1 = merge_options(Host, Port, Options),
+ {SSLOptions, IsSSL} =
+ case (Protocol == https) orelse
+ get_value(is_ssl, Options_1, false) of
+ false -> {[], false};
+ true -> {get_value(ssl_options, Options_1, []), true}
+ end,
+ try_routing_request(Lb_pid, Parsed_url,
+ Max_sessions,
+ Max_pipeline_size,
+ {SSLOptions, IsSSL},
+ Headers, Method, Body, Options_1, Timeout, 0);
+ Err ->
+ {error, {url_parsing_failed, Err}}
+ end.
+
+try_routing_request(Lb_pid, Parsed_url,
+ Max_sessions,
+ Max_pipeline_size,
+ {SSLOptions, IsSSL},
+ Headers, Method, Body, Options_1, Timeout, Try_count) when Try_count < 3 ->
+ case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
+ Max_sessions,
+ Max_pipeline_size,
+ {SSLOptions, IsSSL}) of
+ {ok, Conn_Pid} ->
+ case do_send_req(Conn_Pid, Parsed_url, Headers,
+ Method, Body, Options_1, Timeout) of
+ {error, sel_conn_closed} ->
+ try_routing_request(Lb_pid, Parsed_url,
+ Max_sessions,
+ Max_pipeline_size,
+ {SSLOptions, IsSSL},
+ Headers, Method, Body, Options_1, Timeout, Try_count + 1);
+ Res ->
+ Res
+ end;
+ Err ->
+ Err
+ end;
+try_routing_request(_, _, _, _, _, _, _, _, _, _, _) ->
+ {error, retry_later}.
+
+merge_options(Host, Port, Options) ->
+ Config_options = get_config_value({options, Host, Port}, []),
+ lists:foldl(
+ fun({Key, Val}, Acc) ->
+ case lists:keysearch(Key, 1, Options) of
+ false ->
+ [{Key, Val} | Acc];
+ _ ->
+ Acc
+ end
+ end, Options, Config_options).
+
+get_lb_pid(Url) ->
+ gen_server:call(?MODULE, {get_lb_pid, Url}).
+
+get_max_sessions(Host, Port, Options) ->
+ get_value(max_sessions, Options,
+ get_config_value({max_sessions, Host, Port},
+ default_max_sessions())).
+
+get_max_pipeline_size(Host, Port, Options) ->
+ get_value(max_pipeline_size, Options,
+ get_config_value({max_pipeline_size, Host, Port},
+ default_max_pipeline_size())).
+
+default_max_sessions() ->
+ safe_get_env(ibrowse, default_max_sessions, ?DEF_MAX_SESSIONS).
+
+default_max_pipeline_size() ->
+ safe_get_env(ibrowse, default_max_pipeline_size, ?DEF_MAX_PIPELINE_SIZE).
+
+safe_get_env(App, Key, Def_val) ->
+ case application:get_env(App, Key) of
+ undefined ->
+ Def_val;
+ {ok, Val} ->
+ Val
+ end.
+
+%% @doc Deprecated. Use set_max_sessions/3 and set_max_pipeline_size/3
+%% for achieving the same effect.
+set_dest(Host, Port, [{max_sessions, Max} | T]) ->
+ set_max_sessions(Host, Port, Max),
+ set_dest(Host, Port, T);
+set_dest(Host, Port, [{max_pipeline_size, Max} | T]) ->
+ set_max_pipeline_size(Host, Port, Max),
+ set_dest(Host, Port, T);
+set_dest(Host, Port, [{trace, Bool} | T]) when Bool == true; Bool == false ->
+ ibrowse ! {trace, true, Host, Port},
+ set_dest(Host, Port, T);
+set_dest(_Host, _Port, [H | _]) ->
+ exit({invalid_option, H});
+set_dest(_, _, []) ->
+ ok.
+
+%% @doc Set the maximum number of connections allowed to a specific Host:Port.
+%% @spec set_max_sessions(Host::string(), Port::integer(), Max::integer()) -> ok
+set_max_sessions(Host, Port, Max) when is_integer(Max), Max > 0 ->
+ gen_server:call(?MODULE, {set_config_value, {max_sessions, Host, Port}, Max}).
+
+%% @doc Set the maximum pipeline size for each connection to a specific Host:Port.
+%% @spec set_max_pipeline_size(Host::string(), Port::integer(), Max::integer()) -> ok
+set_max_pipeline_size(Host, Port, Max) when is_integer(Max), Max > 0 ->
+ gen_server:call(?MODULE, {set_config_value, {max_pipeline_size, Host, Port}, Max}).
+
+do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
+ case catch ibrowse_http_client:send_req(Conn_Pid, Parsed_url,
+ Headers, Method, ensure_bin(Body),
+ Options, Timeout) of
+ {'EXIT', {timeout, _}} ->
+ {error, req_timedout};
+ {'EXIT', {noproc, {gen_server, call, [Conn_Pid, _, _]}}} ->
+ {error, sel_conn_closed};
+ {error, connection_closed} ->
+ {error, sel_conn_closed};
+ {'EXIT', Reason} ->
+ {error, {'EXIT', Reason}};
+ {ok, St_code, Headers, Body} = Ret when is_binary(Body) ->
+ case get_value(response_format, Options, list) of
+ list ->
+ {ok, St_code, Headers, binary_to_list(Body)};
+ binary ->
+ Ret
+ end;
+ Ret ->
+ Ret
+ end.
+
+ensure_bin(L) when is_list(L) -> list_to_binary(L);
+ensure_bin(B) when is_binary(B) -> B;
+ensure_bin(Fun) when is_function(Fun) -> Fun;
+ensure_bin({Fun}) when is_function(Fun) -> Fun;
+ensure_bin({Fun, _} = Body) when is_function(Fun) -> Body.
+
+%% @doc Creates an HTTP client process to the specified Host:Port which
+%% is not part of the load balancing pool. This is useful in cases
+%% where some requests to a webserver might take a long time whereas
+%% some might take a very short time. To avoid getting these quick
+%% requests stuck in the pipeline behind time consuming requests, use
+%% this function to get a handle to a connection process. <br/>
+%% <b>Note:</b> Calling this function only creates a worker process. No connection
+%% is setup. The connection attempt is made only when the first
+%% request is sent via any of the send_req_direct/4,5,6,7 functions.<br/>
+%% <b>Note:</b> It is the responsibility of the calling process to control
+%% pipeline size on such connections.
+%%
+%% @spec spawn_worker_process(Url::string()) -> {ok, pid()}
+spawn_worker_process(Url) ->
+ ibrowse_http_client:start(Url).
+
+%% @doc Same as spawn_worker_process/1 but takes as input a Host and Port
+%% instead of a URL.
+%% @spec spawn_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
+spawn_worker_process(Host, Port) ->
+ ibrowse_http_client:start({Host, Port}).
+
+%% @doc Same as spawn_worker_process/1 except that the calling process
+%% is linked to the worker process which is spawned.
+%% @spec spawn_link_worker_process(Url::string()) -> {ok, pid()}
+spawn_link_worker_process(Url) ->
+ ibrowse_http_client:start_link(Url).
+
+%% @doc Same as spawn_worker_process/2 except that the calling process
+%% is linked to the worker process which is spawned.
+%% @spec spawn_link_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
+spawn_link_worker_process(Host, Port) ->
+ ibrowse_http_client:start_link({Host, Port}).
+
+%% @doc Terminate a worker process spawned using
+%% spawn_worker_process/2 or spawn_link_worker_process/2. Requests in
+%% progress will get the error response <pre>{error, closing_on_request}</pre>
+%% @spec stop_worker_process(Conn_pid::pid()) -> ok
+stop_worker_process(Conn_pid) ->
+ ibrowse_http_client:stop(Conn_pid).
+
+%% @doc Same as send_req/3 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method) ->
+ send_req_direct(Conn_pid, Url, Headers, Method, [], []).
+
+%% @doc Same as send_req/4 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method, Body) ->
+ send_req_direct(Conn_pid, Url, Headers, Method, Body, []).
+
+%% @doc Same as send_req/5 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method, Body, Options) ->
+ send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, 30000).
+
+%% @doc Same as send_req/6 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, Timeout) ->
+ case catch parse_url(Url) of
+ #url{host = Host,
+ port = Port} = Parsed_url ->
+ Options_1 = merge_options(Host, Port, Options),
+ case do_send_req(Conn_pid, Parsed_url, Headers, Method, Body, Options_1, Timeout) of
+ {error, {'EXIT', {noproc, _}}} ->
+ {error, worker_is_dead};
+ Ret ->
+ Ret
+ end;
+ Err ->
+ {error, {url_parsing_failed, Err}}
+ end.
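+
+%% A usage sketch of a dedicated, out-of-pool connection (illustrative;
+%% the URL is made up):
+%%
+%%   {ok, Pid} = ibrowse:spawn_worker_process("http://localhost:5984/"),
+%%   {ok, _Status, _Headers, _Body} =
+%%       ibrowse:send_req_direct(Pid, "http://localhost:5984/", [], get),
+%%   ibrowse:stop_worker_process(Pid).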
+
+%% @doc Tell ibrowse to stream the next chunk of data to the
+%% caller. Should be used in conjunction with the
+%% <code>stream_to</code> option
+%% @spec stream_next(Req_id :: req_id()) -> ok | {error, unknown_req_id}
+stream_next(Req_id) ->
+ case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
+ [] ->
+ {error, unknown_req_id};
+ [{_, Pid}] ->
+ catch Pid ! {stream_next, Req_id},
+ ok
+ end.
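+
+%% A sketch of the flow-controlled streaming this supports (the URL is
+%% illustrative). With {stream_to, {Pid, once}}, each message is
+%% delivered only after the caller asks for it:
+%%
+%%   {ibrowse_req_id, Req_id} =
+%%       ibrowse:send_req("http://www.example.com/", [], get, [],
+%%                        [{stream_to, {self(), once}}]),
+%%   receive
+%%       {ibrowse_async_headers, Req_id, _Status, _Headers} ->
+%%           ok = ibrowse:stream_next(Req_id)
+%%   end.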
+
+%% @doc Tell ibrowse to close the connection associated with the
+%% specified stream. Should be used in conjunction with the
+%% <code>stream_to</code> option. Note that all requests in progress on
+%% the connection which is serving this Req_id will be aborted, and an
+%% error returned.
+%% @spec stream_close(Req_id :: req_id()) -> ok | {error, unknown_req_id}
+stream_close(Req_id) ->
+ case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
+ [] ->
+ {error, unknown_req_id};
+ [{_, Pid}] ->
+ catch Pid ! {stream_close, Req_id},
+ ok
+ end.
+
+%% @doc Turn tracing on for the ibrowse process
+trace_on() ->
+ ibrowse ! {trace, true}.
+%% @doc Turn tracing off for the ibrowse process
+trace_off() ->
+ ibrowse ! {trace, false}.
+
+%% @doc Turn tracing on for all connections to the specified HTTP
+%% server. Host is whatever is specified as the domain name in the URL
+%% @spec trace_on(Host, Port) -> ok
+%% Host = string()
+%% Port = integer()
+trace_on(Host, Port) ->
+ ibrowse ! {trace, true, Host, Port},
+ ok.
+
+%% @doc Turn tracing OFF for all connections to the specified HTTP
+%% server.
+%% @spec trace_off(Host, Port) -> ok
+trace_off(Host, Port) ->
+ ibrowse ! {trace, false, Host, Port},
+ ok.
+
+%% @doc Turn Off ALL tracing
+%% @spec all_trace_off() -> ok
+all_trace_off() ->
+ ibrowse ! all_trace_off,
+ ok.
+
+%% @doc Shows some internal information about load balancing. Info
+%% about workers spawned using spawn_worker_process/2 or
+%% spawn_link_worker_process/2 is not included.
+show_dest_status() ->
+ Dests = lists:filter(fun({lb_pid, {Host, Port}, _}) when is_list(Host),
+ is_integer(Port) ->
+ true;
+ (_) ->
+ false
+ end, ets:tab2list(ibrowse_lb)),
+ All_ets = ets:all(),
+ io:format("~-40.40s | ~-5.5s | ~-10.10s | ~s~n",
+ ["Server:port", "ETS", "Num conns", "LB Pid"]),
+ io:format("~80.80.=s~n", [""]),
+ lists:foreach(fun({lb_pid, {Host, Port}, Lb_pid}) ->
+ case lists:dropwhile(
+ fun(Tid) ->
+ ets:info(Tid, owner) /= Lb_pid
+ end, All_ets) of
+ [] ->
+                              io:format("~-40.40s | ~-5.5s | ~-10.10s | ~s~n",
+ [Host ++ ":" ++ integer_to_list(Port),
+ "",
+ "",
+ io_lib:format("~p", [Lb_pid])]
+ );
+ [Tid | _] ->
+ catch (
+ begin
+ Size = ets:info(Tid, size),
+                                        io:format("~-40.40s | ~-5.5s | ~-10.10s | ~s~n",
+ [Host ++ ":" ++ integer_to_list(Port),
+ io_lib:format("~p", [Tid]),
+ integer_to_list(Size),
+ io_lib:format("~p", [Lb_pid])]
+ )
+ end
+ )
+ end
+ end, Dests).
+
+%% @doc Shows some internal information about load balancing to a
+%% specified Host:Port. Info about workers spawned using
+%% spawn_worker_process/2 or spawn_link_worker_process/2 is not
+%% included.
+show_dest_status(Host, Port) ->
+ case ets:lookup(ibrowse_lb, {Host, Port}) of
+ [] ->
+ no_active_processes;
+ [#lb_pid{pid = Lb_pid}] ->
+ io:format("Load Balancer Pid : ~p~n", [Lb_pid]),
+ io:format("LB process msg q size : ~p~n", [(catch process_info(Lb_pid, message_queue_len))]),
+ case lists:dropwhile(
+ fun(Tid) ->
+ ets:info(Tid, owner) /= Lb_pid
+ end, ets:all()) of
+ [] ->
+ io:format("Couldn't locate ETS table for ~p~n", [Lb_pid]);
+ [Tid | _] ->
+ First = ets:first(Tid),
+ Last = ets:last(Tid),
+ Size = ets:info(Tid, size),
+ io:format("LB ETS table id : ~p~n", [Tid]),
+ io:format("Num Connections : ~p~n", [Size]),
+ case Size of
+ 0 ->
+ ok;
+ _ ->
+ {First_p_sz, _} = First,
+ {Last_p_sz, _} = Last,
+ io:format("Smallest pipeline : ~1000.p~n", [First_p_sz]),
+ io:format("Largest pipeline : ~1000.p~n", [Last_p_sz])
+ end
+ end
+ end.
+
+%% @doc Clear current configuration for ibrowse and load from the file
+%% ibrowse.conf in the IBROWSE_EBIN/../priv directory. Current
+%% configuration is cleared only if the ibrowse.conf file is readable
+%% using file:consult/1
+rescan_config() ->
+ gen_server:call(?MODULE, rescan_config).
+
+%% @doc Clear current configuration for ibrowse and load from the specified
+%% file. Current configuration is cleared only if the specified
+%% file is readable using file:consult/1
+rescan_config(File) when is_list(File) ->
+ gen_server:call(?MODULE, {rescan_config, File}).
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init/1
+%% Description: Initiates the server
+%% Returns: {ok, State} |
+%% {ok, State, Timeout} |
+%% ignore |
+%% {stop, Reason}
+%%--------------------------------------------------------------------
+init(_) ->
+ process_flag(trap_exit, true),
+ State = #state{},
+ put(my_trace_flag, State#state.trace),
+ put(ibrowse_trace_token, "ibrowse"),
+ ibrowse_lb = ets:new(ibrowse_lb, [named_table, public, {keypos, 2}]),
+ ibrowse_conf = ets:new(ibrowse_conf, [named_table, protected, {keypos, 2}]),
+ ibrowse_stream = ets:new(ibrowse_stream, [named_table, public]),
+ import_config(),
+    {ok, State}.
+
+import_config() ->
+ case code:priv_dir(ibrowse) of
+ {error, _} ->
+ ok;
+ PrivDir ->
+ Filename = filename:join(PrivDir, "ibrowse.conf"),
+ import_config(Filename)
+ end.
+
+import_config(Filename) ->
+ case file:consult(Filename) of
+ {ok, Terms} ->
+ ets:delete_all_objects(ibrowse_conf),
+ Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options})
+ when is_list(Host), is_integer(Port),
+ is_integer(MaxSess), MaxSess > 0,
+ is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
+ I = [{{max_sessions, Host, Port}, MaxSess},
+ {{max_pipeline_size, Host, Port}, MaxPipe},
+ {{options, Host, Port}, Options}],
+ lists:foreach(
+ fun({X, Y}) ->
+ ets:insert(ibrowse_conf,
+ #ibrowse_conf{key = X,
+ value = Y})
+ end, I);
+ ({K, V}) ->
+ ets:insert(ibrowse_conf,
+ #ibrowse_conf{key = K,
+ value = V});
+ (X) ->
+ io:format("Skipping unrecognised term: ~p~n", [X])
+ end,
+ lists:foreach(Fun, Terms);
+ _Err ->
+ ok
+ end.
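+
+%% An illustrative ibrowse.conf accepted by the clauses above (host,
+%% port and limits are hypothetical). Terms are read with
+%% file:consult/1, so each one ends with a full stop:
+%%
+%%   {dest, "www.example.com", 80, 10, 20, []}.
+%%   {max_headers_size, 4096}.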
+
+%% @doc Internal export
+get_config_value(Key) ->
+ [#ibrowse_conf{value = V}] = ets:lookup(ibrowse_conf, Key),
+ V.
+
+%% @doc Internal export
+get_config_value(Key, DefVal) ->
+ case ets:lookup(ibrowse_conf, Key) of
+ [] ->
+ DefVal;
+ [#ibrowse_conf{value = V}] ->
+ V
+ end.
+
+set_config_value(Key, Val) ->
+ ets:insert(ibrowse_conf, #ibrowse_conf{key = Key, value = Val}).
+%%--------------------------------------------------------------------
+%% Function: handle_call/3
+%% Description: Handling call messages
+%% Returns: {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} | (terminate/2 is called)
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_call({get_lb_pid, #url{host = Host, port = Port} = Url}, _From, State) ->
+ Pid = do_get_connection(Url, ets:lookup(ibrowse_lb, {Host, Port})),
+ {reply, Pid, State};
+
+handle_call(stop, _From, State) ->
+ do_trace("IBROWSE shutting down~n", []),
+ ets:foldl(fun(#lb_pid{pid = Pid}, Acc) ->
+ ibrowse_lb:stop(Pid),
+ Acc
+ end, [], ibrowse_lb),
+ {stop, normal, ok, State};
+
+handle_call({set_config_value, Key, Val}, _From, State) ->
+ set_config_value(Key, Val),
+ {reply, ok, State};
+
+handle_call(rescan_config, _From, State) ->
+ Ret = (catch import_config()),
+ {reply, Ret, State};
+
+handle_call({rescan_config, File}, _From, State) ->
+ Ret = (catch import_config(File)),
+ {reply, Ret, State};
+
+handle_call(Request, _From, State) ->
+ Reply = {unknown_request, Request},
+ {reply, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast/2
+%% Description: Handling cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info/2
+%% Description: Handling all non call/cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_info(all_trace_off, State) ->
+ Mspec = [{{ibrowse_conf,{trace,'$1','$2'},true},[],[{{'$1','$2'}}]}],
+ Trace_on_dests = ets:select(ibrowse_conf, Mspec),
+ Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _) ->
+ case lists:member({H, P}, Trace_on_dests) of
+ false ->
+ ok;
+ true ->
+ catch Pid ! {trace, false}
+ end;
+ (_, Acc) ->
+ Acc
+ end,
+ ets:foldl(Fun, undefined, ibrowse_lb),
+ ets:select_delete(ibrowse_conf, [{{ibrowse_conf,{trace,'$1','$2'},true},[],['true']}]),
+ {noreply, State};
+
+handle_info({trace, Bool}, State) ->
+ put(my_trace_flag, Bool),
+ {noreply, State};
+
+handle_info({trace, Bool, Host, Port}, State) ->
+ Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _)
+ when H == Host,
+ P == Port ->
+ catch Pid ! {trace, Bool};
+ (_, Acc) ->
+ Acc
+ end,
+ ets:foldl(Fun, undefined, ibrowse_lb),
+ ets:insert(ibrowse_conf, #ibrowse_conf{key = {trace, Host, Port},
+ value = Bool}),
+ {noreply, State};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: terminate/2
+%% Description: Shutdown the server
+%% Returns: any (ignored by gen_server)
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Convert process state when code is changed
+%% Returns: {ok, NewState}
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+do_get_connection(#url{host = Host, port = Port}, []) ->
+ {ok, Pid} = ibrowse_lb:start_link([Host, Port]),
+ ets:insert(ibrowse_lb, #lb_pid{host_port = {Host, Port}, pid = Pid}),
+ Pid;
+do_get_connection(_Url, [#lb_pid{pid = Pid}]) ->
+ Pid.
diff --git a/1.1.x/src/ibrowse/ibrowse.hrl b/1.1.x/src/ibrowse/ibrowse.hrl
new file mode 100644
index 00000000..18dde827
--- /dev/null
+++ b/1.1.x/src/ibrowse/ibrowse.hrl
@@ -0,0 +1,21 @@
+-ifndef(IBROWSE_HRL).
+-define(IBROWSE_HRL, "ibrowse.hrl").
+
+-record(url, {
+ abspath,
+ host,
+ port,
+ username,
+ password,
+ path,
+ protocol,
+ host_type % 'hostname', 'ipv4_address' or 'ipv6_address'
+}).
+
+-record(lb_pid, {host_port, pid}).
+
+-record(client_conn, {key, cur_pipeline_size = 0, reqs_served = 0}).
+
+-record(ibrowse_conf, {key, value}).
+
+-endif.
diff --git a/1.1.x/src/ibrowse/ibrowse_app.erl b/1.1.x/src/ibrowse/ibrowse_app.erl
new file mode 100644
index 00000000..d3a0f7bb
--- /dev/null
+++ b/1.1.x/src/ibrowse/ibrowse_app.erl
@@ -0,0 +1,63 @@
+%%%-------------------------------------------------------------------
+%%% File : ibrowse_app.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description :
+%%%
+%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_app).
+
+-behaviour(application).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+%%--------------------------------------------------------------------
+-export([
+ start/2,
+ stop/1
+ ]).
+
+%%--------------------------------------------------------------------
+%% Internal exports
+%%--------------------------------------------------------------------
+-export([
+ ]).
+
+%%--------------------------------------------------------------------
+%% Macros
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Records
+%%--------------------------------------------------------------------
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Func: start/2
+%% Returns: {ok, Pid} |
+%% {ok, Pid, State} |
+%% {error, Reason}
+%%--------------------------------------------------------------------
+start(_Type, _StartArgs) ->
+ case ibrowse_sup:start_link() of
+ {ok, Pid} ->
+ {ok, Pid};
+ Error ->
+ Error
+ end.
+
+%%--------------------------------------------------------------------
+%% Func: stop/1
+%% Returns: any
+%%--------------------------------------------------------------------
+stop(_State) ->
+ ok.
+
+%%====================================================================
+%% Internal functions
+%%====================================================================
diff --git a/1.1.x/src/ibrowse/ibrowse_http_client.erl b/1.1.x/src/ibrowse/ibrowse_http_client.erl
new file mode 100644
index 00000000..eb2bf315
--- /dev/null
+++ b/1.1.x/src/ibrowse/ibrowse_http_client.erl
@@ -0,0 +1,1855 @@
+%%%-------------------------------------------------------------------
+%%% File : ibrowse_http_client.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description : The name says it all
+%%%
+%%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_http_client).
+-behaviour(gen_server).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+-export([
+ start_link/1,
+ start/1,
+ stop/1,
+ send_req/7
+ ]).
+
+-ifdef(debug).
+-compile(export_all).
+-endif.
+
+%% gen_server callbacks
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+ ]).
+
+-include("ibrowse.hrl").
+-include_lib("kernel/include/inet.hrl").
+
+-record(state, {host, port, connect_timeout,
+ inactivity_timer_ref,
+ use_proxy = false, proxy_auth_digest,
+ ssl_options = [], is_ssl = false, socket,
+ proxy_tunnel_setup = false,
+ tunnel_setup_queue = [],
+ reqs=queue:new(), cur_req, status=idle, http_status_code,
+ reply_buffer = <<>>, rep_buf_size=0, streamed_size = 0,
+ recvd_headers=[],
+ status_line, raw_headers,
+ is_closing, send_timer, content_length,
+ deleted_crlf = false, transfer_encoding,
+ chunk_size, chunk_size_buffer = <<>>,
+ recvd_chunk_size, interim_reply_sent = false,
+ lb_ets_tid, cur_pipeline_size = 0, prev_req_id
+ }).
+
+-record(request, {url, method, options, from,
+ stream_to, caller_controls_socket = false,
+ caller_socket_options = [],
+ req_id,
+ stream_chunk_size,
+ save_response_to_file = false,
+ tmp_file_name, tmp_file_fd, preserve_chunked_encoding,
+ response_format}).
+
+-import(ibrowse_lib, [
+ get_value/2,
+ get_value/3,
+ do_trace/2
+ ]).
+
+-define(DEFAULT_STREAM_CHUNK_SIZE, 1024*1024).
+-define(dec2hex(X), erlang:integer_to_list(X, 16)).
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link/0
+%% Description: Starts the server
+%%--------------------------------------------------------------------
+start(Args) ->
+ gen_server:start(?MODULE, Args, []).
+
+start_link(Args) ->
+ gen_server:start_link(?MODULE, Args, []).
+
+stop(Conn_pid) ->
+ case catch gen_server:call(Conn_pid, stop) of
+ {'EXIT', {timeout, _}} ->
+ exit(Conn_pid, kill),
+ ok;
+ _ ->
+ ok
+ end.
+
+send_req(Conn_Pid, Url, Headers, Method, Body, Options, Timeout) ->
+ gen_server:call(
+ Conn_Pid,
+ {send_req, {Url, Headers, Method, Body, Options, Timeout}}, Timeout).
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init/1
+%% Description: Initiates the server
+%% Returns: {ok, State} |
+%% {ok, State, Timeout} |
+%% ignore |
+%% {stop, Reason}
+%%--------------------------------------------------------------------
+init({Lb_Tid, #url{host = Host, port = Port}, {SSLOptions, Is_ssl}}) ->
+ State = #state{host = Host,
+ port = Port,
+ ssl_options = SSLOptions,
+ is_ssl = Is_ssl,
+ lb_ets_tid = Lb_Tid},
+ put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
+ put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
+ {ok, State};
+init(Url) when is_list(Url) ->
+ case catch ibrowse_lib:parse_url(Url) of
+ #url{protocol = Protocol} = Url_rec ->
+ init({undefined, Url_rec, {[], Protocol == https}});
+ {'EXIT', _} ->
+ {error, invalid_url}
+ end;
+init({Host, Port}) ->
+ State = #state{host = Host,
+ port = Port},
+ put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
+ put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_call/3
+%% Description: Handling call messages
+%% Returns: {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} | (terminate/2 is called)
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+%% Received a request when the remote server has already sent us a
+%% Connection: Close header
+handle_call({send_req, _}, _From, #state{is_closing = true} = State) ->
+ {reply, {error, connection_closing}, State};
+
+handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
+ From, State) ->
+ send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State);
+
+handle_call(stop, _From, State) ->
+ do_close(State),
+ do_error_reply(State, closing_on_request),
+ {stop, normal, ok, State};
+
+handle_call(Request, _From, State) ->
+ Reply = {unknown_request, Request},
+ {reply, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast/2
+%% Description: Handling cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info/2
+%% Description: Handling all non call/cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_info({tcp, _Sock, Data}, #state{status = Status} = State) ->
+%% io:format("Recvd data: ~p~n", [Data]),
+ do_trace("Data recvd in state: ~p. Size: ~p. ~p~n~n", [Status, size(Data), Data]),
+ handle_sock_data(Data, State);
+handle_info({ssl, _Sock, Data}, State) ->
+ handle_sock_data(Data, State);
+
+handle_info({stream_next, Req_id}, #state{socket = Socket,
+ cur_req = #request{req_id = Req_id}} = State) ->
+ %% io:format("Client process set {active, once}~n", []),
+ do_setopts(Socket, [{active, once}], State),
+ {noreply, set_inac_timer(State)};
+
+handle_info({stream_next, _Req_id}, State) ->
+ _Cur_req_id = case State#state.cur_req of
+ #request{req_id = Cur} ->
+ Cur;
+ _ ->
+ undefined
+ end,
+%% io:format("Ignoring stream_next as ~1000.p is not cur req (~1000.p)~n",
+%% [_Req_id, _Cur_req_id]),
+ {noreply, State};
+
+handle_info({stream_close, _Req_id}, State) ->
+ shutting_down(State),
+ do_close(State),
+ do_error_reply(State, closing_on_request),
+ {stop, normal, State};
+
+handle_info({tcp_closed, _Sock}, State) ->
+ do_trace("TCP connection closed by peer!~n", []),
+ handle_sock_closed(State),
+ {stop, normal, State};
+handle_info({ssl_closed, _Sock}, State) ->
+ do_trace("SSL connection closed by peer!~n", []),
+ handle_sock_closed(State),
+ {stop, normal, State};
+
+handle_info({tcp_error, _Sock, Reason}, State) ->
+ do_trace("Error on connection to ~1000.p:~1000.p -> ~1000.p~n",
+ [State#state.host, State#state.port, Reason]),
+ handle_sock_closed(State),
+ {stop, normal, State};
+handle_info({ssl_error, _Sock, Reason}, State) ->
+ do_trace("Error on SSL connection to ~1000.p:~1000.p -> ~1000.p~n",
+ [State#state.host, State#state.port, Reason]),
+ handle_sock_closed(State),
+ {stop, normal, State};
+
+handle_info({req_timedout, From}, State) ->
+ case lists:keymember(From, #request.from, queue:to_list(State#state.reqs)) of
+ false ->
+ {noreply, State};
+ true ->
+ shutting_down(State),
+ do_error_reply(State, req_timedout),
+ {stop, normal, State}
+ end;
+
+handle_info(timeout, State) ->
+ do_trace("Inactivity timeout triggered. Shutting down connection~n", []),
+ shutting_down(State),
+ do_error_reply(State, req_timedout),
+ {stop, normal, State};
+
+handle_info({trace, Bool}, State) ->
+ put(my_trace_flag, Bool),
+ {noreply, State};
+
+handle_info(Info, State) ->
+ io:format("Unknown message recvd for ~1000.p:~1000.p -> ~p~n",
+ [State#state.host, State#state.port, Info]),
+ io:format("Recvd unknown message ~p when in state: ~p~n", [Info, State]),
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: terminate/2
+%% Description: Shutdown the server
+%% Returns: any (ignored by gen_server)
+%%--------------------------------------------------------------------
+terminate(_Reason, State) ->
+ do_close(State),
+ ok.
+
+%%--------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Convert process state when code is changed
+%% Returns: {ok, NewState}
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Handles data recvd on the socket
+%%--------------------------------------------------------------------
+handle_sock_data(Data, #state{status=idle}=State) ->
+    do_trace("Data recvd on socket in state idle! ~1000.p~n", [Data]),
+ shutting_down(State),
+ do_error_reply(State, data_in_status_idle),
+ do_close(State),
+ {stop, normal, State};
+
+handle_sock_data(Data, #state{status = get_header}=State) ->
+ case parse_response(Data, State) of
+ {error, _Reason} ->
+ shutting_down(State),
+ {stop, normal, State};
+ #state{socket = Socket, status = Status, cur_req = CurReq} = State_1 ->
+ case {Status, CurReq} of
+ {get_header, #request{caller_controls_socket = true}} ->
+ do_setopts(Socket, [{active, once}], State_1);
+ _ ->
+ active_once(State_1)
+ end,
+ {noreply, set_inac_timer(State_1)}
+ end;
+
+handle_sock_data(Data, #state{status = get_body,
+ socket = Socket,
+ content_length = CL,
+ http_status_code = StatCode,
+ recvd_headers = Headers,
+ chunk_size = CSz} = State) ->
+ case (CL == undefined) and (CSz == undefined) of
+ true ->
+ case accumulate_response(Data, State) of
+ {error, Reason} ->
+ shutting_down(State),
+ fail_pipelined_requests(State,
+ {error, {Reason, {stat_code, StatCode}, Headers}}),
+ {stop, normal, State};
+ State_1 ->
+ active_once(State_1),
+ State_2 = set_inac_timer(State_1),
+ {noreply, State_2}
+ end;
+ _ ->
+ case parse_11_response(Data, State) of
+ {error, Reason} ->
+ shutting_down(State),
+ fail_pipelined_requests(State,
+ {error, {Reason, {stat_code, StatCode}, Headers}}),
+ {stop, normal, State};
+ #state{cur_req = #request{caller_controls_socket = Ccs},
+ interim_reply_sent = Irs} = State_1 ->
+ case Irs of
+ true ->
+ active_once(State_1);
+ false when Ccs == true ->
+ do_setopts(Socket, [{active, once}], State);
+ false ->
+ active_once(State_1)
+ end,
+ State_2 = State_1#state{interim_reply_sent = false},
+ case Ccs of
+ true ->
+ cancel_timer(State_2#state.inactivity_timer_ref, {eat_message, timeout}),
+ {noreply, State_2#state{inactivity_timer_ref = undefined}};
+ _ ->
+ {noreply, set_inac_timer(State_2)}
+ end;
+ State_1 ->
+ active_once(State_1),
+ State_2 = set_inac_timer(State_1),
+ {noreply, State_2}
+ end
+ end.
+
+accumulate_response(Data,
+ #state{
+ cur_req = #request{save_response_to_file = Srtf,
+ tmp_file_fd = undefined} = CurReq,
+ http_status_code=[$2 | _]}=State) when Srtf /= false ->
+ TmpFilename = make_tmp_filename(Srtf),
+ case file:open(TmpFilename, [write, delayed_write, raw]) of
+ {ok, Fd} ->
+ accumulate_response(Data, State#state{
+ cur_req = CurReq#request{
+ tmp_file_fd = Fd,
+ tmp_file_name = TmpFilename}});
+ {error, Reason} ->
+ {error, {file_open_error, Reason}}
+ end;
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf,
+ tmp_file_fd = Fd},
+ transfer_encoding=chunked,
+ reply_buffer = Reply_buf,
+ http_status_code=[$2 | _]
+ } = State) when Srtf /= false ->
+ case file:write(Fd, [Reply_buf, Data]) of
+ ok ->
+ State#state{reply_buffer = <<>>};
+ {error, Reason} ->
+ {error, {file_write_error, Reason}}
+ end;
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf,
+ tmp_file_fd = Fd},
+ reply_buffer = RepBuf,
+ http_status_code=[$2 | _]
+ } = State) when Srtf /= false ->
+ case file:write(Fd, [RepBuf, Data]) of
+ ok ->
+ State#state{reply_buffer = <<>>};
+ {error, Reason} ->
+ {error, {file_write_error, Reason}}
+ end;
+accumulate_response(Data, #state{reply_buffer = RepBuf,
+ rep_buf_size = RepBufSize,
+ streamed_size = Streamed_size,
+ cur_req = CurReq}=State) ->
+ #request{stream_to = StreamTo,
+ req_id = ReqId,
+ stream_chunk_size = Stream_chunk_size,
+ response_format = Response_format,
+ caller_controls_socket = Caller_controls_socket} = CurReq,
+ RepBuf_1 = <<RepBuf/binary, Data/binary>>,
+ New_data_size = RepBufSize - Streamed_size,
+ case StreamTo of
+ undefined ->
+ State#state{reply_buffer = RepBuf_1};
+ _ when Caller_controls_socket == true ->
+ do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
+ State#state{reply_buffer = <<>>,
+ interim_reply_sent = true,
+ streamed_size = Streamed_size + size(RepBuf_1)};
+ _ when New_data_size >= Stream_chunk_size ->
+ {Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
+ do_interim_reply(StreamTo, Response_format, ReqId, Stream_chunk),
+ State_1 = State#state{
+ reply_buffer = <<>>,
+ interim_reply_sent = true,
+ streamed_size = Streamed_size + Stream_chunk_size},
+ case Rem_data of
+ <<>> ->
+ State_1;
+ _ ->
+ accumulate_response(Rem_data, State_1)
+ end;
+ _ ->
+ State#state{reply_buffer = RepBuf_1}
+ end.
+
+make_tmp_filename(true) ->
+ DownloadDir = ibrowse:get_config_value(download_dir, filename:absname("./")),
+ {A,B,C} = now(),
+ filename:join([DownloadDir,
+ "ibrowse_tmp_file_"++
+ integer_to_list(A) ++
+ integer_to_list(B) ++
+ integer_to_list(C)]);
+make_tmp_filename(File) when is_list(File) ->
+ File.
+
+
+%%--------------------------------------------------------------------
+%% Handles the case when the server closes the socket
+%%--------------------------------------------------------------------
+handle_sock_closed(#state{status=get_header} = State) ->
+ shutting_down(State),
+ do_error_reply(State, connection_closed);
+
+handle_sock_closed(#state{cur_req=undefined} = State) ->
+ shutting_down(State);
+
+%% We check for IsClosing because the server could have sent a
+%% Connection: Close header and closed the socket to indicate the end
+%% of the response. There may be pipelined requests which still need a
+%% response.
+handle_sock_closed(#state{reply_buffer = Buf, reqs = Reqs, http_status_code = SC,
+ is_closing = IsClosing,
+ cur_req = #request{tmp_file_name=TmpFilename,
+ tmp_file_fd=Fd} = CurReq,
+ status = get_body,
+ recvd_headers = Headers,
+ status_line = Status_line,
+ raw_headers = Raw_headers
+ }=State) ->
+ #request{from=From, stream_to=StreamTo, req_id=ReqId,
+ response_format = Resp_format,
+ options = Options} = CurReq,
+ case IsClosing of
+ true ->
+ {_, Reqs_1} = queue:out(Reqs),
+ Body = case TmpFilename of
+ undefined ->
+ Buf;
+ _ ->
+ ok = file:close(Fd),
+ {file, TmpFilename}
+ end,
+ Reply = case get_value(give_raw_headers, Options, false) of
+ true ->
+ {ok, Status_line, Raw_headers, Body};
+ false ->
+                        {ok, SC, Headers, Body}
+ end,
+ State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+ ok = do_error_reply(State_1#state{reqs = Reqs_1}, connection_closed),
+ State_1;
+ _ ->
+ ok = do_error_reply(State, connection_closed),
+ State
+ end.
+
+do_connect(Host, Port, Options, #state{is_ssl = true,
+ use_proxy = false,
+ ssl_options = SSLOptions},
+ Timeout) ->
+ ssl:connect(Host, Port, get_sock_options(Host, Options, SSLOptions), Timeout);
+do_connect(Host, Port, Options, _State, Timeout) ->
+ gen_tcp:connect(Host, Port, get_sock_options(Host, Options, []), Timeout).
+
+get_sock_options(Host, Options, SSLOptions) ->
+ Caller_socket_options = get_value(socket_options, Options, []),
+ Ipv6Options = case is_ipv6_host(Host) of
+ true ->
+ [inet6];
+ false ->
+ []
+ end,
+ Other_sock_options = filter_sock_options(SSLOptions ++ Caller_socket_options ++ Ipv6Options),
+ case lists:keysearch(nodelay, 1, Other_sock_options) of
+ false ->
+ [{nodelay, true}, binary, {active, false} | Other_sock_options];
+ {value, _} ->
+ [binary, {active, false} | Other_sock_options]
+ end.
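+
+%% For instance (illustrative), with no caller socket options, no SSL
+%% options and a host that resolves to an IPv4 address, the options
+%% passed to the connect call are:
+%%   [{nodelay, true}, binary, {active, false}]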
+
+is_ipv6_host(Host) ->
+ case inet_parse:address(Host) of
+ {ok, {_, _, _, _, _, _, _, _}} ->
+ true;
+ {ok, {_, _, _, _}} ->
+ false;
+ _ ->
+ case inet:gethostbyname(Host) of
+ {ok, #hostent{h_addrtype = inet6}} ->
+ true;
+ _ ->
+ false
+ end
+ end.
+
+%% We don't want the caller to specify certain options
+filter_sock_options(Opts) ->
+ lists:filter(fun({active, _}) ->
+ false;
+ ({packet, _}) ->
+ false;
+ (list) ->
+ false;
+ (_) ->
+ true
+ end, Opts).
+
+do_send(Req, #state{socket = Sock,
+ is_ssl = true,
+ use_proxy = true,
+ proxy_tunnel_setup = Pts}) when Pts /= done -> gen_tcp:send(Sock, Req);
+do_send(Req, #state{socket = Sock, is_ssl = true}) -> ssl:send(Sock, Req);
+do_send(Req, #state{socket = Sock, is_ssl = false}) -> gen_tcp:send(Sock, Req).
+
+%% @spec do_send_body(Source::source_descriptor(), State::#state{}, TE::boolean()) -> ok | error()
+%% source_descriptor() = fun_arity_0 |
+%% {fun_arity_0} |
+%% {fun_arity_1, term()}
+%% error() = term()
+do_send_body(Source, State, TE) when is_function(Source) ->
+ do_send_body({Source}, State, TE);
+do_send_body({Source}, State, TE) when is_function(Source) ->
+ do_send_body1(Source, Source(), State, TE);
+do_send_body({Source, Source_state}, State, TE) when is_function(Source) ->
+ do_send_body1(Source, Source(Source_state), State, TE);
+do_send_body(Body, State, _TE) ->
+ do_send(Body, State).
+
+do_send_body1(Source, Resp, State, TE) ->
+ case Resp of
+ {ok, Data} ->
+ do_send(maybe_chunked_encode(Data, TE), State),
+ do_send_body({Source}, State, TE);
+ {ok, Data, New_source_state} ->
+ do_send(maybe_chunked_encode(Data, TE), State),
+ do_send_body({Source, New_source_state}, State, TE);
+ eof when TE == true ->
+ do_send(<<"0\r\n\r\n">>, State),
+ ok;
+ eof ->
+ ok;
+ Err ->
+ Err
+ end.
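+
+%% A body source sketch matching the source_descriptor() forms above: a
+%% {fun/1, State} pair which streams a list of chunks (contents and URL
+%% are illustrative) and then signals eof:
+%%
+%%   Source = fun([]) -> eof;
+%%               ([Chunk | Rest]) -> {ok, Chunk, Rest}
+%%            end,
+%%   ibrowse:send_req("http://www.example.com/", [], post,
+%%                    {Source, [<<"part1">>, <<"part2">>]},
+%%                    [{transfer_encoding, chunked}]).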
+
+maybe_chunked_encode(Data, false) ->
+ Data;
+maybe_chunked_encode(Data, true) ->
+ [?dec2hex(iolist_size(Data)), "\r\n", Data, "\r\n"].
+
+do_close(#state{socket = undefined}) -> ok;
+do_close(#state{socket = Sock,
+ is_ssl = true,
+ use_proxy = true,
+ proxy_tunnel_setup = Pts
+ }) when Pts /= done -> catch gen_tcp:close(Sock);
+do_close(#state{socket = Sock, is_ssl = true}) -> catch ssl:close(Sock);
+do_close(#state{socket = Sock, is_ssl = false}) -> catch gen_tcp:close(Sock).
+
+active_once(#state{cur_req = #request{caller_controls_socket = true}}) ->
+ ok;
+active_once(#state{socket = Socket} = State) ->
+ do_setopts(Socket, [{active, once}], State).
+
+do_setopts(_Sock, [], _) -> ok;
+do_setopts(Sock, Opts, #state{is_ssl = true,
+ use_proxy = true,
+ proxy_tunnel_setup = Pts}
+ ) when Pts /= done -> inet:setopts(Sock, Opts);
+do_setopts(Sock, Opts, #state{is_ssl = true}) -> ssl:setopts(Sock, Opts);
+do_setopts(Sock, Opts, _) -> inet:setopts(Sock, Opts).
+
+check_ssl_options(Options, State) ->
+ case get_value(is_ssl, Options, false) of
+ false ->
+ State;
+ true ->
+ State#state{is_ssl=true, ssl_options=get_value(ssl_options, Options)}
+ end.
+
+send_req_1(From,
+ #url{host = Host,
+ port = Port} = Url,
+ Headers, Method, Body, Options, Timeout,
+ #state{socket = undefined} = State) ->
+ {Host_1, Port_1, State_1} =
+ case get_value(proxy_host, Options, false) of
+ false ->
+ {Host, Port, State};
+ PHost ->
+ ProxyUser = get_value(proxy_user, Options, []),
+ ProxyPassword = get_value(proxy_password, Options, []),
+ Digest = http_auth_digest(ProxyUser, ProxyPassword),
+ {PHost, get_value(proxy_port, Options, 80),
+ State#state{use_proxy = true,
+ proxy_auth_digest = Digest}}
+ end,
+ State_2 = check_ssl_options(Options, State_1),
+ do_trace("Connecting...~n", []),
+ Conn_timeout = get_value(connect_timeout, Options, Timeout),
+ case do_connect(Host_1, Port_1, Options, State_2, Conn_timeout) of
+ {ok, Sock} ->
+ do_trace("Connected! Socket: ~1000.p~n", [Sock]),
+ State_3 = State_2#state{socket = Sock,
+ connect_timeout = Conn_timeout},
+ send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State_3);
+ Err ->
+ shutting_down(State_2),
+ do_trace("Error connecting. Reason: ~1000.p~n", [Err]),
+ gen_server:reply(From, {error, {conn_failed, Err}}),
+ {stop, normal, State_2}
+ end;
+
+%% Send a CONNECT request.
+%% Wait for 200 OK
+%% Upgrade to SSL connection
+%% Then send request
+
+send_req_1(From,
+ #url{
+ host = Server_host,
+ port = Server_port
+ } = Url,
+ Headers, Method, Body, Options, Timeout,
+ #state{
+ proxy_tunnel_setup = false,
+ use_proxy = true,
+ is_ssl = true} = State) ->
+ NewReq = #request{
+ method = connect,
+ preserve_chunked_encoding = get_value(preserve_chunked_encoding, Options, false),
+ options = Options
+ },
+ State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
+ Pxy_auth_headers = maybe_modify_headers(Url, Method, Options, [], State_1),
+ Path = [Server_host, $:, integer_to_list(Server_port)],
+ {Req, Body_1} = make_request(connect, Pxy_auth_headers,
+ Path, Path,
+ [], Options, State_1, undefined),
+ TE = is_chunked_encoding_specified(Options),
+ trace_request(Req),
+ case do_send(Req, State) of
+ ok ->
+ case do_send_body(Body_1, State_1, TE) of
+ ok ->
+ trace_request_body(Body_1),
+ active_once(State_1),
+ Ref = case Timeout of
+ infinity ->
+ undefined;
+ _ ->
+ erlang:send_after(Timeout, self(), {req_timedout, From})
+ end,
+ State_2 = State_1#state{status = get_header,
+ cur_req = NewReq,
+ send_timer = Ref,
+ proxy_tunnel_setup = in_progress,
+ tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout}]},
+ State_3 = set_inac_timer(State_2),
+ {noreply, State_3};
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, {send_failed, Err}}),
+ {stop, normal, State_1}
+ end;
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, {send_failed, Err}}),
+ {stop, normal, State_1}
+ end;
+
+send_req_1(From, Url, Headers, Method, Body, Options, Timeout,
+ #state{proxy_tunnel_setup = in_progress,
+ tunnel_setup_queue = Q} = State) ->
+ do_trace("Queued SSL request awaiting tunnel setup: ~n"
+ "URL : ~s~n"
+ "Method : ~p~n"
+ "Headers : ~p~n", [Url, Method, Headers]),
+ {noreply, State#state{tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout} | Q]}};
+
+send_req_1(From,
+ #url{abspath = AbsPath,
+ path = RelPath} = Url,
+ Headers, Method, Body, Options, Timeout,
+ #state{status = Status,
+ socket = Socket} = State) ->
+ cancel_timer(State#state.inactivity_timer_ref, {eat_message, timeout}),
+ ReqId = make_req_id(),
+ Resp_format = get_value(response_format, Options, list),
+ Caller_socket_options = get_value(socket_options, Options, []),
+ {StreamTo, Caller_controls_socket} =
+ case get_value(stream_to, Options, undefined) of
+ {Caller, once} when is_pid(Caller) or
+ is_atom(Caller) ->
+ Async_pid_rec = {{req_id_pid, ReqId}, self()},
+ true = ets:insert(ibrowse_stream, Async_pid_rec),
+ {Caller, true};
+ undefined ->
+ {undefined, false};
+ Caller when is_pid(Caller) or
+ is_atom(Caller) ->
+ {Caller, false};
+ Stream_to_inv ->
+ exit({invalid_option, {stream_to, Stream_to_inv}})
+ end,
+ SaveResponseToFile = get_value(save_response_to_file, Options, false),
+ NewReq = #request{url = Url,
+ method = Method,
+ stream_to = StreamTo,
+ caller_controls_socket = Caller_controls_socket,
+ caller_socket_options = Caller_socket_options,
+ options = Options,
+ req_id = ReqId,
+ save_response_to_file = SaveResponseToFile,
+ stream_chunk_size = get_stream_chunk_size(Options),
+ response_format = Resp_format,
+ from = From,
+ preserve_chunked_encoding = get_value(preserve_chunked_encoding, Options, false)
+ },
+ State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
+ Headers_1 = maybe_modify_headers(Url, Method, Options, Headers, State_1),
+ {Req, Body_1} = make_request(Method,
+ Headers_1,
+ AbsPath, RelPath, Body, Options, State_1,
+ ReqId),
+ trace_request(Req),
+ do_setopts(Socket, Caller_socket_options, State_1),
+ TE = is_chunked_encoding_specified(Options),
+ case do_send(Req, State_1) of
+ ok ->
+ case do_send_body(Body_1, State_1, TE) of
+ ok ->
+ trace_request_body(Body_1),
+ State_2 = inc_pipeline_counter(State_1),
+ active_once(State_2),
+ Ref = case Timeout of
+ infinity ->
+ undefined;
+ _ ->
+ erlang:send_after(Timeout, self(), {req_timedout, From})
+ end,
+ State_3 = case Status of
+ idle ->
+ State_2#state{status = get_header,
+ cur_req = NewReq,
+ send_timer = Ref};
+ _ ->
+ State_2#state{send_timer = Ref}
+ end,
+ case StreamTo of
+ undefined ->
+ ok;
+ _ ->
+ gen_server:reply(From, {ibrowse_req_id, ReqId})
+ end,
+ State_4 = set_inac_timer(State_3),
+ {noreply, State_4};
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, {send_failed, Err}}),
+ {stop, normal, State_1}
+ end;
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, {send_failed, Err}}),
+ {stop, normal, State_1}
+ end.
+
+maybe_modify_headers(#url{}, connect, _, Headers, State) ->
+ add_proxy_auth_headers(State, Headers);
+maybe_modify_headers(#url{host = Host, port = Port} = Url,
+ _Method,
+ Options, Headers, State) ->
+ case get_value(headers_as_is, Options, false) of
+ false ->
+ Headers_1 = add_auth_headers(Url, Options, Headers, State),
+ HostHeaderValue = case lists:keysearch(host_header, 1, Options) of
+ false ->
+ case Port of
+ 80 -> Host;
+ 443 -> Host;
+ _ -> [Host, ":", integer_to_list(Port)]
+ end;
+ {value, {_, Host_h_val}} ->
+ Host_h_val
+ end,
+ [{"Host", HostHeaderValue} | Headers_1];
+ true ->
+ Headers
+ end.
+
+add_auth_headers(#url{username = User,
+ password = UPw},
+ Options,
+ Headers,
+ State) ->
+ Headers_1 = case User of
+ undefined ->
+ case get_value(basic_auth, Options, undefined) of
+ undefined ->
+ Headers;
+ {U,P} ->
+ [{"Authorization", ["Basic ", http_auth_digest(U, P)]} | Headers]
+ end;
+ _ ->
+ [{"Authorization", ["Basic ", http_auth_digest(User, UPw)]} | Headers]
+ end,
+ add_proxy_auth_headers(State, Headers_1).
+
+add_proxy_auth_headers(#state{use_proxy = false}, Headers) ->
+ Headers;
+add_proxy_auth_headers(#state{proxy_auth_digest = []}, Headers) ->
+ Headers;
+add_proxy_auth_headers(#state{proxy_auth_digest = Auth_digest}, Headers) ->
+ [{"Proxy-Authorization", ["Basic ", Auth_digest]} | Headers].
+
+http_auth_digest([], []) ->
+ [];
+http_auth_digest(Username, Password) ->
+ ibrowse_lib:encode_base64(Username ++ [$: | Password]).
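+
+%% For example, http_auth_digest("user", "pass") returns the base64
+%% encoding of "user:pass", so the header sent on the wire is:
+%%   Authorization: Basic dXNlcjpwYXNz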
+
+make_request(Method, Headers, AbsPath, RelPath, Body, Options,
+ #state{use_proxy = UseProxy, is_ssl = Is_ssl}, ReqId) ->
+ HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})),
+ Fun1 = fun({X, Y}) when is_atom(X) ->
+ {to_lower(atom_to_list(X)), X, Y};
+ ({X, Y}) when is_list(X) ->
+ {to_lower(X), X, Y}
+ end,
+ Headers_0 = [Fun1(X) || X <- Headers],
+ Headers_1 =
+ case lists:keysearch("content-length", 1, Headers_0) of
+ false when (Body =:= [] orelse Body =:= <<>>) andalso
+ (Method =:= post orelse Method =:= put) ->
+ [{"content-length", "Content-Length", "0"} | Headers_0];
+ false when is_binary(Body) orelse is_list(Body) ->
+ [{"content-length", "Content-Length", integer_to_list(iolist_size(Body))} | Headers_0];
+ _ ->
+ %% Content-Length is already specified or Body is a
+ %% function or function/state pair
+ Headers_0
+ end,
+ {Headers_2, Body_1} =
+ case is_chunked_encoding_specified(Options) of
+ false ->
+ {[{Y, Z} || {_, Y, Z} <- Headers_1], Body};
+ true ->
+ Chunk_size_1 = case get_value(transfer_encoding, Options) of
+ chunked ->
+ 5120;
+ {chunked, Chunk_size} ->
+ Chunk_size
+ end,
+ {[{Y, Z} || {X, Y, Z} <- Headers_1,
+ X /= "content-length"] ++
+ [{"Transfer-Encoding", "chunked"}],
+ chunk_request_body(Body, Chunk_size_1)}
+ end,
+ Headers_3 = case lists:member({include_ibrowse_req_id, true}, Options) of
+ true ->
+ [{"x-ibrowse-request-id", io_lib:format("~1000.p",[ReqId])} | Headers_2];
+ false ->
+ Headers_2
+ end,
+ Headers_4 = cons_headers(Headers_3),
+ Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of
+ true ->
+ case Is_ssl of
+ true ->
+ RelPath;
+ false ->
+ AbsPath
+ end;
+ false ->
+ RelPath
+ end,
+ {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_4, crnl()], Body_1}.
+
+is_chunked_encoding_specified(Options) ->
+ case get_value(transfer_encoding, Options, false) of
+ false ->
+ false;
+ {chunked, _} ->
+ true;
+ chunked ->
+ true
+ end.
+
+http_vsn_string({0,9}) -> "HTTP/0.9";
+http_vsn_string({1,0}) -> "HTTP/1.0";
+http_vsn_string({1,1}) -> "HTTP/1.1".
+
+cons_headers(Headers) ->
+ cons_headers(Headers, []).
+cons_headers([], Acc) ->
+ encode_headers(Acc);
+cons_headers([{basic_auth, {U,P}} | T], Acc) ->
+ cons_headers(T, [{"Authorization",
+ ["Basic ", ibrowse_lib:encode_base64(U++":"++P)]} | Acc]);
+cons_headers([{cookie, Cookie} | T], Acc) ->
+ cons_headers(T, [{"Cookie", Cookie} | Acc]);
+cons_headers([{content_length, L} | T], Acc) ->
+ cons_headers(T, [{"Content-Length", L} | Acc]);
+cons_headers([{content_type, L} | T], Acc) ->
+ cons_headers(T, [{"Content-Type", L} | Acc]);
+cons_headers([H | T], Acc) ->
+ cons_headers(T, [H | Acc]).
+
+encode_headers(L) ->
+ encode_headers(L, []).
+encode_headers([{http_vsn, _Val} | T], Acc) ->
+ encode_headers(T, Acc);
+encode_headers([{Name,Val} | T], Acc) when is_list(Name) ->
+ encode_headers(T, [[Name, ": ", fmt_val(Val), crnl()] | Acc]);
+encode_headers([{Name,Val} | T], Acc) when is_atom(Name) ->
+ encode_headers(T, [[atom_to_list(Name), ": ", fmt_val(Val), crnl()] | Acc]);
+encode_headers([], Acc) ->
+ lists:reverse(Acc).
+
+chunk_request_body(Body, _ChunkSize) when is_tuple(Body) orelse
+ is_function(Body) ->
+ Body;
+chunk_request_body(Body, ChunkSize) ->
+ chunk_request_body(Body, ChunkSize, []).
+
+chunk_request_body(Body, _ChunkSize, Acc) when Body == <<>>; Body == [] ->
+ LastChunk = "0\r\n",
+ lists:reverse(["\r\n", LastChunk | Acc]);
+chunk_request_body(Body, ChunkSize, Acc) when is_binary(Body),
+ size(Body) >= ChunkSize ->
+ <<ChunkBody:ChunkSize/binary, Rest/binary>> = Body,
+ Chunk = [?dec2hex(ChunkSize),"\r\n",
+ ChunkBody, "\r\n"],
+ chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
+chunk_request_body(Body, _ChunkSize, Acc) when is_binary(Body) ->
+ BodySize = size(Body),
+ Chunk = [?dec2hex(BodySize),"\r\n",
+ Body, "\r\n"],
+ LastChunk = "0\r\n",
+ lists:reverse(["\r\n", LastChunk, Chunk | Acc]);
+chunk_request_body(Body, ChunkSize, Acc) when length(Body) >= ChunkSize ->
+ {ChunkBody, Rest} = split_list_at(Body, ChunkSize),
+ Chunk = [?dec2hex(ChunkSize),"\r\n",
+ ChunkBody, "\r\n"],
+ chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
+chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
+ BodySize = length(Body),
+ Chunk = [?dec2hex(BodySize),"\r\n",
+ Body, "\r\n"],
+ LastChunk = "0\r\n",
+ lists:reverse(["\r\n", LastChunk, Chunk | Acc]).
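+
+%% For example, chunk_request_body(<<"hello">>, 4) returns an iolist
+%% which flattens to the standard chunked framing:
+%%   "4\r\nhell\r\n1\r\no\r\n0\r\n\r\n"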
+
+
+parse_response(_Data, #state{cur_req = undefined}=State) ->
+ State#state{status = idle};
+parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
+ cur_req = CurReq} = State) ->
+ #request{from=From, stream_to=StreamTo, req_id=ReqId,
+ method=Method, response_format = Resp_format,
+ options = Options
+ } = CurReq,
+ MaxHeaderSize = ibrowse:get_config_value(max_headers_size, infinity),
+ case scan_header(Acc, Data) of
+ {yes, Headers, Data_1} ->
+ do_trace("Recvd Header Data -> ~s~n----~n", [Headers]),
+ do_trace("Recvd headers~n--- Headers Begin ---~n~s~n--- Headers End ---~n~n", [Headers]),
+ {HttpVsn, StatCode, Headers_1, Status_line, Raw_headers} = parse_headers(Headers),
+ do_trace("HttpVsn: ~p StatusCode: ~p Headers_1 -> ~1000.p~n", [HttpVsn, StatCode, Headers_1]),
+ LCHeaders = [{to_lower(X), Y} || {X,Y} <- Headers_1],
+ ConnClose = to_lower(get_value("connection", LCHeaders, "false")),
+ IsClosing = is_connection_closing(HttpVsn, ConnClose),
+ case IsClosing of
+ true ->
+ shutting_down(State);
+ false ->
+ ok
+ end,
+ Give_raw_headers = get_value(give_raw_headers, Options, false),
+ State_1 = case Give_raw_headers of
+ true ->
+ State#state{recvd_headers=Headers_1, status=get_body,
+ reply_buffer = <<>>,
+ status_line = Status_line,
+ raw_headers = Raw_headers,
+ http_status_code=StatCode, is_closing=IsClosing};
+ false ->
+ State#state{recvd_headers=Headers_1, status=get_body,
+ reply_buffer = <<>>,
+ http_status_code=StatCode, is_closing=IsClosing}
+ end,
+ put(conn_close, ConnClose),
+ TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
+ case get_value("content-length", LCHeaders, undefined) of
+ _ when Method == connect,
+ hd(StatCode) == $2 ->
+ cancel_timer(State#state.send_timer),
+ {_, Reqs_1} = queue:out(Reqs),
+ upgrade_to_ssl(set_cur_request(State#state{reqs = Reqs_1,
+ recvd_headers = [],
+ status = idle
+ }));
+ _ when Method == connect ->
+ {_, Reqs_1} = queue:out(Reqs),
+ do_error_reply(State#state{reqs = Reqs_1},
+ {error, proxy_tunnel_failed}),
+ {error, proxy_tunnel_failed};
+ _ when Method == head ->
+ {_, Reqs_1} = queue:out(Reqs),
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
+ {ok, StatCode, Headers_1, []}),
+ cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+ State_2 = reset_state(State_1_1),
+ State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
+ parse_response(Data_1, State_3);
+ _ when hd(StatCode) =:= $1 ->
+ %% No message body is expected. Server may send
+ %% one or more 1XX responses before a proper
+ %% response.
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ do_trace("Recvd a status code of ~p. Ignoring and waiting for a proper response~n", [StatCode]),
+ parse_response(Data_1, State_1#state{recvd_headers = [],
+ status = get_header});
+ _ when StatCode =:= "204";
+ StatCode =:= "304" ->
+ %% No message body is expected for these Status Codes.
+ %% RFC2616 - Sec 4.4
+ {_, Reqs_1} = queue:out(Reqs),
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
+ {ok, StatCode, Headers_1, []}),
+ cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+ State_2 = reset_state(State_1_1),
+ State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
+ parse_response(Data_1, State_3);
+ _ when TransferEncoding =:= "chunked" ->
+ do_trace("Chunked encoding detected...~n",[]),
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ case parse_11_response(Data_1, State_1#state{transfer_encoding=chunked,
+ chunk_size=chunk_start,
+ reply_buffer = <<>>}) of
+ {error, Reason} ->
+ fail_pipelined_requests(State_1,
+ {error, {Reason,
+ {stat_code, StatCode}, Headers_1}}),
+ {error, Reason};
+ State_2 ->
+ State_2
+ end;
+ undefined when HttpVsn =:= "HTTP/1.0";
+ ConnClose =:= "close" ->
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ State_1#state{reply_buffer = Data_1};
+ undefined ->
+ fail_pipelined_requests(State_1,
+ {error, {content_length_undefined,
+ {stat_code, StatCode}, Headers}}),
+ {error, content_length_undefined};
+ V ->
+ case catch list_to_integer(V) of
+ V_1 when is_integer(V_1), V_1 >= 0 ->
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ do_trace("Recvd Content-Length of ~p~n", [V_1]),
+ State_2 = State_1#state{rep_buf_size=0,
+ reply_buffer = <<>>,
+ content_length=V_1},
+ case parse_11_response(Data_1, State_2) of
+ {error, Reason} ->
+ fail_pipelined_requests(State_1,
+ {error, {Reason,
+ {stat_code, StatCode}, Headers_1}}),
+ {error, Reason};
+ State_3 ->
+ State_3
+ end;
+ _ ->
+ fail_pipelined_requests(State_1,
+ {error, {content_length_undefined,
+ {stat_code, StatCode}, Headers}}),
+ {error, content_length_undefined}
+ end
+ end;
+ {no, Acc_1} when MaxHeaderSize == infinity ->
+ State#state{reply_buffer = Acc_1};
+ {no, Acc_1} when size(Acc_1) < MaxHeaderSize ->
+ State#state{reply_buffer = Acc_1};
+ {no, _Acc_1} ->
+ fail_pipelined_requests(State, {error, max_headers_size_exceeded}),
+ {error, max_headers_size_exceeded}
+ end.
+
+upgrade_to_ssl(#state{socket = Socket,
+ connect_timeout = Conn_timeout,
+ ssl_options = Ssl_options,
+ tunnel_setup_queue = Q} = State) ->
+ case ssl:connect(Socket, Ssl_options, Conn_timeout) of
+ {ok, Ssl_socket} ->
+ do_trace("Upgraded to SSL socket!!~n", []),
+ State_1 = State#state{socket = Ssl_socket,
+ proxy_tunnel_setup = done},
+ send_queued_requests(lists:reverse(Q), State_1);
+ Err ->
+            do_trace("Upgrade to SSL socket failed. Reason: ~p~n", [Err]),
+ do_error_reply(State, {error, {send_failed, Err}}),
+ {error, send_failed}
+ end.
+
+send_queued_requests([], State) ->
+ do_trace("Sent all queued requests via SSL connection~n", []),
+ State#state{tunnel_setup_queue = []};
+send_queued_requests([{From, Url, Headers, Method, Body, Options, Timeout} | Q],
+ State) ->
+ case send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State) of
+ {noreply, State_1} ->
+ send_queued_requests(Q, State_1);
+ Err ->
+ do_trace("Error sending queued SSL request: ~n"
+ "URL : ~s~n"
+ "Method : ~p~n"
+ "Headers : ~p~n", [Url, Method, Headers]),
+ do_error_reply(State, {error, {send_failed, Err}}),
+ {error, send_failed}
+ end.
+
+is_connection_closing("HTTP/0.9", _) -> true;
+is_connection_closing(_, "close") -> true;
+is_connection_closing("HTTP/1.0", "false") -> true;
+is_connection_closing(_, _) -> false.
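+
+%% Note: an HTTP/1.0 response without an explicit Connection header
+%% ("false" is the default looked up by the caller) is treated as
+%% closing, since persistent connections are the default only from
+%% HTTP/1.1 onwards.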
+
+%% This clause determines the chunk size when given data from the beginning of the chunk
+parse_11_response(DataRecvd,
+ #state{transfer_encoding = chunked,
+ chunk_size = chunk_start,
+ chunk_size_buffer = Chunk_sz_buf
+ } = State) ->
+ case scan_crlf(Chunk_sz_buf, DataRecvd) of
+ {yes, ChunkHeader, Data_1} ->
+ State_1 = maybe_accumulate_ce_data(State, <<ChunkHeader/binary, $\r, $\n>>),
+ ChunkSize = parse_chunk_header(ChunkHeader),
+ %%
+ %% Do we have to preserve the chunk encoding when
+ %% streaming? NO. This should be transparent to the client
+ %% process. Chunked encoding was only introduced to make
+ %% it efficient for the server.
+ %%
+ RemLen = size(Data_1),
+ do_trace("Determined chunk size: ~p. Already recvd: ~p~n",
+ [ChunkSize, RemLen]),
+ parse_11_response(Data_1, State_1#state{chunk_size_buffer = <<>>,
+ deleted_crlf = true,
+ recvd_chunk_size = 0,
+ chunk_size = ChunkSize});
+ {no, Data_1} ->
+ State#state{chunk_size_buffer = Data_1}
+ end;
+
+%% This clause is to remove the CRLF between two chunks
+%%
+parse_11_response(DataRecvd,
+ #state{transfer_encoding = chunked,
+ chunk_size = tbd,
+ chunk_size_buffer = Buf
+ } = State) ->
+ case scan_crlf(Buf, DataRecvd) of
+ {yes, _, NextChunk} ->
+ State_1 = maybe_accumulate_ce_data(State, <<$\r, $\n>>),
+ State_2 = State_1#state{chunk_size = chunk_start,
+ chunk_size_buffer = <<>>,
+ deleted_crlf = true},
+ parse_11_response(NextChunk, State_2);
+ {no, Data_1} ->
+ State#state{chunk_size_buffer = Data_1}
+ end;
+
+%% This clause deals with the end of a chunked transfer. ibrowse does
+%% not support Trailers in the Chunked Transfer encoding. Any trailer
+%% received is silently discarded.
+parse_11_response(DataRecvd,
+ #state{transfer_encoding = chunked, chunk_size = 0,
+ cur_req = CurReq,
+ deleted_crlf = DelCrlf,
+ chunk_size_buffer = Trailer,
+ reqs = Reqs} = State) ->
+ do_trace("Detected end of chunked transfer...~n", []),
+ DataRecvd_1 = case DelCrlf of
+ false ->
+ DataRecvd;
+ true ->
+ <<$\r, $\n, DataRecvd/binary>>
+ end,
+ case scan_header(Trailer, DataRecvd_1) of
+ {yes, TEHeaders, Rem} ->
+ {_, Reqs_1} = queue:out(Reqs),
+ State_1 = maybe_accumulate_ce_data(State, <<TEHeaders/binary, $\r, $\n>>),
+ State_2 = handle_response(CurReq,
+ State_1#state{reqs = Reqs_1}),
+ parse_response(Rem, reset_state(State_2));
+ {no, Rem} ->
+ accumulate_response(<<>>, State#state{chunk_size_buffer = Rem, deleted_crlf = false})
+ end;
+
+%% This clause extracts a chunk, given the size.
+parse_11_response(DataRecvd,
+ #state{transfer_encoding = chunked,
+ chunk_size = CSz,
+ recvd_chunk_size = Recvd_csz,
+ rep_buf_size = RepBufSz} = State) ->
+ NeedBytes = CSz - Recvd_csz,
+ DataLen = size(DataRecvd),
+ do_trace("Recvd more data: size: ~p. NeedBytes: ~p~n", [DataLen, NeedBytes]),
+ case DataLen >= NeedBytes of
+ true ->
+ {RemChunk, RemData} = split_binary(DataRecvd, NeedBytes),
+ do_trace("Recvd another chunk...~p~n", [RemChunk]),
+ do_trace("RemData -> ~p~n", [RemData]),
+ case accumulate_response(RemChunk, State) of
+ {error, Reason} ->
+ do_trace("Error accumulating response --> ~p~n", [Reason]),
+ {error, Reason};
+ #state{} = State_1 ->
+ State_2 = State_1#state{chunk_size=tbd},
+ parse_11_response(RemData, State_2)
+ end;
+ false ->
+ accumulate_response(DataRecvd,
+ State#state{rep_buf_size = RepBufSz + DataLen,
+ recvd_chunk_size = Recvd_csz + DataLen})
+ end;
+
+%% This clause extracts the body when Content-Length is specified
+parse_11_response(DataRecvd,
+ #state{content_length=CL, rep_buf_size=RepBufSz,
+ reqs=Reqs}=State) ->
+ NeedBytes = CL - RepBufSz,
+ DataLen = size(DataRecvd),
+ case DataLen >= NeedBytes of
+ true ->
+ {RemBody, Rem} = split_binary(DataRecvd, NeedBytes),
+ {_, Reqs_1} = queue:out(Reqs),
+ State_1 = accumulate_response(RemBody, State),
+ State_2 = handle_response(State_1#state.cur_req, State_1#state{reqs=Reqs_1}),
+ State_3 = reset_state(State_2),
+ parse_response(Rem, State_3);
+ false ->
+ accumulate_response(DataRecvd, State#state{rep_buf_size = (RepBufSz+DataLen)})
+ end.
+
+maybe_accumulate_ce_data(#state{cur_req = #request{preserve_chunked_encoding = false}} = State, _) ->
+ State;
+maybe_accumulate_ce_data(State, Data) ->
+ accumulate_response(Data, State).
+
+handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
+ response_format = Resp_format,
+ save_response_to_file = SaveResponseToFile,
+ tmp_file_name = TmpFilename,
+ tmp_file_fd = Fd,
+ options = Options
+ },
+ #state{http_status_code = SCode,
+ status_line = Status_line,
+ raw_headers = Raw_headers,
+ send_timer = ReqTimer,
+ reply_buffer = RepBuf,
+ recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
+ Body = RepBuf,
+ case Fd of
+ undefined ->
+ ok;
+ _ ->
+ ok = file:close(Fd)
+ end,
+ ResponseBody = case TmpFilename of
+ undefined ->
+ Body;
+ _ ->
+ {file, TmpFilename}
+ end,
+ {Resp_headers_1, Raw_headers_1} = maybe_add_custom_headers(RespHeaders, Raw_headers, Options),
+ Reply = case get_value(give_raw_headers, Options, false) of
+ true ->
+ {ok, Status_line, Raw_headers_1, ResponseBody};
+ false ->
+ {ok, SCode, Resp_headers_1, ResponseBody}
+ end,
+ State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+ cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
+ set_cur_request(State_1);
+handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
+ response_format = Resp_format,
+ options = Options},
+ #state{http_status_code = SCode,
+ status_line = Status_line,
+ raw_headers = Raw_headers,
+ recvd_headers = Resp_headers,
+ reply_buffer = RepBuf,
+ send_timer = ReqTimer} = State) ->
+ Body = RepBuf,
+ {Resp_headers_1, Raw_headers_1} = maybe_add_custom_headers(Resp_headers, Raw_headers, Options),
+ Reply = case get_value(give_raw_headers, Options, false) of
+ true ->
+ {ok, Status_line, Raw_headers_1, Body};
+ false ->
+ {ok, SCode, Resp_headers_1, Body}
+ end,
+ State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+ cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
+ set_cur_request(State_1).
+
+reset_state(State) ->
+ State#state{status = get_header,
+ rep_buf_size = 0,
+ streamed_size = 0,
+ content_length = undefined,
+ reply_buffer = <<>>,
+ chunk_size_buffer = <<>>,
+ recvd_headers = [],
+ status_line = undefined,
+ raw_headers = undefined,
+ deleted_crlf = false,
+ http_status_code = undefined,
+ chunk_size = undefined,
+ transfer_encoding = undefined
+ }.
+
+set_cur_request(#state{reqs = Reqs, socket = Socket} = State) ->
+ case queue:to_list(Reqs) of
+ [] ->
+ State#state{cur_req = undefined};
+ [#request{caller_controls_socket = Ccs} = NextReq | _] ->
+ case Ccs of
+ true ->
+ do_setopts(Socket, [{active, once}], State);
+ _ ->
+ ok
+ end,
+ State#state{cur_req = NextReq}
+ end.
+
+parse_headers(Headers) ->
+ case scan_crlf(Headers) of
+ {yes, StatusLine, T} ->
+ parse_headers(StatusLine, T);
+ {no, StatusLine} ->
+ parse_headers(StatusLine, <<>>)
+ end.
+
+parse_headers(StatusLine, Headers) ->
+ Headers_1 = parse_headers_1(Headers),
+ case parse_status_line(StatusLine) of
+ {ok, HttpVsn, StatCode, _Msg} ->
+ put(http_prot_vsn, HttpVsn),
+ {HttpVsn, StatCode, Headers_1, StatusLine, Headers};
+ _ -> %% An HTTP 0.9 response?
+ put(http_prot_vsn, "HTTP/0.9"),
+ {"HTTP/0.9", undefined, Headers, StatusLine, Headers}
+ end.
+
+% From RFC 2616
+%
+% HTTP/1.1 header field values can be folded onto multiple lines if
+% the continuation line begins with a space or horizontal tab. All
+% linear white space, including folding, has the same semantics as
+% SP. A recipient MAY replace any linear white space with a single
+% SP before interpreting the field value or forwarding the message
+% downstream.
+parse_headers_1(B) when is_binary(B) ->
+ parse_headers_1(binary_to_list(B));
+parse_headers_1(String) ->
+ parse_headers_1(String, [], []).
+
+parse_headers_1([$\n, H |T], [$\r | L], Acc) when H =:= 32;
+ H =:= $\t ->
+ parse_headers_1(lists:dropwhile(fun(X) ->
+ is_whitespace(X)
+ end, T), [32 | L], Acc);
+parse_headers_1([$\n|T], [$\r | L], Acc) ->
+ case parse_header(lists:reverse(L)) of
+ invalid ->
+ parse_headers_1(T, [], Acc);
+ NewHeader ->
+ parse_headers_1(T, [], [NewHeader | Acc])
+ end;
+parse_headers_1([H|T], L, Acc) ->
+ parse_headers_1(T, [H|L], Acc);
+parse_headers_1([], [], Acc) ->
+ lists:reverse(Acc);
+parse_headers_1([], L, Acc) ->
+ Acc_1 = case parse_header(lists:reverse(L)) of
+ invalid ->
+ Acc;
+ NewHeader ->
+ [NewHeader | Acc]
+ end,
+ lists:reverse(Acc_1).
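+
+%% A minimal sketch of the folding behaviour implemented above (the
+%% header names and values are illustrative):
+%%
+%%   parse_headers_1("Subject: a\r\n b\r\nHost: h\r\n")
+%%       -> [{"Subject","a b"},{"Host","h"}]
+%%
+%% The folded continuation line " b" is re-joined to the "Subject" value
+%% with a single SP, as RFC 2616 allows.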
+
+parse_status_line(Line) when is_binary(Line) ->
+ parse_status_line(binary_to_list(Line));
+parse_status_line(Line) ->
+ parse_status_line(Line, get_prot_vsn, [], []).
+parse_status_line([32 | T], get_prot_vsn, ProtVsn, StatCode) ->
+ parse_status_line(T, get_status_code, ProtVsn, StatCode);
+parse_status_line([32 | T], get_status_code, ProtVsn, StatCode) ->
+ {ok, lists:reverse(ProtVsn), lists:reverse(StatCode), T};
+parse_status_line([], get_status_code, ProtVsn, StatCode) ->
+ {ok, lists:reverse(ProtVsn), lists:reverse(StatCode), []};
+parse_status_line([H | T], get_prot_vsn, ProtVsn, StatCode) ->
+ parse_status_line(T, get_prot_vsn, [H|ProtVsn], StatCode);
+parse_status_line([H | T], get_status_code, ProtVsn, StatCode) ->
+ parse_status_line(T, get_status_code, ProtVsn, [H | StatCode]);
+parse_status_line([], _, _, _) ->
+ http_09.
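+
+%% For example (an assumed well-formed status line):
+%%
+%%   parse_status_line("HTTP/1.1 200 OK") -> {ok, "HTTP/1.1", "200", "OK"}
+%%
+%% A line containing no SP separator at all falls out as http_09.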
+
+parse_header(L) ->
+ parse_header(L, []).
+
+parse_header([$: | V], Acc) ->
+ {lists:reverse(Acc), string:strip(V)};
+parse_header([H | T], Acc) ->
+ parse_header(T, [H | Acc]);
+parse_header([], _) ->
+ invalid.
+
+scan_header(Bin) ->
+ case get_crlf_crlf_pos(Bin, 0) of
+ {yes, Pos} ->
+ {Headers, <<_:4/binary, Body/binary>>} = split_binary(Bin, Pos),
+ {yes, Headers, Body};
+ no ->
+ {no, Bin}
+ end.
+
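+%% scan_header/2 is the incremental variant: Bin1 was already scanned
+%% without finding CRLFCRLF, so only the last 4 bytes of Bin1 (where a
+%% match could straddle the boundary) plus the newly received Bin2 are
+%% re-scanned.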
+scan_header(Bin1, Bin2) when size(Bin1) < 4 ->
+ scan_header(<<Bin1/binary, Bin2/binary>>);
+scan_header(Bin1, <<>>) ->
+ scan_header(Bin1);
+scan_header(Bin1, Bin2) ->
+ Bin1_already_scanned_size = size(Bin1) - 4,
+ <<Headers_prefix:Bin1_already_scanned_size/binary, Rest/binary>> = Bin1,
+ Bin_to_scan = <<Rest/binary, Bin2/binary>>,
+ case get_crlf_crlf_pos(Bin_to_scan, 0) of
+ {yes, Pos} ->
+ {Headers_suffix, <<_:4/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
+ {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
+ no ->
+ {no, <<Bin1/binary, Bin2/binary>>}
+ end.
+
+get_crlf_crlf_pos(<<$\r, $\n, $\r, $\n, _/binary>>, Pos) -> {yes, Pos};
+get_crlf_crlf_pos(<<_, Rest/binary>>, Pos) -> get_crlf_crlf_pos(Rest, Pos + 1);
+get_crlf_crlf_pos(<<>>, _) -> no.
+
+scan_crlf(Bin) ->
+ case get_crlf_pos(Bin) of
+ {yes, Pos} ->
+ {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin, Pos),
+ {yes, Prefix, Suffix};
+ no ->
+ {no, Bin}
+ end.
+
+scan_crlf(<<>>, Bin2) ->
+ scan_crlf(Bin2);
+scan_crlf(Bin1, Bin2) when size(Bin1) < 2 ->
+ scan_crlf(<<Bin1/binary, Bin2/binary>>);
+scan_crlf(Bin1, Bin2) ->
+ scan_crlf_1(size(Bin1) - 2, Bin1, Bin2).
+
+scan_crlf_1(Bin1_head_size, Bin1, Bin2) ->
+ <<Bin1_head:Bin1_head_size/binary, Bin1_tail/binary>> = Bin1,
+ Bin3 = <<Bin1_tail/binary, Bin2/binary>>,
+ case get_crlf_pos(Bin3) of
+ {yes, Pos} ->
+ {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin3, Pos),
+ {yes, list_to_binary([Bin1_head, Prefix]), Suffix};
+ no ->
+ {no, list_to_binary([Bin1, Bin2])}
+ end.
+
+get_crlf_pos(Bin) ->
+ get_crlf_pos(Bin, 0).
+
+get_crlf_pos(<<$\r, $\n, _/binary>>, Pos) -> {yes, Pos};
+get_crlf_pos(<<_, Rest/binary>>, Pos) -> get_crlf_pos(Rest, Pos + 1);
+get_crlf_pos(<<>>, _) -> no.
+
+fmt_val(L) when is_list(L) -> L;
+fmt_val(I) when is_integer(I) -> integer_to_list(I);
+fmt_val(A) when is_atom(A) -> atom_to_list(A);
+fmt_val(Term) -> io_lib:format("~p", [Term]).
+
+crnl() -> "\r\n".
+
+method(get) -> "GET";
+method(post) -> "POST";
+method(head) -> "HEAD";
+method(options) -> "OPTIONS";
+method(put) -> "PUT";
+method(delete) -> "DELETE";
+method(trace) -> "TRACE";
+method(mkcol) -> "MKCOL";
+method(propfind) -> "PROPFIND";
+method(proppatch) -> "PROPPATCH";
+method(lock) -> "LOCK";
+method(unlock) -> "UNLOCK";
+method(move) -> "MOVE";
+method(copy) -> "COPY";
+method(connect) -> "CONNECT".
+
+%% From RFC 2616
+%%
+% The chunked encoding modifies the body of a message in order to
+% transfer it as a series of chunks, each with its own size indicator,
+% followed by an OPTIONAL trailer containing entity-header
+% fields. This allows dynamically produced content to be transferred
+% along with the information necessary for the recipient to verify
+% that it has received the full message.
+% Chunked-Body = *chunk
+% last-chunk
+% trailer
+% CRLF
+% chunk = chunk-size [ chunk-extension ] CRLF
+% chunk-data CRLF
+% chunk-size = 1*HEX
+% last-chunk = 1*("0") [ chunk-extension ] CRLF
+% chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
+% chunk-ext-name = token
+% chunk-ext-val = token | quoted-string
+% chunk-data = chunk-size(OCTET)
+% trailer = *(entity-header CRLF)
+% The chunk-size field is a string of hex digits indicating the size
+% of the chunk. The chunked encoding is ended by any chunk whose size
+% is zero, followed by the trailer, which is terminated by an empty
+% line.
+%%
+%% The parsing implemented here discards all chunk extensions. It also
+%% strips trailing spaces from the chunk size fields as Apache 1.3.27 was
+%% sending them.
+parse_chunk_header(ChunkHeader) ->
+ parse_chunk_header(ChunkHeader, []).
+
+parse_chunk_header(<<$;, _/binary>>, Acc) ->
+ hexlist_to_integer(lists:reverse(Acc));
+parse_chunk_header(<<H, T/binary>>, Acc) ->
+ case is_whitespace(H) of
+ true ->
+ parse_chunk_header(T, Acc);
+ false ->
+ parse_chunk_header(T, [H | Acc])
+ end;
+parse_chunk_header(<<>>, Acc) ->
+ hexlist_to_integer(lists:reverse(Acc)).
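+
+%% For example (inputs are illustrative):
+%%
+%%   parse_chunk_header(<<"1a3">>)         -> 419
+%%   parse_chunk_header(<<"1a3;ext=val">>) -> 419
+%%   parse_chunk_header(<<"fa \r">>)       -> 250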
+
+is_whitespace($\s) -> true;
+is_whitespace($\r) -> true;
+is_whitespace($\n) -> true;
+is_whitespace($\t) -> true;
+is_whitespace(_) -> false.
+
+send_async_headers(_ReqId, undefined, _, _State) ->
+ ok;
+send_async_headers(ReqId, StreamTo, Give_raw_headers,
+ #state{status_line = Status_line, raw_headers = Raw_headers,
+ recvd_headers = Headers, http_status_code = StatCode,
+ cur_req = #request{options = Opts}
+ }) ->
+ {Headers_1, Raw_headers_1} = maybe_add_custom_headers(Headers, Raw_headers, Opts),
+ case Give_raw_headers of
+ false ->
+ catch StreamTo ! {ibrowse_async_headers, ReqId, StatCode, Headers_1};
+ true ->
+ catch StreamTo ! {ibrowse_async_headers, ReqId, Status_line, Raw_headers_1}
+ end.
+
+maybe_add_custom_headers(Headers, Raw_headers, Opts) ->
+ Custom_headers = get_value(add_custom_headers, Opts, []),
+ Headers_1 = Headers ++ Custom_headers,
+ Raw_headers_1 = case Custom_headers of
+ [_ | _] when is_binary(Raw_headers) ->
+ Custom_headers_bin = list_to_binary(string:join([[X, $:, Y] || {X, Y} <- Custom_headers], "\r\n")),
+ <<Raw_headers/binary, "\r\n", Custom_headers_bin/binary>>;
+ _ ->
+ Raw_headers
+ end,
+ {Headers_1, Raw_headers_1}.
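+
+%% A sketch of the effect of the add_custom_headers option (all values
+%% below are illustrative):
+%%
+%%   maybe_add_custom_headers([{"server", "x"}], <<"Server: x">>,
+%%                            [{add_custom_headers, [{"X-Foo", "bar"}]}])
+%%       -> {[{"server", "x"}, {"X-Foo", "bar"}],
+%%           <<"Server: x\r\nX-Foo:bar">>}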
+
+format_response_data(Resp_format, Body) ->
+ case Resp_format of
+ list when is_list(Body) ->
+ flatten(Body);
+ list when is_binary(Body) ->
+ binary_to_list(Body);
+ binary when is_list(Body) ->
+ list_to_binary(Body);
+ _ ->
+ %% This is to cater for sending messages such as
+ %% {chunk_start, _}, chunk_end, etc.
+ Body
+ end.
+
+do_reply(State, From, undefined, _, Resp_format, {ok, St_code, Headers, Body}) ->
+ Msg_1 = {ok, St_code, Headers, format_response_data(Resp_format, Body)},
+ gen_server:reply(From, Msg_1),
+ dec_pipeline_counter(State);
+do_reply(State, From, undefined, _, _, Msg) ->
+ gen_server:reply(From, Msg),
+ dec_pipeline_counter(State);
+do_reply(#state{prev_req_id = Prev_req_id} = State,
+ _From, StreamTo, ReqId, Resp_format, {ok, _, _, Body}) ->
+ State_1 = dec_pipeline_counter(State),
+ case Body of
+ [] ->
+ ok;
+ _ ->
+ Body_1 = format_response_data(Resp_format, Body),
+ catch StreamTo ! {ibrowse_async_response, ReqId, Body_1}
+ end,
+ catch StreamTo ! {ibrowse_async_response_end, ReqId},
+ %% We don't want to delete the Req-id to Pid mapping straightaway
+ %% as the client may send a stream_next message just while we are
+ %% sending back this ibrowse_async_response_end message. If we
+ %% deleted this mapping straightaway, the caller will see a
+ %% {error, unknown_req_id} when it calls ibrowse:stream_next/1. To
+ %% get around this, we store the req id, and clear it after the
+ %% next request. If there are weird combinations of stream,
+ %% stream_once and sync requests on the same connection, it will
+ %% take a while for the req_id-pid mapping to get cleared, but it
+ %% should do no harm.
+ ets:delete(ibrowse_stream, {req_id_pid, Prev_req_id}),
+ State_1#state{prev_req_id = ReqId};
+do_reply(State, _From, StreamTo, ReqId, Resp_format, Msg) ->
+ State_1 = dec_pipeline_counter(State),
+ Msg_1 = format_response_data(Resp_format, Msg),
+ catch StreamTo ! {ibrowse_async_response, ReqId, Msg_1},
+ State_1.
+
+do_interim_reply(undefined, _, _ReqId, _Msg) ->
+ ok;
+do_interim_reply(StreamTo, Response_format, ReqId, Msg) ->
+ Msg_1 = format_response_data(Response_format, Msg),
+ catch StreamTo ! {ibrowse_async_response, ReqId, Msg_1}.
+
+do_error_reply(#state{reqs = Reqs, tunnel_setup_queue = Tun_q} = State, Err) ->
+ ReqList = queue:to_list(Reqs),
+ lists:foreach(fun(#request{from=From, stream_to=StreamTo, req_id=ReqId,
+ response_format = Resp_format}) ->
+ ets:delete(ibrowse_stream, {req_id_pid, ReqId}),
+ do_reply(State, From, StreamTo, ReqId, Resp_format, {error, Err})
+ end, ReqList),
+ lists:foreach(
+ fun({From, _Url, _Headers, _Method, _Body, _Options, _Timeout}) ->
+ do_reply(State, From, undefined, undefined, undefined, Err)
+ end, Tun_q).
+
+fail_pipelined_requests(#state{reqs = Reqs, cur_req = CurReq} = State, Reply) ->
+ {_, Reqs_1} = queue:out(Reqs),
+ #request{from=From, stream_to=StreamTo, req_id=ReqId,
+ response_format = Resp_format} = CurReq,
+ State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+ do_error_reply(State_1#state{reqs = Reqs_1}, previous_request_failed).
+
+split_list_at(List, N) ->
+ split_list_at(List, N, []).
+
+split_list_at([], _, Acc) ->
+ {lists:reverse(Acc), []};
+split_list_at(List2, 0, List1) ->
+ {lists:reverse(List1), List2};
+split_list_at([H | List2], N, List1) ->
+ split_list_at(List2, N-1, [H | List1]).
+
+hexlist_to_integer(List) ->
+ hexlist_to_integer(lists:reverse(List), 1, 0).
+
+hexlist_to_integer([H | T], Multiplier, Acc) ->
+ hexlist_to_integer(T, Multiplier*16, Multiplier*to_ascii(H) + Acc);
+hexlist_to_integer([], _, Acc) ->
+ Acc.
+
+to_ascii($A) -> 10;
+to_ascii($a) -> 10;
+to_ascii($B) -> 11;
+to_ascii($b) -> 11;
+to_ascii($C) -> 12;
+to_ascii($c) -> 12;
+to_ascii($D) -> 13;
+to_ascii($d) -> 13;
+to_ascii($E) -> 14;
+to_ascii($e) -> 14;
+to_ascii($F) -> 15;
+to_ascii($f) -> 15;
+to_ascii($1) -> 1;
+to_ascii($2) -> 2;
+to_ascii($3) -> 3;
+to_ascii($4) -> 4;
+to_ascii($5) -> 5;
+to_ascii($6) -> 6;
+to_ascii($7) -> 7;
+to_ascii($8) -> 8;
+to_ascii($9) -> 9;
+to_ascii($0) -> 0.
+
+cancel_timer(undefined) -> ok;
+cancel_timer(Ref) -> _ = erlang:cancel_timer(Ref),
+ ok.
+
+cancel_timer(Ref, {eat_message, Msg}) ->
+ cancel_timer(Ref),
+ receive
+ Msg ->
+ ok
+ after 0 ->
+ ok
+ end.
+
+make_req_id() ->
+ now().
+
+to_lower(Str) ->
+ to_lower(Str, []).
+to_lower([H|T], Acc) when H >= $A, H =< $Z ->
+ to_lower(T, [H+32|Acc]);
+to_lower([H|T], Acc) ->
+ to_lower(T, [H|Acc]);
+to_lower([], Acc) ->
+ lists:reverse(Acc).
+
+shutting_down(#state{lb_ets_tid = undefined}) ->
+ ok;
+shutting_down(#state{lb_ets_tid = Tid,
+ cur_pipeline_size = Sz}) ->
+ catch ets:delete(Tid, {Sz, self()}).
+
+inc_pipeline_counter(#state{is_closing = true} = State) ->
+ State;
+inc_pipeline_counter(#state{cur_pipeline_size = Pipe_sz} = State) ->
+ State#state{cur_pipeline_size = Pipe_sz + 1}.
+
+dec_pipeline_counter(#state{is_closing = true} = State) ->
+ State;
+dec_pipeline_counter(#state{lb_ets_tid = undefined} = State) ->
+ State;
+dec_pipeline_counter(#state{cur_pipeline_size = Pipe_sz,
+ lb_ets_tid = Tid} = State) ->
+ ets:delete(Tid, {Pipe_sz, self()}),
+ ets:insert(Tid, {{Pipe_sz - 1, self()}, []}),
+ State#state{cur_pipeline_size = Pipe_sz - 1}.
+
+flatten([H | _] = L) when is_integer(H) ->
+ L;
+flatten([H | _] = L) when is_list(H) ->
+ lists:flatten(L);
+flatten([]) ->
+ [].
+
+get_stream_chunk_size(Options) ->
+ case lists:keysearch(stream_chunk_size, 1, Options) of
+ {value, {_, V}} when V > 0 ->
+ V;
+ _ ->
+ ?DEFAULT_STREAM_CHUNK_SIZE
+ end.
+
+set_inac_timer(State) ->
+ cancel_timer(State#state.inactivity_timer_ref),
+ set_inac_timer(State#state{inactivity_timer_ref = undefined},
+ get_inac_timeout(State)).
+
+set_inac_timer(State, Timeout) when is_integer(Timeout) ->
+ Ref = erlang:send_after(Timeout, self(), timeout),
+ State#state{inactivity_timer_ref = Ref};
+set_inac_timer(State, _) ->
+ State.
+
+get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->
+ get_value(inactivity_timeout, Opts, infinity);
+get_inac_timeout(#state{cur_req = undefined}) ->
+ case ibrowse:get_config_value(inactivity_timeout, undefined) of
+ Val when is_integer(Val) ->
+ Val;
+ _ ->
+ case application:get_env(ibrowse, inactivity_timeout) of
+ {ok, Val} when is_integer(Val), Val > 0 ->
+ Val;
+ _ ->
+ 10000
+ end
+ end.
+
+trace_request(Req) ->
+ case get(my_trace_flag) of
+ true ->
+ %% Avoid the binary operations if trace is not on...
+ NReq = to_binary(Req),
+ do_trace("Sending request: ~n"
+ "--- Request Begin ---~n~s~n"
+ "--- Request End ---~n", [NReq]);
+ _ -> ok
+ end.
+
+trace_request_body(Body) ->
+ case get(my_trace_flag) of
+ true ->
+ %% Avoid the binary operations if trace is not on...
+ NBody = to_binary(Body),
+ case size(NBody) > 1024 of
+ true ->
+ ok;
+ false ->
+ do_trace("Sending request body: ~n"
+ "--- Request Body Begin ---~n~s~n"
+ "--- Request Body End ---~n", [NBody])
+ end;
+ false ->
+ ok
+ end.
+
+to_binary(X) when is_list(X) -> list_to_binary(X);
+to_binary(X) when is_binary(X) -> X.
diff --git a/1.1.x/src/ibrowse/ibrowse_lb.erl b/1.1.x/src/ibrowse/ibrowse_lb.erl
new file mode 100644
index 00000000..0e001d48
--- /dev/null
+++ b/1.1.x/src/ibrowse/ibrowse_lb.erl
@@ -0,0 +1,235 @@
+%%%-------------------------------------------------------------------
+%%% File : ibrowse_lb.erl
+%%% Author : chandru <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description :
+%%%
+%%% Created : 6 Mar 2008 by chandru <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_lb).
+-author(chandru).
+-behaviour(gen_server).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+-export([
+ start_link/1,
+ spawn_connection/5,
+ stop/1
+ ]).
+
+%% gen_server callbacks
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+ ]).
+
+-record(state, {parent_pid,
+ ets_tid,
+ host,
+ port,
+ max_sessions,
+ max_pipeline_size,
+ num_cur_sessions = 0}).
+
+-include("ibrowse.hrl").
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link/0
+%% Description: Starts the server
+%%--------------------------------------------------------------------
+start_link(Args) ->
+ gen_server:start_link(?MODULE, Args, []).
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init/1
+%% Description: Initiates the server
+%% Returns: {ok, State} |
+%% {ok, State, Timeout} |
+%% ignore |
+%% {stop, Reason}
+%%--------------------------------------------------------------------
+init([Host, Port]) ->
+ process_flag(trap_exit, true),
+ Max_sessions = ibrowse:get_config_value({max_sessions, Host, Port}, 10),
+ Max_pipe_sz = ibrowse:get_config_value({max_pipeline_size, Host, Port}, 10),
+ put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
+ put(ibrowse_trace_token, ["LB: ", Host, $:, integer_to_list(Port)]),
+ Tid = ets:new(ibrowse_lb, [public, ordered_set]),
+ {ok, #state{parent_pid = whereis(ibrowse),
+ host = Host,
+ port = Port,
+ ets_tid = Tid,
+ max_pipeline_size = Max_pipe_sz,
+ max_sessions = Max_sessions}}.
+
+spawn_connection(Lb_pid, Url,
+ Max_sessions,
+ Max_pipeline_size,
+ SSL_options)
+ when is_pid(Lb_pid),
+ is_record(Url, url),
+ is_integer(Max_pipeline_size),
+ is_integer(Max_sessions) ->
+ gen_server:call(Lb_pid,
+ {spawn_connection, Url, Max_sessions, Max_pipeline_size, SSL_options}).
+
+stop(Lb_pid) ->
+ case catch gen_server:call(Lb_pid, stop) of
+ {'EXIT', {timeout, _}} ->
+ exit(Lb_pid, kill);
+ ok ->
+ ok
+ end.
+%%--------------------------------------------------------------------
+%% Function: handle_call/3
+%% Description: Handling call messages
+%% Returns: {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} | (terminate/2 is called)
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+% handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
+% #state{max_sessions = Max_sess,
+% ets_tid = Tid,
+% max_pipeline_size = Max_pipe_sz,
+% num_cur_sessions = Num} = State)
+% when Num >= Max ->
+% Reply = find_best_connection(Tid),
+% {reply, sorry_dude_reuse, State};
+
+%% Update max_sessions in #state with supplied value
+handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
+ #state{num_cur_sessions = Num} = State)
+ when Num >= Max_sess ->
+ State_1 = maybe_create_ets(State),
+ Reply = find_best_connection(State_1#state.ets_tid, Max_pipe),
+ {reply, Reply, State_1#state{max_sessions = Max_sess}};
+
+handle_call({spawn_connection, Url, _Max_sess, _Max_pipe, SSL_options}, _From,
+ #state{num_cur_sessions = Cur} = State) ->
+ State_1 = maybe_create_ets(State),
+ Tid = State_1#state.ets_tid,
+ {ok, Pid} = ibrowse_http_client:start_link({Tid, Url, SSL_options}),
+ ets:insert(Tid, {{1, Pid}, []}),
+ {reply, {ok, Pid}, State_1#state{num_cur_sessions = Cur + 1}};
+
+handle_call(stop, _From, #state{ets_tid = undefined} = State) ->
+ gen_server:reply(_From, ok),
+ {stop, normal, State};
+
+handle_call(stop, _From, #state{ets_tid = Tid} = State) ->
+ ets:foldl(fun({{_, Pid}, _}, Acc) ->
+ ibrowse_http_client:stop(Pid),
+ Acc
+ end, [], Tid),
+ gen_server:reply(_From, ok),
+ {stop, normal, State};
+
+handle_call(Request, _From, State) ->
+ Reply = {unknown_request, Request},
+ {reply, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast/2
+%% Description: Handling cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info/2
+%% Description: Handling all non call/cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_info({'EXIT', Parent, _Reason}, #state{parent_pid = Parent} = State) ->
+ {stop, normal, State};
+
+handle_info({'EXIT', _Pid, _Reason}, #state{ets_tid = undefined} = State) ->
+ {noreply, State};
+
+handle_info({'EXIT', Pid, _Reason},
+ #state{num_cur_sessions = Cur,
+ ets_tid = Tid} = State) ->
+ ets:match_delete(Tid, {{'_', Pid}, '_'}),
+ Cur_1 = Cur - 1,
+ State_1 = case Cur_1 of
+ 0 ->
+ ets:delete(Tid),
+ State#state{ets_tid = undefined};
+ _ ->
+ State
+ end,
+ {noreply, State_1#state{num_cur_sessions = Cur_1}};
+
+handle_info({trace, Bool}, #state{ets_tid = undefined} = State) ->
+ put(my_trace_flag, Bool),
+ {noreply, State};
+
+handle_info({trace, Bool}, #state{ets_tid = Tid} = State) ->
+ ets:foldl(fun({{_, Pid}, _}, Acc) when is_pid(Pid) ->
+ catch Pid ! {trace, Bool},
+ Acc;
+ (_, Acc) ->
+ Acc
+ end, undefined, Tid),
+ put(my_trace_flag, Bool),
+ {noreply, State};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: terminate/2
+%% Description: Shutdown the server
+%% Returns: any (ignored by gen_server)
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Convert process state when code is changed
+%% Returns: {ok, NewState}
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
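+%% The sessions table is an ordered_set keyed on {Pipeline_size, Pid},
+%% so ets:first/1 yields the least-loaded connection. If even that one
+%% has hit Max_pipe, the caller is asked to retry later.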
+find_best_connection(Tid, Max_pipe) ->
+ case ets:first(Tid) of
+ {Cur_sz, Pid} when Cur_sz < Max_pipe ->
+ ets:delete(Tid, {Cur_sz, Pid}),
+ ets:insert(Tid, {{Cur_sz + 1, Pid}, []}),
+ {ok, Pid};
+ _ ->
+ {error, retry_later}
+ end.
+
+maybe_create_ets(#state{ets_tid = undefined} = State) ->
+ Tid = ets:new(ibrowse_lb, [public, ordered_set]),
+ State#state{ets_tid = Tid};
+maybe_create_ets(State) ->
+ State.
diff --git a/1.1.x/src/ibrowse/ibrowse_lib.erl b/1.1.x/src/ibrowse/ibrowse_lib.erl
new file mode 100644
index 00000000..3cbe3ace
--- /dev/null
+++ b/1.1.x/src/ibrowse/ibrowse_lib.erl
@@ -0,0 +1,391 @@
+%%% File : ibrowse_lib.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description :
+%%% Created : 27 Feb 2004 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%% @doc Module with a few useful functions
+
+-module(ibrowse_lib).
+-author('chandru').
+-ifdef(debug).
+-compile(export_all).
+-endif.
+
+-include("ibrowse.hrl").
+
+-export([
+ get_trace_status/2,
+ do_trace/2,
+ do_trace/3,
+ url_encode/1,
+ decode_rfc822_date/1,
+ status_code/1,
+ encode_base64/1,
+ decode_base64/1,
+ get_value/2,
+ get_value/3,
+ parse_url/1,
+ printable_date/0
+ ]).
+
+get_trace_status(Host, Port) ->
+ ibrowse:get_config_value({trace, Host, Port}, false).
+
+%% @doc URL-encodes a string based on RFC 1738. Returns a flat list.
+%% @spec url_encode(Str) -> UrlEncodedStr
+%% Str = string()
+%% UrlEncodedStr = string()
+url_encode(Str) when is_list(Str) ->
+ url_encode_char(lists:reverse(Str), []).
+
+url_encode_char([X | T], Acc) when X >= $0, X =< $9 ->
+ url_encode_char(T, [X | Acc]);
+url_encode_char([X | T], Acc) when X >= $a, X =< $z ->
+ url_encode_char(T, [X | Acc]);
+url_encode_char([X | T], Acc) when X >= $A, X =< $Z ->
+ url_encode_char(T, [X | Acc]);
+url_encode_char([X | T], Acc) when X == $-; X == $_; X == $. ->
+ url_encode_char(T, [X | Acc]);
+url_encode_char([32 | T], Acc) ->
+ url_encode_char(T, [$+ | Acc]);
+url_encode_char([X | T], Acc) ->
+ url_encode_char(T, [$%, d2h(X bsr 4), d2h(X band 16#0f) | Acc]);
+url_encode_char([], Acc) ->
+ Acc.
+
+d2h(N) when N<10 -> N+$0;
+d2h(N) -> N+$a-10.
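+
+%% For example: url_encode("a b/c") -> "a+b%2fc"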
+
+decode_rfc822_date(String) when is_list(String) ->
+ case catch decode_rfc822_date_1(string:tokens(String, ", \t\r\n")) of
+ {'EXIT', _} ->
+ {error, invalid_date};
+ Res ->
+ Res
+ end.
+
+% TODO: Have to handle the Zone
+decode_rfc822_date_1([_,DayInt,Month,Year, Time,Zone]) ->
+ decode_rfc822_date_1([DayInt,Month,Year, Time,Zone]);
+decode_rfc822_date_1([Day,Month,Year, Time,_Zone]) ->
+ DayI = list_to_integer(Day),
+ MonthI = month_int(Month),
+ YearI = list_to_integer(Year),
+ TimeTup = case string:tokens(Time, ":") of
+ [H,M] ->
+ {list_to_integer(H),
+ list_to_integer(M),
+ 0};
+ [H,M,S] ->
+ {list_to_integer(H),
+ list_to_integer(M),
+ list_to_integer(S)}
+ end,
+ {{YearI,MonthI,DayI}, TimeTup}.
+
+month_int("Jan") -> 1;
+month_int("Feb") -> 2;
+month_int("Mar") -> 3;
+month_int("Apr") -> 4;
+month_int("May") -> 5;
+month_int("Jun") -> 6;
+month_int("Jul") -> 7;
+month_int("Aug") -> 8;
+month_int("Sep") -> 9;
+month_int("Oct") -> 10;
+month_int("Nov") -> 11;
+month_int("Dec") -> 12.
+
+%% @doc Given a status code, returns an atom describing the status code.
+%% @spec status_code(StatusCode::status_code()) -> StatusDescription
+%% status_code() = string() | integer()
+%% StatusDescription = atom()
+status_code(100) -> continue;
+status_code(101) -> switching_protocols;
+status_code(102) -> processing;
+status_code(200) -> ok;
+status_code(201) -> created;
+status_code(202) -> accepted;
+status_code(203) -> non_authoritative_information;
+status_code(204) -> no_content;
+status_code(205) -> reset_content;
+status_code(206) -> partial_content;
+status_code(207) -> multi_status;
+status_code(300) -> multiple_choices;
+status_code(301) -> moved_permanently;
+status_code(302) -> found;
+status_code(303) -> see_other;
+status_code(304) -> not_modified;
+status_code(305) -> use_proxy;
+status_code(306) -> unused;
+status_code(307) -> temporary_redirect;
+status_code(400) -> bad_request;
+status_code(401) -> unauthorized;
+status_code(402) -> payment_required;
+status_code(403) -> forbidden;
+status_code(404) -> not_found;
+status_code(405) -> method_not_allowed;
+status_code(406) -> not_acceptable;
+status_code(407) -> proxy_authentication_required;
+status_code(408) -> request_timeout;
+status_code(409) -> conflict;
+status_code(410) -> gone;
+status_code(411) -> length_required;
+status_code(412) -> precondition_failed;
+status_code(413) -> request_entity_too_large;
+status_code(414) -> request_uri_too_long;
+status_code(415) -> unsupported_media_type;
+status_code(416) -> requested_range_not_satisfiable;
+status_code(417) -> expectation_failed;
+status_code(422) -> unprocessable_entity;
+status_code(423) -> locked;
+status_code(424) -> failed_dependency;
+status_code(500) -> internal_server_error;
+status_code(501) -> not_implemented;
+status_code(502) -> bad_gateway;
+status_code(503) -> service_unavailable;
+status_code(504) -> gateway_timeout;
+status_code(505) -> http_version_not_supported;
+status_code(507) -> insufficient_storage;
+status_code(X) when is_list(X) -> status_code(list_to_integer(X));
+status_code(_) -> unknown_status_code.
+
+%% @doc Implements the base64 encoding algorithm. The output data type matches the input data type.
+%% @spec encode_base64(In) -> Out
+%% In = string() | binary()
+%% Out = string() | binary()
+encode_base64(List) when is_list(List) ->
+ binary_to_list(base64:encode(List));
+encode_base64(Bin) when is_binary(Bin) ->
+ base64:encode(Bin).
+
+%% @doc Implements the base64 decoding algorithm. The output data type matches the input data type.
+%% @spec decode_base64(In) -> Out | exit({error, invalid_input})
+%% In = string() | binary()
+%% Out = string() | binary()
+decode_base64(List) when is_list(List) ->
+ binary_to_list(base64:decode(List));
+decode_base64(Bin) when is_binary(Bin) ->
+ base64:decode(Bin).
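+
+%% For example:
+%%
+%%   encode_base64("user:pass")        -> "dXNlcjpwYXNz"
+%%   decode_base64(<<"dXNlcjpwYXNz">>) -> <<"user:pass">>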
+
+get_value(Tag, TVL, DefVal) ->
+ case lists:keysearch(Tag, 1, TVL) of
+ false ->
+ DefVal;
+ {value, {_, Val}} ->
+ Val
+ end.
+
+get_value(Tag, TVL) ->
+ {value, {_, V}} = lists:keysearch(Tag,1,TVL),
+ V.
+
+parse_url(Url) ->
+ case parse_url(Url, get_protocol, #url{abspath=Url}, []) of
+ #url{host_type = undefined, host = Host} = UrlRec ->
+ case inet_parse:address(Host) of
+ {ok, {_, _, _, _, _, _, _, _}} ->
+ UrlRec#url{host_type = ipv6_address};
+ {ok, {_, _, _, _}} ->
+ UrlRec#url{host_type = ipv4_address};
+ _ ->
+ UrlRec#url{host_type = hostname}
+ end;
+ Else ->
+ Else
+ end.
+
+parse_url([$:, $/, $/ | _], get_protocol, Url, []) ->
+ {invalid_uri_1, Url};
+parse_url([$:, $/, $/ | T], get_protocol, Url, TmpAcc) ->
+ Prot = list_to_existing_atom(lists:reverse(TmpAcc)),
+ parse_url(T, get_username,
+ Url#url{protocol = Prot},
+ []);
+parse_url([H | T], get_username, Url, TmpAcc) when H == $/;
+ H == $? ->
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
+ %% No username/password. No port number
+ Url#url{host = lists:reverse(TmpAcc),
+ port = default_port(Url#url.protocol),
+ path = Path};
+parse_url([$: | T], get_username, Url, TmpAcc) ->
+ %% It is possible that no username/password has been
+ %% specified. But we'll continue with the assumption that there is
+ %% a username/password. If we encounter a '@' later on, there is a
+ %% username/password indeed. If we encounter a '/', it was
+ %% actually the hostname
+ parse_url(T, get_password,
+ Url#url{username = lists:reverse(TmpAcc)},
+ []);
+parse_url([$@ | T], get_username, Url, TmpAcc) ->
+ parse_url(T, get_host,
+ Url#url{username = lists:reverse(TmpAcc),
+ password = ""},
+ []);
+parse_url([$[ | T], get_username, Url, []) ->
+ % IPv6 address literals are enclosed by square brackets:
+ % http://www.ietf.org/rfc/rfc2732.txt
+ parse_url(T, get_ipv6_address, Url#url{host_type = ipv6_address}, []);
+parse_url([$[ | T], get_username, _Url, TmpAcc) ->
+ {error, {invalid_username_or_host, lists:reverse(TmpAcc) ++ "[" ++ T}};
+parse_url([$[ | _], get_password, _Url, []) ->
+ {error, missing_password};
+parse_url([$[ | T], get_password, Url, TmpAcc) ->
+ % IPv6 address literals are enclosed by square brackets:
+ % http://www.ietf.org/rfc/rfc2732.txt
+ parse_url(T, get_ipv6_address,
+ Url#url{host_type = ipv6_address,
+ password = lists:reverse(TmpAcc)},
+ []);
+parse_url([$@ | T], get_password, Url, TmpAcc) ->
+ parse_url(T, get_host,
+ Url#url{password = lists:reverse(TmpAcc)},
+ []);
+parse_url([H | T], get_password, Url, TmpAcc) when H == $/;
+ H == $? ->
+ %% Ok, what we thought was the username/password was the hostname
+ %% and port number
+ #url{username=User} = Url,
+ Port = list_to_integer(lists:reverse(TmpAcc)),
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
+ Url#url{host = User,
+ port = Port,
+ username = undefined,
+ password = undefined,
+ path = Path};
+parse_url([$] | T], get_ipv6_address, #url{protocol = Prot} = Url, TmpAcc) ->
+ Addr = lists:reverse(TmpAcc),
+ case inet_parse:address(Addr) of
+ {ok, {_, _, _, _, _, _, _, _}} ->
+ Url2 = Url#url{host = Addr, port = default_port(Prot)},
+ case T of
+ [$: | T2] ->
+ parse_url(T2, get_port, Url2, []);
+ [$/ | T2] ->
+ Url2#url{path = [$/ | T2]};
+ [$? | T2] ->
+ Url2#url{path = [$/, $? | T2]};
+ [] ->
+ Url2#url{path = "/"};
+ _ ->
+ {error, {invalid_host, "[" ++ Addr ++ "]" ++ T}}
+ end;
+ _ ->
+ {error, {invalid_ipv6_address, Addr}}
+ end;
+parse_url([$[ | T], get_host, #url{} = Url, []) ->
+ parse_url(T, get_ipv6_address, Url#url{host_type = ipv6_address}, []);
+parse_url([$: | T], get_host, #url{} = Url, TmpAcc) ->
+ parse_url(T, get_port,
+ Url#url{host = lists:reverse(TmpAcc)},
+ []);
+parse_url([H | T], get_host, #url{protocol=Prot} = Url, TmpAcc) when H == $/;
+ H == $? ->
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
+ Url#url{host = lists:reverse(TmpAcc),
+ port = default_port(Prot),
+ path = Path};
+parse_url([H | T], get_port, #url{protocol=Prot} = Url, TmpAcc) when H == $/;
+ H == $? ->
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
+ Port = case TmpAcc of
+ [] ->
+ default_port(Prot);
+ _ ->
+ list_to_integer(lists:reverse(TmpAcc))
+ end,
+ Url#url{port = Port, path = Path};
+parse_url([H | T], State, Url, TmpAcc) ->
+ parse_url(T, State, Url, [H | TmpAcc]);
+parse_url([], get_host, Url, TmpAcc) when TmpAcc /= [] ->
+ Url#url{host = lists:reverse(TmpAcc),
+ port = default_port(Url#url.protocol),
+ path = "/"};
+parse_url([], get_username, Url, TmpAcc) when TmpAcc /= [] ->
+ Url#url{host = lists:reverse(TmpAcc),
+ port = default_port(Url#url.protocol),
+ path = "/"};
+parse_url([], get_port, #url{protocol=Prot} = Url, TmpAcc) ->
+ Port = case TmpAcc of
+ [] ->
+ default_port(Prot);
+ _ ->
+ list_to_integer(lists:reverse(TmpAcc))
+ end,
+ Url#url{port = Port,
+ path = "/"};
+parse_url([], get_password, Url, TmpAcc) ->
+ %% Ok, what we thought was the username/password was the hostname
+ %% and port number
+ #url{username=User} = Url,
+ Port = case TmpAcc of
+ [] ->
+ default_port(Url#url.protocol);
+ _ ->
+ list_to_integer(lists:reverse(TmpAcc))
+ end,
+ Url#url{host = User,
+ port = Port,
+ username = undefined,
+ password = undefined,
+ path = "/"};
+parse_url([], State, Url, TmpAcc) ->
+ {invalid_uri_2, State, Url, TmpAcc}.
+
+default_port(http) -> 80;
+default_port(https) -> 443;
+default_port(ftp) -> 21.
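+
+%% A sketch of typical results (only a subset of the url record fields
+%% is shown; the URLs are illustrative):
+%%
+%%   parse_url("http://user:pass@host:8080/path") ->
+%%       #url{protocol = http, username = "user", password = "pass",
+%%            host = "host", port = 8080, path = "/path"}
+%%
+%%   parse_url("https://[2001:db8::1]/x") ->
+%%       #url{protocol = https, host = "2001:db8::1", port = 443,
+%%            host_type = ipv6_address, path = "/x"}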
+
+printable_date() ->
+ {{Y,Mo,D},{H, M, S}} = calendar:local_time(),
+ {_,_,MicroSecs} = now(),
+ [integer_to_list(Y),
+ $-,
+ integer_to_list(Mo),
+ $-,
+ integer_to_list(D),
+ $_,
+ integer_to_list(H),
+ $:,
+ integer_to_list(M),
+ $:,
+ integer_to_list(S),
+ $:,
+ integer_to_list(MicroSecs div 1000)].
+
+do_trace(Fmt, Args) ->
+ do_trace(get(my_trace_flag), Fmt, Args).
+
+-ifdef(DEBUG).
+do_trace(_, Fmt, Args) ->
+ io:format("~s -- (~s) - "++Fmt,
+ [printable_date(),
+ get(ibrowse_trace_token) | Args]).
+-else.
+do_trace(true, Fmt, Args) ->
+ io:format("~s -- (~s) - "++Fmt,
+ [printable_date(),
+ get(ibrowse_trace_token) | Args]);
+do_trace(_, _, _) ->
+ ok.
+-endif.
diff --git a/1.1.x/src/ibrowse/ibrowse_sup.erl b/1.1.x/src/ibrowse/ibrowse_sup.erl
new file mode 100644
index 00000000..ace33d16
--- /dev/null
+++ b/1.1.x/src/ibrowse/ibrowse_sup.erl
@@ -0,0 +1,63 @@
+%%%-------------------------------------------------------------------
+%%% File : ibrowse_sup.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description :
+%%%
+%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_sup).
+-behaviour(supervisor).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+%%--------------------------------------------------------------------
+-export([
+ start_link/0
+ ]).
+
+%%--------------------------------------------------------------------
+%% Internal exports
+%%--------------------------------------------------------------------
+-export([
+ init/1
+ ]).
+
+%%--------------------------------------------------------------------
+%% Macros
+%%--------------------------------------------------------------------
+-define(SERVER, ?MODULE).
+
+%%--------------------------------------------------------------------
+%% Records
+%%--------------------------------------------------------------------
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link/0
+%% Description: Starts the supervisor
+%%--------------------------------------------------------------------
+start_link() ->
+ supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Func: init/1
+%% Returns: {ok, {SupFlags, [ChildSpec]}} |
+%% ignore |
+%% {error, Reason}
+%%--------------------------------------------------------------------
+init([]) ->
+ AChild = {ibrowse,{ibrowse,start_link,[]},
+ permanent,2000,worker,[ibrowse, ibrowse_http_client]},
+ {ok,{{one_for_all,10,1}, [AChild]}}.
+
+%%====================================================================
+%% Internal functions
+%%====================================================================
diff --git a/1.1.x/src/ibrowse/ibrowse_test.erl b/1.1.x/src/ibrowse/ibrowse_test.erl
new file mode 100644
index 00000000..ff3b5304
--- /dev/null
+++ b/1.1.x/src/ibrowse/ibrowse_test.erl
@@ -0,0 +1,513 @@
+%%% File : ibrowse_test.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description : Test ibrowse
+%%% Created : 14 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+
+-module(ibrowse_test).
+-export([
+ load_test/3,
+ send_reqs_1/3,
+ do_send_req/2,
+ unit_tests/0,
+ unit_tests/1,
+ unit_tests_1/2,
+ ue_test/0,
+ ue_test/1,
+ verify_chunked_streaming/0,
+ verify_chunked_streaming/1,
+ test_chunked_streaming_once/0,
+ i_do_async_req_list/4,
+ test_stream_once/3,
+ test_stream_once/4,
+ test_20122010/0,
+ test_20122010/1
+ ]).
+
+test_stream_once(Url, Method, Options) ->
+ test_stream_once(Url, Method, Options, 5000).
+
+test_stream_once(Url, Method, Options, Timeout) ->
+ case ibrowse:send_req(Url, [], Method, [], [{stream_to, {self(), once}} | Options], Timeout) of
+ {ibrowse_req_id, Req_id} ->
+ case ibrowse:stream_next(Req_id) of
+ ok ->
+ test_stream_once(Req_id);
+ Err ->
+ Err
+ end;
+ Err ->
+ Err
+ end.
+
+test_stream_once(Req_id) ->
+ receive
+ {ibrowse_async_headers, Req_id, StatCode, Headers} ->
+ io:format("Recvd headers~n~p~n", [{ibrowse_async_headers, Req_id, StatCode, Headers}]),
+ case ibrowse:stream_next(Req_id) of
+ ok ->
+ test_stream_once(Req_id);
+ Err ->
+ Err
+ end;
+ {ibrowse_async_response, Req_id, {error, Err}} ->
+ io:format("Recvd error: ~p~n", [Err]);
+ {ibrowse_async_response, Req_id, Body_1} ->
+ io:format("Recvd body part: ~n~p~n", [{ibrowse_async_response, Req_id, Body_1}]),
+ case ibrowse:stream_next(Req_id) of
+ ok ->
+ test_stream_once(Req_id);
+ Err ->
+ Err
+ end;
+ {ibrowse_async_response_end, Req_id} ->
+ ok
+ end.
+%% Use ibrowse:set_max_sessions/3 and ibrowse:set_max_pipeline_size/3 to
+%% tweak settings before running the load test. The defaults are 10 and 10.
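+%%
+%% A hypothetical invocation (host and figures are illustrative):
+%%
+%%   ibrowse:set_max_sessions("www.example.com", 80, 20),
+%%   ibrowse:set_max_pipeline_size("www.example.com", 80, 20),
+%%   ibrowse_test:load_test("http://www.example.com/", 10, 100).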
+load_test(Url, NumWorkers, NumReqsPerWorker) when is_list(Url),
+ is_integer(NumWorkers),
+ is_integer(NumReqsPerWorker),
+ NumWorkers > 0,
+ NumReqsPerWorker > 0 ->
+ proc_lib:spawn(?MODULE, send_reqs_1, [Url, NumWorkers, NumReqsPerWorker]).
+
+send_reqs_1(Url, NumWorkers, NumReqsPerWorker) ->
+ Start_time = now(),
+ ets:new(pid_table, [named_table, public]),
+ ets:new(ibrowse_test_results, [named_table, public]),
+ ets:new(ibrowse_errors, [named_table, public, ordered_set]),
+ init_results(),
+ process_flag(trap_exit, true),
+ log_msg("Starting spawning of workers...~n", []),
+ spawn_workers(Url, NumWorkers, NumReqsPerWorker),
+ log_msg("Finished spawning workers...~n", []),
+ do_wait(),
+ End_time = now(),
+ log_msg("All workers are done...~n", []),
+ log_msg("ibrowse_test_results table: ~n~p~n", [ets:tab2list(ibrowse_test_results)]),
+ log_msg("Start time: ~1000.p~n", [calendar:now_to_local_time(Start_time)]),
+ log_msg("End time : ~1000.p~n", [calendar:now_to_local_time(End_time)]),
+ Elapsed_time_secs = trunc(timer:now_diff(End_time, Start_time) / 1000000),
+ log_msg("Elapsed : ~p~n", [Elapsed_time_secs]),
+ log_msg("Reqs/sec : ~p~n", [round(trunc((NumWorkers*NumReqsPerWorker) / Elapsed_time_secs))]),
+ dump_errors().
+
+init_results() ->
+ ets:insert(ibrowse_test_results, {crash, 0}),
+ ets:insert(ibrowse_test_results, {send_failed, 0}),
+ ets:insert(ibrowse_test_results, {other_error, 0}),
+ ets:insert(ibrowse_test_results, {success, 0}),
+ ets:insert(ibrowse_test_results, {retry_later, 0}),
+ ets:insert(ibrowse_test_results, {trid_mismatch, 0}),
+ ets:insert(ibrowse_test_results, {success_no_trid, 0}),
+ ets:insert(ibrowse_test_results, {failed, 0}),
+ ets:insert(ibrowse_test_results, {timeout, 0}),
+ ets:insert(ibrowse_test_results, {req_id, 0}).
+
+spawn_workers(_Url, 0, _) ->
+ ok;
+spawn_workers(Url, NumWorkers, NumReqsPerWorker) ->
+ Pid = proc_lib:spawn_link(?MODULE, do_send_req, [Url, NumReqsPerWorker]),
+ ets:insert(pid_table, {Pid, []}),
+ spawn_workers(Url, NumWorkers - 1, NumReqsPerWorker).
+
+do_wait() ->
+ receive
+ {'EXIT', _, normal} ->
+ do_wait();
+ {'EXIT', Pid, Reason} ->
+ ets:delete(pid_table, Pid),
+ ets:insert(ibrowse_errors, {Pid, Reason}),
+ ets:update_counter(ibrowse_test_results, crash, 1),
+ do_wait();
+ Msg ->
+ io:format("Recvd unknown message...~p~n", [Msg]),
+ do_wait()
+ after 1000 ->
+ case ets:info(pid_table, size) of
+ 0 ->
+ done;
+ _ ->
+ do_wait()
+ end
+ end.
+
+do_send_req(Url, NumReqs) ->
+ do_send_req_1(Url, NumReqs).
+
+do_send_req_1(_Url, 0) ->
+ ets:delete(pid_table, self());
+do_send_req_1(Url, NumReqs) ->
+ Counter = integer_to_list(ets:update_counter(ibrowse_test_results, req_id, 1)),
+ case ibrowse:send_req(Url, [{"ib_req_id", Counter}], get, [], [], 10000) of
+ {ok, _Status, Headers, _Body} ->
+ case lists:keysearch("ib_req_id", 1, Headers) of
+ {value, {_, Counter}} ->
+ ets:update_counter(ibrowse_test_results, success, 1);
+ {value, _} ->
+ ets:update_counter(ibrowse_test_results, trid_mismatch, 1);
+ false ->
+ ets:update_counter(ibrowse_test_results, success_no_trid, 1)
+ end;
+ {error, req_timedout} ->
+ ets:update_counter(ibrowse_test_results, timeout, 1);
+ {error, send_failed} ->
+ ets:update_counter(ibrowse_test_results, send_failed, 1);
+ {error, retry_later} ->
+ ets:update_counter(ibrowse_test_results, retry_later, 1);
+ Err ->
+ ets:insert(ibrowse_errors, {now(), Err}),
+ ets:update_counter(ibrowse_test_results, other_error, 1),
+ ok
+ end,
+ do_send_req_1(Url, NumReqs-1).
+
+dump_errors() ->
+ case ets:info(ibrowse_errors, size) of
+ 0 ->
+ ok;
+ _ ->
+ {A, B, C} = now(),
+ Filename = lists:flatten(
+ io_lib:format("ibrowse_errors_~p_~p_~p.txt" , [A, B, C])),
+ case file:open(Filename, [write, delayed_write, raw]) of
+ {ok, Iod} ->
+ dump_errors(ets:first(ibrowse_errors), Iod);
+ Err ->
+ io:format("failed to create file ~s. Reason: ~p~n", [Filename, Err]),
+ ok
+ end
+ end.
+
+dump_errors('$end_of_table', Iod) ->
+ file:close(Iod);
+dump_errors(Key, Iod) ->
+ [{_, Term}] = ets:lookup(ibrowse_errors, Key),
+ file:write(Iod, io_lib:format("~p~n", [Term])),
+ dump_errors(ets:next(ibrowse_errors, Key), Iod).
+
+%%------------------------------------------------------------------------------
+%% Unit Tests
+%%------------------------------------------------------------------------------
+-define(TEST_LIST, [{"http://intranet/messenger", get},
+ {"http://www.google.co.uk", get},
+ {"http://www.google.com", get},
+ {"http://www.google.com", options},
+ {"https://mail.google.com", get},
+ {"http://www.sun.com", get},
+ {"http://www.oracle.com", get},
+ {"http://www.bbc.co.uk", get},
+ {"http://www.bbc.co.uk", trace},
+ {"http://www.bbc.co.uk", options},
+ {"http://yaws.hyber.org", get},
+ {"http://jigsaw.w3.org/HTTP/ChunkedScript", get},
+ {"http://jigsaw.w3.org/HTTP/TE/foo.txt", get},
+ {"http://jigsaw.w3.org/HTTP/TE/bar.txt", get},
+ {"http://jigsaw.w3.org/HTTP/connection.html", get},
+ {"http://jigsaw.w3.org/HTTP/cc.html", get},
+ {"http://jigsaw.w3.org/HTTP/cc-private.html", get},
+ {"http://jigsaw.w3.org/HTTP/cc-proxy-revalidate.html", get},
+ {"http://jigsaw.w3.org/HTTP/cc-nocache.html", get},
+ {"http://jigsaw.w3.org/HTTP/h-content-md5.html", get},
+ {"http://jigsaw.w3.org/HTTP/h-retry-after.html", get},
+ {"http://jigsaw.w3.org/HTTP/h-retry-after-date.html", get},
+ {"http://jigsaw.w3.org/HTTP/neg", get},
+ {"http://jigsaw.w3.org/HTTP/negbad", get},
+ {"http://jigsaw.w3.org/HTTP/400/toolong/", get},
+ {"http://jigsaw.w3.org/HTTP/300/", get},
+ {"http://jigsaw.w3.org/HTTP/Basic/", get, [{basic_auth, {"guest", "guest"}}]},
+ {"http://jigsaw.w3.org/HTTP/CL/", get},
+ {"http://www.httpwatch.com/httpgallery/chunked/", get},
+ {"https://github.com", get, [{ssl_options, [{depth, 2}]}]},
+ {local_test_fun, test_20122010, []}
+ ]).
+
+unit_tests() ->
+ unit_tests([]).
+
+unit_tests(Options) ->
+ application:start(crypto),
+ application:start(public_key),
+ application:start(ssl),
+ (catch ibrowse_test_server:start_server(8181, tcp)),
+ ibrowse:start(),
+ Options_1 = Options ++ [{connect_timeout, 5000}],
+ {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]),
+ receive
+ {done, Pid} ->
+ ok;
+ {'DOWN', Ref, _, _, Info} ->
+ io:format("Test process crashed: ~p~n", [Info])
+ after 60000 ->
+ exit(Pid, kill),
+ io:format("Timed out waiting for tests to complete~n", [])
+ end.
+
+unit_tests_1(Parent, Options) ->
+ lists:foreach(fun({local_test_fun, Fun_name, Args}) ->
+ execute_req(local_test_fun, Fun_name, Args);
+ ({Url, Method}) ->
+ execute_req(Url, Method, Options);
+ ({Url, Method, X_Opts}) ->
+ execute_req(Url, Method, X_Opts ++ Options)
+ end, ?TEST_LIST),
+ Parent ! {done, self()}.
+
+verify_chunked_streaming() ->
+ verify_chunked_streaming([]).
+
+verify_chunked_streaming(Options) ->
+ io:format("~nVerifying that chunked streaming is working...~n", []),
+ Url = "http://www.httpwatch.com/httpgallery/chunked/",
+ io:format(" URL: ~s~n", [Url]),
+ io:format(" Fetching data without streaming...~n", []),
+ Result_without_streaming = ibrowse:send_req(
+ Url, [], get, [],
+ [{response_format, binary} | Options]),
+ io:format(" Fetching data with streaming as list...~n", []),
+ Async_response_list = do_async_req_list(
+ Url, get, [{response_format, list} | Options]),
+ io:format(" Fetching data with streaming as binary...~n", []),
+ Async_response_bin = do_async_req_list(
+ Url, get, [{response_format, binary} | Options]),
+ io:format(" Fetching data with streaming as binary, {active, once}...~n", []),
+ Async_response_bin_once = do_async_req_list(
+ Url, get, [once, {response_format, binary} | Options]),
+ Res1 = compare_responses(Result_without_streaming, Async_response_list, Async_response_bin),
+ Res2 = compare_responses(Result_without_streaming, Async_response_list, Async_response_bin_once),
+ case {Res1, Res2} of
+ {success, success} ->
+ io:format(" Chunked streaming working~n", []);
+ _ ->
+ ok
+ end.
+
+test_chunked_streaming_once() ->
+ test_chunked_streaming_once([]).
+
+test_chunked_streaming_once(Options) ->
+ io:format("~nTesting chunked streaming with the {stream_to, {Pid, once}} option...~n", []),
+ Url = "http://www.httpwatch.com/httpgallery/chunked/",
+ io:format(" URL: ~s~n", [Url]),
+ io:format(" Fetching data with streaming as binary, {active, once}...~n", []),
+ case do_async_req_list(Url, get, [once, {response_format, binary} | Options]) of
+ {ok, _, _, _} ->
+ io:format(" Success!~n", []);
+ Err ->
+ io:format(" Fail: ~p~n", [Err])
+ end.
+
+compare_responses({ok, St_code, _, Body}, {ok, St_code, _, Body}, {ok, St_code, _, Body}) ->
+ success;
+compare_responses({ok, St_code, _, Body_1}, {ok, St_code, _, Body_2}, {ok, St_code, _, Body_3}) ->
+ case Body_1 of
+ Body_2 ->
+ io:format("Body_1 and Body_2 match~n", []);
+ Body_3 ->
+ io:format("Body_1 and Body_3 match~n", []);
+ _ when Body_2 == Body_3 ->
+ io:format("Body_2 and Body_3 match~n", []);
+ _ ->
+ io:format("All three bodies are different!~n", [])
+ end,
+ io:format("Body_1 -> ~p~n", [Body_1]),
+ io:format("Body_2 -> ~p~n", [Body_2]),
+ io:format("Body_3 -> ~p~n", [Body_3]),
+ fail_bodies_mismatch;
+compare_responses(R1, R2, R3) ->
+ io:format("R1 -> ~p~n", [R1]),
+ io:format("R2 -> ~p~n", [R2]),
+ io:format("R3 -> ~p~n", [R3]),
+ fail.
+
+%% do_async_req_list(Url) ->
+%% do_async_req_list(Url, get).
+
+%% do_async_req_list(Url, Method) ->
+%% do_async_req_list(Url, Method, [{stream_to, self()},
+%% {stream_chunk_size, 1000}]).
+
+do_async_req_list(Url, Method, Options) ->
+ {Pid,_} = erlang:spawn_monitor(?MODULE, i_do_async_req_list,
+ [self(), Url, Method,
+ Options ++ [{stream_chunk_size, 1000}]]),
+%% io:format("Spawned process ~p~n", [Pid]),
+ wait_for_resp(Pid).
+
+wait_for_resp(Pid) ->
+ receive
+ {async_result, Pid, Res} ->
+ Res;
+ {async_result, Other_pid, _} ->
+ io:format("~p: Waiting for result from ~p: got from ~p~n", [self(), Pid, Other_pid]),
+ wait_for_resp(Pid);
+ {'DOWN', _, _, Pid, Reason} ->
+ {'EXIT', Reason};
+ {'DOWN', _, _, _, _} ->
+ wait_for_resp(Pid);
+ Msg ->
+ io:format("Recvd unknown message: ~p~n", [Msg]),
+ wait_for_resp(Pid)
+ after 100000 ->
+ {error, timeout}
+ end.
+
+i_do_async_req_list(Parent, Url, Method, Options) ->
+ Options_1 = case lists:member(once, Options) of
+ true ->
+ [{stream_to, {self(), once}} | (Options -- [once])];
+ false ->
+ [{stream_to, self()} | Options]
+ end,
+ Res = ibrowse:send_req(Url, [], Method, [], Options_1),
+ case Res of
+ {ibrowse_req_id, Req_id} ->
+ Result = wait_for_async_resp(Req_id, Options, undefined, undefined, []),
+ Parent ! {async_result, self(), Result};
+ Err ->
+ Parent ! {async_result, self(), Err}
+ end.
+
+wait_for_async_resp(Req_id, Options, Acc_Stat_code, Acc_Headers, Body) ->
+ receive
+ {ibrowse_async_headers, Req_id, StatCode, Headers} ->
+ %% io:format("Recvd headers...~n", []),
+ maybe_stream_next(Req_id, Options),
+ wait_for_async_resp(Req_id, Options, StatCode, Headers, Body);
+ {ibrowse_async_response_end, Req_id} ->
+ %% io:format("Recvd end of response.~n", []),
+ Body_1 = list_to_binary(lists:reverse(Body)),
+ {ok, Acc_Stat_code, Acc_Headers, Body_1};
+ {ibrowse_async_response, Req_id, Data} ->
+ maybe_stream_next(Req_id, Options),
+ %% io:format("Recvd data...~n", []),
+ wait_for_async_resp(Req_id, Options, Acc_Stat_code, Acc_Headers, [Data | Body]);
+ {ibrowse_async_response, Req_id, {error, _} = Err} ->
+ {ok, Acc_Stat_code, Acc_Headers, Err};
+ Err ->
+ {ok, Acc_Stat_code, Acc_Headers, Err}
+ after 10000 ->
+ {timeout, Acc_Stat_code, Acc_Headers, Body}
+ end.
+
+maybe_stream_next(Req_id, Options) ->
+ case lists:member(once, Options) of
+ true ->
+ ibrowse:stream_next(Req_id);
+ false ->
+ ok
+ end.
+
+execute_req(local_test_fun, Method, Args) ->
+ io:format(" ~-54.54w: ", [Method]),
+ Result = (catch apply(?MODULE, Method, Args)),
+ io:format("~p~n", [Result]);
+execute_req(Url, Method, Options) ->
+ io:format("~7.7w, ~50.50s: ", [Method, Url]),
+ Result = (catch ibrowse:send_req(Url, [], Method, [], Options)),
+ case Result of
+ {ok, SCode, _H, _B} ->
+ io:format("Status code: ~p~n", [SCode]);
+ Err ->
+ io:format("~p~n", [Err])
+ end.
+
+ue_test() ->
+ ue_test(lists:duplicate(1024, $?)).
+ue_test(Data) ->
+ {Time, Res} = timer:tc(ibrowse_lib, url_encode, [Data]),
+ io:format("Time -> ~p~n", [Time]),
+ io:format("Data Length -> ~p~n", [length(Data)]),
+ io:format("Res Length -> ~p~n", [length(Res)]).
+% io:format("Result -> ~s~n", [Res]).
+
+log_msg(Fmt, Args) ->
+ io:format("~s -- " ++ Fmt,
+ [ibrowse_lib:printable_date() | Args]).
+
+%%------------------------------------------------------------------------------
+%%
+%%------------------------------------------------------------------------------
+
+test_20122010() ->
+ test_20122010("http://localhost:8181").
+
+test_20122010(Url) ->
+ {ok, Pid} = ibrowse:spawn_worker_process(Url),
+ Expected_resp = <<"1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36-37-38-39-40-41-42-43-44-45-46-47-48-49-50-51-52-53-54-55-56-57-58-59-60-61-62-63-64-65-66-67-68-69-70-71-72-73-74-75-76-77-78-79-80-81-82-83-84-85-86-87-88-89-90-91-92-93-94-95-96-97-98-99-100">>,
+ Test_parent = self(),
+ Fun = fun() ->
+ do_test_20122010(Url, Pid, Expected_resp, Test_parent)
+ end,
+ Pids = [erlang:spawn_monitor(Fun) || _ <- lists:seq(1,10)],
+ wait_for_workers(Pids).
+
+wait_for_workers([{Pid, _Ref} | Pids]) ->
+ receive
+ {Pid, success} ->
+ wait_for_workers(Pids)
+ after 60000 ->
+ test_failed
+ end;
+wait_for_workers([]) ->
+ success.
+
+do_test_20122010(Url, Pid, Expected_resp, Test_parent) ->
+ do_test_20122010(10, Url, Pid, Expected_resp, Test_parent).
+
+do_test_20122010(0, _Url, _Pid, _Expected_resp, Test_parent) ->
+ Test_parent ! {self(), success};
+do_test_20122010(Rem_count, Url, Pid, Expected_resp, Test_parent) ->
+ {ibrowse_req_id, Req_id} = ibrowse:send_req_direct(
+ Pid,
+ Url ++ "/ibrowse_stream_once_chunk_pipeline_test",
+ [], get, [],
+ [{stream_to, {self(), once}},
+ {inactivity_timeout, 10000},
+ {include_ibrowse_req_id, true}]),
+ do_trace("~p -- sent request ~1000.p~n", [self(), Req_id]),
+ Req_id_str = lists:flatten(io_lib:format("~1000.p",[Req_id])),
+ receive
+ {ibrowse_async_headers, Req_id, "200", Headers} ->
+ case lists:keysearch("x-ibrowse-request-id", 1, Headers) of
+ {value, {_, Req_id_str}} ->
+ ok;
+ {value, {_, Req_id_1}} ->
+ do_trace("~p -- Sent req-id: ~1000.p. Recvd: ~1000.p~n",
+ [self(), Req_id, Req_id_1]),
+ exit(req_id_mismatch)
+ end
+ after 5000 ->
+ do_trace("~p -- response headers not received~n", [self()]),
+ exit({timeout, test_failed})
+ end,
+ do_trace("~p -- response headers received~n", [self()]),
+ ok = ibrowse:stream_next(Req_id),
+ case do_test_20122010_1(Expected_resp, Req_id, []) of
+ true ->
+ do_test_20122010(Rem_count - 1, Url, Pid, Expected_resp, Test_parent);
+ false ->
+ Test_parent ! {self(), failed}
+ end.
+
+do_test_20122010_1(Expected_resp, Req_id, Acc) ->
+ receive
+ {ibrowse_async_response, Req_id, Body_part} ->
+ ok = ibrowse:stream_next(Req_id),
+ do_test_20122010_1(Expected_resp, Req_id, [Body_part | Acc]);
+ {ibrowse_async_response_end, Req_id} ->
+ Acc_1 = list_to_binary(lists:reverse(Acc)),
+ Result = Acc_1 == Expected_resp,
+ do_trace("~p -- End of response. Result: ~p~n", [self(), Result]),
+ Result
+ after 1000 ->
+ exit({timeout, test_failed})
+ end.
+
+do_trace(Fmt, Args) ->
+ do_trace(get(my_trace_flag), Fmt, Args).
+
+do_trace(true, Fmt, Args) ->
+ io:format("~s -- " ++ Fmt, [ibrowse_lib:printable_date() | Args]);
+do_trace(_, _, _) ->
+ ok.
diff --git a/1.1.x/src/mochiweb/Makefile.am b/1.1.x/src/mochiweb/Makefile.am
new file mode 100644
index 00000000..752118df
--- /dev/null
+++ b/1.1.x/src/mochiweb/Makefile.am
@@ -0,0 +1,102 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+mochiwebebindir = $(localerlanglibdir)/mochiweb-7c2bc2/ebin
+
+mochiweb_file_collection = \
+ mochifmt.erl \
+ mochifmt_records.erl \
+ mochifmt_std.erl \
+ mochiglobal.erl \
+ mochihex.erl \
+ mochijson.erl \
+ mochijson2.erl \
+ mochilists.erl \
+ mochilogfile2.erl \
+ mochinum.erl \
+ mochitemp.erl \
+ mochiutf8.erl \
+ mochiweb.app.in \
+ mochiweb.erl \
+ mochiweb_acceptor.erl \
+ mochiweb_app.erl \
+ mochiweb_charref.erl \
+ mochiweb_cookies.erl \
+ mochiweb_cover.erl \
+ mochiweb_echo.erl \
+ mochiweb_headers.erl \
+ mochiweb_html.erl \
+ mochiweb_http.erl \
+ mochiweb_io.erl \
+ mochiweb_mime.erl \
+ mochiweb_multipart.erl \
+ mochiweb_request.erl \
+ mochiweb_response.erl \
+ mochiweb_skel.erl \
+ mochiweb_socket.erl \
+ mochiweb_socket_server.erl \
+ mochiweb_sup.erl \
+ mochiweb_util.erl \
+ reloader.erl
+
+mochiwebebin_make_generated_file_list = \
+ mochifmt.beam \
+ mochifmt_records.beam \
+ mochifmt_std.beam \
+ mochiglobal.beam \
+ mochihex.beam \
+ mochijson.beam \
+ mochijson2.beam \
+ mochilists.beam \
+ mochilogfile2.beam \
+ mochinum.beam \
+ mochitemp.beam \
+ mochiutf8.beam \
+ mochiweb.app \
+ mochiweb.beam \
+ mochiweb_acceptor.beam \
+ mochiweb_app.beam \
+ mochiweb_charref.beam \
+ mochiweb_cookies.beam \
+ mochiweb_cover.beam \
+ mochiweb_echo.beam \
+ mochiweb_headers.beam \
+ mochiweb_html.beam \
+ mochiweb_http.beam \
+ mochiweb_io.beam \
+ mochiweb_mime.beam \
+ mochiweb_multipart.beam \
+ mochiweb_request.beam \
+ mochiweb_response.beam \
+ mochiweb_skel.beam \
+ mochiweb_socket.beam \
+ mochiweb_socket_server.beam \
+ mochiweb_sup.beam \
+ mochiweb_util.beam \
+ reloader.beam
+
+mochiwebebin_DATA = \
+ $(mochiwebebin_make_generated_file_list)
+
+EXTRA_DIST = \
+ $(mochiweb_file_collection) \
+ internal.hrl
+
+CLEANFILES = \
+ $(mochiwebebin_make_generated_file_list)
+
+%.app: %.app.in
+ cp $< $@
+
+%.beam: %.erl
+	$(ERLC) $(ERLC_FLAGS) $<
diff --git a/1.1.x/src/mochiweb/internal.hrl b/1.1.x/src/mochiweb/internal.hrl
new file mode 100644
index 00000000..6db899a0
--- /dev/null
+++ b/1.1.x/src/mochiweb/internal.hrl
@@ -0,0 +1,3 @@
+
+-define(RECBUF_SIZE, 8192).
+
diff --git a/1.1.x/src/mochiweb/mochifmt.erl b/1.1.x/src/mochiweb/mochifmt.erl
new file mode 100644
index 00000000..5bc6b9c4
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochifmt.erl
@@ -0,0 +1,425 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2008 Mochi Media, Inc.
+
+%% @doc String Formatting for Erlang, inspired by Python 2.6
+%% (<a href="http://www.python.org/dev/peps/pep-3101/">PEP 3101</a>).
+%%
+-module(mochifmt).
+-author('bob@mochimedia.com').
+-export([format/2, format_field/2, convert_field/2, get_value/2, get_field/2]).
+-export([tokenize/1, format/3, get_field/3, format_field/3]).
+-export([bformat/2, bformat/3]).
+-export([f/2, f/3]).
+
+-record(conversion, {length, precision, ctype, align, fill_char, sign}).
+
+%% @spec tokenize(S::string()) -> tokens()
+%% @doc Tokenize a format string into mochifmt's internal format.
+tokenize(S) ->
+ {?MODULE, tokenize(S, "", [])}.
+
+%% @spec convert_field(Arg, Conversion::conversion()) -> term()
+%% @doc Process Arg according to the given explicit conversion specifier.
+convert_field(Arg, "") ->
+ Arg;
+convert_field(Arg, "r") ->
+ repr(Arg);
+convert_field(Arg, "s") ->
+ str(Arg).
+
+%% @spec get_value(Key::string(), Args::args()) -> term()
+%% @doc Get the Key from Args. If Args is a tuple then convert Key to
+%% an integer and get element(1 + Key, Args). If Args is a list and Key
+%% can be parsed as an integer then use lists:nth(1 + Key, Args),
+%% otherwise try to look up Key in Args as a proplist, converting
+%% Key to an atom or binary if necessary.
+get_value(Key, Args) when is_tuple(Args) ->
+ element(1 + list_to_integer(Key), Args);
+get_value(Key, Args) when is_list(Args) ->
+ try lists:nth(1 + list_to_integer(Key), Args)
+ catch error:_ ->
+ {_K, V} = proplist_lookup(Key, Args),
+ V
+ end.
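+%% For illustration:
+%%   1> mochifmt:get_value("1", {a, b, c}).
+%%   b
+%%   2> mochifmt:get_value("three", [{three, 3}]).
+%%   3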
+
+%% @spec get_field(Key::string(), Args) -> term()
+%% @doc Consecutively call get_value/2 on parts of Key delimited by ".",
+%% replacing Args with the result of the previous get_value. This
+%% is used to implement formats such as {0.0}.
+get_field(Key, Args) ->
+ get_field(Key, Args, ?MODULE).
+
+%% @spec get_field(Key::string(), Args, Module) -> term()
+%% @doc Consecutively call Module:get_value/2 on parts of Key delimited by ".",
+%% replacing Args with the result of the previous get_value. This
+%% is used to implement formats such as {0.0}.
+get_field(Key, Args, Module) ->
+ {Name, Next} = lists:splitwith(fun (C) -> C =/= $. end, Key),
+ Res = try Module:get_value(Name, Args)
+ catch error:undef -> get_value(Name, Args) end,
+ case Next of
+ "" ->
+ Res;
+ "." ++ S1 ->
+ get_field(S1, Res, Module)
+ end.
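+%% For illustration: get_field("0.1", {{a, b}}) first looks up "0"
+%% (the tuple {a, b}) and then "1" within it, returning b.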
+
+%% @spec format(Format::string(), Args) -> iolist()
+%% @doc Format Args with Format.
+format(Format, Args) ->
+ format(Format, Args, ?MODULE).
+
+%% @spec format(Format::string(), Args, Module) -> iolist()
+%% @doc Format Args with Format using Module.
+format({?MODULE, Parts}, Args, Module) ->
+ format2(Parts, Args, Module, []);
+format(S, Args, Module) ->
+ format(tokenize(S), Args, Module).
+
+%% @spec format_field(Arg, Format) -> iolist()
+%% @doc Format Arg with Format.
+format_field(Arg, Format) ->
+ format_field(Arg, Format, ?MODULE).
+
+%% @spec format_field(Arg, Format, _Module) -> iolist()
+%% @doc Format Arg with Format.
+format_field(Arg, Format, _Module) ->
+ F = default_ctype(Arg, parse_std_conversion(Format)),
+ fix_padding(fix_sign(convert2(Arg, F), F), F).
+
+%% @spec f(Format::string(), Args) -> string()
+%% @doc Format Args with Format and return a string().
+f(Format, Args) ->
+ f(Format, Args, ?MODULE).
+
+%% @spec f(Format::string(), Args, Module) -> string()
+%% @doc Format Args with Format using Module and return a string().
+f(Format, Args, Module) ->
+ case lists:member(${, Format) of
+ true ->
+ binary_to_list(bformat(Format, Args, Module));
+ false ->
+ Format
+ end.
+
+%% @spec bformat(Format::string(), Args) -> binary()
+%% @doc Format Args with Format and return a binary().
+bformat(Format, Args) ->
+ iolist_to_binary(format(Format, Args)).
+
+%% @spec bformat(Format::string(), Args, Module) -> binary()
+%% @doc Format Args with Format using Module and return a binary().
+bformat(Format, Args, Module) ->
+ iolist_to_binary(format(Format, Args, Module)).
+
+%% Internal API
+
+add_raw("", Acc) ->
+ Acc;
+add_raw(S, Acc) ->
+ [{raw, lists:reverse(S)} | Acc].
+
+tokenize([], S, Acc) ->
+ lists:reverse(add_raw(S, Acc));
+tokenize("{{" ++ Rest, S, Acc) ->
+ tokenize(Rest, "{" ++ S, Acc);
+tokenize("{" ++ Rest, S, Acc) ->
+ {Format, Rest1} = tokenize_format(Rest),
+ tokenize(Rest1, "", [{format, make_format(Format)} | add_raw(S, Acc)]);
+tokenize("}}" ++ Rest, S, Acc) ->
+ tokenize(Rest, "}" ++ S, Acc);
+tokenize([C | Rest], S, Acc) ->
+ tokenize(Rest, [C | S], Acc).
+
+tokenize_format(S) ->
+ tokenize_format(S, 1, []).
+
+tokenize_format("}" ++ Rest, 1, Acc) ->
+ {lists:reverse(Acc), Rest};
+tokenize_format("}" ++ Rest, N, Acc) ->
+ tokenize_format(Rest, N - 1, "}" ++ Acc);
+tokenize_format("{" ++ Rest, N, Acc) ->
+ tokenize_format(Rest, 1 + N, "{" ++ Acc);
+tokenize_format([C | Rest], N, Acc) ->
+ tokenize_format(Rest, N, [C | Acc]).
+
+make_format(S) ->
+ {Name0, Spec} = case lists:splitwith(fun (C) -> C =/= $: end, S) of
+ {_, ""} ->
+ {S, ""};
+ {SN, ":" ++ SS} ->
+ {SN, SS}
+ end,
+ {Name, Transform} = case lists:splitwith(fun (C) -> C =/= $! end, Name0) of
+ {_, ""} ->
+ {Name0, ""};
+ {TN, "!" ++ TT} ->
+ {TN, TT}
+ end,
+ {Name, Transform, Spec}.
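+%% For illustration: make_format("0!r:^10") splits into
+%% {Name, Transform, Spec} = {"0", "r", "^10"}.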
+
+proplist_lookup(S, P) ->
+ A = try list_to_existing_atom(S)
+ catch error:_ -> make_ref() end,
+ B = try list_to_binary(S)
+ catch error:_ -> make_ref() end,
+ proplist_lookup2({S, A, B}, P).
+
+proplist_lookup2({KS, KA, KB}, [{K, V} | _])
+ when KS =:= K orelse KA =:= K orelse KB =:= K ->
+ {K, V};
+proplist_lookup2(Keys, [_ | Rest]) ->
+ proplist_lookup2(Keys, Rest).
+
+format2([], _Args, _Module, Acc) ->
+ lists:reverse(Acc);
+format2([{raw, S} | Rest], Args, Module, Acc) ->
+ format2(Rest, Args, Module, [S | Acc]);
+format2([{format, {Key, Convert, Format0}} | Rest], Args, Module, Acc) ->
+ Format = f(Format0, Args, Module),
+ V = case Module of
+ ?MODULE ->
+ V0 = get_field(Key, Args),
+ V1 = convert_field(V0, Convert),
+ format_field(V1, Format);
+ _ ->
+ V0 = try Module:get_field(Key, Args)
+ catch error:undef -> get_field(Key, Args, Module) end,
+ V1 = try Module:convert_field(V0, Convert)
+ catch error:undef -> convert_field(V0, Convert) end,
+ try Module:format_field(V1, Format)
+ catch error:undef -> format_field(V1, Format, Module) end
+ end,
+ format2(Rest, Args, Module, [V | Acc]).
+
+default_ctype(_Arg, C=#conversion{ctype=N}) when N =/= undefined ->
+ C;
+default_ctype(Arg, C) when is_integer(Arg) ->
+ C#conversion{ctype=decimal};
+default_ctype(Arg, C) when is_float(Arg) ->
+ C#conversion{ctype=general};
+default_ctype(_Arg, C) ->
+ C#conversion{ctype=string}.
+
+fix_padding(Arg, #conversion{length=undefined}) ->
+ Arg;
+fix_padding(Arg, F=#conversion{length=Length, fill_char=Fill0, align=Align0,
+ ctype=Type}) ->
+ Padding = Length - iolist_size(Arg),
+ Fill = case Fill0 of
+ undefined ->
+ $\s;
+ _ ->
+ Fill0
+ end,
+ Align = case Align0 of
+ undefined ->
+ case Type of
+ string ->
+ left;
+ _ ->
+ right
+ end;
+ _ ->
+ Align0
+ end,
+ case Padding > 0 of
+ true ->
+ do_padding(Arg, Padding, Fill, Align, F);
+ false ->
+ Arg
+ end.
+
+do_padding(Arg, Padding, Fill, right, _F) ->
+ [lists:duplicate(Padding, Fill), Arg];
+do_padding(Arg, Padding, Fill, center, _F) ->
+ LPadding = lists:duplicate(Padding div 2, Fill),
+ RPadding = case Padding band 1 of
+ 1 ->
+ [Fill | LPadding];
+ _ ->
+ LPadding
+ end,
+ [LPadding, Arg, RPadding];
+do_padding([$- | Arg], Padding, Fill, sign_right, _F) ->
+ [[$- | lists:duplicate(Padding, Fill)], Arg];
+do_padding(Arg, Padding, Fill, sign_right, #conversion{sign=$-}) ->
+ [lists:duplicate(Padding, Fill), Arg];
+do_padding([S | Arg], Padding, Fill, sign_right, #conversion{sign=S}) ->
+ [[S | lists:duplicate(Padding, Fill)], Arg];
+do_padding(Arg, Padding, Fill, sign_right, #conversion{sign=undefined}) ->
+ [lists:duplicate(Padding, Fill), Arg];
+do_padding(Arg, Padding, Fill, left, _F) ->
+ [Arg | lists:duplicate(Padding, Fill)].
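+%% For illustration: bformat("{0:>5}", ["ab"]) pads to width 5 with
+%% spaces on the left, giving <<"   ab">>. Without an explicit align,
+%% strings pad on the right (left-aligned) and numbers on the left.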
+
+fix_sign(Arg, #conversion{sign=$+}) when Arg >= 0 ->
+ [$+, Arg];
+fix_sign(Arg, #conversion{sign=$\s}) when Arg >= 0 ->
+ [$\s, Arg];
+fix_sign(Arg, _F) ->
+ Arg.
+
+ctype($\%) -> percent;
+ctype($s) -> string;
+ctype($b) -> bin;
+ctype($o) -> oct;
+ctype($X) -> upper_hex;
+ctype($x) -> hex;
+ctype($c) -> char;
+ctype($d) -> decimal;
+ctype($g) -> general;
+ctype($f) -> fixed;
+ctype($e) -> exp.
+
+align($<) -> left;
+align($>) -> right;
+align($^) -> center;
+align($=) -> sign_right.
+
+convert2(Arg, F=#conversion{ctype=percent}) ->
+ [convert2(100.0 * Arg, F#conversion{ctype=fixed}), $\%];
+convert2(Arg, #conversion{ctype=string}) ->
+ str(Arg);
+convert2(Arg, #conversion{ctype=bin}) ->
+ erlang:integer_to_list(Arg, 2);
+convert2(Arg, #conversion{ctype=oct}) ->
+ erlang:integer_to_list(Arg, 8);
+convert2(Arg, #conversion{ctype=upper_hex}) ->
+ erlang:integer_to_list(Arg, 16);
+convert2(Arg, #conversion{ctype=hex}) ->
+ string:to_lower(erlang:integer_to_list(Arg, 16));
+convert2(Arg, #conversion{ctype=char}) when Arg < 16#80 ->
+ [Arg];
+convert2(Arg, #conversion{ctype=char}) ->
+ xmerl_ucs:to_utf8(Arg);
+convert2(Arg, #conversion{ctype=decimal}) ->
+ integer_to_list(Arg);
+convert2(Arg, #conversion{ctype=general, precision=undefined}) ->
+ try mochinum:digits(Arg)
+ catch error:undef -> io_lib:format("~g", [Arg]) end;
+convert2(Arg, #conversion{ctype=fixed, precision=undefined}) ->
+ io_lib:format("~f", [Arg]);
+convert2(Arg, #conversion{ctype=exp, precision=undefined}) ->
+ io_lib:format("~e", [Arg]);
+convert2(Arg, #conversion{ctype=general, precision=P}) ->
+ io_lib:format("~." ++ integer_to_list(P) ++ "g", [Arg]);
+convert2(Arg, #conversion{ctype=fixed, precision=P}) ->
+ io_lib:format("~." ++ integer_to_list(P) ++ "f", [Arg]);
+convert2(Arg, #conversion{ctype=exp, precision=P}) ->
+ io_lib:format("~." ++ integer_to_list(P) ++ "e", [Arg]).
+
+str(A) when is_atom(A) ->
+ atom_to_list(A);
+str(I) when is_integer(I) ->
+ integer_to_list(I);
+str(F) when is_float(F) ->
+ try mochinum:digits(F)
+ catch error:undef -> io_lib:format("~g", [F]) end;
+str(L) when is_list(L) ->
+ L;
+str(B) when is_binary(B) ->
+ B;
+str(P) ->
+ repr(P).
+
+repr(P) when is_float(P) ->
+ try mochinum:digits(P)
+ catch error:undef -> float_to_list(P) end;
+repr(P) ->
+ io_lib:format("~p", [P]).
+
+parse_std_conversion(S) ->
+ parse_std_conversion(S, #conversion{}).
+
+parse_std_conversion("", Acc) ->
+ Acc;
+parse_std_conversion([Fill, Align | Spec], Acc)
+ when Align =:= $< orelse Align =:= $> orelse Align =:= $= orelse Align =:= $^ ->
+ parse_std_conversion(Spec, Acc#conversion{fill_char=Fill,
+ align=align(Align)});
+parse_std_conversion([Align | Spec], Acc)
+ when Align =:= $< orelse Align =:= $> orelse Align =:= $= orelse Align =:= $^ ->
+ parse_std_conversion(Spec, Acc#conversion{align=align(Align)});
+parse_std_conversion([Sign | Spec], Acc)
+ when Sign =:= $+ orelse Sign =:= $- orelse Sign =:= $\s ->
+ parse_std_conversion(Spec, Acc#conversion{sign=Sign});
+parse_std_conversion("0" ++ Spec, Acc) ->
+ Align = case Acc#conversion.align of
+ undefined ->
+ sign_right;
+ A ->
+ A
+ end,
+ parse_std_conversion(Spec, Acc#conversion{fill_char=$0, align=Align});
+parse_std_conversion(Spec=[D|_], Acc) when D >= $0 andalso D =< $9 ->
+ {W, Spec1} = lists:splitwith(fun (C) -> C >= $0 andalso C =< $9 end, Spec),
+ parse_std_conversion(Spec1, Acc#conversion{length=list_to_integer(W)});
+parse_std_conversion([$. | Spec], Acc) ->
+ case lists:splitwith(fun (C) -> C >= $0 andalso C =< $9 end, Spec) of
+ {"", Spec1} ->
+ parse_std_conversion(Spec1, Acc);
+ {P, Spec1} ->
+ parse_std_conversion(Spec1,
+ Acc#conversion{precision=list_to_integer(P)})
+ end;
+parse_std_conversion([Type], Acc) ->
+ parse_std_conversion("", Acc#conversion{ctype=ctype(Type)}).
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+tokenize_test() ->
+ {?MODULE, [{raw, "ABC"}]} = tokenize("ABC"),
+ {?MODULE, [{format, {"0", "", ""}}]} = tokenize("{0}"),
+ {?MODULE, [{raw, "ABC"}, {format, {"1", "", ""}}, {raw, "DEF"}]} =
+ tokenize("ABC{1}DEF"),
+ ok.
+
+format_test() ->
+ <<" -4">> = bformat("{0:4}", [-4]),
+ <<" 4">> = bformat("{0:4}", [4]),
+ <<" 4">> = bformat("{0:{0}}", [4]),
+ <<"4 ">> = bformat("{0:4}", ["4"]),
+ <<"4 ">> = bformat("{0:{0}}", ["4"]),
+ <<"1.2yoDEF">> = bformat("{2}{0}{1}{3}", {yo, "DE", 1.2, <<"F">>}),
+ <<"cafebabe">> = bformat("{0:x}", {16#cafebabe}),
+ <<"CAFEBABE">> = bformat("{0:X}", {16#cafebabe}),
+ <<"CAFEBABE">> = bformat("{0:X}", {16#cafebabe}),
+ <<"755">> = bformat("{0:o}", {8#755}),
+ <<"a">> = bformat("{0:c}", {97}),
+ %% Horizontal ellipsis
+ <<226, 128, 166>> = bformat("{0:c}", {16#2026}),
+ <<"11">> = bformat("{0:b}", {3}),
+ <<"11">> = bformat("{0:b}", [3]),
+ <<"11">> = bformat("{three:b}", [{three, 3}]),
+ <<"11">> = bformat("{three:b}", [{"three", 3}]),
+ <<"11">> = bformat("{three:b}", [{<<"three">>, 3}]),
+ <<"\"foo\"">> = bformat("{0!r}", {"foo"}),
+ <<"2008-5-4">> = bformat("{0.0}-{0.1}-{0.2}", {{2008,5,4}}),
+ <<"2008-05-04">> = bformat("{0.0:04}-{0.1:02}-{0.2:02}", {{2008,5,4}}),
+ <<"foo6bar-6">> = bformat("foo{1}{0}-{1}", {bar, 6}),
+ <<"-'atom test'-">> = bformat("-{arg!r}-", [{arg, 'atom test'}]),
+ <<"2008-05-04">> = bformat("{0.0:0{1.0}}-{0.1:0{1.1}}-{0.2:0{1.2}}",
+ {{2008,5,4}, {4, 2, 2}}),
+ ok.
+
+std_test() ->
+ M = mochifmt_std:new(),
+ <<"01">> = bformat("{0}{1}", [0, 1], M),
+ ok.
+
+records_test() ->
+ M = mochifmt_records:new([{conversion, record_info(fields, conversion)}]),
+ R = #conversion{length=long, precision=hard, sign=peace},
+ long = M:get_value("length", R),
+ hard = M:get_value("precision", R),
+ peace = M:get_value("sign", R),
+ <<"long hard">> = bformat("{length} {precision}", R, M),
+ <<"long hard">> = bformat("{0.length} {0.precision}", [R], M),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochifmt_records.erl b/1.1.x/src/mochiweb/mochifmt_records.erl
new file mode 100644
index 00000000..2326d1dd
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochifmt_records.erl
@@ -0,0 +1,38 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2008 Mochi Media, Inc.
+
+%% @doc Formatter that understands records.
+%%
+%% Usage:
+%%
+%% 1> M = mochifmt_records:new([{rec, record_info(fields, rec)}]),
+%% M:format("{0.bar}", [#rec{bar=foo}]).
+%% foo
+
+-module(mochifmt_records, [Recs]).
+-author('bob@mochimedia.com').
+-export([get_value/2]).
+
+get_value(Key, Rec) when is_tuple(Rec) and is_atom(element(1, Rec)) ->
+ try begin
+ Atom = list_to_existing_atom(Key),
+ {_, Fields} = proplists:lookup(element(1, Rec), Recs),
+ element(get_rec_index(Atom, Fields, 2), Rec)
+ end
+ catch error:_ -> mochifmt:get_value(Key, Rec)
+ end;
+get_value(Key, Args) ->
+ mochifmt:get_value(Key, Args).
+
+get_rec_index(Atom, [Atom | _], Index) ->
+ Index;
+get_rec_index(Atom, [_ | Rest], Index) ->
+ get_rec_index(Atom, Rest, 1 + Index).
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/1.1.x/src/mochiweb/mochifmt_std.erl b/1.1.x/src/mochiweb/mochifmt_std.erl
new file mode 100644
index 00000000..d4d74f6f
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochifmt_std.erl
@@ -0,0 +1,30 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2008 Mochi Media, Inc.
+
+%% @doc Template module for a mochifmt formatter.
+
+-module(mochifmt_std, []).
+-author('bob@mochimedia.com').
+-export([format/2, get_value/2, format_field/2, get_field/2, convert_field/2]).
+
+format(Format, Args) ->
+ mochifmt:format(Format, Args, THIS).
+
+get_field(Key, Args) ->
+ mochifmt:get_field(Key, Args, THIS).
+
+convert_field(Arg, Conversion) ->
+    mochifmt:convert_field(Arg, Conversion).
+
+get_value(Key, Args) ->
+ mochifmt:get_value(Key, Args).
+
+format_field(Arg, Format) ->
+ mochifmt:format_field(Arg, Format, THIS).
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiglobal.erl b/1.1.x/src/mochiweb/mochiglobal.erl
new file mode 100644
index 00000000..c740b878
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiglobal.erl
@@ -0,0 +1,107 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+%% @doc Abuse module constant pools as a "read-only shared heap" (since erts 5.6)
+%% <a href="http://www.erlang.org/pipermail/erlang-questions/2009-March/042503.html">[1]</a>.
+-module(mochiglobal).
+-author("Bob Ippolito <bob@mochimedia.com>").
+-export([get/1, get/2, put/2, delete/1]).
+
+-spec get(atom()) -> any() | undefined.
+%% @equiv get(K, undefined)
+get(K) ->
+ get(K, undefined).
+
+-spec get(atom(), T) -> any() | T.
+%% @doc Get the term for K or return Default.
+get(K, Default) ->
+ get(K, Default, key_to_module(K)).
+
+get(_K, Default, Mod) ->
+ try Mod:term()
+ catch error:undef ->
+ Default
+ end.
+
+-spec put(atom(), any()) -> ok.
+%% @doc Store term V at K, replacing any existing term.
+put(K, V) ->
+ put(K, V, key_to_module(K)).
+
+put(_K, V, Mod) ->
+ Bin = compile(Mod, V),
+ code:purge(Mod),
+ code:load_binary(Mod, atom_to_list(Mod) ++ ".erl", Bin),
+ ok.
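+%% For illustration (with a hypothetical key):
+%%   1> mochiglobal:put(my_key, {config, 8080}).
+%%   ok
+%%   2> mochiglobal:get(my_key).
+%%   {config,8080}
+%% Each put compiles and loads a module named 'mochiglobal:my_key', so
+%% writes are expensive while reads are a cheap constant-pool lookup.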
+
+-spec delete(atom()) -> boolean().
+%% @doc Delete the term stored at K; a no-op if it does not exist.
+delete(K) ->
+ delete(K, key_to_module(K)).
+
+delete(_K, Mod) ->
+ code:purge(Mod),
+ code:delete(Mod).
+
+-spec key_to_module(atom()) -> atom().
+key_to_module(K) ->
+ list_to_atom("mochiglobal:" ++ atom_to_list(K)).
+
+-spec compile(atom(), any()) -> binary().
+compile(Module, T) ->
+ {ok, Module, Bin} = compile:forms(forms(Module, T),
+ [verbose, report_errors]),
+ Bin.
+
+-spec forms(atom(), any()) -> [erl_syntax:syntaxTree()].
+forms(Module, T) ->
+ [erl_syntax:revert(X) || X <- term_to_abstract(Module, term, T)].
+
+-spec term_to_abstract(atom(), atom(), any()) -> [erl_syntax:syntaxTree()].
+term_to_abstract(Module, Getter, T) ->
+ [%% -module(Module).
+ erl_syntax:attribute(
+ erl_syntax:atom(module),
+ [erl_syntax:atom(Module)]),
+ %% -export([Getter/0]).
+ erl_syntax:attribute(
+ erl_syntax:atom(export),
+ [erl_syntax:list(
+ [erl_syntax:arity_qualifier(
+ erl_syntax:atom(Getter),
+ erl_syntax:integer(0))])]),
+ %% Getter() -> T.
+ erl_syntax:function(
+ erl_syntax:atom(Getter),
+ [erl_syntax:clause([], none, [erl_syntax:abstract(T)])])].
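+%% The forms above are equivalent to compiling a module of the shape:
+%%   -module('mochiglobal:Key').
+%%   -export([term/0]).
+%%   term() -> T.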
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+get_put_delete_test() ->
+ K = '$$test$$mochiglobal',
+ delete(K),
+ ?assertEqual(
+ bar,
+ get(K, bar)),
+ try
+ ?MODULE:put(K, baz),
+ ?assertEqual(
+ baz,
+ get(K, bar)),
+ ?MODULE:put(K, wibble),
+ ?assertEqual(
+ wibble,
+ ?MODULE:get(K))
+ after
+ delete(K)
+ end,
+ ?assertEqual(
+ bar,
+ get(K, bar)),
+ ?assertEqual(
+ undefined,
+ ?MODULE:get(K)),
+ ok.
+-endif.
diff --git a/1.1.x/src/mochiweb/mochihex.erl b/1.1.x/src/mochiweb/mochihex.erl
new file mode 100644
index 00000000..44a2aa7f
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochihex.erl
@@ -0,0 +1,91 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2006 Mochi Media, Inc.
+
+%% @doc Utilities for working with hexadecimal strings.
+
+-module(mochihex).
+-author('bob@mochimedia.com').
+
+-export([to_hex/1, to_bin/1, to_int/1, dehex/1, hexdigit/1]).
+
+%% @type iolist() = [char() | binary() | iolist()]
+%% @type iodata() = iolist() | binary()
+
+%% @spec to_hex(integer() | iolist()) -> string()
+%% @doc Convert an integer or iolist to a hexadecimal string.
+to_hex(0) ->
+ "0";
+to_hex(I) when is_integer(I), I > 0 ->
+ to_hex_int(I, []);
+to_hex(B) ->
+ to_hex(iolist_to_binary(B), []).
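+%% For illustration: to_hex(255) and to_hex(<<255>>) both return "ff";
+%% to_hex(<<255, 0>>) returns "ff00".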
+
+%% @spec to_bin(string()) -> binary()
+%% @doc Convert a hexadecimal string to a binary.
+to_bin(L) ->
+ to_bin(L, []).
+
+%% @spec to_int(string()) -> integer()
+%% @doc Convert a hexadecimal string to an integer.
+to_int(L) ->
+ erlang:list_to_integer(L, 16).
+
+%% @spec dehex(char()) -> integer()
+%% @doc Convert a hex digit to its integer value.
+dehex(C) when C >= $0, C =< $9 ->
+ C - $0;
+dehex(C) when C >= $a, C =< $f ->
+ C - $a + 10;
+dehex(C) when C >= $A, C =< $F ->
+ C - $A + 10.
+
+%% @spec hexdigit(integer()) -> char()
+%% @doc Convert an integer in the range 0..15 to a hex digit.
+hexdigit(C) when C >= 0, C =< 9 ->
+ C + $0;
+hexdigit(C) when C =< 15 ->
+ C + $a - 10.
+
+%% Internal API
+
+to_hex(<<>>, Acc) ->
+ lists:reverse(Acc);
+to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
+ to_hex(Rest, [hexdigit(C2), hexdigit(C1) | Acc]).
+
+to_hex_int(0, Acc) ->
+ Acc;
+to_hex_int(I, Acc) ->
+ to_hex_int(I bsr 4, [hexdigit(I band 15) | Acc]).
+
+to_bin([], Acc) ->
+ iolist_to_binary(lists:reverse(Acc));
+to_bin([C1, C2 | Rest], Acc) ->
+ to_bin(Rest, [(dehex(C1) bsl 4) bor dehex(C2) | Acc]).
+
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+to_hex_test() ->
+ "ff000ff1" = to_hex([255, 0, 15, 241]),
+ "ff000ff1" = to_hex(16#ff000ff1),
+ "0" = to_hex(16#0),
+ ok.
+
+to_bin_test() ->
+ <<255, 0, 15, 241>> = to_bin("ff000ff1"),
+ <<255, 0, 10, 161>> = to_bin("Ff000aA1"),
+ ok.
+
+to_int_test() ->
+ 16#ff000ff1 = to_int("ff000ff1"),
+ 16#ff000aa1 = to_int("FF000Aa1"),
+ 16#0 = to_int("0"),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochijson.erl b/1.1.x/src/mochiweb/mochijson.erl
new file mode 100644
index 00000000..2e3d1452
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochijson.erl
@@ -0,0 +1,531 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2006 Mochi Media, Inc.
+
+%% @doc Yet another JSON (RFC 4627) library for Erlang.
+-module(mochijson).
+-author('bob@mochimedia.com').
+-export([encoder/1, encode/1]).
+-export([decoder/1, decode/1]).
+-export([binary_encoder/1, binary_encode/1]).
+-export([binary_decoder/1, binary_decode/1]).
+
+% This is a macro to placate syntax highlighters.
+-define(Q, $\").
+-define(ADV_COL(S, N), S#decoder{column=N+S#decoder.column}).
+-define(INC_COL(S), S#decoder{column=1+S#decoder.column}).
+-define(INC_LINE(S), S#decoder{column=1, line=1+S#decoder.line}).
+
+%% @type iolist() = [char() | binary() | iolist()]
+%% @type iodata() = iolist() | binary()
+%% @type json_string() = atom | string() | binary()
+%% @type json_number() = integer() | float()
+%% @type json_array() = {array, [json_term()]}
+%% @type json_object() = {struct, [{json_string(), json_term()}]}
+%% @type json_term() = json_string() | json_number() | json_array() |
+%% json_object()
+%% @type encoding() = utf8 | unicode
+%% @type encoder_option() = {input_encoding, encoding()} |
+%% {handler, function()}
+%% @type decoder_option() = {input_encoding, encoding()} |
+%% {object_hook, function()}
+%% @type bjson_string() = binary()
+%% @type bjson_number() = integer() | float()
+%% @type bjson_array() = [bjson_term()]
+%% @type bjson_object() = {struct, [{bjson_string(), bjson_term()}]}
+%% @type bjson_term() = bjson_string() | bjson_number() | bjson_array() |
+%% bjson_object()
+%% @type binary_encoder_option() = {handler, function()}
+%% @type binary_decoder_option() = {object_hook, function()}
+
+-record(encoder, {input_encoding=unicode,
+ handler=null}).
+
+-record(decoder, {input_encoding=utf8,
+ object_hook=null,
+ line=1,
+ column=1,
+ state=null}).
+
+%% @spec encoder([encoder_option()]) -> function()
+%% @doc Create an encoder/1 with the given options.
+encoder(Options) ->
+ State = parse_encoder_options(Options, #encoder{}),
+ fun (O) -> json_encode(O, State) end.
+
+%% @spec encode(json_term()) -> iolist()
+%% @doc Encode the given as JSON to an iolist.
+encode(Any) ->
+ json_encode(Any, #encoder{}).
+
+%% @spec decoder([decoder_option()]) -> function()
+%% @doc Create a decoder/1 with the given options.
+decoder(Options) ->
+ State = parse_decoder_options(Options, #decoder{}),
+ fun (O) -> json_decode(O, State) end.
+
+%% @spec decode(iolist()) -> json_term()
+%% @doc Decode the given iolist to Erlang terms.
+decode(S) ->
+ json_decode(S, #decoder{}).
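+%% For illustration of the term mapping (strings as lists, arrays and
+%% objects tagged):
+%%   1> mochijson:decode("{\"foo\": [1, true]}").
+%%   {struct,[{"foo",{array,[1,true]}}]}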
+
+%% @spec binary_decoder([binary_decoder_option()]) -> function()
+%% @doc Create a binary_decoder/1 with the given options.
+binary_decoder(Options) ->
+ mochijson2:decoder(Options).
+
+%% @spec binary_encoder([binary_encoder_option()]) -> function()
+%% @doc Create a binary_encoder/1 with the given options.
+binary_encoder(Options) ->
+ mochijson2:encoder(Options).
+
+%% @spec binary_encode(bjson_term()) -> iolist()
+%% @doc Encode the given bjson_term() as JSON to an iolist, using lists
+%% for arrays and binaries for strings.
+binary_encode(Any) ->
+ mochijson2:encode(Any).
+
+%% @spec binary_decode(iolist()) -> bjson_term()
+%% @doc Decode the given iolist to Erlang terms, using lists for arrays and
+%% binaries for strings.
+binary_decode(S) ->
+ mochijson2:decode(S).
+
+%% Internal API
+
+parse_encoder_options([], State) ->
+ State;
+parse_encoder_options([{input_encoding, Encoding} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{input_encoding=Encoding});
+parse_encoder_options([{handler, Handler} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{handler=Handler}).
+
+parse_decoder_options([], State) ->
+ State;
+parse_decoder_options([{input_encoding, Encoding} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{input_encoding=Encoding});
+parse_decoder_options([{object_hook, Hook} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{object_hook=Hook}).
+
+json_encode(true, _State) ->
+ "true";
+json_encode(false, _State) ->
+ "false";
+json_encode(null, _State) ->
+ "null";
+json_encode(I, _State) when is_integer(I) ->
+ integer_to_list(I);
+json_encode(F, _State) when is_float(F) ->
+ mochinum:digits(F);
+json_encode(L, State) when is_list(L); is_binary(L); is_atom(L) ->
+ json_encode_string(L, State);
+json_encode({array, Props}, State) when is_list(Props) ->
+ json_encode_array(Props, State);
+json_encode({struct, Props}, State) when is_list(Props) ->
+ json_encode_proplist(Props, State);
+json_encode(Bad, #encoder{handler=null}) ->
+ exit({json_encode, {bad_term, Bad}});
+json_encode(Bad, State=#encoder{handler=Handler}) ->
+ json_encode(Handler(Bad), State).
+
+json_encode_array([], _State) ->
+ "[]";
+json_encode_array(L, State) ->
+ F = fun (O, Acc) ->
+ [$,, json_encode(O, State) | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "[", L),
+ lists:reverse([$\] | Acc1]).
+
+json_encode_proplist([], _State) ->
+ "{}";
+json_encode_proplist(Props, State) ->
+ F = fun ({K, V}, Acc) ->
+ KS = case K of
+ K when is_atom(K) ->
+ json_encode_string_utf8(atom_to_list(K));
+ K when is_integer(K) ->
+ json_encode_string(integer_to_list(K), State);
+ K when is_list(K); is_binary(K) ->
+ json_encode_string(K, State)
+ end,
+ VS = json_encode(V, State),
+ [$,, VS, $:, KS | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "{", Props),
+ lists:reverse([$\} | Acc1]).
+
+json_encode_string(A, _State) when is_atom(A) ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(atom_to_list(A)));
+json_encode_string(B, _State) when is_binary(B) ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(B));
+json_encode_string(S, #encoder{input_encoding=utf8}) ->
+ json_encode_string_utf8(S);
+json_encode_string(S, #encoder{input_encoding=unicode}) ->
+ json_encode_string_unicode(S).
+
+json_encode_string_utf8(S) ->
+ [?Q | json_encode_string_utf8_1(S)].
+
+json_encode_string_utf8_1([C | Cs]) when C >= 0, C =< 16#7f ->
+ NewC = case C of
+ $\\ -> "\\\\";
+ ?Q -> "\\\"";
+ _ when C >= $\s, C < 16#7f -> C;
+ $\t -> "\\t";
+ $\n -> "\\n";
+ $\r -> "\\r";
+ $\f -> "\\f";
+ $\b -> "\\b";
+ _ when C >= 0, C =< 16#7f -> unihex(C);
+ _ -> exit({json_encode, {bad_char, C}})
+ end,
+ [NewC | json_encode_string_utf8_1(Cs)];
+json_encode_string_utf8_1(All=[C | _]) when C >= 16#80, C =< 16#10FFFF ->
+ [?Q | Rest] = json_encode_string_unicode(xmerl_ucs:from_utf8(All)),
+ Rest;
+json_encode_string_utf8_1([]) ->
+ "\"".
+
+json_encode_string_unicode(S) ->
+ [?Q | json_encode_string_unicode_1(S)].
+
+json_encode_string_unicode_1([C | Cs]) ->
+ NewC = case C of
+ $\\ -> "\\\\";
+ ?Q -> "\\\"";
+ _ when C >= $\s, C < 16#7f -> C;
+ $\t -> "\\t";
+ $\n -> "\\n";
+ $\r -> "\\r";
+ $\f -> "\\f";
+ $\b -> "\\b";
+ _ when C >= 0, C =< 16#10FFFF -> unihex(C);
+ _ -> exit({json_encode, {bad_char, C}})
+ end,
+ [NewC | json_encode_string_unicode_1(Cs)];
+json_encode_string_unicode_1([]) ->
+ "\"".
+
+dehex(C) when C >= $0, C =< $9 ->
+ C - $0;
+dehex(C) when C >= $a, C =< $f ->
+ C - $a + 10;
+dehex(C) when C >= $A, C =< $F ->
+ C - $A + 10.
+
+hexdigit(C) when C >= 0, C =< 9 ->
+ C + $0;
+hexdigit(C) when C =< 15 ->
+ C + $a - 10.
+
+unihex(C) when C < 16#10000 ->
+ <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
+ Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
+ [$\\, $u | Digits];
+unihex(C) when C =< 16#10FFFF ->
+ N = C - 16#10000,
+ S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
+ S2 = 16#dc00 bor (N band 16#3ff),
+ [unihex(S1), unihex(S2)].
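+%% For illustration: U+1D715 lies above the BMP, so N = 16#D715,
+%% S1 = 16#D835, S2 = 16#DF15, and "\ud835\udf15" is emitted.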
+
+json_decode(B, S) when is_binary(B) ->
+ json_decode(binary_to_list(B), S);
+json_decode(L, S) ->
+ {Res, L1, S1} = decode1(L, S),
+ {eof, [], _} = tokenize(L1, S1#decoder{state=trim}),
+ Res.
+
+decode1(L, S=#decoder{state=null}) ->
+ case tokenize(L, S#decoder{state=any}) of
+ {{const, C}, L1, S1} ->
+ {C, L1, S1};
+ {start_array, L1, S1} ->
+ decode_array(L1, S1#decoder{state=any}, []);
+ {start_object, L1, S1} ->
+ decode_object(L1, S1#decoder{state=key}, [])
+ end.
+
+make_object(V, #decoder{object_hook=null}) ->
+ V;
+make_object(V, #decoder{object_hook=Hook}) ->
+ Hook(V).
+
+decode_object(L, S=#decoder{state=key}, Acc) ->
+ case tokenize(L, S) of
+ {end_object, Rest, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, Rest, S1#decoder{state=null}};
+ {{const, K}, Rest, S1} when is_list(K) ->
+ {colon, L2, S2} = tokenize(Rest, S1),
+ {V, L3, S3} = decode1(L2, S2#decoder{state=null}),
+ decode_object(L3, S3#decoder{state=comma}, [{K, V} | Acc])
+ end;
+decode_object(L, S=#decoder{state=comma}, Acc) ->
+ case tokenize(L, S) of
+ {end_object, Rest, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, Rest, S1#decoder{state=null}};
+ {comma, Rest, S1} ->
+ decode_object(Rest, S1#decoder{state=key}, Acc)
+ end.
+
+decode_array(L, S=#decoder{state=any}, Acc) ->
+ case tokenize(L, S) of
+ {end_array, Rest, S1} ->
+ {{array, lists:reverse(Acc)}, Rest, S1#decoder{state=null}};
+ {start_array, Rest, S1} ->
+ {Array, Rest1, S2} = decode_array(Rest, S1#decoder{state=any}, []),
+ decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
+ {start_object, Rest, S1} ->
+ {Array, Rest1, S2} = decode_object(Rest, S1#decoder{state=key}, []),
+ decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
+ {{const, Const}, Rest, S1} ->
+ decode_array(Rest, S1#decoder{state=comma}, [Const | Acc])
+ end;
+decode_array(L, S=#decoder{state=comma}, Acc) ->
+ case tokenize(L, S) of
+ {end_array, Rest, S1} ->
+ {{array, lists:reverse(Acc)}, Rest, S1#decoder{state=null}};
+ {comma, Rest, S1} ->
+ decode_array(Rest, S1#decoder{state=any}, Acc)
+ end.
+
+tokenize_string(IoList=[C | _], S=#decoder{input_encoding=utf8}, Acc)
+ when is_list(C); is_binary(C); C >= 16#7f ->
+ List = xmerl_ucs:from_utf8(iolist_to_binary(IoList)),
+ tokenize_string(List, S#decoder{input_encoding=unicode}, Acc);
+tokenize_string("\"" ++ Rest, S, Acc) ->
+ {lists:reverse(Acc), Rest, ?INC_COL(S)};
+tokenize_string("\\\"" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\" | Acc]);
+tokenize_string("\\\\" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\\ | Acc]);
+tokenize_string("\\/" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$/ | Acc]);
+tokenize_string("\\b" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\b | Acc]);
+tokenize_string("\\f" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\f | Acc]);
+tokenize_string("\\n" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\n | Acc]);
+tokenize_string("\\r" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\r | Acc]);
+tokenize_string("\\t" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\t | Acc]);
+tokenize_string([$\\, $u, C3, C2, C1, C0 | Rest], S, Acc) ->
+    % Note: UTF-16 surrogate pairs are not coalesced here; mochijson2 does that.
+ C = dehex(C0) bor
+ (dehex(C1) bsl 4) bor
+ (dehex(C2) bsl 8) bor
+ (dehex(C3) bsl 12),
+ tokenize_string(Rest, ?ADV_COL(S, 6), [C | Acc]);
+tokenize_string([C | Rest], S, Acc) when C >= $\s; C < 16#10FFFF ->
+ tokenize_string(Rest, ?ADV_COL(S, 1), [C | Acc]).
+
+tokenize_number(IoList=[C | _], Mode, S=#decoder{input_encoding=utf8}, Acc)
+ when is_list(C); is_binary(C); C >= 16#7f ->
+ List = xmerl_ucs:from_utf8(iolist_to_binary(IoList)),
+ tokenize_number(List, Mode, S#decoder{input_encoding=unicode}, Acc);
+tokenize_number([$- | Rest], sign, S, []) ->
+ tokenize_number(Rest, int, ?INC_COL(S), [$-]);
+tokenize_number(Rest, sign, S, []) ->
+ tokenize_number(Rest, int, S, []);
+tokenize_number([$0 | Rest], int, S, Acc) ->
+ tokenize_number(Rest, frac, ?INC_COL(S), [$0 | Acc]);
+tokenize_number([C | Rest], int, S, Acc) when C >= $1, C =< $9 ->
+ tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
+tokenize_number([C | Rest], int1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, int1, S, Acc) ->
+ tokenize_number(Rest, frac, S, Acc);
+tokenize_number([$., C | Rest], frac, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
+tokenize_number([E | Rest], frac, S, Acc) when E == $e; E == $E ->
+ tokenize_number(Rest, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
+tokenize_number(Rest, frac, S, Acc) ->
+ {{int, lists:reverse(Acc)}, Rest, S};
+tokenize_number([C | Rest], frac1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, frac1, ?INC_COL(S), [C | Acc]);
+tokenize_number([E | Rest], frac1, S, Acc) when E == $e; E == $E ->
+ tokenize_number(Rest, esign, ?INC_COL(S), [$e | Acc]);
+tokenize_number(Rest, frac1, S, Acc) ->
+ {{float, lists:reverse(Acc)}, Rest, S};
+tokenize_number([C | Rest], esign, S, Acc) when C == $-; C == $+ ->
+ tokenize_number(Rest, eint, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, esign, S, Acc) ->
+ tokenize_number(Rest, eint, S, Acc);
+tokenize_number([C | Rest], eint, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
+tokenize_number([C | Rest], eint1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, eint1, S, Acc) ->
+ {{float, lists:reverse(Acc)}, Rest, S}.
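+%% For illustration: "-12.5e3" walks the states sign -> int -> int1 ->
+%% frac -> frac1 -> esign -> eint -> eint1 and produces the token
+%% {float, "-12.5e3"}.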
+
+tokenize([], S=#decoder{state=trim}) ->
+ {eof, [], S};
+tokenize([L | Rest], S) when is_list(L) ->
+ tokenize(L ++ Rest, S);
+tokenize([B | Rest], S) when is_binary(B) ->
+ tokenize(xmerl_ucs:from_utf8(B) ++ Rest, S);
+tokenize("\r\n" ++ Rest, S) ->
+ tokenize(Rest, ?INC_LINE(S));
+tokenize("\n" ++ Rest, S) ->
+ tokenize(Rest, ?INC_LINE(S));
+tokenize([C | Rest], S) when C == $\s; C == $\t ->
+ tokenize(Rest, ?INC_COL(S));
+tokenize("{" ++ Rest, S) ->
+ {start_object, Rest, ?INC_COL(S)};
+tokenize("}" ++ Rest, S) ->
+ {end_object, Rest, ?INC_COL(S)};
+tokenize("[" ++ Rest, S) ->
+ {start_array, Rest, ?INC_COL(S)};
+tokenize("]" ++ Rest, S) ->
+ {end_array, Rest, ?INC_COL(S)};
+tokenize("," ++ Rest, S) ->
+ {comma, Rest, ?INC_COL(S)};
+tokenize(":" ++ Rest, S) ->
+ {colon, Rest, ?INC_COL(S)};
+tokenize("null" ++ Rest, S) ->
+ {{const, null}, Rest, ?ADV_COL(S, 4)};
+tokenize("true" ++ Rest, S) ->
+ {{const, true}, Rest, ?ADV_COL(S, 4)};
+tokenize("false" ++ Rest, S) ->
+ {{const, false}, Rest, ?ADV_COL(S, 5)};
+tokenize("\"" ++ Rest, S) ->
+ {String, Rest1, S1} = tokenize_string(Rest, ?INC_COL(S), []),
+ {{const, String}, Rest1, S1};
+tokenize(L=[C | _], S) when C >= $0, C =< $9; C == $- ->
+ case tokenize_number(L, sign, S, []) of
+ {{int, Int}, Rest, S1} ->
+ {{const, list_to_integer(Int)}, Rest, S1};
+ {{float, Float}, Rest, S1} ->
+ {{const, list_to_float(Float)}, Rest, S1}
+ end.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+%% Testing constructs borrowed from the Yaws JSON implementation.
+
+%% Create an object from a list of Key/Value pairs.
+
+obj_new() ->
+ {struct, []}.
+
+is_obj({struct, Props}) ->
+ F = fun ({K, _}) when is_list(K) ->
+ true;
+ (_) ->
+ false
+ end,
+ lists:all(F, Props).
+
+obj_from_list(Props) ->
+ Obj = {struct, Props},
+ case is_obj(Obj) of
+ true -> Obj;
+ false -> exit(json_bad_object)
+ end.
+
+%% Test for equivalence of Erlang terms.
+%% Due to arbitrary order of construction, equivalent objects might
+%% compare unequal as erlang terms, so we need to carefully recurse
+%% through aggregates (tuples and objects).
+
+equiv({struct, Props1}, {struct, Props2}) ->
+ equiv_object(Props1, Props2);
+equiv({array, L1}, {array, L2}) ->
+ equiv_list(L1, L2);
+equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
+equiv(S1, S2) when is_list(S1), is_list(S2) -> S1 == S2;
+equiv(true, true) -> true;
+equiv(false, false) -> true;
+equiv(null, null) -> true.
+
+%% Object representation and traversal order is unknown.
+%% Use the sledgehammer and sort property lists.
+
+equiv_object(Props1, Props2) ->
+ L1 = lists:keysort(1, Props1),
+ L2 = lists:keysort(1, Props2),
+ Pairs = lists:zip(L1, L2),
+ true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
+ equiv(K1, K2) and equiv(V1, V2)
+ end, Pairs).
+
+%% Recursively compare tuple elements for equivalence.
+
+equiv_list([], []) ->
+ true;
+equiv_list([V1 | L1], [V2 | L2]) ->
+ equiv(V1, V2) andalso equiv_list(L1, L2).
+
+e2j_vec_test() ->
+ test_one(e2j_test_vec(utf8), 1).
+
+issue33_test() ->
+ %% http://code.google.com/p/mochiweb/issues/detail?id=33
+ Js = {struct, [{"key", [194, 163]}]},
+ Encoder = encoder([{input_encoding, utf8}]),
+ "{\"key\":\"\\u00a3\"}" = lists:flatten(Encoder(Js)).
+
+test_one([], _N) ->
+ %% io:format("~p tests passed~n", [N-1]),
+ ok;
+test_one([{E, J} | Rest], N) ->
+ %% io:format("[~p] ~p ~p~n", [N, E, J]),
+ true = equiv(E, decode(J)),
+ true = equiv(E, decode(encode(E))),
+ test_one(Rest, 1+N).
+
+e2j_test_vec(utf8) ->
+ [
+ {1, "1"},
+ {3.1416, "3.14160"}, % text representation may truncate, trail zeroes
+ {-1, "-1"},
+ {-3.1416, "-3.14160"},
+ {12.0e10, "1.20000e+11"},
+ {1.234E+10, "1.23400e+10"},
+ {-1.234E-10, "-1.23400e-10"},
+ {10.0, "1.0e+01"},
+ {123.456, "1.23456E+2"},
+ {10.0, "1e1"},
+ {"foo", "\"foo\""},
+ {"foo" ++ [5] ++ "bar", "\"foo\\u0005bar\""},
+ {"", "\"\""},
+ {"\"", "\"\\\"\""},
+ {"\n\n\n", "\"\\n\\n\\n\""},
+ {"\\", "\"\\\\\""},
+ {"\" \b\f\r\n\t\"", "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
+ {obj_new(), "{}"},
+ {obj_from_list([{"foo", "bar"}]), "{\"foo\":\"bar\"}"},
+ {obj_from_list([{"foo", "bar"}, {"baz", 123}]),
+ "{\"foo\":\"bar\",\"baz\":123}"},
+ {{array, []}, "[]"},
+ {{array, [{array, []}]}, "[[]]"},
+ {{array, [1, "foo"]}, "[1,\"foo\"]"},
+
+ % json array in a json object
+ {obj_from_list([{"foo", {array, [123]}}]),
+ "{\"foo\":[123]}"},
+
+ % json object in a json object
+ {obj_from_list([{"foo", obj_from_list([{"bar", true}])}]),
+ "{\"foo\":{\"bar\":true}}"},
+
+ % fold evaluation order
+ {obj_from_list([{"foo", {array, []}},
+ {"bar", obj_from_list([{"baz", true}])},
+ {"alice", "bob"}]),
+ "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
+
+ % json object in a json array
+ {{array, [-123, "foo", obj_from_list([{"bar", {array, []}}]), null]},
+ "[-123,\"foo\",{\"bar\":[]},null]"}
+ ].
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochijson2.erl b/1.1.x/src/mochiweb/mochijson2.erl
new file mode 100644
index 00000000..64cabc86
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochijson2.erl
@@ -0,0 +1,802 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Yet another JSON (RFC 4627) library for Erlang. mochijson2 works
+%% with binaries as strings, arrays as lists (without an {array, _}
+%% wrapper), and it only knows how to decode UTF-8 (and ASCII).
+
+-module(mochijson2).
+-author('bob@mochimedia.com').
+-export([encoder/1, encode/1]).
+-export([decoder/1, decode/1]).
+
+% This is a macro to placate syntax highlighters.
+-define(Q, $\").
+-define(ADV_COL(S, N), S#decoder{offset=N+S#decoder.offset,
+ column=N+S#decoder.column}).
+-define(INC_COL(S), S#decoder{offset=1+S#decoder.offset,
+ column=1+S#decoder.column}).
+-define(INC_LINE(S), S#decoder{offset=1+S#decoder.offset,
+ column=1,
+ line=1+S#decoder.line}).
+-define(INC_CHAR(S, C),
+ case C of
+ $\n ->
+ S#decoder{column=1,
+ line=1+S#decoder.line,
+ offset=1+S#decoder.offset};
+ _ ->
+ S#decoder{column=1+S#decoder.column,
+ offset=1+S#decoder.offset}
+ end).
+-define(IS_WHITESPACE(C),
+ (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+
+%% @type iolist() = [char() | binary() | iolist()]
+%% @type iodata() = iolist() | binary()
+%% @type json_string() = atom | binary()
+%% @type json_number() = integer() | float()
+%% @type json_array() = [json_term()]
+%% @type json_object() = {struct, [{json_string(), json_term()}]}
+%% @type json_iolist() = {json, iolist()}
+%% @type json_term() = json_string() | json_number() | json_array() |
+%% json_object() | json_iolist()
+
+-record(encoder, {handler=null,
+ utf8=false}).
+
+-record(decoder, {object_hook=null,
+ offset=0,
+ line=1,
+ column=1,
+ state=null}).
+
+%% @spec encoder([encoder_option()]) -> function()
+%% @doc Create an encoder/1 with the given options.
+%% @type encoder_option() = handler_option() | utf8_option()
+%% @type utf8_option() = boolean(). Emit unicode as utf8 (default - false)
+encoder(Options) ->
+ State = parse_encoder_options(Options, #encoder{}),
+ fun (O) -> json_encode(O, State) end.
+
+%% @spec encode(json_term()) -> iolist()
+%% @doc Encode the given json_term() as JSON to an iolist.
+encode(Any) ->
+ json_encode(Any, #encoder{}).
+
+%% @spec decoder([decoder_option()]) -> function()
+%% @doc Create a decoder/1 with the given options.
+decoder(Options) ->
+ State = parse_decoder_options(Options, #decoder{}),
+ fun (O) -> json_decode(O, State) end.
+
+%% @spec decode(iolist()) -> json_term()
+%% @doc Decode the given iolist to Erlang terms.
+decode(S) ->
+ json_decode(S, #decoder{}).
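+%% For illustration (contrast with mochijson's list-based terms):
+%%   1> mochijson2:decode(<<"{\"foo\": [1, true]}">>).
+%%   {struct,[{<<"foo">>,[1,true]}]}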
+
+%% Internal API
+
+parse_encoder_options([], State) ->
+ State;
+parse_encoder_options([{handler, Handler} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{handler=Handler});
+parse_encoder_options([{utf8, Switch} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{utf8=Switch}).
+
+parse_decoder_options([], State) ->
+ State;
+parse_decoder_options([{object_hook, Hook} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{object_hook=Hook}).
+
+json_encode(true, _State) ->
+ <<"true">>;
+json_encode(false, _State) ->
+ <<"false">>;
+json_encode(null, _State) ->
+ <<"null">>;
+json_encode(I, _State) when is_integer(I) ->
+ integer_to_list(I);
+json_encode(F, _State) when is_float(F) ->
+ mochinum:digits(F);
+json_encode(S, State) when is_binary(S); is_atom(S) ->
+ json_encode_string(S, State);
+json_encode(Array, State) when is_list(Array) ->
+ json_encode_array(Array, State);
+json_encode({struct, Props}, State) when is_list(Props) ->
+ json_encode_proplist(Props, State);
+json_encode({json, IoList}, _State) ->
+ IoList;
+json_encode(Bad, #encoder{handler=null}) ->
+ exit({json_encode, {bad_term, Bad}});
+json_encode(Bad, State=#encoder{handler=Handler}) ->
+ json_encode(Handler(Bad), State).
+
+json_encode_array([], _State) ->
+ <<"[]">>;
+json_encode_array(L, State) ->
+ F = fun (O, Acc) ->
+ [$,, json_encode(O, State) | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "[", L),
+ lists:reverse([$\] | Acc1]).
+
+json_encode_proplist([], _State) ->
+ <<"{}">>;
+json_encode_proplist(Props, State) ->
+ F = fun ({K, V}, Acc) ->
+ KS = json_encode_string(K, State),
+ VS = json_encode(V, State),
+ [$,, VS, $:, KS | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "{", Props),
+ lists:reverse([$\} | Acc1]).
+
+json_encode_string(A, State) when is_atom(A) ->
+ L = atom_to_list(A),
+ case json_string_is_safe(L) of
+ true ->
+ [?Q, L, ?Q];
+ false ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(L), State, [?Q])
+ end;
+json_encode_string(B, State) when is_binary(B) ->
+ case json_bin_is_safe(B) of
+ true ->
+ [?Q, B, ?Q];
+ false ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(B), State, [?Q])
+ end;
+json_encode_string(I, _State) when is_integer(I) ->
+ [?Q, integer_to_list(I), ?Q];
+json_encode_string(L, State) when is_list(L) ->
+ case json_string_is_safe(L) of
+ true ->
+ [?Q, L, ?Q];
+ false ->
+ json_encode_string_unicode(L, State, [?Q])
+ end.
+
+json_string_is_safe([]) ->
+ true;
+json_string_is_safe([C | Rest]) ->
+ case C of
+ ?Q ->
+ false;
+ $\\ ->
+ false;
+ $\b ->
+ false;
+ $\f ->
+ false;
+ $\n ->
+ false;
+ $\r ->
+ false;
+ $\t ->
+ false;
+ C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
+ false;
+ C when C < 16#7f ->
+ json_string_is_safe(Rest);
+ _ ->
+ false
+ end.
+
+json_bin_is_safe(<<>>) ->
+ true;
+json_bin_is_safe(<<C, Rest/binary>>) ->
+ case C of
+ ?Q ->
+ false;
+ $\\ ->
+ false;
+ $\b ->
+ false;
+ $\f ->
+ false;
+ $\n ->
+ false;
+ $\r ->
+ false;
+ $\t ->
+ false;
+ C when C >= 0, C < $\s; C >= 16#7f ->
+ false;
+ C when C < 16#7f ->
+ json_bin_is_safe(Rest)
+ end.
+
+json_encode_string_unicode([], _State, Acc) ->
+ lists:reverse([$\" | Acc]);
+json_encode_string_unicode([C | Cs], State, Acc) ->
+ Acc1 = case C of
+ ?Q ->
+ [?Q, $\\ | Acc];
+ %% Escaping solidus is only useful when trying to protect
+ %% against "</script>" injection attacks which are only
+                   %% possible when JSON is inserted into an HTML document
+ %% in-line. mochijson2 does not protect you from this, so
+ %% if you do insert directly into HTML then you need to
+ %% uncomment the following case or escape the output of encode.
+ %%
+ %% $/ ->
+ %% [$/, $\\ | Acc];
+ %%
+ $\\ ->
+ [$\\, $\\ | Acc];
+ $\b ->
+ [$b, $\\ | Acc];
+ $\f ->
+ [$f, $\\ | Acc];
+ $\n ->
+ [$n, $\\ | Acc];
+ $\r ->
+ [$r, $\\ | Acc];
+ $\t ->
+ [$t, $\\ | Acc];
+ C when C >= 0, C < $\s ->
+ [unihex(C) | Acc];
+ C when C >= 16#7f, C =< 16#10FFFF, State#encoder.utf8 ->
+ [xmerl_ucs:to_utf8(C) | Acc];
+ C when C >= 16#7f, C =< 16#10FFFF, not State#encoder.utf8 ->
+ [unihex(C) | Acc];
+ C when C < 16#7f ->
+ [C | Acc];
+ _ ->
+ exit({json_encode, {bad_char, C}})
+ end,
+ json_encode_string_unicode(Cs, State, Acc1).
+
+hexdigit(C) when C >= 0, C =< 9 ->
+ C + $0;
+hexdigit(C) when C =< 15 ->
+ C + $a - 10.
+
+unihex(C) when C < 16#10000 ->
+ <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
+ Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
+ [$\\, $u | Digits];
+unihex(C) when C =< 16#10FFFF ->
+ N = C - 16#10000,
+ S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
+ S2 = 16#dc00 bor (N band 16#3ff),
+ [unihex(S1), unihex(S2)].
+
+json_decode(L, S) when is_list(L) ->
+ json_decode(iolist_to_binary(L), S);
+json_decode(B, S) ->
+ {Res, S1} = decode1(B, S),
+ {eof, _} = tokenize(B, S1#decoder{state=trim}),
+ Res.
+
+decode1(B, S=#decoder{state=null}) ->
+ case tokenize(B, S#decoder{state=any}) of
+ {{const, C}, S1} ->
+ {C, S1};
+ {start_array, S1} ->
+ decode_array(B, S1);
+ {start_object, S1} ->
+ decode_object(B, S1)
+ end.
+
+make_object(V, #decoder{object_hook=null}) ->
+ V;
+make_object(V, #decoder{object_hook=Hook}) ->
+ Hook(V).
+
+decode_object(B, S) ->
+ decode_object(B, S#decoder{state=key}, []).
+
+decode_object(B, S=#decoder{state=key}, Acc) ->
+ case tokenize(B, S) of
+ {end_object, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, S1#decoder{state=null}};
+ {{const, K}, S1} ->
+ {colon, S2} = tokenize(B, S1),
+ {V, S3} = decode1(B, S2#decoder{state=null}),
+ decode_object(B, S3#decoder{state=comma}, [{K, V} | Acc])
+ end;
+decode_object(B, S=#decoder{state=comma}, Acc) ->
+ case tokenize(B, S) of
+ {end_object, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, S1#decoder{state=null}};
+ {comma, S1} ->
+ decode_object(B, S1#decoder{state=key}, Acc)
+ end.
+
+decode_array(B, S) ->
+ decode_array(B, S#decoder{state=any}, []).
+
+decode_array(B, S=#decoder{state=any}, Acc) ->
+ case tokenize(B, S) of
+ {end_array, S1} ->
+ {lists:reverse(Acc), S1#decoder{state=null}};
+ {start_array, S1} ->
+ {Array, S2} = decode_array(B, S1),
+ decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
+ {start_object, S1} ->
+ {Array, S2} = decode_object(B, S1),
+ decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
+ {{const, Const}, S1} ->
+ decode_array(B, S1#decoder{state=comma}, [Const | Acc])
+ end;
+decode_array(B, S=#decoder{state=comma}, Acc) ->
+ case tokenize(B, S) of
+ {end_array, S1} ->
+ {lists:reverse(Acc), S1#decoder{state=null}};
+ {comma, S1} ->
+ decode_array(B, S1#decoder{state=any}, Acc)
+ end.
+
+tokenize_string(B, S=#decoder{offset=O}) ->
+ case tokenize_string_fast(B, O) of
+ {escape, O1} ->
+ Length = O1 - O,
+ S1 = ?ADV_COL(S, Length),
+ <<_:O/binary, Head:Length/binary, _/binary>> = B,
+ tokenize_string(B, S1, lists:reverse(binary_to_list(Head)));
+ O1 ->
+ Length = O1 - O,
+ <<_:O/binary, String:Length/binary, ?Q, _/binary>> = B,
+ {{const, String}, ?ADV_COL(S, Length + 1)}
+ end.
+
+tokenize_string_fast(B, O) ->
+ case B of
+ <<_:O/binary, ?Q, _/binary>> ->
+ O;
+ <<_:O/binary, $\\, _/binary>> ->
+ {escape, O};
+ <<_:O/binary, C1, _/binary>> when C1 < 128 ->
+ tokenize_string_fast(B, 1 + O);
+ <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
+ C2 >= 128, C2 =< 191 ->
+ tokenize_string_fast(B, 2 + O);
+ <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191 ->
+ tokenize_string_fast(B, 3 + O);
+ <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191,
+ C4 >= 128, C4 =< 191 ->
+ tokenize_string_fast(B, 4 + O);
+ _ ->
+ throw(invalid_utf8)
+ end.
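+%% tokenize_string_fast/2 handles the common case: it validates UTF-8
+%% until the closing quote and returns the end offset. On the first
+%% backslash it returns {escape, O} so that the accumulating
+%% tokenize_string/3 below can take over and unescape the rest.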
+
+tokenize_string(B, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, ?Q, _/binary>> ->
+ {{const, iolist_to_binary(lists:reverse(Acc))}, ?INC_COL(S)};
+ <<_:O/binary, "\\\"", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\" | Acc]);
+ <<_:O/binary, "\\\\", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\\ | Acc]);
+ <<_:O/binary, "\\/", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$/ | Acc]);
+ <<_:O/binary, "\\b", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\b | Acc]);
+ <<_:O/binary, "\\f", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\f | Acc]);
+ <<_:O/binary, "\\n", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\n | Acc]);
+ <<_:O/binary, "\\r", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\r | Acc]);
+ <<_:O/binary, "\\t", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\t | Acc]);
+ <<_:O/binary, "\\u", C3, C2, C1, C0, Rest/binary>> ->
+ C = erlang:list_to_integer([C3, C2, C1, C0], 16),
+ if C > 16#D7FF, C < 16#DC00 ->
+ %% coalesce UTF-16 surrogate pair
+ <<"\\u", D3, D2, D1, D0, _/binary>> = Rest,
+ D = erlang:list_to_integer([D3,D2,D1,D0], 16),
+ [CodePoint] = xmerl_ucs:from_utf16be(<<C:16/big-unsigned-integer,
+ D:16/big-unsigned-integer>>),
+ Acc1 = lists:reverse(xmerl_ucs:to_utf8(CodePoint), Acc),
+ tokenize_string(B, ?ADV_COL(S, 12), Acc1);
+ true ->
+ Acc1 = lists:reverse(xmerl_ucs:to_utf8(C), Acc),
+ tokenize_string(B, ?ADV_COL(S, 6), Acc1)
+ end;
+ <<_:O/binary, C1, _/binary>> when C1 < 128 ->
+ tokenize_string(B, ?INC_CHAR(S, C1), [C1 | Acc]);
+ <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
+ C2 >= 128, C2 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 2), [C2, C1 | Acc]);
+ <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 3), [C3, C2, C1 | Acc]);
+ <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191,
+ C4 >= 128, C4 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 4), [C4, C3, C2, C1 | Acc]);
+ _ ->
+ throw(invalid_utf8)
+ end.
+
+tokenize_number(B, S) ->
+ case tokenize_number(B, sign, S, []) of
+ {{int, Int}, S1} ->
+ {{const, list_to_integer(Int)}, S1};
+ {{float, Float}, S1} ->
+ {{const, list_to_float(Float)}, S1}
+ end.
+
+tokenize_number(B, sign, S=#decoder{offset=O}, []) ->
+ case B of
+ <<_:O/binary, $-, _/binary>> ->
+ tokenize_number(B, int, ?INC_COL(S), [$-]);
+ _ ->
+ tokenize_number(B, int, S, [])
+ end;
+tokenize_number(B, int, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, $0, _/binary>> ->
+ tokenize_number(B, frac, ?INC_COL(S), [$0 | Acc]);
+ <<_:O/binary, C, _/binary>> when C >= $1 andalso C =< $9 ->
+ tokenize_number(B, int1, ?INC_COL(S), [C | Acc])
+ end;
+tokenize_number(B, int1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, int1, ?INC_COL(S), [C | Acc]);
+ _ ->
+ tokenize_number(B, frac, S, Acc)
+ end;
+tokenize_number(B, frac, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, $., C, _/binary>> when C >= $0, C =< $9 ->
+ tokenize_number(B, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
+ <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
+ tokenize_number(B, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
+ _ ->
+ {{int, lists:reverse(Acc)}, S}
+ end;
+tokenize_number(B, frac1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, frac1, ?INC_COL(S), [C | Acc]);
+ <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
+ tokenize_number(B, esign, ?INC_COL(S), [$e | Acc]);
+ _ ->
+ {{float, lists:reverse(Acc)}, S}
+ end;
+tokenize_number(B, esign, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C =:= $- orelse C=:= $+ ->
+ tokenize_number(B, eint, ?INC_COL(S), [C | Acc]);
+ _ ->
+ tokenize_number(B, eint, S, Acc)
+ end;
+tokenize_number(B, eint, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, eint1, ?INC_COL(S), [C | Acc])
+ end;
+tokenize_number(B, eint1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, eint1, ?INC_COL(S), [C | Acc]);
+ _ ->
+ {{float, lists:reverse(Acc)}, S}
+ end.
+
+tokenize(B, S=#decoder{offset=O}) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ tokenize(B, ?INC_CHAR(S, C));
+ <<_:O/binary, "{", _/binary>> ->
+ {start_object, ?INC_COL(S)};
+ <<_:O/binary, "}", _/binary>> ->
+ {end_object, ?INC_COL(S)};
+ <<_:O/binary, "[", _/binary>> ->
+ {start_array, ?INC_COL(S)};
+ <<_:O/binary, "]", _/binary>> ->
+ {end_array, ?INC_COL(S)};
+ <<_:O/binary, ",", _/binary>> ->
+ {comma, ?INC_COL(S)};
+ <<_:O/binary, ":", _/binary>> ->
+ {colon, ?INC_COL(S)};
+ <<_:O/binary, "null", _/binary>> ->
+ {{const, null}, ?ADV_COL(S, 4)};
+ <<_:O/binary, "true", _/binary>> ->
+ {{const, true}, ?ADV_COL(S, 4)};
+ <<_:O/binary, "false", _/binary>> ->
+ {{const, false}, ?ADV_COL(S, 5)};
+ <<_:O/binary, "\"", _/binary>> ->
+ tokenize_string(B, ?INC_COL(S));
+ <<_:O/binary, C, _/binary>> when (C >= $0 andalso C =< $9)
+ orelse C =:= $- ->
+ tokenize_number(B, S);
+ <<_:O/binary>> ->
+ trim = S#decoder.state,
+ {eof, S}
+ end.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+
+%% testing constructs borrowed from the Yaws JSON implementation.
+
+%% Create an object from a list of Key/Value pairs.
+
+obj_new() ->
+ {struct, []}.
+
+is_obj({struct, Props}) ->
+ F = fun ({K, _}) when is_binary(K) -> true end,
+ lists:all(F, Props).
+
+obj_from_list(Props) ->
+ Obj = {struct, Props},
+ ?assert(is_obj(Obj)),
+ Obj.
+
+%% Test for equivalence of Erlang terms.
+%% Due to arbitrary order of construction, equivalent objects might
+%% compare unequal as erlang terms, so we need to carefully recurse
+%% through aggregates (tuples and objects).
+
+equiv({struct, Props1}, {struct, Props2}) ->
+ equiv_object(Props1, Props2);
+equiv(L1, L2) when is_list(L1), is_list(L2) ->
+ equiv_list(L1, L2);
+equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
+equiv(B1, B2) when is_binary(B1), is_binary(B2) -> B1 == B2;
+equiv(A, A) when A =:= true orelse A =:= false orelse A =:= null -> true.
+
+%% Object representation and traversal order is unknown.
+%% Use the sledgehammer and sort property lists.
+
+equiv_object(Props1, Props2) ->
+ L1 = lists:keysort(1, Props1),
+ L2 = lists:keysort(1, Props2),
+ Pairs = lists:zip(L1, L2),
+ true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
+ equiv(K1, K2) and equiv(V1, V2)
+ end, Pairs).
+
+%% Recursively compare tuple elements for equivalence.
+
+equiv_list([], []) ->
+ true;
+equiv_list([V1 | L1], [V2 | L2]) ->
+ equiv(V1, V2) andalso equiv_list(L1, L2).
+
+decode_test() ->
+ [1199344435545.0, 1] = decode(<<"[1199344435545.0,1]">>),
+ <<16#F0,16#9D,16#9C,16#95>> = decode([34,"\\ud835","\\udf15",34]).
+
+e2j_vec_test() ->
+ test_one(e2j_test_vec(utf8), 1).
+
+test_one([], _N) ->
+ %% io:format("~p tests passed~n", [N-1]),
+ ok;
+test_one([{E, J} | Rest], N) ->
+ %% io:format("[~p] ~p ~p~n", [N, E, J]),
+ true = equiv(E, decode(J)),
+ true = equiv(E, decode(encode(E))),
+ test_one(Rest, 1+N).
+
+e2j_test_vec(utf8) ->
+ [
+ {1, "1"},
+ {3.1416, "3.14160"}, %% text representation may truncate or carry trailing zeroes
+ {-1, "-1"},
+ {-3.1416, "-3.14160"},
+ {12.0e10, "1.20000e+11"},
+ {1.234E+10, "1.23400e+10"},
+ {-1.234E-10, "-1.23400e-10"},
+ {10.0, "1.0e+01"},
+ {123.456, "1.23456E+2"},
+ {10.0, "1e1"},
+ {<<"foo">>, "\"foo\""},
+ {<<"foo", 5, "bar">>, "\"foo\\u0005bar\""},
+ {<<"">>, "\"\""},
+ {<<"\n\n\n">>, "\"\\n\\n\\n\""},
+ {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
+ {obj_new(), "{}"},
+ {obj_from_list([{<<"foo">>, <<"bar">>}]), "{\"foo\":\"bar\"}"},
+ {obj_from_list([{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]),
+ "{\"foo\":\"bar\",\"baz\":123}"},
+ {[], "[]"},
+ {[[]], "[[]]"},
+ {[1, <<"foo">>], "[1,\"foo\"]"},
+
+ %% json array in a json object
+ {obj_from_list([{<<"foo">>, [123]}]),
+ "{\"foo\":[123]}"},
+
+ %% json object in a json object
+ {obj_from_list([{<<"foo">>, obj_from_list([{<<"bar">>, true}])}]),
+ "{\"foo\":{\"bar\":true}}"},
+
+ %% fold evaluation order
+ {obj_from_list([{<<"foo">>, []},
+ {<<"bar">>, obj_from_list([{<<"baz">>, true}])},
+ {<<"alice">>, <<"bob">>}]),
+ "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
+
+ %% json object in a json array
+ {[-123, <<"foo">>, obj_from_list([{<<"bar">>, []}]), null],
+ "[-123,\"foo\",{\"bar\":[]},null]"}
+ ].
+
+%% test utf8 encoding
+encoder_utf8_test() ->
+ %% safe conversion case (default)
+ [34,"\\u0001","\\u0442","\\u0435","\\u0441","\\u0442",34] =
+ encode(<<1,"\321\202\320\265\321\201\321\202">>),
+
+ %% raw utf8 output (optional)
+ Enc = mochijson2:encoder([{utf8, true}]),
+ [34,"\\u0001",[209,130],[208,181],[209,129],[209,130],34] =
+ Enc(<<1,"\321\202\320\265\321\201\321\202">>).
+
+input_validation_test() ->
+ Good = [
+ {16#00A3, <<?Q, 16#C2, 16#A3, ?Q>>}, %% pound
+ {16#20AC, <<?Q, 16#E2, 16#82, 16#AC, ?Q>>}, %% euro
+ {16#10196, <<?Q, 16#F0, 16#90, 16#86, 16#96, ?Q>>} %% denarius
+ ],
+ lists:foreach(fun({CodePoint, UTF8}) ->
+ Expect = list_to_binary(xmerl_ucs:to_utf8(CodePoint)),
+ Expect = decode(UTF8)
+ end, Good),
+
+ Bad = [
+ %% 2nd, 3rd, or 4th byte of a multi-byte sequence w/o leading byte
+ <<?Q, 16#80, ?Q>>,
+ %% missing continuations, last byte in each should be 80-BF
+ <<?Q, 16#C2, 16#7F, ?Q>>,
+ <<?Q, 16#E0, 16#80, 16#7F, ?Q>>,
+ <<?Q, 16#F0, 16#80, 16#80, 16#7F, ?Q>>,
+ %% we don't support code points > 10FFFF per RFC 3629
+ <<?Q, 16#F5, 16#80, 16#80, 16#80, ?Q>>,
+ %% escape characters trigger a different code path
+ <<?Q, $\\, $\n, 16#80, ?Q>>
+ ],
+ lists:foreach(
+ fun(X) ->
+ ok = try decode(X) catch invalid_utf8 -> ok end,
+ %% could be {ucs,{bad_utf8_character_code}} or
+ %% {json_encode,{bad_char,_}}
+ {'EXIT', _} = (catch encode(X))
+ end, Bad).
+
+inline_json_test() ->
+ ?assertEqual(<<"\"iodata iodata\"">>,
+ iolist_to_binary(
+ encode({json, [<<"\"iodata">>, " iodata\""]}))),
+ ?assertEqual({struct, [{<<"key">>, <<"iodata iodata">>}]},
+ decode(
+ encode({struct,
+ [{key, {json, [<<"\"iodata">>, " iodata\""]}}]}))),
+ ok.
+
+big_unicode_test() ->
+ UTF8Seq = list_to_binary(xmerl_ucs:to_utf8(16#0001d120)),
+ ?assertEqual(
+ <<"\"\\ud834\\udd20\"">>,
+ iolist_to_binary(encode(UTF8Seq))),
+ ?assertEqual(
+ UTF8Seq,
+ decode(iolist_to_binary(encode(UTF8Seq)))),
+ ok.
+
+custom_decoder_test() ->
+ ?assertEqual(
+ {struct, [{<<"key">>, <<"value">>}]},
+ (decoder([]))("{\"key\": \"value\"}")),
+ F = fun ({struct, [{<<"key">>, <<"value">>}]}) -> win end,
+ ?assertEqual(
+ win,
+ (decoder([{object_hook, F}]))("{\"key\": \"value\"}")),
+ ok.
+
+atom_test() ->
+ %% JSON native atoms
+ [begin
+ ?assertEqual(A, decode(atom_to_list(A))),
+ ?assertEqual(iolist_to_binary(atom_to_list(A)),
+ iolist_to_binary(encode(A)))
+ end || A <- [true, false, null]],
+ %% Atom to string
+ ?assertEqual(
+ <<"\"foo\"">>,
+ iolist_to_binary(encode(foo))),
+ ?assertEqual(
+ <<"\"\\ud834\\udd20\"">>,
+ iolist_to_binary(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))),
+ ok.
+
+key_encode_test() ->
+ %% Some forms are accepted as keys that would not be strings in other
+ %% cases
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{foo, 1}]}))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{<<"foo">>, 1}]}))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{"foo", 1}]}))),
+ ?assertEqual(
+ <<"{\"\\ud834\\udd20\":1}">>,
+ iolist_to_binary(
+ encode({struct, [{[16#0001d120], 1}]}))),
+ ?assertEqual(
+ <<"{\"1\":1}">>,
+ iolist_to_binary(encode({struct, [{1, 1}]}))),
+ ok.
+
+unsafe_chars_test() ->
+ Chars = "\"\\\b\f\n\r\t",
+ [begin
+ ?assertEqual(false, json_string_is_safe([C])),
+ ?assertEqual(false, json_bin_is_safe(<<C>>)),
+ ?assertEqual(<<C>>, decode(encode(<<C>>)))
+ end || C <- Chars],
+ ?assertEqual(
+ false,
+ json_string_is_safe([16#0001d120])),
+ ?assertEqual(
+ false,
+ json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8(16#0001d120)))),
+ ?assertEqual(
+ [16#0001d120],
+ xmerl_ucs:from_utf8(
+ binary_to_list(
+ decode(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))))),
+ ?assertEqual(
+ false,
+ json_string_is_safe([16#110000])),
+ ?assertEqual(
+ false,
+ json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8([16#110000])))),
+ %% solidus can be escaped but isn't unsafe by default
+ ?assertEqual(
+ <<"/">>,
+ decode(<<"\"\\/\"">>)),
+ ok.
+
+int_test() ->
+ ?assertEqual(0, decode("0")),
+ ?assertEqual(1, decode("1")),
+ ?assertEqual(11, decode("11")),
+ ok.
+
+large_int_test() ->
+ ?assertEqual(<<"-2147483649214748364921474836492147483649">>,
+ iolist_to_binary(encode(-2147483649214748364921474836492147483649))),
+ ?assertEqual(<<"2147483649214748364921474836492147483649">>,
+ iolist_to_binary(encode(2147483649214748364921474836492147483649))),
+ ok.
+
+float_test() ->
+ ?assertEqual(<<"-2147483649.0">>, iolist_to_binary(encode(-2147483649.0))),
+ ?assertEqual(<<"2147483648.0">>, iolist_to_binary(encode(2147483648.0))),
+ ok.
+
+handler_test() ->
+ ?assertEqual(
+ {'EXIT',{json_encode,{bad_term,{}}}},
+ catch encode({})),
+ F = fun ({}) -> [] end,
+ ?assertEqual(
+ <<"[]">>,
+ iolist_to_binary((encoder([{handler, F}]))({}))),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochilists.erl b/1.1.x/src/mochiweb/mochilists.erl
new file mode 100644
index 00000000..8981e7b6
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochilists.erl
@@ -0,0 +1,104 @@
+%% @copyright Copyright (c) 2010 Mochi Media, Inc.
+%% @author David Reid <dreid@mochimedia.com>
+
+%% @doc Utility functions for dealing with proplists.
+
+-module(mochilists).
+-author("David Reid <dreid@mochimedia.com>").
+-export([get_value/2, get_value/3, is_defined/2, set_default/2, set_defaults/2]).
+
+%% @spec set_default({Key::term(), Value::term()}, Proplist::list()) -> list()
+%%
+%% @doc Return new Proplist with {Key, Value} set if not is_defined(Key, Proplist).
+set_default({Key, Value}, Proplist) ->
+ case is_defined(Key, Proplist) of
+ true ->
+ Proplist;
+ false ->
+ [{Key, Value} | Proplist]
+ end.
+
+%% @spec set_defaults([{Key::term(), Value::term()}], Proplist::list()) -> list()
+%%
+%% @doc Return new Proplist with {Key, Value} set if not is_defined(Key, Proplist).
+set_defaults(DefaultProps, Proplist) ->
+ lists:foldl(fun set_default/2, Proplist, DefaultProps).
+
+
+%% @spec is_defined(Key::term(), Proplist::list()) -> bool()
+%%
+%% @doc Returns true if Proplist contains at least one entry associated
+%% with Key, otherwise returns false.
+is_defined(Key, Proplist) ->
+ lists:keyfind(Key, 1, Proplist) =/= false.
+
+
+%% @spec get_value(Key::term(), Proplist::list()) -> term() | undefined
+%%
+%% @doc Return the value of <code>Key</code> or undefined
+get_value(Key, Proplist) ->
+ get_value(Key, Proplist, undefined).
+
+%% @spec get_value(Key::term(), Proplist::list(), Default::term()) -> term()
+%%
+%% @doc Return the value of <code>Key</code> or <code>Default</code>
+get_value(_Key, [], Default) ->
+ Default;
+get_value(Key, Proplist, Default) ->
+ case lists:keyfind(Key, 1, Proplist) of
+ false ->
+ Default;
+ {Key, Value} ->
+ Value
+ end.
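+
+%% Illustrative shell session (not part of the original module; results
+%% mirror the tests below):
+%%   1> mochilists:get_value(foo, [{foo, bar}], default).
+%%   bar
+%%   2> mochilists:set_defaults([{k, vee}, {kay, vee}], [{k, v}]).
+%%   [{kay,vee},{k,v}]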
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+set_defaults_test() ->
+ ?assertEqual(
+ [{k, v}],
+ set_defaults([{k, v}], [])),
+ ?assertEqual(
+ [{k, v}],
+ set_defaults([{k, vee}], [{k, v}])),
+ ?assertEqual(
+ lists:sort([{kay, vee}, {k, v}]),
+ lists:sort(set_defaults([{k, vee}, {kay, vee}], [{k, v}]))),
+ ok.
+
+set_default_test() ->
+ ?assertEqual(
+ [{k, v}],
+ set_default({k, v}, [])),
+ ?assertEqual(
+ [{k, v}],
+ set_default({k, vee}, [{k, v}])),
+ ok.
+
+get_value_test() ->
+ ?assertEqual(
+ undefined,
+ get_value(foo, [])),
+ ?assertEqual(
+ undefined,
+ get_value(foo, [{bar, baz}])),
+ ?assertEqual(
+ bar,
+ get_value(foo, [{foo, bar}])),
+ ?assertEqual(
+ default,
+ get_value(foo, [], default)),
+ ?assertEqual(
+ default,
+ get_value(foo, [{bar, baz}], default)),
+ ?assertEqual(
+ bar,
+ get_value(foo, [{foo, bar}], default)),
+ ok.
+
+-endif.
+
diff --git a/1.1.x/src/mochiweb/mochilogfile2.erl b/1.1.x/src/mochiweb/mochilogfile2.erl
new file mode 100644
index 00000000..c34ee73a
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochilogfile2.erl
@@ -0,0 +1,140 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc Write newline-delimited log files, ensuring that if a truncated
+%% entry is found on log open then it is fixed before writing. Uses
+%% delayed writes and raw files for performance.
+-module(mochilogfile2).
+-author('bob@mochimedia.com').
+
+-export([open/1, write/2, close/1, name/1]).
+
+%% @spec open(Name) -> Handle
+%% @doc Open the log file Name, creating or appending as necessary. All data
+%% at the end of the file will be truncated until a newline is found, to
+%% ensure that all records are complete.
+open(Name) ->
+ {ok, FD} = file:open(Name, [raw, read, write, delayed_write, binary]),
+ fix_log(FD),
+ {?MODULE, Name, FD}.
+
+%% @spec name(Handle) -> string()
+%% @doc Return the path of the log file.
+name({?MODULE, Name, _FD}) ->
+ Name.
+
+%% @spec write(Handle, IoData) -> ok
+%% @doc Write IoData to the log file referenced by Handle.
+write({?MODULE, _Name, FD}, IoData) ->
+ ok = file:write(FD, [IoData, $\n]),
+ ok.
+
+%% @spec close(Handle) -> ok
+%% @doc Close the log file referenced by Handle.
+close({?MODULE, _Name, FD}) ->
+ ok = file:sync(FD),
+ ok = file:close(FD),
+ ok.
+
+fix_log(FD) ->
+ {ok, Location} = file:position(FD, eof),
+ Seek = find_last_newline(FD, Location),
+ {ok, Seek} = file:position(FD, Seek),
+ ok = file:truncate(FD),
+ ok.
+
+%% Seek backwards to the last valid log entry
+find_last_newline(_FD, N) when N =< 1 ->
+ 0;
+find_last_newline(FD, Location) ->
+ case file:pread(FD, Location - 1, 1) of
+ {ok, <<$\n>>} ->
+ Location;
+ {ok, _} ->
+ find_last_newline(FD, Location - 1)
+ end.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+name_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "open_close_test.log"),
+ H = open(FileName),
+ ?assertEqual(
+ FileName,
+ name(H)),
+ close(H),
+ file:delete(FileName),
+ file:del_dir(D),
+ ok.
+
+open_close_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "open_close_test.log"),
+ OpenClose = fun () ->
+ H = open(FileName),
+ ?assertEqual(
+ true,
+ filelib:is_file(FileName)),
+ ok = close(H),
+ ?assertEqual(
+ {ok, <<>>},
+ file:read_file(FileName)),
+ ok
+ end,
+ OpenClose(),
+ OpenClose(),
+ file:delete(FileName),
+ file:del_dir(D),
+ ok.
+
+write_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "write_test.log"),
+ F = fun () ->
+ H = open(FileName),
+ write(H, "test line"),
+ close(H),
+ ok
+ end,
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\n">>},
+ file:read_file(FileName)),
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\ntest line\n">>},
+ file:read_file(FileName)),
+ file:delete(FileName),
+ file:del_dir(D),
+ ok.
+
+fix_log_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "write_test.log"),
+ file:write_file(FileName, <<"first line good\nsecond line bad">>),
+ F = fun () ->
+ H = open(FileName),
+ write(H, "test line"),
+ close(H),
+ ok
+ end,
+ F(),
+ ?assertEqual(
+ {ok, <<"first line good\ntest line\n">>},
+ file:read_file(FileName)),
+ file:write_file(FileName, <<"first line bad">>),
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\n">>},
+ file:read_file(FileName)),
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\ntest line\n">>},
+ file:read_file(FileName)),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochinum.erl b/1.1.x/src/mochiweb/mochinum.erl
new file mode 100644
index 00000000..a7e2bfbc
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochinum.erl
@@ -0,0 +1,331 @@
+%% @copyright 2007 Mochi Media, Inc.
+%% @author Bob Ippolito <bob@mochimedia.com>
+
+%% @doc Useful numeric algorithms for floats that cover some deficiencies
+%% in the math module. More interesting is digits/1, which implements
+%% the algorithm from:
+%% http://www.cs.indiana.edu/~burger/fp/index.html
+%% See also "Printing Floating-Point Numbers Quickly and Accurately"
+%% in Proceedings of the SIGPLAN '96 Conference on Programming Language
+%% Design and Implementation.
+
+-module(mochinum).
+-author("Bob Ippolito <bob@mochimedia.com>").
+-export([digits/1, frexp/1, int_pow/2, int_ceil/1]).
+
+%% IEEE 754 Float exponent bias
+-define(FLOAT_BIAS, 1022).
+-define(MIN_EXP, -1074).
+-define(BIG_POW, 4503599627370496).
+
+%% External API
+
+%% @spec digits(number()) -> string()
+%% @doc Returns a string that accurately represents the given integer or float
+%% using a conservative amount of digits. Great for generating
+%% human-readable output, or compact ASCII serializations for floats.
+digits(N) when is_integer(N) ->
+ integer_to_list(N);
+digits(0.0) ->
+ "0.0";
+digits(Float) ->
+ {Frac, Exp} = frexp(Float),
+ Exp1 = Exp - 53,
+ Frac1 = trunc(abs(Frac) * (1 bsl 53)),
+ [Place | Digits] = digits1(Float, Exp1, Frac1),
+ R = insert_decimal(Place, [$0 + D || D <- Digits]),
+ case Float < 0 of
+ true ->
+ [$- | R];
+ _ ->
+ R
+ end.
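+
+%% For example (cf. digits_test/0 below): digits(0.1) yields "0.1" and
+%% digits(1000000.0) yields "1.0e+6"; list_to_float/1 round-trips both.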
+
+%% @spec frexp(F::float()) -> {Frac::float(), Exp::integer()}
+%% @doc Return the fractional and exponent part of an IEEE 754 double,
+%% equivalent to the libc function of the same name.
+%% F = Frac * pow(2, Exp).
+frexp(F) ->
+ frexp1(unpack(F)).
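+
+%% E.g. (see frexp_test/0 below): frexp(1.0) returns {0.5, 1} because
+%% 1.0 = 0.5 * math:pow(2, 1), and frexp(-1.0) returns {-0.5, 1}.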
+
+%% @spec int_pow(X::integer(), N::integer()) -> Y::integer()
+%% @doc Moderately efficient way to exponentiate integers.
+%% int_pow(10, 2) = 100.
+int_pow(_X, 0) ->
+ 1;
+int_pow(X, N) when N > 0 ->
+ int_pow(X, N, 1).
+
+%% @spec int_ceil(F::float()) -> integer()
+%% @doc Return the ceiling of F as an integer. The ceiling is defined as
+%% F when F == trunc(F);
+%% trunc(F) when F &lt; 0;
+%% trunc(F) + 1 when F &gt; 0.
+int_ceil(X) ->
+ T = trunc(X),
+ case (X - T) of
+ Neg when Neg < 0 -> T;
+ Pos when Pos > 0 -> T + 1;
+ _ -> T
+ end.
+
+
+%% Internal API
+
+int_pow(X, N, R) when N < 2 ->
+ R * X;
+int_pow(X, N, R) ->
+ int_pow(X * X, N bsr 1, case N band 1 of 1 -> R * X; 0 -> R end).
+
+insert_decimal(0, S) ->
+ "0." ++ S;
+insert_decimal(Place, S) when Place > 0 ->
+ L = length(S),
+ case Place - L of
+ 0 ->
+ S ++ ".0";
+ N when N < 0 ->
+ {S0, S1} = lists:split(L + N, S),
+ S0 ++ "." ++ S1;
+ N when N < 6 ->
+ %% More places than digits
+ S ++ lists:duplicate(N, $0) ++ ".0";
+ _ ->
+ insert_decimal_exp(Place, S)
+ end;
+insert_decimal(Place, S) when Place > -6 ->
+ "0." ++ lists:duplicate(abs(Place), $0) ++ S;
+insert_decimal(Place, S) ->
+ insert_decimal_exp(Place, S).
+
+insert_decimal_exp(Place, S) ->
+ [C | S0] = S,
+ S1 = case S0 of
+ [] ->
+ "0";
+ _ ->
+ S0
+ end,
+ Exp = case Place < 0 of
+ true ->
+ "e-";
+ false ->
+ "e+"
+ end,
+ [C] ++ "." ++ S1 ++ Exp ++ integer_to_list(abs(Place - 1)).
+
+
+digits1(Float, Exp, Frac) ->
+ Round = ((Frac band 1) =:= 0),
+ case Exp >= 0 of
+ true ->
+ BExp = 1 bsl Exp,
+ case (Frac =/= ?BIG_POW) of
+ true ->
+ scale((Frac * BExp * 2), 2, BExp, BExp,
+ Round, Round, Float);
+ false ->
+ scale((Frac * BExp * 4), 4, (BExp * 2), BExp,
+ Round, Round, Float)
+ end;
+ false ->
+ case (Exp =:= ?MIN_EXP) orelse (Frac =/= ?BIG_POW) of
+ true ->
+ scale((Frac * 2), 1 bsl (1 - Exp), 1, 1,
+ Round, Round, Float);
+ false ->
+ scale((Frac * 4), 1 bsl (2 - Exp), 2, 1,
+ Round, Round, Float)
+ end
+ end.
+
+scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
+ Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
+ %% Note that the scheme implementation uses a 326 element look-up table
+ %% for int_pow(10, N) where we do not.
+ case Est >= 0 of
+ true ->
+ fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
+ LowOk, HighOk);
+ false ->
+ Scale = int_pow(10, -Est),
+ fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
+ LowOk, HighOk)
+ end.
+
+fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
+ TooLow = case HighOk of
+ true ->
+ (R + MPlus) >= S;
+ false ->
+ (R + MPlus) > S
+ end,
+ case TooLow of
+ true ->
+ [(K + 1) | generate(R, S, MPlus, MMinus, LowOk, HighOk)];
+ false ->
+ [K | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)]
+ end.
+
+generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
+ D = R0 div S,
+ R = R0 rem S,
+ TC1 = case LowOk of
+ true ->
+ R =< MMinus;
+ false ->
+ R < MMinus
+ end,
+ TC2 = case HighOk of
+ true ->
+ (R + MPlus) >= S;
+ false ->
+ (R + MPlus) > S
+ end,
+ case TC1 of
+ false ->
+ case TC2 of
+ false ->
+ [D | generate(R * 10, S, MPlus * 10, MMinus * 10,
+ LowOk, HighOk)];
+ true ->
+ [D + 1]
+ end;
+ true ->
+ case TC2 of
+ false ->
+ [D];
+ true ->
+ case R * 2 < S of
+ true ->
+ [D];
+ false ->
+ [D + 1]
+ end
+ end
+ end.
+
+unpack(Float) ->
+ <<Sign:1, Exp:11, Frac:52>> = <<Float:64/float>>,
+ {Sign, Exp, Frac}.
+
+frexp1({_Sign, 0, 0}) ->
+ {0.0, 0};
+frexp1({Sign, 0, Frac}) ->
+ Exp = log2floor(Frac),
+ <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, (Frac-1):52>>,
+ {Frac1, -(?FLOAT_BIAS) - 52 + Exp};
+frexp1({Sign, Exp, Frac}) ->
+ <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, Frac:52>>,
+ {Frac1, Exp - ?FLOAT_BIAS}.
+
+log2floor(Int) ->
+ log2floor(Int, 0).
+
+log2floor(0, N) ->
+ N;
+log2floor(Int, N) ->
+ log2floor(Int bsr 1, 1 + N).
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+int_ceil_test() ->
+ 1 = int_ceil(0.0001),
+ 0 = int_ceil(0.0),
+ 1 = int_ceil(0.99),
+ 1 = int_ceil(1.0),
+ -1 = int_ceil(-1.5),
+ -2 = int_ceil(-2.0),
+ ok.
+
+int_pow_test() ->
+ 1 = int_pow(1, 1),
+ 1 = int_pow(1, 0),
+ 1 = int_pow(10, 0),
+ 10 = int_pow(10, 1),
+ 100 = int_pow(10, 2),
+ 1000 = int_pow(10, 3),
+ ok.
+
+digits_test() ->
+ ?assertEqual("0",
+ digits(0)),
+ ?assertEqual("0.0",
+ digits(0.0)),
+ ?assertEqual("1.0",
+ digits(1.0)),
+ ?assertEqual("-1.0",
+ digits(-1.0)),
+ ?assertEqual("0.1",
+ digits(0.1)),
+ ?assertEqual("0.01",
+ digits(0.01)),
+ ?assertEqual("0.001",
+ digits(0.001)),
+ ?assertEqual("1.0e+6",
+ digits(1000000.0)),
+ ?assertEqual("0.5",
+ digits(0.5)),
+ ?assertEqual("4503599627370496.0",
+ digits(4503599627370496.0)),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ ?assertEqual("4.9406564584124654e-324",
+ digits(SmallDenorm)),
+ ?assertEqual(SmallDenorm,
+ list_to_float(digits(SmallDenorm))),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ ?assertEqual("2.225073858507201e-308",
+ digits(BigDenorm)),
+ ?assertEqual(BigDenorm,
+ list_to_float(digits(BigDenorm))),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ ?assertEqual("2.2250738585072014e-308",
+ digits(SmallNorm)),
+ ?assertEqual(SmallNorm,
+ list_to_float(digits(SmallNorm))),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ ?assertEqual("1.7976931348623157e+308",
+ digits(LargeNorm)),
+ ?assertEqual(LargeNorm,
+ list_to_float(digits(LargeNorm))),
+ ok.
+
+frexp_test() ->
+ %% zero
+ {0.0, 0} = frexp(0.0),
+ %% one
+ {0.5, 1} = frexp(1.0),
+ %% negative one
+ {-0.5, 1} = frexp(-1.0),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ {0.5, -1073} = frexp(SmallDenorm),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ {0.99999999999999978, -1022} = frexp(BigDenorm),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ {0.5, -1021} = frexp(SmallNorm),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ {0.99999999999999989, 1024} = frexp(LargeNorm),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochitemp.erl b/1.1.x/src/mochiweb/mochitemp.erl
new file mode 100644
index 00000000..bb23d2a6
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochitemp.erl
@@ -0,0 +1,310 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc Create temporary files and directories. Requires crypto to be started.
+
+-module(mochitemp).
+-export([gettempdir/0]).
+-export([mkdtemp/0, mkdtemp/3]).
+-export([rmtempdir/1]).
+%% -export([mkstemp/4]).
+-define(SAFE_CHARS, {$a, $b, $c, $d, $e, $f, $g, $h, $i, $j, $k, $l, $m,
+ $n, $o, $p, $q, $r, $s, $t, $u, $v, $w, $x, $y, $z,
+ $A, $B, $C, $D, $E, $F, $G, $H, $I, $J, $K, $L, $M,
+ $N, $O, $P, $Q, $R, $S, $T, $U, $V, $W, $X, $Y, $Z,
+ $0, $1, $2, $3, $4, $5, $6, $7, $8, $9, $_}).
+-define(TMP_MAX, 10000).
+
+-include_lib("kernel/include/file.hrl").
+
+%% TODO: An ugly wrapper over the mktemp tool with open_port and sadness?
+%% We can't implement this race-free in Erlang without the ability
+%% to issue O_CREAT|O_EXCL. I suppose we could hack something with
+%% mkdtemp, del_dir, open.
+%% mkstemp(Suffix, Prefix, Dir, Options) ->
+%% ok.
+
+rmtempdir(Dir) ->
+ case file:del_dir(Dir) of
+ {error, eexist} ->
+ ok = rmtempdirfiles(Dir),
+ ok = file:del_dir(Dir);
+ ok ->
+ ok
+ end.
+
+rmtempdirfiles(Dir) ->
+ {ok, Files} = file:list_dir(Dir),
+ ok = rmtempdirfiles(Dir, Files).
+
+rmtempdirfiles(_Dir, []) ->
+ ok;
+rmtempdirfiles(Dir, [Basename | Rest]) ->
+ Path = filename:join([Dir, Basename]),
+ case filelib:is_dir(Path) of
+ true ->
+ ok = rmtempdir(Path);
+ false ->
+ ok = file:delete(Path)
+ end,
+ rmtempdirfiles(Dir, Rest).
+
+mkdtemp() ->
+ mkdtemp("", "tmp", gettempdir()).
+
+mkdtemp(Suffix, Prefix, Dir) ->
+ mkdtemp_n(rngpath_fun(Suffix, Prefix, Dir), ?TMP_MAX).
+
+mkdtemp_n(RngPath, 1) ->
+ make_dir(RngPath());
+mkdtemp_n(RngPath, N) ->
+ try make_dir(RngPath())
+ catch throw:{error, eexist} ->
+ mkdtemp_n(RngPath, N - 1)
+ end.
+
+make_dir(Path) ->
+ case file:make_dir(Path) of
+ ok ->
+ ok;
+ E={error, eexist} ->
+ throw(E)
+ end,
+ %% Small window for a race condition here because dir is created 777
+ ok = file:write_file_info(Path, #file_info{mode=8#0700}),
+ Path.
+
+%% Note: mkdtemp/3 calls this as rngpath_fun(Suffix, Prefix, Dir); the
+%% argument names below now match that call so Prefix is genuinely prepended.
+rngpath_fun(Suffix, Prefix, Dir) ->
+ fun () ->
+ filename:join([Dir, Prefix ++ rngchars(6) ++ Suffix])
+ end.
+
+rngchars(0) ->
+ "";
+rngchars(N) ->
+ [rngchar() | rngchars(N - 1)].
+
+rngchar() ->
+ rngchar(crypto:rand_uniform(0, tuple_size(?SAFE_CHARS))).
+
+rngchar(C) ->
+ element(1 + C, ?SAFE_CHARS).
+
+%% @spec gettempdir() -> string()
+%% @doc Get a usable temporary directory using the first of these that is a directory:
+%% $TMPDIR, $TMP, $TEMP, "/tmp", "/var/tmp", "/usr/tmp", ".".
+gettempdir() ->
+ gettempdir(gettempdir_checks(), fun normalize_dir/1).
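+
+%% For instance, with TMPDIR set to "/" this returns "/"; with all three
+%% variables unset it falls back to "/tmp" (when that is a directory) and
+%% ultimately the current working directory. See gettempdir_env_test/0.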
+
+gettempdir_checks() ->
+ [{fun os:getenv/1, ["TMPDIR", "TMP", "TEMP"]},
+ {fun gettempdir_identity/1, ["/tmp", "/var/tmp", "/usr/tmp"]},
+ {fun gettempdir_cwd/1, [cwd]}].
+
+gettempdir_identity(L) ->
+ L.
+
+gettempdir_cwd(cwd) ->
+ {ok, L} = file:get_cwd(),
+ L.
+
+gettempdir([{_F, []} | RestF], Normalize) ->
+ gettempdir(RestF, Normalize);
+gettempdir([{F, [L | RestL]} | RestF], Normalize) ->
+ case Normalize(F(L)) of
+ false ->
+ gettempdir([{F, RestL} | RestF], Normalize);
+ Dir ->
+ Dir
+ end.
+
+normalize_dir(False) when False =:= false orelse False =:= "" ->
+ %% Erlang doesn't have an unsetenv, wtf.
+ false;
+normalize_dir(L) ->
+ Dir = filename:absname(L),
+ case filelib:is_dir(Dir) of
+ false ->
+ false;
+ true ->
+ Dir
+ end.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+pushenv(L) ->
+ [{K, os:getenv(K)} || K <- L].
+popenv(L) ->
+ F = fun ({K, false}) ->
+ %% Erlang doesn't have an unsetenv, wtf.
+ os:putenv(K, "");
+ ({K, V}) ->
+ os:putenv(K, V)
+ end,
+ lists:foreach(F, L).
+
+gettempdir_fallback_test() ->
+ ?assertEqual(
+ "/",
+ gettempdir([{fun gettempdir_identity/1, ["/--not-here--/"]},
+ {fun gettempdir_identity/1, ["/"]}],
+ fun normalize_dir/1)),
+ ?assertEqual(
+ "/",
+ %% simulate a true os:getenv unset env
+ gettempdir([{fun gettempdir_identity/1, [false]},
+ {fun gettempdir_identity/1, ["/"]}],
+ fun normalize_dir/1)),
+ ok.
+
+gettempdir_identity_test() ->
+ ?assertEqual(
+ "/",
+ gettempdir([{fun gettempdir_identity/1, ["/"]}], fun normalize_dir/1)),
+ ok.
+
+gettempdir_cwd_test() ->
+ {ok, Cwd} = file:get_cwd(),
+ ?assertEqual(
+ normalize_dir(Cwd),
+ gettempdir([{fun gettempdir_cwd/1, [cwd]}], fun normalize_dir/1)),
+ ok.
+
+rngchars_test() ->
+ crypto:start(),
+ ?assertEqual(
+ "",
+ rngchars(0)),
+ ?assertEqual(
+ 10,
+ length(rngchars(10))),
+ ok.
+
+rngchar_test() ->
+ ?assertEqual(
+ $a,
+ rngchar(0)),
+ ?assertEqual(
+ $A,
+ rngchar(26)),
+ ?assertEqual(
+ $_,
+ rngchar(62)),
+ ok.
+
+mkdtemp_n_failonce_test() ->
+ crypto:start(),
+ D = mkdtemp(),
+ Path = filename:join([D, "testdir"]),
+ %% Toggle the existence of a dir so that it fails
+ %% the first time and succeeds the second.
+ F = fun () ->
+ case filelib:is_dir(Path) of
+ true ->
+ file:del_dir(Path);
+ false ->
+ file:make_dir(Path)
+ end,
+ Path
+ end,
+ try
+ %% Fails the first time
+ ?assertThrow(
+ {error, eexist},
+ mkdtemp_n(F, 1)),
+ %% Reset state
+ file:del_dir(Path),
+ %% Succeeds the second time
+ ?assertEqual(
+ Path,
+ mkdtemp_n(F, 2))
+ after rmtempdir(D)
+ end,
+ ok.
+
+mkdtemp_n_fail_test() ->
+ {ok, Cwd} = file:get_cwd(),
+ ?assertThrow(
+ {error, eexist},
+ mkdtemp_n(fun () -> Cwd end, 1)),
+ ?assertThrow(
+ {error, eexist},
+ mkdtemp_n(fun () -> Cwd end, 2)),
+ ok.
+
+make_dir_fail_test() ->
+ {ok, Cwd} = file:get_cwd(),
+ ?assertThrow(
+ {error, eexist},
+ make_dir(Cwd)),
+ ok.
+
+mkdtemp_test() ->
+ crypto:start(),
+ D = mkdtemp(),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D)),
+ ?assertEqual(
+ ok,
+ file:del_dir(D)),
+ ok.
+
+rmtempdir_test() ->
+ crypto:start(),
+ D1 = mkdtemp(),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D1)),
+ ?assertEqual(
+ ok,
+ rmtempdir(D1)),
+ D2 = mkdtemp(),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D2)),
+ ok = file:write_file(filename:join([D2, "foo"]), <<"bytes">>),
+ D3 = mkdtemp("suffix", "prefix", D2),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D3)),
+ ok = file:write_file(filename:join([D3, "foo"]), <<"bytes">>),
+ ?assertEqual(
+ ok,
+ rmtempdir(D2)),
+ ?assertEqual(
+ {error, enoent},
+ file:consult(D3)),
+ ?assertEqual(
+ {error, enoent},
+ file:consult(D2)),
+ ok.
+
+gettempdir_env_test() ->
+ Env = pushenv(["TMPDIR", "TEMP", "TMP"]),
+ FalseEnv = [{"TMPDIR", false}, {"TEMP", false}, {"TMP", false}],
+ try
+ popenv(FalseEnv),
+ popenv([{"TMPDIR", "/"}]),
+ ?assertEqual(
+ "/",
+ os:getenv("TMPDIR")),
+ ?assertEqual(
+ "/",
+ gettempdir()),
+ {ok, Cwd} = file:get_cwd(),
+ popenv(FalseEnv),
+ popenv([{"TMP", Cwd}]),
+ ?assertEqual(
+ normalize_dir(Cwd),
+ gettempdir())
+ after popenv(Env)
+ end,
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiutf8.erl b/1.1.x/src/mochiweb/mochiutf8.erl
new file mode 100644
index 00000000..206e1186
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiutf8.erl
@@ -0,0 +1,316 @@
+%% @copyright 2010 Mochi Media, Inc.
+%% @author Bob Ippolito <bob@mochimedia.com>
+
+%% @doc Algorithm to convert any binary to a valid UTF-8 sequence by ignoring
+%% invalid bytes.
+
+-module(mochiutf8).
+-export([valid_utf8_bytes/1, codepoint_to_bytes/1, bytes_to_codepoints/1]).
+-export([bytes_foldl/3, codepoint_foldl/3, read_codepoint/1, len/1]).
+
+%% External API
+
+-type unichar_low() :: 0..16#d7ff.
+-type unichar_high() :: 16#e000..16#10ffff.
+-type unichar() :: unichar_low() | unichar_high().
+
+-spec codepoint_to_bytes(unichar()) -> binary().
+%% @doc Convert a unicode codepoint to UTF-8 bytes.
+codepoint_to_bytes(C) when (C >= 16#00 andalso C =< 16#7f) ->
+ %% U+0000 - U+007F - 7 bits
+ <<C>>;
+codepoint_to_bytes(C) when (C >= 16#080 andalso C =< 16#07FF) ->
+ %% U+0080 - U+07FF - 11 bits
+ <<0:5, B1:5, B0:6>> = <<C:16>>,
+ <<2#110:3, B1:5,
+ 2#10:2, B0:6>>;
+codepoint_to_bytes(C) when (C >= 16#0800 andalso C =< 16#FFFF) andalso
+ (C < 16#D800 orelse C > 16#DFFF) ->
+ %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
+ <<B2:4, B1:6, B0:6>> = <<C:16>>,
+ <<2#1110:4, B2:4,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6>>;
+codepoint_to_bytes(C) when (C >= 16#010000 andalso C =< 16#10FFFF) ->
+ %% U+10000 - U+10FFFF - 21 bits
+ <<0:3, B3:3, B2:6, B1:6, B0:6>> = <<C:24>>,
+ <<2#11110:5, B3:3,
+ 2#10:2, B2:6,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6>>.
+
+-spec codepoints_to_bytes([unichar()]) -> binary().
+%% @doc Convert a list of codepoints to a UTF-8 binary.
+codepoints_to_bytes(L) ->
+ <<<<(codepoint_to_bytes(C))/binary>> || C <- L>>.
+
+-spec read_codepoint(binary()) -> {unichar(), binary(), binary()}.
+read_codepoint(Bin = <<2#0:1, C:7, Rest/binary>>) ->
+ %% U+0000 - U+007F - 7 bits
+ <<B:1/binary, _/binary>> = Bin,
+ {C, B, Rest};
+read_codepoint(Bin = <<2#110:3, B1:5,
+ 2#10:2, B0:6,
+ Rest/binary>>) ->
+ %% U+0080 - U+07FF - 11 bits
+ case <<B1:5, B0:6>> of
+ <<C:11>> when C >= 16#80 ->
+ <<B:2/binary, _/binary>> = Bin,
+ {C, B, Rest}
+ end;
+read_codepoint(Bin = <<2#1110:4, B2:4,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6,
+ Rest/binary>>) ->
+ %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
+ case <<B2:4, B1:6, B0:6>> of
+ <<C:16>> when (C >= 16#0800 andalso C =< 16#FFFF) andalso
+ (C < 16#D800 orelse C > 16#DFFF) ->
+ <<B:3/binary, _/binary>> = Bin,
+ {C, B, Rest}
+ end;
+read_codepoint(Bin = <<2#11110:5, B3:3,
+ 2#10:2, B2:6,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6,
+ Rest/binary>>) ->
+ %% U+10000 - U+10FFFF - 21 bits
+ case <<B3:3, B2:6, B1:6, B0:6>> of
+ <<C:21>> when (C >= 16#010000 andalso C =< 16#10FFFF) ->
+ <<B:4/binary, _/binary>> = Bin,
+ {C, B, Rest}
+ end.
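+
+%% For example, read_codepoint(<<"a", 226, 152, 131>>) returns
+%% {$a, <<"a">>, <<226, 152, 131>>}, and reading the remainder yields
+%% {16#2603, <<226, 152, 131>>, <<>>} -- the snowman code point used in
+%% the tests below.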
+
+-spec codepoint_foldl(fun((unichar(), _) -> _), _, binary()) -> _.
+codepoint_foldl(F, Acc, <<>>) when is_function(F, 2) ->
+ Acc;
+codepoint_foldl(F, Acc, Bin) ->
+ {C, _, Rest} = read_codepoint(Bin),
+ codepoint_foldl(F, F(C, Acc), Rest).
+
+-spec bytes_foldl(fun((binary(), _) -> _), _, binary()) -> _.
+bytes_foldl(F, Acc, <<>>) when is_function(F, 2) ->
+ Acc;
+bytes_foldl(F, Acc, Bin) ->
+ {_, B, Rest} = read_codepoint(Bin),
+ bytes_foldl(F, F(B, Acc), Rest).
+
+-spec bytes_to_codepoints(binary()) -> [unichar()].
+bytes_to_codepoints(B) ->
+ lists:reverse(codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [], B)).
+
+-spec len(binary()) -> non_neg_integer().
+len(<<>>) ->
+ 0;
+len(B) ->
+ {_, _, Rest} = read_codepoint(B),
+ 1 + len(Rest).
+
+-spec valid_utf8_bytes(B::binary()) -> binary().
+%% @doc Return only the bytes in B that represent valid UTF-8. Uses
+%% the following recursive algorithm: skip one byte if B does not
+%% follow UTF-8 syntax (a 1-4 byte encoding of some number), and skip
+%% a sequence of 2-4 bytes if it represents an overlong encoding or a
+%% bad code point (a surrogate U+D800 - U+DFFF, or > U+10FFFF).
+valid_utf8_bytes(B) when is_binary(B) ->
+ binary_skip_bytes(B, invalid_utf8_indexes(B)).
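+
+%% E.g. valid_utf8_bytes(<<"mu: ", 194, 181>>) is returned unchanged,
+%% while overlong encodings, unpaired surrogates, and out-of-range
+%% sequences are dropped and the surrounding ASCII kept; see
+%% valid_utf8_bytes_test/0 below.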
+
+%% Internal API
+
+-spec binary_skip_bytes(binary(), [non_neg_integer()]) -> binary().
+%% @doc Return B, but skipping the 0-based indexes in L.
+binary_skip_bytes(B, []) ->
+ B;
+binary_skip_bytes(B, L) ->
+ binary_skip_bytes(B, L, 0, []).
+
+%% @private
+-spec binary_skip_bytes(binary(), [non_neg_integer()], non_neg_integer(), iolist()) -> binary().
+binary_skip_bytes(B, [], _N, Acc) ->
+ iolist_to_binary(lists:reverse([B | Acc]));
+binary_skip_bytes(<<_, RestB/binary>>, [N | RestL], N, Acc) ->
+ binary_skip_bytes(RestB, RestL, 1 + N, Acc);
+binary_skip_bytes(<<C, RestB/binary>>, L, N, Acc) ->
+ binary_skip_bytes(RestB, L, 1 + N, [C | Acc]).
+
+-spec invalid_utf8_indexes(binary()) -> [non_neg_integer()].
+%% @doc Return the 0-based indexes in B that are not valid UTF-8.
+invalid_utf8_indexes(B) ->
+ invalid_utf8_indexes(B, 0, []).
+
+%% @private
+-spec invalid_utf8_indexes(binary(), non_neg_integer(), [non_neg_integer()]) -> [non_neg_integer()].
+invalid_utf8_indexes(<<C, Rest/binary>>, N, Acc) when C < 16#80 ->
+ %% U+0000 - U+007F - 7 bits
+ invalid_utf8_indexes(Rest, 1 + N, Acc);
+invalid_utf8_indexes(<<C1, C2, Rest/binary>>, N, Acc)
+ when C1 band 16#E0 =:= 16#C0,
+ C2 band 16#C0 =:= 16#80 ->
+ %% U+0080 - U+07FF - 11 bits
+ case ((C1 band 16#1F) bsl 6) bor (C2 band 16#3F) of
+ C when C < 16#80 ->
+ %% Overlong encoding.
+ invalid_utf8_indexes(Rest, 2 + N, [1 + N, N | Acc]);
+ _ ->
+ %% Upper bound U+07FF does not need to be checked
+ invalid_utf8_indexes(Rest, 2 + N, Acc)
+ end;
+invalid_utf8_indexes(<<C1, C2, C3, Rest/binary>>, N, Acc)
+ when C1 band 16#F0 =:= 16#E0,
+ C2 band 16#C0 =:= 16#80,
+ C3 band 16#C0 =:= 16#80 ->
+ %% U+0800 - U+FFFF - 16 bits
+ case ((((C1 band 16#0F) bsl 6) bor (C2 band 16#3F)) bsl 6) bor
+ (C3 band 16#3F) of
+ C when (C < 16#800) orelse (C >= 16#D800 andalso C =< 16#DFFF) ->
+ %% Overlong encoding or surrogate.
+ invalid_utf8_indexes(Rest, 3 + N, [2 + N, 1 + N, N | Acc]);
+ _ ->
+ %% Upper bound U+FFFF does not need to be checked
+ invalid_utf8_indexes(Rest, 3 + N, Acc)
+ end;
+invalid_utf8_indexes(<<C1, C2, C3, C4, Rest/binary>>, N, Acc)
+ when C1 band 16#F8 =:= 16#F0,
+ C2 band 16#C0 =:= 16#80,
+ C3 band 16#C0 =:= 16#80,
+ C4 band 16#C0 =:= 16#80 ->
+ %% U+10000 - U+10FFFF - 21 bits
+ case ((((((C1 band 16#0F) bsl 6) bor (C2 band 16#3F)) bsl 6) bor
+ (C3 band 16#3F)) bsl 6) bor (C4 band 16#3F) of
+ C when (C < 16#10000) orelse (C > 16#10FFFF) ->
+ %% Overlong encoding or invalid code point.
+ invalid_utf8_indexes(Rest, 4 + N, [3 + N, 2 + N, 1 + N, N | Acc]);
+ _ ->
+ invalid_utf8_indexes(Rest, 4 + N, Acc)
+ end;
+invalid_utf8_indexes(<<_, Rest/binary>>, N, Acc) ->
+ %% Invalid char
+ invalid_utf8_indexes(Rest, 1 + N, [N | Acc]);
+invalid_utf8_indexes(<<>>, _N, Acc) ->
+ lists:reverse(Acc).
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+binary_skip_bytes_test() ->
+ ?assertEqual(<<"foo">>,
+ binary_skip_bytes(<<"foo">>, [])),
+ ?assertEqual(<<"foobar">>,
+ binary_skip_bytes(<<"foo bar">>, [3])),
+ ?assertEqual(<<"foo">>,
+ binary_skip_bytes(<<"foo bar">>, [3, 4, 5, 6])),
+ ?assertEqual(<<"oo bar">>,
+ binary_skip_bytes(<<"foo bar">>, [0])),
+ ok.
+
+invalid_utf8_indexes_test() ->
+ ?assertEqual(
+ [],
+ invalid_utf8_indexes(<<"unicode snowman for you: ", 226, 152, 131>>)),
+ ?assertEqual(
+ [0],
+ invalid_utf8_indexes(<<128>>)),
+ ?assertEqual(
+ [57,59,60,64,66,67],
+ invalid_utf8_indexes(<<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (",
+ 167, 65, 170, 186, 73, 83, 80, 166, 87, 186, 217, 41, 41>>)),
+ ok.
+
+codepoint_to_bytes_test() ->
+ %% U+0000 - U+007F - 7 bits
+ %% U+0080 - U+07FF - 11 bits
+ %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
+ %% U+10000 - U+10FFFF - 21 bits
+ ?assertEqual(
+ <<"a">>,
+ codepoint_to_bytes($a)),
+ ?assertEqual(
+ <<16#c2, 16#80>>,
+ codepoint_to_bytes(16#80)),
+ ?assertEqual(
+ <<16#df, 16#bf>>,
+ codepoint_to_bytes(16#07ff)),
+ ?assertEqual(
+ <<16#ef, 16#bf, 16#bf>>,
+ codepoint_to_bytes(16#ffff)),
+ ?assertEqual(
+ <<16#f4, 16#8f, 16#bf, 16#bf>>,
+ codepoint_to_bytes(16#10ffff)),
+ ok.
+
+bytes_foldl_test() ->
+ ?assertEqual(
+ <<"abc">>,
+ bytes_foldl(fun (B, Acc) -> <<Acc/binary, B/binary>> end, <<>>, <<"abc">>)),
+ ?assertEqual(
+ <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>,
+ bytes_foldl(fun (B, Acc) -> <<Acc/binary, B/binary>> end, <<>>,
+ <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
+ ok.
+
+bytes_to_codepoints_test() ->
+ ?assertEqual(
+ "abc" ++ [16#2603, 16#4e2d, 16#85, 16#10ffff],
+ bytes_to_codepoints(<<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
+ ok.
+
+codepoint_foldl_test() ->
+ ?assertEqual(
+ "cba",
+ codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [], <<"abc">>)),
+ ?assertEqual(
+ [16#10ffff, 16#85, 16#4e2d, 16#2603 | "cba"],
+ codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [],
+ <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
+ ok.
+
+len_test() ->
+ ?assertEqual(
+ 29,
+ len(<<"unicode snowman for you: ", 226, 152, 131, 228, 184, 173, 194, 133, 244, 143, 191, 191>>)),
+ ok.
+
+codepoints_to_bytes_test() ->
+ ?assertEqual(
+ iolist_to_binary(lists:map(fun codepoint_to_bytes/1, lists:seq(1, 1000))),
+ codepoints_to_bytes(lists:seq(1, 1000))),
+ ok.
+
+valid_utf8_bytes_test() ->
+ ?assertEqual(
+ <<"invalid U+11ffff: ">>,
+ valid_utf8_bytes(<<"invalid U+11ffff: ", 244, 159, 191, 191>>)),
+ ?assertEqual(
+ <<"U+10ffff: ", 244, 143, 191, 191>>,
+ valid_utf8_bytes(<<"U+10ffff: ", 244, 143, 191, 191>>)),
+ ?assertEqual(
+ <<"overlong 2-byte encoding (a): ">>,
+ valid_utf8_bytes(<<"overlong 2-byte encoding (a): ", 2#11000001, 2#10100001>>)),
+ ?assertEqual(
+ <<"overlong 2-byte encoding (!): ">>,
+ valid_utf8_bytes(<<"overlong 2-byte encoding (!): ", 2#11000000, 2#10100001>>)),
+ ?assertEqual(
+ <<"mu: ", 194, 181>>,
+ valid_utf8_bytes(<<"mu: ", 194, 181>>)),
+ ?assertEqual(
+ <<"bad coding bytes: ">>,
+ valid_utf8_bytes(<<"bad coding bytes: ", 2#10011111, 2#10111111, 2#11111111>>)),
+ ?assertEqual(
+ <<"low surrogate (unpaired): ">>,
+ valid_utf8_bytes(<<"low surrogate (unpaired): ", 237, 176, 128>>)),
+ ?assertEqual(
+ <<"high surrogate (unpaired): ">>,
+ valid_utf8_bytes(<<"high surrogate (unpaired): ", 237, 191, 191>>)),
+ ?assertEqual(
+ <<"unicode snowman for you: ", 226, 152, 131>>,
+ valid_utf8_bytes(<<"unicode snowman for you: ", 226, 152, 131>>)),
+ ?assertEqual(
+ <<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (AISPW))">>,
+ valid_utf8_bytes(<<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (",
+ 167, 65, 170, 186, 73, 83, 80, 166, 87, 186, 217, 41, 41>>)),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb.app.in b/1.1.x/src/mochiweb/mochiweb.app.in
new file mode 100644
index 00000000..c6a2630b
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb.app.in
@@ -0,0 +1,32 @@
+{application, mochiweb,
+ [{description, "MochiMedia Web Server"},
+ {vsn, "7c2bc2"},
+ {modules, [
+ mochihex,
+ mochijson,
+ mochijson2,
+ mochinum,
+ mochiweb,
+ mochiweb_app,
+ mochiweb_charref,
+ mochiweb_cookies,
+ mochiweb_echo,
+ mochiweb_headers,
+ mochiweb_html,
+ mochiweb_http,
+ mochiweb_multipart,
+ mochiweb_request,
+ mochiweb_response,
+ mochiweb_skel,
+ mochiweb_socket_server,
+ mochiweb_sup,
+ mochiweb_util,
+ reloader,
+ mochifmt,
+ mochifmt_std,
+ mochifmt_records
+ ]},
+ {registered, []},
+ {mod, {mochiweb_app, []}},
+ {env, []},
+ {applications, [kernel, stdlib]}]}.
diff --git a/1.1.x/src/mochiweb/mochiweb.app.src b/1.1.x/src/mochiweb/mochiweb.app.src
new file mode 100644
index 00000000..a1c95aae
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb.app.src
@@ -0,0 +1,9 @@
+%% This is generated from src/mochiweb.app.src
+{application, mochiweb,
+ [{description, "MochiMedia Web Server"},
+ {vsn, "7c2bc2"},
+ {modules, []},
+ {registered, []},
+ {mod, {mochiweb_app, []}},
+ {env, []},
+ {applications, [kernel, stdlib, crypto, inets]}]}.
diff --git a/1.1.x/src/mochiweb/mochiweb.erl b/1.1.x/src/mochiweb/mochiweb.erl
new file mode 100644
index 00000000..3118028b
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb.erl
@@ -0,0 +1,289 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Start and stop the MochiWeb server.
+
+-module(mochiweb).
+-author('bob@mochimedia.com').
+
+-export([start/0, stop/0]).
+-export([new_request/1, new_response/1]).
+-export([all_loaded/0, all_loaded/1, reload/0]).
+
+%% @spec start() -> ok
+%% @doc Start the MochiWeb server.
+start() ->
+ ensure_started(crypto),
+ application:start(mochiweb).
+
+%% @spec stop() -> ok
+%% @doc Stop the MochiWeb server.
+stop() ->
+ Res = application:stop(mochiweb),
+ application:stop(crypto),
+ Res.
+
+reload() ->
+ [c:l(Module) || Module <- all_loaded()].
+
+all_loaded() ->
+ all_loaded(filename:dirname(code:which(?MODULE))).
+
+all_loaded(Base) when is_atom(Base) ->
+ [];
+all_loaded(Base) ->
+ FullBase = Base ++ "/",
+ F = fun ({_Module, Loaded}, Acc) when is_atom(Loaded) ->
+ Acc;
+ ({Module, Loaded}, Acc) ->
+ case lists:prefix(FullBase, Loaded) of
+ true ->
+ [Module | Acc];
+ false ->
+ Acc
+ end
+ end,
+ lists:foldl(F, [], code:all_loaded()).
+
+
+%% @spec new_request({Socket, Request, Headers}) -> MochiWebRequest
+%% @doc Return a mochiweb_request data structure.
+new_request({Socket, {Method, {abs_path, Uri}, Version}, Headers}) ->
+ mochiweb_request:new(Socket,
+ Method,
+ Uri,
+ Version,
+ mochiweb_headers:make(Headers));
+% this case probably doesn't "exist".
+new_request({Socket, {Method, {absoluteURI, _Protocol, _Host, _Port, Uri},
+ Version}, Headers}) ->
+ mochiweb_request:new(Socket,
+ Method,
+ Uri,
+ Version,
+ mochiweb_headers:make(Headers));
+%% Request-URI is "*"
+%% From http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
+new_request({Socket, {Method, '*'=Uri, Version}, Headers}) ->
+ mochiweb_request:new(Socket,
+ Method,
+ Uri,
+ Version,
+ mochiweb_headers:make(Headers)).
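+
+%% Minimal usage sketch (hypothetical values; Socket must be whatever
+%% mochiweb_socket expects):
+%%   Req = mochiweb:new_request({Socket,
+%%                               {'GET', {abs_path, "/"}, {1, 1}},
+%%                               [{"Host", "localhost"}]}),
+%%   "/" = Req:get(path).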
+
+%% @spec new_response({Request, integer(), Headers}) -> MochiWebResponse
+%% @doc Return a mochiweb_response data structure.
+new_response({Request, Code, Headers}) ->
+ mochiweb_response:new(Request,
+ Code,
+ mochiweb_headers:make(Headers)).
+
+%% Internal API
+
+ensure_started(App) ->
+ case application:start(App) of
+ ok ->
+ ok;
+ {error, {already_started, App}} ->
+ ok
+ end.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+-record(treq, {path, body= <<>>, xreply= <<>>}).
+
+ssl_cert_opts() ->
+ EbinDir = filename:dirname(code:which(?MODULE)),
+ CertDir = filename:join([EbinDir, "..", "support", "test-materials"]),
+ CertFile = filename:join(CertDir, "test_ssl_cert.pem"),
+ KeyFile = filename:join(CertDir, "test_ssl_key.pem"),
+ [{certfile, CertFile}, {keyfile, KeyFile}].
+
+with_server(Transport, ServerFun, ClientFun) ->
+ ServerOpts0 = [{ip, "127.0.0.1"}, {port, 0}, {loop, ServerFun}],
+ ServerOpts = case Transport of
+ plain ->
+ ServerOpts0;
+ ssl ->
+ ServerOpts0 ++ [{ssl, true}, {ssl_opts, ssl_cert_opts()}]
+ end,
+ {ok, Server} = mochiweb_http:start(ServerOpts),
+ Port = mochiweb_socket_server:get(Server, port),
+ Res = (catch ClientFun(Transport, Port)),
+ mochiweb_http:stop(Server),
+ Res.
+
+request_test() ->
+ R = mochiweb_request:new(z, z, "/foo/bar/baz%20wibble+quux?qs=2", z, []),
+ "/foo/bar/baz wibble quux" = R:get(path),
+ ok.
+
+single_http_GET_test() ->
+ do_GET(plain, 1).
+
+single_https_GET_test() ->
+ do_GET(ssl, 1).
+
+multiple_http_GET_test() ->
+ do_GET(plain, 3).
+
+multiple_https_GET_test() ->
+ do_GET(ssl, 3).
+
+hundred_http_GET_test() ->
+ do_GET(plain, 100).
+
+hundred_https_GET_test() ->
+ do_GET(ssl, 100).
+
+single_128_http_POST_test() ->
+ do_POST(plain, 128, 1).
+
+single_128_https_POST_test() ->
+ do_POST(ssl, 128, 1).
+
+single_2k_http_POST_test() ->
+ do_POST(plain, 2048, 1).
+
+single_2k_https_POST_test() ->
+ do_POST(ssl, 2048, 1).
+
+single_100k_http_POST_test() ->
+ do_POST(plain, 102400, 1).
+
+single_100k_https_POST_test() ->
+ do_POST(ssl, 102400, 1).
+
+multiple_100k_http_POST_test() ->
+ do_POST(plain, 102400, 3).
+
+multiple_100K_https_POST_test() ->
+ do_POST(ssl, 102400, 3).
+
+hundred_128_http_POST_test() ->
+ do_POST(plain, 128, 100).
+
+hundred_128_https_POST_test() ->
+ do_POST(ssl, 128, 100).
+
+do_GET(Transport, Times) ->
+ PathPrefix = "/whatever/",
+ ReplyPrefix = "You requested: ",
+ ServerFun = fun (Req) ->
+ Reply = ReplyPrefix ++ Req:get(path),
+ Req:ok({"text/plain", Reply})
+ end,
+ TestReqs = [begin
+ Path = PathPrefix ++ integer_to_list(N),
+ ExpectedReply = list_to_binary(ReplyPrefix ++ Path),
+ #treq{path=Path, xreply=ExpectedReply}
+ end || N <- lists:seq(1, Times)],
+ ClientFun = new_client_fun('GET', TestReqs),
+ ok = with_server(Transport, ServerFun, ClientFun),
+ ok.
+
+do_POST(Transport, Size, Times) ->
+ ServerFun = fun (Req) ->
+ Body = Req:recv_body(),
+ Headers = [{"Content-Type", "application/octet-stream"}],
+ Req:respond({201, Headers, Body})
+ end,
+ TestReqs = [begin
+ Path = "/stuff/" ++ integer_to_list(N),
+ Body = crypto:rand_bytes(Size),
+ #treq{path=Path, body=Body, xreply=Body}
+ end || N <- lists:seq(1, Times)],
+ ClientFun = new_client_fun('POST', TestReqs),
+ ok = with_server(Transport, ServerFun, ClientFun),
+ ok.
+
+new_client_fun(Method, TestReqs) ->
+ fun (Transport, Port) ->
+ client_request(Transport, Port, Method, TestReqs)
+ end.
+
+client_request(Transport, Port, Method, TestReqs) ->
+ Opts = [binary, {active, false}, {packet, http}],
+ SockFun = case Transport of
+ plain ->
+ {ok, Socket} = gen_tcp:connect("127.0.0.1", Port, Opts),
+ fun (recv) ->
+ gen_tcp:recv(Socket, 0);
+ ({recv, Length}) ->
+ gen_tcp:recv(Socket, Length);
+ ({send, Data}) ->
+ gen_tcp:send(Socket, Data);
+ ({setopts, L}) ->
+ inet:setopts(Socket, L)
+ end;
+ ssl ->
+ {ok, Socket} = ssl:connect("127.0.0.1", Port, [{ssl_imp, new} | Opts]),
+ fun (recv) ->
+ ssl:recv(Socket, 0);
+ ({recv, Length}) ->
+ ssl:recv(Socket, Length);
+ ({send, Data}) ->
+ ssl:send(Socket, Data);
+ ({setopts, L}) ->
+ ssl:setopts(Socket, L)
+ end
+ end,
+ client_request(SockFun, Method, TestReqs).
+
+client_request(SockFun, _Method, []) ->
+ {the_end, {error, closed}} = {the_end, SockFun(recv)},
+ ok;
+client_request(SockFun, Method,
+ [#treq{path=Path, body=Body, xreply=ExReply} | Rest]) ->
+ Request = [atom_to_list(Method), " ", Path, " HTTP/1.1\r\n",
+ client_headers(Body, Rest =:= []),
+ "\r\n",
+ Body],
+ ok = SockFun({send, Request}),
+ case Method of
+ 'GET' ->
+ {ok, {http_response, {1,1}, 200, "OK"}} = SockFun(recv);
+ 'POST' ->
+ {ok, {http_response, {1,1}, 201, "Created"}} = SockFun(recv)
+ end,
+ ok = SockFun({setopts, [{packet, httph}]}),
+ {ok, {http_header, _, 'Server', _, "MochiWeb" ++ _}} = SockFun(recv),
+ {ok, {http_header, _, 'Date', _, _}} = SockFun(recv),
+ {ok, {http_header, _, 'Content-Type', _, _}} = SockFun(recv),
+ {ok, {http_header, _, 'Content-Length', _, ConLenStr}} = SockFun(recv),
+ ContentLength = list_to_integer(ConLenStr),
+ {ok, http_eoh} = SockFun(recv),
+ ok = SockFun({setopts, [{packet, raw}]}),
+ {payload, ExReply} = {payload, drain_reply(SockFun, ContentLength, <<>>)},
+ ok = SockFun({setopts, [{packet, http}]}),
+ client_request(SockFun, Method, Rest).
+
+client_headers(Body, IsLastRequest) ->
+ ["Host: localhost\r\n",
+ case Body of
+ <<>> ->
+ "";
+ _ ->
+ ["Content-Type: application/octet-stream\r\n",
+ "Content-Length: ", integer_to_list(byte_size(Body)), "\r\n"]
+ end,
+ case IsLastRequest of
+ true ->
+ "Connection: close\r\n";
+ false ->
+ ""
+ end].
+
+drain_reply(_SockFun, 0, Acc) ->
+ Acc;
+drain_reply(SockFun, Length, Acc) ->
+ Sz = erlang:min(Length, 1024),
+ {ok, B} = SockFun({recv, Sz}),
+ drain_reply(SockFun, Length - Sz, <<Acc/bytes, B/bytes>>).
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_acceptor.erl b/1.1.x/src/mochiweb/mochiweb_acceptor.erl
new file mode 100644
index 00000000..79d172c3
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_acceptor.erl
@@ -0,0 +1,48 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc MochiWeb acceptor.
+
+-module(mochiweb_acceptor).
+-author('bob@mochimedia.com').
+
+-include("internal.hrl").
+
+-export([start_link/3, init/3]).
+
+start_link(Server, Listen, Loop) ->
+ proc_lib:spawn_link(?MODULE, init, [Server, Listen, Loop]).
+
+init(Server, Listen, Loop) ->
+ T1 = now(),
+ case catch mochiweb_socket:accept(Listen) of
+ {ok, Socket} ->
+ gen_server:cast(Server, {accepted, self(), timer:now_diff(now(), T1)}),
+ call_loop(Loop, Socket);
+ {error, closed} ->
+ exit(normal);
+ {error, timeout} ->
+ exit(normal);
+ {error, esslaccept} ->
+ exit(normal);
+ Other ->
+ error_logger:error_report(
+ [{application, mochiweb},
+ "Accept failed error",
+ lists:flatten(io_lib:format("~p", [Other]))]),
+ exit({error, accept_failed})
+ end.
+
+call_loop({M, F}, Socket) ->
+ M:F(Socket);
+call_loop({M, F, A}, Socket) ->
+ erlang:apply(M, F, [Socket | A]);
+call_loop(Loop, Socket) ->
+ Loop(Socket).
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_app.erl b/1.1.x/src/mochiweb/mochiweb_app.erl
new file mode 100644
index 00000000..5d67787b
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_app.erl
@@ -0,0 +1,27 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Callbacks for the mochiweb application.
+
+-module(mochiweb_app).
+-author('bob@mochimedia.com').
+
+-behaviour(application).
+-export([start/2,stop/1]).
+
+%% @spec start(_Type, _StartArgs) -> ServerRet
+%% @doc application start callback for mochiweb.
+start(_Type, _StartArgs) ->
+ mochiweb_sup:start_link().
+
+%% @spec stop(_State) -> ServerRet
+%% @doc application stop callback for mochiweb.
+stop(_State) ->
+ ok.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_charref.erl b/1.1.x/src/mochiweb/mochiweb_charref.erl
new file mode 100644
index 00000000..99cd5502
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_charref.erl
@@ -0,0 +1,308 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Converts HTML 4 charrefs and entities to codepoints.
+-module(mochiweb_charref).
+-export([charref/1]).
+
+%% External API.
+
+%% @spec charref(S) -> integer() | undefined
+%% @doc Convert a decimal charref, hex charref, or html entity to a unicode
+%% codepoint, or return undefined on failure.
+%% The input should not include an ampersand or semicolon.
+%% charref("#38") = 38, charref("#x26") = 38, charref("amp") = 38.
+charref(B) when is_binary(B) ->
+ charref(binary_to_list(B));
+charref([$#, C | L]) when C =:= $x orelse C =:= $X ->
+ try erlang:list_to_integer(L, 16)
+ catch
+ error:badarg -> undefined
+ end;
+charref([$# | L]) ->
+ try list_to_integer(L)
+ catch
+ error:badarg -> undefined
+ end;
+charref(L) ->
+ entity(L).
+
+%% Internal API.
+
+entity("nbsp") -> 160;
+entity("iexcl") -> 161;
+entity("cent") -> 162;
+entity("pound") -> 163;
+entity("curren") -> 164;
+entity("yen") -> 165;
+entity("brvbar") -> 166;
+entity("sect") -> 167;
+entity("uml") -> 168;
+entity("copy") -> 169;
+entity("ordf") -> 170;
+entity("laquo") -> 171;
+entity("not") -> 172;
+entity("shy") -> 173;
+entity("reg") -> 174;
+entity("macr") -> 175;
+entity("deg") -> 176;
+entity("plusmn") -> 177;
+entity("sup2") -> 178;
+entity("sup3") -> 179;
+entity("acute") -> 180;
+entity("micro") -> 181;
+entity("para") -> 182;
+entity("middot") -> 183;
+entity("cedil") -> 184;
+entity("sup1") -> 185;
+entity("ordm") -> 186;
+entity("raquo") -> 187;
+entity("frac14") -> 188;
+entity("frac12") -> 189;
+entity("frac34") -> 190;
+entity("iquest") -> 191;
+entity("Agrave") -> 192;
+entity("Aacute") -> 193;
+entity("Acirc") -> 194;
+entity("Atilde") -> 195;
+entity("Auml") -> 196;
+entity("Aring") -> 197;
+entity("AElig") -> 198;
+entity("Ccedil") -> 199;
+entity("Egrave") -> 200;
+entity("Eacute") -> 201;
+entity("Ecirc") -> 202;
+entity("Euml") -> 203;
+entity("Igrave") -> 204;
+entity("Iacute") -> 205;
+entity("Icirc") -> 206;
+entity("Iuml") -> 207;
+entity("ETH") -> 208;
+entity("Ntilde") -> 209;
+entity("Ograve") -> 210;
+entity("Oacute") -> 211;
+entity("Ocirc") -> 212;
+entity("Otilde") -> 213;
+entity("Ouml") -> 214;
+entity("times") -> 215;
+entity("Oslash") -> 216;
+entity("Ugrave") -> 217;
+entity("Uacute") -> 218;
+entity("Ucirc") -> 219;
+entity("Uuml") -> 220;
+entity("Yacute") -> 221;
+entity("THORN") -> 222;
+entity("szlig") -> 223;
+entity("agrave") -> 224;
+entity("aacute") -> 225;
+entity("acirc") -> 226;
+entity("atilde") -> 227;
+entity("auml") -> 228;
+entity("aring") -> 229;
+entity("aelig") -> 230;
+entity("ccedil") -> 231;
+entity("egrave") -> 232;
+entity("eacute") -> 233;
+entity("ecirc") -> 234;
+entity("euml") -> 235;
+entity("igrave") -> 236;
+entity("iacute") -> 237;
+entity("icirc") -> 238;
+entity("iuml") -> 239;
+entity("eth") -> 240;
+entity("ntilde") -> 241;
+entity("ograve") -> 242;
+entity("oacute") -> 243;
+entity("ocirc") -> 244;
+entity("otilde") -> 245;
+entity("ouml") -> 246;
+entity("divide") -> 247;
+entity("oslash") -> 248;
+entity("ugrave") -> 249;
+entity("uacute") -> 250;
+entity("ucirc") -> 251;
+entity("uuml") -> 252;
+entity("yacute") -> 253;
+entity("thorn") -> 254;
+entity("yuml") -> 255;
+entity("fnof") -> 402;
+entity("Alpha") -> 913;
+entity("Beta") -> 914;
+entity("Gamma") -> 915;
+entity("Delta") -> 916;
+entity("Epsilon") -> 917;
+entity("Zeta") -> 918;
+entity("Eta") -> 919;
+entity("Theta") -> 920;
+entity("Iota") -> 921;
+entity("Kappa") -> 922;
+entity("Lambda") -> 923;
+entity("Mu") -> 924;
+entity("Nu") -> 925;
+entity("Xi") -> 926;
+entity("Omicron") -> 927;
+entity("Pi") -> 928;
+entity("Rho") -> 929;
+entity("Sigma") -> 931;
+entity("Tau") -> 932;
+entity("Upsilon") -> 933;
+entity("Phi") -> 934;
+entity("Chi") -> 935;
+entity("Psi") -> 936;
+entity("Omega") -> 937;
+entity("alpha") -> 945;
+entity("beta") -> 946;
+entity("gamma") -> 947;
+entity("delta") -> 948;
+entity("epsilon") -> 949;
+entity("zeta") -> 950;
+entity("eta") -> 951;
+entity("theta") -> 952;
+entity("iota") -> 953;
+entity("kappa") -> 954;
+entity("lambda") -> 955;
+entity("mu") -> 956;
+entity("nu") -> 957;
+entity("xi") -> 958;
+entity("omicron") -> 959;
+entity("pi") -> 960;
+entity("rho") -> 961;
+entity("sigmaf") -> 962;
+entity("sigma") -> 963;
+entity("tau") -> 964;
+entity("upsilon") -> 965;
+entity("phi") -> 966;
+entity("chi") -> 967;
+entity("psi") -> 968;
+entity("omega") -> 969;
+entity("thetasym") -> 977;
+entity("upsih") -> 978;
+entity("piv") -> 982;
+entity("bull") -> 8226;
+entity("hellip") -> 8230;
+entity("prime") -> 8242;
+entity("Prime") -> 8243;
+entity("oline") -> 8254;
+entity("frasl") -> 8260;
+entity("weierp") -> 8472;
+entity("image") -> 8465;
+entity("real") -> 8476;
+entity("trade") -> 8482;
+entity("alefsym") -> 8501;
+entity("larr") -> 8592;
+entity("uarr") -> 8593;
+entity("rarr") -> 8594;
+entity("darr") -> 8595;
+entity("harr") -> 8596;
+entity("crarr") -> 8629;
+entity("lArr") -> 8656;
+entity("uArr") -> 8657;
+entity("rArr") -> 8658;
+entity("dArr") -> 8659;
+entity("hArr") -> 8660;
+entity("forall") -> 8704;
+entity("part") -> 8706;
+entity("exist") -> 8707;
+entity("empty") -> 8709;
+entity("nabla") -> 8711;
+entity("isin") -> 8712;
+entity("notin") -> 8713;
+entity("ni") -> 8715;
+entity("prod") -> 8719;
+entity("sum") -> 8721;
+entity("minus") -> 8722;
+entity("lowast") -> 8727;
+entity("radic") -> 8730;
+entity("prop") -> 8733;
+entity("infin") -> 8734;
+entity("ang") -> 8736;
+entity("and") -> 8743;
+entity("or") -> 8744;
+entity("cap") -> 8745;
+entity("cup") -> 8746;
+entity("int") -> 8747;
+entity("there4") -> 8756;
+entity("sim") -> 8764;
+entity("cong") -> 8773;
+entity("asymp") -> 8776;
+entity("ne") -> 8800;
+entity("equiv") -> 8801;
+entity("le") -> 8804;
+entity("ge") -> 8805;
+entity("sub") -> 8834;
+entity("sup") -> 8835;
+entity("nsub") -> 8836;
+entity("sube") -> 8838;
+entity("supe") -> 8839;
+entity("oplus") -> 8853;
+entity("otimes") -> 8855;
+entity("perp") -> 8869;
+entity("sdot") -> 8901;
+entity("lceil") -> 8968;
+entity("rceil") -> 8969;
+entity("lfloor") -> 8970;
+entity("rfloor") -> 8971;
+entity("lang") -> 9001;
+entity("rang") -> 9002;
+entity("loz") -> 9674;
+entity("spades") -> 9824;
+entity("clubs") -> 9827;
+entity("hearts") -> 9829;
+entity("diams") -> 9830;
+entity("quot") -> 34;
+entity("amp") -> 38;
+entity("lt") -> 60;
+entity("gt") -> 62;
+entity("OElig") -> 338;
+entity("oelig") -> 339;
+entity("Scaron") -> 352;
+entity("scaron") -> 353;
+entity("Yuml") -> 376;
+entity("circ") -> 710;
+entity("tilde") -> 732;
+entity("ensp") -> 8194;
+entity("emsp") -> 8195;
+entity("thinsp") -> 8201;
+entity("zwnj") -> 8204;
+entity("zwj") -> 8205;
+entity("lrm") -> 8206;
+entity("rlm") -> 8207;
+entity("ndash") -> 8211;
+entity("mdash") -> 8212;
+entity("lsquo") -> 8216;
+entity("rsquo") -> 8217;
+entity("sbquo") -> 8218;
+entity("ldquo") -> 8220;
+entity("rdquo") -> 8221;
+entity("bdquo") -> 8222;
+entity("dagger") -> 8224;
+entity("Dagger") -> 8225;
+entity("permil") -> 8240;
+entity("lsaquo") -> 8249;
+entity("rsaquo") -> 8250;
+entity("euro") -> 8364;
+entity(_) -> undefined.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+exhaustive_entity_test() ->
+ T = mochiweb_cover:clause_lookup_table(?MODULE, entity),
+ [?assertEqual(V, entity(K)) || {K, V} <- T].
+
+charref_test() ->
+ 1234 = charref("#1234"),
+ 255 = charref("#xfF"),
+ 255 = charref(<<"#XFf">>),
+ 38 = charref("amp"),
+ 38 = charref(<<"amp">>),
+ undefined = charref("not_an_entity"),
+ undefined = charref("#not_an_entity"),
+ undefined = charref("#xnot_an_entity"),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_cookies.erl b/1.1.x/src/mochiweb/mochiweb_cookies.erl
new file mode 100644
index 00000000..c090b714
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_cookies.erl
@@ -0,0 +1,309 @@
+%% @author Emad El-Haraty <emad@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc HTTP Cookie parsing and generating (RFC 2109, RFC 2965).
+
+-module(mochiweb_cookies).
+-export([parse_cookie/1, cookie/3, cookie/2]).
+
+-define(QUOTE, $\").
+
+-define(IS_WHITESPACE(C),
+ (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+
+%% RFC 2616 separators (called tspecials in RFC 2068)
+-define(IS_SEPARATOR(C),
+ (C < 32 orelse
+ C =:= $\s orelse C =:= $\t orelse
+ C =:= $( orelse C =:= $) orelse C =:= $< orelse C =:= $> orelse
+ C =:= $@ orelse C =:= $, orelse C =:= $; orelse C =:= $: orelse
+ C =:= $\\ orelse C =:= $\" orelse C =:= $/ orelse
+ C =:= $[ orelse C =:= $] orelse C =:= $? orelse C =:= $= orelse
+ C =:= ${ orelse C =:= $})).
+
+%% @type proplist() = [{Key::string(), Value::string()}].
+%% @type header() = {Name::string(), Value::string()}.
+
+%% @spec cookie(Key::string(), Value::string()) -> header()
+%% @doc Short-hand for <code>cookie(Key, Value, [])</code>.
+cookie(Key, Value) ->
+ cookie(Key, Value, []).
+
+%% @spec cookie(Key::string(), Value::string(), Options::[Option]) -> header()
+%% where Option = {max_age, integer()} | {local_time, {date(), time()}}
+%% | {domain, string()} | {path, string()}
+%% | {secure, true | false} | {http_only, true | false}
+%%
+%% @doc Generate a Set-Cookie header field tuple.
+cookie(Key, Value, Options) ->
+ Cookie = [any_to_list(Key), "=", quote(Value), "; Version=1"],
+ %% Set-Cookie:
+ %% Comment, Domain, Max-Age, Path, Secure, Version
+ %% Set-Cookie2:
+ %% Comment, CommentURL, Discard, Domain, Max-Age, Path, Port, Secure,
+ %% Version
+ ExpiresPart =
+ case proplists:get_value(max_age, Options) of
+ undefined ->
+ "";
+ RawAge ->
+ When = case proplists:get_value(local_time, Options) of
+ undefined ->
+ calendar:local_time();
+ LocalTime ->
+ LocalTime
+ end,
+ Age = case RawAge < 0 of
+ true ->
+ 0;
+ false ->
+ RawAge
+ end,
+ ["; Expires=", age_to_cookie_date(Age, When),
+ "; Max-Age=", quote(Age)]
+ end,
+ SecurePart =
+ case proplists:get_value(secure, Options) of
+ true ->
+ "; Secure";
+ _ ->
+ ""
+ end,
+ DomainPart =
+ case proplists:get_value(domain, Options) of
+ undefined ->
+ "";
+ Domain ->
+ ["; Domain=", quote(Domain)]
+ end,
+ PathPart =
+ case proplists:get_value(path, Options) of
+ undefined ->
+ "";
+ Path ->
+ ["; Path=", quote(Path)]
+ end,
+ HttpOnlyPart =
+ case proplists:get_value(http_only, Options) of
+ true ->
+ "; HttpOnly";
+ _ ->
+ ""
+ end,
+ CookieParts = [Cookie, ExpiresPart, SecurePart, DomainPart, PathPart, HttpOnlyPart],
+ {"Set-Cookie", lists:flatten(CookieParts)}.
+
+
+%% Every major browser incorrectly handles quoted strings in a
+%% different and (worse) incompatible manner. Instead of wasting time
+%% writing redundant code for each browser, we restrict cookies to
+%% only contain characters that browsers handle compatibly.
+%%
+%% By replacing the definition of quote with this, we generate
+%% RFC-compliant cookies:
+%%
+%% quote(V) ->
+%% Fun = fun(?QUOTE, Acc) -> [$\\, ?QUOTE | Acc];
+%% (Ch, Acc) -> [Ch | Acc]
+%% end,
+%% [?QUOTE | lists:foldr(Fun, [?QUOTE], V)].
+
+%% Convert to a string and raise an error if quoting is required.
+quote(V0) ->
+ V = any_to_list(V0),
+ lists:all(fun(Ch) -> Ch =:= $/ orelse not ?IS_SEPARATOR(Ch) end, V)
+ orelse erlang:error({cookie_quoting_required, V}),
+ V.
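+
+%% Illustrative behaviour (not part of the original module): quote/1 passes
+%% plain values through unchanged and rejects values that would need real
+%% quoting:
+%%
+%%   > quote("abc123").   %% "abc123"
+%%   > quote("a b").      %% raises {cookie_quoting_required, "a b"}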
+
+add_seconds(Secs, LocalTime) ->
+ Greg = calendar:datetime_to_gregorian_seconds(LocalTime),
+ calendar:gregorian_seconds_to_datetime(Greg + Secs).
+
+age_to_cookie_date(Age, LocalTime) ->
+ httpd_util:rfc1123_date(add_seconds(Age, LocalTime)).
+
+%% @spec parse_cookie(string()) -> [{K::string(), V::string()}]
+%% @doc Parse the contents of a Cookie header field, ignoring cookie
+%% attributes, and return a simple property list.
+parse_cookie("") ->
+ [];
+parse_cookie(Cookie) ->
+ parse_cookie(Cookie, []).
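+
+%% Illustrative usage (not part of the original module): attribute tokens
+%% such as $Version and $Path are dropped, keeping only name/value pairs:
+%%
+%%   > mochiweb_cookies:parse_cookie("$Version=\"1\"; session=abc123").
+%%   [{"session","abc123"}]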
+
+%% Internal API
+
+parse_cookie([], Acc) ->
+ lists:reverse(Acc);
+parse_cookie(String, Acc) ->
+ {{Token, Value}, Rest} = read_pair(String),
+ Acc1 = case Token of
+ "" ->
+ Acc;
+ "$" ++ _ ->
+ Acc;
+ _ ->
+ [{Token, Value} | Acc]
+ end,
+ parse_cookie(Rest, Acc1).
+
+read_pair(String) ->
+ {Token, Rest} = read_token(skip_whitespace(String)),
+ {Value, Rest1} = read_value(skip_whitespace(Rest)),
+ {{Token, Value}, skip_past_separator(Rest1)}.
+
+read_value([$= | Value]) ->
+ Value1 = skip_whitespace(Value),
+ case Value1 of
+ [?QUOTE | _] ->
+ read_quoted(Value1);
+ _ ->
+ read_token(Value1)
+ end;
+read_value(String) ->
+ {"", String}.
+
+read_quoted([?QUOTE | String]) ->
+ read_quoted(String, []).
+
+read_quoted([], Acc) ->
+ {lists:reverse(Acc), []};
+read_quoted([?QUOTE | Rest], Acc) ->
+ {lists:reverse(Acc), Rest};
+read_quoted([$\\, Any | Rest], Acc) ->
+ read_quoted(Rest, [Any | Acc]);
+read_quoted([C | Rest], Acc) ->
+ read_quoted(Rest, [C | Acc]).
+
+skip_whitespace(String) ->
+ F = fun (C) -> ?IS_WHITESPACE(C) end,
+ lists:dropwhile(F, String).
+
+read_token(String) ->
+ F = fun (C) -> not ?IS_SEPARATOR(C) end,
+ lists:splitwith(F, String).
+
+skip_past_separator([]) ->
+ [];
+skip_past_separator([$; | Rest]) ->
+ Rest;
+skip_past_separator([$, | Rest]) ->
+ Rest;
+skip_past_separator([_ | Rest]) ->
+ skip_past_separator(Rest).
+
+any_to_list(V) when is_list(V) ->
+ V;
+any_to_list(V) when is_atom(V) ->
+ atom_to_list(V);
+any_to_list(V) when is_binary(V) ->
+ binary_to_list(V);
+any_to_list(V) when is_integer(V) ->
+ integer_to_list(V).
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+quote_test() ->
+ %% ?assertError eunit macro is not compatible with coverage module
+ try quote(":wq")
+ catch error:{cookie_quoting_required, ":wq"} -> ok
+ end,
+ ?assertEqual(
+ "foo",
+ quote(foo)),
+ ok.
+
+parse_cookie_test() ->
+ %% RFC example
+ C1 = "$Version=\"1\"; Customer=\"WILE_E_COYOTE\"; $Path=\"/acme\";
+ Part_Number=\"Rocket_Launcher_0001\"; $Path=\"/acme\";
+ Shipping=\"FedEx\"; $Path=\"/acme\"",
+ ?assertEqual(
+ [{"Customer","WILE_E_COYOTE"},
+ {"Part_Number","Rocket_Launcher_0001"},
+ {"Shipping","FedEx"}],
+ parse_cookie(C1)),
+ %% Potential edge cases
+ ?assertEqual(
+ [{"foo", "x"}],
+ parse_cookie("foo=\"\\x\"")),
+ ?assertEqual(
+ [],
+ parse_cookie("=")),
+ ?assertEqual(
+ [{"foo", ""}, {"bar", ""}],
+ parse_cookie(" foo ; bar ")),
+ ?assertEqual(
+ [{"foo", ""}, {"bar", ""}],
+ parse_cookie("foo=;bar=")),
+ ?assertEqual(
+ [{"foo", "\";"}, {"bar", ""}],
+ parse_cookie("foo = \"\\\";\";bar ")),
+ ?assertEqual(
+ [{"foo", "\";bar"}],
+ parse_cookie("foo=\"\\\";bar")),
+ ?assertEqual(
+ [],
+ parse_cookie([])),
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble"}],
+ parse_cookie("foo=bar , baz=wibble ")),
+ ok.
+
+domain_test() ->
+ ?assertEqual(
+ {"Set-Cookie",
+ "Customer=WILE_E_COYOTE; "
+ "Version=1; "
+ "Domain=acme.com; "
+ "HttpOnly"},
+ cookie("Customer", "WILE_E_COYOTE",
+ [{http_only, true}, {domain, "acme.com"}])),
+ ok.
+
+local_time_test() ->
+ {"Set-Cookie", S} = cookie("Customer", "WILE_E_COYOTE",
+ [{max_age, 111}, {secure, true}]),
+ ?assertMatch(
+ ["Customer=WILE_E_COYOTE",
+ " Version=1",
+ " Expires=" ++ _,
+ " Max-Age=111",
+ " Secure"],
+ string:tokens(S, ";")),
+ ok.
+
+cookie_test() ->
+ C1 = {"Set-Cookie",
+ "Customer=WILE_E_COYOTE; "
+ "Version=1; "
+ "Path=/acme"},
+ C1 = cookie("Customer", "WILE_E_COYOTE", [{path, "/acme"}]),
+ C1 = cookie("Customer", "WILE_E_COYOTE",
+ [{path, "/acme"}, {badoption, "negatory"}]),
+ C1 = cookie('Customer', 'WILE_E_COYOTE', [{path, '/acme'}]),
+ C1 = cookie(<<"Customer">>, <<"WILE_E_COYOTE">>, [{path, <<"/acme">>}]),
+
+ {"Set-Cookie","=NoKey; Version=1"} = cookie("", "NoKey", []),
+ {"Set-Cookie","=NoKey; Version=1"} = cookie("", "NoKey"),
+ LocalTime = calendar:universal_time_to_local_time({{2007, 5, 15}, {13, 45, 33}}),
+ C2 = {"Set-Cookie",
+ "Customer=WILE_E_COYOTE; "
+ "Version=1; "
+ "Expires=Tue, 15 May 2007 13:45:33 GMT; "
+ "Max-Age=0"},
+ C2 = cookie("Customer", "WILE_E_COYOTE",
+ [{max_age, -111}, {local_time, LocalTime}]),
+ C3 = {"Set-Cookie",
+ "Customer=WILE_E_COYOTE; "
+ "Version=1; "
+ "Expires=Wed, 16 May 2007 13:45:50 GMT; "
+ "Max-Age=86417"},
+ C3 = cookie("Customer", "WILE_E_COYOTE",
+ [{max_age, 86417}, {local_time, LocalTime}]),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_cover.erl b/1.1.x/src/mochiweb/mochiweb_cover.erl
new file mode 100644
index 00000000..6a14ef51
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_cover.erl
@@ -0,0 +1,75 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc Workarounds for various cover deficiencies.
+-module(mochiweb_cover).
+-export([get_beam/1, get_abstract_code/1,
+ get_clauses/2, clause_lookup_table/1]).
+-export([clause_lookup_table/2]).
+
+%% Internal
+
+get_beam(Module) ->
+ {Module, Beam, _Path} = code:get_object_code(Module),
+ Beam.
+
+get_abstract_code(Beam) ->
+ {ok, {_Module,
+ [{abstract_code,
+ {raw_abstract_v1, L}}]}} = beam_lib:chunks(Beam, [abstract_code]),
+ L.
+
+get_clauses(Function, Code) ->
+ [L] = [Clauses || {function, _, FName, _, Clauses}
+ <- Code, FName =:= Function],
+ L.
+
+clause_lookup_table(Module, Function) ->
+ clause_lookup_table(
+ get_clauses(Function,
+ get_abstract_code(get_beam(Module)))).
+
+clause_lookup_table(Clauses) ->
+ lists:foldr(fun clause_fold/2, [], Clauses).
+
+clause_fold({clause, _,
+ [InTerm],
+ _Guards=[],
+ [OutTerm]},
+ Acc) ->
+ try [{erl_parse:normalise(InTerm), erl_parse:normalise(OutTerm)} | Acc]
+ catch error:_ -> Acc
+ end;
+clause_fold(_, Acc) ->
+ Acc.
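+
+%% Illustrative usage (not part of the original module): applied to a module
+%% such as mochiweb_charref, this yields one {Input, Output} pair per
+%% constant clause, e.g. {"cent", 162} for entity("cent") -> 162; clauses
+%% with variables or non-empty guards are skipped by clause_fold/2.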
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+foo_table(a) -> b;
+foo_table("a") -> <<"b">>;
+foo_table(123) -> {4, 3, 2};
+foo_table([list]) -> [];
+foo_table([list1, list2]) -> [list1, list2, list3];
+foo_table(ignored) -> some, code, ignored;
+foo_table(Var) -> Var.
+
+foo_table_test() ->
+ T = clause_lookup_table(?MODULE, foo_table),
+ [?assertEqual(V, foo_table(K)) || {K, V} <- T].
+
+clause_lookup_table_test() ->
+ ?assertEqual(b, foo_table(a)),
+ ?assertEqual(ignored, foo_table(ignored)),
+ ?assertEqual('Var', foo_table('Var')),
+ ?assertEqual(
+ [{a, b},
+ {"a", <<"b">>},
+ {123, {4, 3, 2}},
+ {[list], []},
+ {[list1, list2], [list1, list2, list3]}],
+ clause_lookup_table(?MODULE, foo_table)).
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_echo.erl b/1.1.x/src/mochiweb/mochiweb_echo.erl
new file mode 100644
index 00000000..6f7872b9
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_echo.erl
@@ -0,0 +1,38 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Simple and stupid echo server to demo mochiweb_socket_server.
+
+-module(mochiweb_echo).
+-author('bob@mochimedia.com').
+-export([start/0, stop/0, loop/1]).
+
+stop() ->
+ mochiweb_socket_server:stop(?MODULE).
+
+start() ->
+ mochiweb_socket_server:start([{name, ?MODULE},
+ {port, 6789},
+ {ip, "127.0.0.1"},
+ {max, 1},
+ {loop, {?MODULE, loop}}]).
+
+loop(Socket) ->
+ case mochiweb_socket:recv(Socket, 0, 30000) of
+ {ok, Data} ->
+ case mochiweb_socket:send(Socket, Data) of
+ ok ->
+ loop(Socket);
+ _ ->
+ exit(normal)
+ end;
+ _Other ->
+ exit(normal)
+ end.
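+
+%% Illustrative usage (not part of the original module): start the server,
+%% then echo a line over a raw TCP connection:
+%%
+%%   > mochiweb_echo:start(),
+%%     {ok, S} = gen_tcp:connect("127.0.0.1", 6789,
+%%                               [binary, {active, false}]),
+%%     ok = gen_tcp:send(S, <<"hello">>),
+%%     {ok, <<"hello">>} = gen_tcp:recv(S, 0).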
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_headers.erl b/1.1.x/src/mochiweb/mochiweb_headers.erl
new file mode 100644
index 00000000..4fce9838
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_headers.erl
@@ -0,0 +1,299 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Case-preserving (but case-insensitive) HTTP Header dictionary.
+
+-module(mochiweb_headers).
+-author('bob@mochimedia.com').
+-export([empty/0, from_list/1, insert/3, enter/3, get_value/2, lookup/2]).
+-export([delete_any/2, get_primary_value/2]).
+-export([default/3, enter_from_list/2, default_from_list/2]).
+-export([to_list/1, make/1]).
+-export([from_binary/1]).
+
+%% @type headers().
+%% @type key() = atom() | binary() | string().
+%% @type value() = atom() | binary() | string() | integer().
+
+%% @spec empty() -> headers()
+%% @doc Create an empty headers structure.
+empty() ->
+ gb_trees:empty().
+
+%% @spec make(headers() | [{key(), value()}]) -> headers()
+%% @doc Construct a headers() from the given list.
+make(L) when is_list(L) ->
+ from_list(L);
+%% Assume a tuple is already a mochiweb_headers structure.
+make(T) when is_tuple(T) ->
+ T.
+
+%% @spec from_binary(iolist()) -> headers()
+%% @doc Transforms a raw HTTP header into a mochiweb headers structure.
+%%
+%% The given raw HTTP header can be one of the following:
+%%
+%% 1) A string or a binary representing a full HTTP header ending with
+%% double CRLF.
+%% Examples:
+%% ```
+%% "Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n"
+%% <<"Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n">>'''
+%%
+%% 2) A list of binaries or strings where each element represents a raw
+%% HTTP header line ending with a single CRLF.
+%% Examples:
+%% ```
+%% [<<"Content-Length: 47\r\n">>, <<"Content-Type: text/plain\r\n">>]
+%% ["Content-Length: 47\r\n", "Content-Type: text/plain\r\n"]
+%% ["Content-Length: 47\r\n", <<"Content-Type: text/plain\r\n">>]'''
+%%
+from_binary(RawHttpHeader) when is_binary(RawHttpHeader) ->
+ from_binary(RawHttpHeader, []);
+from_binary(RawHttpHeaderList) ->
+ from_binary(list_to_binary([RawHttpHeaderList, "\r\n"])).
+
+from_binary(RawHttpHeader, Acc) ->
+ case erlang:decode_packet(httph, RawHttpHeader, []) of
+ {ok, {http_header, _, H, _, V}, Rest} ->
+ from_binary(Rest, [{H, V} | Acc]);
+ _ ->
+ make(Acc)
+ end.
+
+%% @spec from_list([{key(), value()}]) -> headers()
+%% @doc Construct a headers() from the given list.
+from_list(List) ->
+ lists:foldl(fun ({K, V}, T) -> insert(K, V, T) end, empty(), List).
+
+%% @spec enter_from_list([{key(), value()}], headers()) -> headers()
+%% @doc Insert pairs into the headers, replacing any values for existing keys.
+enter_from_list(List, T) ->
+ lists:foldl(fun ({K, V}, T1) -> enter(K, V, T1) end, T, List).
+
+%% @spec default_from_list([{key(), value()}], headers()) -> headers()
+%% @doc Insert pairs into the headers for keys that do not already exist.
+default_from_list(List, T) ->
+ lists:foldl(fun ({K, V}, T1) -> default(K, V, T1) end, T, List).
+
+%% @spec to_list(headers()) -> [{key(), string()}]
+%% @doc Return the contents of the headers. Each key is the exact key that
+%% was first inserted (i.e. it may be an atom or binary; case is
+%% preserved).
+to_list(T) ->
+ F = fun ({K, {array, L}}, Acc) ->
+ L1 = lists:reverse(L),
+ lists:foldl(fun (V, Acc1) -> [{K, V} | Acc1] end, Acc, L1);
+ (Pair, Acc) ->
+ [Pair | Acc]
+ end,
+ lists:reverse(lists:foldl(F, [], gb_trees:values(T))).
+
+%% @spec get_value(key(), headers()) -> string() | undefined
+%% @doc Return the value of the given header using a case insensitive search.
+%% undefined will be returned for keys that are not present.
+get_value(K, T) ->
+ case lookup(K, T) of
+ {value, {_, V}} ->
+ expand(V);
+ none ->
+ undefined
+ end.
+
+%% @spec get_primary_value(key(), headers()) -> string() | undefined
+%% @doc Return the value of the given header up to the first semicolon using
+%% a case insensitive search. undefined will be returned for keys
+%% that are not present.
+get_primary_value(K, T) ->
+ case get_value(K, T) of
+ undefined ->
+ undefined;
+ V ->
+ lists:takewhile(fun (C) -> C =/= $; end, V)
+ end.
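+
+%% Illustrative example (not part of the original module): with a stored
+%% value of "text/html; charset=utf8", get_primary_value returns
+%% "text/html".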
+
+%% @spec lookup(key(), headers()) -> {value, {key(), string()}} | none
+%% @doc Return the case preserved key and value for the given header using
+%% a case insensitive search. none will be returned for keys that are
+%% not present.
+lookup(K, T) ->
+ case gb_trees:lookup(normalize(K), T) of
+ {value, {K0, V}} ->
+ {value, {K0, expand(V)}};
+ none ->
+ none
+ end.
+
+%% @spec default(key(), value(), headers()) -> headers()
+%% @doc Insert the pair into the headers if it does not already exist.
+default(K, V, T) ->
+ K1 = normalize(K),
+ V1 = any_to_list(V),
+ try gb_trees:insert(K1, {K, V1}, T)
+ catch
+ error:{key_exists, _} ->
+ T
+ end.
+
+%% @spec enter(key(), value(), headers()) -> headers()
+%% @doc Insert the pair into the headers, replacing any pre-existing key.
+enter(K, V, T) ->
+ K1 = normalize(K),
+ V1 = any_to_list(V),
+ gb_trees:enter(K1, {K, V1}, T).
+
+%% @spec insert(key(), value(), headers()) -> headers()
+%% @doc Insert the pair into the headers, merging with any pre-existing key.
+%% A merge is done with Value = V0 ++ ", " ++ V1.
+insert(K, V, T) ->
+ K1 = normalize(K),
+ V1 = any_to_list(V),
+ try gb_trees:insert(K1, {K, V1}, T)
+ catch
+ error:{key_exists, _} ->
+ {K0, V0} = gb_trees:get(K1, T),
+ V2 = merge(K1, V1, V0),
+ gb_trees:update(K1, {K0, V2}, T)
+ end.
+
+%% @spec delete_any(key(), headers()) -> headers()
+%% @doc Delete the header corresponding to key if it is present.
+delete_any(K, T) ->
+ K1 = normalize(K),
+ gb_trees:delete_any(K1, T).
+
+%% Internal API
+
+expand({array, L}) ->
+ mochiweb_util:join(lists:reverse(L), ", ");
+expand(V) ->
+ V.
+
+merge("set-cookie", V1, {array, L}) ->
+ {array, [V1 | L]};
+merge("set-cookie", V1, V0) ->
+ {array, [V1, V0]};
+merge(_, V1, V0) ->
+ V0 ++ ", " ++ V1.
+
+normalize(K) when is_list(K) ->
+ string:to_lower(K);
+normalize(K) when is_atom(K) ->
+ normalize(atom_to_list(K));
+normalize(K) when is_binary(K) ->
+ normalize(binary_to_list(K)).
+
+any_to_list(V) when is_list(V) ->
+ V;
+any_to_list(V) when is_atom(V) ->
+ atom_to_list(V);
+any_to_list(V) when is_binary(V) ->
+ binary_to_list(V);
+any_to_list(V) when is_integer(V) ->
+ integer_to_list(V).
+
+%%
+%% Tests.
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+make_test() ->
+ Identity = make([{hdr, foo}]),
+ ?assertEqual(
+ Identity,
+ make(Identity)).
+
+enter_from_list_test() ->
+ H = make([{hdr, foo}]),
+ ?assertEqual(
+ [{baz, "wibble"}, {hdr, "foo"}],
+ to_list(enter_from_list([{baz, wibble}], H))),
+ ?assertEqual(
+ [{hdr, "bar"}],
+ to_list(enter_from_list([{hdr, bar}], H))),
+ ok.
+
+default_from_list_test() ->
+ H = make([{hdr, foo}]),
+ ?assertEqual(
+ [{baz, "wibble"}, {hdr, "foo"}],
+ to_list(default_from_list([{baz, wibble}], H))),
+ ?assertEqual(
+ [{hdr, "foo"}],
+ to_list(default_from_list([{hdr, bar}], H))),
+ ok.
+
+get_primary_value_test() ->
+ H = make([{hdr, foo}, {baz, <<"wibble;taco">>}]),
+ ?assertEqual(
+ "foo",
+ get_primary_value(hdr, H)),
+ ?assertEqual(
+ undefined,
+ get_primary_value(bar, H)),
+ ?assertEqual(
+ "wibble",
+ get_primary_value(<<"baz">>, H)),
+ ok.
+
+set_cookie_test() ->
+ H = make([{"set-cookie", foo}, {"set-cookie", bar}, {"set-cookie", baz}]),
+ ?assertEqual(
+ [{"set-cookie", "foo"}, {"set-cookie", "bar"}, {"set-cookie", "baz"}],
+ to_list(H)),
+ ok.
+
+headers_test() ->
+ H = ?MODULE:make([{hdr, foo}, {"Hdr", "bar"}, {'Hdr', 2}]),
+ [{hdr, "foo, bar, 2"}] = ?MODULE:to_list(H),
+ H1 = ?MODULE:insert(taco, grande, H),
+ [{hdr, "foo, bar, 2"}, {taco, "grande"}] = ?MODULE:to_list(H1),
+ H2 = ?MODULE:make([{"Set-Cookie", "foo"}]),
+ [{"Set-Cookie", "foo"}] = ?MODULE:to_list(H2),
+ H3 = ?MODULE:insert("Set-Cookie", "bar", H2),
+ [{"Set-Cookie", "foo"}, {"Set-Cookie", "bar"}] = ?MODULE:to_list(H3),
+ "foo, bar" = ?MODULE:get_value("set-cookie", H3),
+ {value, {"Set-Cookie", "foo, bar"}} = ?MODULE:lookup("set-cookie", H3),
+ undefined = ?MODULE:get_value("shibby", H3),
+ none = ?MODULE:lookup("shibby", H3),
+ H4 = ?MODULE:insert("content-type",
+ "application/x-www-form-urlencoded; charset=utf8",
+ H3),
+ "application/x-www-form-urlencoded" = ?MODULE:get_primary_value(
+ "content-type", H4),
+ H4 = ?MODULE:delete_any("nonexistent-header", H4),
+ H3 = ?MODULE:delete_any("content-type", H4),
+ HB = <<"Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n">>,
+ H_HB = ?MODULE:from_binary(HB),
+ H_HB = ?MODULE:from_binary(binary_to_list(HB)),
+ "47" = ?MODULE:get_value("Content-Length", H_HB),
+ "text/plain" = ?MODULE:get_value("Content-Type", H_HB),
+ L_H_HB = ?MODULE:to_list(H_HB),
+ 2 = length(L_H_HB),
+ true = lists:member({'Content-Length', "47"}, L_H_HB),
+ true = lists:member({'Content-Type', "text/plain"}, L_H_HB),
+ HL = [ <<"Content-Length: 47\r\n">>, <<"Content-Type: text/plain\r\n">> ],
+ HL2 = [ "Content-Length: 47\r\n", <<"Content-Type: text/plain\r\n">> ],
+ HL3 = [ <<"Content-Length: 47\r\n">>, "Content-Type: text/plain\r\n" ],
+ H_HL = ?MODULE:from_binary(HL),
+ H_HL = ?MODULE:from_binary(HL2),
+ H_HL = ?MODULE:from_binary(HL3),
+ "47" = ?MODULE:get_value("Content-Length", H_HL),
+ "text/plain" = ?MODULE:get_value("Content-Type", H_HL),
+ L_H_HL = ?MODULE:to_list(H_HL),
+ 2 = length(L_H_HL),
+ true = lists:member({'Content-Length', "47"}, L_H_HL),
+ true = lists:member({'Content-Type', "text/plain"}, L_H_HL),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<>>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<"">>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<"\r\n">>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<"\r\n\r\n">>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary("")),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<>>])),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<"">>])),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<"\r\n">>])),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<"\r\n\r\n">>])),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_html.erl b/1.1.x/src/mochiweb/mochiweb_html.erl
new file mode 100644
index 00000000..a15c359c
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_html.erl
@@ -0,0 +1,1061 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Loosely tokenizes and generates parse trees for HTML 4.
+-module(mochiweb_html).
+-export([tokens/1, parse/1, parse_tokens/1, to_tokens/1, escape/1,
+ escape_attr/1, to_html/1]).
+
+%% This is a macro to placate syntax highlighters.
+-define(QUOTE, $\").
+-define(SQUOTE, $\').
+-define(ADV_COL(S, N),
+ S#decoder{column=N+S#decoder.column,
+ offset=N+S#decoder.offset}).
+-define(INC_COL(S),
+ S#decoder{column=1+S#decoder.column,
+ offset=1+S#decoder.offset}).
+-define(INC_LINE(S),
+ S#decoder{column=1,
+ line=1+S#decoder.line,
+ offset=1+S#decoder.offset}).
+-define(INC_CHAR(S, C),
+ case C of
+ $\n ->
+ S#decoder{column=1,
+ line=1+S#decoder.line,
+ offset=1+S#decoder.offset};
+ _ ->
+ S#decoder{column=1+S#decoder.column,
+ offset=1+S#decoder.offset}
+ end).
+
+-define(IS_WHITESPACE(C),
+ (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+-define(IS_LITERAL_SAFE(C),
+ ((C >= $A andalso C =< $Z) orelse (C >= $a andalso C =< $z)
+ orelse (C >= $0 andalso C =< $9))).
+-define(PROBABLE_CLOSE(C),
+ (C =:= $> orelse ?IS_WHITESPACE(C))).
+
+-record(decoder, {line=1,
+ column=1,
+ offset=0}).
+
+%% @type html_node() = {string(), [html_attr()], [html_node() | string()]}
+%% @type html_attr() = {string(), string()}
+%% @type html_token() = html_data() | start_tag() | end_tag() | inline_html() | html_comment() | html_doctype()
+%% @type html_data() = {data, string(), Whitespace::boolean()}
+%% @type start_tag() = {start_tag, Name, [html_attr()], Singleton::boolean()}
+%% @type end_tag() = {end_tag, Name}
+%% @type html_comment() = {comment, Comment}
+%% @type html_doctype() = {doctype, [Doctype]}
+%% @type inline_html() = {'=', iolist()}
+
+%% External API.
+
+%% @spec parse(string() | binary()) -> html_node()
+%% @doc Tokenize and then transform the token stream into an HTML tree.
+parse(Input) ->
+ parse_tokens(tokens(Input)).
+
+%% @spec parse_tokens([html_token()]) -> html_node()
+%% @doc Transform the output of tokens(Doc) into an HTML tree.
+parse_tokens(Tokens) when is_list(Tokens) ->
+ %% Skip over doctype, processing instructions
+ F = fun (X) ->
+ case X of
+ {start_tag, _, _, false} ->
+ false;
+ _ ->
+ true
+ end
+ end,
+ [{start_tag, Tag, Attrs, false} | Rest] = lists:dropwhile(F, Tokens),
+ {Tree, _} = tree(Rest, [norm({Tag, Attrs})]),
+ Tree.
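+
+%% Illustrative usage (not part of the original module):
+%%
+%%   > mochiweb_html:parse(<<"<p class=foo>hi</p>">>).
+%%   {<<"p">>, [{<<"class">>, <<"foo">>}], [<<"hi">>]}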
+
+%% @spec tokens(StringOrBinary) -> [html_token()]
+%% @doc Transform the input UTF-8 HTML into a token stream.
+tokens(Input) ->
+ tokens(iolist_to_binary(Input), #decoder{}, []).
+
+%% @spec to_tokens(html_node()) -> [html_token()]
+%% @doc Convert an html_node() tree to a list of tokens.
+to_tokens({Tag0}) ->
+ to_tokens({Tag0, [], []});
+to_tokens(T={'=', _}) ->
+ [T];
+to_tokens(T={doctype, _}) ->
+ [T];
+to_tokens(T={comment, _}) ->
+ [T];
+to_tokens({Tag0, Acc}) ->
+ %% This is only allowed in sub-tags: {p, [{"class", "foo"}]}
+ to_tokens({Tag0, [], Acc});
+to_tokens({Tag0, Attrs, Acc}) ->
+ Tag = to_tag(Tag0),
+ to_tokens([{Tag, Acc}], [{start_tag, Tag, Attrs, is_singleton(Tag)}]).
+
+%% @spec to_html([html_token()] | html_node()) -> iolist()
+%% @doc Convert a list of html_token() to an HTML document.
+to_html(Node) when is_tuple(Node) ->
+ to_html(to_tokens(Node));
+to_html(Tokens) when is_list(Tokens) ->
+ to_html(Tokens, []).
+
+%% @spec escape(string() | atom() | binary()) -> binary()
+%% @doc Escape a string such that it's safe for HTML (amp; lt; gt;).
+escape(B) when is_binary(B) ->
+ escape(binary_to_list(B), []);
+escape(A) when is_atom(A) ->
+ escape(atom_to_list(A), []);
+escape(S) when is_list(S) ->
+ escape(S, []).
+
+%% @spec escape_attr(string() | binary() | atom() | integer() | float()) -> binary()
+%% @doc Escape a string such that it's safe for HTML attrs
+%% (amp; lt; gt; quot;).
+escape_attr(B) when is_binary(B) ->
+ escape_attr(binary_to_list(B), []);
+escape_attr(A) when is_atom(A) ->
+ escape_attr(atom_to_list(A), []);
+escape_attr(S) when is_list(S) ->
+ escape_attr(S, []);
+escape_attr(I) when is_integer(I) ->
+ escape_attr(integer_to_list(I), []);
+escape_attr(F) when is_float(F) ->
+ escape_attr(mochinum:digits(F), []).
+
+to_html([], Acc) ->
+ lists:reverse(Acc);
+to_html([{'=', Content} | Rest], Acc) ->
+ to_html(Rest, [Content | Acc]);
+to_html([{pi, Tag, Attrs} | Rest], Acc) ->
+ Open = [<<"<?">>,
+ Tag,
+ attrs_to_html(Attrs, []),
+ <<"?>">>],
+ to_html(Rest, [Open | Acc]);
+to_html([{comment, Comment} | Rest], Acc) ->
+ to_html(Rest, [[<<"<!--">>, Comment, <<"-->">>] | Acc]);
+to_html([{doctype, Parts} | Rest], Acc) ->
+ Inside = doctype_to_html(Parts, Acc),
+ to_html(Rest, [[<<"<!DOCTYPE">>, Inside, <<">">>] | Acc]);
+to_html([{data, Data, _Whitespace} | Rest], Acc) ->
+ to_html(Rest, [escape(Data) | Acc]);
+to_html([{start_tag, Tag, Attrs, Singleton} | Rest], Acc) ->
+ Open = [<<"<">>,
+ Tag,
+ attrs_to_html(Attrs, []),
+ case Singleton of
+ true -> <<" />">>;
+ false -> <<">">>
+ end],
+ to_html(Rest, [Open | Acc]);
+to_html([{end_tag, Tag} | Rest], Acc) ->
+ to_html(Rest, [[<<"</">>, Tag, <<">">>] | Acc]).
+
+doctype_to_html([], Acc) ->
+ lists:reverse(Acc);
+doctype_to_html([Word | Rest], Acc) ->
+ case lists:all(fun (C) -> ?IS_LITERAL_SAFE(C) end,
+ binary_to_list(iolist_to_binary(Word))) of
+ true ->
+ doctype_to_html(Rest, [[<<" ">>, Word] | Acc]);
+ false ->
+ doctype_to_html(Rest, [[<<" \"">>, escape_attr(Word), ?QUOTE] | Acc])
+ end.
+
+attrs_to_html([], Acc) ->
+ lists:reverse(Acc);
+attrs_to_html([{K, V} | Rest], Acc) ->
+ attrs_to_html(Rest,
+ [[<<" ">>, escape(K), <<"=\"">>,
+ escape_attr(V), <<"\"">>] | Acc]).
+
+escape([], Acc) ->
+ list_to_binary(lists:reverse(Acc));
+escape("<" ++ Rest, Acc) ->
+ escape(Rest, lists:reverse("&lt;", Acc));
+escape(">" ++ Rest, Acc) ->
+ escape(Rest, lists:reverse("&gt;", Acc));
+escape("&" ++ Rest, Acc) ->
+ escape(Rest, lists:reverse("&amp;", Acc));
+escape([C | Rest], Acc) ->
+ escape(Rest, [C | Acc]).
+
+escape_attr([], Acc) ->
+ list_to_binary(lists:reverse(Acc));
+escape_attr("<" ++ Rest, Acc) ->
+ escape_attr(Rest, lists:reverse("&lt;", Acc));
+escape_attr(">" ++ Rest, Acc) ->
+ escape_attr(Rest, lists:reverse("&gt;", Acc));
+escape_attr("&" ++ Rest, Acc) ->
+ escape_attr(Rest, lists:reverse("&amp;", Acc));
+escape_attr([?QUOTE | Rest], Acc) ->
+ escape_attr(Rest, lists:reverse("&quot;", Acc));
+escape_attr([C | Rest], Acc) ->
+ escape_attr(Rest, [C | Acc]).
+
+to_tag(A) when is_atom(A) ->
+ norm(atom_to_list(A));
+to_tag(L) ->
+ norm(L).
+
+to_tokens([], Acc) ->
+ lists:reverse(Acc);
+to_tokens([{Tag, []} | Rest], Acc) ->
+ to_tokens(Rest, [{end_tag, to_tag(Tag)} | Acc]);
+to_tokens([{Tag0, [{T0} | R1]} | Rest], Acc) ->
+ %% Allow {br}
+ to_tokens([{Tag0, [{T0, [], []} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [T0={'=', _C0} | R1]} | Rest], Acc) ->
+ %% Allow {'=', iolist()}
+ to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
+to_tokens([{Tag0, [T0={comment, _C0} | R1]} | Rest], Acc) ->
+ %% Allow {comment, iolist()}
+ to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
+to_tokens([{Tag0, [T0={pi, _S0, _A0} | R1]} | Rest], Acc) ->
+ %% Allow {pi, binary(), list()}
+ to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
+to_tokens([{Tag0, [{T0, A0=[{_, _} | _]} | R1]} | Rest], Acc) ->
+ %% Allow {p, [{"class", "foo"}]}
+ to_tokens([{Tag0, [{T0, A0, []} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, C0} | R1]} | Rest], Acc) ->
+ %% Allow {p, "content"} and {p, <<"content">>}
+ to_tokens([{Tag0, [{T0, [], C0} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, A1, C0} | R1]} | Rest], Acc) when is_binary(C0) ->
+ %% Allow {"p", [{"class", "foo"}], <<"content">>}
+ to_tokens([{Tag0, [{T0, A1, binary_to_list(C0)} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, A1, C0=[C | _]} | R1]} | Rest], Acc)
+ when is_integer(C) ->
+ %% Allow {"p", [{"class", "foo"}], "content"}
+ to_tokens([{Tag0, [{T0, A1, [C0]} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, A1, C1} | R1]} | Rest], Acc) ->
+ %% Native {"p", [{"class", "foo"}], ["content"]}
+ Tag = to_tag(Tag0),
+ T1 = to_tag(T0),
+ case is_singleton(norm(T1)) of
+ true ->
+ to_tokens([{Tag, R1} | Rest], [{start_tag, T1, A1, true} | Acc]);
+ false ->
+ to_tokens([{T1, C1}, {Tag, R1} | Rest],
+ [{start_tag, T1, A1, false} | Acc])
+ end;
+to_tokens([{Tag0, [L | R1]} | Rest], Acc) when is_list(L) ->
+ %% List text
+ Tag = to_tag(Tag0),
+ to_tokens([{Tag, R1} | Rest], [{data, iolist_to_binary(L), false} | Acc]);
+to_tokens([{Tag0, [B | R1]} | Rest], Acc) when is_binary(B) ->
+ %% Binary text
+ Tag = to_tag(Tag0),
+ to_tokens([{Tag, R1} | Rest], [{data, B, false} | Acc]).
+
+tokens(B, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary>> ->
+ lists:reverse(Acc);
+ _ ->
+ {Tag, S1} = tokenize(B, S),
+ case parse_flag(Tag) of
+ script ->
+ {Tag2, S2} = tokenize_script(B, S1),
+ tokens(B, S2, [Tag2, Tag | Acc]);
+ textarea ->
+ {Tag2, S2} = tokenize_textarea(B, S1),
+ tokens(B, S2, [Tag2, Tag | Acc]);
+ none ->
+ tokens(B, S1, [Tag | Acc])
+ end
+ end.
+
+parse_flag({start_tag, B, _, false}) ->
+ case string:to_lower(binary_to_list(B)) of
+ "script" ->
+ script;
+ "textarea" ->
+ textarea;
+ _ ->
+ none
+ end;
+parse_flag(_) ->
+ none.
+
+tokenize(B, S=#decoder{offset=O}) ->
+ case B of
+ <<_:O/binary, "<!--", _/binary>> ->
+ tokenize_comment(B, ?ADV_COL(S, 4));
+ <<_:O/binary, "<!DOCTYPE", _/binary>> ->
+ tokenize_doctype(B, ?ADV_COL(S, 10));
+ <<_:O/binary, "<![CDATA[", _/binary>> ->
+ tokenize_cdata(B, ?ADV_COL(S, 9));
+ <<_:O/binary, "<?", _/binary>> ->
+ {Tag, S1} = tokenize_literal(B, ?ADV_COL(S, 2)),
+ {Attrs, S2} = tokenize_attributes(B, S1),
+ S3 = find_qgt(B, S2),
+ {{pi, Tag, Attrs}, S3};
+ <<_:O/binary, "&", _/binary>> ->
+ tokenize_charref(B, ?INC_COL(S));
+ <<_:O/binary, "</", _/binary>> ->
+ {Tag, S1} = tokenize_literal(B, ?ADV_COL(S, 2)),
+ {S2, _} = find_gt(B, S1),
+ {{end_tag, Tag}, S2};
+ <<_:O/binary, "<", C, _/binary>> when ?IS_WHITESPACE(C) ->
+ %% This isn't really strict HTML
+ {{data, Data, _Whitespace}, S1} = tokenize_data(B, ?INC_COL(S)),
+ {{data, <<$<, Data/binary>>, false}, S1};
+ <<_:O/binary, "<", _/binary>> ->
+ {Tag, S1} = tokenize_literal(B, ?INC_COL(S)),
+ {Attrs, S2} = tokenize_attributes(B, S1),
+ {S3, HasSlash} = find_gt(B, S2),
+ Singleton = HasSlash orelse is_singleton(norm(binary_to_list(Tag))),
+ {{start_tag, Tag, Attrs, Singleton}, S3};
+ _ ->
+ tokenize_data(B, S)
+ end.
+
+tree_data([{data, Data, Whitespace} | Rest], AllWhitespace, Acc) ->
+ tree_data(Rest, (Whitespace andalso AllWhitespace), [Data | Acc]);
+tree_data(Rest, AllWhitespace, Acc) ->
+ {iolist_to_binary(lists:reverse(Acc)), AllWhitespace, Rest}.
+
+tree([], Stack) ->
+ {destack(Stack), []};
+tree([{end_tag, Tag} | Rest], Stack) ->
+ case destack(norm(Tag), Stack) of
+ S when is_list(S) ->
+ tree(Rest, S);
+ Result ->
+ {Result, []}
+ end;
+tree([{start_tag, Tag, Attrs, true} | Rest], S) ->
+ tree(Rest, append_stack_child(norm({Tag, Attrs}), S));
+tree([{start_tag, Tag, Attrs, false} | Rest], S) ->
+ tree(Rest, stack(norm({Tag, Attrs}), S));
+tree([T={pi, _Tag, _Attrs} | Rest], S) ->
+ tree(Rest, append_stack_child(T, S));
+tree([T={comment, _Comment} | Rest], S) ->
+ tree(Rest, append_stack_child(T, S));
+tree(L=[{data, _Data, _Whitespace} | _], S) ->
+ case tree_data(L, true, []) of
+ {_, true, Rest} ->
+ tree(Rest, S);
+ {Data, false, Rest} ->
+ tree(Rest, append_stack_child(Data, S))
+ end;
+tree([{doctype, _} | Rest], Stack) ->
+ tree(Rest, Stack).
+
+norm({Tag, Attrs}) ->
+ {norm(Tag), [{norm(K), iolist_to_binary(V)} || {K, V} <- Attrs], []};
+norm(Tag) when is_binary(Tag) ->
+ Tag;
+norm(Tag) ->
+ list_to_binary(string:to_lower(Tag)).
+
+stack(T1={TN, _, _}, Stack=[{TN, _, _} | _Rest])
+ when TN =:= <<"li">> orelse TN =:= <<"option">> ->
+ [T1 | destack(TN, Stack)];
+stack(T1={TN0, _, _}, Stack=[{TN1, _, _} | _Rest])
+ when (TN0 =:= <<"dd">> orelse TN0 =:= <<"dt">>) andalso
+ (TN1 =:= <<"dd">> orelse TN1 =:= <<"dt">>) ->
+ [T1 | destack(TN1, Stack)];
+stack(T1, Stack) ->
+ [T1 | Stack].
+
+append_stack_child(StartTag, [{Name, Attrs, Acc} | Stack]) ->
+ [{Name, Attrs, [StartTag | Acc]} | Stack].
+
+destack(TagName, Stack) when is_list(Stack) ->
+ F = fun (X) ->
+ case X of
+ {TagName, _, _} ->
+ false;
+ _ ->
+ true
+ end
+ end,
+ case lists:splitwith(F, Stack) of
+ {_, []} ->
+ %% If we're parsing something like XML we might find
+ %% a <link>tag</link> that is normally a singleton
+ %% in HTML but isn't here
+ case {is_singleton(TagName), Stack} of
+ {true, [{T0, A0, Acc0} | Post0]} ->
+ case lists:splitwith(F, Acc0) of
+ {_, []} ->
+ %% Actually was a singleton
+ Stack;
+ {Pre, [{T1, A1, []} | Post1]} ->
+ [{T0, A0, [{T1, A1, lists:reverse(Pre)} | Post1]}
+ | Post0]
+ end;
+ _ ->
+ %% No match, no state change
+ Stack
+ end;
+ {_Pre, [_T]} ->
+ %% Unfurl the whole stack, we're done
+ destack(Stack);
+ {Pre, [T, {T0, A0, Acc0} | Post]} ->
+ %% Unfurl up to the tag, then accumulate it
+ [{T0, A0, [destack(Pre ++ [T]) | Acc0]} | Post]
+ end.
+
+destack([{Tag, Attrs, Acc}]) ->
+ {Tag, Attrs, lists:reverse(Acc)};
+destack([{T1, A1, Acc1}, {T0, A0, Acc0} | Rest]) ->
+ destack([{T0, A0, [{T1, A1, lists:reverse(Acc1)} | Acc0]} | Rest]).
+
+is_singleton(<<"br">>) -> true;
+is_singleton(<<"hr">>) -> true;
+is_singleton(<<"img">>) -> true;
+is_singleton(<<"input">>) -> true;
+is_singleton(<<"base">>) -> true;
+is_singleton(<<"meta">>) -> true;
+is_singleton(<<"link">>) -> true;
+is_singleton(<<"area">>) -> true;
+is_singleton(<<"param">>) -> true;
+is_singleton(<<"col">>) -> true;
+is_singleton(_) -> false.
+
+tokenize_data(B, S=#decoder{offset=O}) ->
+ tokenize_data(B, S, O, true).
+
+tokenize_data(B, S=#decoder{offset=O}, Start, Whitespace) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when (C =/= $< andalso C =/= $&) ->
+ tokenize_data(B, ?INC_CHAR(S, C), Start,
+ (Whitespace andalso ?IS_WHITESPACE(C)));
+ _ ->
+ Len = O - Start,
+ <<_:Start/binary, Data:Len/binary, _/binary>> = B,
+ {{data, Data, Whitespace}, S}
+ end.
+
+tokenize_attributes(B, S) ->
+ tokenize_attributes(B, S, []).
+
+tokenize_attributes(B, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary>> ->
+ {lists:reverse(Acc), S};
+ <<_:O/binary, C, _/binary>> when (C =:= $> orelse C =:= $/) ->
+ {lists:reverse(Acc), S};
+ <<_:O/binary, "?>", _/binary>> ->
+ {lists:reverse(Acc), S};
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ tokenize_attributes(B, ?INC_CHAR(S, C), Acc);
+ _ ->
+ {Attr, S1} = tokenize_literal(B, S),
+ {Value, S2} = tokenize_attr_value(Attr, B, S1),
+ tokenize_attributes(B, S2, [{Attr, Value} | Acc])
+ end.
+
+tokenize_attr_value(Attr, B, S) ->
+ S1 = skip_whitespace(B, S),
+ O = S1#decoder.offset,
+ case B of
+ <<_:O/binary, "=", _/binary>> ->
+ S2 = skip_whitespace(B, ?INC_COL(S1)),
+ tokenize_word_or_literal(B, S2);
+ _ ->
+ {Attr, S1}
+ end.
+
+skip_whitespace(B, S=#decoder{offset=O}) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ skip_whitespace(B, ?INC_CHAR(S, C));
+ _ ->
+ S
+ end.
+
+tokenize_literal(Bin, S) ->
+ tokenize_literal(Bin, S, []).
+
+tokenize_literal(Bin, S=#decoder{offset=O}, Acc) ->
+ case Bin of
+ <<_:O/binary, $&, _/binary>> ->
+ {{data, Data, false}, S1} = tokenize_charref(Bin, ?INC_COL(S)),
+ tokenize_literal(Bin, S1, [Data | Acc]);
+ <<_:O/binary, C, _/binary>> when not (?IS_WHITESPACE(C)
+ orelse C =:= $>
+ orelse C =:= $/
+ orelse C =:= $=) ->
+ tokenize_literal(Bin, ?INC_COL(S), [C | Acc]);
+ _ ->
+ {iolist_to_binary(lists:reverse(Acc)), S}
+ end.
+
+find_qgt(Bin, S=#decoder{offset=O}) ->
+ case Bin of
+ <<_:O/binary, "?>", _/binary>> ->
+ ?ADV_COL(S, 2);
+ %% tokenize_attributes takes care of this state:
+ %% <<_:O/binary, C, _/binary>> ->
+ %% find_qgt(Bin, ?INC_CHAR(S, C));
+ <<_:O/binary>> ->
+ S
+ end.
+
+find_gt(Bin, S) ->
+ find_gt(Bin, S, false).
+
+find_gt(Bin, S=#decoder{offset=O}, HasSlash) ->
+ case Bin of
+ <<_:O/binary, $/, _/binary>> ->
+ find_gt(Bin, ?INC_COL(S), true);
+ <<_:O/binary, $>, _/binary>> ->
+ {?INC_COL(S), HasSlash};
+ <<_:O/binary, C, _/binary>> ->
+ find_gt(Bin, ?INC_CHAR(S, C), HasSlash);
+ _ ->
+ {S, HasSlash}
+ end.
+
+tokenize_charref(Bin, S=#decoder{offset=O}) ->
+ tokenize_charref(Bin, S, O).
+
+tokenize_charref(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ <<_:O/binary>> ->
+ <<_:Start/binary, Raw/binary>> = Bin,
+ {{data, Raw, false}, S};
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C)
+ orelse C =:= ?SQUOTE
+ orelse C =:= ?QUOTE
+ orelse C =:= $/
+ orelse C =:= $> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{data, Raw, false}, S};
+ <<_:O/binary, $;, _/binary>> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ Data = case mochiweb_charref:charref(Raw) of
+ undefined ->
+ Start1 = Start - 1,
+ Len1 = Len + 2,
+ <<_:Start1/binary, R:Len1/binary, _/binary>> = Bin,
+ R;
+ Unichar ->
+ mochiutf8:codepoint_to_bytes(Unichar)
+ end,
+ {{data, Data, false}, ?INC_COL(S)};
+ _ ->
+ tokenize_charref(Bin, ?INC_COL(S), Start)
+ end.
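+
+%% Illustrative behaviour (not part of the original module): "&amp;" decodes
+%% to <<"&">>, while an unrecognised reference such as "&bogus;" is passed
+%% through verbatim, including the leading "&" and trailing ";".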
+
+tokenize_doctype(Bin, S) ->
+ tokenize_doctype(Bin, S, []).
+
+tokenize_doctype(Bin, S=#decoder{offset=O}, Acc) ->
+ case Bin of
+ <<_:O/binary>> ->
+ {{doctype, lists:reverse(Acc)}, S};
+ <<_:O/binary, $>, _/binary>> ->
+ {{doctype, lists:reverse(Acc)}, ?INC_COL(S)};
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ tokenize_doctype(Bin, ?INC_CHAR(S, C), Acc);
+ _ ->
+ {Word, S1} = tokenize_word_or_literal(Bin, S),
+ tokenize_doctype(Bin, S1, [Word | Acc])
+ end.
+
+tokenize_word_or_literal(Bin, S=#decoder{offset=O}) ->
+ case Bin of
+ <<_:O/binary, C, _/binary>> when C =:= ?QUOTE orelse C =:= ?SQUOTE ->
+ tokenize_word(Bin, ?INC_COL(S), C);
+ <<_:O/binary, C, _/binary>> when not ?IS_WHITESPACE(C) ->
+ %% Sanity check for whitespace
+ tokenize_literal(Bin, S, [])
+ end.
+
+tokenize_word(Bin, S, Quote) ->
+ tokenize_word(Bin, S, Quote, []).
+
+tokenize_word(Bin, S=#decoder{offset=O}, Quote, Acc) ->
+ case Bin of
+ <<_:O/binary>> ->
+ {iolist_to_binary(lists:reverse(Acc)), S};
+ <<_:O/binary, Quote, _/binary>> ->
+ {iolist_to_binary(lists:reverse(Acc)), ?INC_COL(S)};
+ <<_:O/binary, $&, _/binary>> ->
+ {{data, Data, false}, S1} = tokenize_charref(Bin, ?INC_COL(S)),
+ tokenize_word(Bin, S1, Quote, [Data | Acc]);
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_word(Bin, ?INC_CHAR(S, C), Quote, [C | Acc])
+ end.
+
+tokenize_cdata(Bin, S=#decoder{offset=O}) ->
+ tokenize_cdata(Bin, S, O).
+
+tokenize_cdata(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ <<_:O/binary, "]]>", _/binary>> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{data, Raw, false}, ?ADV_COL(S, 3)};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_cdata(Bin, ?INC_CHAR(S, C), Start);
+ _ ->
+ <<_:O/binary, Raw/binary>> = Bin,
+ {{data, Raw, false}, S}
+ end.
+
+tokenize_comment(Bin, S=#decoder{offset=O}) ->
+ tokenize_comment(Bin, S, O).
+
+tokenize_comment(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ <<_:O/binary, "-->", _/binary>> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{comment, Raw}, ?ADV_COL(S, 3)};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_comment(Bin, ?INC_CHAR(S, C), Start);
+ <<_:Start/binary, Raw/binary>> ->
+ {{comment, Raw}, S}
+ end.
+
+tokenize_script(Bin, S=#decoder{offset=O}) ->
+ tokenize_script(Bin, S, O).
+
+tokenize_script(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ %% Just a look-ahead; we want the end_tag separately
+ <<_:O/binary, $<, $/, SS, CC, RR, II, PP, TT, ZZ, _/binary>>
+ when (SS =:= $s orelse SS =:= $S) andalso
+ (CC =:= $c orelse CC =:= $C) andalso
+ (RR =:= $r orelse RR =:= $R) andalso
+ (II =:= $i orelse II =:= $I) andalso
+ (PP =:= $p orelse PP =:= $P) andalso
+ (TT=:= $t orelse TT =:= $T) andalso
+ ?PROBABLE_CLOSE(ZZ) ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{data, Raw, false}, S};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_script(Bin, ?INC_CHAR(S, C), Start);
+ <<_:Start/binary, Raw/binary>> ->
+ {{data, Raw, false}, S}
+ end.
+
+tokenize_textarea(Bin, S=#decoder{offset=O}) ->
+ tokenize_textarea(Bin, S, O).
+
+tokenize_textarea(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ %% Just a look-ahead; we want the end_tag separately
+ <<_:O/binary, $<, $/, TT, EE, XX, TT2, AA, RR, EE2, AA2, ZZ, _/binary>>
+ when (TT =:= $t orelse TT =:= $T) andalso
+ (EE =:= $e orelse EE =:= $E) andalso
+ (XX =:= $x orelse XX =:= $X) andalso
+ (TT2 =:= $t orelse TT2 =:= $T) andalso
+ (AA =:= $a orelse AA =:= $A) andalso
+ (RR =:= $r orelse RR =:= $R) andalso
+ (EE2 =:= $e orelse EE2 =:= $E) andalso
+ (AA2 =:= $a orelse AA2 =:= $A) andalso
+ ?PROBABLE_CLOSE(ZZ) ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{data, Raw, false}, S};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_textarea(Bin, ?INC_CHAR(S, C), Start);
+ <<_:Start/binary, Raw/binary>> ->
+ {{data, Raw, false}, S}
+ end.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+to_html_test() ->
+ ?assertEqual(
+ <<"<html><head><title>hey!</title></head><body><p class=\"foo\">what's up<br /></p><div>sucka</div>RAW!<!-- comment! --></body></html>">>,
+ iolist_to_binary(
+ to_html({html, [],
+ [{<<"head">>, [],
+ [{title, <<"hey!">>}]},
+ {body, [],
+ [{p, [{class, foo}], [<<"what's">>, <<" up">>, {br}]},
+ {'div', <<"sucka">>},
+ {'=', <<"RAW!">>},
+ {comment, <<" comment! ">>}]}]}))),
+ ?assertEqual(
+ <<"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">">>,
+ iolist_to_binary(
+ to_html({doctype,
+ [<<"html">>, <<"PUBLIC">>,
+ <<"-//W3C//DTD XHTML 1.0 Transitional//EN">>,
+ <<"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">>]}))),
+ ?assertEqual(
+ <<"<html><?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?></html>">>,
+ iolist_to_binary(
+ to_html({<<"html">>,[],
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}]}))),
+ ok.
+
+escape_test() ->
+ ?assertEqual(
+ <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape(<<"&quot;\"word ><<up!&quot;">>)),
+ ?assertEqual(
+ <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape("&quot;\"word ><<up!&quot;")),
+ ?assertEqual(
+ <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape('&quot;\"word ><<up!&quot;')),
+ ok.
+
+escape_attr_test() ->
+ ?assertEqual(
+ <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape_attr(<<"&quot;\"word ><<up!&quot;">>)),
+ ?assertEqual(
+ <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape_attr("&quot;\"word ><<up!&quot;")),
+ ?assertEqual(
+ <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape_attr('&quot;\"word ><<up!&quot;')),
+ ?assertEqual(
+ <<"12345">>,
+ escape_attr(12345)),
+ ?assertEqual(
+ <<"1.5">>,
+ escape_attr(1.5)),
+ ok.
+
+tokens_test() ->
+ ?assertEqual(
+ [{start_tag, <<"foo">>, [{<<"bar">>, <<"baz">>},
+ {<<"wibble">>, <<"wibble">>},
+ {<<"alice">>, <<"bob">>}], true}],
+ tokens(<<"<foo bar=baz wibble='wibble' alice=\"bob\"/>">>)),
+ ?assertEqual(
+ [{start_tag, <<"foo">>, [{<<"bar">>, <<"baz">>},
+ {<<"wibble">>, <<"wibble">>},
+ {<<"alice">>, <<"bob">>}], true}],
+ tokens(<<"<foo bar=baz wibble='wibble' alice=bob/>">>)),
+ ?assertEqual(
+ [{comment, <<"[if lt IE 7]>\n<style type=\"text/css\">\n.no_ie { display: none; }\n</style>\n<![endif]">>}],
+ tokens(<<"<!--[if lt IE 7]>\n<style type=\"text/css\">\n.no_ie { display: none; }\n</style>\n<![endif]-->">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ tokens(<<"<script type=\"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ tokens(<<"<script type =\"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ tokens(<<"<script type = \"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ tokens(<<"<script type= \"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"textarea">>, [], false},
+ {data, <<"<html></body>">>, false},
+ {end_tag, <<"textarea">>}],
+ tokens(<<"<textarea><html></body></textarea>">>)),
+ ?assertEqual(
+ [{start_tag, <<"textarea">>, [], false},
+ {data, <<"<html></body></textareaz>">>, false}],
+ tokens(<<"<textarea ><html></body></textareaz>">>)),
+ ?assertEqual(
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
+ tokens(<<"<?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?>">>)),
+ ?assertEqual(
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
+ tokens(<<"<?xml:namespace prefix=o ns=urn:schemas-microsoft-com:office:office \n?>">>)),
+ ?assertEqual(
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
+ tokens(<<"<?xml:namespace prefix=o ns=urn:schemas-microsoft-com:office:office">>)),
+ ?assertEqual(
+ [{data, <<"<">>, false}],
+ tokens(<<"&lt;">>)),
+ ?assertEqual(
+ [{data, <<"not html ">>, false},
+ {data, <<"< at all">>, false}],
+ tokens(<<"not html < at all">>)),
+ ok.
+
+parse_test() ->
+ D0 = <<"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/html4/strict.dtd\">
+<html>
+ <head>
+ <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">
+ <title>Foo</title>
+ <link rel=\"stylesheet\" type=\"text/css\" href=\"/static/rel/dojo/resources/dojo.css\" media=\"screen\">
+ <link rel=\"stylesheet\" type=\"text/css\" href=\"/static/foo.css\" media=\"screen\">
+ <!--[if lt IE 7]>
+ <style type=\"text/css\">
+ .no_ie { display: none; }
+ </style>
+ <![endif]-->
+ <link rel=\"icon\" href=\"/static/images/favicon.ico\" type=\"image/x-icon\">
+ <link rel=\"shortcut icon\" href=\"/static/images/favicon.ico\" type=\"image/x-icon\">
+ </head>
+ <body id=\"home\" class=\"tundra\"><![CDATA[&lt;<this<!-- is -->CDATA>&gt;]]></body>
+</html>">>,
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"head">>, [],
+ [{<<"meta">>,
+ [{<<"http-equiv">>,<<"Content-Type">>},
+ {<<"content">>,<<"text/html; charset=UTF-8">>}],
+ []},
+ {<<"title">>,[],[<<"Foo">>]},
+ {<<"link">>,
+ [{<<"rel">>,<<"stylesheet">>},
+ {<<"type">>,<<"text/css">>},
+ {<<"href">>,<<"/static/rel/dojo/resources/dojo.css">>},
+ {<<"media">>,<<"screen">>}],
+ []},
+ {<<"link">>,
+ [{<<"rel">>,<<"stylesheet">>},
+ {<<"type">>,<<"text/css">>},
+ {<<"href">>,<<"/static/foo.css">>},
+ {<<"media">>,<<"screen">>}],
+ []},
+ {comment,<<"[if lt IE 7]>\n <style type=\"text/css\">\n .no_ie { display: none; }\n </style>\n <![endif]">>},
+ {<<"link">>,
+ [{<<"rel">>,<<"icon">>},
+ {<<"href">>,<<"/static/images/favicon.ico">>},
+ {<<"type">>,<<"image/x-icon">>}],
+ []},
+ {<<"link">>,
+ [{<<"rel">>,<<"shortcut icon">>},
+ {<<"href">>,<<"/static/images/favicon.ico">>},
+ {<<"type">>,<<"image/x-icon">>}],
+ []}]},
+ {<<"body">>,
+ [{<<"id">>,<<"home">>},
+ {<<"class">>,<<"tundra">>}],
+ [<<"&lt;<this<!-- is -->CDATA>&gt;">>]}]},
+ parse(D0)),
+ ?assertEqual(
+ {<<"html">>,[],
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}]},
+ parse(
+ <<"<html><?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?></html>">>)),
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"dd">>, [], [<<"foo">>]},
+ {<<"dt">>, [], [<<"bar">>]}]},
+ parse(<<"<html><dd>foo<dt>bar</html>">>)),
+ %% Singleton sadness
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"link">>, [], []},
+ <<"foo">>,
+ {<<"br">>, [], []},
+ <<"bar">>]},
+ parse(<<"<html><link>foo<br>bar</html>">>)),
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"link">>, [], [<<"foo">>,
+ {<<"br">>, [], []},
+ <<"bar">>]}]},
+ parse(<<"<html><link>foo<br>bar</link></html>">>)),
+ ok.
+
+exhaustive_is_singleton_test() ->
+ T = mochiweb_cover:clause_lookup_table(?MODULE, is_singleton),
+ [?assertEqual(V, is_singleton(K)) || {K, V} <- T].
+
+tokenize_attributes_test() ->
+ ?assertEqual(
+ {<<"foo">>,
+ [{<<"bar">>, <<"b\"az">>},
+ {<<"wibble">>, <<"wibble">>},
+ {<<"taco", 16#c2, 16#a9>>, <<"bell">>},
+ {<<"quux">>, <<"quux">>}],
+ []},
+ parse(<<"<foo bar=\"b&quot;az\" wibble taco&copy;=bell quux">>)),
+ ok.
+
+tokens2_test() ->
+ D0 = <<"<channel><title>from __future__ import *</title><link>http://bob.pythonmac.org</link><description>Bob's Rants</description></channel>">>,
+ ?assertEqual(
+ [{start_tag,<<"channel">>,[],false},
+ {start_tag,<<"title">>,[],false},
+ {data,<<"from __future__ import *">>,false},
+ {end_tag,<<"title">>},
+ {start_tag,<<"link">>,[],true},
+ {data,<<"http://bob.pythonmac.org">>,false},
+ {end_tag,<<"link">>},
+ {start_tag,<<"description">>,[],false},
+ {data,<<"Bob's Rants">>,false},
+ {end_tag,<<"description">>},
+ {end_tag,<<"channel">>}],
+ tokens(D0)),
+ ok.
+
+to_tokens_test() ->
+ ?assertEqual(
+ [{start_tag, <<"p">>, [{class, 1}], false},
+ {end_tag, <<"p">>}],
+ to_tokens({p, [{class, 1}], []})),
+ ?assertEqual(
+ [{start_tag, <<"p">>, [], false},
+ {end_tag, <<"p">>}],
+ to_tokens({p})),
+ ?assertEqual(
+ [{'=', <<"data">>}],
+ to_tokens({'=', <<"data">>})),
+ ?assertEqual(
+ [{comment, <<"comment">>}],
+ to_tokens({comment, <<"comment">>})),
+ %% This is only allowed in sub-tags:
+ %% {p, [{"class", "foo"}]} as {p, [{"class", "foo"}], []}
+ %% On the outside it's always treated as follows:
+ %% {p, [], [{"class", "foo"}]} as {p, [], [{"class", "foo"}]}
+ ?assertEqual(
+ [{start_tag, <<"html">>, [], false},
+ {start_tag, <<"p">>, [{class, 1}], false},
+ {end_tag, <<"p">>},
+ {end_tag, <<"html">>}],
+ to_tokens({html, [{p, [{class, 1}]}]})),
+ ok.
+
+parse2_test() ->
+ D0 = <<"<channel><title>from __future__ import *</title><link>http://bob.pythonmac.org<br>foo</link><description>Bob's Rants</description></channel>">>,
+ ?assertEqual(
+ {<<"channel">>,[],
+ [{<<"title">>,[],[<<"from __future__ import *">>]},
+ {<<"link">>,[],[
+ <<"http://bob.pythonmac.org">>,
+ {<<"br">>,[],[]},
+ <<"foo">>]},
+ {<<"description">>,[],[<<"Bob's Rants">>]}]},
+ parse(D0)),
+ ok.
+
+parse_tokens_test() ->
+ D0 = [{doctype,[<<"HTML">>,<<"PUBLIC">>,<<"-//W3C//DTD HTML 4.01 Transitional//EN">>]},
+ {data,<<"\n">>,true},
+ {start_tag,<<"html">>,[],false}],
+ ?assertEqual(
+ {<<"html">>, [], []},
+ parse_tokens(D0)),
+ D1 = D0 ++ [{end_tag, <<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [], []},
+ parse_tokens(D1)),
+ D2 = D0 ++ [{start_tag, <<"body">>, [], false}],
+ ?assertEqual(
+ {<<"html">>, [], [{<<"body">>, [], []}]},
+ parse_tokens(D2)),
+ D3 = D0 ++ [{start_tag, <<"head">>, [], false},
+ {end_tag, <<"head">>},
+ {start_tag, <<"body">>, [], false}],
+ ?assertEqual(
+ {<<"html">>, [], [{<<"head">>, [], []}, {<<"body">>, [], []}]},
+ parse_tokens(D3)),
+ D4 = D3 ++ [{data,<<"\n">>,true},
+ {start_tag,<<"div">>,[{<<"class">>,<<"a">>}],false},
+ {start_tag,<<"a">>,[{<<"name">>,<<"#anchor">>}],false},
+ {end_tag,<<"a">>},
+ {end_tag,<<"div">>},
+ {start_tag,<<"div">>,[{<<"class">>,<<"b">>}],false},
+ {start_tag,<<"div">>,[{<<"class">>,<<"c">>}],false},
+ {end_tag,<<"div">>},
+ {end_tag,<<"div">>}],
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"head">>, [], []},
+ {<<"body">>, [],
+ [{<<"div">>, [{<<"class">>, <<"a">>}], [{<<"a">>, [{<<"name">>, <<"#anchor">>}], []}]},
+ {<<"div">>, [{<<"class">>, <<"b">>}], [{<<"div">>, [{<<"class">>, <<"c">>}], []}]}
+ ]}]},
+ parse_tokens(D4)),
+ D5 = [{start_tag,<<"html">>,[],false},
+ {data,<<"\n">>,true},
+ {data,<<"boo">>,false},
+ {data,<<"hoo">>,false},
+ {data,<<"\n">>,true},
+ {end_tag,<<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [], [<<"\nboohoo\n">>]},
+ parse_tokens(D5)),
+ D6 = [{start_tag,<<"html">>,[],false},
+ {data,<<"\n">>,true},
+ {data,<<"\n">>,true},
+ {end_tag,<<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [], []},
+ parse_tokens(D6)),
+ D7 = [{start_tag,<<"html">>,[],false},
+ {start_tag,<<"ul">>,[],false},
+ {start_tag,<<"li">>,[],false},
+ {data,<<"word">>,false},
+ {start_tag,<<"li">>,[],false},
+ {data,<<"up">>,false},
+ {end_tag,<<"li">>},
+ {start_tag,<<"li">>,[],false},
+ {data,<<"fdsa">>,false},
+ {start_tag,<<"br">>,[],true},
+ {data,<<"asdf">>,false},
+ {end_tag,<<"ul">>},
+ {end_tag,<<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"ul">>, [],
+ [{<<"li">>, [], [<<"word">>]},
+ {<<"li">>, [], [<<"up">>]},
+ {<<"li">>, [], [<<"fdsa">>,{<<"br">>, [], []}, <<"asdf">>]}]}]},
+ parse_tokens(D7)),
+ ok.
+
+destack_test() ->
+ {<<"a">>, [], []} =
+ destack([{<<"a">>, [], []}]),
+ {<<"a">>, [], [{<<"b">>, [], []}]} =
+ destack([{<<"b">>, [], []}, {<<"a">>, [], []}]),
+ {<<"a">>, [], [{<<"b">>, [], [{<<"c">>, [], []}]}]} =
+ destack([{<<"c">>, [], []}, {<<"b">>, [], []}, {<<"a">>, [], []}]),
+ [{<<"a">>, [], [{<<"b">>, [], [{<<"c">>, [], []}]}]}] =
+ destack(<<"b">>,
+ [{<<"c">>, [], []}, {<<"b">>, [], []}, {<<"a">>, [], []}]),
+ [{<<"b">>, [], [{<<"c">>, [], []}]}, {<<"a">>, [], []}] =
+ destack(<<"c">>,
+ [{<<"c">>, [], []}, {<<"b">>, [], []},{<<"a">>, [], []}]),
+ ok.
+
+doctype_test() ->
+ ?assertEqual(
+ {<<"html">>,[],[{<<"head">>,[],[]}]},
+ mochiweb_html:parse("<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">"
+ "<html><head></head></body></html>")),
+ %% http://code.google.com/p/mochiweb/issues/detail?id=52
+ ?assertEqual(
+ {<<"html">>,[],[{<<"head">>,[],[]}]},
+ mochiweb_html:parse("<html>"
+ "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">"
+ "<head></head></body></html>")),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_http.erl b/1.1.x/src/mochiweb/mochiweb_http.erl
new file mode 100644
index 00000000..ab0af7e8
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_http.erl
@@ -0,0 +1,273 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc HTTP server.
+
+-module(mochiweb_http).
+-author('bob@mochimedia.com').
+-export([start/0, start/1, stop/0, stop/1]).
+-export([loop/2, default_body/1]).
+-export([after_response/2, reentry/1]).
+-export([parse_range_request/1, range_skip_length/2]).
+
+-define(REQUEST_RECV_TIMEOUT, 300000). % timeout waiting for request line
+-define(HEADERS_RECV_TIMEOUT, 30000). % timeout waiting for headers
+
+-define(MAX_HEADERS, 1000).
+-define(DEFAULTS, [{name, ?MODULE},
+ {port, 8888}]).
+
+parse_options(Options) ->
+ {loop, HttpLoop} = proplists:lookup(loop, Options),
+ Loop = fun (S) ->
+ ?MODULE:loop(S, HttpLoop)
+ end,
+ Options1 = [{loop, Loop} | proplists:delete(loop, Options)],
+ mochilists:set_defaults(?DEFAULTS, Options1).
+
+stop() ->
+ mochiweb_socket_server:stop(?MODULE).
+
+stop(Name) ->
+ mochiweb_socket_server:stop(Name).
+
+start() ->
+ start([{ip, "127.0.0.1"},
+ {loop, {?MODULE, default_body}}]).
+
+start(Options) ->
+ mochiweb_socket_server:start(parse_options(Options)).
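+
+%% Illustrative usage (editor's sketch, not part of the original module):
+%% only the loop fun is required; name and port fall back to ?DEFAULTS
+%% above. The port number is an arbitrary example.
+%%
+%%   {ok, _Pid} = mochiweb_http:start(
+%%                    [{port, 8080},
+%%                     {loop, fun (Req) ->
+%%                                Req:ok({"text/plain", [], <<"hello">>})
+%%                            end}]),
+%%   ...
+%%   mochiweb_http:stop().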
+
+frm(Body) ->
+ ["<html><head></head><body>"
+ "<form method=\"POST\">"
+ "<input type=\"hidden\" value=\"message\" name=\"hidden\"/>"
+ "<input type=\"submit\" value=\"regular POST\">"
+ "</form>"
+ "<br />"
+ "<form method=\"POST\" enctype=\"multipart/form-data\""
+ " action=\"/multipart\">"
+ "<input type=\"hidden\" value=\"multipart message\" name=\"hidden\"/>"
+ "<input type=\"file\" name=\"file\"/>"
+ "<input type=\"submit\" value=\"multipart POST\" />"
+ "</form>"
+ "<pre>", Body, "</pre>"
+ "</body></html>"].
+
+default_body(Req, M, "/chunked") when M =:= 'GET'; M =:= 'HEAD' ->
+ Res = Req:ok({"text/plain", [], chunked}),
+ Res:write_chunk("First chunk\r\n"),
+ timer:sleep(5000),
+ Res:write_chunk("Last chunk\r\n"),
+ Res:write_chunk("");
+default_body(Req, M, _Path) when M =:= 'GET'; M =:= 'HEAD' ->
+ Body = io_lib:format("~p~n", [[{parse_qs, Req:parse_qs()},
+ {parse_cookie, Req:parse_cookie()},
+ Req:dump()]]),
+ Req:ok({"text/html",
+ [mochiweb_cookies:cookie("mochiweb_http", "test_cookie")],
+ frm(Body)});
+default_body(Req, 'POST', "/multipart") ->
+ Body = io_lib:format("~p~n", [[{parse_qs, Req:parse_qs()},
+ {parse_cookie, Req:parse_cookie()},
+ {body, Req:recv_body()},
+ Req:dump()]]),
+ Req:ok({"text/html", [], frm(Body)});
+default_body(Req, 'POST', _Path) ->
+ Body = io_lib:format("~p~n", [[{parse_qs, Req:parse_qs()},
+ {parse_cookie, Req:parse_cookie()},
+ {parse_post, Req:parse_post()},
+ Req:dump()]]),
+ Req:ok({"text/html", [], frm(Body)});
+default_body(Req, _Method, _Path) ->
+ Req:respond({501, [], []}).
+
+default_body(Req) ->
+ default_body(Req, Req:get(method), Req:get(path)).
+
+loop(Socket, Body) ->
+ mochiweb_socket:setopts(Socket, [{packet, http}]),
+ request(Socket, Body).
+
+request(Socket, Body) ->
+ case mochiweb_socket:recv(Socket, 0, ?REQUEST_RECV_TIMEOUT) of
+ {ok, {http_request, Method, Path, Version}} ->
+ mochiweb_socket:setopts(Socket, [{packet, httph}]),
+ headers(Socket, {Method, Path, Version}, [], Body, 0);
+ {error, {http_error, "\r\n"}} ->
+ request(Socket, Body);
+ {error, {http_error, "\n"}} ->
+ request(Socket, Body);
+ {error, closed} ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ {error, timeout} ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ _Other ->
+ handle_invalid_request(Socket)
+ end.
+
+reentry(Body) ->
+ fun (Req) ->
+ ?MODULE:after_response(Body, Req)
+ end.
+
+headers(Socket, Request, Headers, _Body, ?MAX_HEADERS) ->
+ %% Too many headers sent, bad request.
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ handle_invalid_request(Socket, Request, Headers);
+headers(Socket, Request, Headers, Body, HeaderCount) ->
+ case mochiweb_socket:recv(Socket, 0, ?HEADERS_RECV_TIMEOUT) of
+ {ok, http_eoh} ->
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ Req = mochiweb:new_request({Socket, Request,
+ lists:reverse(Headers)}),
+ call_body(Body, Req),
+ ?MODULE:after_response(Body, Req);
+ {ok, {http_header, _, Name, _, Value}} ->
+ headers(Socket, Request, [{Name, Value} | Headers], Body,
+ 1 + HeaderCount);
+ {error, closed} ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ _Other ->
+ handle_invalid_request(Socket, Request, Headers)
+ end.
+
+call_body({M, F}, Req) ->
+ M:F(Req);
+call_body(Body, Req) ->
+ Body(Req).
+
+handle_invalid_request(Socket) ->
+ handle_invalid_request(Socket, {'GET', {abs_path, "/"}, {0,9}}, []).
+
+handle_invalid_request(Socket, Request, RevHeaders) ->
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ Req = mochiweb:new_request({Socket, Request,
+ lists:reverse(RevHeaders)}),
+ Req:respond({400, [], []}),
+ mochiweb_socket:close(Socket),
+ exit(normal).
+
+after_response(Body, Req) ->
+ Socket = Req:get(socket),
+ case Req:should_close() of
+ true ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ false ->
+ Req:cleanup(),
+ ?MODULE:loop(Socket, Body)
+ end.
+
+%% The trivial range "bytes=0-" selects the whole entity, so report it as
+%% no range at all and let callers fall back to a plain 200 response
+%% (range_test below expects undefined for this case).
+parse_range_request("bytes=0-") ->
+    undefined;
+parse_range_request(RawRange) when is_list(RawRange) ->
+ try
+ "bytes=" ++ RangeString = RawRange,
+ Ranges = string:tokens(RangeString, ","),
+ lists:map(fun ("-" ++ V) ->
+ {none, list_to_integer(V)};
+ (R) ->
+ case string:tokens(R, "-") of
+ [S1, S2] ->
+ {list_to_integer(S1), list_to_integer(S2)};
+ [S] ->
+ {list_to_integer(S), none}
+ end
+ end,
+ Ranges)
+ catch
+ _:_ ->
+ fail
+ end.
+
+range_skip_length(Spec, Size) ->
+ case Spec of
+ {none, R} when R =< Size, R >= 0 ->
+ {Size - R, R};
+ {none, _OutOfRange} ->
+ {0, Size};
+ {R, none} when R >= 0, R < Size ->
+ {R, Size - R};
+ {_OutOfRange, none} ->
+ invalid_range;
+ {Start, End} when 0 =< Start, Start =< End, End < Size ->
+ {Start, End - Start + 1};
+ {_OutOfRange, _End} ->
+ invalid_range
+ end.
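+
+%% Worked examples (editor's note), mirroring range_skip_length_test below
+%% with Size = 60; the {Skip, Length} result locates the slice to serve:
+%%   range_skip_length({none, 20}, 60) -> {40, 20}  suffix "-20": last 20 bytes
+%%   range_skip_length({30, none}, 60) -> {30, 30}  "30-": offset 30 to the end
+%%   range_skip_length({10, 19}, 60)   -> {10, 10}  "10-19" is inclusive,
+%%                                                  hence End - Start + 1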
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+range_test() ->
+ %% valid, single ranges
+ ?assertEqual([{20, 30}], parse_range_request("bytes=20-30")),
+ ?assertEqual([{20, none}], parse_range_request("bytes=20-")),
+ ?assertEqual([{none, 20}], parse_range_request("bytes=-20")),
+
+ %% trivial single range
+ ?assertEqual(undefined, parse_range_request("bytes=0-")),
+
+ %% invalid, single ranges
+ ?assertEqual(fail, parse_range_request("")),
+ ?assertEqual(fail, parse_range_request("garbage")),
+ ?assertEqual(fail, parse_range_request("bytes=-20-30")),
+
+ %% valid, multiple range
+ ?assertEqual(
+ [{20, 30}, {50, 100}, {110, 200}],
+ parse_range_request("bytes=20-30,50-100,110-200")),
+ ?assertEqual(
+ [{20, none}, {50, 100}, {none, 200}],
+ parse_range_request("bytes=20-,50-100,-200")),
+
+ %% no ranges
+ ?assertEqual([], parse_range_request("bytes=")),
+ ok.
+
+range_skip_length_test() ->
+ Body = <<"012345678901234567890123456789012345678901234567890123456789">>,
+ BodySize = byte_size(Body), %% 60
+ BodySize = 60,
+
+ %% these values assume BodySize =:= 60
+ ?assertEqual({1,9}, range_skip_length({1,9}, BodySize)), %% 1-9
+ ?assertEqual({10,10}, range_skip_length({10,19}, BodySize)), %% 10-19
+ ?assertEqual({40, 20}, range_skip_length({none, 20}, BodySize)), %% -20
+ ?assertEqual({30, 30}, range_skip_length({30, none}, BodySize)), %% 30-
+
+ %% valid edge cases for range_skip_length
+ ?assertEqual({BodySize, 0}, range_skip_length({none, 0}, BodySize)),
+ ?assertEqual({0, BodySize}, range_skip_length({none, BodySize}, BodySize)),
+ ?assertEqual({0, BodySize}, range_skip_length({0, none}, BodySize)),
+ BodySizeLess1 = BodySize - 1,
+ ?assertEqual({BodySizeLess1, 1},
+ range_skip_length({BodySize - 1, none}, BodySize)),
+
+ %% out of range, return whole thing
+ ?assertEqual({0, BodySize},
+ range_skip_length({none, BodySize + 1}, BodySize)),
+ ?assertEqual({0, BodySize},
+ range_skip_length({none, -1}, BodySize)),
+
+ %% invalid ranges
+ ?assertEqual(invalid_range,
+ range_skip_length({-1, 30}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({0, BodySize + 1}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({-1, BodySize + 1}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({BodySize, 40}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({-1, none}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({BodySize, none}, BodySize)),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_io.erl b/1.1.x/src/mochiweb/mochiweb_io.erl
new file mode 100644
index 00000000..6ce57ec8
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_io.erl
@@ -0,0 +1,46 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Utilities for dealing with IO devices (open files).
+
+-module(mochiweb_io).
+-author('bob@mochimedia.com').
+
+-export([iodevice_stream/3, iodevice_stream/2]).
+-export([iodevice_foldl/4, iodevice_foldl/3]).
+-export([iodevice_size/1]).
+-define(READ_SIZE, 8192).
+
+iodevice_foldl(F, Acc, IoDevice) ->
+ iodevice_foldl(F, Acc, IoDevice, ?READ_SIZE).
+
+iodevice_foldl(F, Acc, IoDevice, BufferSize) ->
+ case file:read(IoDevice, BufferSize) of
+ eof ->
+ Acc;
+ {ok, Data} ->
+ iodevice_foldl(F, F(Data, Acc), IoDevice, BufferSize)
+ end.
+
+iodevice_stream(Callback, IoDevice) ->
+ iodevice_stream(Callback, IoDevice, ?READ_SIZE).
+
+iodevice_stream(Callback, IoDevice, BufferSize) ->
+ F = fun (Data, ok) -> Callback(Data) end,
+ ok = iodevice_foldl(F, ok, IoDevice, BufferSize).
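+
+%% Typical use (editor's illustration): pushing a file out over a socket in
+%% ?READ_SIZE pieces without building the whole file in memory, where Sock
+%% and Fd are an already-open socket and IO device:
+%%   iodevice_stream(fun (Data) -> ok = gen_tcp:send(Sock, Data) end, Fd).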
+
+iodevice_size(IoDevice) ->
+ {ok, Size} = file:position(IoDevice, eof),
+ {ok, 0} = file:position(IoDevice, bof),
+ Size.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+
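+%% Editor's sketch, not part of the original file: exercise the helpers
+%% against an in-memory IO device (in ram mode the "filename" argument is
+%% the initial file contents).
+iodevice_test() ->
+    {ok, Fd} = file:open(<<"0123456789">>, [ram, read, binary]),
+    ?assertEqual(10, iodevice_size(Fd)),
+    %% iodevice_size/1 rewinds to the beginning, so the fold below sees the
+    %% whole contents; a 3-byte buffer forces several reads.
+    ?assertEqual(<<"0123456789">>,
+                 iolist_to_binary(
+                   lists:reverse(
+                     iodevice_foldl(fun (Data, Acc) -> [Data | Acc] end,
+                                    [], Fd, 3)))),
+    ok = file:close(Fd).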
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_mime.erl b/1.1.x/src/mochiweb/mochiweb_mime.erl
new file mode 100644
index 00000000..5344aee7
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_mime.erl
@@ -0,0 +1,94 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Gives a good MIME type guess based on file extension.
+
+-module(mochiweb_mime).
+-author('bob@mochimedia.com').
+-export([from_extension/1]).
+
+%% @spec from_extension(S::string()) -> string() | undefined
+%% @doc Given a filename extension (e.g. ".html") return a guess for the MIME
+%% type such as "text/html". Will return the atom undefined if no good
+%% guess is available.
+from_extension(".html") ->
+ "text/html";
+from_extension(".xhtml") ->
+ "application/xhtml+xml";
+from_extension(".xml") ->
+ "application/xml";
+from_extension(".css") ->
+ "text/css";
+from_extension(".js") ->
+ "application/x-javascript";
+from_extension(".jpg") ->
+ "image/jpeg";
+from_extension(".gif") ->
+ "image/gif";
+from_extension(".png") ->
+ "image/png";
+from_extension(".swf") ->
+ "application/x-shockwave-flash";
+from_extension(".zip") ->
+ "application/zip";
+from_extension(".bz2") ->
+ "application/x-bzip2";
+from_extension(".gz") ->
+ "application/x-gzip";
+from_extension(".tar") ->
+ "application/x-tar";
+from_extension(".tgz") ->
+ "application/x-gzip";
+from_extension(".txt") ->
+ "text/plain";
+from_extension(".doc") ->
+ "application/msword";
+from_extension(".pdf") ->
+ "application/pdf";
+from_extension(".xls") ->
+ "application/vnd.ms-excel";
+from_extension(".rtf") ->
+ "application/rtf";
+from_extension(".mov") ->
+ "video/quicktime";
+from_extension(".mp3") ->
+ "audio/mpeg";
+from_extension(".z") ->
+ "application/x-compress";
+from_extension(".wav") ->
+ "audio/x-wav";
+from_extension(".ico") ->
+ "image/x-icon";
+from_extension(".bmp") ->
+ "image/bmp";
+from_extension(".m4a") ->
+ "audio/mpeg";
+from_extension(".m3u") ->
+ "audio/x-mpegurl";
+from_extension(".exe") ->
+ "application/octet-stream";
+from_extension(".csv") ->
+ "text/csv";
+from_extension(_) ->
+ undefined.
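+
+%% Typical use (editor's illustration): pick a Content-Type for a path and
+%% fall back to a generic type for unknown extensions; the fallback chosen
+%% here is an arbitrary example.
+%%   ContentType =
+%%       case mochiweb_mime:from_extension(filename:extension(Path)) of
+%%           undefined -> "application/octet-stream";
+%%           Mime -> Mime
+%%       end.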
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+exhaustive_from_extension_test() ->
+ T = mochiweb_cover:clause_lookup_table(?MODULE, from_extension),
+ [?assertEqual(V, from_extension(K)) || {K, V} <- T].
+
+from_extension_test() ->
+ ?assertEqual("text/html",
+ from_extension(".html")),
+ ?assertEqual(undefined,
+ from_extension("")),
+ ?assertEqual(undefined,
+ from_extension(".wtf")),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_multipart.erl b/1.1.x/src/mochiweb/mochiweb_multipart.erl
new file mode 100644
index 00000000..3069cf4d
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_multipart.erl
@@ -0,0 +1,824 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Utilities for parsing multipart/form-data.
+
+-module(mochiweb_multipart).
+-author('bob@mochimedia.com').
+
+-export([parse_form/1, parse_form/2]).
+-export([parse_multipart_request/2]).
+-export([parts_to_body/3, parts_to_multipart_body/4]).
+-export([default_file_handler/2]).
+
+-define(CHUNKSIZE, 4096).
+
+-record(mp, {state, boundary, length, buffer, callback, req}).
+
+%% TODO: DOCUMENT THIS MODULE.
+%% @type key() = atom() | string() | binary().
+%% @type value() = atom() | iolist() | integer().
+%% @type header() = {key(), value()}.
+%% @type bodypart() = {Start::integer(), End::integer(), Body::iolist()}.
+%% @type formfile() = {Name::string(), ContentType::string(), Content::binary()}.
+%% @type request().
+%% @type file_handler() = (Filename::string(), ContentType::string()) -> file_handler_callback().
+%% @type file_handler_callback() = (binary() | eof) -> file_handler_callback() | term().
+
+%% @spec parts_to_body([bodypart()], ContentType::string(),
+%% Size::integer()) -> {[header()], iolist()}
+%% @doc Return {[header()], iolist()} representing the body for the given
+%% parts, may be a single part or multipart.
+parts_to_body([{Start, End, Body}], ContentType, Size) ->
+ HeaderList = [{"Content-Type", ContentType},
+ {"Content-Range",
+ ["bytes ",
+ mochiweb_util:make_io(Start), "-", mochiweb_util:make_io(End),
+ "/", mochiweb_util:make_io(Size)]}],
+ {HeaderList, Body};
+parts_to_body(BodyList, ContentType, Size) when is_list(BodyList) ->
+ parts_to_multipart_body(BodyList, ContentType, Size,
+ mochihex:to_hex(crypto:rand_bytes(8))).
+
+%% @spec parts_to_multipart_body([bodypart()], ContentType::string(),
+%% Size::integer(), Boundary::string()) ->
+%% {[header()], iolist()}
+%% @doc Return {[header()], iolist()} representing the body for the given
+%% parts, always a multipart response.
+parts_to_multipart_body(BodyList, ContentType, Size, Boundary) ->
+ HeaderList = [{"Content-Type",
+ ["multipart/byteranges; ",
+ "boundary=", Boundary]}],
+ MultiPartBody = multipart_body(BodyList, ContentType, Boundary, Size),
+
+ {HeaderList, MultiPartBody}.
+
+%% @spec multipart_body([bodypart()], ContentType::string(),
+%% Boundary::string(), Size::integer()) -> iolist()
+%% @doc Return the representation of a multipart body for the given [bodypart()].
+multipart_body([], _ContentType, Boundary, _Size) ->
+ ["--", Boundary, "--\r\n"];
+multipart_body([{Start, End, Body} | BodyList], ContentType, Boundary, Size) ->
+ ["--", Boundary, "\r\n",
+ "Content-Type: ", ContentType, "\r\n",
+ "Content-Range: ",
+ "bytes ", mochiweb_util:make_io(Start), "-", mochiweb_util:make_io(End),
+ "/", mochiweb_util:make_io(Size), "\r\n\r\n",
+ Body, "\r\n"
+ | multipart_body(BodyList, ContentType, Boundary, Size)].
+
+%% @spec parse_form(request()) -> [{string(), string() | formfile()}]
+%% @doc Parse a multipart form from the given request using the in-memory
+%% default_file_handler/2.
+parse_form(Req) ->
+ parse_form(Req, fun default_file_handler/2).
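+
+%% Example (editor's sketch): handling a POST of a multipart form such as
+%% the one rendered by mochiweb_http:frm/1. The "file" field arrives as
+%% the formfile() triple built by default_file_handler/2 below.
+%%   handle_upload(Req) ->
+%%       Form = mochiweb_multipart:parse_form(Req),
+%%       {_Filename, _ContentType, _Bytes} =
+%%           proplists:get_value("file", Form),
+%%       Req:ok({"text/plain", [], "thanks"}).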
+
+%% @spec parse_form(request(), F::file_handler()) -> [{string(), string() | term()}]
+%% @doc Parse a multipart form from the given request using the given file_handler().
+parse_form(Req, FileHandler) ->
+ Callback = fun (Next) -> parse_form_outer(Next, FileHandler, []) end,
+ {_, _, Res} = parse_multipart_request(Req, Callback),
+ Res.
+
+parse_form_outer(eof, _, Acc) ->
+ lists:reverse(Acc);
+parse_form_outer({headers, H}, FileHandler, State) ->
+ {"form-data", H1} = proplists:get_value("content-disposition", H),
+ Name = proplists:get_value("name", H1),
+ Filename = proplists:get_value("filename", H1),
+ case Filename of
+ undefined ->
+ fun (Next) ->
+ parse_form_value(Next, {Name, []}, FileHandler, State)
+ end;
+ _ ->
+ ContentType = proplists:get_value("content-type", H),
+ Handler = FileHandler(Filename, ContentType),
+ fun (Next) ->
+ parse_form_file(Next, {Name, Handler}, FileHandler, State)
+ end
+ end.
+
+parse_form_value(body_end, {Name, Acc}, FileHandler, State) ->
+ Value = binary_to_list(iolist_to_binary(lists:reverse(Acc))),
+ State1 = [{Name, Value} | State],
+ fun (Next) -> parse_form_outer(Next, FileHandler, State1) end;
+parse_form_value({body, Data}, {Name, Acc}, FileHandler, State) ->
+ Acc1 = [Data | Acc],
+ fun (Next) -> parse_form_value(Next, {Name, Acc1}, FileHandler, State) end.
+
+parse_form_file(body_end, {Name, Handler}, FileHandler, State) ->
+ Value = Handler(eof),
+ State1 = [{Name, Value} | State],
+ fun (Next) -> parse_form_outer(Next, FileHandler, State1) end;
+parse_form_file({body, Data}, {Name, Handler}, FileHandler, State) ->
+ H1 = Handler(Data),
+ fun (Next) -> parse_form_file(Next, {Name, H1}, FileHandler, State) end.
+
+default_file_handler(Filename, ContentType) ->
+ default_file_handler_1(Filename, ContentType, []).
+
+default_file_handler_1(Filename, ContentType, Acc) ->
+ fun(eof) ->
+ Value = iolist_to_binary(lists:reverse(Acc)),
+ {Filename, ContentType, Value};
+ (Next) ->
+ default_file_handler_1(Filename, ContentType, [Next | Acc])
+ end.
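+
+%% A file_handler() that spools uploads to disk rather than accumulating
+%% them in memory can follow the same shape (editor's sketch; tmp_path/1 is
+%% an assumed helper that picks a scratch location, and on_disk is just an
+%% illustrative return value):
+%%   disk_file_handler(Filename, ContentType) ->
+%%       {ok, Fd} = file:open(tmp_path(Filename), [raw, write, binary]),
+%%       disk_file_handler_1(Filename, ContentType, Fd).
+%%
+%%   disk_file_handler_1(Filename, ContentType, Fd) ->
+%%       fun (eof) ->
+%%               ok = file:close(Fd),
+%%               {Filename, ContentType, on_disk};
+%%           (Data) ->
+%%               ok = file:write(Fd, Data),
+%%               disk_file_handler_1(Filename, ContentType, Fd)
+%%       end.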
+
+parse_multipart_request(Req, Callback) ->
+ %% TODO: Support chunked?
+ Length = list_to_integer(Req:get_header_value("content-length")),
+ Boundary = iolist_to_binary(
+ get_boundary(Req:get_header_value("content-type"))),
+ Prefix = <<"\r\n--", Boundary/binary>>,
+ BS = byte_size(Boundary),
+ Chunk = read_chunk(Req, Length),
+ Length1 = Length - byte_size(Chunk),
+ <<"--", Boundary:BS/binary, "\r\n", Rest/binary>> = Chunk,
+ feed_mp(headers, flash_multipart_hack(#mp{boundary=Prefix,
+ length=Length1,
+ buffer=Rest,
+ callback=Callback,
+ req=Req})).
+
+parse_headers(<<>>) ->
+ [];
+parse_headers(Binary) ->
+ parse_headers(Binary, []).
+
+parse_headers(Binary, Acc) ->
+ case find_in_binary(<<"\r\n">>, Binary) of
+ {exact, N} ->
+ <<Line:N/binary, "\r\n", Rest/binary>> = Binary,
+ parse_headers(Rest, [split_header(Line) | Acc]);
+ not_found ->
+ lists:reverse([split_header(Binary) | Acc])
+ end.
+
+split_header(Line) ->
+ {Name, [$: | Value]} = lists:splitwith(fun (C) -> C =/= $: end,
+ binary_to_list(Line)),
+ {string:to_lower(string:strip(Name)),
+ mochiweb_util:parse_header(Value)}.
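+
+%% For example (editor's note):
+%%   split_header(<<"Content-Type: text/plain; charset=utf-8">>)
+%% gives {"content-type", {"text/plain", [{"charset", "utf-8"}]}}: the name
+%% is lower-cased and the value's parameters are split out by parse_header/1.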
+
+read_chunk(Req, Length) when Length > 0 ->
+ case Length of
+ Length when Length < ?CHUNKSIZE ->
+ Req:recv(Length);
+ _ ->
+ Req:recv(?CHUNKSIZE)
+ end.
+
+read_more(State=#mp{length=Length, buffer=Buffer, req=Req}) ->
+ Data = read_chunk(Req, Length),
+ Buffer1 = <<Buffer/binary, Data/binary>>,
+ flash_multipart_hack(State#mp{length=Length - byte_size(Data),
+ buffer=Buffer1}).
+
+flash_multipart_hack(State=#mp{length=0, buffer=Buffer, boundary=Prefix}) ->
+ %% http://code.google.com/p/mochiweb/issues/detail?id=22
+ %% Flash doesn't terminate multipart with \r\n properly so we fix it up here
+ PrefixSize = size(Prefix),
+ case size(Buffer) - (2 + PrefixSize) of
+ Seek when Seek >= 0 ->
+ case Buffer of
+ <<_:Seek/binary, Prefix:PrefixSize/binary, "--">> ->
+ Buffer1 = <<Buffer/binary, "\r\n">>,
+ State#mp{buffer=Buffer1};
+ _ ->
+ State
+ end;
+ _ ->
+ State
+ end;
+flash_multipart_hack(State) ->
+ State.
+
+feed_mp(headers, State=#mp{buffer=Buffer, callback=Callback}) ->
+ {State1, P} = case find_in_binary(<<"\r\n\r\n">>, Buffer) of
+ {exact, N} ->
+ {State, N};
+ _ ->
+ S1 = read_more(State),
+ %% Assume headers must be less than ?CHUNKSIZE
+ {exact, N} = find_in_binary(<<"\r\n\r\n">>,
+ S1#mp.buffer),
+ {S1, N}
+ end,
+ <<Headers:P/binary, "\r\n\r\n", Rest/binary>> = State1#mp.buffer,
+ NextCallback = Callback({headers, parse_headers(Headers)}),
+ feed_mp(body, State1#mp{buffer=Rest,
+ callback=NextCallback});
+feed_mp(body, State=#mp{boundary=Prefix, buffer=Buffer, callback=Callback}) ->
+ Boundary = find_boundary(Prefix, Buffer),
+ case Boundary of
+ {end_boundary, Start, Skip} ->
+ <<Data:Start/binary, _:Skip/binary, Rest/binary>> = Buffer,
+ C1 = Callback({body, Data}),
+ C2 = C1(body_end),
+ {State#mp.length, Rest, C2(eof)};
+ {next_boundary, Start, Skip} ->
+ <<Data:Start/binary, _:Skip/binary, Rest/binary>> = Buffer,
+ C1 = Callback({body, Data}),
+ feed_mp(headers, State#mp{callback=C1(body_end),
+ buffer=Rest});
+ {maybe, Start} ->
+ <<Data:Start/binary, Rest/binary>> = Buffer,
+ feed_mp(body, read_more(State#mp{callback=Callback({body, Data}),
+ buffer=Rest}));
+ not_found ->
+ {Data, Rest} = {Buffer, <<>>},
+ feed_mp(body, read_more(State#mp{callback=Callback({body, Data}),
+ buffer=Rest}))
+ end.
+
+get_boundary(ContentType) ->
+ {"multipart/form-data", Opts} = mochiweb_util:parse_header(ContentType),
+ case proplists:get_value("boundary", Opts) of
+ S when is_list(S) ->
+ S
+ end.
+
+find_in_binary(B, Data) when size(B) > 0 ->
+ case size(Data) - size(B) of
+ Last when Last < 0 ->
+ partial_find(B, Data, 0, size(Data));
+ Last ->
+ find_in_binary(B, size(B), Data, 0, Last)
+ end.
+
+find_in_binary(B, BS, D, N, Last) when N =< Last ->
+ case D of
+ <<_:N/binary, B:BS/binary, _/binary>> ->
+ {exact, N};
+ _ ->
+ find_in_binary(B, BS, D, 1 + N, Last)
+ end;
+find_in_binary(B, BS, D, N, Last) when N =:= 1 + Last ->
+ partial_find(B, D, N, BS - 1).
+
+partial_find(_B, _D, _N, 0) ->
+ not_found;
+partial_find(B, D, N, K) ->
+ <<B1:K/binary, _/binary>> = B,
+ case D of
+ <<_Skip:N/binary, B1:K/binary>> ->
+ {partial, N, K};
+ _ ->
+ partial_find(B, D, 1 + N, K - 1)
+ end.
+
+find_boundary(Prefix, Data) ->
+ case find_in_binary(Prefix, Data) of
+ {exact, Skip} ->
+ PrefixSkip = Skip + size(Prefix),
+ case Data of
+ <<_:PrefixSkip/binary, "\r\n", _/binary>> ->
+ {next_boundary, Skip, size(Prefix) + 2};
+ <<_:PrefixSkip/binary, "--\r\n", _/binary>> ->
+ {end_boundary, Skip, size(Prefix) + 4};
+ _ when size(Data) < PrefixSkip + 4 ->
+ %% Underflow
+ {maybe, Skip};
+ _ ->
+ %% False positive
+ not_found
+ end;
+ {partial, Skip, Length} when (Skip + Length) =:= size(Data) ->
+ %% Underflow
+ {maybe, Skip};
+ _ ->
+ not_found
+ end.
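+
+%% Summary of find_boundary/2 results (editor's note):
+%%   {next_boundary, Start, Skip}: part data ends at Start; Skip bytes of
+%%       boundary plus CRLF separate it from the next part's headers
+%%   {end_boundary, Start, Skip}:  the closing "--" terminator was found
+%%   {maybe, Start}:               the buffer ends in a possible boundary
+%%       prefix, so the caller must read more data before deciding
+%%   not_found:                    no boundary in this buffer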
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+ssl_cert_opts() ->
+ EbinDir = filename:dirname(code:which(?MODULE)),
+ CertDir = filename:join([EbinDir, "..", "support", "test-materials"]),
+ CertFile = filename:join(CertDir, "test_ssl_cert.pem"),
+ KeyFile = filename:join(CertDir, "test_ssl_key.pem"),
+ [{certfile, CertFile}, {keyfile, KeyFile}].
+
+with_socket_server(Transport, ServerFun, ClientFun) ->
+ ServerOpts0 = [{ip, "127.0.0.1"}, {port, 0}, {loop, ServerFun}],
+ ServerOpts = case Transport of
+ plain ->
+ ServerOpts0;
+ ssl ->
+ ServerOpts0 ++ [{ssl, true}, {ssl_opts, ssl_cert_opts()}]
+ end,
+ {ok, Server} = mochiweb_socket_server:start(ServerOpts),
+ Port = mochiweb_socket_server:get(Server, port),
+ ClientOpts = [binary, {active, false}],
+ {ok, Client} = case Transport of
+ plain ->
+ gen_tcp:connect("127.0.0.1", Port, ClientOpts);
+ ssl ->
+ ClientOpts1 = [{ssl_imp, new} | ClientOpts],
+ {ok, SslSocket} = ssl:connect("127.0.0.1", Port, ClientOpts1),
+ {ok, {ssl, SslSocket}}
+ end,
+ Res = (catch ClientFun(Client)),
+ mochiweb_socket_server:stop(Server),
+ Res.
+
+fake_request(Socket, ContentType, Length) ->
+ mochiweb_request:new(Socket,
+ 'POST',
+ "/multipart",
+ {1,1},
+ mochiweb_headers:make(
+ [{"content-type", ContentType},
+ {"content-length", Length}])).
+
+test_callback({body, <<>>}, Rest=[body_end | _]) ->
+ %% When expecting the body_end we might get an empty binary
+ fun (Next) -> test_callback(Next, Rest) end;
+test_callback({body, Got}, [{body, Expect} | Rest]) when Got =/= Expect ->
+ %% Partial response
+ GotSize = size(Got),
+ <<Got:GotSize/binary, Expect1/binary>> = Expect,
+ fun (Next) -> test_callback(Next, [{body, Expect1} | Rest]) end;
+test_callback(Got, [Expect | Rest]) ->
+ ?assertEqual(Got, Expect),
+ case Rest of
+ [] ->
+ ok;
+ _ ->
+ fun (Next) -> test_callback(Next, Rest) end
+ end.
+
+parse3_http_test() ->
+ parse3(plain).
+
+parse3_https_test() ->
+ parse3(ssl).
+
+parse3(Transport) ->
+ ContentType = "multipart/form-data; boundary=---------------------------7386909285754635891697677882",
+ BinContent = <<"-----------------------------7386909285754635891697677882\r\nContent-Disposition: form-data; name=\"hidden\"\r\n\r\nmultipart message\r\n-----------------------------7386909285754635891697677882\r\nContent-Disposition: form-data; name=\"file\"; filename=\"test_file.txt\"\r\nContent-Type: text/plain\r\n\r\nWoo multiline text file\n\nLa la la\r\n-----------------------------7386909285754635891697677882--\r\n">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "hidden"}]}}]},
+ {body, <<"multipart message">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", "test_file.txt"}]}},
+ {"content-type", {"text/plain", []}}]},
+ {body, <<"Woo multiline text file\n\nLa la la">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse2_http_test() ->
+ parse2(plain).
+
+parse2_https_test() ->
+ parse2(ssl).
+
+parse2(Transport) ->
+ ContentType = "multipart/form-data; boundary=---------------------------6072231407570234361599764024",
+ BinContent = <<"-----------------------------6072231407570234361599764024\r\nContent-Disposition: form-data; name=\"hidden\"\r\n\r\nmultipart message\r\n-----------------------------6072231407570234361599764024\r\nContent-Disposition: form-data; name=\"file\"; filename=\"\"\r\nContent-Type: application/octet-stream\r\n\r\n\r\n-----------------------------6072231407570234361599764024--\r\n">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "hidden"}]}}]},
+ {body, <<"multipart message">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", ""}]}},
+ {"content-type", {"application/octet-stream", []}}]},
+ {body, <<>>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse_form_http_test() ->
+ do_parse_form(plain).
+
+parse_form_https_test() ->
+ do_parse_form(ssl).
+
+do_parse_form(Transport) ->
+ ContentType = "multipart/form-data; boundary=AaB03x",
+ "AaB03x" = get_boundary(ContentType),
+ Content = mochiweb_util:join(
+ ["--AaB03x",
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--AaB03x",
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "",
+ "... contents of file1.txt ...",
+ "--AaB03x--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_form(Req),
+ [{"submit-name", "Larry"},
+ {"files", {"file1.txt", {"text/plain",[]},
+ <<"... contents of file1.txt ...">>}
+ }] = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse_http_test() ->
+ do_parse(plain).
+
+parse_https_test() ->
+ do_parse(ssl).
+
+do_parse(Transport) ->
+ ContentType = "multipart/form-data; boundary=AaB03x",
+ "AaB03x" = get_boundary(ContentType),
+ Content = mochiweb_util:join(
+ ["--AaB03x",
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--AaB03x",
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "",
+ "... contents of file1.txt ...",
+ "--AaB03x--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "submit-name"}]}}]},
+ {body, <<"Larry">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
+ {"content-type", {"text/plain", []}}]},
+ {body, <<"... contents of file1.txt ...">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse_partial_body_boundary_http_test() ->
+ parse_partial_body_boundary(plain).
+
+parse_partial_body_boundary_https_test() ->
+ parse_partial_body_boundary(ssl).
+
+parse_partial_body_boundary(Transport) ->
+ Boundary = string:copies("$", 2048),
+ ContentType = "multipart/form-data; boundary=" ++ Boundary,
+ ?assertEqual(Boundary, get_boundary(ContentType)),
+ Content = mochiweb_util:join(
+ ["--" ++ Boundary,
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--" ++ Boundary,
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "",
+ "... contents of file1.txt ...",
+ "--" ++ Boundary ++ "--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "submit-name"}]}}]},
+ {body, <<"Larry">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
+ {"content-type", {"text/plain", []}}
+ ]},
+ {body, <<"... contents of file1.txt ...">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse_large_header_http_test() ->
+ parse_large_header(plain).
+
+parse_large_header_https_test() ->
+ parse_large_header(ssl).
+
+parse_large_header(Transport) ->
+ ContentType = "multipart/form-data; boundary=AaB03x",
+ "AaB03x" = get_boundary(ContentType),
+ Content = mochiweb_util:join(
+ ["--AaB03x",
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--AaB03x",
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "x-large-header: " ++ string:copies("%", 4096),
+ "",
+ "... contents of file1.txt ...",
+ "--AaB03x--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "submit-name"}]}}]},
+ {body, <<"Larry">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
+ {"content-type", {"text/plain", []}},
+ {"x-large-header", {string:copies("%", 4096), []}}
+ ]},
+ {body, <<"... contents of file1.txt ...">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+find_boundary_test() ->
+ B = <<"\r\n--X">>,
+ {next_boundary, 0, 7} = find_boundary(B, <<"\r\n--X\r\nRest">>),
+ {next_boundary, 1, 7} = find_boundary(B, <<"!\r\n--X\r\nRest">>),
+ {end_boundary, 0, 9} = find_boundary(B, <<"\r\n--X--\r\nRest">>),
+ {end_boundary, 1, 9} = find_boundary(B, <<"!\r\n--X--\r\nRest">>),
+ not_found = find_boundary(B, <<"--X\r\nRest">>),
+ {maybe, 0} = find_boundary(B, <<"\r\n--X\r">>),
+ {maybe, 1} = find_boundary(B, <<"!\r\n--X\r">>),
+ P = <<"\r\n-----------------------------16037454351082272548568224146">>,
+ B0 = <<55,212,131,77,206,23,216,198,35,87,252,118,252,8,25,211,132,229,
+ 182,42,29,188,62,175,247,243,4,4,0,59, 13,10,45,45,45,45,45,45,45,
+ 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,
+ 49,54,48,51,55,52,53,52,51,53,49>>,
+ {maybe, 30} = find_boundary(P, B0),
+ not_found = find_boundary(B, <<"\r\n--XJOPKE">>),
+ ok.
+
+find_in_binary_test() ->
+ {exact, 0} = find_in_binary(<<"foo">>, <<"foobarbaz">>),
+ {exact, 1} = find_in_binary(<<"oo">>, <<"foobarbaz">>),
+ {exact, 8} = find_in_binary(<<"z">>, <<"foobarbaz">>),
+ not_found = find_in_binary(<<"q">>, <<"foobarbaz">>),
+ {partial, 7, 2} = find_in_binary(<<"azul">>, <<"foobarbaz">>),
+ {exact, 0} = find_in_binary(<<"foobarbaz">>, <<"foobarbaz">>),
+ {partial, 0, 3} = find_in_binary(<<"foobar">>, <<"foo">>),
+ {partial, 1, 3} = find_in_binary(<<"foobar">>, <<"afoo">>),
+ ok.
+
+flash_parse_http_test() ->
+ flash_parse(plain).
+
+flash_parse_https_test() ->
+ flash_parse(ssl).
+
+flash_parse(Transport) ->
+ ContentType = "multipart/form-data; boundary=----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5",
+ "----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5" = get_boundary(ContentType),
+ BinContent = <<"------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Filename\"\r\n\r\nhello.txt\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"success_action_status\"\r\n\r\n201\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"file\"; filename=\"hello.txt\"\r\nContent-Type: application/octet-stream\r\n\r\nhello\n\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Upload\"\r\n\r\nSubmit Query\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5--">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Filename"}]}}]},
+ {body, <<"hello.txt">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "success_action_status"}]}}]},
+ {body, <<"201">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", "hello.txt"}]}},
+ {"content-type", {"application/octet-stream", []}}]},
+ {body, <<"hello\n">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Upload"}]}}]},
+ {body, <<"Submit Query">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+flash_parse2_http_test() ->
+ flash_parse2(plain).
+
+flash_parse2_https_test() ->
+ flash_parse2(ssl).
+
+flash_parse2(Transport) ->
+ ContentType = "multipart/form-data; boundary=----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5",
+ "----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5" = get_boundary(ContentType),
+ Chunk = iolist_to_binary(string:copies("%", 4096)),
+ BinContent = <<"------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Filename\"\r\n\r\nhello.txt\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"success_action_status\"\r\n\r\n201\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"file\"; filename=\"hello.txt\"\r\nContent-Type: application/octet-stream\r\n\r\n", Chunk/binary, "\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Upload\"\r\n\r\nSubmit Query\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5--">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Filename"}]}}]},
+ {body, <<"hello.txt">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "success_action_status"}]}}]},
+ {body, <<"201">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", "hello.txt"}]}},
+ {"content-type", {"application/octet-stream", []}}]},
+ {body, Chunk},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Upload"}]}}]},
+ {body, <<"Submit Query">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse_headers_test() ->
+ ?assertEqual([], parse_headers(<<>>)).
+
+flash_multipart_hack_test() ->
+ Buffer = <<"prefix-">>,
+ Prefix = <<"prefix">>,
+ State = #mp{length=0, buffer=Buffer, boundary=Prefix},
+ ?assertEqual(State,
+ flash_multipart_hack(State)).
+
+parts_to_body_single_test() ->
+ {HL, B} = parts_to_body([{0, 5, <<"01234">>}],
+ "text/plain",
+ 10),
+ [{"Content-Range", Range},
+ {"Content-Type", Type}] = lists:sort(HL),
+ ?assertEqual(
+ <<"bytes 0-5/10">>,
+ iolist_to_binary(Range)),
+ ?assertEqual(
+ <<"text/plain">>,
+ iolist_to_binary(Type)),
+ ?assertEqual(
+ <<"01234">>,
+ iolist_to_binary(B)),
+ ok.
+
+parts_to_body_multi_test() ->
+ {[{"Content-Type", Type}],
+ _B} = parts_to_body([{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ 10),
+ ?assertMatch(
+ <<"multipart/byteranges; boundary=", _/binary>>,
+ iolist_to_binary(Type)),
+ ok.
+
+parts_to_multipart_body_test() ->
+ {[{"Content-Type", V}], B} = parts_to_multipart_body(
+ [{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ 10,
+ "BOUNDARY"),
+ MB = multipart_body(
+ [{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ "BOUNDARY",
+ 10),
+ ?assertEqual(
+ <<"multipart/byteranges; boundary=BOUNDARY">>,
+ iolist_to_binary(V)),
+ ?assertEqual(
+ iolist_to_binary(MB),
+ iolist_to_binary(B)),
+ ok.
+
+multipart_body_test() ->
+ ?assertEqual(
+ <<"--BOUNDARY--\r\n">>,
+ iolist_to_binary(multipart_body([], "text/plain", "BOUNDARY", 0))),
+ ?assertEqual(
+ <<"--BOUNDARY\r\n"
+ "Content-Type: text/plain\r\n"
+ "Content-Range: bytes 0-5/10\r\n\r\n"
+ "01234\r\n"
+ "--BOUNDARY\r\n"
+ "Content-Type: text/plain\r\n"
+ "Content-Range: bytes 5-10/10\r\n\r\n"
+ "56789\r\n"
+ "--BOUNDARY--\r\n">>,
+ iolist_to_binary(multipart_body([{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ "BOUNDARY",
+ 10))),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_request.erl b/1.1.x/src/mochiweb/mochiweb_request.erl
new file mode 100644
index 00000000..ffe4e9eb
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_request.erl
@@ -0,0 +1,768 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc MochiWeb HTTP Request abstraction.
+
+-module(mochiweb_request, [Socket, Method, RawPath, Version, Headers]).
+-author('bob@mochimedia.com').
+
+-include_lib("kernel/include/file.hrl").
+-include("internal.hrl").
+
+-define(QUIP, "Any of you quaids got a smint?").
+
+-export([get_header_value/1, get_primary_header_value/1, get/1, dump/0]).
+-export([send/1, recv/1, recv/2, recv_body/0, recv_body/1, stream_body/3]).
+-export([start_response/1, start_response_length/1, start_raw_response/1]).
+-export([respond/1, ok/1]).
+-export([not_found/0, not_found/1]).
+-export([parse_post/0, parse_qs/0]).
+-export([should_close/0, cleanup/0]).
+-export([parse_cookie/0, get_cookie_value/1]).
+-export([serve_file/2, serve_file/3]).
+-export([accepted_encodings/1]).
+-export([accepts_content_type/1]).
+
+-define(SAVE_QS, mochiweb_request_qs).
+-define(SAVE_PATH, mochiweb_request_path).
+-define(SAVE_RECV, mochiweb_request_recv).
+-define(SAVE_BODY, mochiweb_request_body).
+-define(SAVE_BODY_LENGTH, mochiweb_request_body_length).
+-define(SAVE_POST, mochiweb_request_post).
+-define(SAVE_COOKIE, mochiweb_request_cookie).
+-define(SAVE_FORCE_CLOSE, mochiweb_request_force_close).
+
+%% @type iolist() = [iolist() | binary() | char()].
+%% @type iodata() = binary() | iolist().
+%% @type key() = atom() | string() | binary()
+%% @type value() = atom() | string() | binary() | integer()
+%% @type headers(). A mochiweb_headers structure.
+%% @type response(). A mochiweb_response parameterized module instance.
+%% @type ioheaders() = headers() | [{key(), value()}].
+
+% 10 second default idle timeout
+-define(IDLE_TIMEOUT, 10000).
+
+% Maximum recv_body() length of 1MB
+-define(MAX_RECV_BODY, (1024*1024)).
+
+%% @spec get_header_value(K) -> undefined | Value
+%% @doc Get the value of a given request header.
+get_header_value(K) ->
+ mochiweb_headers:get_value(K, Headers).
+
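+%% @spec get_primary_header_value(K) -> undefined | Value
+%% @doc Get the value of a given request header up to the first semicolon,
+%%      e.g. the media type of a Content-Type header without its parameters.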
+get_primary_header_value(K) ->
+ mochiweb_headers:get_primary_value(K, Headers).
+
+%% @type field() = socket | scheme | method | raw_path | version | headers | peer | path | body_length | range
+
+%% @spec get(field()) -> term()
+%% @doc Return the internal representation of the given field. If
+%% <code>socket</code> is requested on a HTTPS connection, then
+%% an ssl socket will be returned as <code>{ssl, SslSocket}</code>.
+%% You can use <code>SslSocket</code> with the <code>ssl</code>
+%% application, eg: <code>ssl:peercert(SslSocket)</code>.
+get(socket) ->
+ Socket;
+get(scheme) ->
+ case mochiweb_socket:type(Socket) of
+ plain ->
+ http;
+ ssl ->
+ https
+ end;
+get(method) ->
+ Method;
+get(raw_path) ->
+ RawPath;
+get(version) ->
+ Version;
+get(headers) ->
+ Headers;
+get(peer) ->
+ case mochiweb_socket:peername(Socket) of
+ {ok, {Addr={10, _, _, _}, _Port}} ->
+ case get_header_value("x-forwarded-for") of
+ undefined ->
+ inet_parse:ntoa(Addr);
+ Hosts ->
+ string:strip(lists:last(string:tokens(Hosts, ",")))
+ end;
+ {ok, {{127, 0, 0, 1}, _Port}} ->
+ case get_header_value("x-forwarded-for") of
+ undefined ->
+ "127.0.0.1";
+ Hosts ->
+ string:strip(lists:last(string:tokens(Hosts, ",")))
+ end;
+ {ok, {Addr, _Port}} ->
+ inet_parse:ntoa(Addr);
+ {error, enotconn} ->
+ exit(normal)
+ end;
+get(path) ->
+ case erlang:get(?SAVE_PATH) of
+ undefined ->
+ {Path0, _, _} = mochiweb_util:urlsplit_path(RawPath),
+ Path = mochiweb_util:unquote(Path0),
+ put(?SAVE_PATH, Path),
+ Path;
+ Cached ->
+ Cached
+ end;
+get(body_length) ->
+ case erlang:get(?SAVE_BODY_LENGTH) of
+ undefined ->
+ BodyLength = body_length(),
+ put(?SAVE_BODY_LENGTH, {cached, BodyLength}),
+ BodyLength;
+ {cached, Cached} ->
+ Cached
+ end;
+get(range) ->
+ case get_header_value(range) of
+ undefined ->
+ undefined;
+ RawRange ->
+ mochiweb_http:parse_range_request(RawRange)
+ end.
+
+%% @spec dump() -> {mochiweb_request, [{atom(), term()}]}
+%% @doc Dump the internal representation to a "human readable" set of terms
+%% for debugging/inspection purposes.
+dump() ->
+ {?MODULE, [{method, Method},
+ {version, Version},
+ {raw_path, RawPath},
+ {headers, mochiweb_headers:to_list(Headers)}]}.
+
+%% @spec send(iodata()) -> ok
+%% @doc Send data over the socket.
+send(Data) ->
+ case mochiweb_socket:send(Socket, Data) of
+ ok ->
+ ok;
+ _ ->
+ exit(normal)
+ end.
+
+%% @spec recv(integer()) -> binary()
+%% @doc Receive Length bytes from the client as a binary, with the default
+%% idle timeout.
+recv(Length) ->
+ recv(Length, ?IDLE_TIMEOUT).
+
+%% @spec recv(integer(), integer()) -> binary()
+%% @doc Receive Length bytes from the client as a binary, with the given
+%% Timeout in msec.
+recv(Length, Timeout) ->
+ case mochiweb_socket:recv(Socket, Length, Timeout) of
+ {ok, Data} ->
+ put(?SAVE_RECV, true),
+ Data;
+ _ ->
+ exit(normal)
+ end.
+
+%% @spec body_length() -> undefined | chunked | unknown_transfer_encoding | integer()
+%% @doc Infer body length from transfer-encoding and content-length headers.
+body_length() ->
+ case get_header_value("transfer-encoding") of
+ undefined ->
+ case get_header_value("content-length") of
+ undefined ->
+ undefined;
+ Length ->
+ list_to_integer(Length)
+ end;
+ "chunked" ->
+ chunked;
+ Unknown ->
+ {unknown_transfer_encoding, Unknown}
+ end.
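+
+%% For example: "Transfer-Encoding: chunked" gives chunked, a
+%% "Content-Length: 42" header gives 42, and neither header gives undefined.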
+
+
+%% @spec recv_body() -> binary()
+%% @doc Receive the body of the HTTP request (defined by Content-Length).
+%% Will only receive up to the default max-body length of 1MB.
+recv_body() ->
+ recv_body(?MAX_RECV_BODY).
+
+%% @spec recv_body(integer()) -> binary()
+%% @doc Receive the body of the HTTP request (defined by Content-Length).
+%% Will receive up to MaxBody bytes.
+recv_body(MaxBody) ->
+ case erlang:get(?SAVE_BODY) of
+ undefined ->
+ % we could use a sane constant for max chunk size
+ Body = stream_body(?MAX_RECV_BODY, fun
+ ({0, _ChunkedFooter}, {_LengthAcc, BinAcc}) ->
+ iolist_to_binary(lists:reverse(BinAcc));
+ ({Length, Bin}, {LengthAcc, BinAcc}) ->
+ NewLength = Length + LengthAcc,
+ if NewLength > MaxBody ->
+ exit({body_too_large, chunked});
+ true ->
+ {NewLength, [Bin | BinAcc]}
+ end
+ end, {0, []}, MaxBody),
+ put(?SAVE_BODY, Body),
+ Body;
+ Cached -> Cached
+ end.
+
+stream_body(MaxChunkSize, ChunkFun, FunState) ->
+ stream_body(MaxChunkSize, ChunkFun, FunState, undefined).
+
+stream_body(MaxChunkSize, ChunkFun, FunState, MaxBodyLength) ->
+ Expect = case get_header_value("expect") of
+ undefined ->
+ undefined;
+ Value when is_list(Value) ->
+ string:to_lower(Value)
+ end,
+ case Expect of
+ "100-continue" ->
+ start_raw_response({100, gb_trees:empty()});
+ _Else ->
+ ok
+ end,
+ case body_length() of
+ undefined ->
+ undefined;
+ {unknown_transfer_encoding, Unknown} ->
+ exit({unknown_transfer_encoding, Unknown});
+ chunked ->
+ % In this case the MaxBody is actually used to
+ % determine the maximum allowed size of a single
+ % chunk.
+ stream_chunked_body(MaxChunkSize, ChunkFun, FunState);
+ 0 ->
+ <<>>;
+ Length when is_integer(Length) ->
+ case MaxBodyLength of
+ MaxBodyLength when is_integer(MaxBodyLength), MaxBodyLength < Length ->
+ exit({body_too_large, content_length});
+ _ ->
+ stream_unchunked_body(Length, ChunkFun, FunState)
+ end;
+ Length ->
+ exit({length_not_integer, Length})
+ end.
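+
+%% Example ChunkFun (editor's sketch): spooling a large body to disk, where
+%% Fd is assumed to be a file opened for writing. The {0, _} call marks the
+%% end of the body (chunked trailers, or an empty binary for plain bodies).
+%%   stream_body(8192,
+%%               fun ({0, _Footer}, ok) -> ok;
+%%                   ({_Size, Bin}, ok) -> ok = file:write(Fd, Bin)
+%%               end,
+%%               ok).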
+
+
+%% @spec start_response({integer(), ioheaders()}) -> response()
+%% @doc Start the HTTP response by sending the Code HTTP response and
+%% ResponseHeaders. The server will set header defaults such as Server
+%% and Date if not present in ResponseHeaders.
+start_response({Code, ResponseHeaders}) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ HResponse1 = mochiweb_headers:default_from_list(server_headers(),
+ HResponse),
+ start_raw_response({Code, HResponse1}).
+
+%% @spec start_raw_response({integer(), headers()}) -> response()
+%% @doc Start the HTTP response by sending the Code HTTP response and
+%% ResponseHeaders.
+start_raw_response({Code, ResponseHeaders}) ->
+ F = fun ({K, V}, Acc) ->
+ [mochiweb_util:make_io(K), <<": ">>, V, <<"\r\n">> | Acc]
+ end,
+ End = lists:foldl(F, [<<"\r\n">>],
+ mochiweb_headers:to_list(ResponseHeaders)),
+ send([make_version(Version), make_code(Code), <<"\r\n">> | End]),
+ mochiweb:new_response({THIS, Code, ResponseHeaders}).
+
+
+%% @spec start_response_length({integer(), ioheaders(), integer()}) -> response()
+%% @doc Start the HTTP response by sending the Code HTTP response and
+%% ResponseHeaders including a Content-Length of Length. The server
+%% will set header defaults such as Server
+%% and Date if not present in ResponseHeaders.
+start_response_length({Code, ResponseHeaders, Length}) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ HResponse1 = mochiweb_headers:enter("Content-Length", Length, HResponse),
+ start_response({Code, HResponse1}).
+
+%% @spec respond({integer(), ioheaders(), iodata() | chunked | {file, IoDevice}}) -> response()
+%% @doc Start the HTTP response with start_response, and send Body to the
+%% client (if the get(method) /= 'HEAD'). The Content-Length header
+%% will be set by the Body length, and the server will insert header
+%% defaults.
+respond({Code, ResponseHeaders, {file, IoDevice}}) ->
+ Length = mochiweb_io:iodevice_size(IoDevice),
+ Response = start_response_length({Code, ResponseHeaders, Length}),
+ case Method of
+ 'HEAD' ->
+ ok;
+ _ ->
+ mochiweb_io:iodevice_stream(fun send/1, IoDevice)
+ end,
+ Response;
+respond({Code, ResponseHeaders, chunked}) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ HResponse1 = case Method of
+ 'HEAD' ->
+ %% This is what Google does, http://www.google.com/
+ %% is chunked but HEAD gets Content-Length: 0.
+ %% The RFC is ambiguous so emulating Google is smart.
+ mochiweb_headers:enter("Content-Length", "0",
+ HResponse);
+ _ when Version >= {1, 1} ->
+ %% Only use chunked encoding for HTTP/1.1
+ mochiweb_headers:enter("Transfer-Encoding", "chunked",
+ HResponse);
+ _ ->
+ %% For pre-1.1 clients we send the data as-is
+ %% without a Content-Length header and without
+ %% chunk delimiters. Since the end of the document
+ %% is now ambiguous we must force a close.
+ put(?SAVE_FORCE_CLOSE, true),
+ HResponse
+ end,
+ start_response({Code, HResponse1});
+respond({Code, ResponseHeaders, Body}) ->
+ Response = start_response_length({Code, ResponseHeaders, iolist_size(Body)}),
+ case Method of
+ 'HEAD' ->
+ ok;
+ _ ->
+ send(Body)
+ end,
+ Response.
+
+%% @spec not_found() -> response()
+%% @doc Alias for <code>not_found([])</code>.
+not_found() ->
+ not_found([]).
+
+%% @spec not_found(ExtraHeaders) -> response()
+%% @doc Alias for <code>respond({404, [{"Content-Type", "text/plain"}
+%% | ExtraHeaders], &lt;&lt;"Not found."&gt;&gt;})</code>.
+not_found(ExtraHeaders) ->
+ respond({404, [{"Content-Type", "text/plain"} | ExtraHeaders],
+ <<"Not found.">>}).
+
+%% @spec ok({value(), iodata()} | {value(), ioheaders(), iodata() | {file, IoDevice}}) ->
+%% response()
+%% @doc respond({200, [{"Content-Type", ContentType} | Headers], Body}).
+ok({ContentType, Body}) ->
+ ok({ContentType, [], Body});
+ok({ContentType, ResponseHeaders, Body}) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ case THIS:get(range) of
+ X when (X =:= undefined orelse X =:= fail) orelse Body =:= chunked ->
+ %% http://code.google.com/p/mochiweb/issues/detail?id=54
+ %% Range header not supported when chunked, return 200 and provide
+ %% full response.
+ HResponse1 = mochiweb_headers:enter("Content-Type", ContentType,
+ HResponse),
+ respond({200, HResponse1, Body});
+ Ranges ->
+ {PartList, Size} = range_parts(Body, Ranges),
+ case PartList of
+ [] -> %% no valid ranges
+ HResponse1 = mochiweb_headers:enter("Content-Type",
+ ContentType,
+ HResponse),
+ %% could be 416, for now we'll just return 200
+ respond({200, HResponse1, Body});
+ PartList ->
+ {RangeHeaders, RangeBody} =
+ mochiweb_multipart:parts_to_body(PartList, ContentType, Size),
+ HResponse1 = mochiweb_headers:enter_from_list(
+ [{"Accept-Ranges", "bytes"} |
+ RangeHeaders],
+ HResponse),
+ respond({206, HResponse1, RangeBody})
+ end
+ end.
+
+%% @spec should_close() -> bool()
+%% @doc Return true if the connection must be closed. If false, using
+%% Keep-Alive should be safe.
+should_close() ->
+    ForceClose = erlang:get(?SAVE_FORCE_CLOSE) =/= undefined,
+    DidNotRecv = erlang:get(?SAVE_RECV) =:= undefined,
+ ForceClose orelse Version < {1, 0}
+ %% Connection: close
+ orelse get_header_value("connection") =:= "close"
+ %% HTTP 1.0 requires Connection: Keep-Alive
+ orelse (Version =:= {1, 0}
+ andalso get_header_value("connection") =/= "Keep-Alive")
+ %% unread data left on the socket, can't safely continue
+ orelse (DidNotRecv
+ andalso get_header_value("content-length") =/= undefined
+ andalso list_to_integer(get_header_value("content-length")) > 0)
+ orelse (DidNotRecv
+ andalso get_header_value("transfer-encoding") =:= "chunked").
+
+%% @spec cleanup() -> ok
+%% @doc Clean up any junk in the process dictionary, required before continuing
+%% a Keep-Alive request.
+cleanup() ->
+    [erase(K) || K <- [?SAVE_QS,
+                       ?SAVE_PATH,
+                       ?SAVE_RECV,
+                       ?SAVE_BODY,
+                       ?SAVE_BODY_LENGTH,
+                       ?SAVE_POST,
+                       ?SAVE_COOKIE,
+                       ?SAVE_FORCE_CLOSE]],
+ ok.
+
+%% @spec parse_qs() -> [{Key::string(), Value::string()}]
+%% @doc Parse the query string of the URL.
+parse_qs() ->
+ case erlang:get(?SAVE_QS) of
+ undefined ->
+ {_, QueryString, _} = mochiweb_util:urlsplit_path(RawPath),
+ Parsed = mochiweb_util:parse_qs(QueryString),
+ put(?SAVE_QS, Parsed),
+ Parsed;
+ Cached ->
+ Cached
+ end.
+
+%% @spec get_cookie_value(Key::string()) -> string() | undefined
+%% @doc Get the value of the given cookie.
+get_cookie_value(Key) ->
+ proplists:get_value(Key, parse_cookie()).
+
+%% @spec parse_cookie() -> [{Key::string(), Value::string()}]
+%% @doc Parse the cookie header.
+parse_cookie() ->
+ case erlang:get(?SAVE_COOKIE) of
+ undefined ->
+ Cookies = case get_header_value("cookie") of
+ undefined ->
+ [];
+ Value ->
+ mochiweb_cookies:parse_cookie(Value)
+ end,
+ put(?SAVE_COOKIE, Cookies),
+ Cookies;
+ Cached ->
+ Cached
+ end.
+
+%% @spec parse_post() -> [{Key::string(), Value::string()}]
+%% @doc Parse an application/x-www-form-urlencoded form POST. This
+%% has the side-effect of calling recv_body().
+parse_post() ->
+ case erlang:get(?SAVE_POST) of
+ undefined ->
+ Parsed = case recv_body() of
+ undefined ->
+ [];
+ Binary ->
+ case get_primary_header_value("content-type") of
+ "application/x-www-form-urlencoded" ++ _ ->
+ mochiweb_util:parse_qs(Binary);
+ _ ->
+ []
+ end
+ end,
+ put(?SAVE_POST, Parsed),
+ Parsed;
+ Cached ->
+ Cached
+ end.
+
+%% @spec stream_chunked_body(integer(), fun(), term()) -> term()
+%% @doc Stream a chunked request body, calling Fun for each chunk and once
+%%      more with {0, Footers} at the end. Used internally by stream_body.
+stream_chunked_body(MaxChunkSize, Fun, FunState) ->
+ case read_chunk_length() of
+ 0 ->
+ Fun({0, read_chunk(0)}, FunState);
+ Length when Length > MaxChunkSize ->
+ NewState = read_sub_chunks(Length, MaxChunkSize, Fun, FunState),
+ stream_chunked_body(MaxChunkSize, Fun, NewState);
+ Length ->
+ NewState = Fun({Length, read_chunk(Length)}, FunState),
+ stream_chunked_body(MaxChunkSize, Fun, NewState)
+ end.
+
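+%% @spec stream_unchunked_body(integer(), fun(), term()) -> term()
+%% @doc Stream a body of known length by calling Fun with pieces of at most
+%%      ?RECBUF_SIZE bytes until Length bytes have been consumed, finishing
+%%      with a {0, <<>>} call.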
+stream_unchunked_body(0, Fun, FunState) ->
+ Fun({0, <<>>}, FunState);
+stream_unchunked_body(Length, Fun, FunState) when Length > 0 ->
+ PktSize = case Length > ?RECBUF_SIZE of
+ true ->
+ ?RECBUF_SIZE;
+ false ->
+ Length
+ end,
+ Bin = recv(PktSize),
+ NewState = Fun({PktSize, Bin}, FunState),
+ stream_unchunked_body(Length - PktSize, Fun, NewState).
+
+%% @spec read_chunk_length() -> integer()
+%% @doc Read the length of the next HTTP chunk.
+read_chunk_length() ->
+ mochiweb_socket:setopts(Socket, [{packet, line}]),
+ case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
+ {ok, Header} ->
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ Splitter = fun (C) ->
+ C =/= $\r andalso C =/= $\n andalso C =/= $
+ end,
+ {Hex, _Rest} = lists:splitwith(Splitter, binary_to_list(Header)),
+ mochihex:to_int(Hex);
+ _ ->
+ exit(normal)
+ end.
+
+%% @spec read_chunk(integer()) -> Chunk::binary() | [Footer::binary()]
+%% @doc Read in an HTTP chunk of the given length. If Length is 0, then read the
+%% HTTP footers (as a list of binaries, since they're nominal).
+read_chunk(0) ->
+ mochiweb_socket:setopts(Socket, [{packet, line}]),
+ F = fun (F1, Acc) ->
+ case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
+ {ok, <<"\r\n">>} ->
+ Acc;
+ {ok, Footer} ->
+ F1(F1, [Footer | Acc]);
+ _ ->
+ exit(normal)
+ end
+ end,
+ Footers = F(F, []),
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ put(?SAVE_RECV, true),
+ Footers;
+read_chunk(Length) ->
+ case mochiweb_socket:recv(Socket, 2 + Length, ?IDLE_TIMEOUT) of
+ {ok, <<Chunk:Length/binary, "\r\n">>} ->
+ Chunk;
+ _ ->
+ exit(normal)
+ end.
+
+read_sub_chunks(Length, MaxChunkSize, Fun, FunState) when Length > MaxChunkSize ->
+ Bin = recv(MaxChunkSize),
+ NewState = Fun({size(Bin), Bin}, FunState),
+ read_sub_chunks(Length - MaxChunkSize, MaxChunkSize, Fun, NewState);
+
+read_sub_chunks(Length, _MaxChunkSize, Fun, FunState) ->
+ Fun({Length, read_chunk(Length)}, FunState).
+
+%% @spec serve_file(Path, DocRoot) -> Response
+%% @doc Serve a file relative to DocRoot.
+serve_file(Path, DocRoot) ->
+ serve_file(Path, DocRoot, []).
+
+%% @spec serve_file(Path, DocRoot, ExtraHeaders) -> Response
+%% @doc Serve a file relative to DocRoot.
+serve_file(Path, DocRoot, ExtraHeaders) ->
+ case mochiweb_util:safe_relative_path(Path) of
+ undefined ->
+ not_found(ExtraHeaders);
+ RelPath ->
+ FullPath = filename:join([DocRoot, RelPath]),
+ case filelib:is_dir(FullPath) of
+ true ->
+ maybe_redirect(RelPath, FullPath, ExtraHeaders);
+ false ->
+ maybe_serve_file(FullPath, ExtraHeaders)
+ end
+ end.
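+%% A minimal usage sketch (the loop fun, port, and DocRoot are assumptions):
+%%   Loop = fun (Req) ->
+%%              "/" ++ Path = Req:get(path),
+%%              Req:serve_file(Path, "/var/www")
+%%          end,
+%%   mochiweb_http:start([{port, 8080}, {loop, Loop}]).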
+
+%% Internal API
+
+%% This has the same effect as the DirectoryIndex directive in httpd
+directory_index(FullPath) ->
+ filename:join([FullPath, "index.html"]).
+
+maybe_redirect([], FullPath, ExtraHeaders) ->
+ maybe_serve_file(directory_index(FullPath), ExtraHeaders);
+
+maybe_redirect(RelPath, FullPath, ExtraHeaders) ->
+ case string:right(RelPath, 1) of
+ "/" ->
+ maybe_serve_file(directory_index(FullPath), ExtraHeaders);
+ _ ->
+ Host = mochiweb_headers:get_value("host", Headers),
+ Location = "http://" ++ Host ++ "/" ++ RelPath ++ "/",
+ LocationBin = list_to_binary(Location),
+ MoreHeaders = [{"Location", Location},
+ {"Content-Type", "text/html"} | ExtraHeaders],
+ Top = <<"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">"
+ "<html><head>"
+ "<title>301 Moved Permanently</title>"
+ "</head><body>"
+ "<h1>Moved Permanently</h1>"
+ "<p>The document has moved <a href=\"">>,
+ Bottom = <<">here</a>.</p></body></html>\n">>,
+ Body = <<Top/binary, LocationBin/binary, Bottom/binary>>,
+ respond({301, MoreHeaders, Body})
+ end.
+
+maybe_serve_file(File, ExtraHeaders) ->
+ case file:read_file_info(File) of
+ {ok, FileInfo} ->
+ LastModified = httpd_util:rfc1123_date(FileInfo#file_info.mtime),
+ case get_header_value("if-modified-since") of
+ LastModified ->
+ respond({304, ExtraHeaders, ""});
+ _ ->
+ case file:open(File, [raw, binary]) of
+ {ok, IoDevice} ->
+ ContentType = mochiweb_util:guess_mime(File),
+ Res = ok({ContentType,
+ [{"last-modified", LastModified}
+ | ExtraHeaders],
+ {file, IoDevice}}),
+ file:close(IoDevice),
+ Res;
+ _ ->
+ not_found(ExtraHeaders)
+ end
+ end;
+ {error, _} ->
+ not_found(ExtraHeaders)
+ end.
+
+server_headers() ->
+ [{"Server", "MochiWeb/1.0 (" ++ ?QUIP ++ ")"},
+ {"Date", httpd_util:rfc1123_date()}].
+
+make_code(X) when is_integer(X) ->
+ [integer_to_list(X), [" " | httpd_util:reason_phrase(X)]];
+make_code(Io) when is_list(Io); is_binary(Io) ->
+ Io.
+
+make_version({1, 0}) ->
+ <<"HTTP/1.0 ">>;
+make_version(_) ->
+ <<"HTTP/1.1 ">>.
+
+range_parts({file, IoDevice}, Ranges) ->
+ Size = mochiweb_io:iodevice_size(IoDevice),
+ F = fun (Spec, Acc) ->
+ case mochiweb_http:range_skip_length(Spec, Size) of
+ invalid_range ->
+ Acc;
+ V ->
+ [V | Acc]
+ end
+ end,
+ LocNums = lists:foldr(F, [], Ranges),
+ {ok, Data} = file:pread(IoDevice, LocNums),
+ Bodies = lists:zipwith(fun ({Skip, Length}, PartialBody) ->
+ {Skip, Skip + Length - 1, PartialBody}
+ end,
+ LocNums, Data),
+ {Bodies, Size};
+range_parts(Body0, Ranges) ->
+ Body = iolist_to_binary(Body0),
+ Size = size(Body),
+ F = fun(Spec, Acc) ->
+ case mochiweb_http:range_skip_length(Spec, Size) of
+ invalid_range ->
+ Acc;
+ {Skip, Length} ->
+ <<_:Skip/binary, PartialBody:Length/binary, _/binary>> = Body,
+ [{Skip, Skip + Length - 1, PartialBody} | Acc]
+ end
+ end,
+ {lists:foldr(F, [], Ranges), Size}.
+
+%% @spec accepted_encodings([encoding()]) -> [encoding()] | bad_accept_encoding_value
+%% @type encoding() = string().
+%%
+%% @doc Returns a list of encodings accepted by a request. Encodings that are
+%% not supported by the server will not be included in the return list.
+%% This list is computed from the "Accept-Encoding" header and
+%%      its elements are sorted in descending order of their Q values.
+%%
+%% Section 14.3 of RFC 2616 (HTTP 1.1) describes the "Accept-Encoding"
+%% header and the process of determining which server-supported encodings
+%% may be used to encode the body of the response.
+%%
+%% Examples
+%%
+%% 1) For a missing "Accept-Encoding" header:
+%% accepted_encodings(["gzip", "identity"]) -> ["identity"]
+%%
+%% 2) For an "Accept-Encoding" header with value "gzip, deflate":
+%% accepted_encodings(["gzip", "identity"]) -> ["gzip", "identity"]
+%%
+%% 3) For an "Accept-Encoding" header with value "gzip;q=0.5, deflate":
+%% accepted_encodings(["gzip", "deflate", "identity"]) ->
+%% ["deflate", "gzip", "identity"]
+%%
+accepted_encodings(SupportedEncodings) ->
+ AcceptEncodingHeader = case get_header_value("Accept-Encoding") of
+ undefined ->
+ "";
+ Value ->
+ Value
+ end,
+ case mochiweb_util:parse_qvalues(AcceptEncodingHeader) of
+ invalid_qvalue_string ->
+ bad_accept_encoding_value;
+ QList ->
+ mochiweb_util:pick_accepted_encodings(
+ QList, SupportedEncodings, "identity"
+ )
+ end.
+
+%% @spec accepts_content_type(string() | binary()) -> boolean() | bad_accept_header
+%%
+%% @doc Determines whether a request accepts a given media type by analyzing its
+%% "Accept" header.
+%%
+%% Examples
+%%
+%% 1) For a missing "Accept" header:
+%% accepts_content_type("application/json") -> true
+%%
+%% 2) For an "Accept" header with value "text/plain, application/*":
+%% accepts_content_type("application/json") -> true
+%%
+%% 3) For an "Accept" header with value "text/plain, */*; q=0.0":
+%% accepts_content_type("application/json") -> false
+%%
+%% 4) For an "Accept" header with value "text/plain; q=0.5, */*; q=0.1":
+%% accepts_content_type("application/json") -> true
+%%
+%% 5) For an "Accept" header with value "text/*; q=0.0, */*":
+%% accepts_content_type("text/plain") -> false
+%%
+accepts_content_type(ContentType) when is_binary(ContentType) ->
+ accepts_content_type(binary_to_list(ContentType));
+accepts_content_type(ContentType1) ->
+ ContentType = re:replace(ContentType1, "\\s", "", [global, {return, list}]),
+ AcceptHeader = case get_header_value("Accept") of
+ undefined ->
+ "*/*";
+ Value ->
+ Value
+ end,
+ case mochiweb_util:parse_qvalues(AcceptHeader) of
+ invalid_qvalue_string ->
+ bad_accept_header;
+ QList ->
+ [MainType, _SubType] = string:tokens(ContentType, "/"),
+ SuperType = MainType ++ "/*",
+ lists:any(
+ fun({"*/*", Q}) when Q > 0.0 ->
+ true;
+ ({Type, Q}) when Q > 0.0 ->
+ Type =:= ContentType orelse Type =:= SuperType;
+ (_) ->
+ false
+ end,
+ QList
+ ) andalso
+ (not lists:member({ContentType, 0.0}, QList)) andalso
+ (not lists:member({SuperType, 0.0}, QList))
+ end.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_response.erl b/1.1.x/src/mochiweb/mochiweb_response.erl
new file mode 100644
index 00000000..ab8ee61c
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_response.erl
@@ -0,0 +1,64 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Response abstraction.
+
+-module(mochiweb_response, [Request, Code, Headers]).
+-author('bob@mochimedia.com').
+
+-define(QUIP, "Any of you quaids got a smint?").
+
+-export([get_header_value/1, get/1, dump/0]).
+-export([send/1, write_chunk/1]).
+
+%% @spec get_header_value(string() | atom() | binary()) -> string() | undefined
+%% @doc Get the value of the given response header.
+get_header_value(K) ->
+ mochiweb_headers:get_value(K, Headers).
+
+%% @spec get(request | code | headers) -> term()
+%% @doc Return the internal representation of the given field.
+get(request) ->
+ Request;
+get(code) ->
+ Code;
+get(headers) ->
+ Headers.
+
+%% @spec dump() -> [{atom(), term()}]
+%% @doc Dump the internal representation to a "human readable" set of terms
+%% for debugging/inspection purposes.
+dump() ->
+ [{request, Request:dump()},
+ {code, Code},
+ {headers, mochiweb_headers:to_list(Headers)}].
+
+%% @spec send(iodata()) -> ok
+%% @doc Send data over the socket if the method is not HEAD.
+send(Data) ->
+ case Request:get(method) of
+ 'HEAD' ->
+ ok;
+ _ ->
+ Request:send(Data)
+ end.
+
+%% @spec write_chunk(iodata()) -> ok
+%% @doc Write a chunk of an HTTP chunked response. If Data is zero length,
+%% then the chunked response will be finished.
+write_chunk(Data) ->
+ case Request:get(version) of
+ Version when Version >= {1, 1} ->
+ Length = iolist_size(Data),
+ send([io_lib:format("~.16b\r\n", [Length]), Data, <<"\r\n">>]);
+ _ ->
+ send(Data)
+ end.
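+%% A hedged usage sketch (handler code assumed; requires an HTTP/1.1 request):
+%% a handler can start a chunked response with
+%%   Resp = Req:respond({200, [{"Content-Type", "text/plain"}], chunked}),
+%%   Resp:write_chunk(<<"hello">>),
+%%   Resp:write_chunk(<<>>).   %% zero-length data finishes the response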
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_skel.erl b/1.1.x/src/mochiweb/mochiweb_skel.erl
new file mode 100644
index 00000000..76eefa60
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_skel.erl
@@ -0,0 +1,86 @@
+-module(mochiweb_skel).
+-export([skelcopy/2]).
+
+-include_lib("kernel/include/file.hrl").
+
+%% External API
+
+skelcopy(DestDir, Name) ->
+ ok = ensuredir(DestDir),
+ LDst = case length(filename:dirname(DestDir)) of
+ 1 -> %% handle case when dirname returns "/"
+ 0;
+ N ->
+ N + 1
+ end,
+ skelcopy(src(), DestDir, Name, LDst),
+ DestLink = filename:join([DestDir, Name, "deps", "mochiweb-src"]),
+ ok = filelib:ensure_dir(DestLink),
+ ok = file:make_symlink(
+ filename:join(filename:dirname(code:which(?MODULE)), ".."),
+ DestLink).
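+
+%% Illustrative invocation (destination and project name are assumptions):
+%%   mochiweb_skel:skelcopy("/tmp", "myproject").
+%% copies the skeleton to /tmp/myproject and symlinks deps/mochiweb-src back
+%% to this mochiweb checkout.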
+
+%% Internal API
+
+src() ->
+ Dir = filename:dirname(code:which(?MODULE)),
+ filename:join(Dir, "../priv/skel").
+
+skel() ->
+ "skel".
+
+skelcopy(Src, DestDir, Name, LDst) ->
+ Dest = re:replace(filename:basename(Src), skel(), Name,
+ [global, {return, list}]),
+ case file:read_file_info(Src) of
+ {ok, #file_info{type=directory, mode=Mode}} ->
+ Dir = DestDir ++ "/" ++ Dest,
+ EDst = lists:nthtail(LDst, Dir),
+ ok = ensuredir(Dir),
+ ok = file:write_file_info(Dir, #file_info{mode=Mode}),
+ case filename:basename(Src) of
+ "ebin" ->
+ ok;
+ _ ->
+ {ok, Files} = file:list_dir(Src),
+ io:format("~s/~n", [EDst]),
+ lists:foreach(fun ("." ++ _) -> ok;
+ (F) ->
+ skelcopy(filename:join(Src, F),
+ Dir,
+ Name,
+ LDst)
+ end,
+ Files),
+ ok
+ end;
+ {ok, #file_info{type=regular, mode=Mode}} ->
+ OutFile = filename:join(DestDir, Dest),
+ {ok, B} = file:read_file(Src),
+ S = re:replace(binary_to_list(B), skel(), Name,
+ [{return, list}, global]),
+ ok = file:write_file(OutFile, list_to_binary(S)),
+ ok = file:write_file_info(OutFile, #file_info{mode=Mode}),
+ io:format(" ~s~n", [filename:basename(Src)]),
+ ok;
+ {ok, _} ->
+ io:format("ignored source file: ~p~n", [Src]),
+ ok
+ end.
+
+ensuredir(Dir) ->
+ case file:make_dir(Dir) of
+ ok ->
+ ok;
+ {error, eexist} ->
+ ok;
+ E ->
+ E
+ end.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_socket.erl b/1.1.x/src/mochiweb/mochiweb_socket.erl
new file mode 100644
index 00000000..76b018c8
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_socket.erl
@@ -0,0 +1,84 @@
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc MochiWeb socket - wrapper for plain and ssl sockets.
+
+-module(mochiweb_socket).
+
+-export([listen/4, accept/1, recv/3, send/2, close/1, port/1, peername/1,
+ setopts/2, type/1]).
+
+-define(ACCEPT_TIMEOUT, 2000).
+
+listen(Ssl, Port, Opts, SslOpts) ->
+ case Ssl of
+ true ->
+ case ssl:listen(Port, Opts ++ SslOpts) of
+ {ok, ListenSocket} ->
+ {ok, {ssl, ListenSocket}};
+ {error, _} = Err ->
+ Err
+ end;
+ false ->
+ gen_tcp:listen(Port, Opts)
+ end.
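+%% Usage sketch (ports and certificate paths are assumptions): a plain
+%% listener returns the socket as-is, an SSL listener returns it tagged:
+%%   {ok, Plain} = mochiweb_socket:listen(false, 8080, [binary], []),
+%%   {ok, {ssl, _}} = mochiweb_socket:listen(true, 8443, [binary],
+%%       [{certfile, "server.crt"}, {keyfile, "server.key"}]).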
+
+accept({ssl, ListenSocket}) ->
+ % There's a bug in ssl:transport_accept/2 at the moment, which is the
+ % reason for the try...catch block. Should be fixed in OTP R14.
+ try ssl:transport_accept(ListenSocket) of
+ {ok, Socket} ->
+ case ssl:ssl_accept(Socket) of
+ ok ->
+ {ok, {ssl, Socket}};
+ {error, _} = Err ->
+ Err
+ end;
+ {error, _} = Err ->
+ Err
+ catch
+ error:{badmatch, {error, Reason}} ->
+ {error, Reason}
+ end;
+accept(ListenSocket) ->
+ gen_tcp:accept(ListenSocket, ?ACCEPT_TIMEOUT).
+
+recv({ssl, Socket}, Length, Timeout) ->
+ ssl:recv(Socket, Length, Timeout);
+recv(Socket, Length, Timeout) ->
+ gen_tcp:recv(Socket, Length, Timeout).
+
+send({ssl, Socket}, Data) ->
+ ssl:send(Socket, Data);
+send(Socket, Data) ->
+ gen_tcp:send(Socket, Data).
+
+close({ssl, Socket}) ->
+ ssl:close(Socket);
+close(Socket) ->
+ gen_tcp:close(Socket).
+
+port({ssl, Socket}) ->
+ case ssl:sockname(Socket) of
+ {ok, {_, Port}} ->
+ {ok, Port};
+ {error, _} = Err ->
+ Err
+ end;
+port(Socket) ->
+ inet:port(Socket).
+
+peername({ssl, Socket}) ->
+ ssl:peername(Socket);
+peername(Socket) ->
+ inet:peername(Socket).
+
+setopts({ssl, Socket}, Opts) ->
+ ssl:setopts(Socket, Opts);
+setopts(Socket, Opts) ->
+ inet:setopts(Socket, Opts).
+
+type({ssl, _}) ->
+ ssl;
+type(_) ->
+ plain.
+
diff --git a/1.1.x/src/mochiweb/mochiweb_socket_server.erl b/1.1.x/src/mochiweb/mochiweb_socket_server.erl
new file mode 100644
index 00000000..1aae09ac
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_socket_server.erl
@@ -0,0 +1,272 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc MochiWeb socket server.
+
+-module(mochiweb_socket_server).
+-author('bob@mochimedia.com').
+-behaviour(gen_server).
+
+-include("internal.hrl").
+
+-export([start/1, stop/1]).
+-export([init/1, handle_call/3, handle_cast/2, terminate/2, code_change/3,
+ handle_info/2]).
+-export([get/2]).
+
+-record(mochiweb_socket_server,
+ {port,
+ loop,
+ name=undefined,
+ %% NOTE: This is currently ignored.
+ max=2048,
+ ip=any,
+ listen=null,
+ nodelay=false,
+ backlog=128,
+ active_sockets=0,
+ acceptor_pool_size=16,
+ ssl=false,
+ ssl_opts=[{ssl_imp, new}],
+ acceptor_pool=sets:new()}).
+
+start(State=#mochiweb_socket_server{}) ->
+ start_server(State);
+start(Options) ->
+ start(parse_options(Options)).
+
+get(Name, Property) ->
+ gen_server:call(Name, {get, Property}).
+
+stop(Name) when is_atom(Name) ->
+ gen_server:cast(Name, stop);
+stop(Pid) when is_pid(Pid) ->
+ gen_server:cast(Pid, stop);
+stop({local, Name}) ->
+ stop(Name);
+stop({global, Name}) ->
+ stop(Name);
+stop(Options) ->
+ State = parse_options(Options),
+ stop(State#mochiweb_socket_server.name).
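+
+%% A minimal start sketch (name, port, and loop fun are assumptions):
+%%   mochiweb_socket_server:start([{name, my_server},
+%%                                 {port, 8080},
+%%                                 {loop, fun (_Socket) -> ok end}]).
+%% The loop fun is invoked by an acceptor for each accepted socket.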
+
+%% Internal API
+
+parse_options(Options) ->
+ parse_options(Options, #mochiweb_socket_server{}).
+
+parse_options([], State) ->
+ State;
+parse_options([{name, L} | Rest], State) when is_list(L) ->
+ Name = {local, list_to_atom(L)},
+ parse_options(Rest, State#mochiweb_socket_server{name=Name});
+parse_options([{name, A} | Rest], State) when A =:= undefined ->
+ parse_options(Rest, State#mochiweb_socket_server{name=A});
+parse_options([{name, A} | Rest], State) when is_atom(A) ->
+ Name = {local, A},
+ parse_options(Rest, State#mochiweb_socket_server{name=Name});
+parse_options([{name, Name} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{name=Name});
+parse_options([{port, L} | Rest], State) when is_list(L) ->
+ Port = list_to_integer(L),
+ parse_options(Rest, State#mochiweb_socket_server{port=Port});
+parse_options([{port, Port} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{port=Port});
+parse_options([{ip, Ip} | Rest], State) ->
+ ParsedIp = case Ip of
+ any ->
+ any;
+ Ip when is_tuple(Ip) ->
+ Ip;
+ Ip when is_list(Ip) ->
+ {ok, IpTuple} = inet_parse:address(Ip),
+ IpTuple
+ end,
+ parse_options(Rest, State#mochiweb_socket_server{ip=ParsedIp});
+parse_options([{loop, Loop} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{loop=Loop});
+parse_options([{backlog, Backlog} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{backlog=Backlog});
+parse_options([{nodelay, NoDelay} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{nodelay=NoDelay});
+parse_options([{acceptor_pool_size, Max} | Rest], State) ->
+ MaxInt = ensure_int(Max),
+ parse_options(Rest,
+ State#mochiweb_socket_server{acceptor_pool_size=MaxInt});
+parse_options([{max, Max} | Rest], State) ->
+ error_logger:info_report([{warning, "TODO: max is currently unsupported"},
+ {max, Max}]),
+ MaxInt = ensure_int(Max),
+ parse_options(Rest, State#mochiweb_socket_server{max=MaxInt});
+parse_options([{ssl, Ssl} | Rest], State) when is_boolean(Ssl) ->
+ parse_options(Rest, State#mochiweb_socket_server{ssl=Ssl});
+parse_options([{ssl_opts, SslOpts} | Rest], State) when is_list(SslOpts) ->
+ SslOpts1 = [{ssl_imp, new} | proplists:delete(ssl_imp, SslOpts)],
+ parse_options(Rest, State#mochiweb_socket_server{ssl_opts=SslOpts1}).
+
+start_server(State=#mochiweb_socket_server{ssl=Ssl, name=Name}) ->
+ case Ssl of
+ true ->
+ application:start(crypto),
+ application:start(public_key),
+ application:start(ssl);
+ false ->
+ void
+ end,
+ case Name of
+ undefined ->
+ gen_server:start_link(?MODULE, State, []);
+ _ ->
+ gen_server:start_link(Name, ?MODULE, State, [])
+ end.
+
+ensure_int(N) when is_integer(N) ->
+ N;
+ensure_int(S) when is_list(S) ->
+    list_to_integer(S).
+
+ipv6_supported() ->
+ case (catch inet:getaddr("localhost", inet6)) of
+ {ok, _Addr} ->
+ true;
+ {error, _} ->
+ false
+ end.
+
+init(State=#mochiweb_socket_server{ip=Ip, port=Port, backlog=Backlog, nodelay=NoDelay}) ->
+ process_flag(trap_exit, true),
+ BaseOpts = [binary,
+ {reuseaddr, true},
+ {packet, 0},
+ {backlog, Backlog},
+ {recbuf, ?RECBUF_SIZE},
+ {active, false},
+ {nodelay, NoDelay}],
+ Opts = case Ip of
+ any ->
+ case ipv6_supported() of % IPv4, and IPv6 if supported
+ true -> [inet, inet6 | BaseOpts];
+ _ -> BaseOpts
+ end;
+ {_, _, _, _} -> % IPv4
+ [inet, {ip, Ip} | BaseOpts];
+ {_, _, _, _, _, _, _, _} -> % IPv6
+ [inet6, {ip, Ip} | BaseOpts]
+ end,
+ case listen(Port, Opts, State) of
+ {stop, eacces} ->
+ case Port < 1024 of
+ true ->
+ case fdsrv:start() of
+ {ok, _} ->
+ case fdsrv:bind_socket(tcp, Port) of
+ {ok, Fd} ->
+ listen(Port, [{fd, Fd} | Opts], State);
+ _ ->
+ {stop, fdsrv_bind_failed}
+ end;
+ _ ->
+ {stop, fdsrv_start_failed}
+ end;
+ false ->
+ {stop, eacces}
+ end;
+ Other ->
+ Other
+ end.
+
+new_acceptor_pool(Listen,
+ State=#mochiweb_socket_server{acceptor_pool=Pool,
+ acceptor_pool_size=Size,
+ loop=Loop}) ->
+ F = fun (_, S) ->
+ Pid = mochiweb_acceptor:start_link(self(), Listen, Loop),
+ sets:add_element(Pid, S)
+ end,
+ Pool1 = lists:foldl(F, Pool, lists:seq(1, Size)),
+ State#mochiweb_socket_server{acceptor_pool=Pool1}.
+
+listen(Port, Opts, State=#mochiweb_socket_server{ssl=Ssl, ssl_opts=SslOpts}) ->
+ case mochiweb_socket:listen(Ssl, Port, Opts, SslOpts) of
+ {ok, Listen} ->
+ {ok, ListenPort} = mochiweb_socket:port(Listen),
+ {ok, new_acceptor_pool(
+ Listen,
+ State#mochiweb_socket_server{listen=Listen,
+ port=ListenPort})};
+ {error, Reason} ->
+ {stop, Reason}
+ end.
+
+do_get(port, #mochiweb_socket_server{port=Port}) ->
+ Port;
+do_get(active_sockets, #mochiweb_socket_server{active_sockets=ActiveSockets}) ->
+ ActiveSockets.
+
+handle_call({get, Property}, _From, State) ->
+ Res = do_get(Property, State),
+ {reply, Res, State};
+handle_call(_Message, _From, State) ->
+ Res = error,
+ {reply, Res, State}.
+
+handle_cast({accepted, Pid, _Timing},
+ State=#mochiweb_socket_server{active_sockets=ActiveSockets}) ->
+ State1 = State#mochiweb_socket_server{active_sockets=1 + ActiveSockets},
+ {noreply, recycle_acceptor(Pid, State1)};
+handle_cast(stop, State) ->
+ {stop, normal, State}.
+
+terminate(_Reason, #mochiweb_socket_server{listen=Listen, port=Port}) ->
+ mochiweb_socket:close(Listen),
+ case Port < 1024 of
+ true ->
+ catch fdsrv:stop(),
+ ok;
+ false ->
+ ok
+ end.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+recycle_acceptor(Pid, State=#mochiweb_socket_server{
+ acceptor_pool=Pool,
+ listen=Listen,
+ loop=Loop,
+ active_sockets=ActiveSockets}) ->
+ case sets:is_element(Pid, Pool) of
+ true ->
+ Acceptor = mochiweb_acceptor:start_link(self(), Listen, Loop),
+ Pool1 = sets:add_element(Acceptor, sets:del_element(Pid, Pool)),
+ State#mochiweb_socket_server{acceptor_pool=Pool1};
+ false ->
+ State#mochiweb_socket_server{active_sockets=ActiveSockets - 1}
+ end.
+
+handle_info({'EXIT', Pid, normal}, State) ->
+ {noreply, recycle_acceptor(Pid, State)};
+handle_info({'EXIT', Pid, Reason},
+ State=#mochiweb_socket_server{acceptor_pool=Pool}) ->
+ case sets:is_element(Pid, Pool) of
+ true ->
+ %% If there was an unexpected error accepting, log and sleep.
+ error_logger:error_report({?MODULE, ?LINE,
+ {acceptor_error, Reason}}),
+ timer:sleep(100);
+ false ->
+ ok
+ end,
+ {noreply, recycle_acceptor(Pid, State)};
+handle_info(Info, State) ->
+ error_logger:info_report([{'INFO', Info}, {'State', State}]),
+ {noreply, State}.
+
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_sup.erl b/1.1.x/src/mochiweb/mochiweb_sup.erl
new file mode 100644
index 00000000..af7df9b3
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_sup.erl
@@ -0,0 +1,41 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Supervisor for the mochiweb application.
+
+-module(mochiweb_sup).
+-author('bob@mochimedia.com').
+
+-behaviour(supervisor).
+
+%% External exports
+-export([start_link/0, upgrade/0]).
+
+%% supervisor callbacks
+-export([init/1]).
+
+%% @spec start_link() -> ServerRet
+%% @doc API for starting the supervisor.
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% @spec upgrade() -> ok
+%% @doc Add processes if necessary.
+upgrade() ->
+ {ok, {_, Specs}} = init([]),
+ [supervisor:start_child(?MODULE, Spec) || Spec <- Specs],
+ ok.
+
+%% @spec init([]) -> SupervisorTree
+%% @doc supervisor callback; returns the supervisor tree (currently with no
+%%      static children).
+init([]) ->
+ Processes = [],
+ {ok, {{one_for_one, 10, 10}, Processes}}.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/1.1.x/src/mochiweb/mochiweb_util.erl b/1.1.x/src/mochiweb/mochiweb_util.erl
new file mode 100644
index 00000000..62ff0d06
--- /dev/null
+++ b/1.1.x/src/mochiweb/mochiweb_util.erl
@@ -0,0 +1,973 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Utilities for parsing and quoting.
+
+-module(mochiweb_util).
+-author('bob@mochimedia.com').
+-export([join/2, quote_plus/1, urlencode/1, parse_qs/1, unquote/1]).
+-export([path_split/1]).
+-export([urlsplit/1, urlsplit_path/1, urlunsplit/1, urlunsplit_path/1]).
+-export([guess_mime/1, parse_header/1]).
+-export([shell_quote/1, cmd/1, cmd_string/1, cmd_port/2, cmd_status/1]).
+-export([record_to_proplist/2, record_to_proplist/3]).
+-export([safe_relative_path/1, partition/2]).
+-export([parse_qvalues/1, pick_accepted_encodings/3]).
+-export([make_io/1]).
+
+-define(PERCENT, 37). % $\%
+-define(FULLSTOP, 46). % $\.
+-define(IS_HEX(C), ((C >= $0 andalso C =< $9) orelse
+ (C >= $a andalso C =< $f) orelse
+ (C >= $A andalso C =< $F))).
+-define(QS_SAFE(C), ((C >= $a andalso C =< $z) orelse
+ (C >= $A andalso C =< $Z) orelse
+ (C >= $0 andalso C =< $9) orelse
+ (C =:= ?FULLSTOP orelse C =:= $- orelse C =:= $~ orelse
+ C =:= $_))).
+
+hexdigit(C) when C < 10 -> $0 + C;
+hexdigit(C) when C < 16 -> $A + (C - 10).
+
+unhexdigit(C) when C >= $0, C =< $9 -> C - $0;
+unhexdigit(C) when C >= $a, C =< $f -> C - $a + 10;
+unhexdigit(C) when C >= $A, C =< $F -> C - $A + 10.
+
+%% @spec partition(String, Sep) -> {String, [], []} | {Prefix, Sep, Postfix}
+%% @doc Inspired by Python 2.5's str.partition:
+%% partition("foo/bar", "/") = {"foo", "/", "bar"},
+%% partition("foo", "/") = {"foo", "", ""}.
+partition(String, Sep) ->
+ case partition(String, Sep, []) of
+ undefined ->
+ {String, "", ""};
+ Result ->
+ Result
+ end.
+
+partition("", _Sep, _Acc) ->
+ undefined;
+partition(S, Sep, Acc) ->
+ case partition2(S, Sep) of
+ undefined ->
+ [C | Rest] = S,
+ partition(Rest, Sep, [C | Acc]);
+ Rest ->
+ {lists:reverse(Acc), Sep, Rest}
+ end.
+
+partition2(Rest, "") ->
+ Rest;
+partition2([C | R1], [C | R2]) ->
+ partition2(R1, R2);
+partition2(_S, _Sep) ->
+ undefined.
+
+
+
+%% @spec safe_relative_path(string()) -> string() | undefined
+%% @doc Return the reduced version of a relative path or undefined if it
+%% is not safe. Safe relative paths can be joined with an absolute path
+%% and will result in a subdirectory of the absolute path.
+safe_relative_path("/" ++ _) ->
+ undefined;
+safe_relative_path(P) ->
+ safe_relative_path(P, []).
+
+safe_relative_path("", Acc) ->
+ case Acc of
+ [] ->
+ "";
+ _ ->
+ string:join(lists:reverse(Acc), "/")
+ end;
+safe_relative_path(P, Acc) ->
+ case partition(P, "/") of
+ {"", "/", _} ->
+ %% /foo or foo//bar
+ undefined;
+ {"..", _, _} when Acc =:= [] ->
+ undefined;
+ {"..", _, Rest} ->
+ safe_relative_path(Rest, tl(Acc));
+ {Part, "/", ""} ->
+ safe_relative_path("", ["", Part | Acc]);
+ {Part, _, Rest} ->
+ safe_relative_path(Rest, [Part | Acc])
+ end.
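+%% Examples (mirroring the unit tests below):
+%%   safe_relative_path("foo/../bar") -> "bar"
+%%   safe_relative_path("../foo")     -> undefined
+%%   safe_relative_path("foo//")      -> undefined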
+
+%% @spec shell_quote(string()) -> string()
+%% @doc Quote a string according to UNIX shell quoting rules, returns a string
+%% surrounded by double quotes.
+shell_quote(L) ->
+ shell_quote(L, [$\"]).
+
+%% @spec cmd_port([string()], Options) -> port()
+%% @doc open_port({spawn, mochiweb_util:cmd_string(Argv)}, Options).
+cmd_port(Argv, Options) ->
+ open_port({spawn, cmd_string(Argv)}, Options).
+
+%% @spec cmd([string()]) -> string()
+%% @doc os:cmd(cmd_string(Argv)).
+cmd(Argv) ->
+ os:cmd(cmd_string(Argv)).
+
+%% @spec cmd_string([string()]) -> string()
+%% @doc Create a shell quoted command string from a list of arguments.
+cmd_string(Argv) ->
+ string:join([shell_quote(X) || X <- Argv], " ").
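+
+%% Example (taken from the unit tests below):
+%%   cmd_string(["echo", "$bling$ `word`!"]) ->
+%%       "\"echo\" \"\\$bling\\$ \\`word\\`!\""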
+
+%% @spec cmd_status([string()]) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from the given command, which is
+%%      spawned with cmd_port/2.
+cmd_status(Argv) ->
+ Port = cmd_port(Argv, [exit_status, stderr_to_stdout,
+ use_stdio, binary]),
+ try cmd_loop(Port, [])
+ after catch port_close(Port)
+ end.
+
+%% @spec cmd_loop(port(), list()) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from a port.
+cmd_loop(Port, Acc) ->
+ receive
+ {Port, {exit_status, Status}} ->
+ {Status, iolist_to_binary(lists:reverse(Acc))};
+ {Port, {data, Data}} ->
+ cmd_loop(Port, [Data | Acc])
+ end.
+
+%% @spec join([iolist()], iolist()) -> iolist()
+%% @doc Join a list of strings or binaries together with the given separator
+%%      string, char, or binary. The output is flattened, but may be an
+%% iolist() instead of a string() if any of the inputs are binary().
+join([], _Separator) ->
+ [];
+join([S], _Separator) ->
+ lists:flatten(S);
+join(Strings, Separator) ->
+ lists:flatten(revjoin(lists:reverse(Strings), Separator, [])).
+
+revjoin([], _Separator, Acc) ->
+ Acc;
+revjoin([S | Rest], Separator, []) ->
+ revjoin(Rest, Separator, [S]);
+revjoin([S | Rest], Separator, Acc) ->
+ revjoin(Rest, Separator, [S, Separator | Acc]).
+
+%% @spec quote_plus(atom() | integer() | float() | string() | binary()) -> string()
+%% @doc URL safe encoding of the given term.
+quote_plus(Atom) when is_atom(Atom) ->
+ quote_plus(atom_to_list(Atom));
+quote_plus(Int) when is_integer(Int) ->
+ quote_plus(integer_to_list(Int));
+quote_plus(Binary) when is_binary(Binary) ->
+ quote_plus(binary_to_list(Binary));
+quote_plus(Float) when is_float(Float) ->
+ quote_plus(mochinum:digits(Float));
+quote_plus(String) ->
+ quote_plus(String, []).
+
+quote_plus([], Acc) ->
+ lists:reverse(Acc);
+quote_plus([C | Rest], Acc) when ?QS_SAFE(C) ->
+ quote_plus(Rest, [C | Acc]);
+quote_plus([$\s | Rest], Acc) ->
+ quote_plus(Rest, [$+ | Acc]);
+quote_plus([C | Rest], Acc) ->
+ <<Hi:4, Lo:4>> = <<C>>,
+ quote_plus(Rest, [hexdigit(Lo), hexdigit(Hi), ?PERCENT | Acc]).
+
+%% @spec urlencode([{Key, Value}]) -> string()
+%% @doc URL encode the property list.
+urlencode(Props) ->
+ Pairs = lists:foldr(
+ fun ({K, V}, Acc) ->
+ [quote_plus(K) ++ "=" ++ quote_plus(V) | Acc]
+ end, [], Props),
+ string:join(Pairs, "&").
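+
+%% Example (from the unit tests below; keys may be atoms, values integers):
+%%   urlencode([{foo, "bar"}, {"baz", "wibble \r\n"}, {z, 1}]) ->
+%%       "foo=bar&baz=wibble+%0D%0A&z=1"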
+
+%% @spec parse_qs(string() | binary()) -> [{Key, Value}]
+%% @doc Parse a query string or application/x-www-form-urlencoded.
+parse_qs(Binary) when is_binary(Binary) ->
+ parse_qs(binary_to_list(Binary));
+parse_qs(String) ->
+ parse_qs(String, []).
+
+parse_qs([], Acc) ->
+ lists:reverse(Acc);
+parse_qs(String, Acc) ->
+ {Key, Rest} = parse_qs_key(String),
+ {Value, Rest1} = parse_qs_value(Rest),
+ parse_qs(Rest1, [{Key, Value} | Acc]).
+
+parse_qs_key(String) ->
+ parse_qs_key(String, []).
+
+parse_qs_key([], Acc) ->
+ {qs_revdecode(Acc), ""};
+parse_qs_key([$= | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key(Rest=[$; | _], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key(Rest=[$& | _], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key([C | Rest], Acc) ->
+ parse_qs_key(Rest, [C | Acc]).
+
+parse_qs_value(String) ->
+ parse_qs_value(String, []).
+
+parse_qs_value([], Acc) ->
+ {qs_revdecode(Acc), ""};
+parse_qs_value([$; | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_value([$& | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_value([C | Rest], Acc) ->
+ parse_qs_value(Rest, [C | Acc]).
+
+%% @spec unquote(string() | binary()) -> string()
+%% @doc Unquote a URL encoded string.
+unquote(Binary) when is_binary(Binary) ->
+ unquote(binary_to_list(Binary));
+unquote(String) ->
+ qs_revdecode(lists:reverse(String)).
+
+qs_revdecode(S) ->
+ qs_revdecode(S, []).
+
+qs_revdecode([], Acc) ->
+ Acc;
+qs_revdecode([$+ | Rest], Acc) ->
+ qs_revdecode(Rest, [$\s | Acc]);
+qs_revdecode([Lo, Hi, ?PERCENT | Rest], Acc) when ?IS_HEX(Lo), ?IS_HEX(Hi) ->
+ qs_revdecode(Rest, [(unhexdigit(Lo) bor (unhexdigit(Hi) bsl 4)) | Acc]);
+qs_revdecode([C | Rest], Acc) ->
+ qs_revdecode(Rest, [C | Acc]).
+
+%% @spec urlsplit(Url) -> {Scheme, Netloc, Path, Query, Fragment}
+%% @doc Return a 5-tuple, does not expand % escapes. Only supports HTTP style
+%% URLs.
+urlsplit(Url) ->
+ {Scheme, Url1} = urlsplit_scheme(Url),
+ {Netloc, Url2} = urlsplit_netloc(Url1),
+ {Path, Query, Fragment} = urlsplit_path(Url2),
+ {Scheme, Netloc, Path, Query, Fragment}.
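+
+%% Example (from the unit tests below):
+%%   urlsplit("http://host:port/foo#bar?baz") ->
+%%       {"http", "host:port", "/foo", "", "bar?baz"}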
+
+urlsplit_scheme(Url) ->
+ case urlsplit_scheme(Url, []) of
+ no_scheme ->
+ {"", Url};
+ Res ->
+ Res
+ end.
+
+urlsplit_scheme([C | Rest], Acc) when ((C >= $a andalso C =< $z) orelse
+ (C >= $A andalso C =< $Z) orelse
+ (C >= $0 andalso C =< $9) orelse
+ C =:= $+ orelse C =:= $- orelse
+ C =:= $.) ->
+ urlsplit_scheme(Rest, [C | Acc]);
+urlsplit_scheme([$: | Rest], Acc=[_ | _]) ->
+ {string:to_lower(lists:reverse(Acc)), Rest};
+urlsplit_scheme(_Rest, _Acc) ->
+ no_scheme.
+
+urlsplit_netloc("//" ++ Rest) ->
+ urlsplit_netloc(Rest, []);
+urlsplit_netloc(Path) ->
+ {"", Path}.
+
+urlsplit_netloc("", Acc) ->
+ {lists:reverse(Acc), ""};
+urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
+ {lists:reverse(Acc), Rest};
+urlsplit_netloc([C | Rest], Acc) ->
+ urlsplit_netloc(Rest, [C | Acc]).
+
+
+%% @spec path_split(string()) -> {Part, Rest}
+%% @doc Split a path starting from the left, as in URL traversal.
+%% path_split("foo/bar") = {"foo", "bar"},
+%% path_split("/foo/bar") = {"", "foo/bar"}.
+path_split(S) ->
+ path_split(S, []).
+
+path_split("", Acc) ->
+ {lists:reverse(Acc), ""};
+path_split("/" ++ Rest, Acc) ->
+ {lists:reverse(Acc), Rest};
+path_split([C | Rest], Acc) ->
+ path_split(Rest, [C | Acc]).
+
+
+%% @spec urlunsplit({Scheme, Netloc, Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL from the 5-tuple. Path must be absolute.
+urlunsplit({Scheme, Netloc, Path, Query, Fragment}) ->
+ lists:flatten([case Scheme of "" -> ""; _ -> [Scheme, "://"] end,
+ Netloc,
+ urlunsplit_path({Path, Query, Fragment})]).
+
+%% @spec urlunsplit_path({Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL path from the 3-tuple.
+urlunsplit_path({Path, Query, Fragment}) ->
+ lists:flatten([Path,
+ case Query of "" -> ""; _ -> [$? | Query] end,
+ case Fragment of "" -> ""; _ -> [$# | Fragment] end]).
+
+%% @spec urlsplit_path(Url) -> {Path, Query, Fragment}
+%% @doc Return a 3-tuple, does not expand % escapes. Only supports HTTP style
+%% paths.
+urlsplit_path(Path) ->
+ urlsplit_path(Path, []).
+
+urlsplit_path("", Acc) ->
+ {lists:reverse(Acc), "", ""};
+urlsplit_path("?" ++ Rest, Acc) ->
+ {Query, Fragment} = urlsplit_query(Rest),
+ {lists:reverse(Acc), Query, Fragment};
+urlsplit_path("#" ++ Rest, Acc) ->
+ {lists:reverse(Acc), "", Rest};
+urlsplit_path([C | Rest], Acc) ->
+ urlsplit_path(Rest, [C | Acc]).
+
+urlsplit_query(Query) ->
+ urlsplit_query(Query, []).
+
+urlsplit_query("", Acc) ->
+ {lists:reverse(Acc), ""};
+urlsplit_query("#" ++ Rest, Acc) ->
+ {lists:reverse(Acc), Rest};
+urlsplit_query([C | Rest], Acc) ->
+ urlsplit_query(Rest, [C | Acc]).
+
+%% @spec guess_mime(string()) -> string()
+%% @doc Guess the mime type of a file by the extension of its filename.
+guess_mime(File) ->
+ case mochiweb_mime:from_extension(filename:extension(File)) of
+ undefined ->
+ "text/plain";
+ Mime ->
+ Mime
+ end.
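+%% Examples (matching the unit tests below):
+%%   guess_mime("x.zip")  -> "application/zip"
+%%   guess_mime("x.html") -> "text/html"
+%%   guess_mime("")       -> "text/plain"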
+
+%% @spec parse_header(string()) -> {Type, [{K, V}]}
+%% @doc Parse a Content-Type like header, return the main Content-Type
+%% and a property list of options.
+parse_header(String) ->
+ %% TODO: This is exactly as broken as Python's cgi module.
+ %% Should parse properly like mochiweb_cookies.
+ [Type | Parts] = [string:strip(S) || S <- string:tokens(String, ";")],
+ F = fun (S, Acc) ->
+ case lists:splitwith(fun (C) -> C =/= $= end, S) of
+ {"", _} ->
+ %% Skip anything with no name
+ Acc;
+ {_, ""} ->
+ %% Skip anything with no value
+ Acc;
+ {Name, [$\= | Value]} ->
+ [{string:to_lower(string:strip(Name)),
+ unquote_header(string:strip(Value))} | Acc]
+ end
+ end,
+ {string:to_lower(Type),
+ lists:foldr(F, [], Parts)}.
+
+unquote_header("\"" ++ Rest) ->
+ unquote_header(Rest, []);
+unquote_header(S) ->
+ S.
+
+unquote_header("", Acc) ->
+ lists:reverse(Acc);
+unquote_header("\"", Acc) ->
+ lists:reverse(Acc);
+unquote_header([$\\, C | Rest], Acc) ->
+ unquote_header(Rest, [C | Acc]);
+unquote_header([C | Rest], Acc) ->
+ unquote_header(Rest, [C | Acc]).
+
+%% @spec record_to_proplist(Record, Fields) -> proplist()
+%% @doc Calls record_to_proplist/3 with a default TypeKey of '__record'.
+record_to_proplist(Record, Fields) ->
+ record_to_proplist(Record, Fields, '__record').
+
+%% @spec record_to_proplist(Record, Fields, TypeKey) -> proplist()
+%% @doc Return a proplist of the given Record with each field in the
+%% Fields list set as a key with the corresponding value in the Record.
+%%      TypeKey is the key that is used to store the record type.
+%%      Fields should be obtained by calling record_info(fields, record_type),
+%%      where record_type is the record type of Record.
+record_to_proplist(Record, Fields, TypeKey)
+ when tuple_size(Record) - 1 =:= length(Fields) ->
+ lists:zip([TypeKey | Fields], tuple_to_list(Record)).
+
+
+shell_quote([], Acc) ->
+ lists:reverse([$\" | Acc]);
+shell_quote([C | Rest], Acc) when C =:= $\" orelse C =:= $\` orelse
+ C =:= $\\ orelse C =:= $\$ ->
+ shell_quote(Rest, [C, $\\ | Acc]);
+shell_quote([C | Rest], Acc) ->
+ shell_quote(Rest, [C | Acc]).
+
+%% @spec parse_qvalues(string()) -> [qvalue()] | invalid_qvalue_string
+%% @type qvalue() = {media_type() | encoding() , float()}.
+%% @type media_type() = string().
+%% @type encoding() = string().
+%%
+%% @doc Parses a list (given as a string) of elements with Q values associated
+%%      to them. Elements are separated by commas and each element is separated
+%%      from its Q value by a semicolon. Q values are optional; when missing,
+%%      an element's Q value defaults to 1.0. A Q value is always in the
+%%      range [0.0, 1.0]. A Q value list is used, for example, as the value of
+%%      the HTTP "Accept" and "Accept-Encoding" headers.
+%%
+%% Q values are described in section 3.9 of RFC 2616 (HTTP 1.1).
+%%
+%% Example:
+%%
+%% parse_qvalues("gzip; q=0.5, deflate, identity;q=0.0") ->
+%% [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}]
+%%
+parse_qvalues(QValuesStr) ->
+ try
+ lists:map(
+ fun(Pair) ->
+ [Type | Params] = string:tokens(Pair, ";"),
+ NormParams = normalize_media_params(Params),
+ {Q, NonQParams} = extract_q(NormParams),
+ {string:join([string:strip(Type) | NonQParams], ";"), Q}
+ end,
+ string:tokens(string:to_lower(QValuesStr), ",")
+ )
+ catch
+ _Type:_Error ->
+ invalid_qvalue_string
+ end.
+
+normalize_media_params(Params) ->
+ {ok, Re} = re:compile("\\s"),
+ normalize_media_params(Re, Params, []).
+
+normalize_media_params(_Re, [], Acc) ->
+ lists:reverse(Acc);
+normalize_media_params(Re, [Param | Rest], Acc) ->
+ NormParam = re:replace(Param, Re, "", [global, {return, list}]),
+ normalize_media_params(Re, Rest, [NormParam | Acc]).
+
+extract_q(NormParams) ->
+ {ok, KVRe} = re:compile("^([^=]+)=([^=]+)$"),
+ {ok, QRe} = re:compile("^((?:0|1)(?:\\.\\d{1,3})?)$"),
+ extract_q(KVRe, QRe, NormParams, []).
+
+extract_q(_KVRe, _QRe, [], Acc) ->
+ {1.0, lists:reverse(Acc)};
+extract_q(KVRe, QRe, [Param | Rest], Acc) ->
+ case re:run(Param, KVRe, [{capture, [1, 2], list}]) of
+ {match, [Name, Value]} ->
+ case Name of
+ "q" ->
+ {match, [Q]} = re:run(Value, QRe, [{capture, [1], list}]),
+ QVal = case Q of
+ "0" ->
+ 0.0;
+ "1" ->
+ 1.0;
+ Else ->
+ list_to_float(Else)
+ end,
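+                    %% A Q value outside [0.0, 1.0] matches no clause below;
+                    %% the resulting case_clause error is caught in
+                    %% parse_qvalues/1 and reported as invalid_qvalue_string.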
+ case QVal < 0.0 orelse QVal > 1.0 of
+ false ->
+ {QVal, lists:reverse(Acc) ++ Rest}
+ end;
+ _ ->
+ extract_q(KVRe, QRe, Rest, [Param | Acc])
+ end
+ end.
+
+%% @spec pick_accepted_encodings([qvalue()], [encoding()], encoding()) ->
+%% [encoding()]
+%%
+%% @doc Determines which encodings specified in the given Q values list are
+%% valid according to a list of supported encodings and a default encoding.
+%%
+%% The returned list of encodings is sorted in descending order of the
+%%      Q values of the given list. The last element of this list is the given
+%%      default encoding unless this encoding is explicitly or implicitly
+%%      marked with a Q value of 0.0 in the given Q values list.
+%% Note: encodings with the same Q value are kept in the same order as
+%% found in the input Q values list.
+%%
+%% This encoding picking process is described in section 14.3 of the
+%% RFC 2616 (HTTP 1.1).
+%%
+%% Example:
+%%
+%% pick_accepted_encodings(
+%% [{"gzip", 0.5}, {"deflate", 1.0}],
+%% ["gzip", "identity"],
+%% "identity"
+%% ) ->
+%% ["gzip", "identity"]
+%%
+pick_accepted_encodings(AcceptedEncs, SupportedEncs, DefaultEnc) ->
+ SortedQList = lists:reverse(
+ lists:sort(fun({_, Q1}, {_, Q2}) -> Q1 < Q2 end, AcceptedEncs)
+ ),
+ {Accepted, Refused} = lists:foldr(
+ fun({E, Q}, {A, R}) ->
+ case Q > 0.0 of
+ true ->
+ {[E | A], R};
+ false ->
+ {A, [E | R]}
+ end
+ end,
+ {[], []},
+ SortedQList
+ ),
+ Refused1 = lists:foldr(
+ fun(Enc, Acc) ->
+ case Enc of
+ "*" ->
+ lists:subtract(SupportedEncs, Accepted) ++ Acc;
+ _ ->
+ [Enc | Acc]
+ end
+ end,
+ [],
+ Refused
+ ),
+ Accepted1 = lists:foldr(
+ fun(Enc, Acc) ->
+ case Enc of
+ "*" ->
+ lists:subtract(SupportedEncs, Accepted ++ Refused1) ++ Acc;
+ _ ->
+ [Enc | Acc]
+ end
+ end,
+ [],
+ Accepted
+ ),
+ Accepted2 = case lists:member(DefaultEnc, Accepted1) of
+ true ->
+ Accepted1;
+ false ->
+ Accepted1 ++ [DefaultEnc]
+ end,
+ [E || E <- Accepted2, lists:member(E, SupportedEncs),
+ not lists:member(E, Refused1)].
+
+make_io(Atom) when is_atom(Atom) ->
+ atom_to_list(Atom);
+make_io(Integer) when is_integer(Integer) ->
+ integer_to_list(Integer);
+make_io(Io) when is_list(Io); is_binary(Io) ->
+ Io.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+make_io_test() ->
+ ?assertEqual(
+ <<"atom">>,
+ iolist_to_binary(make_io(atom))),
+ ?assertEqual(
+ <<"20">>,
+ iolist_to_binary(make_io(20))),
+ ?assertEqual(
+ <<"list">>,
+ iolist_to_binary(make_io("list"))),
+ ?assertEqual(
+ <<"binary">>,
+ iolist_to_binary(make_io(<<"binary">>))),
+ ok.
+
+-record(test_record, {field1=f1, field2=f2}).
+record_to_proplist_test() ->
+ ?assertEqual(
+ [{'__record', test_record},
+ {field1, f1},
+ {field2, f2}],
+ record_to_proplist(#test_record{}, record_info(fields, test_record))),
+ ?assertEqual(
+ [{'typekey', test_record},
+ {field1, f1},
+ {field2, f2}],
+ record_to_proplist(#test_record{},
+ record_info(fields, test_record),
+ typekey)),
+ ok.
+
+shell_quote_test() ->
+ ?assertEqual(
+ "\"foo \\$bar\\\"\\`' baz\"",
+ shell_quote("foo $bar\"`' baz")),
+ ok.
+
+cmd_port_test_spool(Port, Acc) ->
+ receive
+ {Port, eof} ->
+ Acc;
+ {Port, {data, {eol, Data}}} ->
+ cmd_port_test_spool(Port, ["\n", Data | Acc]);
+ {Port, Unknown} ->
+ throw({unknown, Unknown})
+ after 100 ->
+ throw(timeout)
+ end.
+
+cmd_port_test() ->
+ Port = cmd_port(["echo", "$bling$ `word`!"],
+ [eof, stream, {line, 4096}]),
+ Res = try lists:append(lists:reverse(cmd_port_test_spool(Port, [])))
+ after catch port_close(Port)
+ end,
+ self() ! {Port, wtf},
+ try cmd_port_test_spool(Port, [])
+ catch throw:{unknown, wtf} -> ok
+ end,
+ try cmd_port_test_spool(Port, [])
+ catch throw:timeout -> ok
+ end,
+ ?assertEqual(
+ "$bling$ `word`!\n",
+ Res).
+
+cmd_test() ->
+ ?assertEqual(
+ "$bling$ `word`!\n",
+ cmd(["echo", "$bling$ `word`!"])),
+ ok.
+
+cmd_string_test() ->
+ ?assertEqual(
+ "\"echo\" \"\\$bling\\$ \\`word\\`!\"",
+ cmd_string(["echo", "$bling$ `word`!"])),
+ ok.
+
+cmd_status_test() ->
+ ?assertEqual(
+ {0, <<"$bling$ `word`!\n">>},
+ cmd_status(["echo", "$bling$ `word`!"])),
+ ok.
+
+
+parse_header_test() ->
+ ?assertEqual(
+ {"multipart/form-data", [{"boundary", "AaB03x"}]},
+ parse_header("multipart/form-data; boundary=AaB03x")),
+ %% This tests (currently) intentionally broken behavior
+ ?assertEqual(
+ {"multipart/form-data",
+ [{"b", ""},
+ {"cgi", "is"},
+ {"broken", "true\"e"}]},
+ parse_header("multipart/form-data;b=;cgi=\"i\\s;broken=true\"e;=z;z")),
+ ok.
+
+guess_mime_test() ->
+ "text/plain" = guess_mime(""),
+ "text/plain" = guess_mime(".text"),
+ "application/zip" = guess_mime(".zip"),
+ "application/zip" = guess_mime("x.zip"),
+ "text/html" = guess_mime("x.html"),
+ "application/xhtml+xml" = guess_mime("x.xhtml"),
+ ok.
+
+path_split_test() ->
+ {"", "foo/bar"} = path_split("/foo/bar"),
+ {"foo", "bar"} = path_split("foo/bar"),
+ {"bar", ""} = path_split("bar"),
+ ok.
+
+urlsplit_test() ->
+ {"", "", "/foo", "", "bar?baz"} = urlsplit("/foo#bar?baz"),
+ {"http", "host:port", "/foo", "", "bar?baz"} =
+ urlsplit("http://host:port/foo#bar?baz"),
+ {"http", "host", "", "", ""} = urlsplit("http://host"),
+ {"", "", "/wiki/Category:Fruit", "", ""} =
+ urlsplit("/wiki/Category:Fruit"),
+ ok.
+
+urlsplit_path_test() ->
+ {"/foo/bar", "", ""} = urlsplit_path("/foo/bar"),
+ {"/foo", "baz", ""} = urlsplit_path("/foo?baz"),
+ {"/foo", "", "bar?baz"} = urlsplit_path("/foo#bar?baz"),
+ {"/foo", "", "bar?baz#wibble"} = urlsplit_path("/foo#bar?baz#wibble"),
+ {"/foo", "bar", "baz"} = urlsplit_path("/foo?bar#baz"),
+ {"/foo", "bar?baz", "baz"} = urlsplit_path("/foo?bar?baz#baz"),
+ ok.
+
+urlunsplit_test() ->
+ "/foo#bar?baz" = urlunsplit({"", "", "/foo", "", "bar?baz"}),
+ "http://host:port/foo#bar?baz" =
+ urlunsplit({"http", "host:port", "/foo", "", "bar?baz"}),
+ ok.
+
+urlunsplit_path_test() ->
+ "/foo/bar" = urlunsplit_path({"/foo/bar", "", ""}),
+ "/foo?baz" = urlunsplit_path({"/foo", "baz", ""}),
+ "/foo#bar?baz" = urlunsplit_path({"/foo", "", "bar?baz"}),
+ "/foo#bar?baz#wibble" = urlunsplit_path({"/foo", "", "bar?baz#wibble"}),
+ "/foo?bar#baz" = urlunsplit_path({"/foo", "bar", "baz"}),
+ "/foo?bar?baz#baz" = urlunsplit_path({"/foo", "bar?baz", "baz"}),
+ ok.
+
+join_test() ->
+ ?assertEqual("foo,bar,baz",
+ join(["foo", "bar", "baz"], $,)),
+ ?assertEqual("foo,bar,baz",
+ join(["foo", "bar", "baz"], ",")),
+ ?assertEqual("foo bar",
+ join([["foo", " bar"]], ",")),
+ ?assertEqual("foo bar,baz",
+ join([["foo", " bar"], "baz"], ",")),
+ ?assertEqual("foo",
+ join(["foo"], ",")),
+ ?assertEqual("foobarbaz",
+ join(["foo", "bar", "baz"], "")),
+ ?assertEqual("foo" ++ [<<>>] ++ "bar" ++ [<<>>] ++ "baz",
+ join(["foo", "bar", "baz"], <<>>)),
+ ?assertEqual("foobar" ++ [<<"baz">>],
+ join(["foo", "bar", <<"baz">>], "")),
+ ?assertEqual("",
+ join([], "any")),
+ ok.
+
+quote_plus_test() ->
+ "foo" = quote_plus(foo),
+ "1" = quote_plus(1),
+ "1.1" = quote_plus(1.1),
+ "foo" = quote_plus("foo"),
+ "foo+bar" = quote_plus("foo bar"),
+ "foo%0A" = quote_plus("foo\n"),
+ "foo%0A" = quote_plus("foo\n"),
+ "foo%3B%26%3D" = quote_plus("foo;&="),
+ "foo%3B%26%3D" = quote_plus(<<"foo;&=">>),
+ ok.
+
+unquote_test() ->
+ ?assertEqual("foo bar",
+ unquote("foo+bar")),
+ ?assertEqual("foo bar",
+ unquote("foo%20bar")),
+ ?assertEqual("foo\r\n",
+ unquote("foo%0D%0A")),
+ ?assertEqual("foo\r\n",
+ unquote(<<"foo%0D%0A">>)),
+ ok.
+
+urlencode_test() ->
+ "foo=bar&baz=wibble+%0D%0A&z=1" = urlencode([{foo, "bar"},
+ {"baz", "wibble \r\n"},
+ {z, 1}]),
+ ok.
+
+parse_qs_test() ->
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
+ parse_qs("foo=bar&baz=wibble+%0D%0a&z=1")),
+ ?assertEqual(
+ [{"", "bar"}, {"baz", "wibble \r\n"}, {"z", ""}],
+ parse_qs("=bar&baz=wibble+%0D%0a&z=")),
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
+ parse_qs(<<"foo=bar&baz=wibble+%0D%0a&z=1">>)),
+ ?assertEqual(
+ [],
+ parse_qs("")),
+ ?assertEqual(
+ [{"foo", ""}, {"bar", ""}, {"baz", ""}],
+ parse_qs("foo;bar&baz")),
+ ok.
+
+partition_test() ->
+ {"foo", "", ""} = partition("foo", "/"),
+ {"foo", "/", "bar"} = partition("foo/bar", "/"),
+ {"foo", "/", ""} = partition("foo/", "/"),
+ {"", "/", "bar"} = partition("/bar", "/"),
+ {"f", "oo/ba", "r"} = partition("foo/bar", "oo/ba"),
+ ok.
+
+safe_relative_path_test() ->
+ "foo" = safe_relative_path("foo"),
+ "foo/" = safe_relative_path("foo/"),
+ "foo" = safe_relative_path("foo/bar/.."),
+ "bar" = safe_relative_path("foo/../bar"),
+ "bar/" = safe_relative_path("foo/../bar/"),
+ "" = safe_relative_path("foo/.."),
+ "" = safe_relative_path("foo/../"),
+ undefined = safe_relative_path("/foo"),
+ undefined = safe_relative_path("../foo"),
+ undefined = safe_relative_path("foo/../.."),
+ undefined = safe_relative_path("foo//"),
+ ok.
+
+parse_qvalues_test() ->
+ [] = parse_qvalues(""),
+ [{"identity", 0.0}] = parse_qvalues("identity;q=0"),
+ [{"identity", 0.0}] = parse_qvalues("identity ;q=0"),
+ [{"identity", 0.0}] = parse_qvalues(" identity; q =0 "),
+ [{"identity", 0.0}] = parse_qvalues("identity ; q = 0"),
+ [{"identity", 0.0}] = parse_qvalues("identity ; q= 0.0"),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip,deflate,identity;q=0.0"
+ ),
+ [{"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "deflate,gzip,identity;q=0.0"
+ ),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] =
+ parse_qvalues("gzip,deflate,gzip,identity;q=0"),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip, deflate , identity; q=0.0"
+ ),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=1, deflate;q=1.0, identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate;q=1.0, identity;q=0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate , identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 0.8}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate;q=0.8, identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}] = parse_qvalues(
+ "gzip; q=0.5,deflate,identity"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}, {"identity", 1.0}] =
+ parse_qvalues("gzip; q=0.5,deflate,identity, identity "),
+ [{"text/html;level=1", 1.0}, {"text/plain", 0.5}] =
+ parse_qvalues("text/html;level=1, text/plain;q=0.5"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html;level=1;q=0.3, text/plain"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html; level = 1; q = 0.3, text/plain"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html;q=0.3;level=1, text/plain"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=1.1, deflate"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=0.5, deflate;q=2"),
+ invalid_qvalue_string = parse_qvalues("gzip, deflate;q=AB"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=2.1, deflate"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=0.1234, deflate"),
+ invalid_qvalue_string = parse_qvalues("text/html;level=1;q=0.3, text/html;level"),
+ ok.
+
+pick_accepted_encodings_test() ->
+ ["identity"] = pick_accepted_encodings(
+ [],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["identity"] = pick_accepted_encodings(
+ [{"gzip", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.5}, {"deflate", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["identity"] = pick_accepted_encodings(
+ [{"gzip", 0.0}, {"deflate", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}, {"gzip", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 0.9}, {"gzip", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ [] = pick_accepted_encodings(
+ [{"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"*", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"*", 0.6}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 0.6}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip"] = pick_accepted_encodings(
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"deflate", 0.0}, {"*", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"*", 1.0}, {"deflate", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ok.
+
+-endif.
diff --git a/1.1.x/src/mochiweb/reloader.erl b/1.1.x/src/mochiweb/reloader.erl
new file mode 100644
index 00000000..c0f5de88
--- /dev/null
+++ b/1.1.x/src/mochiweb/reloader.erl
@@ -0,0 +1,161 @@
+%% @copyright 2007 Mochi Media, Inc.
+%% @author Matthew Dempsky <matthew@mochimedia.com>
+%%
+%% @doc Erlang module for automatically reloading modified modules
+%% during development.
+
+-module(reloader).
+-author("Matthew Dempsky <matthew@mochimedia.com>").
+
+-include_lib("kernel/include/file.hrl").
+
+-behaviour(gen_server).
+-export([start/0, start_link/0]).
+-export([stop/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
+-export([all_changed/0]).
+-export([is_changed/1]).
+-export([reload_modules/1]).
+-record(state, {last, tref}).
+
+%% External API
+
+%% @spec start() -> ServerRet
+%% @doc Start the reloader.
+start() ->
+ gen_server:start({local, ?MODULE}, ?MODULE, [], []).
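+
+%% A typical development session (the shell transcript is illustrative):
+%%   1> reloader:start().
+%%   {ok, <0.42.0>}
+%% From then on, recompiled modules are picked up within about a second.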
+
+%% @spec start_link() -> ServerRet
+%% @doc Start the reloader.
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%% @spec stop() -> ok
+%% @doc Stop the reloader.
+stop() ->
+ gen_server:call(?MODULE, stop).
+
+%% gen_server callbacks
+
+%% @spec init([]) -> {ok, State}
+%% @doc gen_server init callback; starts the periodic check timer and sets
+%%      the initial state.
+init([]) ->
+ {ok, TRef} = timer:send_interval(timer:seconds(1), doit),
+ {ok, #state{last = stamp(), tref = TRef}}.
+
+%% @spec handle_call(Args, From, State) -> tuple()
+%% @doc gen_server callback.
+handle_call(stop, _From, State) ->
+ {stop, shutdown, stopped, State};
+handle_call(_Req, _From, State) ->
+ {reply, {error, badrequest}, State}.
+
+%% @spec handle_cast(Cast, State) -> tuple()
+%% @doc gen_server callback.
+handle_cast(_Req, State) ->
+ {noreply, State}.
+
+%% @spec handle_info(Info, State) -> tuple()
+%% @doc gen_server callback.
+handle_info(doit, State) ->
+ Now = stamp(),
+ doit(State#state.last, Now),
+ {noreply, State#state{last = Now}};
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%% @spec terminate(Reason, State) -> ok
+%% @doc gen_server termination callback.
+terminate(_Reason, State) ->
+ {ok, cancel} = timer:cancel(State#state.tref),
+ ok.
+
+
+%% @spec code_change(_OldVsn, State, _Extra) -> {ok, State}
+%% @doc gen_server code_change callback (trivial).
+code_change(_Vsn, State, _Extra) ->
+ {ok, State}.
+
+%% @spec reload_modules([atom()]) -> [{module, atom()} | {error, term()}]
+%% @doc Purge and reload each of the given modules in order via code:purge/1
+%%      and code:load_file/1; returns the results of code:load_file/1.
+reload_modules(Modules) ->
+ [begin code:purge(M), code:load_file(M) end || M <- Modules].
+
+%% @spec all_changed() -> [atom()]
+%% @doc Return a list of beam modules that have changed.
+all_changed() ->
+ [M || {M, Fn} <- code:all_loaded(), is_list(Fn), is_changed(M)].
+
+%% @spec is_changed(atom()) -> boolean()
+%% @doc Returns true if the loaded module is a beam with a vsn attribute
+%%      that does not match the on-disk beam file; returns false otherwise.
+is_changed(M) ->
+ try
+ module_vsn(M:module_info()) =/= module_vsn(code:get_object_code(M))
+ catch _:_ ->
+ false
+ end.
+
+%% Internal API
+
+module_vsn({M, Beam, _Fn}) ->
+ {ok, {M, Vsn}} = beam_lib:version(Beam),
+ Vsn;
+module_vsn(L) when is_list(L) ->
+ {_, Attrs} = lists:keyfind(attributes, 1, L),
+ {_, Vsn} = lists:keyfind(vsn, 1, Attrs),
+ Vsn.
+
+doit(From, To) ->
+ [case file:read_file_info(Filename) of
+ {ok, #file_info{mtime = Mtime}} when Mtime >= From, Mtime < To ->
+ reload(Module);
+ {ok, _} ->
+ unmodified;
+ {error, enoent} ->
+ %% The Erlang compiler deletes existing .beam files if
+ %% recompiling fails. Maybe it's worth spitting out a
+ %% warning here, but I'd want to limit it to just once.
+ gone;
+ {error, Reason} ->
+ io:format("Error reading ~s's file info: ~p~n",
+ [Filename, Reason]),
+ error
+ end || {Module, Filename} <- code:all_loaded(), is_list(Filename)].
+
+reload(Module) ->
+ io:format("Reloading ~p ...", [Module]),
+ code:purge(Module),
+ case code:load_file(Module) of
+ {module, Module} ->
+ io:format(" ok.~n"),
+ case erlang:function_exported(Module, test, 0) of
+ true ->
+ io:format(" - Calling ~p:test() ...", [Module]),
+ case catch Module:test() of
+ ok ->
+ io:format(" ok.~n"),
+ reload;
+ Reason ->
+ io:format(" fail: ~p.~n", [Reason]),
+ reload_but_test_failed
+ end;
+ false ->
+ reload
+ end;
+ {error, Reason} ->
+ io:format(" fail: ~p.~n", [Reason]),
+ error
+ end.
+
+
+stamp() ->
+ erlang:localtime().
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/1.1.x/test/Makefile.am b/1.1.x/test/Makefile.am
new file mode 100644
index 00000000..45130a64
--- /dev/null
+++ b/1.1.x/test/Makefile.am
@@ -0,0 +1,14 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+SUBDIRS = bench etap javascript view_server
+
diff --git a/1.1.x/test/bench/Makefile.am b/1.1.x/test/bench/Makefile.am
new file mode 100644
index 00000000..ce39c4b0
--- /dev/null
+++ b/1.1.x/test/bench/Makefile.am
@@ -0,0 +1,22 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+EXTRA_DIST = benchbulk.sh bench_marks.js run.tpl
+
+noinst_SCRIPTS = run
+CLEANFILES = run
+
+run: run.tpl
+ sed -e "s|%abs_top_srcdir%|$(abs_top_srcdir)|" \
+ -e "s|%abs_top_builddir%|$(abs_top_builddir)|" \
+ < $< > $@
+ chmod +x $@
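+
+## The sed rule above substitutes the configured source and build tree
+## paths for the %abs_top_srcdir% and %abs_top_builddir% markers in run.tpl.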
diff --git a/1.1.x/test/bench/bench_marks.js b/1.1.x/test/bench/bench_marks.js
new file mode 100644
index 00000000..4025adbb
--- /dev/null
+++ b/1.1.x/test/bench/bench_marks.js
@@ -0,0 +1,103 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var NUM_DOCS = 2000;
+var NUM_BATCHES = 20;
+
+var init = function() {
+ var db = new CouchDB("bench_mark_db", {"X-Couch-Full-Commit": "false"});
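+  // "X-Couch-Full-Commit: false" asks CouchDB to defer per-request fsync,
+  // so the figures below measure write throughput rather than disk syncs.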
+ db.deleteDb();
+ db.createDb();
+ return db;
+};
+
+var timeit = function(func) {
+ var startTime = (new Date()).getTime();
+ func();
+ return ((new Date()).getTime() - startTime) / 1000;
+};
+
+var report = function(name, rate) {
+ rate = Math.round(parseFloat(rate) * 100) / 100;
+ console.log("" + name + ": " + rate + " docs/second");
+};
+
+var makeDocs = function(n) {
+  var docs = [];
+  for (var i=0; i < n; i++) {
+    docs.push({"foo":"bar"});
+  }
+ return docs;
+};
+
+var couchTests = {};
+
+couchTests.single_doc_insert = function() {
+ var db = init();
+ var len = timeit(function() {
+ for(var i = 0; i < NUM_DOCS; i++) {
+ db.save({"foo": "bar"});
+ }
+ });
+ report("Single doc inserts", NUM_DOCS/len);
+};
+
+couchTests.batch_ok_doc_insert = function() {
+ var db = init();
+ var len = timeit(function() {
+ for(var i = 0; i < NUM_DOCS; i++) {
+ db.save({"foo":"bar"}, {"batch":"ok"});
+ }
+ });
+ report("Single doc inserts with batch=ok", NUM_DOCS/len);
+};
+
+couchTests.bulk_doc_100 = function() {
+ var db = init();
+ var len = timeit(function() {
+ for(var i = 0; i < NUM_BATCHES; i++) {
+ db.bulkSave(makeDocs(100));
+ }
+ });
+ report("Bulk docs - 100", (NUM_BATCHES*100)/len);
+};
+
+couchTests.bulk_doc_1000 = function() {
+ var db = init();
+ var len = timeit(function() {
+ for(var i = 0; i < NUM_BATCHES; i++) {
+ db.bulkSave(makeDocs(1000));
+ }
+ });
+ report("Bulk docs - 1000", (NUM_BATCHES*1000)/len);
+};
+
+couchTests.bulk_doc_5000 = function() {
+ var db = init();
+ var len = timeit(function() {
+ for(var i = 0; i < NUM_BATCHES; i++) {
+ db.bulkSave(makeDocs(5000));
+ }
+ });
+ report("Bulk docs - 5000", (NUM_BATCHES*5000)/len);
+};
+
+couchTests.bulk_doc_10000 = function() {
+ var db = init();
+ var len = timeit(function() {
+ for(var i = 0; i < NUM_BATCHES; i++) {
+ db.bulkSave(makeDocs(10000));
+ }
+ });
+ report("Bulk docs - 10000", (NUM_BATCHES*10000)/len);
+};
diff --git a/1.1.x/test/bench/benchbulk.sh b/1.1.x/test/bench/benchbulk.sh
new file mode 100755
index 00000000..22804c64
--- /dev/null
+++ b/1.1.x/test/bench/benchbulk.sh
@@ -0,0 +1,69 @@
+#!/bin/sh -e
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+# usage: time benchbulk.sh
+# it takes about 30 seconds to run on my old MacBook with bulksize 1000
+
+BULKSIZE=100
+DOCSIZE=10
+INSERTS=10
+ROUNDS=10
+DBURL="http://127.0.0.1:5984/benchbulk"
+POSTURL="$DBURL/_bulk_docs"
+
+make_bulk_docs() {
+ ROW=0
+ SIZE=$(($1-1))
+ START=$2
+ BODYSIZE=$3
+
+ BODY=$(printf "%0${BODYSIZE}d")
+
+ echo '{"docs":['
+ while [ $ROW -lt $SIZE ]; do
+ printf '{"_id":"%020d", "body":"'$BODY'"},' $(($ROW + $START))
+    ROW=$((ROW + 1))
+ done
+ printf '{"_id":"%020d", "body":"'$BODY'"}' $(($ROW + $START))
+ echo ']}'
+}
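+
+# Illustrative output of `make_bulk_docs 2 0 4` (printf pads the absent
+# argument to "0000"):
+#   {"docs":[
+#   {"_id":"00000000000000000000", "body":"0000"},{"_id":"00000000000000000001", "body":"0000"}
+#   ]}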
+
+echo "Making $INSERTS bulk inserts of $BULKSIZE docs each"
+
+echo "Attempt to delete db at $DBURL"
+curl -X DELETE $DBURL -w\\n
+
+echo "Attempt to create db at $DBURL"
+curl -X PUT $DBURL -w\\n
+
+echo "Running $ROUNDS rounds of $INSERTS concurrent inserts to $POSTURL"
+RUN=0
+while [ $RUN -lt $ROUNDS ]; do
+
+ POSTS=0
+ while [ $POSTS -lt $INSERTS ]; do
+    STARTKEY=$((POSTS * BULKSIZE + RUN * BULKSIZE * INSERTS))
+ echo "startkey $STARTKEY bulksize $BULKSIZE"
+ DOCS=$(make_bulk_docs $BULKSIZE $STARTKEY $DOCSIZE)
+ # echo $DOCS
+ echo $DOCS | curl -T - -X POST $POSTURL -w%{http_code}\ %{time_total}\ sec\\n >/dev/null 2>&1 &
+    POSTS=$((POSTS + 1))
+ done
+
+ echo "waiting"
+ wait
+  RUN=$((RUN + 1))
+done
+
+curl $DBURL -w\\n
diff --git a/1.1.x/test/bench/run.tpl b/1.1.x/test/bench/run.tpl
new file mode 100755
index 00000000..9307863f
--- /dev/null
+++ b/1.1.x/test/bench/run.tpl
@@ -0,0 +1,28 @@
+#!/bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+SRC_DIR=%abs_top_srcdir%
+SCRIPT_DIR=$SRC_DIR/share/www/script
+JS_TEST_DIR=$SRC_DIR/test/javascript
+JS_BENCH_DIR=$SRC_DIR/test/bench
+
+COUCHJS=%abs_top_builddir%/src/couchdb/priv/couchjs
+
+cat $SCRIPT_DIR/json2.js \
+ $SCRIPT_DIR/couch.js \
+ $JS_TEST_DIR/couch_http.js \
+ $JS_BENCH_DIR/bench_marks.js \
+ $JS_TEST_DIR/cli_runner.js \
+ | $COUCHJS -
+
diff --git a/1.1.x/test/etap/001-load.t b/1.1.x/test/etap/001-load.t
new file mode 100755
index 00000000..6f49e1ba
--- /dev/null
+++ b/1.1.x/test/etap/001-load.t
@@ -0,0 +1,68 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Test that we can load each module.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(37),
+ Modules = [
+ couch_btree,
+ couch_config,
+ couch_config_writer,
+ couch_db,
+ couch_db_update_notifier,
+ couch_db_update_notifier_sup,
+ couch_db_updater,
+ couch_doc,
+ couch_event_sup,
+ couch_external_manager,
+ couch_external_server,
+ couch_file,
+ couch_httpd,
+ couch_httpd_db,
+ couch_httpd_external,
+ couch_httpd_misc_handlers,
+ couch_httpd_show,
+ couch_httpd_stats_handlers,
+ couch_httpd_view,
+ couch_key_tree,
+ couch_log,
+ couch_os_process,
+ couch_query_servers,
+ couch_ref_counter,
+ couch_rep,
+ couch_rep_sup,
+ couch_server,
+ couch_server_sup,
+ couch_stats_aggregator,
+ couch_stats_collector,
+ couch_stream,
+ couch_task_status,
+ couch_util,
+ couch_view,
+ couch_view_compactor,
+ couch_view_group,
+ couch_view_updater
+ ],
+
+ lists:foreach(
+ fun(Module) ->
+ etap_can:loaded_ok(
+ Module,
+ lists:concat(["Loaded: ", Module])
+ )
+ end, Modules),
+ etap:end_tests().
diff --git a/1.1.x/test/etap/002-icu-driver.t b/1.1.x/test/etap/002-icu-driver.t
new file mode 100644
index 00000000..d70f3303
--- /dev/null
+++ b/1.1.x/test/etap/002-icu-driver.t
@@ -0,0 +1,33 @@
+#!/usr/bin/env escript
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(3),
+ etap:is(
+ couch_util:start_driver("src/couchdb/priv/.libs"),
+ ok,
+ "Started couch_icu_driver."
+ ),
+ etap:is(
+ couch_util:collate(<<"foo">>, <<"bar">>),
+ 1,
+ "Can collate stuff"
+ ),
+ etap:is(
+ couch_util:collate(<<"A">>, <<"aa">>),
+ -1,
+        "Collates in non-ASCII (ICU) order."
+ ),
+ etap:end_tests().
diff --git a/1.1.x/test/etap/010-file-basics.t b/1.1.x/test/etap/010-file-basics.t
new file mode 100755
index 00000000..ed71f5e8
--- /dev/null
+++ b/1.1.x/test/etap/010-file-basics.t
@@ -0,0 +1,108 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+filename() -> test_util:build_file("test/etap/temp.010").
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(19),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail()
+ end,
+ ok.
+
+test() ->
+ etap:is({error, enoent}, couch_file:open("not a real file"),
+        "Opening a non-existent file should return an enoent error."),
+
+ etap:fun_is(
+ fun({ok, _}) -> true; (_) -> false end,
+ couch_file:open(filename() ++ ".1", [create, invalid_option]),
+ "Invalid flags to open are ignored."
+ ),
+
+ {ok, Fd} = couch_file:open(filename() ++ ".0", [create, overwrite]),
+ etap:ok(is_pid(Fd),
+ "Returned file descriptor is a Pid"),
+
+ etap:is({ok, 0}, couch_file:bytes(Fd),
+ "Newly created files have 0 bytes."),
+
+ etap:is({ok, 0}, couch_file:append_term(Fd, foo),
+ "Appending a term returns the previous end of file position."),
+
+ {ok, Size} = couch_file:bytes(Fd),
+ etap:is_greater(Size, 0,
+ "Writing a term increased the file size."),
+
+ etap:is({ok, Size}, couch_file:append_binary(Fd, <<"fancy!">>),
+ "Appending a binary returns the current file size."),
+
+ etap:is({ok, foo}, couch_file:pread_term(Fd, 0),
+ "Reading the first term returns what we wrote: foo"),
+
+ etap:is({ok, <<"fancy!">>}, couch_file:pread_binary(Fd, Size),
+        "Reading back the binary returns what we wrote: <<\"fancy!\">>."),
+
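+    % (<<131,100,0,3,102,111,111>> is term_to_binary(foo): the external
+    % term format version byte, the ATOM_EXT tag, a 2-byte length, "foo".)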
+ etap:is({ok, <<131, 100, 0, 3, 102, 111, 111>>},
+ couch_file:pread_binary(Fd, 0),
+ "Reading a binary at a term position returns the term as binary."
+ ),
+
+ {ok, BinPos} = couch_file:append_binary(Fd, <<131,100,0,3,102,111,111>>),
+ etap:is({ok, foo}, couch_file:pread_term(Fd, BinPos),
+ "Reading a term from a written binary term representation succeeds."),
+
+ BigBin = list_to_binary(lists:duplicate(100000, 0)),
+ {ok, BigBinPos} = couch_file:append_binary(Fd, BigBin),
+ etap:is({ok, BigBin}, couch_file:pread_binary(Fd, BigBinPos),
+ "Reading a large term from a written representation succeeds."),
+
+ ok = couch_file:write_header(Fd, hello),
+ etap:is({ok, hello}, couch_file:read_header(Fd),
+ "Reading a header succeeds."),
+
+ {ok, BigBinPos2} = couch_file:append_binary(Fd, BigBin),
+ etap:is({ok, BigBin}, couch_file:pread_binary(Fd, BigBinPos2),
+ "Reading a large term from a written representation succeeds 2."),
+
+ % append_binary == append_iolist?
+ % Possible bug in pread_iolist or iolist() -> append_binary
+ {ok, IOLPos} = couch_file:append_binary(Fd, ["foo", $m, <<"bam">>]),
+ {ok, IoList} = couch_file:pread_iolist(Fd, IOLPos),
+ etap:is(<<"foombam">>, iolist_to_binary(IoList),
+        "Reading back results in the binary form of the written iolist()."),
+
+    % XXX: How does one test fsync?
+ etap:is(ok, couch_file:sync(Fd),
+ "Syncing does not cause an error."),
+
+ etap:is(ok, couch_file:truncate(Fd, Size),
+ "Truncating a file succeeds."),
+
+ %etap:is(eof, (catch couch_file:pread_binary(Fd, Size)),
+ % "Reading data that was truncated fails.")
+ etap:skip(fun() -> ok end,
+ "No idea how to test reading beyond EOF"),
+
+ etap:is({ok, foo}, couch_file:pread_term(Fd, 0),
+ "Truncating does not affect data located before the truncation mark."),
+
+ etap:is(ok, couch_file:close(Fd),
+ "Files close properly."),
+ ok.
diff --git a/1.1.x/test/etap/011-file-headers.t b/1.1.x/test/etap/011-file-headers.t
new file mode 100755
index 00000000..4705f629
--- /dev/null
+++ b/1.1.x/test/etap/011-file-headers.t
@@ -0,0 +1,145 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+%%! -pa ./src/couchdb -sasl errlog_type error -boot start_sasl -noshell
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+filename() -> test_util:build_file("test/etap/temp.011").
+sizeblock() -> 4096. % Need to keep this in sync with couch_file.erl
+
+main(_) ->
+ test_util:init_code_path(),
+ {S1, S2, S3} = now(),
+ random:seed(S1, S2, S3),
+
+ etap:plan(17),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail()
+ end,
+ ok.
+
+test() ->
+ {ok, Fd} = couch_file:open(filename(), [create,overwrite]),
+
+ etap:is({ok, 0}, couch_file:bytes(Fd),
+ "File should be initialized to contain zero bytes."),
+
+ etap:is(ok, couch_file:write_header(Fd, {<<"some_data">>, 32}),
+ "Writing a header succeeds."),
+
+ {ok, Size1} = couch_file:bytes(Fd),
+ etap:is_greater(Size1, 0,
+ "Writing a header allocates space in the file."),
+
+ etap:is({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd),
+ "Reading the header returns what we wrote."),
+
+ etap:is(ok, couch_file:write_header(Fd, [foo, <<"more">>]),
+ "Writing a second header succeeds."),
+
+ {ok, Size2} = couch_file:bytes(Fd),
+ etap:is_greater(Size2, Size1,
+ "Writing a second header allocates more space."),
+
+ etap:is({ok, [foo, <<"more">>]}, couch_file:read_header(Fd),
+ "Reading the second header does not return the first header."),
+
+ % Delete the second header.
+ ok = couch_file:truncate(Fd, Size1),
+
+ etap:is({ok, {<<"some_data">>, 32}}, couch_file:read_header(Fd),
+ "Reading the header after a truncation returns a previous header."),
+
+ couch_file:write_header(Fd, [foo, <<"more">>]),
+ etap:is({ok, Size2}, couch_file:bytes(Fd),
+ "Rewriting the same second header returns the same second size."),
+
+ ok = couch_file:close(Fd),
+
+ % Now for the fun stuff. Try corrupting the second header and see
+ % if we recover properly.
+
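+    % The offsets poked below imply this on-disk layout (inferred from the
+    % test itself, not a spec): a 0x1 marker byte, a 4-byte term size, a
+    % 16-byte MD5 digest, then the serialized header term.
+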
+ % Destroy the 0x1 byte that marks a header
+ check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
+ etap:isnt(Expect, couch_file:read_header(CouchFd),
+ "Should return a different header before corruption."),
+ file:pwrite(RawFd, HeaderPos, <<0>>),
+ etap:is(Expect, couch_file:read_header(CouchFd),
+ "Corrupting the byte marker should read the previous header.")
+ end),
+
+ % Corrupt the size.
+ check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
+ etap:isnt(Expect, couch_file:read_header(CouchFd),
+ "Should return a different header before corruption."),
+ % +1 for 0x1 byte marker
+ file:pwrite(RawFd, HeaderPos+1, <<10/integer>>),
+ etap:is(Expect, couch_file:read_header(CouchFd),
+ "Corrupting the size should read the previous header.")
+ end),
+
+ % Corrupt the MD5 signature
+ check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
+ etap:isnt(Expect, couch_file:read_header(CouchFd),
+ "Should return a different header before corruption."),
+ % +5 = +1 for 0x1 byte and +4 for term size.
+ file:pwrite(RawFd, HeaderPos+5, <<"F01034F88D320B22">>),
+ etap:is(Expect, couch_file:read_header(CouchFd),
+ "Corrupting the MD5 signature should read the previous header.")
+ end),
+
+ % Corrupt the data
+ check_header_recovery(fun(CouchFd, RawFd, Expect, HeaderPos) ->
+ etap:isnt(Expect, couch_file:read_header(CouchFd),
+ "Should return a different header before corruption."),
+ % +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig
+ file:pwrite(RawFd, HeaderPos+21, <<"some data goes here!">>),
+ etap:is(Expect, couch_file:read_header(CouchFd),
+ "Corrupting the header data should read the previous header.")
+ end),
+
+ ok.
+
+check_header_recovery(CheckFun) ->
+ {ok, Fd} = couch_file:open(filename(), [create,overwrite]),
+ {ok, RawFd} = file:open(filename(), [read, write, raw, binary]),
+
+ {ok, _} = write_random_data(Fd),
+ ExpectHeader = {some_atom, <<"a binary">>, 756},
+ ok = couch_file:write_header(Fd, ExpectHeader),
+
+ {ok, HeaderPos} = write_random_data(Fd),
+ ok = couch_file:write_header(Fd, {2342, <<"corruption! greed!">>}),
+
+ CheckFun(Fd, RawFd, {ok, ExpectHeader}, HeaderPos),
+
+ ok = file:close(RawFd),
+ ok = couch_file:close(Fd),
+ ok.
+
+write_random_data(Fd) ->
+ write_random_data(Fd, 100 + random:uniform(1000)).
+
+write_random_data(Fd, 0) ->
+ {ok, Bytes} = couch_file:bytes(Fd),
+ {ok, (1 + Bytes div sizeblock()) * sizeblock()};
+write_random_data(Fd, N) ->
+ Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]],
+    Term = lists:nth(random:uniform(length(Choices)), Choices),
+ {ok, _} = couch_file:append_term(Fd, Term),
+ write_random_data(Fd, N-1).
+
diff --git a/1.1.x/test/etap/020-btree-basics.t b/1.1.x/test/etap/020-btree-basics.t
new file mode 100755
index 00000000..18c4a836
--- /dev/null
+++ b/1.1.x/test/etap/020-btree-basics.t
@@ -0,0 +1,205 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+%%! -pa ./src/couchdb -sasl errlog_type error -boot start_sasl -noshell
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+filename() -> test_util:build_file("test/etap/temp.020").
+rows() -> 250.
+
+-record(btree, {fd, root, extract_kv, assemble_kv, less, reduce}).
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(48),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail()
+ end,
+ ok.
+
+%% @todo Determine if this number should be greater to see if the btree was
+%% broken into multiple nodes. AKA "How do we appropriately detect if multiple
+%% nodes were created."
+test()->
+ Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, rows())],
+ etap:ok(test_kvs(Sorted), "Testing sorted keys"),
+ etap:ok(test_kvs(lists:reverse(Sorted)), "Testing reversed sorted keys"),
+ etap:ok(test_kvs(shuffle(Sorted)), "Testing shuffled keys."),
+ ok.
+
+test_kvs(KeyValues) ->
+ ReduceFun = fun
+ (reduce, KVs) ->
+ length(KVs);
+ (rereduce, Reds) ->
+ lists:sum(Reds)
+ end,
+
+ Keys = [K || {K, _} <- KeyValues],
+
+ {ok, Fd} = couch_file:open(filename(), [create,overwrite]),
+ {ok, Btree} = couch_btree:open(nil, Fd),
+ etap:ok(is_record(Btree, btree), "Created btree is really a btree record"),
+ etap:is(Btree#btree.fd, Fd, "Btree#btree.fd is set correctly."),
+ etap:is(Btree#btree.root, nil, "Btree#btree.root is set correctly."),
+
+ Btree1 = couch_btree:set_options(Btree, [{reduce, ReduceFun}]),
+ etap:is(Btree1#btree.reduce, ReduceFun, "Reduce function was set"),
+ {ok, _, EmptyRes} = couch_btree:foldl(Btree1, fun(_, X) -> {ok, X+1} end, 0),
+ etap:is(EmptyRes, 0, "Folding over an empty btree"),
+
+ {ok, Btree2} = couch_btree:add_remove(Btree1, KeyValues, []),
+ etap:ok(test_btree(Btree2, KeyValues),
+ "Adding all keys at once returns a complete btree."),
+
+ etap:fun_is(
+ fun
+ ({ok, {kp_node, _}}) -> true;
+ (_) -> false
+ end,
+ couch_file:pread_term(Fd, element(1, Btree2#btree.root)),
+ "Btree root pointer is a kp_node."
+ ),
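+    % (#btree.root is assumed to be a {Pointer, Reds} tuple; the pointer is
+    % a file offset that couch_file:pread_term/2 resolves to the root node.)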
+
+ {ok, Btree3} = couch_btree:add_remove(Btree2, [], Keys),
+ etap:ok(test_btree(Btree3, []),
+ "Removing all keys at once returns an empty btree."),
+
+ Btree4 = lists:foldl(fun(KV, BtAcc) ->
+ {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
+ BtAcc2
+ end, Btree3, KeyValues),
+ etap:ok(test_btree(Btree4, KeyValues),
+ "Adding all keys one at a time returns a complete btree."),
+
+ Btree5 = lists:foldl(fun({K, _}, BtAcc) ->
+ {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
+ BtAcc2
+ end, Btree4, KeyValues),
+ etap:ok(test_btree(Btree5, []),
+ "Removing all keys one at a time returns an empty btree."),
+
+ KeyValuesRev = lists:reverse(KeyValues),
+ Btree6 = lists:foldl(fun(KV, BtAcc) ->
+ {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
+ BtAcc2
+ end, Btree5, KeyValuesRev),
+ etap:ok(test_btree(Btree6, KeyValues),
+ "Adding all keys in reverse order returns a complete btree."),
+
+ {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
+ case Count rem 2 == 0 of
+ true-> {Count+1, [X | Left], Right};
+ false -> {Count+1, Left, [X | Right]}
+ end
+ end, {0, [], []}, KeyValues),
+
+ etap:ok(test_add_remove(Btree6, Rem2Keys0, Rem2Keys1),
+ "Add/Remove every other key."),
+
+ etap:ok(test_add_remove(Btree6, Rem2Keys1, Rem2Keys0),
+ "Add/Remove opposite every other key."),
+
+ {ok, Btree7} = couch_btree:add_remove(Btree6, [], [K||{K,_}<-Rem2Keys1]),
+ {ok, Btree8} = couch_btree:add_remove(Btree7, [], [K||{K,_}<-Rem2Keys0]),
+ etap:ok(test_btree(Btree8, []),
+ "Removing both halves of every other key returns an empty btree."),
+
+ %% Third chunk (close out)
+ etap:is(couch_file:close(Fd), ok, "closing out"),
+ true.
+
+test_btree(Btree, KeyValues) ->
+ ok = test_key_access(Btree, KeyValues),
+ ok = test_lookup_access(Btree, KeyValues),
+ ok = test_final_reductions(Btree, KeyValues),
+ true.
+
+test_add_remove(Btree, OutKeyValues, RemainingKeyValues) ->
+ Btree2 = lists:foldl(fun({K, _}, BtAcc) ->
+ {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
+ BtAcc2
+ end, Btree, OutKeyValues),
+ true = test_btree(Btree2, RemainingKeyValues),
+
+ Btree3 = lists:foldl(fun(KV, BtAcc) ->
+ {ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
+ BtAcc2
+ end, Btree2, OutKeyValues),
+ true = test_btree(Btree3, OutKeyValues ++ RemainingKeyValues).
+
+test_key_access(Btree, List) ->
+ FoldFun = fun(Element, {[HAcc|TAcc], Count}) ->
+        case Element == HAcc of
+            true -> {ok, {TAcc, Count + 1}};
+            % mismatches are not counted, so the {[], Length} assertions
+            % below fail when the fold order does not match the sorted list
+            false -> {ok, {TAcc, Count}}
+        end
+ end,
+ Length = length(List),
+ Sorted = lists:sort(List),
+ {ok, _, {[], Length}} = couch_btree:foldl(Btree, FoldFun, {Sorted, 0}),
+ {ok, _, {[], Length}} = couch_btree:fold(Btree, FoldFun, {Sorted, 0}, [{dir, rev}]),
+ ok.
+
+test_lookup_access(Btree, KeyValues) ->
+ FoldFun = fun({Key, Value}, {Key, Value}) -> {stop, true} end,
+ lists:foreach(fun({Key, Value}) ->
+ [{ok, {Key, Value}}] = couch_btree:lookup(Btree, [Key]),
+ {ok, _, true} = couch_btree:foldl(Btree, FoldFun, {Key, Value}, [{start_key, Key}])
+ end, KeyValues).
+
+test_final_reductions(Btree, KeyValues) ->
+ KVLen = length(KeyValues),
+ FoldLFun = fun(_X, LeadingReds, Acc) ->
+ CountToStart = KVLen div 3 + Acc,
+ CountToStart = couch_btree:final_reduce(Btree, LeadingReds),
+ {ok, Acc+1}
+ end,
+ FoldRFun = fun(_X, LeadingReds, Acc) ->
+ CountToEnd = KVLen - KVLen div 3 + Acc,
+ CountToEnd = couch_btree:final_reduce(Btree, LeadingReds),
+ {ok, Acc+1}
+ end,
+ {LStartKey, _} = case KVLen of
+ 0 -> {nil, nil};
+ _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues))
+ end,
+ {RStartKey, _} = case KVLen of
+ 0 -> {nil, nil};
+ _ -> lists:nth(KVLen div 3, lists:sort(KeyValues))
+ end,
+ {ok, _, FoldLRed} = couch_btree:foldl(Btree, FoldLFun, 0, [{start_key, LStartKey}]),
+ {ok, _, FoldRRed} = couch_btree:fold(Btree, FoldRFun, 0, [{dir, rev}, {start_key, RStartKey}]),
+ KVLen = FoldLRed + FoldRRed,
+ ok.
+
+shuffle(List) ->
+ randomize(round(math:log(length(List)) + 0.5), List).
+
+randomize(1, List) ->
+ randomize(List);
+randomize(T, List) ->
+ lists:foldl(fun(_E, Acc) ->
+ randomize(Acc)
+ end, randomize(List), lists:seq(1, (T - 1))).
+
+randomize(List) ->
+ D = lists:map(fun(A) ->
+ {random:uniform(), A}
+ end, List),
+ {_, D1} = lists:unzip(lists:keysort(1, D)),
+ D1.
diff --git a/1.1.x/test/etap/021-btree-reductions.t b/1.1.x/test/etap/021-btree-reductions.t
new file mode 100755
index 00000000..331e49af
--- /dev/null
+++ b/1.1.x/test/etap/021-btree-reductions.t
@@ -0,0 +1,141 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+%%! -pa ./src/couchdb -sasl errlog_type error -boot start_sasl -noshell
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+filename() -> "./test/etap/temp.021".
+rows() -> 1000.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(8),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail()
+ end,
+ ok.
+
+test()->
+ ReduceFun = fun
+ (reduce, KVs) -> length(KVs);
+ (rereduce, Reds) -> lists:sum(Reds)
+ end,
+
+ {ok, Fd} = couch_file:open(filename(), [create,overwrite]),
+ {ok, Btree} = couch_btree:open(nil, Fd, [{reduce, ReduceFun}]),
+
+ % Create a list, of {"even", Value} or {"odd", Value} pairs.
+ {_, EvenOddKVs} = lists:foldl(fun(Idx, {Key, Acc}) ->
+ case Key of
+ "even" -> {"odd", [{{Key, Idx}, 1} | Acc]};
+ _ -> {"even", [{{Key, Idx}, 1} | Acc]}
+ end
+ end, {"odd", []}, lists:seq(1, rows())),
+
+ {ok, Btree2} = couch_btree:add_remove(Btree, EvenOddKVs, []),
+
+ GroupFun = fun({K1, _}, {K2, _}) -> K1 == K2 end,
+ FoldFun = fun(GroupedKey, Unreduced, Acc) ->
+ {ok, [{GroupedKey, couch_btree:final_reduce(Btree2, Unreduced)} | Acc]}
+ end,
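+    % rows() is split evenly between "even" and "odd" keys, so a grouped
+    % reduce should collapse the 1000 rows into two groups of 500 each.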
+
+ {SK1, EK1} = {{"even", -1}, {"even", foo}},
+ {SK2, EK2} = {{"odd", -1}, {"odd", foo}},
+
+ etap:fun_is(
+ fun
+ ({ok, [{{"odd", _}, 500}, {{"even", _}, 500}]}) ->
+ true;
+ (_) ->
+ false
+ end,
+ couch_btree:fold_reduce(Btree2, FoldFun, [], [{key_group_fun, GroupFun}]),
+ "Reduction works with no specified direction, startkey, or endkey."
+ ),
+
+ etap:fun_is(
+ fun
+ ({ok, [{{"odd", _}, 500}, {{"even", _}, 500}]}) ->
+ true;
+ (_) ->
+ false
+ end,
+ couch_btree:fold_reduce(Btree2, FoldFun, [], [{key_group_fun, GroupFun}, {dir, fwd}]),
+ "Reducing forward works with no startkey or endkey."
+ ),
+
+ etap:fun_is(
+ fun
+ ({ok, [{{"even", _}, 500}, {{"odd", _}, 500}]}) ->
+ true;
+ (_) ->
+ false
+ end,
+ couch_btree:fold_reduce(Btree2, FoldFun, [], [{key_group_fun, GroupFun}, {dir, rev}]),
+ "Reducing backwards works with no startkey or endkey."
+ ),
+
+ etap:fun_is(
+ fun
+ ({ok, [{{"odd", _}, 500}, {{"even", _}, 500}]}) ->
+ true;
+ (_) ->
+ false
+ end,
+ couch_btree:fold_reduce(Btree2, FoldFun, [], [{dir, fwd}, {key_group_fun, GroupFun}, {start_key, SK1}, {end_key, EK2}]),
+ "Reducing works over the entire range with startkey and endkey set."
+ ),
+
+ etap:fun_is(
+ fun
+ ({ok, [{{"even", _}, 500}]}) -> true;
+ (_) -> false
+ end,
+ couch_btree:fold_reduce(Btree2, FoldFun, [], [{dir, fwd}, {key_group_fun, GroupFun}, {start_key, SK1}, {end_key, EK1}]),
+ "Reducing forward over first half works with a startkey and endkey."
+ ),
+
+ etap:fun_is(
+ fun
+ ({ok, [{{"odd", _}, 500}]}) -> true;
+ (_) -> false
+ end,
+ couch_btree:fold_reduce(Btree2, FoldFun, [], [{dir, fwd}, {key_group_fun, GroupFun}, {start_key, SK2}, {end_key, EK2}]),
+ "Reducing forward over second half works with second startkey and endkey"
+ ),
+
+ etap:fun_is(
+ fun
+ ({ok, [{{"odd", _}, 500}]}) -> true;
+ (_) -> false
+ end,
+ couch_btree:fold_reduce(Btree2, FoldFun, [], [{dir, rev}, {key_group_fun, GroupFun}, {start_key, EK2}, {end_key, SK2}]),
+ "Reducing in reverse works after swapping the startkey and endkey."
+ ),
+
+ etap:fun_is(
+ fun
+ ({ok, [{{"even", _}, 500}, {{"odd", _}, 500}]}) ->
+ true;
+ (_) ->
+ false
+ end,
+ couch_btree:fold_reduce(Btree2, FoldFun, [], [{dir, rev}, {key_group_fun, GroupFun}, {start_key, EK2}, {end_key, SK1}]),
+ "Reducing in reverse results in reversed accumulator."
+ ),
+
+ couch_file:close(Fd).
diff --git a/1.1.x/test/etap/030-doc-from-json.t b/1.1.x/test/etap/030-doc-from-json.t
new file mode 100755
index 00000000..b0c393ef
--- /dev/null
+++ b/1.1.x/test/etap/030-doc-from-json.t
@@ -0,0 +1,236 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+%%! -pa ./src/couchdb -pa ./src/mochiweb -sasl errlog_type false -noshell
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% XXX: Figure out how to -include("couch_db.hrl")
+-record(doc, {id= <<"">>, revs={0, []}, body={[]},
+ atts=[], deleted=false, meta=[]}).
+-record(att, {name, type, att_len, disk_len, md5= <<>>, revpos=0, data,
+ encoding=identity}).
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(26),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail()
+ end,
+ ok.
+
+test() ->
+ couch_config:start_link(test_util:config_files()),
+ couch_config:set("attachments", "compression_level", "0", false),
+ ok = test_from_json_success(),
+ ok = test_from_json_errors(),
+ ok.
+
+test_from_json_success() ->
+ Cases = [
+ {
+ {[]},
+ #doc{},
+ "Return an empty document for an empty JSON object."
+ },
+ {
+ {[{<<"_id">>, <<"zing!">>}]},
+ #doc{id= <<"zing!">>},
+ "Parses document ids."
+ },
+ {
+ {[{<<"_id">>, <<"_design/foo">>}]},
+ #doc{id= <<"_design/foo">>},
+ "_design/document ids."
+ },
+ {
+ {[{<<"_id">>, <<"_local/bam">>}]},
+ #doc{id= <<"_local/bam">>},
+ "_local/document ids."
+ },
+ {
+ {[{<<"_rev">>, <<"4-230234">>}]},
+ #doc{revs={4, [<<"230234">>]}},
+ "_rev stored in revs."
+ },
+ {
+ {[{<<"soap">>, 35}]},
+ #doc{body={[{<<"soap">>, 35}]}},
+ "Non underscore prefixed fields stored in body."
+ },
+ {
+ {[{<<"_attachments">>, {[
+ {<<"my_attachment.fu">>, {[
+ {<<"stub">>, true},
+ {<<"content_type">>, <<"application/awesome">>},
+ {<<"length">>, 45}
+ ]}},
+ {<<"noahs_private_key.gpg">>, {[
+ {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>},
+ {<<"content_type">>, <<"application/pgp-signature">>}
+ ]}}
+ ]}}]},
+ #doc{atts=[
+ #att{
+ name = <<"my_attachment.fu">>,
+ data = stub,
+ type = <<"application/awesome">>,
+ att_len = 45,
+ disk_len = 45,
+ revpos = nil
+ },
+ #att{
+ name = <<"noahs_private_key.gpg">>,
+ data = <<"I have a pet fish!">>,
+ type = <<"application/pgp-signature">>,
+ att_len = 18,
+ disk_len = 18,
+ revpos = 0
+ }
+ ]},
+ "Attachments are parsed correctly."
+ },
+ {
+ {[{<<"_deleted">>, true}]},
+ #doc{deleted=true},
+ "_deleted controls the deleted field."
+ },
+ {
+ {[{<<"_deleted">>, false}]},
+ #doc{},
+ "{\"_deleted\": false} is ok."
+ },
+ {
+ {[
+ {<<"_revisions">>, {[
+ {<<"start">>, 4},
+ {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]}
+ ]}},
+ {<<"_rev">>, <<"6-something">>}
+ ]},
+ #doc{revs={4, [<<"foo1">>, <<"phi3">>, <<"omega">>]}},
+            "The _revisions attribute is preferred to _rev."
+ },
+ {
+ {[{<<"_revs_info">>, dropping}]},
+ #doc{},
+ "Drops _revs_info."
+ },
+ {
+ {[{<<"_local_seq">>, dropping}]},
+ #doc{},
+ "Drops _local_seq."
+ },
+ {
+ {[{<<"_conflicts">>, dropping}]},
+ #doc{},
+ "Drops _conflicts."
+ },
+ {
+ {[{<<"_deleted_conflicts">>, dropping}]},
+ #doc{},
+ "Drops _deleted_conflicts."
+ }
+ ],
+
+ lists:foreach(fun({EJson, Expect, Mesg}) ->
+ etap:is(couch_doc:from_json_obj(EJson), Expect, Mesg)
+ end, Cases),
+ ok.
+
+test_from_json_errors() ->
+ Cases = [
+ {
+ [],
+ {bad_request, "Document must be a JSON object"},
+ "arrays are invalid"
+ },
+ {
+ 4,
+ {bad_request, "Document must be a JSON object"},
+ "integers are invalid"
+ },
+ {
+ true,
+ {bad_request, "Document must be a JSON object"},
+ "literals are invalid"
+ },
+ {
+ {[{<<"_id">>, {[{<<"foo">>, 5}]}}]},
+ {bad_request, <<"Document id must be a string">>},
+ "Document id must be a string."
+ },
+ {
+ {[{<<"_id">>, <<"_random">>}]},
+ {bad_request,
+ <<"Only reserved document ids may start with underscore.">>},
+ "Disallow arbitrary underscore prefixed docids."
+ },
+ {
+ {[{<<"_rev">>, 5}]},
+ {bad_request, <<"Invalid rev format">>},
+ "_rev must be a string"
+ },
+ {
+ {[{<<"_rev">>, "foobar"}]},
+ {bad_request, <<"Invalid rev format">>},
+ "_rev must be %d-%s"
+ },
+ {
+ {[{<<"_rev">>, "foo-bar"}]},
+            "Error if the integer part of _rev is malformed."
+ },
+ {
+ {[{<<"_revisions">>, {[{<<"start">>, true}]}}]},
+ {doc_validation, "_revisions.start isn't an integer."},
+ "_revisions.start must be an integer."
+ },
+ {
+ {[{<<"_revisions">>, {[
+ {<<"start">>, 0},
+ {<<"ids">>, 5}
+ ]}}]},
+ {doc_validation, "_revisions.ids isn't a array."},
+            "_revisions.ids must be a list."
+ },
+ {
+ {[{<<"_revisions">>, {[
+ {<<"start">>, 0},
+ {<<"ids">>, [5]}
+ ]}}]},
+ {doc_validation, "RevId isn't a string"},
+ "Revision ids must be strings."
+ },
+ {
+ {[{<<"_something">>, 5}]},
+ {doc_validation, <<"Bad special document member: _something">>},
+ "Underscore prefix fields are reserved."
+ }
+ ],
+
+ lists:foreach(fun
+ ({EJson, Expect, Mesg}) ->
+ Error = (catch couch_doc:from_json_obj(EJson)),
+ etap:is(Error, Expect, Mesg);
+ ({EJson, Mesg}) ->
+ try
+ couch_doc:from_json_obj(EJson),
+ etap:ok(false, "Conversion failed to raise an exception.")
+ catch
+ _:_ -> etap:ok(true, Mesg)
+ end
+ end, Cases),
+ ok.
diff --git a/1.1.x/test/etap/031-doc-to-json.t b/1.1.x/test/etap/031-doc-to-json.t
new file mode 100755
index 00000000..ce950f95
--- /dev/null
+++ b/1.1.x/test/etap/031-doc-to-json.t
@@ -0,0 +1,197 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+%%! -pa ./src/couchdb -pa ./src/mochiweb -sasl errlog_type false -noshell
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% XXX: Figure out how to -include("couch_db.hrl")
+-record(doc, {id= <<"">>, revs={0, []}, body={[]},
+ atts=[], deleted=false, meta=[]}).
+-record(att, {name, type, att_len, disk_len, md5= <<>>, revpos=0, data,
+ encoding=identity}).
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(12),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail()
+ end,
+ ok.
+
+test() ->
+ couch_config:start_link(test_util:config_files()),
+ couch_config:set("attachments", "compression_level", "0", false),
+ ok = test_to_json_success(),
+ ok.
+
+test_to_json_success() ->
+ Cases = [
+ {
+ #doc{},
+ {[{<<"_id">>, <<"">>}]},
+ "Empty docs are {\"_id\": \"\"}"
+ },
+ {
+ #doc{id= <<"foo">>},
+ {[{<<"_id">>, <<"foo">>}]},
+ "_id is added."
+ },
+ {
+ #doc{revs={5, ["foo"]}},
+ {[{<<"_id">>, <<>>}, {<<"_rev">>, <<"5-foo">>}]},
+ "_rev is added."
+ },
+ {
+ [revs],
+ #doc{revs={5, [<<"first">>, <<"second">>]}},
+ {[
+ {<<"_id">>, <<>>},
+ {<<"_rev">>, <<"5-first">>},
+ {<<"_revisions">>, {[
+ {<<"start">>, 5},
+ {<<"ids">>, [<<"first">>, <<"second">>]}
+ ]}}
+ ]},
+            "_revisions included with the revs option."
+ },
+ {
+ #doc{body={[{<<"foo">>, <<"bar">>}]}},
+ {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}]},
+ "Arbitrary fields are added."
+ },
+ {
+ #doc{deleted=true, body={[{<<"foo">>, <<"bar">>}]}},
+ {[{<<"_id">>, <<>>}, {<<"foo">>, <<"bar">>}, {<<"_deleted">>, true}]},
+ "Deleted docs no longer drop body members."
+ },
+ {
+ #doc{meta=[
+ {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]}
+ ]},
+ {[
+ {<<"_id">>, <<>>},
+ {<<"_revs_info">>, [
+ {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]},
+ {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]}
+ ]}
+ ]},
+ "_revs_info field is added correctly."
+ },
+ {
+ #doc{meta=[{local_seq, 5}]},
+ {[{<<"_id">>, <<>>}, {<<"_local_seq">>, 5}]},
+ "_local_seq is added as an integer."
+ },
+ {
+ #doc{meta=[{conflicts, [{3, <<"yep">>}, {1, <<"snow">>}]}]},
+ {[
+ {<<"_id">>, <<>>},
+ {<<"_conflicts">>, [<<"3-yep">>, <<"1-snow">>]}
+ ]},
+ "_conflicts is added as an array of strings."
+ },
+ {
+ #doc{meta=[{deleted_conflicts, [{10923, <<"big_cowboy_hat">>}]}]},
+ {[
+ {<<"_id">>, <<>>},
+ {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]}
+ ]},
+            "_deleted_conflicts is added as an array of strings."
+ },
+ {
+ #doc{atts=[
+ #att{
+ name = <<"big.xml">>,
+ type = <<"xml/sucks">>,
+ data = fun() -> ok end,
+ revpos = 1,
+ att_len = 400,
+ disk_len = 400
+ },
+ #att{
+ name = <<"fast.json">>,
+ type = <<"json/ftw">>,
+ data = <<"{\"so\": \"there!\"}">>,
+ revpos = 1,
+ att_len = 16,
+ disk_len = 16
+ }
+ ]},
+ {[
+ {<<"_id">>, <<>>},
+ {<<"_attachments">>, {[
+ {<<"big.xml">>, {[
+ {<<"content_type">>, <<"xml/sucks">>},
+ {<<"revpos">>, 1},
+ {<<"length">>, 400},
+ {<<"stub">>, true}
+ ]}},
+ {<<"fast.json">>, {[
+ {<<"content_type">>, <<"json/ftw">>},
+ {<<"revpos">>, 1},
+ {<<"length">>, 16},
+ {<<"stub">>, true}
+ ]}}
+ ]}}
+ ]},
+ "Attachments attached as stubs only include a length."
+ },
+ {
+ [attachments],
+ #doc{atts=[
+ #att{
+ name = <<"stuff.txt">>,
+ type = <<"text/plain">>,
+ data = fun() -> <<"diet pepsi">> end,
+ revpos = 1,
+ att_len = 10,
+ disk_len = 10
+ },
+ #att{
+ name = <<"food.now">>,
+ type = <<"application/food">>,
+ revpos = 1,
+ data = <<"sammich">>
+ }
+ ]},
+ {[
+ {<<"_id">>, <<>>},
+ {<<"_attachments">>, {[
+ {<<"stuff.txt">>, {[
+ {<<"content_type">>, <<"text/plain">>},
+ {<<"revpos">>, 1},
+ {<<"data">>, <<"ZGlldCBwZXBzaQ==">>}
+ ]}},
+ {<<"food.now">>, {[
+ {<<"content_type">>, <<"application/food">>},
+ {<<"revpos">>, 1},
+ {<<"data">>, <<"c2FtbWljaA==">>}
+ ]}}
+ ]}}
+ ]},
+ "Attachments included inline with attachments option."
+ }
+ ],
+
+ lists:foreach(fun
+ ({Doc, EJson, Mesg}) ->
+ etap:is(couch_doc:to_json_obj(Doc, []), EJson, Mesg);
+ ({Options, Doc, EJson, Mesg}) ->
+ etap:is(couch_doc:to_json_obj(Doc, Options), EJson, Mesg)
+ end, Cases),
+ ok.
+
diff --git a/1.1.x/test/etap/040-util.t b/1.1.x/test/etap/040-util.t
new file mode 100755
index 00000000..8f80db87
--- /dev/null
+++ b/1.1.x/test/etap/040-util.t
@@ -0,0 +1,80 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+ application:start(crypto),
+
+ etap:plan(14),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ % to_existing_atom
+ etap:is(true, couch_util:to_existing_atom(true), "An atom is an atom."),
+ etap:is(foo, couch_util:to_existing_atom(<<"foo">>),
+ "A binary foo is the atom foo."),
+ etap:is(foobarbaz, couch_util:to_existing_atom("foobarbaz"),
+ "A list of atoms is one munged atom."),
+
+ % implode
+ etap:is([1, 38, 2, 38, 3], couch_util:implode([1,2,3],"&"),
+ "use & as separator in list."),
+
+ % trim
+ Strings = [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"],
+ etap:ok(lists:all(fun(S) -> couch_util:trim(S) == "foo" end, Strings),
+ "everything here trimmed should be foo."),
+
+ % abs_pathname
+ {ok, Cwd} = file:get_cwd(),
+ etap:is(Cwd ++ "/foo", couch_util:abs_pathname("./foo"),
+ "foo is in this directory."),
+
+ % should_flush
+ etap:ok(not couch_util:should_flush(),
+ "Not using enough memory to flush."),
+ AcquireMem = fun() ->
+        _IntsToAGazillion = lists:seq(1, 200000),
+        _LotsOfData = lists:map(
+            fun(Int) -> {Int, <<"foobar">>} end,
+            lists:seq(1, 500000)),
+        etap:ok(couch_util:should_flush(),
+            "Allocating 200K integers and 500K tuples exceeds the memory threshold.")
+ end,
+ AcquireMem(),
+
+ etap:ok(not couch_util:should_flush(),
+ "Checking to flush invokes GC."),
+
+ % verify
+ etap:is(true, couch_util:verify("It4Vooya", "It4Vooya"),
+ "String comparison."),
+ etap:is(false, couch_util:verify("It4VooyaX", "It4Vooya"),
+ "String comparison (unequal lengths)."),
+ etap:is(true, couch_util:verify(<<"ahBase3r">>, <<"ahBase3r">>),
+ "Binary comparison."),
+ etap:is(false, couch_util:verify(<<"ahBase3rX">>, <<"ahBase3r">>),
+ "Binary comparison (unequal lengths)."),
+ etap:is(false, couch_util:verify(nil, <<"ahBase3r">>),
+ "Binary comparison with atom."),
+
+ ok.
diff --git a/1.1.x/test/etap/041-uuid-gen-seq.ini b/1.1.x/test/etap/041-uuid-gen-seq.ini
new file mode 100644
index 00000000..94cebc6f
--- /dev/null
+++ b/1.1.x/test/etap/041-uuid-gen-seq.ini
@@ -0,0 +1,19 @@
+; Licensed to the Apache Software Foundation (ASF) under one
+; or more contributor license agreements. See the NOTICE file
+; distributed with this work for additional information
+; regarding copyright ownership. The ASF licenses this file
+; to you under the Apache License, Version 2.0 (the
+; "License"); you may not use this file except in compliance
+; with the License. You may obtain a copy of the License at
+;
+; http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing,
+; software distributed under the License is distributed on an
+; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+; KIND, either express or implied. See the License for the
+; specific language governing permissions and limitations
+; under the License.
+
+[uuids]
+algorithm = sequential
diff --git a/1.1.x/test/etap/041-uuid-gen-utc.ini b/1.1.x/test/etap/041-uuid-gen-utc.ini
new file mode 100644
index 00000000..c2b83831
--- /dev/null
+++ b/1.1.x/test/etap/041-uuid-gen-utc.ini
@@ -0,0 +1,19 @@
+; Licensed to the Apache Software Foundation (ASF) under one
+; or more contributor license agreements. See the NOTICE file
+; distributed with this work for additional information
+; regarding copyright ownership. The ASF licenses this file
+; to you under the Apache License, Version 2.0 (the
+; "License"); you may not use this file except in compliance
+; with the License. You may obtain a copy of the License at
+;
+; http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing,
+; software distributed under the License is distributed on an
+; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+; KIND, either express or implied. See the License for the
+; specific language governing permissions and limitations
+; under the License.
+
+[uuids]
+algorithm = utc_random
diff --git a/1.1.x/test/etap/041-uuid-gen.t b/1.1.x/test/etap/041-uuid-gen.t
new file mode 100755
index 00000000..1e6aa9ee
--- /dev/null
+++ b/1.1.x/test/etap/041-uuid-gen.t
@@ -0,0 +1,118 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+default_config() ->
+ test_util:build_file("etc/couchdb/default_dev.ini").
+
+seq_alg_config() ->
+ test_util:source_file("test/etap/041-uuid-gen-seq.ini").
+
+utc_alg_config() ->
+ test_util:source_file("test/etap/041-uuid-gen-utc.ini").
+
+% Run tests and wait for the gen_servers to shutdown
+run_test(IniFiles, Test) ->
+ {ok, Pid} = couch_config:start_link(IniFiles),
+ erlang:monitor(process, Pid),
+ couch_uuids:start(),
+ Test(),
+ couch_uuids:stop(),
+ couch_config:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} -> ok;
+ _Other -> etap:diag("OTHER: ~p~n", [_Other])
+ after
+ 1000 -> throw({timeout_error, config_stop})
+ end.
+
+main(_) ->
+ test_util:init_code_path(),
+ application:start(crypto),
+ etap:plan(6),
+
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+
+ TestUnique = fun() ->
+ etap:is(
+ test_unique(10000, couch_uuids:new()),
+ true,
+ "Can generate 10K unique IDs"
+ )
+ end,
+ run_test([default_config()], TestUnique),
+ run_test([default_config(), seq_alg_config()], TestUnique),
+ run_test([default_config(), utc_alg_config()], TestUnique),
+
+ TestMonotonic = fun () ->
+ etap:is(
+ couch_uuids:new() < couch_uuids:new(),
+ true,
+ "should produce monotonically increasing ids"
+ )
+ end,
+ run_test([default_config(), seq_alg_config()], TestMonotonic),
+ run_test([default_config(), utc_alg_config()], TestMonotonic),
+
+ % Pretty sure that the average of a uniform distribution is the
+ % midpoint of the range. Thus, to exceed a threshold, we need
+ % approximately Total / (Range/2 + RangeMin) samples.
+ %
+ % In our case this works out to be 8194. (0xFFF000 / 0x7FF)
+ % These tests just fudge the limits for a good generator at 25%
+ % in either direction. Technically it should be possible to generate
+ % bounds that will show if your random number generator is not
+ % sufficiently random but I hated statistics in school.
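+    % (Spelled out: 0xFFF000 / 0x7FF = 16773120 / 2047 ~ 8194 expected
+    % samples before the random increments roll the prefix over.)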
+ TestRollOver = fun() ->
+ UUID = binary_to_list(couch_uuids:new()),
+ Prefix = element(1, lists:split(26, UUID)),
+ N = gen_until_pref_change(Prefix,0),
+ etap:diag("N is: ~p~n",[N]),
+ etap:is(
+ N >= 5000 andalso N =< 11000,
+ true,
+ "should roll over every so often."
+ )
+ end,
+ run_test([default_config(), seq_alg_config()], TestRollOver).
+
+test_unique(0, _) ->
+ true;
+test_unique(N, UUID) ->
+ case couch_uuids:new() of
+ UUID ->
+ etap:diag("N: ~p~n", [N]),
+ false;
+ Else -> test_unique(N-1, Else)
+ end.
+
+get_prefix(UUID) ->
+ element(1, lists:split(26, binary_to_list(UUID))).
+
+gen_until_pref_change(_, Count) when Count > 8251 ->
+ Count;
+gen_until_pref_change(Prefix, N) ->
+ case get_prefix(couch_uuids:new()) of
+ Prefix -> gen_until_pref_change(Prefix, N+1);
+ _ -> N
+ end.
diff --git a/1.1.x/test/etap/050-stream.t b/1.1.x/test/etap/050-stream.t
new file mode 100755
index 00000000..d30b524a
--- /dev/null
+++ b/1.1.x/test/etap/050-stream.t
@@ -0,0 +1,87 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(13),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+read_all(Fd, PosList) ->
+ Data = couch_stream:foldl(Fd, PosList, fun(Bin, Acc) -> [Bin, Acc] end, []),
+ iolist_to_binary(Data).
+
+test() ->
+ {ok, Fd} = couch_file:open("test/etap/temp.050", [create,overwrite]),
+ {ok, Stream} = couch_stream:open(Fd),
+
+ etap:is(ok, couch_stream:write(Stream, <<"food">>),
+ "Writing to streams works."),
+
+ etap:is(ok, couch_stream:write(Stream, <<"foob">>),
+ "Consecutive writing to streams works."),
+
+ etap:is(ok, couch_stream:write(Stream, <<>>),
+ "Writing an empty binary does nothing."),
+
+ {Ptrs, Length, _, _, _} = couch_stream:close(Stream),
+ etap:is(Ptrs, [{0, 8}], "Close returns the file pointers."),
+ etap:is(Length, 8, "Close also returns the number of bytes written."),
+ etap:is(<<"foodfoob">>, read_all(Fd, Ptrs), "Returned pointers are valid."),
+
+    % Remember where we expect the pointer to be.
+ {ok, ExpPtr} = couch_file:bytes(Fd),
+ {ok, Stream2} = couch_stream:open(Fd),
+ OneBits = <<1:(8*10)>>,
+ etap:is(ok, couch_stream:write(Stream2, OneBits),
+        "Successfully wrote an 80-bit binary."),
+
+ ZeroBits = <<0:(8*10)>>,
+ etap:is(ok, couch_stream:write(Stream2, ZeroBits),
+        "Successfully wrote 80 zero bits."),
+
+ {Ptrs2, Length2, _, _, _} = couch_stream:close(Stream2),
+ etap:is(Ptrs2, [{ExpPtr, 20}], "Closing stream returns the file pointers."),
+    etap:is(Length2, 20, "Length written is 20 bytes (160 bits)."),
+
+ AllBits = iolist_to_binary([OneBits,ZeroBits]),
+ etap:is(AllBits, read_all(Fd, Ptrs2), "Returned pointers are valid."),
+
+    % Stream more than the 4K chunk size.
+ {ok, ExpPtr2} = couch_file:bytes(Fd),
+ {ok, Stream3} = couch_stream:open(Fd),
+ Acc2 = lists:foldl(fun(_, Acc) ->
+ Data = <<"a1b2c">>,
+ couch_stream:write(Stream3, Data),
+ [Data | Acc]
+ end, [], lists:seq(1, 1024)),
+ {Ptrs3, Length3, _, _, _} = couch_stream:close(Stream3),
+
+ % 4095 because of 5 * 4096 rem 5 (last write before exceeding threshold)
+ % + 5 puts us over the threshold
+ % + 4 bytes for the term_to_binary adding a length header
+ % + 1 byte every 4K for tail append headers
+ SecondPtr = ExpPtr2 + 4095 + 5 + 4 + 1,
+ etap:is(Ptrs3, [{ExpPtr2, 4100}, {SecondPtr, 1020}], "Pointers every 4K bytes."),
+ etap:is(Length3, 5120, "Wrote the expected 5K bytes."),
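+    % Cross-check: 1024 writes x 5 bytes = 5120 bytes total, split as
+    % 4100 + 1020 across the two returned pointers.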
+
+ couch_file:close(Fd),
+ ok.
diff --git a/1.1.x/test/etap/060-kt-merging.t b/1.1.x/test/etap/060-kt-merging.t
new file mode 100755
index 00000000..0e481a52
--- /dev/null
+++ b/1.1.x/test/etap/060-kt-merging.t
@@ -0,0 +1,115 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(12),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
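+    % Fixture shape used throughout (inferred from the terms below):
+    %   {StartDepth, {Key, Value, [ChildTree, ...]}}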
+ One = {0, {"1","foo",[]}},
+ TwoSibs = [{0, {"1","foo",[]}},
+ {0, {"2","foo",[]}}],
+ OneChild = {0, {"1","foo",[{"1a", "bar", []}]}},
+ TwoChild = {0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+ TwoChildSibs = {0, {"1","foo", [{"1a", "bar", []},
+ {"1b", "bar", []}]}},
+ TwoChildSibs2 = {0, {"1","foo", [{"1a", "bar", []},
+ {"1b", "bar", [{"1bb", "boo", []}]}]}},
+ Stemmed1b = {1, {"1a", "bar", []}},
+ Stemmed1a = {1, {"1a", "bar", [{"1aa", "bar", []}]}},
+ Stemmed1aa = {2, {"1aa", "bar", []}},
+ Stemmed1bb = {2, {"1bb", "boo", []}},
+
+ etap:is(
+ {[One], no_conflicts},
+ couch_key_tree:merge([], One, 10),
+ "The empty tree is the identity for merge."
+ ),
+
+ etap:is(
+ {TwoSibs, no_conflicts},
+ couch_key_tree:merge(TwoSibs, One, 10),
+ "Merging a prefix of a tree with the tree yields the tree."
+ ),
+
+ etap:is(
+ {[One], no_conflicts},
+ couch_key_tree:merge([One], One, 10),
+ "Merging is reflexive."
+ ),
+
+ etap:is(
+ {[TwoChild], no_conflicts},
+ couch_key_tree:merge([TwoChild], TwoChild, 10),
+ "Merging two children is still reflexive."
+ ),
+
+ etap:is(
+ {[TwoChildSibs], no_conflicts},
+ couch_key_tree:merge([TwoChildSibs], TwoChildSibs, 10),
+ "Merging a tree to itself is itself."),
+
+ etap:is(
+ {[TwoChildSibs], no_conflicts},
+ couch_key_tree:merge([TwoChildSibs], Stemmed1b, 10),
+ "Merging a tree with a stem."
+ ),
+
+ etap:is(
+ {[TwoChildSibs2], no_conflicts},
+ couch_key_tree:merge([TwoChildSibs2], Stemmed1bb, 10),
+ "Merging a stem at a deeper level."
+ ),
+
+ etap:is(
+ {[TwoChild], no_conflicts},
+ couch_key_tree:merge([TwoChild], Stemmed1aa, 10),
+ "Merging a single tree with a deeper stem."
+ ),
+
+ etap:is(
+ {[TwoChild], no_conflicts},
+ couch_key_tree:merge([TwoChild], Stemmed1a, 10),
+ "Merging a larger stem."
+ ),
+
+ etap:is(
+ {[Stemmed1a], no_conflicts},
+ couch_key_tree:merge([Stemmed1a], Stemmed1aa, 10),
+ "More merging."
+ ),
+
+ Expect1 = [OneChild, Stemmed1aa],
+ etap:is(
+ {Expect1, conflicts},
+ couch_key_tree:merge([OneChild], Stemmed1aa, 10),
+ "Merging should create conflicts."
+ ),
+
+ etap:is(
+ {[TwoChild], no_conflicts},
+ couch_key_tree:merge(Expect1, TwoChild, 10),
+ "Merge should have no conflicts."
+ ),
+
+ ok.
diff --git a/1.1.x/test/etap/061-kt-missing-leaves.t b/1.1.x/test/etap/061-kt-missing-leaves.t
new file mode 100755
index 00000000..d60b4db8
--- /dev/null
+++ b/1.1.x/test/etap/061-kt-missing-leaves.t
@@ -0,0 +1,69 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(4),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+ Stemmed2 = [{2, {"1aa", "bar", []}}],
+
+ etap:is(
+ [],
+ couch_key_tree:find_missing(TwoChildSibs, [{0,"1"}, {1,"1a"}]),
+ "Look for missing keys."
+ ),
+
+ etap:is(
+ [{0, "10"}, {100, "x"}],
+ couch_key_tree:find_missing(
+ TwoChildSibs,
+ [{0,"1"}, {0, "10"}, {1,"1a"}, {100, "x"}]
+ ),
+ "Look for missing keys."
+ ),
+
+ etap:is(
+ [{0, "1"}, {100, "x"}],
+ couch_key_tree:find_missing(
+ Stemmed1,
+ [{0,"1"}, {1,"1a"}, {100, "x"}]
+ ),
+ "Look for missing keys."
+ ),
+ etap:is(
+ [{0, "1"}, {1,"1a"}, {100, "x"}],
+ couch_key_tree:find_missing(
+ Stemmed2,
+ [{0,"1"}, {1,"1a"}, {100, "x"}]
+ ),
+ "Look for missing keys."
+ ),
+
+ ok.
diff --git a/1.1.x/test/etap/062-kt-remove-leaves.t b/1.1.x/test/etap/062-kt-remove-leaves.t
new file mode 100755
index 00000000..745a00be
--- /dev/null
+++ b/1.1.x/test/etap/062-kt-remove-leaves.t
@@ -0,0 +1,72 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(6),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}],
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+
+ etap:is(
+ {TwoChildSibs, []},
+ couch_key_tree:remove_leafs(TwoChildSibs, []),
+ "Removing no leaves has no effect on the tree."
+ ),
+
+ etap:is(
+ {TwoChildSibs, []},
+ couch_key_tree:remove_leafs(TwoChildSibs, [{0, "1"}]),
+ "Removing a non-existant branch has no effect."
+ ),
+
+ etap:is(
+ {OneChild, [{1, "1b"}]},
+ couch_key_tree:remove_leafs(TwoChildSibs, [{1, "1b"}]),
+ "Removing a leaf removes the leaf."
+ ),
+
+ etap:is(
+ {[], [{1, "1b"},{1, "1a"}]},
+ couch_key_tree:remove_leafs(TwoChildSibs, [{1, "1a"}, {1, "1b"}]),
+ "Removing all leaves returns an empty tree."
+ ),
+
+ etap:is(
+ {Stemmed, []},
+ couch_key_tree:remove_leafs(Stemmed, [{1, "1a"}]),
+ "Removing a non-existant node has no effect."
+ ),
+
+ etap:is(
+ {[], [{2, "1aa"}]},
+ couch_key_tree:remove_leafs(Stemmed, [{2, "1aa"}]),
+ "Removing the last leaf returns an empty tree."
+ ),
+
+ ok.
diff --git a/1.1.x/test/etap/063-kt-get-leaves.t b/1.1.x/test/etap/063-kt-get-leaves.t
new file mode 100755
index 00000000..6d4e8007
--- /dev/null
+++ b/1.1.x/test/etap/063-kt-get-leaves.t
@@ -0,0 +1,102 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(11),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+
+ etap:is(
+ {[{"foo", {0, ["1"]}}],[]},
+ couch_key_tree:get(TwoChildSibs, [{0, "1"}]),
+ "extract a subtree."
+ ),
+
+ etap:is(
+ {[{"bar", {1, ["1a", "1"]}}],[]},
+ couch_key_tree:get(TwoChildSibs, [{1, "1a"}]),
+ "extract a subtree."
+ ),
+
+ etap:is(
+ {[],[{0,"x"}]},
+ couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}]),
+ "gather up the leaves."
+ ),
+
+ etap:is(
+ {[{"bar", {1, ["1a","1"]}}],[]},
+ couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}]),
+ "gather up the leaves."
+ ),
+
+ etap:is(
+ {[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]},
+ couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}]),
+ "gather up the leaves."
+ ),
+
+ etap:is(
+ {[{0,[{"1", "foo"}]}],[]},
+ couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}]),
+ "retrieve full key paths."
+ ),
+
+ etap:is(
+ {[{1,[{"1a", "bar"},{"1", "foo"}]}],[]},
+ couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}]),
+ "retrieve full key paths."
+ ),
+
+ etap:is(
+ [{2, [{"1aa", "bar"},{"1a", "bar"}]}],
+ couch_key_tree:get_all_leafs_full(Stemmed),
+ "retrieve all leaves."
+ ),
+
+ etap:is(
+ [{1, [{"1a", "bar"},{"1", "foo"}]}, {1, [{"1b", "bar"},{"1", "foo"}]}],
+ couch_key_tree:get_all_leafs_full(TwoChildSibs),
+ "retrieve all the leaves."
+ ),
+
+ etap:is(
+ [{"bar", {2, ["1aa","1a"]}}],
+ couch_key_tree:get_all_leafs(Stemmed),
+ "retrieve all leaves."
+ ),
+
+ etap:is(
+ [{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b","1"]}}],
+ couch_key_tree:get_all_leafs(TwoChildSibs),
+ "retrieve all the leaves."
+ ),
+
+ ok.
diff --git a/1.1.x/test/etap/064-kt-counting.t b/1.1.x/test/etap/064-kt-counting.t
new file mode 100755
index 00000000..f182d287
--- /dev/null
+++ b/1.1.x/test/etap/064-kt-counting.t
@@ -0,0 +1,46 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(4),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ EmptyTree = [],
+ One = [{0, {"1","foo",[]}}],
+ TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ Stemmed = [{2, {"1bb", "boo", []}}],
+
+ etap:is(0, couch_key_tree:count_leafs(EmptyTree),
+ "Empty trees have no leaves."),
+
+ etap:is(1, couch_key_tree:count_leafs(One),
+ "Single node trees have a single leaf."),
+
+ etap:is(2, couch_key_tree:count_leafs(TwoChildSibs),
+ "Two children siblings counted as two leaves."),
+
+ etap:is(1, couch_key_tree:count_leafs(Stemmed),
+ "Stemming does not affect leaf counting."),
+
+ ok.
diff --git a/1.1.x/test/etap/065-kt-stemming.t b/1.1.x/test/etap/065-kt-stemming.t
new file mode 100755
index 00000000..6e781c1d
--- /dev/null
+++ b/1.1.x/test/etap/065-kt-stemming.t
@@ -0,0 +1,45 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(3),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
+ Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
+ Stemmed2 = [{2, {"1aa", "bar", []}}],
+
+ etap:is(TwoChild, couch_key_tree:stem(TwoChild, 3),
+ "Stemming more levels than what exists does nothing."),
+
+ etap:is(Stemmed1, couch_key_tree:stem(TwoChild, 2),
+ "Stemming with a depth of two returns the deepest two nodes."),
+
+ etap:is(Stemmed2, couch_key_tree:stem(TwoChild, 1),
+ "Stemming to a depth of one returns the deepest node."),
+
+ ok.
diff --git a/1.1.x/test/etap/070-couch-db.t b/1.1.x/test/etap/070-couch-db.t
new file mode 100755
index 00000000..787d6c6a
--- /dev/null
+++ b/1.1.x/test/etap/070-couch-db.t
@@ -0,0 +1,77 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(4),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+
+ couch_server_sup:start_link(test_util:config_files()),
+
+ couch_db:create(<<"etap-test-db">>, []),
+ {ok, AllDbs} = couch_server:all_databases(),
+ etap:ok(lists:member(<<"etap-test-db">>, AllDbs), "Database was created."),
+
+ couch_server:delete(<<"etap-test-db">>, []),
+ {ok, AllDbs2} = couch_server:all_databases(),
+ etap:ok(not lists:member(<<"etap-test-db">>, AllDbs2),
+ "Database was deleted."),
+
+ gen_server:call(couch_server, {set_max_dbs_open, 3}),
+ MkDbName = fun(Int) -> list_to_binary("lru-" ++ integer_to_list(Int)) end,
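+
+ % With max_dbs_open lowered to 3, creating six databases below forces
+ % couch_server to evict idle handles along the way; creation and deletion
+ % must still succeed past the open-handle limit.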
+
+ lists:foreach(fun(Int) ->
+ {ok, TestDbs} = couch_server:all_databases(),
+ ok = case lists:member(MkDbName(Int), TestDbs) of
+ true -> couch_server:delete(MkDbName(Int), []);
+ _ -> ok
+ end,
+ {ok, Db} = couch_db:create(MkDbName(Int), []),
+ ok = couch_db:close(Db)
+ end, lists:seq(1, 6)),
+
+ {ok, AllDbs3} = couch_server:all_databases(),
+ NumCreated = lists:foldl(fun(Int, Acc) ->
+ true = lists:member(MkDbName(Int), AllDbs3),
+ Acc+1
+ end, 0, lists:seq(1, 6)),
+ etap:is(6, NumCreated, "Created all databases."),
+
+ lists:foreach(fun(Int) ->
+ ok = couch_server:delete(MkDbName(Int), [])
+ end, lists:seq(1, 6)),
+
+ {ok, AllDbs4} = couch_server:all_databases(),
+ NumDeleted = lists:foldl(fun(Int, Acc) ->
+ false = lists:member(MkDbName(Int), AllDbs4),
+ Acc+1
+ end, 0, lists:seq(1, 6)),
+ etap:is(6, NumDeleted, "Deleted all databases."),
+
+ ok.
diff --git a/1.1.x/test/etap/080-config-get-set.t b/1.1.x/test/etap/080-config-get-set.t
new file mode 100755
index 00000000..a4a8577a
--- /dev/null
+++ b/1.1.x/test/etap/080-config-get-set.t
@@ -0,0 +1,128 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+default_config() ->
+ test_util:build_file("etc/couchdb/default_dev.ini").
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(12),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ % start couch_config with default
+ couch_config:start_link([default_config()]),
+
+
+ % Check that we can get values
+
+
+ etap:fun_is(
+ fun(List) -> length(List) > 0 end,
+ couch_config:all(),
+ "Data was loaded from the INI file."
+ ),
+
+ etap:fun_is(
+ fun(List) -> length(List) > 0 end,
+ couch_config:get("daemons"),
+ "There are settings in the [daemons] section of the INI file."
+ ),
+
+ etap:is(
+ couch_config:get("httpd_design_handlers", "_view"),
+ "{couch_httpd_view, handle_view_req}",
+ "The {httpd_design_handlers, view} is the expected default."
+ ),
+
+ etap:is(
+ couch_config:get("httpd", "foo", "bar"),
+ "bar",
+ "Returns the default when key doesn't exist in config."
+ ),
+
+ etap:is(
+ couch_config:get("httpd", "foo"),
+ undefined,
+ "The default default is the atom 'undefined'."
+ ),
+
+ etap:is(
+ couch_config:get("httpd", "port", "bar"),
+ "5984",
+ "Only returns the default when the config setting does not exist."
+ ),
+
+
+ % Check that setting values works.
+
+
+ ok = couch_config:set("log", "level", "severe", false),
+
+ etap:is(
+ couch_config:get("log", "level"),
+ "severe",
+ "Non persisted changes take effect."
+ ),
+
+ etap:is(
+ couch_config:get("new_section", "bizzle"),
+ undefined,
+ "Section 'new_section' does not exist."
+ ),
+
+ ok = couch_config:set("new_section", "bizzle", "bang", false),
+
+ etap:is(
+ couch_config:get("new_section", "bizzle"),
+ "bang",
+ "New section 'new_section' was created for a new key/value pair."
+ ),
+
+
+ % Check that deleting works
+
+
+ ok = couch_config:delete("new_section", "bizzle", false),
+ etap:is(
+ couch_config:get("new_section", "bizzle"),
+ undefined,
+ "Deleting sets the value to \"\""
+ ),
+
+
+ % Check get/set/delete with binary strings
+
+ ok = couch_config:set(<<"foo">>, <<"bar">>, <<"baz">>, false),
+ etap:is(
+ couch_config:get(<<"foo">>, <<"bar">>),
+ <<"baz">>,
+ "Can get and set with binary section and key values."
+ ),
+ ok = couch_config:delete(<<"foo">>, <<"bar">>, false),
+ etap:is(
+ couch_config:get(<<"foo">>, <<"bar">>),
+ undefined,
+ "Deleting with binary section/key pairs sets the value to \"\""
+ ),
+
+ ok.
diff --git a/1.1.x/test/etap/081-config-override.1.ini b/1.1.x/test/etap/081-config-override.1.ini
new file mode 100644
index 00000000..55451dad
--- /dev/null
+++ b/1.1.x/test/etap/081-config-override.1.ini
@@ -0,0 +1,22 @@
+; Licensed to the Apache Software Foundation (ASF) under one
+; or more contributor license agreements. See the NOTICE file
+; distributed with this work for additional information
+; regarding copyright ownership. The ASF licenses this file
+; to you under the Apache License, Version 2.0 (the
+; "License"); you may not use this file except in compliance
+; with the License. You may obtain a copy of the License at
+;
+; http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing,
+; software distributed under the License is distributed on an
+; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+; KIND, either express or implied. See the License for the
+; specific language governing permissions and limitations
+; under the License.
+
+[couchdb]
+max_dbs_open=10
+
+[httpd]
+port=4895
diff --git a/1.1.x/test/etap/081-config-override.2.ini b/1.1.x/test/etap/081-config-override.2.ini
new file mode 100644
index 00000000..5f46357f
--- /dev/null
+++ b/1.1.x/test/etap/081-config-override.2.ini
@@ -0,0 +1,22 @@
+; Licensed to the Apache Software Foundation (ASF) under one
+; or more contributor license agreements. See the NOTICE file
+; distributed with this work for additional information
+; regarding copyright ownership. The ASF licenses this file
+; to you under the Apache License, Version 2.0 (the
+; "License"); you may not use this file except in compliance
+; with the License. You may obtain a copy of the License at
+;
+; http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing,
+; software distributed under the License is distributed on an
+; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+; KIND, either express or implied. See the License for the
+; specific language governing permissions and limitations
+; under the License.
+
+[httpd]
+port = 80
+
+[fizbang]
+unicode = normalized
diff --git a/1.1.x/test/etap/081-config-override.t b/1.1.x/test/etap/081-config-override.t
new file mode 100755
index 00000000..01f8b4c2
--- /dev/null
+++ b/1.1.x/test/etap/081-config-override.t
@@ -0,0 +1,216 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+default_config() ->
+ test_util:build_file("etc/couchdb/default_dev.ini").
+
+local_config_1() ->
+ test_util:source_file("test/etap/081-config-override.1.ini").
+
+local_config_2() ->
+ test_util:source_file("test/etap/081-config-override.2.ini").
+
+local_config_write() ->
+ test_util:build_file("test/etap/temp.081").
+
+% Run tests and wait for the config gen_server to shut down.
+run_tests(IniFiles, Tests) ->
+ {ok, Pid} = couch_config:start_link(IniFiles),
+ erlang:monitor(process, Pid),
+ Tests(),
+ couch_config:stop(),
+ receive
+ {'DOWN', _, _, Pid, _} -> ok;
+ _Other -> etap:diag("OTHER: ~p~n", [_Other])
+ after
+ 1000 -> throw({timeout_error, config_stop})
+ end.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(17),
+
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+
+ CheckStartStop = fun() -> ok end,
+ run_tests([default_config()], CheckStartStop),
+
+ CheckDefaults = fun() ->
+ etap:is(
+ couch_config:get("couchdb", "max_dbs_open"),
+ "100",
+ "{couchdb, max_dbs_open} is 100 by defualt."
+ ),
+
+ etap:is(
+ couch_config:get("httpd","port"),
+ "5984",
+ "{httpd, port} is 5984 by default"
+ ),
+
+ etap:is(
+ couch_config:get("fizbang", "unicode"),
+ undefined,
+ "{fizbang, unicode} is undefined by default"
+ )
+ end,
+
+ run_tests([default_config()], CheckDefaults),
+
+
+ % Check that subsequent files override values appropriately
+
+ CheckOverride = fun() ->
+ etap:is(
+ couch_config:get("couchdb", "max_dbs_open"),
+ "10",
+ "{couchdb, max_dbs_open} was overriden with the value 10"
+ ),
+
+ etap:is(
+ couch_config:get("httpd", "port"),
+ "4895",
+ "{httpd, port} was overriden with the value 4895"
+ )
+ end,
+
+ run_tests([default_config(), local_config_1()], CheckOverride),
+
+
+ % Check that overrides can create new sections
+
+ CheckOverride2 = fun() ->
+ etap:is(
+ couch_config:get("httpd", "port"),
+ "80",
+ "{httpd, port} is overriden with the value 80"
+ ),
+
+ etap:is(
+ couch_config:get("fizbang", "unicode"),
+ "normalized",
+ "{fizbang, unicode} was created by override INI file"
+ )
+ end,
+
+ run_tests([default_config(), local_config_2()], CheckOverride2),
+
+
+ % Check that values can be overridden multiple times
+
+ CheckOverride3 = fun() ->
+ etap:is(
+ couch_config:get("httpd", "port"),
+ "80",
+ "{httpd, port} value was taken from the last specified INI file."
+ )
+ end,
+
+ run_tests(
+ [default_config(), local_config_1(), local_config_2()],
+ CheckOverride3
+ ),
+
+ % Check persistence to last file.
+
+ % Empty the file in case it exists.
+ {ok, Fd} = file:open(local_config_write(), write),
+ ok = file:truncate(Fd),
+ ok = file:close(Fd),
+
+ % Open and write a value
+ CheckCanWrite = fun() ->
+ etap:is(
+ couch_config:get("httpd", "port"),
+ "5984",
+ "{httpd, port} is still 5984 by default"
+ ),
+
+ etap:is(
+ couch_config:set("httpd", "port", "8080"),
+ ok,
+ "Writing {httpd, port} is kosher."
+ ),
+
+ etap:is(
+ couch_config:get("httpd", "port"),
+ "8080",
+ "{httpd, port} was updated to 8080 successfully."
+ ),
+
+ etap:is(
+ couch_config:delete("httpd", "bind_address"),
+ ok,
+ "Deleting {httpd, bind_address} succeeds"
+ ),
+
+ etap:is(
+ couch_config:get("httpd", "bind_address"),
+ undefined,
+ "{httpd, bind_address} was actually deleted."
+ )
+ end,
+
+ run_tests([default_config(), local_config_write()], CheckCanWrite),
+
+ % Open and check where we don't expect persistence.
+
+ CheckDidntWrite = fun() ->
+ etap:is(
+ couch_config:get("httpd", "port"),
+ "5984",
+ "{httpd, port} was not persisted to the primary INI file."
+ ),
+
+ etap:is(
+ couch_config:get("httpd", "bind_address"),
+ "127.0.0.1",
+ "{httpd, bind_address} was not deleted form the primary INI file."
+ )
+ end,
+
+ run_tests([default_config()], CheckDidntWrite),
+
+ % Open and check we have only the persistence we expect.
+ CheckDidWrite = fun() ->
+ etap:is(
+ couch_config:get("httpd", "port"),
+ "8080",
+ "{httpd, port} is still 8080 after reopening the config."
+ ),
+
+ etap:is(
+ couch_config:get("httpd", "bind_address"),
+ undefined,
+ "{httpd, bind_address} is still \"\" after reopening."
+ )
+ end,
+
+ run_tests([local_config_write()], CheckDidWrite),
+
+ ok.
diff --git a/1.1.x/test/etap/082-config-register.t b/1.1.x/test/etap/082-config-register.t
new file mode 100755
index 00000000..191ba8f8
--- /dev/null
+++ b/1.1.x/test/etap/082-config-register.t
@@ -0,0 +1,98 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+default_config() ->
+ test_util:build_file("etc/couchdb/default_dev.ini").
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(6),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_config:start_link([default_config()]),
+
+ etap:is(
+ couch_config:get("httpd", "port"),
+ "5984",
+ "{httpd, port} is 5984 by default."
+ ),
+
+ ok = couch_config:set("httpd", "port", "4895", false),
+
+ etap:is(
+ couch_config:get("httpd", "port"),
+ "4895",
+ "{httpd, port} changed to 4895"
+ ),
+
+ SentinelFunc = fun() ->
+ % Ping/Pong to make sure we wait for this
+ % process to die
+ receive {ping, From} -> From ! pong end
+ end,
+ SentinelPid = spawn(SentinelFunc),
+
+ couch_config:register(
+ fun("httpd", "port", Value) ->
+ etap:is(Value, "8080", "Registered function got notification.")
+ end,
+ SentinelPid
+ ),
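+
+ % The callback stays registered only while SentinelPid is alive; killing
+ % the sentinel below is what de-registers it, which the final port change
+ % then checks implicitly.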
+
+ ok = couch_config:set("httpd", "port", "8080", false),
+
+ % Implicitly checking that we *don't* call the function
+ etap:is(
+ couch_config:get("httpd", "bind_address"),
+ "127.0.0.1",
+ "{httpd, bind_address} is not '0.0.0.0'"
+ ),
+ ok = couch_config:set("httpd", "bind_address", "0.0.0.0", false),
+
+ % Ping-Pong kill process
+ SentinelPid ! {ping, self()},
+ receive
+ _Any -> ok
+ after 1000 ->
+ throw({timeout_error, registered_pid})
+ end,
+
+ ok = couch_config:set("httpd", "port", "80", false),
+ etap:is(
+ couch_config:get("httpd", "port"),
+ "80",
+ "Implicitly test that the function got de-registered"
+ ),
+
+ % test passing of Persist flag
+ couch_config:register(
+ fun("httpd", _, _, Persist) ->
+ etap:is(Persist, false, "Registered function received the Persist flag.")
+ end),
+ ok = couch_config:set("httpd", "port", "80", false),
+
+ ok.
diff --git a/1.1.x/test/etap/083-config-no-files.t b/1.1.x/test/etap/083-config-no-files.t
new file mode 100755
index 00000000..bc40ec9d
--- /dev/null
+++ b/1.1.x/test/etap/083-config-no-files.t
@@ -0,0 +1,55 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+default_config() ->
+ test_util:build_file("etc/couchdb/default_dev.ini").
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(3),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_config:start_link([]),
+
+ etap:fun_is(
+ fun(KVPairs) -> length(KVPairs) == 0 end,
+ couch_config:all(),
+ "No INI files specified returns 0 key/value pairs."
+ ),
+
+ ok = couch_config:set("httpd", "port", "80", false),
+
+ etap:is(
+ couch_config:get("httpd", "port"),
+ "80",
+ "Created a new non-persisted k/v pair."
+ ),
+
+ ok = couch_config:set("httpd", "bind_address", "127.0.0.1", false),
+ etap:is(
+ couch_config:get("httpd", "bind_address"),
+ "127.0.0.1",
+ "Asking for a persistent key/value pair doesn't choke."
+ ),
+
+ ok.
diff --git a/1.1.x/test/etap/090-task-status.t b/1.1.x/test/etap/090-task-status.t
new file mode 100755
index 00000000..b278de7f
--- /dev/null
+++ b/1.1.x/test/etap/090-task-status.t
@@ -0,0 +1,213 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(16),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+check_status(Pid,ListPropLists) ->
+ From = list_to_binary(pid_to_list(Pid)),
+ Element = lists:foldl(
+ fun(PropList,Acc) ->
+ case couch_util:get_value(pid,PropList) of
+ From ->
+ [PropList | Acc];
+ _ ->
+ Acc
+ end
+ end,
+ [], ListPropLists
+ ),
+ couch_util:get_value(status,hd(Element)).
+
+loop() ->
+ receive
+ {add, From} ->
+ Resp = couch_task_status:add_task("type", "task", "init"),
+ From ! {ok, self(), Resp},
+ loop();
+ {update, Status, From} ->
+ Resp = couch_task_status:update(Status),
+ From ! {ok, self(), Resp},
+ loop();
+ {update_frequency, Msecs, From} ->
+ Resp = couch_task_status:set_update_frequency(Msecs),
+ From ! {ok, self(), Resp},
+ loop();
+ {done, From} ->
+ From ! {ok, self(), ok}
+ end.
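+
+% couch_task_status ties a task to the process that registers it, so each
+% spawned updater owns one task and drives add/update/set_update_frequency
+% through this message loop on the test's behalf.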
+
+call(Pid, Command) ->
+ Pid ! {Command, self()},
+ wait(Pid).
+
+call(Pid, Command, Arg) ->
+ Pid ! {Command, Arg, self()},
+ wait(Pid).
+
+wait(Pid) ->
+ receive
+ {ok, Pid, Msg} -> Msg
+ after 1000 ->
+ throw(timeout_error)
+ end.
+
+test() ->
+ {ok, TaskStatusPid} = couch_task_status:start_link(),
+
+ TaskUpdater = fun() -> loop() end,
+ % create three updaters
+ Pid1 = spawn(TaskUpdater),
+ Pid2 = spawn(TaskUpdater),
+ Pid3 = spawn(TaskUpdater),
+
+ ok = call(Pid1, add),
+ etap:is(
+ length(couch_task_status:all()),
+ 1,
+ "Started a task"
+ ),
+
+ etap:is(
+ call(Pid1, add),
+ {add_task_error, already_registered},
+ "Unable to register multiple tasks for a single Pid."
+ ),
+
+ etap:is(
+ check_status(Pid1, couch_task_status:all()),
+ <<"init">>,
+ "Task status was set to 'init'."
+ ),
+
+ call(Pid1,update,"running"),
+ etap:is(
+ check_status(Pid1,couch_task_status:all()),
+ <<"running">>,
+ "Status updated to 'running'."
+ ),
+
+
+ call(Pid2,add),
+ etap:is(
+ length(couch_task_status:all()),
+ 2,
+ "Started a second task."
+ ),
+
+ etap:is(
+ check_status(Pid2, couch_task_status:all()),
+ <<"init">>,
+ "Second tasks's status was set to 'init'."
+ ),
+
+ call(Pid2, update, "running"),
+ etap:is(
+ check_status(Pid2, couch_task_status:all()),
+ <<"running">>,
+ "Second task's status updated to 'running'."
+ ),
+
+
+ call(Pid3, add),
+ etap:is(
+ length(couch_task_status:all()),
+ 3,
+ "Registered a third task."
+ ),
+
+ etap:is(
+ check_status(Pid3, couch_task_status:all()),
+ <<"init">>,
+ "Third tasks's status was set to 'init'."
+ ),
+
+ call(Pid3, update, "running"),
+ etap:is(
+ check_status(Pid3, couch_task_status:all()),
+ <<"running">>,
+ "Third task's status updated to 'running'."
+ ),
+
+
+ call(Pid3, update_frequency, 500),
+ call(Pid3, update, "still running"),
+ etap:is(
+ check_status(Pid3, couch_task_status:all()),
+ <<"still running">>,
+ "Third task's status updated to 'still running'."
+ ),
+
+ call(Pid3, update, "skip this update"),
+ etap:is(
+ check_status(Pid3, couch_task_status:all()),
+ <<"still running">>,
+ "Status update dropped because of frequency limit."
+ ),
+
+ call(Pid3, update_frequency, 0),
+ call(Pid3, update, "don't skip"),
+ etap:is(
+ check_status(Pid3, couch_task_status:all()),
+ <<"don't skip">>,
+ "Status updated after reseting frequency limit."
+ ),
+
+
+ call(Pid1, done),
+ etap:is(
+ length(couch_task_status:all()),
+ 2,
+ "First task finished."
+ ),
+
+ call(Pid2, done),
+ etap:is(
+ length(couch_task_status:all()),
+ 1,
+ "Second task finished."
+ ),
+
+ call(Pid3, done),
+ etap:is(
+ length(couch_task_status:all()),
+ 0,
+ "Third task finished."
+ ),
+
+ erlang:monitor(process, TaskStatusPid),
+ couch_task_status:stop(),
+ receive
+ {'DOWN', _, _, TaskStatusPid, _} ->
+ ok
+ after
+ 1000 ->
+ throw(timeout_error)
+ end,
+
+ ok.
diff --git a/1.1.x/test/etap/100-ref-counter.t b/1.1.x/test/etap/100-ref-counter.t
new file mode 100755
index 00000000..8f996d04
--- /dev/null
+++ b/1.1.x/test/etap/100-ref-counter.t
@@ -0,0 +1,118 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(8),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+loop() ->
+ receive
+ close -> ok
+ end.
+
+wait() ->
+ receive
+ {'DOWN', _, _, _, _} -> ok
+ after 1000 ->
+ throw(timeout_error)
+ end.
+
+test() ->
+ {ok, RefCtr} = couch_ref_counter:start([]),
+
+ etap:is(
+ couch_ref_counter:count(RefCtr),
+ 1,
+ "A ref_counter is initialized with the calling process as a referer."
+ ),
+
+ ChildPid1 = spawn(fun() -> loop() end),
+
+ % This is largely implicit in that nothing else breaks
+ % as ok is just returned from gen_server:cast()
+ etap:is(
+ couch_ref_counter:drop(RefCtr, ChildPid1),
+ ok,
+ "Dropping an unknown Pid is ignored."
+ ),
+
+ couch_ref_counter:add(RefCtr, ChildPid1),
+ etap:is(
+ couch_ref_counter:count(RefCtr),
+ 2,
+ "Adding a Pid to the ref_counter increases it's count."
+ ),
+
+ couch_ref_counter:add(RefCtr, ChildPid1),
+ etap:is(
+ couch_ref_counter:count(RefCtr),
+ 2,
+ "Readding the same Pid maintains the count but increments it's refs."
+ ),
+
+ couch_ref_counter:drop(RefCtr, ChildPid1),
+ etap:is(
+ couch_ref_counter:count(RefCtr),
+ 2,
+ "Droping the doubly added Pid only removes a ref, not a referer."
+ ),
+
+ couch_ref_counter:drop(RefCtr, ChildPid1),
+ etap:is(
+ couch_ref_counter:count(RefCtr),
+ 1,
+ "Dropping the second ref drops the referer."
+ ),
+
+ couch_ref_counter:add(RefCtr, ChildPid1),
+ etap:is(
+ couch_ref_counter:count(RefCtr),
+ 2,
+ "Sanity checking that the Pid was re-added."
+ ),
+
+ erlang:monitor(process, ChildPid1),
+ ChildPid1 ! close,
+ wait(),
+
+ CheckFun = fun
+ (Iter, nil) ->
+ case couch_ref_counter:count(RefCtr) of
+ 1 -> Iter;
+ _ -> nil
+ end;
+ (_, Acc) ->
+ Acc
+ end,
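+
+ % The DOWN-triggered decrement is asynchronous, so the fold below polls the
+ % count up to 10000 times and latches the first iteration at which it drops
+ % back to 1.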
+ Result = lists:foldl(CheckFun, nil, lists:seq(1, 10000)),
+ etap:isnt(
+ Result,
+ nil,
+ "The referer count was decremented automatically on process exit."
+ ),
+
+ ok.
diff --git a/1.1.x/test/etap/110-replication-httpc.t b/1.1.x/test/etap/110-replication-httpc.t
new file mode 100755
index 00000000..39b0755e
--- /dev/null
+++ b/1.1.x/test/etap/110-replication-httpc.t
@@ -0,0 +1,132 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% XXX: Figure out how to -include("couch_rep.hrl")
+-record(http_db, {
+ url,
+ auth = [],
+ resource = "",
+ headers = [
+ {"User-Agent", "CouchDB/"++couch_server:get_version()},
+ {"Accept", "application/json"},
+ {"Accept-Encoding", "gzip"}
+ ],
+ qs = [],
+ method = get,
+ body = nil,
+ options = [
+ {response_format,binary},
+ {inactivity_timeout, 30000}
+ ],
+ retries = 10,
+ pause = 1,
+ conn = nil
+}).
+
+server() ->
+ lists:concat([
+ "http://127.0.0.1:", mochiweb_socket_server:get(couch_httpd, port), "/"
+ ]).
+
+dbname() -> "etap-test-db".
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(6),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_server_sup:start_link(test_util:config_files()),
+ ibrowse:start(),
+ crypto:start(),
+
+ couch_server:delete(list_to_binary(dbname()), []),
+ {ok, Db} = couch_db:create(list_to_binary(dbname()), []),
+
+ test_welcome(),
+ test_binary_url(),
+ test_put(),
+ test_qs(),
+ test_db_exists(),
+
+ couch_db:close(Db),
+ couch_server:delete(list_to_binary(dbname()), []),
+ ok.
+
+test_welcome() ->
+ WelcomeReq = #http_db{url=server()},
+ Expect = {[
+ {<<"couchdb">>, <<"Welcome">>},
+ {<<"version">>, list_to_binary(couch_server:get_version())}
+ ]},
+ etap:is(
+ couch_rep_httpc:request(WelcomeReq),
+ Expect,
+ "welcome request with url-as-list"
+ ).
+
+test_binary_url() ->
+ Req = #http_db{url=list_to_binary(server())},
+ Expect = {[
+ {<<"couchdb">>, <<"Welcome">>},
+ {<<"version">>, list_to_binary(couch_server:get_version())}
+ ]},
+ etap:is(
+ couch_rep_httpc:request(Req),
+ Expect,
+ "welcome request with url-as-binary"
+ ).
+
+test_put() ->
+ Req = #http_db{
+ url = server() ++ dbname() ++ "/",
+ resource = "test_put",
+ body = {[{<<"foo">>, <<"bar">>}]},
+ method = put
+ },
+ {Resp} = couch_rep_httpc:request(Req),
+ etap:ok(couch_util:get_value(<<"ok">>, Resp), "ok:true on upload"),
+ etap:is(<<"test_put">>, couch_util:get_value(<<"id">>, Resp), "id is correct").
+
+test_qs() ->
+ Req = #http_db{
+ url = server() ++ dbname() ++ "/",
+ resource = "foo",
+ qs = [
+ {bar, true},
+ {baz, 1.03},
+ {bif, mochijson2:encode(<<"1-23456">>)}
+ ]
+ },
+ Expect = server() ++ dbname() ++ "/foo?bar=true&baz=1.03&bif=\"1-23456\"",
+ etap:is(
+ couch_rep_httpc:full_url(Req),
+ Expect,
+ "query-string proplist encoding ok"
+ ).
+
+test_db_exists() ->
+ Req1 = #http_db{url=server() ++ dbname() ++ "/"},
+ Req2 = #http_db{url=server() ++ dbname() ++ "_foo/"},
+ etap:is(couch_rep_httpc:db_exists(Req1), Req1, "db_exists true check").
+ % etap:is(couch_rep_httpc:db_exists(Req2), false, "db_exists false check").
diff --git a/1.1.x/test/etap/111-replication-changes-feed.t b/1.1.x/test/etap/111-replication-changes-feed.t
new file mode 100755
index 00000000..358bf1e2
--- /dev/null
+++ b/1.1.x/test/etap/111-replication-changes-feed.t
@@ -0,0 +1,253 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% XXX: Figure out how to -include("couch_db.hrl")
+-record(doc, {id= <<"">>, revs={0, []}, body={[]},
+ attachments=[], deleted=false, meta=[]}).
+
+-record(http_db, {
+ url,
+ auth = [],
+ resource = "",
+ headers = [
+ {"User-Agent", "CouchDB/"++couch_server:get_version()},
+ {"Accept", "application/json"},
+ {"Accept-Encoding", "gzip"}
+ ],
+ qs = [],
+ method = get,
+ body = nil,
+ options = [
+ {response_format,binary},
+ {inactivity_timeout, 30000}
+ ],
+ retries = 10,
+ pause = 1,
+ conn = nil
+}).
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(13),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_server_sup:start_link(test_util:config_files()),
+ ibrowse:start(),
+ crypto:start(),
+
+ couch_server:delete(<<"etap-test-db">>, []),
+ {ok, Db1} = couch_db:create(<<"etap-test-db">>, []),
+ test_all(local),
+ couch_db:close(Db1),
+ couch_server:delete(<<"etap-test-db">>, []),
+
+ couch_server:delete(<<"etap-test-db">>, []),
+ {ok, Db2} = couch_db:create(<<"etap-test-db">>, []),
+ test_all(remote),
+ test_remote_only(),
+ couch_db:close(Db2),
+ couch_server:delete(<<"etap-test-db">>, []),
+
+ ok.
+
+test_all(Type) ->
+ test_unchanged_db(Type),
+ test_simple_change(Type),
+ test_since_parameter(Type),
+ test_continuous_parameter(Type),
+ test_conflicts(Type),
+ test_deleted_conflicts(Type).
+
+test_remote_only() ->
+ test_chunk_reassembly(remote).
+
+test_unchanged_db(Type) ->
+ {ok, Pid} = start_changes_feed(Type, 0, false),
+ etap:is(
+ couch_rep_changes_feed:next(Pid),
+ complete,
+ io_lib:format(
+ "(~p) changes feed for unchanged DB is automatically complete",
+ [Type])
+ ).
+
+test_simple_change(Type) ->
+ Expect = generate_change(),
+ {ok, Pid} = start_changes_feed(Type, 0, false),
+ etap:is(
+ {couch_rep_changes_feed:next(Pid), couch_rep_changes_feed:next(Pid)},
+ {[Expect], complete},
+ io_lib:format("(~p) change one document, get one row", [Type])
+ ).
+
+test_since_parameter(Type) ->
+ {ok, Pid} = start_changes_feed(Type, get_update_seq(), false),
+ etap:is(
+ couch_rep_changes_feed:next(Pid),
+ complete,
+ io_lib:format(
+ "(~p) since query-string parameter allows us to skip changes",
+ [Type])
+ ).
+
+test_continuous_parameter(Type) ->
+ {ok, Pid} = start_changes_feed(Type, get_update_seq(), true),
+
+ % make the changes_feed request before the next update
+ Self = self(),
+ spawn(fun() ->
+ Change = couch_rep_changes_feed:next(Pid),
+ Self ! {actual, Change}
+ end),
+
+ Expect = generate_change(),
+ etap:is(
+ receive {actual, Actual} -> Actual end,
+ [Expect],
+ io_lib:format(
+ "(~p) feed=continuous query-string parameter picks up new changes",
+ [Type])
+ ),
+
+ ok = couch_rep_changes_feed:stop(Pid).
+
+test_conflicts(Type) ->
+ Since = get_update_seq(),
+ Expect = generate_conflict(),
+ {ok, Pid} = start_changes_feed(Type, Since, false),
+ etap:is(
+ {couch_rep_changes_feed:next(Pid), couch_rep_changes_feed:next(Pid)},
+ {[Expect], complete},
+ io_lib:format("(~p) conflict revisions show up in feed", [Type])
+ ).
+
+test_deleted_conflicts(Type) ->
+ Since = get_update_seq(),
+ {ExpectProps} = generate_conflict(),
+
+ %% delete the conflict revision
+ Id = couch_util:get_value(<<"id">>, ExpectProps),
+ [Win, {[{<<"rev">>, Lose}]}] = couch_util:get_value(<<"changes">>, ExpectProps),
+ Doc = couch_doc:from_json_obj({[
+ {<<"_id">>, Id},
+ {<<"_rev">>, Lose},
+ {<<"_deleted">>, true}
+ ]}),
+ Db = get_db(),
+ {ok, Rev} = couch_db:update_doc(Db, Doc, [full_commit]),
+ couch_db:close(Db),
+
+ Expect = {[
+ {<<"seq">>, get_update_seq()},
+ {<<"id">>, Id},
+ {<<"changes">>, [Win, {[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}]}
+ ]},
+
+ {ok, Pid} = start_changes_feed(Type, Since, false),
+ etap:is(
+ {couch_rep_changes_feed:next(Pid), couch_rep_changes_feed:next(Pid)},
+ {[Expect], complete},
+ io_lib:format("(~p) deleted conflict revisions show up in feed", [Type])
+ ).
+
+test_chunk_reassembly(Type) ->
+ Since = get_update_seq(),
+ Expect = [generate_change() || _I <- lists:seq(1,30)],
+ {ok, Pid} = start_changes_feed(Type, Since, false),
+ etap:is(
+ get_all_changes(Pid, []),
+ Expect,
+ io_lib:format("(~p) reassembles chunks split across TCP frames",
+ [Type])
+ ).
+
+get_all_changes(Pid, Acc) ->
+ case couch_rep_changes_feed:next(Pid) of
+ complete ->
+ lists:flatten(lists:reverse(Acc));
+ Else ->
+ get_all_changes(Pid, [Else|Acc])
+ end.
+
+generate_change() ->
+ generate_change(couch_uuids:random()).
+
+generate_change(Id) ->
+ generate_change(Id, {[]}).
+
+generate_change(Id, EJson) ->
+ Doc = couch_doc:from_json_obj(EJson),
+ Db = get_db(),
+ {ok, Rev} = couch_db:update_doc(Db, Doc#doc{id = Id}, [full_commit]),
+ couch_db:close(Db),
+ {[
+ {<<"seq">>, get_update_seq()},
+ {<<"id">>, Id},
+ {<<"changes">>, [{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}]}
+ ]}.
+
+generate_conflict() ->
+ Id = couch_uuids:random(),
+ Db = get_db(),
+ Doc1 = (couch_doc:from_json_obj({[{<<"foo">>, <<"bar">>}]}))#doc{id = Id},
+ Doc2 = (couch_doc:from_json_obj({[{<<"foo">>, <<"baz">>}]}))#doc{id = Id},
+ {ok, Rev1} = couch_db:update_doc(Db, Doc1, [full_commit]),
+ {ok, Rev2} = couch_db:update_doc(Db, Doc2, [full_commit, all_or_nothing]),
+
+ %% relies on undocumented CouchDB conflict winner algo and revision sorting!
+ RevList = [{[{<<"rev">>, couch_doc:rev_to_str(R)}]} || R
+ <- lists:sort(fun(A,B) -> B<A end, [Rev1,Rev2])],
+ {[
+ {<<"seq">>, get_update_seq()},
+ {<<"id">>, Id},
+ {<<"changes">>, RevList}
+ ]}.
+
+get_db() ->
+ {ok, Db} = couch_db:open(<<"etap-test-db">>, []),
+ Db.
+
+get_dbname(local) ->
+ "etap-test-db";
+get_dbname(remote) ->
+ server() ++ "etap-test-db/".
+
+server() ->
+ lists:concat([
+ "http://127.0.0.1:", mochiweb_socket_server:get(couch_httpd, port), "/"
+ ]).
+
+get_update_seq() ->
+ Db = get_db(),
+ Seq = couch_db:get_update_seq(Db),
+ couch_db:close(Db),
+ Seq.
+
+start_changes_feed(local, Since, Continuous) ->
+ Props = [{<<"continuous">>, Continuous}],
+ couch_rep_changes_feed:start_link(self(), get_db(), Since, Props);
+start_changes_feed(remote, Since, Continuous) ->
+ Props = [{<<"continuous">>, Continuous}],
+ Db = #http_db{url = get_dbname(remote)},
+ couch_rep_changes_feed:start_link(self(), Db, Since, Props).
diff --git a/1.1.x/test/etap/112-replication-missing-revs.t b/1.1.x/test/etap/112-replication-missing-revs.t
new file mode 100755
index 00000000..39280aee
--- /dev/null
+++ b/1.1.x/test/etap/112-replication-missing-revs.t
@@ -0,0 +1,211 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% XXX: Figure out how to -include("couch_db.hrl")
+
+-record(doc, {id= <<"">>, revs={0, []}, body={[]},
+ attachments=[], deleted=false, meta=[]}).
+
+-record(http_db, {
+ url,
+ auth = [],
+ resource = "",
+ headers = [
+ {"User-Agent", "CouchDB/"++couch_server:get_version()},
+ {"Accept", "application/json"},
+ {"Accept-Encoding", "gzip"}
+ ],
+ qs = [],
+ method = get,
+ body = nil,
+ options = [
+ {response_format,binary},
+ {inactivity_timeout, 30000}
+ ],
+ retries = 10,
+ pause = 1,
+ conn = nil
+}).
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(12),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_server_sup:start_link(test_util:config_files()),
+ ibrowse:start(),
+ crypto:start(),
+
+ couch_server:delete(<<"etap-test-source">>, []),
+ couch_server:delete(<<"etap-test-target">>, []),
+
+ Dbs1 = setup(),
+ test_all(local, local),
+ ok = teardown(Dbs1),
+
+ Dbs2 = setup(),
+ test_all(local, remote),
+ ok = teardown(Dbs2),
+
+ Dbs3 = setup(),
+ test_all(remote, local),
+ ok = teardown(Dbs3),
+
+ Dbs4 = setup(),
+ test_all(remote, remote),
+ ok = teardown(Dbs4),
+
+ ok.
+
+test_all(SrcType, TgtType) ->
+ test_unchanged_db(SrcType, TgtType),
+ test_multiple_changes(SrcType, TgtType),
+ test_changes_not_missing(SrcType, TgtType).
+
+test_unchanged_db(SrcType, TgtType) ->
+ {ok, Pid1} = start_changes_feed(SrcType, 0, false),
+ {ok, Pid2} = start_missing_revs(TgtType, Pid1),
+ etap:is(
+ couch_rep_missing_revs:next(Pid2),
+ complete,
+ io_lib:format(
+ "(~p, ~p) no missing revs if source is unchanged",
+ [SrcType, TgtType])
+ ).
+
+test_multiple_changes(SrcType, TgtType) ->
+ Expect = {2, [generate_change(), generate_change()]},
+ {ok, Pid1} = start_changes_feed(SrcType, 0, false),
+ {ok, Pid2} = start_missing_revs(TgtType, Pid1),
+ etap:is(
+ get_all_missing_revs(Pid2, {0, []}),
+ Expect,
+ io_lib:format("(~p, ~p) add src docs, get missing tgt revs + high seq",
+ [SrcType, TgtType])
+ ).
+
+test_changes_not_missing(SrcType, TgtType) ->
+ %% put identical changes on source and target
+ Id = couch_uuids:random(),
+ {Id, _Seq, [Rev]} = Expect = generate_change(Id, {[]}, get_db(source)),
+ {Id, _, [Rev]} = generate_change(Id, {[]}, get_db(target)),
+
+ %% confirm that this change is not in missing revs feed
+ {ok, Pid1} = start_changes_feed(SrcType, 0, false),
+ {ok, Pid2} = start_missing_revs(TgtType, Pid1),
+ {_HighSeq, AllRevs} = get_all_missing_revs(Pid2, {0, []}),
+
+ %% etap:none/3 has a bug, so inline the equivalent lists:member/2 check here
+ etap:is(
+ lists:member(Expect, AllRevs),
+ false,
+ io_lib:format(
+ "(~p, ~p) skip revs that already exist on target",
+ [SrcType, TgtType])
+ ).
+
+generate_change() ->
+ generate_change(couch_uuids:random()).
+
+generate_change(Id) ->
+ generate_change(Id, {[]}).
+
+generate_change(Id, EJson) ->
+ generate_change(Id, EJson, get_db(source)).
+
+generate_change(Id, EJson, Db) ->
+ Doc = couch_doc:from_json_obj(EJson),
+ Seq = get_update_seq(),
+ {ok, Rev} = couch_db:update_doc(Db, Doc#doc{id = Id}, [full_commit]),
+ couch_db:close(Db),
+ {Id, Seq+1, [Rev]}.
+
+get_all_missing_revs(Pid, {HighSeq, Revs}) ->
+ case couch_rep_missing_revs:next(Pid) of
+ complete ->
+ {HighSeq, lists:flatten(lists:reverse(Revs))};
+ {Seq, More} ->
+ get_all_missing_revs(Pid, {Seq, [More|Revs]})
+ end.
+
+get_db(source) ->
+ {ok, Db} = couch_db:open(<<"etap-test-source">>, []),
+ Db;
+get_db(target) ->
+ {ok, Db} = couch_db:open(<<"etap-test-target">>, []),
+ Db.
+
+get_update_seq() ->
+ Db = get_db(source),
+ Seq = couch_db:get_update_seq(Db),
+ couch_db:close(Db),
+ Seq.
+
+setup() ->
+ {ok, DbA} = couch_db:create(<<"etap-test-source">>, []),
+ {ok, DbB} = couch_db:create(<<"etap-test-target">>, []),
+ [DbA, DbB].
+
+teardown([DbA, DbB]) ->
+ couch_db:close(DbA),
+ couch_db:close(DbB),
+ couch_server:delete(<<"etap-test-source">>, []),
+ couch_server:delete(<<"etap-test-target">>, []),
+ ok.
+
+start_changes_feed(local, Since, Continuous) ->
+ Props = [{<<"continuous">>, Continuous}],
+ couch_rep_changes_feed:start_link(self(), get_db(source), Since, Props);
+start_changes_feed(remote, Since, Continuous) ->
+ Props = [{<<"continuous">>, Continuous}],
+ Db = #http_db{url = server() ++ "etap-test-source/"},
+ couch_rep_changes_feed:start_link(self(), Db, Since, Props).
+
+server() ->
+ lists:concat([
+ "http://127.0.0.1:", mochiweb_socket_server:get(couch_httpd, port), "/"
+ ]).
+
+couch_rep_pid(Db) ->
+ spawn(fun() -> couch_rep_pid_loop(Db) end).
+
+couch_rep_pid_loop(Db) ->
+ receive
+ {'$gen_call', From, get_target_db} ->
+ gen_server:reply(From, {ok, Db})
+ end,
+ couch_rep_pid_loop(Db).
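+
+% Minimal stand-in for the replication main process: as exercised here, the
+% missing-revs worker only issues gen_server:call(MainPid, get_target_db),
+% so a bare receive loop replying {ok, Db} suffices.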
+
+start_missing_revs(local, Changes) ->
+ TargetDb = get_db(target),
+ MainPid = couch_rep_pid(TargetDb),
+ couch_rep_missing_revs:start_link(MainPid, TargetDb, Changes, []);
+start_missing_revs(remote, Changes) ->
+ TargetDb = #http_db{url = server() ++ "etap-test-target/"},
+ MainPid = couch_rep_pid(TargetDb),
+ couch_rep_missing_revs:start_link(MainPid, TargetDb, Changes, []).
diff --git a/1.1.x/test/etap/113-replication-attachment-comp.t b/1.1.x/test/etap/113-replication-attachment-comp.t
new file mode 100755
index 00000000..e30a96bc
--- /dev/null
+++ b/1.1.x/test/etap/113-replication-attachment-comp.t
@@ -0,0 +1,314 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(user_ctx, {
+ name = null,
+ roles = [],
+ handler
+}).
+
+test_db_a_name() ->
+ <<"couch_test_rep_att_comp_a">>.
+
+test_db_b_name() ->
+ <<"couch_test_rep_att_comp_b">>.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(45),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_server_sup:start_link(test_util:config_files()),
+ put(addr, couch_config:get("httpd", "bind_address", "127.0.0.1")),
+ put(port, integer_to_list(mochiweb_socket_server:get(couch_httpd, port))),
+ application:start(inets),
+ ibrowse:start(),
+ timer:sleep(1000),
+
+ %
+ % test pull replication
+ %
+
+ delete_db(test_db_a_name()),
+ delete_db(test_db_b_name()),
+ create_db(test_db_a_name()),
+ create_db(test_db_b_name()),
+
+ % enable compression
+ couch_config:set("attachments", "compression_level", "8", false),
+ couch_config:set("attachments", "compressible_types", "text/*", false),
+
+ % store doc with text attachment in DB A
+ put_text_att(test_db_a_name()),
+
+ % disable attachment compression
+ couch_config:set("attachments", "compression_level", "0", false),
+
+ % do pull replication
+ do_pull_replication(test_db_a_name(), test_db_b_name()),
+
+ % verify that DB B has the attachment stored in compressed form
+ check_att_is_compressed(test_db_b_name()),
+ check_server_can_decompress_att(test_db_b_name()),
+ check_att_stubs(test_db_a_name(), test_db_b_name()),
+
+ %
+ % test push replication
+ %
+
+ delete_db(test_db_a_name()),
+ delete_db(test_db_b_name()),
+ create_db(test_db_a_name()),
+ create_db(test_db_b_name()),
+
+ % enable compression
+ couch_config:set("attachments", "compression_level", "8", false),
+ couch_config:set("attachments", "compressible_types", "text/*", false),
+
+ % store doc with text attachment in DB A
+ put_text_att(test_db_a_name()),
+
+ % disable attachment compression
+ couch_config:set("attachments", "compression_level", "0", false),
+
+ % do push replication
+ do_push_replication(test_db_a_name(), test_db_b_name()),
+
+ % verify that DB B has the attachment stored in compressed form
+ check_att_is_compressed(test_db_b_name()),
+ check_server_can_decompress_att(test_db_b_name()),
+ check_att_stubs(test_db_a_name(), test_db_b_name()),
+
+ %
+ % test local replication
+ %
+
+ delete_db(test_db_a_name()),
+ delete_db(test_db_b_name()),
+ create_db(test_db_a_name()),
+ create_db(test_db_b_name()),
+
+ % enable compression
+ couch_config:set("attachments", "compression_level", "8", false),
+ couch_config:set("attachments", "compressible_types", "text/*", false),
+
+ % store doc with text attachment in DB A
+ put_text_att(test_db_a_name()),
+
+ % disable attachment compression
+ couch_config:set("attachments", "compression_level", "0", false),
+
+ % do local-local replication
+ do_local_replication(test_db_a_name(), test_db_b_name()),
+
+ % verify that DB B has the attachment stored in compressed form
+ check_att_is_compressed(test_db_b_name()),
+ check_server_can_decompress_att(test_db_b_name()),
+ check_att_stubs(test_db_a_name(), test_db_b_name()),
+
+ timer:sleep(3000), % to avoid mochiweb socket closed exceptions
+ delete_db(test_db_a_name()),
+ delete_db(test_db_b_name()),
+ couch_server_sup:stop(),
+ ok.
+
+put_text_att(DbName) ->
+ {ok, {{_, Code, _}, _Headers, _Body}} = http:request(
+ put,
+ {db_url(DbName) ++ "/testdoc1/readme.txt", [],
+ "text/plain", test_text_data()},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 201, "Created text attachment"),
+ ok.
+
+do_pull_replication(SourceDbName, TargetDbName) ->
+ RepObj = {[
+ {<<"source">>, list_to_binary(db_url(SourceDbName))},
+ {<<"target">>, TargetDbName}
+ ]},
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ post,
+ {rep_url(), [],
+ "application/json", list_to_binary(couch_util:json_encode(RepObj))},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "Pull replication successfully triggered"),
+ Json = couch_util:json_decode(Body),
+ RepOk = couch_util:get_nested_json_value(Json, [<<"ok">>]),
+ etap:is(RepOk, true, "Pull replication completed with success"),
+ ok.
+
+do_push_replication(SourceDbName, TargetDbName) ->
+ RepObj = {[
+ {<<"source">>, SourceDbName},
+ {<<"target">>, list_to_binary(db_url(TargetDbName))}
+ ]},
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ post,
+ {rep_url(), [],
+ "application/json", list_to_binary(couch_util:json_encode(RepObj))},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "Push replication successfully triggered"),
+ Json = couch_util:json_decode(Body),
+ RepOk = couch_util:get_nested_json_value(Json, [<<"ok">>]),
+ etap:is(RepOk, true, "Push replication completed with success"),
+ ok.
+
+do_local_replication(SourceDbName, TargetDbName) ->
+ RepObj = {[
+ {<<"source">>, SourceDbName},
+ {<<"target">>, TargetDbName}
+ ]},
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ post,
+ {rep_url(), [],
+ "application/json", list_to_binary(couch_util:json_encode(RepObj))},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "Local replication successfully triggered"),
+ Json = couch_util:json_decode(Body),
+ RepOk = couch_util:get_nested_json_value(Json, [<<"ok">>]),
+ etap:is(RepOk, true, "Local replication completed with success"),
+ ok.
+
+check_att_is_compressed(DbName) ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url(DbName) ++ "/testdoc1/readme.txt",
+ [{"Accept-Encoding", "gzip"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code for the attachment request is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers),
+    etap:is(Gzipped, true, "The attachment was received in compressed form"),
+ Uncompressed = binary_to_list(zlib:gunzip(list_to_binary(Body))),
+ etap:is(
+ Uncompressed,
+ test_text_data(),
+ "The attachment content is valid after decompression at the client side"
+ ),
+ ok.
+
+check_server_can_decompress_att(DbName) ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url(DbName) ++ "/testdoc1/readme.txt", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code for the attachment request is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers),
+    etap:is(
+        Gzipped, false, "The attachment was not received in compressed form"
+ ),
+ etap:is(
+ Body,
+ test_text_data(),
+ "The attachment content is valid after server decompression"
+ ),
+ ok.
+
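+% Requesting a document with ?att_encoding_info=true adds "encoding" and
+% "encoded_length" fields to each attachment stub, so source and target can
+% be compared on how the attachment is stored without fetching its body.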
+check_att_stubs(SourceDbName, TargetDbName) ->
+ {ok, {{_, Code1, _}, _Headers1, Body1}} = http:request(
+ get,
+ {db_url(SourceDbName) ++ "/testdoc1?att_encoding_info=true", []},
+ [],
+ [{sync, true}]),
+ etap:is(
+ Code1,
+ 200,
+ "HTTP response code is 200 for the source DB doc request"
+ ),
+ Json1 = couch_util:json_decode(Body1),
+ SourceAttStub = couch_util:get_nested_json_value(
+ Json1,
+ [<<"_attachments">>, <<"readme.txt">>]
+ ),
+ {ok, {{_, Code2, _}, _Headers2, Body2}} = http:request(
+ get,
+ {db_url(TargetDbName) ++ "/testdoc1?att_encoding_info=true", []},
+ [],
+ [{sync, true}]),
+ etap:is(
+ Code2,
+ 200,
+ "HTTP response code is 200 for the target DB doc request"
+ ),
+ Json2 = couch_util:json_decode(Body2),
+ TargetAttStub = couch_util:get_nested_json_value(
+ Json2,
+ [<<"_attachments">>, <<"readme.txt">>]
+ ),
+ IdenticalStubs = (SourceAttStub =:= TargetAttStub),
+ etap:is(IdenticalStubs, true, "Attachment stubs are identical"),
+ TargetAttStubLength = couch_util:get_nested_json_value(
+ TargetAttStub,
+ [<<"length">>]
+ ),
+ TargetAttStubEnc = couch_util:get_nested_json_value(
+ TargetAttStub,
+ [<<"encoding">>]
+ ),
+ etap:is(
+ TargetAttStubEnc,
+ <<"gzip">>,
+ "Attachment stub has encoding property set to gzip"
+ ),
+ TargetAttStubEncLength = couch_util:get_nested_json_value(
+ TargetAttStub,
+ [<<"encoded_length">>]
+ ),
+ EncLengthDefined = is_integer(TargetAttStubEncLength),
+ etap:is(
+ EncLengthDefined,
+ true,
+ "Stubs have the encoded_length field properly defined"
+ ),
+ EncLengthSmaller = (TargetAttStubEncLength < TargetAttStubLength),
+ etap:is(
+ EncLengthSmaller,
+ true,
+ "Stubs have the encoded_length field smaller than their length field"
+ ),
+ ok.
+
+admin_user_ctx() ->
+ {user_ctx, #user_ctx{roles=[<<"_admin">>]}}.
+
+create_db(DbName) ->
+ {ok, _} = couch_db:create(DbName, [admin_user_ctx()]).
+
+delete_db(DbName) ->
+ couch_server:delete(DbName, [admin_user_ctx()]).
+
+db_url(DbName) ->
+ "http://" ++ get(addr) ++ ":" ++ get(port) ++ "/" ++
+ binary_to_list(DbName).
+
+rep_url() ->
+ "http://" ++ get(addr) ++ ":" ++ get(port) ++ "/_replicate".
+
+test_text_data() ->
+ {ok, Data} = file:read_file(test_util:source_file("README")),
+ binary_to_list(Data).
diff --git a/1.1.x/test/etap/120-stats-collect.t b/1.1.x/test/etap/120-stats-collect.t
new file mode 100755
index 00000000..dee88765
--- /dev/null
+++ b/1.1.x/test/etap/120-stats-collect.t
@@ -0,0 +1,150 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(11),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail()
+ end,
+ ok.
+
+test() ->
+ couch_stats_collector:start(),
+ ok = test_counters(),
+ ok = test_abs_values(),
+ ok = test_proc_counting(),
+ ok = test_all(),
+ ok.
+
+test_counters() ->
+ AddCount = fun() -> couch_stats_collector:increment(foo) end,
+ RemCount = fun() -> couch_stats_collector:decrement(foo) end,
+ repeat(AddCount, 100),
+ repeat(RemCount, 25),
+ repeat(AddCount, 10),
+ repeat(RemCount, 5),
+ etap:is(
+ couch_stats_collector:get(foo),
+ 80,
+ "Incrememnt tracks correctly."
+ ),
+
+ repeat(RemCount, 80),
+ etap:is(
+ couch_stats_collector:get(foo),
+ 0,
+ "Decremented to zaro."
+ ),
+ ok.
+
+test_abs_values() ->
+ lists:map(fun(Val) ->
+ couch_stats_collector:record(bar, Val)
+ end, lists:seq(1, 15)),
+ etap:is(
+ couch_stats_collector:get(bar),
+ lists:seq(1, 15),
+ "Absolute values are recorded correctly."
+ ),
+
+ couch_stats_collector:clear(bar),
+ etap:is(
+ couch_stats_collector:get(bar),
+ nil,
+ "Absolute values are cleared correctly."
+ ),
+ ok.
+
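+% track_process_count/1 bumps a counter and ties each call to the lifetime
+% of the calling process: when the process dies, the collector should
+% decrement once per tracking call, which is what the assertions below check.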
+test_proc_counting() ->
+ Self = self(),
+ OnePid = spawn(fun() ->
+ couch_stats_collector:track_process_count(hoopla),
+ Self ! reporting,
+ receive sepuku -> ok end
+ end),
+ R1 = erlang:monitor(process, OnePid),
+ receive reporting -> ok end,
+ etap:is(
+ couch_stats_collector:get(hoopla),
+ 1,
+ "track_process_count incrememnts the counter."
+ ),
+
+ TwicePid = spawn(fun() ->
+ couch_stats_collector:track_process_count(hoopla),
+ couch_stats_collector:track_process_count(hoopla),
+ Self ! reporting,
+ receive sepuku -> ok end
+ end),
+ R2 = erlang:monitor(process, TwicePid),
+ receive reporting -> ok end,
+ etap:is(
+ couch_stats_collector:get(hoopla),
+ 3,
+ "track_process_count allows more than one incrememnt per Pid"
+ ),
+
+ OnePid ! sepuku,
+ receive {'DOWN', R1, _, _, _} -> ok end,
+ timer:sleep(250),
+ etap:is(
+ couch_stats_collector:get(hoopla),
+ 2,
+ "Process count is decremented when process exits."
+ ),
+
+ TwicePid ! sepuku,
+ receive {'DOWN', R2, _, _, _} -> ok end,
+ timer:sleep(250),
+ etap:is(
+ couch_stats_collector:get(hoopla),
+ 0,
+ "Process count is decremented for each call to track_process_count."
+ ),
+ ok.
+
+test_all() ->
+ couch_stats_collector:record(bar, 0.0),
+ couch_stats_collector:record(bar, 1.0),
+ etap:is(
+ couch_stats_collector:all(),
+ [{foo, 0}, {hoopla, 0}, {bar, [1.0, 0.0]}],
+ "all/0 returns all counters and absolute values."
+ ),
+
+ etap:is(
+ couch_stats_collector:all(incremental),
+ [{foo, 0}, {hoopla, 0}],
+ "all/1 returns only the specified type."
+ ),
+
+ couch_stats_collector:record(zing, 90),
+ etap:is(
+ couch_stats_collector:all(absolute),
+ [{zing, [90]}, {bar, [1.0, 0.0]}],
+ "all/1 returns only the specified type."
+ ),
+ ok.
+
+repeat(_, 0) ->
+ ok;
+repeat(Fun, Count) ->
+ Fun(),
+ repeat(Fun, Count-1).
diff --git a/1.1.x/test/etap/121-stats-aggregates.cfg b/1.1.x/test/etap/121-stats-aggregates.cfg
new file mode 100644
index 00000000..30e475da
--- /dev/null
+++ b/1.1.x/test/etap/121-stats-aggregates.cfg
@@ -0,0 +1,19 @@
+% Licensed to the Apache Software Foundation (ASF) under one
+% or more contributor license agreements. See the NOTICE file
+% distributed with this work for additional information
+% regarding copyright ownership. The ASF licenses this file
+% to you under the Apache License, Version 2.0 (the
+% "License"); you may not use this file except in compliance
+% with the License. You may obtain a copy of the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing,
+% software distributed under the License is distributed on an
+% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+% KIND, either express or implied. See the License for the
+% specific language governing permissions and limitations
+% under the License.
+
+{testing, stuff, "yay description"}.
+{number, '11', "randomosity"}.
diff --git a/1.1.x/test/etap/121-stats-aggregates.ini b/1.1.x/test/etap/121-stats-aggregates.ini
new file mode 100644
index 00000000..cc5cd218
--- /dev/null
+++ b/1.1.x/test/etap/121-stats-aggregates.ini
@@ -0,0 +1,20 @@
+; Licensed to the Apache Software Foundation (ASF) under one
+; or more contributor license agreements. See the NOTICE file
+; distributed with this work for additional information
+; regarding copyright ownership. The ASF licenses this file
+; to you under the Apache License, Version 2.0 (the
+; "License"); you may not use this file except in compliance
+; with the License. You may obtain a copy of the License at
+;
+; http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing,
+; software distributed under the License is distributed on an
+; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+; KIND, either express or implied. See the License for the
+; specific language governing permissions and limitations
+; under the License.
+
+[stats]
+rate = 10000000 ; We call collect_sample in testing
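+; (rate is in milliseconds, so this timer effectively never fires; the tests
+; drive sampling explicitly via couch_stats_aggregator:collect_sample/0.)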
+samples = [0, 1]
diff --git a/1.1.x/test/etap/121-stats-aggregates.t b/1.1.x/test/etap/121-stats-aggregates.t
new file mode 100755
index 00000000..d678aa9d
--- /dev/null
+++ b/1.1.x/test/etap/121-stats-aggregates.t
@@ -0,0 +1,171 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+ini_file() ->
+ test_util:source_file("test/etap/121-stats-aggregates.ini").
+
+cfg_file() ->
+ test_util:source_file("test/etap/121-stats-aggregates.cfg").
+
+main(_) ->
+ test_util:init_code_path(),
+ etap:plan(17),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail()
+ end,
+ ok.
+
+test() ->
+ couch_config:start_link([ini_file()]),
+ couch_stats_collector:start(),
+ couch_stats_aggregator:start(cfg_file()),
+ ok = test_all_empty(),
+ ok = test_get_empty(),
+ ok = test_count_stats(),
+ ok = test_abs_stats(),
+ ok.
+
+test_all_empty() ->
+ {Aggs} = couch_stats_aggregator:all(),
+
+ etap:is(length(Aggs), 2, "There are only two aggregate types in testing."),
+ etap:is(
+ couch_util:get_value(testing, Aggs),
+ {[{stuff, make_agg(<<"yay description">>,
+ null, null, null, null, null)}]},
+ "{testing, stuff} is empty at start."
+ ),
+ etap:is(
+ couch_util:get_value(number, Aggs),
+ {[{'11', make_agg(<<"randomosity">>,
+ null, null, null, null, null)}]},
+ "{number, '11'} is empty at start."
+ ),
+ ok.
+
+test_get_empty() ->
+ etap:is(
+ couch_stats_aggregator:get_json({testing, stuff}),
+ make_agg(<<"yay description">>, null, null, null, null, null),
+ "Getting {testing, stuff} returns an empty aggregate."
+ ),
+ etap:is(
+ couch_stats_aggregator:get_json({number, '11'}),
+ make_agg(<<"randomosity">>, null, null, null, null, null),
+ "Getting {number, '11'} returns an empty aggregate."
+ ),
+ ok.
+
+test_count_stats() ->
+ lists:foreach(fun(_) ->
+ couch_stats_collector:increment({testing, stuff})
+ end, lists:seq(1, 100)),
+ couch_stats_aggregator:collect_sample(),
+ etap:is(
+ couch_stats_aggregator:get_json({testing, stuff}),
+ make_agg(<<"yay description">>, 100, 100, null, 100, 100),
+ "COUNT: Adding values changes the stats."
+ ),
+ etap:is(
+ couch_stats_aggregator:get_json({testing, stuff}, 1),
+ make_agg(<<"yay description">>, 100, 100, null, 100, 100),
+ "COUNT: Adding values changes stats for all times."
+ ),
+
+ timer:sleep(500),
+ couch_stats_aggregator:collect_sample(),
+ etap:is(
+ couch_stats_aggregator:get_json({testing, stuff}),
+ make_agg(<<"yay description">>, 100, 50, 70.711, 0, 100),
+ "COUNT: Removing values changes stats."
+ ),
+ etap:is(
+ couch_stats_aggregator:get_json({testing, stuff}, 1),
+ make_agg(<<"yay description">>, 100, 50, 70.711, 0, 100),
+ "COUNT: Removing values changes stats for all times."
+ ),
+
+ timer:sleep(600),
+ couch_stats_aggregator:collect_sample(),
+ etap:is(
+ couch_stats_aggregator:get_json({testing, stuff}),
+ make_agg(<<"yay description">>, 100, 33.333, 57.735, 0, 100),
+ "COUNT: Letting time passes doesn't remove data from time 0 aggregates"
+ ),
+ etap:is(
+ couch_stats_aggregator:get_json({testing, stuff}, 1),
+ make_agg(<<"yay description">>, 0, 0, 0, 0, 0),
+ "COUNT: Letting time pass removes data from other time aggregates."
+ ),
+ ok.
+
+test_abs_stats() ->
+ lists:foreach(fun(X) ->
+ couch_stats_collector:record({number, 11}, X)
+ end, lists:seq(0, 10)),
+ couch_stats_aggregator:collect_sample(),
+ etap:is(
+ couch_stats_aggregator:get_json({number, 11}),
+ make_agg(<<"randomosity">>, 5, 5, null, 5, 5),
+ "ABS: Adding values changes the stats."
+ ),
+ etap:is(
+ couch_stats_aggregator:get_json({number, 11}, 1),
+ make_agg(<<"randomosity">>, 5, 5, null, 5, 5),
+ "ABS: Adding values changes stats for all times."
+ ),
+
+ timer:sleep(500),
+ couch_stats_collector:record({number, 11}, 15),
+ couch_stats_aggregator:collect_sample(),
+ etap:is(
+ couch_stats_aggregator:get_json({number, 11}),
+ make_agg(<<"randomosity">>, 20, 10, 7.071, 5, 15),
+ "ABS: New values changes stats"
+ ),
+ etap:is(
+ couch_stats_aggregator:get_json({number, 11}, 1),
+ make_agg(<<"randomosity">>, 20, 10, 7.071, 5, 15),
+ "ABS: Removing values changes stats for all times."
+ ),
+
+ timer:sleep(600),
+ couch_stats_aggregator:collect_sample(),
+ etap:is(
+ couch_stats_aggregator:get_json({number, 11}),
+ make_agg(<<"randomosity">>, 20, 10, 7.071, 5, 15),
+ "ABS: Letting time passes doesn't remove data from time 0 aggregates"
+ ),
+ etap:is(
+ couch_stats_aggregator:get_json({number, 11}, 1),
+ make_agg(<<"randomosity">>, 15, 15, null, 15, 15),
+ "ABS: Letting time pass removes data from other time aggregates."
+ ),
+ ok.
+
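+% Builds the expected aggregate JSON. In these fixtures "current" always
+% mirrors "sum", matching the aggregator output for the scenarios above.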
+make_agg(Desc, Sum, Mean, StdDev, Min, Max) ->
+ {[
+ {description, Desc},
+ {current, Sum},
+ {sum, Sum},
+ {mean, Mean},
+ {stddev, StdDev},
+ {min, Min},
+ {max, Max}
+ ]}.
diff --git a/1.1.x/test/etap/130-attachments-md5.t b/1.1.x/test/etap/130-attachments-md5.t
new file mode 100755
index 00000000..6296e08a
--- /dev/null
+++ b/1.1.x/test/etap/130-attachments-md5.t
@@ -0,0 +1,248 @@
+#!/usr/bin/env escript
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+test_db_name() ->
+ <<"etap-test-db">>.
+
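+% Returns sequential document ids ("1", "2", ...) kept in the process
+% dictionary, so every request targets a fresh document.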
+docid() ->
+ case get(docid) of
+ undefined ->
+ put(docid, 1),
+ "1";
+ Count ->
+ put(docid, Count+1),
+ integer_to_list(Count+1)
+ end.
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(16),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_server_sup:start_link(test_util:config_files()),
+ Addr = couch_config:get("httpd", "bind_address", any),
+ put(addr, Addr),
+ put(port, mochiweb_socket_server:get(couch_httpd, port)),
+ timer:sleep(1000),
+
+ couch_server:delete(test_db_name(), []),
+ couch_db:create(test_db_name(), []),
+
+ test_identity_without_md5(),
+ test_chunked_without_md5(),
+
+ test_identity_with_valid_md5(),
+ test_chunked_with_valid_md5_header(),
+ test_chunked_with_valid_md5_trailer(),
+
+ test_identity_with_invalid_md5(),
+ test_chunked_with_invalid_md5_header(),
+ test_chunked_with_invalid_md5_trailer(),
+
+ couch_server:delete(test_db_name(), []),
+ couch_server_sup:stop(),
+ ok.
+
+test_identity_without_md5() ->
+ Data = [
+ "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
+ "Content-Type: text/plain\r\n",
+ "Content-Length: 34\r\n",
+ "\r\n",
+ "We all live in a yellow submarine!"],
+
+ {Code, Json} = do_request(Data),
+ etap:is(Code, 201, "Stored with identity encoding and no MD5"),
+ etap:is(get_json(Json, [<<"ok">>]), true, "Body indicates success.").
+
+test_chunked_without_md5() ->
+ AttData = <<"We all live in a yellow submarine!">>,
+ <<Part1:21/binary, Part2:13/binary>> = AttData,
+ Data = [
+ "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
+ "Content-Type: text/plain\r\n",
+ "Transfer-Encoding: chunked\r\n",
+ "\r\n",
+ to_hex(size(Part1)), "\r\n",
+ Part1, "\r\n",
+ to_hex(size(Part2)), "\r\n",
+ Part2, "\r\n"
+ "0\r\n"
+ "\r\n"],
+
+ {Code, Json} = do_request(Data),
+ etap:is(Code, 201, "Stored with chunked encoding and no MD5"),
+ etap:is(get_json(Json, [<<"ok">>]), true, "Body indicates success.").
+
+test_identity_with_valid_md5() ->
+ AttData = "We all live in a yellow submarine!",
+ Data = [
+ "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
+ "Content-Type: text/plain\r\n",
+ "Content-Length: 34\r\n",
+ "Content-MD5: ", base64:encode(couch_util:md5(AttData)), "\r\n",
+ "\r\n",
+ AttData],
+
+ {Code, Json} = do_request(Data),
+ etap:is(Code, 201, "Stored with identity encoding and valid MD5"),
+ etap:is(get_json(Json, [<<"ok">>]), true, "Body indicates success.").
+
+test_chunked_with_valid_md5_header() ->
+ AttData = <<"We all live in a yellow submarine!">>,
+ <<Part1:21/binary, Part2:13/binary>> = AttData,
+ Data = [
+ "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
+ "Content-Type: text/plain\r\n",
+ "Transfer-Encoding: chunked\r\n",
+ "Content-MD5: ", base64:encode(couch_util:md5(AttData)), "\r\n",
+ "\r\n",
+ to_hex(size(Part1)), "\r\n",
+ Part1, "\r\n",
+ to_hex(size(Part2)), "\r\n",
+ Part2, "\r\n",
+ "0\r\n",
+ "\r\n"],
+
+ {Code, Json} = do_request(Data),
+ etap:is(Code, 201, "Stored with chunked encoding and valid MD5 header."),
+ etap:is(get_json(Json, [<<"ok">>]), true, "Body indicates success.").
+
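+% A Content-MD5 may also arrive as a trailer after the terminating chunk;
+% the "Trailer: Content-MD5" header announces it (RFC 2616, section 14.40).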
+test_chunked_with_valid_md5_trailer() ->
+ AttData = <<"We all live in a yellow submarine!">>,
+ <<Part1:21/binary, Part2:13/binary>> = AttData,
+ Data = [
+ "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
+ "Content-Type: text/plain\r\n",
+ "Transfer-Encoding: chunked\r\n",
+ "Trailer: Content-MD5\r\n",
+ "\r\n",
+ to_hex(size(Part1)), "\r\n",
+ Part1, "\r\n",
+ to_hex(size(Part2)), "\r\n",
+ Part2, "\r\n",
+ "0\r\n",
+ "Content-MD5: ", base64:encode(couch_util:md5(AttData)), "\r\n",
+ "\r\n"],
+
+ {Code, Json} = do_request(Data),
+ etap:is(Code, 201, "Stored with chunked encoding and valid MD5 trailer."),
+ etap:is(get_json(Json, [<<"ok">>]), true, "Body indicates success.").
+
+test_identity_with_invalid_md5() ->
+ Data = [
+ "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
+ "Content-Type: text/plain\r\n",
+ "Content-Length: 34\r\n",
+ "Content-MD5: ", base64:encode(<<"foobar!">>), "\r\n",
+ "\r\n",
+ "We all live in a yellow submarine!"],
+
+ {Code, Json} = do_request(Data),
+ etap:is(Code, 400, "Invalid MD5 header causes an error: identity"),
+ etap:is(
+ get_json(Json, [<<"error">>]),
+ <<"content_md5_mismatch">>,
+ "Body indicates reason for failure."
+ ).
+
+test_chunked_with_invalid_md5_header() ->
+ AttData = <<"We all live in a yellow submarine!">>,
+ <<Part1:21/binary, Part2:13/binary>> = AttData,
+ Data = [
+ "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
+ "Content-Type: text/plain\r\n",
+ "Transfer-Encoding: chunked\r\n",
+ "Content-MD5: ", base64:encode(<<"so sneaky...">>), "\r\n",
+ "\r\n",
+ to_hex(size(Part1)), "\r\n",
+ Part1, "\r\n",
+ to_hex(size(Part2)), "\r\n",
+ Part2, "\r\n",
+ "0\r\n",
+ "\r\n"],
+
+ {Code, Json} = do_request(Data),
+ etap:is(Code, 400, "Invalid MD5 header causes an error: chunked"),
+ etap:is(
+ get_json(Json, [<<"error">>]),
+ <<"content_md5_mismatch">>,
+ "Body indicates reason for failure."
+ ).
+
+test_chunked_with_invalid_md5_trailer() ->
+ AttData = <<"We all live in a yellow submarine!">>,
+ <<Part1:21/binary, Part2:13/binary>> = AttData,
+ Data = [
+ "PUT /", test_db_name(), "/", docid(), "/readme.txt HTTP/1.1\r\n",
+ "Content-Type: text/plain\r\n",
+ "Transfer-Encoding: chunked\r\n",
+ "Trailer: Content-MD5\r\n",
+ "\r\n",
+ to_hex(size(Part1)), "\r\n",
+ Part1, "\r\n",
+ to_hex(size(Part2)), "\r\n",
+ Part2, "\r\n",
+ "0\r\n",
+ "Content-MD5: ", base64:encode(<<"Kool-Aid Fountain!">>), "\r\n",
+ "\r\n"],
+
+ {Code, Json} = do_request(Data),
+ etap:is(Code, 400, "Invalid MD5 Trailer causes an error"),
+ etap:is(
+ get_json(Json, [<<"error">>]),
+ <<"content_md5_mismatch">>,
+ "Body indicates reason for failure."
+ ).
+
+
+get_socket() ->
+ Options = [binary, {packet, 0}, {active, false}],
+ {ok, Sock} = gen_tcp:connect(get(addr), get(port), Options),
+ Sock.
+
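+% Sends a raw request over a plain socket; an HTTP client is avoided so the
+% chunked bodies and MD5 trailers above can be written byte-for-byte. The
+% sleep gives the server time to emit a complete response before one recv.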
+do_request(Request) ->
+ Sock = get_socket(),
+ gen_tcp:send(Sock, list_to_binary(lists:flatten(Request))),
+ timer:sleep(1000),
+ {ok, R} = gen_tcp:recv(Sock, 0),
+ gen_tcp:close(Sock),
+ [Header, Body] = re:split(R, "\r\n\r\n", [{return, binary}]),
+ {ok, {http_response, _, Code, _}, _} =
+ erlang:decode_packet(http, Header, []),
+ Json = couch_util:json_decode(Body),
+ {Code, Json}.
+
+get_json(Json, Path) ->
+ couch_util:get_nested_json_value(Json, Path).
+
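+% Renders a chunk size as an uppercase hex string for the chunked bodies
+% above. Note that to_hex(0) yields ""; the terminating "0" chunk is written
+% as a literal, so that case is never reached here.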
+to_hex(Val) ->
+ to_hex(Val, []).
+
+to_hex(0, Acc) ->
+ Acc;
+to_hex(Val, Acc) ->
+ to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
+
+hex_char(V) when V < 10 -> $0 + V;
+hex_char(V) -> $A + V - 10.
+
diff --git a/1.1.x/test/etap/140-attachment-comp.t b/1.1.x/test/etap/140-attachment-comp.t
new file mode 100755
index 00000000..475f4fb0
--- /dev/null
+++ b/1.1.x/test/etap/140-attachment-comp.t
@@ -0,0 +1,762 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+test_db_name() ->
+ <<"couch_test_atts_compression">>.
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(85),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_server_sup:start_link(test_util:config_files()),
+ put(addr, couch_config:get("httpd", "bind_address", "127.0.0.1")),
+ put(port, integer_to_list(mochiweb_socket_server:get(couch_httpd, port))),
+ application:start(inets),
+ timer:sleep(1000),
+ couch_server:delete(test_db_name(), []),
+ couch_db:create(test_db_name(), []),
+
+ couch_config:set("attachments", "compression_level", "8", false),
+ couch_config:set("attachments", "compressible_types", "text/*", false),
+
+ create_1st_text_att(),
+ create_1st_png_att(),
+ create_2nd_text_att(),
+ create_2nd_png_att(),
+
+ tests_for_1st_text_att(),
+ tests_for_1st_png_att(),
+ tests_for_2nd_text_att(),
+ tests_for_2nd_png_att(),
+
+ create_already_compressed_att(db_url() ++ "/doc_comp_att", "readme.txt"),
+ test_already_compressed_att(db_url() ++ "/doc_comp_att", "readme.txt"),
+
+ test_create_already_compressed_att_with_invalid_content_encoding(
+ db_url() ++ "/doc_att_deflate",
+ "readme.txt",
+ zlib:compress(test_text_data()),
+ "deflate"
+ ),
+
+ test_create_already_compressed_att_with_invalid_content_encoding(
+ db_url() ++ "/doc_att_compress",
+ "readme.txt",
+ % Note: As of OTP R13B04, it seems there's no LZW compression
+ % (i.e. UNIX compress utility implementation) lib in OTP.
+ % However there's a simple working Erlang implementation at:
+ % http://scienceblogs.com/goodmath/2008/01/simple_lempelziv_compression_i.php
+ test_text_data(),
+ "compress"
+ ),
+
+ test_compressible_type_with_parameters(),
+
+ timer:sleep(3000), % to avoid mochiweb socket closed exceptions
+ couch_server:delete(test_db_name(), []),
+ couch_server_sup:stop(),
+ ok.
+
+db_url() ->
+ "http://" ++ get(addr) ++ ":" ++ get(port) ++ "/" ++
+ binary_to_list(test_db_name()).
+
+create_1st_text_att() ->
+ {ok, {{_, Code, _}, _Headers, _Body}} = http:request(
+ put,
+ {db_url() ++ "/testdoc1/readme.txt", [],
+ "text/plain", test_text_data()},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 201, "Created text attachment using the standalone api"),
+ ok.
+
+create_1st_png_att() ->
+ {ok, {{_, Code, _}, _Headers, _Body}} = http:request(
+ put,
+ {db_url() ++ "/testdoc2/icon.png", [],
+ "image/png", test_png_data()},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 201, "Created png attachment using the standalone api"),
+ ok.
+
+% create a text attachment using the non-standalone attachment api
+create_2nd_text_att() ->
+ DocJson = {[
+ {<<"_attachments">>, {[
+ {<<"readme.txt">>, {[
+ {<<"content_type">>, <<"text/plain">>},
+ {<<"data">>, base64:encode(test_text_data())}
+ ]}
+ }]}}
+ ]},
+ {ok, {{_, Code, _}, _Headers, _Body}} = http:request(
+ put,
+ {db_url() ++ "/testdoc3", [],
+ "application/json", list_to_binary(couch_util:json_encode(DocJson))},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 201, "Created text attachment using the non-standalone api"),
+ ok.
+
+% create a png attachment using the non-standalone attachment api
+create_2nd_png_att() ->
+ DocJson = {[
+ {<<"_attachments">>, {[
+ {<<"icon.png">>, {[
+ {<<"content_type">>, <<"image/png">>},
+ {<<"data">>, base64:encode(test_png_data())}
+ ]}
+ }]}}
+ ]},
+ {ok, {{_, Code, _}, _Headers, _Body}} = http:request(
+ put,
+ {db_url() ++ "/testdoc4", [],
+ "application/json", list_to_binary(couch_util:json_encode(DocJson))},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 201, "Created png attachment using the non-standalone api"),
+ ok.
+
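+% Clients may upload pre-gzipped data by sending "Content-Encoding: gzip";
+% the server stores the bytes as received and records the gzip encoding in
+% the attachment stub, as the assertions further below verify.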
+create_already_compressed_att(DocUri, AttName) ->
+ {ok, {{_, Code, _}, _Headers, _Body}} = http:request(
+ put,
+ {DocUri ++ "/" ++ AttName, [{"Content-Encoding", "gzip"}],
+ "text/plain", zlib:gzip(test_text_data())},
+ [],
+ [{sync, true}]),
+ etap:is(
+ Code,
+ 201,
+ "Created already compressed attachment using the standalone api"
+ ),
+ ok.
+
+tests_for_1st_text_att() ->
+ test_get_1st_text_att_with_accept_encoding_gzip(),
+ test_get_1st_text_att_without_accept_encoding_header(),
+ test_get_1st_text_att_with_accept_encoding_deflate(),
+ test_get_1st_text_att_with_accept_encoding_deflate_only(),
+ test_get_doc_with_1st_text_att(),
+ test_1st_text_att_stub().
+
+tests_for_1st_png_att() ->
+ test_get_1st_png_att_without_accept_encoding_header(),
+ test_get_1st_png_att_with_accept_encoding_gzip(),
+ test_get_1st_png_att_with_accept_encoding_deflate(),
+ test_get_doc_with_1st_png_att(),
+ test_1st_png_att_stub().
+
+tests_for_2nd_text_att() ->
+ test_get_2nd_text_att_with_accept_encoding_gzip(),
+ test_get_2nd_text_att_without_accept_encoding_header(),
+ test_get_doc_with_2nd_text_att(),
+ test_2nd_text_att_stub().
+
+tests_for_2nd_png_att() ->
+ test_get_2nd_png_att_without_accept_encoding_header(),
+ test_get_2nd_png_att_with_accept_encoding_gzip(),
+ test_get_doc_with_2nd_png_att(),
+ test_2nd_png_att_stub().
+
+test_get_1st_text_att_with_accept_encoding_gzip() ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc1/readme.txt", [{"Accept-Encoding", "gzip"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers),
+    etap:is(Gzipped, true, "received body is gzipped"),
+ Uncompressed = binary_to_list(zlib:gunzip(list_to_binary(Body))),
+ etap:is(
+ Uncompressed,
+ test_text_data(),
+ "received data for the 1st text attachment is ok"
+ ),
+ ok.
+
+test_get_1st_text_att_without_accept_encoding_header() ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc1/readme.txt", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers),
+    etap:is(Gzipped, false, "received body is not gzipped"),
+ etap:is(
+ Body,
+ test_text_data(),
+ "received data for the 1st text attachment is ok"
+ ),
+ ok.
+
+test_get_1st_text_att_with_accept_encoding_deflate() ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc1/readme.txt", [{"Accept-Encoding", "deflate"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers),
+    etap:is(Gzipped, false, "received body is not gzipped"),
+ Deflated = lists:member({"content-encoding", "deflate"}, Headers),
+ etap:is(Deflated, false, "received body is not deflated"),
+ etap:is(
+ Body,
+ test_text_data(),
+ "received data for the 1st text attachment is ok"
+ ),
+ ok.
+
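+% "deflate, *;q=0" marks every other coding, identity included, as
+% unacceptable; since only gzip (and identity) can be served for this
+% attachment, the server must answer 406 Not Acceptable.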
+test_get_1st_text_att_with_accept_encoding_deflate_only() ->
+ {ok, {{_, Code, _}, _Headers, _Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc1/readme.txt",
+ [{"Accept-Encoding", "deflate, *;q=0"}]},
+ [],
+ [{sync, true}]),
+ etap:is(
+ Code,
+ 406,
+ "HTTP response code is 406 for an unsupported content encoding request"
+ ),
+ ok.
+
+test_get_1st_png_att_without_accept_encoding_header() ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc2/icon.png", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Encoding = couch_util:get_value("content-encoding", Headers),
+    etap:is(Encoding, undefined, "received body is not gzipped"),
+ etap:is(
+ Body,
+ test_png_data(),
+ "received data for the 1st png attachment is ok"
+ ),
+ ok.
+
+test_get_1st_png_att_with_accept_encoding_gzip() ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc2/icon.png", [{"Accept-Encoding", "gzip"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Encoding = couch_util:get_value("content-encoding", Headers),
+    etap:is(Encoding, undefined, "received body is not gzipped"),
+ etap:is(
+ Body,
+ test_png_data(),
+ "received data for the 1st png attachment is ok"
+ ),
+ ok.
+
+test_get_1st_png_att_with_accept_encoding_deflate() ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc2/icon.png", [{"Accept-Encoding", "deflate"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Encoding = couch_util:get_value("content-encoding", Headers),
+ etap:is(Encoding, undefined, "received body is in identity form"),
+ etap:is(
+ Body,
+ test_png_data(),
+ "received data for the 1st png attachment is ok"
+ ),
+ ok.
+
+test_get_doc_with_1st_text_att() ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc1?attachments=true",
+ [{"Accept", "application/json"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Json = couch_util:json_decode(Body),
+ TextAttJson = couch_util:get_nested_json_value(
+ Json,
+ [<<"_attachments">>, <<"readme.txt">>]
+ ),
+ TextAttType = couch_util:get_nested_json_value(
+ TextAttJson,
+ [<<"content_type">>]
+ ),
+ TextAttData = couch_util:get_nested_json_value(
+ TextAttJson,
+ [<<"data">>]
+ ),
+ etap:is(
+ TextAttType,
+ <<"text/plain">>,
+ "1st text attachment has type text/plain"
+ ),
+ %% check the attachment's data is the base64 encoding of the plain text
+    %% and not the base64 encoding of the gzipped plain text
+ etap:is(
+ TextAttData,
+ base64:encode(test_text_data()),
+ "1st text attachment data is properly base64 encoded"
+ ),
+ ok.
+
+test_1st_text_att_stub() ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc1?att_encoding_info=true", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Json = couch_util:json_decode(Body),
+ {TextAttJson} = couch_util:get_nested_json_value(
+ Json,
+ [<<"_attachments">>, <<"readme.txt">>]
+ ),
+ TextAttLength = couch_util:get_value(<<"length">>, TextAttJson),
+ etap:is(
+ TextAttLength,
+ length(test_text_data()),
+ "1st text attachment stub length matches the uncompressed length"
+ ),
+ TextAttEncoding = couch_util:get_value(<<"encoding">>, TextAttJson),
+ etap:is(
+ TextAttEncoding,
+ <<"gzip">>,
+ "1st text attachment stub has the encoding field set to gzip"
+ ),
+ TextAttEncLength = couch_util:get_value(<<"encoded_length">>, TextAttJson),
+ etap:is(
+ TextAttEncLength,
+ iolist_size(zlib:gzip(test_text_data())),
+ "1st text attachment stub encoded_length matches the compressed length"
+ ),
+ ok.
+
+test_get_doc_with_1st_png_att() ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc2?attachments=true",
+ [{"Accept", "application/json"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Json = couch_util:json_decode(Body),
+ PngAttJson = couch_util:get_nested_json_value(
+ Json,
+ [<<"_attachments">>, <<"icon.png">>]
+ ),
+ PngAttType = couch_util:get_nested_json_value(
+ PngAttJson,
+ [<<"content_type">>]
+ ),
+ PngAttData = couch_util:get_nested_json_value(
+ PngAttJson,
+ [<<"data">>]
+ ),
+ etap:is(PngAttType, <<"image/png">>, "attachment has type image/png"),
+ etap:is(
+ PngAttData,
+ base64:encode(test_png_data()),
+ "1st png attachment data is properly base64 encoded"
+ ),
+ ok.
+
+test_1st_png_att_stub() ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc2?att_encoding_info=true", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Json = couch_util:json_decode(Body),
+ {PngAttJson} = couch_util:get_nested_json_value(
+ Json,
+ [<<"_attachments">>, <<"icon.png">>]
+ ),
+ PngAttLength = couch_util:get_value(<<"length">>, PngAttJson),
+ etap:is(
+ PngAttLength,
+ length(test_png_data()),
+ "1st png attachment stub length matches the uncompressed length"
+ ),
+ PngEncoding = couch_util:get_value(<<"encoding">>, PngAttJson),
+ etap:is(
+ PngEncoding,
+ undefined,
+ "1st png attachment stub doesn't have an encoding field"
+ ),
+ PngEncLength = couch_util:get_value(<<"encoded_length">>, PngAttJson),
+ etap:is(
+ PngEncLength,
+ undefined,
+ "1st png attachment stub doesn't have an encoded_length field"
+ ),
+ ok.
+
+test_get_2nd_text_att_with_accept_encoding_gzip() ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc3/readme.txt", [{"Accept-Encoding", "gzip"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers),
+    etap:is(Gzipped, true, "received body is gzipped"),
+ Uncompressed = binary_to_list(zlib:gunzip(list_to_binary(Body))),
+ etap:is(
+ Uncompressed,
+ test_text_data(),
+ "received data for the 2nd text attachment is ok"
+ ),
+ ok.
+
+test_get_2nd_text_att_without_accept_encoding_header() ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc3/readme.txt", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers),
+    etap:is(Gzipped, false, "received body is not gzipped"),
+ etap:is(
+ Body,
+ test_text_data(),
+ "received data for the 2nd text attachment is ok"
+ ),
+ ok.
+
+test_get_2nd_png_att_without_accept_encoding_header() ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc4/icon.png", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers),
+    etap:is(Gzipped, false, "received body is not gzipped"),
+ etap:is(
+ Body,
+ test_png_data(),
+ "received data for the 2nd png attachment is ok"
+ ),
+ ok.
+
+test_get_2nd_png_att_with_accept_encoding_gzip() ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc4/icon.png", [{"Accept-Encoding", "gzip"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers),
+    etap:is(Gzipped, false, "received body is not gzipped"),
+ etap:is(
+ Body,
+ test_png_data(),
+ "received data for the 2nd png attachment is ok"
+ ),
+ ok.
+
+test_get_doc_with_2nd_text_att() ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc3?attachments=true",
+ [{"Accept", "application/json"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Json = couch_util:json_decode(Body),
+ TextAttJson = couch_util:get_nested_json_value(
+ Json,
+ [<<"_attachments">>, <<"readme.txt">>]
+ ),
+ TextAttType = couch_util:get_nested_json_value(
+ TextAttJson,
+ [<<"content_type">>]
+ ),
+ TextAttData = couch_util:get_nested_json_value(
+ TextAttJson,
+ [<<"data">>]
+ ),
+ etap:is(TextAttType, <<"text/plain">>, "attachment has type text/plain"),
+ %% check the attachment's data is the base64 encoding of the plain text
+    %% and not the base64 encoding of the gzipped plain text
+ etap:is(
+ TextAttData,
+ base64:encode(test_text_data()),
+ "2nd text attachment data is properly base64 encoded"
+ ),
+ ok.
+
+test_2nd_text_att_stub() ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc3?att_encoding_info=true", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Json = couch_util:json_decode(Body),
+ {TextAttJson} = couch_util:get_nested_json_value(
+ Json,
+ [<<"_attachments">>, <<"readme.txt">>]
+ ),
+ TextAttLength = couch_util:get_value(<<"length">>, TextAttJson),
+ etap:is(
+ TextAttLength,
+ length(test_text_data()),
+ "2nd text attachment stub length matches the uncompressed length"
+ ),
+ TextAttEncoding = couch_util:get_value(<<"encoding">>, TextAttJson),
+ etap:is(
+ TextAttEncoding,
+ <<"gzip">>,
+ "2nd text attachment stub has the encoding field set to gzip"
+ ),
+ TextAttEncLength = couch_util:get_value(<<"encoded_length">>, TextAttJson),
+ etap:is(
+ TextAttEncLength,
+ iolist_size(zlib:gzip(test_text_data())),
+ "2nd text attachment stub encoded_length matches the compressed length"
+ ),
+ ok.
+
+test_get_doc_with_2nd_png_att() ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc4?attachments=true",
+ [{"Accept", "application/json"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Json = couch_util:json_decode(Body),
+ PngAttJson = couch_util:get_nested_json_value(
+ Json,
+ [<<"_attachments">>, <<"icon.png">>]
+ ),
+ PngAttType = couch_util:get_nested_json_value(
+ PngAttJson,
+ [<<"content_type">>]
+ ),
+ PngAttData = couch_util:get_nested_json_value(
+ PngAttJson,
+ [<<"data">>]
+ ),
+ etap:is(PngAttType, <<"image/png">>, "attachment has type image/png"),
+ etap:is(
+ PngAttData,
+ base64:encode(test_png_data()),
+ "2nd png attachment data is properly base64 encoded"
+ ),
+ ok.
+
+test_2nd_png_att_stub() ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc4?att_encoding_info=true", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Json = couch_util:json_decode(Body),
+ {PngAttJson} = couch_util:get_nested_json_value(
+ Json,
+ [<<"_attachments">>, <<"icon.png">>]
+ ),
+ PngAttLength = couch_util:get_value(<<"length">>, PngAttJson),
+ etap:is(
+ PngAttLength,
+ length(test_png_data()),
+ "2nd png attachment stub length matches the uncompressed length"
+ ),
+ PngEncoding = couch_util:get_value(<<"encoding">>, PngAttJson),
+ etap:is(
+ PngEncoding,
+ undefined,
+ "2nd png attachment stub doesn't have an encoding field"
+ ),
+ PngEncLength = couch_util:get_value(<<"encoded_length">>, PngAttJson),
+ etap:is(
+ PngEncLength,
+ undefined,
+ "2nd png attachment stub doesn't have an encoded_length field"
+ ),
+ ok.
+
+test_already_compressed_att(DocUri, AttName) ->
+ test_get_already_compressed_att_with_accept_gzip(DocUri, AttName),
+ test_get_already_compressed_att_without_accept(DocUri, AttName),
+ test_get_already_compressed_att_stub(DocUri, AttName).
+
+test_get_already_compressed_att_with_accept_gzip(DocUri, AttName) ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {DocUri ++ "/" ++ AttName, [{"Accept-Encoding", "gzip"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers),
+    etap:is(Gzipped, true, "received body is gzipped"),
+ etap:is(
+ iolist_to_binary(Body),
+ iolist_to_binary(zlib:gzip(test_text_data())),
+ "received data for the already compressed attachment is ok"
+ ),
+ ok.
+
+test_get_already_compressed_att_without_accept(DocUri, AttName) ->
+ {ok, {{_, Code, _}, Headers, Body}} = http:request(
+ get,
+ {DocUri ++ "/" ++ AttName, []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers),
+    etap:is(Gzipped, false, "received body is not gzipped"),
+ etap:is(
+ iolist_to_binary(Body),
+ iolist_to_binary(test_text_data()),
+ "received data for the already compressed attachment is ok"
+ ),
+ ok.
+
+test_get_already_compressed_att_stub(DocUri, AttName) ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {DocUri ++ "?att_encoding_info=true", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "HTTP response code is 200"),
+ Json = couch_util:json_decode(Body),
+ {AttJson} = couch_util:get_nested_json_value(
+ Json,
+ [<<"_attachments">>, iolist_to_binary(AttName)]
+ ),
+ AttLength = couch_util:get_value(<<"length">>, AttJson),
+ etap:is(
+ AttLength,
+        iolist_size(zlib:gzip(test_text_data())),
+ "Already compressed attachment stub length matches the "
+ "compressed length"
+ ),
+ Encoding = couch_util:get_value(<<"encoding">>, AttJson),
+ etap:is(
+ Encoding,
+ <<"gzip">>,
+ "Already compressed attachment stub has the encoding field set to gzip"
+ ),
+ EncLength = couch_util:get_value(<<"encoded_length">>, AttJson),
+ etap:is(
+ EncLength,
+ AttLength,
+ "Already compressed attachment stub encoded_length matches the "
+ "length field value"
+ ),
+ ok.
+
+test_create_already_compressed_att_with_invalid_content_encoding(
+ DocUri, AttName, AttData, Encoding) ->
+ {ok, {{_, Code, _}, _Headers, _Body}} = http:request(
+ put,
+ {DocUri ++ "/" ++ AttName, [{"Content-Encoding", Encoding}],
+ "text/plain", AttData},
+ [],
+ [{sync, true}]),
+ etap:is(
+ Code,
+ 415,
+ "Couldn't create an already compressed attachment using the "
+ "unsupported encoding '" ++ Encoding ++ "'"
+ ),
+ ok.
+
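+% MIME parameters such as "; charset=UTF-8" must not defeat the "text/*"
+% pattern: the attachment should still be stored gzipped.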
+test_compressible_type_with_parameters() ->
+ {ok, {{_, Code, _}, _Headers, _Body}} = http:request(
+ put,
+ {db_url() ++ "/testdoc5/readme.txt", [],
+ "text/plain; charset=UTF-8", test_text_data()},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 201, "Created text attachment with MIME type "
+ "'text/plain; charset=UTF-8' using the standalone api"),
+ {ok, {{_, Code2, _}, Headers2, Body}} = http:request(
+ get,
+ {db_url() ++ "/testdoc5/readme.txt", [{"Accept-Encoding", "gzip"}]},
+ [],
+ [{sync, true}]),
+ etap:is(Code2, 200, "HTTP response code is 200"),
+    Gzipped = lists:member({"content-encoding", "gzip"}, Headers2),
+    etap:is(Gzipped, true, "received body is gzipped"),
+ Uncompressed = binary_to_list(zlib:gunzip(list_to_binary(Body))),
+ etap:is(Uncompressed, test_text_data(), "received data is gzipped"),
+ {ok, {{_, Code3, _}, _Headers3, Body3}} = http:request(
+ get,
+ {db_url() ++ "/testdoc5?att_encoding_info=true", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code3, 200, "HTTP response code is 200"),
+ Json = couch_util:json_decode(Body3),
+ {TextAttJson} = couch_util:get_nested_json_value(
+ Json,
+ [<<"_attachments">>, <<"readme.txt">>]
+ ),
+ TextAttLength = couch_util:get_value(<<"length">>, TextAttJson),
+ etap:is(
+ TextAttLength,
+ length(test_text_data()),
+ "text attachment stub length matches the uncompressed length"
+ ),
+ TextAttEncoding = couch_util:get_value(<<"encoding">>, TextAttJson),
+ etap:is(
+ TextAttEncoding,
+ <<"gzip">>,
+ "text attachment stub has the encoding field set to gzip"
+ ),
+ TextAttEncLength = couch_util:get_value(<<"encoded_length">>, TextAttJson),
+ etap:is(
+ TextAttEncLength,
+ iolist_size(zlib:gzip(test_text_data())),
+ "text attachment stub encoded_length matches the compressed length"
+ ),
+ ok.
+
+test_png_data() ->
+ {ok, Data} = file:read_file(
+ test_util:source_file("share/www/image/logo.png")
+ ),
+ binary_to_list(Data).
+
+test_text_data() ->
+ {ok, Data} = file:read_file(
+ test_util:source_file("README")
+ ),
+ binary_to_list(Data).
diff --git a/1.1.x/test/etap/150-invalid-view-seq.t b/1.1.x/test/etap/150-invalid-view-seq.t
new file mode 100755
index 00000000..594d3416
--- /dev/null
+++ b/1.1.x/test/etap/150-invalid-view-seq.t
@@ -0,0 +1,190 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(user_ctx, {
+ name = null,
+ roles = [],
+ handler
+}).
+
+test_db_name() ->
+ <<"couch_test_invalid_view_seq">>.
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(10),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+%% NOTE: because the test stops and restarts the server, a large but
+%% harmless stack trace is written to stderr.
+%%
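+%% The test snapshots the database file, writes one more document and opens
+%% the view (advancing its update seq), then restores the snapshot. The view
+%% index is now ahead of the database; the server should detect the invalid
+%% seq and rebuild the index instead of serving stale rows, which the final
+%% assertions verify.
+%%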
+test() ->
+ couch_server_sup:start_link(test_util:config_files()),
+ timer:sleep(1000),
+ delete_db(),
+ create_db(),
+
+ create_docs(),
+ create_design_doc(),
+
+ % make DB file backup
+ backup_db_file(),
+
+ put(addr, couch_config:get("httpd", "bind_address", "127.0.0.1")),
+ put(port, integer_to_list(mochiweb_socket_server:get(couch_httpd, port))),
+ application:start(inets),
+
+ create_new_doc(),
+ query_view_before_restore_backup(),
+
+ % restore DB file backup after querying view
+ restore_backup_db_file(),
+
+ query_view_after_restore_backup(),
+
+ delete_db(),
+ couch_server_sup:stop(),
+ ok.
+
+admin_user_ctx() ->
+ {user_ctx, #user_ctx{roles=[<<"_admin">>]}}.
+
+create_db() ->
+ {ok, _} = couch_db:create(test_db_name(), [admin_user_ctx()]).
+
+delete_db() ->
+ couch_server:delete(test_db_name(), [admin_user_ctx()]).
+
+create_docs() ->
+ {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
+ Doc1 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc1">>},
+ {<<"value">>, 1}
+ ]}),
+ Doc2 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc2">>},
+ {<<"value">>, 2}
+ ]}),
+ Doc3 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc3">>},
+ {<<"value">>, 3}
+ ]}),
+ {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db).
+
+create_design_doc() ->
+ {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
+ DDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/foo">>},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>, {[
+ {<<"bar">>, {[
+ {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
+ ]}}
+ ]}}
+ ]}),
+ {ok, _} = couch_db:update_docs(Db, [DDoc]),
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db).
+
+backup_db_file() ->
+ DbFile = test_util:build_file("tmp/lib/" ++
+ binary_to_list(test_db_name()) ++ ".couch"),
+ {ok, _} = file:copy(DbFile, DbFile ++ ".backup"),
+ ok.
+
+create_new_doc() ->
+ {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
+ Doc666 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc666">>},
+ {<<"value">>, 999}
+ ]}),
+ {ok, _} = couch_db:update_docs(Db, [Doc666]),
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db).
+
+db_url() ->
+ "http://" ++ get(addr) ++ ":" ++ get(port) ++ "/" ++
+ binary_to_list(test_db_name()).
+
+query_view_before_restore_backup() ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/_design/foo/_view/bar", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "Got view response before restoring backup."),
+ ViewJson = couch_util:json_decode(Body),
+ Rows = couch_util:get_nested_json_value(ViewJson, [<<"rows">>]),
+ HasDoc1 = has_doc("doc1", Rows),
+ HasDoc2 = has_doc("doc2", Rows),
+ HasDoc3 = has_doc("doc3", Rows),
+ HasDoc666 = has_doc("doc666", Rows),
+ etap:is(HasDoc1, true, "Before backup restore, view has doc1"),
+ etap:is(HasDoc2, true, "Before backup restore, view has doc2"),
+ etap:is(HasDoc3, true, "Before backup restore, view has doc3"),
+ etap:is(HasDoc666, true, "Before backup restore, view has doc666"),
+ ok.
+
+has_doc(DocId1, Rows) ->
+ DocId = iolist_to_binary(DocId1),
+ lists:any(
+ fun({R}) -> lists:member({<<"id">>, DocId}, R) end,
+ Rows
+ ).
+
+restore_backup_db_file() ->
+ couch_server_sup:stop(),
+ timer:sleep(3000),
+ DbFile = test_util:build_file("tmp/lib/" ++
+ binary_to_list(test_db_name()) ++ ".couch"),
+ ok = file:delete(DbFile),
+ ok = file:rename(DbFile ++ ".backup", DbFile),
+ couch_server_sup:start_link(test_util:config_files()),
+ timer:sleep(1000),
+ put(port, integer_to_list(mochiweb_socket_server:get(couch_httpd, port))),
+ ok.
+
+query_view_after_restore_backup() ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/_design/foo/_view/bar", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "Got view response after restoring backup."),
+ ViewJson = couch_util:json_decode(Body),
+ Rows = couch_util:get_nested_json_value(ViewJson, [<<"rows">>]),
+ HasDoc1 = has_doc("doc1", Rows),
+ HasDoc2 = has_doc("doc2", Rows),
+ HasDoc3 = has_doc("doc3", Rows),
+ HasDoc666 = has_doc("doc666", Rows),
+ etap:is(HasDoc1, true, "After backup restore, view has doc1"),
+ etap:is(HasDoc2, true, "After backup restore, view has doc2"),
+ etap:is(HasDoc3, true, "After backup restore, view has doc3"),
+ etap:is(HasDoc666, false, "After backup restore, view does not have doc666"),
+ ok.
diff --git a/1.1.x/test/etap/160-vhosts.t b/1.1.x/test/etap/160-vhosts.t
new file mode 100755
index 00000000..8dac53e5
--- /dev/null
+++ b/1.1.x/test/etap/160-vhosts.t
@@ -0,0 +1,291 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% XXX: Figure out how to -include("couch_rep.hrl")
+-record(http_db, {
+ url,
+ auth = [],
+ resource = "",
+ headers = [
+ {"User-Agent", "CouchDB/"++couch_server:get_version()},
+ {"Accept", "application/json"},
+ {"Accept-Encoding", "gzip"}
+ ],
+ qs = [],
+ method = get,
+ body = nil,
+ options = [
+ {response_format,binary},
+ {inactivity_timeout, 30000}
+ ],
+ retries = 10,
+ pause = 1,
+ conn = nil
+}).
+
+-record(user_ctx, {
+ name = null,
+ roles = [],
+ handler
+}).
+
+server() ->
+ lists:concat([
+ "http://127.0.0.1:", mochiweb_socket_server:get(couch_httpd, port), "/"
+ ]).
+
+dbname() -> "etap-test-db".
+admin_user_ctx() -> {user_ctx, #user_ctx{roles=[<<"_admin">>]}}.
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(14),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_server_sup:start_link(test_util:config_files()),
+ ibrowse:start(),
+ crypto:start(),
+
+ timer:sleep(1000),
+ couch_server:delete(list_to_binary(dbname()), [admin_user_ctx()]),
+ {ok, Db} = couch_db:create(list_to_binary(dbname()), [admin_user_ctx()]),
+
+ Doc = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc1">>},
+ {<<"value">>, 666}
+ ]}),
+
+ Doc1 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/doc1">>},
+ {<<"shows">>, {[
+ {<<"test">>, <<"function(doc, req) {
+ return { json: {
+ requested_path: '/' + req.requested_path.join('/'),
+ path: '/' + req.path.join('/')
+ }};
+}">>}
+ ]}},
+ {<<"rewrites">>, [
+ {[
+ {<<"from">>, <<"/">>},
+ {<<"to">>, <<"_show/test">>}
+ ]}
+ ]}
+ ]}),
+
+ {ok, _} = couch_db:update_docs(Db, [Doc, Doc1]),
+
+ couch_db:ensure_full_commit(Db),
+
+ %% end boilerplate, start test
+
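+    % Each vhost entry maps a Host header (optionally with a path suffix) to
+    % a local path. "*" matches a single wildcard label and ":name" tokens
+    % are captured for substitution into the target path, e.g.
+    % ":dbname.example1.com" -> "/:dbname".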
+ ok = couch_config:set("vhosts", "example.com", "/etap-test-db", false),
+ ok = couch_config:set("vhosts", "*.example.com",
+ "/etap-test-db/_design/doc1/_rewrite", false),
+ ok = couch_config:set("vhosts", "example.com/test", "/etap-test-db", false),
+ ok = couch_config:set("vhosts", "example1.com",
+ "/etap-test-db/_design/doc1/_rewrite/", false),
+ ok = couch_config:set("vhosts",":appname.:dbname.example1.com",
+ "/:dbname/_design/:appname/_rewrite/", false),
+ ok = couch_config:set("vhosts", ":dbname.example1.com", "/:dbname", false),
+
+ ok = couch_config:set("vhosts", "*.example2.com", "/*", false),
+ ok = couch_config:set("vhosts", "*/test", "/etap-test-db", false),
+ ok = couch_config:set("vhosts", "*.example2.com/test", "/*", false),
+ ok = couch_config:set("vhosts", "*/test1",
+ "/etap-test-db/_design/doc1/_show/test", false),
+
+ % let couch_httpd restart
+ timer:sleep(100),
+
+ test_regular_request(),
+ test_vhost_request(),
+ test_vhost_request_with_qs(),
+ test_vhost_request_with_global(),
+ test_vhost_requested_path(),
+ test_vhost_requested_path_path(),
+ test_vhost_request_wildcard(),
+ test_vhost_request_replace_var(),
+ test_vhost_request_replace_var1(),
+ test_vhost_request_replace_wildcard(),
+ test_vhost_request_path(),
+ test_vhost_request_path1(),
+ test_vhost_request_path2(),
+ test_vhost_request_path3(),
+
+ %% restart boilerplate
+ couch_db:close(Db),
+ timer:sleep(3000),
+ couch_server_sup:stop(),
+
+ ok.
+
+test_regular_request() ->
+    case ibrowse:send_req(server(), [], get, []) of
+ {ok, _, _, Body} ->
+ {[{<<"couchdb">>, <<"Welcome">>},
+ {<<"version">>,_}
+ ]} = couch_util:json_decode(Body),
+ etap:is(true, true, "should return server info");
+ _Else ->
+ etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_request() ->
+ case ibrowse:send_req(server(), [], get, [], [{host_header, "example.com"}]) of
+ {ok, _, _, Body} ->
+ {JsonBody} = couch_util:json_decode(Body),
+ HasDbNameInfo = proplists:is_defined(<<"db_name">>, JsonBody),
+ etap:is(HasDbNameInfo, true, "should return database info");
+ _Else ->
+ etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_request_with_qs() ->
+ Url = server() ++ "doc1?revs_info=true",
+ case ibrowse:send_req(Url, [], get, [], [{host_header, "example.com"}]) of
+ {ok, _, _, Body} ->
+ {JsonProps} = couch_util:json_decode(Body),
+ HasRevsInfo = proplists:is_defined(<<"_revs_info">>, JsonProps),
+ etap:is(HasRevsInfo, true, "should return _revs_info");
+ _Else ->
+ etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_request_with_global() ->
+ Url2 = server() ++ "_utils/index.html",
+ case ibrowse:send_req(Url2, [], get, [], [{host_header, "example.com"}]) of
+ {ok, _, _, Body2} ->
+ "<!DOCTYPE" ++ _Foo = Body2,
+ etap:is(true, true, "should serve /_utils even inside vhosts");
+ _Else ->
+ etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_requested_path() ->
+ case ibrowse:send_req(server(), [], get, [], [{host_header, "example1.com"}]) of
+ {ok, _, _, Body} ->
+ {Json} = couch_util:json_decode(Body),
+ etap:is(case proplists:get_value(<<"requested_path">>, Json) of
+ <<"/">> -> true;
+ _ -> false
+ end, true, <<"requested path in req ok">>);
+ _Else ->
+ etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_requested_path_path() ->
+ case ibrowse:send_req(server(), [], get, [], [{host_header, "example1.com"}]) of
+ {ok, _, _, Body} ->
+ {Json} = couch_util:json_decode(Body),
+ etap:is(case proplists:get_value(<<"path">>, Json) of
+ <<"/etap-test-db/_design/doc1/_show/test">> -> true;
+ _ -> false
+ end, true, <<"path in req ok">>);
+ _Else ->
+ etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_request_wildcard()->
+ case ibrowse:send_req(server(), [], get, [], [{host_header, "test.example.com"}]) of
+ {ok, _, _, Body} ->
+ {Json} = couch_util:json_decode(Body),
+ etap:is(case proplists:get_value(<<"path">>, Json) of
+ <<"/etap-test-db/_design/doc1/_show/test">> -> true;
+ _ -> false
+ end, true, <<"wildcard ok">>);
+ _Else -> etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_request_replace_var() ->
+ case ibrowse:send_req(server(), [], get, [], [{host_header,"etap-test-db.example1.com"}]) of
+ {ok, _, _, Body} ->
+ {JsonBody} = couch_util:json_decode(Body),
+ HasDbNameInfo = proplists:is_defined(<<"db_name">>, JsonBody),
+ etap:is(HasDbNameInfo, true, "should return database info");
+ _Else -> etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_request_replace_var1() ->
+ case ibrowse:send_req(server(), [], get, [], [{host_header, "doc1.etap-test-db.example1.com"}]) of
+ {ok, _, _, Body} ->
+ {Json} = couch_util:json_decode(Body),
+ etap:is(case proplists:get_value(<<"path">>, Json) of
+ <<"/etap-test-db/_design/doc1/_show/test">> -> true;
+ _ -> false
+ end, true, <<"wildcard ok">>);
+ _Else -> etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_request_replace_wildcard() ->
+ case ibrowse:send_req(server(), [], get, [], [{host_header,"etap-test-db.example2.com"}]) of
+ {ok, _, _, Body} ->
+ {JsonBody} = couch_util:json_decode(Body),
+ HasDbNameInfo = proplists:is_defined(<<"db_name">>, JsonBody),
+ etap:is(HasDbNameInfo, true, "should return database info");
+ _Else -> etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_request_path() ->
+ Uri = server() ++ "test",
+ case ibrowse:send_req(Uri, [], get, [], [{host_header, "example.com"}]) of
+ {ok, _, _, Body} ->
+ {JsonBody} = couch_util:json_decode(Body),
+ HasDbNameInfo = proplists:is_defined(<<"db_name">>, JsonBody),
+ etap:is(HasDbNameInfo, true, "should return database info");
+ _Else -> etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_request_path1() ->
+ Url = server() ++ "test/doc1?revs_info=true",
+ case ibrowse:send_req(Url, [], get, [], []) of
+ {ok, _, _, Body} ->
+ {JsonProps} = couch_util:json_decode(Body),
+ HasRevsInfo = proplists:is_defined(<<"_revs_info">>, JsonProps),
+ etap:is(HasRevsInfo, true, "should return _revs_info");
+ _Else -> etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_request_path2() ->
+ Uri = server() ++ "test",
+ case ibrowse:send_req(Uri, [], get, [], [{host_header,"etap-test-db.example2.com"}]) of
+ {ok, _, _, Body} ->
+ {JsonBody} = couch_util:json_decode(Body),
+ HasDbNameInfo = proplists:is_defined(<<"db_name">>, JsonBody),
+ etap:is(HasDbNameInfo, true, "should return database info");
+ _Else -> etap:is(false, true, <<"ibrowse fail">>)
+ end.
+
+test_vhost_request_path3() ->
+ Uri = server() ++ "test1",
+ case ibrowse:send_req(Uri, [], get, [], []) of
+ {ok, _, _, Body} ->
+ {Json} = couch_util:json_decode(Body),
+ etap:is(case proplists:get_value(<<"path">>, Json) of
+ <<"/etap-test-db/_design/doc1/_show/test">> -> true;
+ _ -> false
+ end, true, <<"path in req ok">>);
+ _Else -> etap:is(false, true, <<"ibrowse fail">>)
+ end.
diff --git a/1.1.x/test/etap/170-os-daemons.es b/1.1.x/test/etap/170-os-daemons.es
new file mode 100755
index 00000000..73974e90
--- /dev/null
+++ b/1.1.x/test/etap/170-os-daemons.es
@@ -0,0 +1,29 @@
+#! /usr/bin/env escript
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
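+% This stub daemon just consumes stdin until EOF: couch_os_daemons
+% watches the port it spawns, so a daemon that exits only when its
+% stdin closes stays "alive" for as long as the test needs it.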
+loop() ->
+ loop(io:read("")).
+
+loop({ok, _}) ->
+ loop(io:read(""));
+loop(eof) ->
+ stop;
+loop({error, Reason}) ->
+ throw({error, Reason}).
+
+main([]) ->
+ loop().
diff --git a/1.1.x/test/etap/170-os-daemons.t b/1.1.x/test/etap/170-os-daemons.t
new file mode 100755
index 00000000..6feaa1bf
--- /dev/null
+++ b/1.1.x/test/etap/170-os-daemons.t
@@ -0,0 +1,116 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(daemon, {
+ port,
+ name,
+ cmd,
+ kill,
+ status=running,
+ cfg_patterns=[],
+ errors=[],
+ buf=[]
+}).
+
+config_files() ->
+ lists:map(fun test_util:build_file/1, [
+ "etc/couchdb/default_dev.ini"
+ ]).
+
+daemon_cmd() ->
+ test_util:source_file("test/etap/170-os-daemons.es").
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(49),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_config:start_link(config_files()),
+ couch_os_daemons:start_link(),
+
+ etap:diag("Daemons boot after configuration added."),
+ couch_config:set("os_daemons", "foo", daemon_cmd(), false),
+ timer:sleep(1000),
+
+ {ok, [D1]} = couch_os_daemons:info([table]),
+ check_daemon(D1, "foo"),
+
+ % Check table form
+ {ok, Tab1} = couch_os_daemons:info(),
+ [T1] = ets:tab2list(Tab1),
+ check_daemon(T1, "foo"),
+
+ etap:diag("Daemons stop after configuration removed."),
+ couch_config:delete("os_daemons", "foo", false),
+ timer:sleep(500),
+
+ {ok, []} = couch_os_daemons:info([table]),
+ {ok, Tab2} = couch_os_daemons:info(),
+ etap:is(ets:tab2list(Tab2), [], "As table returns empty table."),
+
+ etap:diag("Adding multiple daemons causes both to boot."),
+ couch_config:set("os_daemons", "bar", daemon_cmd(), false),
+ couch_config:set("os_daemons", "baz", daemon_cmd(), false),
+ timer:sleep(500),
+ {ok, Daemons} = couch_os_daemons:info([table]),
+ lists:foreach(fun(D) ->
+ check_daemon(D)
+ end, Daemons),
+
+ {ok, Tab3} = couch_os_daemons:info(),
+ lists:foreach(fun(D) ->
+ check_daemon(D)
+ end, ets:tab2list(Tab3)),
+
+ etap:diag("Removing one daemon leaves the other alive."),
+ couch_config:delete("os_daemons", "bar", false),
+ timer:sleep(500),
+
+ {ok, [D2]} = couch_os_daemons:info([table]),
+ check_daemon(D2, "baz"),
+
+ % Check table version
+ {ok, Tab4} = couch_os_daemons:info(),
+ [T4] = ets:tab2list(Tab4),
+ check_daemon(T4, "baz"),
+
+ ok.
+
+check_daemon(D) ->
+ check_daemon(D, D#daemon.name).
+
+check_daemon(D, Name) ->
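+    % D#daemon.cmd may carry an absolute path prefix, so compare only
+    % the trailing script name.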
+ BaseName = "170-os-daemons.es",
+ BaseLen = length(BaseName),
+ CmdLen = length(D#daemon.cmd),
+ CmdName = lists:sublist(D#daemon.cmd, CmdLen-BaseLen+1, BaseLen),
+
+ etap:is(is_port(D#daemon.port), true, "Daemon port is a port."),
+ etap:is(D#daemon.name, Name, "Daemon name was set correctly."),
+ etap:is(CmdName, BaseName, "Command name was set correctly."),
+ etap:isnt(D#daemon.kill, undefined, "Kill command was set."),
+ etap:is(D#daemon.errors, [], "No errors occurred while booting."),
+ etap:is(D#daemon.buf, [], "No extra data left in the buffer.").
diff --git a/1.1.x/test/etap/171-os-daemons-config.es b/1.1.x/test/etap/171-os-daemons-config.es
new file mode 100755
index 00000000..1f68ddc6
--- /dev/null
+++ b/1.1.x/test/etap/171-os-daemons-config.es
@@ -0,0 +1,87 @@
+#! /usr/bin/env escript
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+filename() ->
+ list_to_binary(test_util:source_file("test/etap/171-os-daemons-config.es")).
+
+read() ->
+ case io:get_line('') of
+ eof ->
+ stop;
+ Data ->
+ couch_util:json_decode(Data)
+ end.
+
+write(Mesg) ->
+ Data = iolist_to_binary(couch_util:json_encode(Mesg)),
+    io:format("~s~n", [Data]).
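+
+% couch_os_daemons talks a line-based JSON protocol over stdio: the
+% daemon writes ["get", Section] or ["get", Section, Key] and reads the
+% value back, and writes ["log", Mesg] or ["log", Mesg, {Opts}] to log.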
+
+get_cfg(Section) ->
+ write([<<"get">>, Section]),
+ read().
+
+get_cfg(Section, Name) ->
+ write([<<"get">>, Section, Name]),
+ read().
+
+log(Mesg) ->
+ write([<<"log">>, Mesg]).
+
+log(Mesg, Level) ->
+ write([<<"log">>, Mesg, {[{<<"level">>, Level}]}]).
+
+test_get_cfg1() ->
+ FileName = filename(),
+ {[{<<"foo">>, FileName}]} = get_cfg(<<"os_daemons">>).
+
+test_get_cfg2() ->
+ FileName = filename(),
+ FileName = get_cfg(<<"os_daemons">>, <<"foo">>),
+ <<"sequential">> = get_cfg(<<"uuids">>, <<"algorithm">>).
+
+test_get_unknown_cfg() ->
+ {[]} = get_cfg(<<"aal;3p4">>),
+ null = get_cfg(<<"aal;3p4">>, <<"313234kjhsdfl">>).
+
+test_log() ->
+ log(<<"foobar!">>),
+ log(<<"some stuff!">>, <<"debug">>),
+ log(2),
+ log(true),
+ write([<<"log">>, <<"stuff">>, 2]),
+ write([<<"log">>, 3, null]),
+ write([<<"log">>, [1, 2], {[{<<"level">>, <<"debug">>}]}]),
+ write([<<"log">>, <<"true">>, {[]}]).
+
+do_tests() ->
+ test_get_cfg1(),
+ test_get_cfg2(),
+ test_get_unknown_cfg(),
+ test_log(),
+ loop(io:read("")).
+
+loop({ok, _}) ->
+ loop(io:read(""));
+loop(eof) ->
+ init:stop();
+loop({error, _Reason}) ->
+ init:stop().
+
+main([]) ->
+ test_util:init_code_path(),
+ do_tests().
diff --git a/1.1.x/test/etap/171-os-daemons-config.t b/1.1.x/test/etap/171-os-daemons-config.t
new file mode 100755
index 00000000..e9dc3f32
--- /dev/null
+++ b/1.1.x/test/etap/171-os-daemons-config.t
@@ -0,0 +1,74 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(daemon, {
+ port,
+ name,
+ cmd,
+ kill,
+ status=running,
+ cfg_patterns=[],
+ errors=[],
+ buf=[]
+}).
+
+config_files() ->
+ lists:map(fun test_util:build_file/1, [
+ "etc/couchdb/default_dev.ini"
+ ]).
+
+daemon_cmd() ->
+ test_util:source_file("test/etap/171-os-daemons-config.es").
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(6),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_config:start_link(config_files()),
+ couch_config:set("log", "level", "debug", false),
+ couch_log:start_link(),
+ couch_os_daemons:start_link(),
+
+ % "foo" is a required name by this test.
+ couch_config:set("os_daemons", "foo", daemon_cmd(), false),
+ timer:sleep(1000),
+
+ {ok, [D1]} = couch_os_daemons:info([table]),
+ check_daemon(D1, "foo"),
+
+ ok.
+
+check_daemon(D, Name) ->
+ BaseName = "171-os-daemons-config.es",
+ BaseLen = length(BaseName),
+ CmdLen = length(D#daemon.cmd),
+ CmdName = lists:sublist(D#daemon.cmd, CmdLen-BaseLen+1, BaseLen),
+
+ etap:is(is_port(D#daemon.port), true, "Daemon port is a port."),
+ etap:is(D#daemon.name, Name, "Daemon name was set correctly."),
+ etap:is(CmdName, BaseName, "Command name was set correctly."),
+ etap:isnt(D#daemon.kill, undefined, "Kill command was set."),
+ etap:is(D#daemon.errors, [], "No errors occurred while booting."),
+ etap:is(D#daemon.buf, [], "No extra data left in the buffer.").
diff --git a/1.1.x/test/etap/172-os-daemon-errors.1.es b/1.1.x/test/etap/172-os-daemon-errors.1.es
new file mode 100644
index 00000000..a9defba1
--- /dev/null
+++ b/1.1.x/test/etap/172-os-daemon-errors.1.es
@@ -0,0 +1,22 @@
+#! /usr/bin/env escript
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Please do not make this file executable as that's the error being tested.
+
+loop() ->
+ timer:sleep(5000),
+ loop().
+
+main([]) ->
+ loop().
diff --git a/1.1.x/test/etap/172-os-daemon-errors.2.es b/1.1.x/test/etap/172-os-daemon-errors.2.es
new file mode 100755
index 00000000..52de0401
--- /dev/null
+++ b/1.1.x/test/etap/172-os-daemon-errors.2.es
@@ -0,0 +1,16 @@
+#! /usr/bin/env escript
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main([]) ->
+ init:stop().
diff --git a/1.1.x/test/etap/172-os-daemon-errors.3.es b/1.1.x/test/etap/172-os-daemon-errors.3.es
new file mode 100755
index 00000000..64229800
--- /dev/null
+++ b/1.1.x/test/etap/172-os-daemon-errors.3.es
@@ -0,0 +1,17 @@
+#! /usr/bin/env escript
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main([]) ->
+ timer:sleep(1000),
+ init:stop().
diff --git a/1.1.x/test/etap/172-os-daemon-errors.4.es b/1.1.x/test/etap/172-os-daemon-errors.4.es
new file mode 100755
index 00000000..577f3410
--- /dev/null
+++ b/1.1.x/test/etap/172-os-daemon-errors.4.es
@@ -0,0 +1,17 @@
+#! /usr/bin/env escript
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+main([]) ->
+ timer:sleep(2000),
+ init:stop().
diff --git a/1.1.x/test/etap/172-os-daemon-errors.t b/1.1.x/test/etap/172-os-daemon-errors.t
new file mode 100755
index 00000000..287a0812
--- /dev/null
+++ b/1.1.x/test/etap/172-os-daemon-errors.t
@@ -0,0 +1,129 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(daemon, {
+ port,
+ name,
+ cmd,
+ kill,
+ status=running,
+ cfg_patterns=[],
+ errors=[],
+ buf=[]
+}).
+
+config_files() ->
+ lists:map(fun test_util:build_file/1, [
+ "etc/couchdb/default_dev.ini"
+ ]).
+
+bad_perms() ->
+ test_util:source_file("test/etap/172-os-daemon-errors.1.es").
+
+die_on_boot() ->
+ test_util:source_file("test/etap/172-os-daemon-errors.2.es").
+
+die_quickly() ->
+ test_util:source_file("test/etap/172-os-daemon-errors.3.es").
+
+can_reboot() ->
+ test_util:source_file("test/etap/172-os-daemon-errors.4.es").
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(36),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_config:start_link(config_files()),
+ couch_os_daemons:start_link(),
+
+ etap:diag("Daemon not executable."),
+ test_halts("foo", bad_perms(), 1000),
+
+ etap:diag("Daemon dies on boot."),
+ test_halts("bar", die_on_boot(), 1000),
+
+ etap:diag("Daemon dies quickly after boot."),
+ test_halts("baz", die_quickly(), 4000),
+
+ etap:diag("Daemon dies, but not quickly enough to be halted."),
+ test_runs("bam", can_reboot()),
+
+ ok.
+
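+% test_halts configures a daemon that is expected to fail for good and
+% checks that couch_os_daemons marks it halted; test_runs checks that a
+% daemon dying slowly enough is simply restarted.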
+test_halts(Name, Cmd, Time) ->
+ couch_config:set("os_daemons", Name, Cmd ++ " 2> /dev/null", false),
+ timer:sleep(Time),
+ {ok, [D]} = couch_os_daemons:info([table]),
+ check_dead(D, Name, Cmd),
+ couch_config:delete("os_daemons", Name, false).
+
+test_runs(Name, Cmd) ->
+ couch_config:set("os_daemons", Name, Cmd, false),
+
+ timer:sleep(1000),
+ {ok, [D1]} = couch_os_daemons:info([table]),
+ check_daemon(D1, Name, Cmd, 0),
+
+ % Should reboot every two seconds. We're at 1s, so wait
+    % until 3s to be in the middle of the next invocation's
+ % life span.
+ timer:sleep(2000),
+ {ok, [D2]} = couch_os_daemons:info([table]),
+ check_daemon(D2, Name, Cmd, 1),
+
+ % If the kill command changed, that means we rebooted the process.
+ etap:isnt(D1#daemon.kill, D2#daemon.kill, "Kill command changed.").
+
+check_dead(D, Name, Cmd) ->
+ BaseName = filename:basename(Cmd) ++ " 2> /dev/null",
+ BaseLen = length(BaseName),
+ CmdLen = length(D#daemon.cmd),
+ CmdName = lists:sublist(D#daemon.cmd, CmdLen-BaseLen+1, BaseLen),
+
+ etap:is(is_port(D#daemon.port), true, "Daemon port is a port."),
+ etap:is(D#daemon.name, Name, "Daemon name was set correctly."),
+ etap:is(CmdName, BaseName, "Command name was set correctly."),
+ etap:isnt(D#daemon.kill, undefined, "Kill command was set."),
+ etap:is(D#daemon.status, halted, "Daemon has been halted."),
+ etap:is(D#daemon.errors, nil, "Errors have been disabled."),
+ etap:is(D#daemon.buf, nil, "Buffer has been switched off.").
+
+check_daemon(D, Name, Cmd, Errs) ->
+ BaseName = filename:basename(Cmd),
+ BaseLen = length(BaseName),
+ CmdLen = length(D#daemon.cmd),
+ CmdName = lists:sublist(D#daemon.cmd, CmdLen-BaseLen+1, BaseLen),
+
+ etap:is(is_port(D#daemon.port), true, "Daemon port is a port."),
+ etap:is(D#daemon.name, Name, "Daemon name was set correctly."),
+ etap:is(CmdName, BaseName, "Command name was set correctly."),
+ etap:isnt(D#daemon.kill, undefined, "Kill command was set."),
+ etap:is(D#daemon.status, running, "Daemon still running."),
+ etap:is(length(D#daemon.errors), Errs, "Found expected number of errors."),
+ etap:is(D#daemon.buf, [], "No extra data left in the buffer.").
+
diff --git a/1.1.x/test/etap/173-os-daemon-cfg-register.t b/1.1.x/test/etap/173-os-daemon-cfg-register.t
new file mode 100755
index 00000000..71181aa2
--- /dev/null
+++ b/1.1.x/test/etap/173-os-daemon-cfg-register.t
@@ -0,0 +1,96 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(daemon, {
+ port,
+ name,
+ cmd,
+ kill,
+ status=running,
+ cfg_patterns=[],
+ errors=[],
+ buf=[]
+}).
+
+daemon_name() ->
+ "wheee".
+
+daemon_cmd() ->
+ test_util:build_file("test/etap/test_cfg_register").
+
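+% On boot, test_cfg_register writes ["register", "s1"] and
+% ["register", "s2", "k"], so the daemon restarts when anything in
+% section s1 or the specific key s2/k changes, and ignores the rest.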
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(27),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
+test() ->
+ couch_config:start_link(test_util:config_files()),
+ couch_os_daemons:start_link(),
+
+ DaemonCmd = daemon_cmd() ++ " 2> /dev/null",
+
+ etap:diag("Booting the daemon"),
+ couch_config:set("os_daemons", daemon_name(), DaemonCmd, false),
+ timer:sleep(1000),
+ {ok, [D1]} = couch_os_daemons:info([table]),
+ check_daemon(D1, running),
+
+ etap:diag("Daemon restarts when section changes."),
+ couch_config:set("s1", "k", "foo", false),
+ timer:sleep(1000),
+ {ok, [D2]} = couch_os_daemons:info([table]),
+ check_daemon(D2, running),
+ etap:isnt(D2#daemon.kill, D1#daemon.kill, "Kill command shows restart."),
+
+ etap:diag("Daemon doesn't restart for ignored section key."),
+ couch_config:set("s2", "k2", "baz", false),
+ timer:sleep(1000),
+ {ok, [D3]} = couch_os_daemons:info([table]),
+ etap:is(D3, D2, "Same daemon info after ignored config change."),
+
+ etap:diag("Daemon restarts for specific section/key pairs."),
+ couch_config:set("s2", "k", "bingo", false),
+ timer:sleep(1000),
+ {ok, [D4]} = couch_os_daemons:info([table]),
+ check_daemon(D4, running),
+ etap:isnt(D4#daemon.kill, D3#daemon.kill, "Kill command changed again."),
+
+ ok.
+
+check_daemon(D, Status) ->
+ BaseName = filename:basename(daemon_cmd()) ++ " 2> /dev/null",
+ BaseLen = length(BaseName),
+ CmdLen = length(D#daemon.cmd),
+ CmdName = lists:sublist(D#daemon.cmd, CmdLen-BaseLen+1, BaseLen),
+
+ etap:is(is_port(D#daemon.port), true, "Daemon port is a port."),
+ etap:is(D#daemon.name, daemon_name(), "Daemon name was set correctly."),
+ etap:is(CmdName, BaseName, "Command name was set correctly."),
+ etap:isnt(D#daemon.kill, undefined, "Kill command was set."),
+ etap:is(D#daemon.status, Status, "Daemon status is correct."),
+ etap:is(D#daemon.cfg_patterns, [{"s1"}, {"s2", "k"}], "Cfg patterns set"),
+ etap:is(D#daemon.errors, [], "No errors have occurred."),
+ etap:isnt(D#daemon.buf, nil, "Buffer is active.").
diff --git a/1.1.x/test/etap/180-http-proxy.ini b/1.1.x/test/etap/180-http-proxy.ini
new file mode 100644
index 00000000..3e2ba137
--- /dev/null
+++ b/1.1.x/test/etap/180-http-proxy.ini
@@ -0,0 +1,20 @@
+; Licensed to the Apache Software Foundation (ASF) under one
+; or more contributor license agreements. See the NOTICE file
+; distributed with this work for additional information
+; regarding copyright ownership. The ASF licenses this file
+; to you under the Apache License, Version 2.0 (the
+; "License"); you may not use this file except in compliance
+; with the License. You may obtain a copy of the License at
+;
+; http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing,
+; software distributed under the License is distributed on an
+; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+; KIND, either express or implied. See the License for the
+; specific language governing permissions and limitations
+; under the License.
+
+; Port 49151 is IANA-reserved, so assume no one is listening there.
+[httpd_global_handlers]
+_error = {couch_httpd_proxy, handle_proxy_req, <<"http://127.0.0.1:49151/">>}
diff --git a/1.1.x/test/etap/180-http-proxy.t b/1.1.x/test/etap/180-http-proxy.t
new file mode 100755
index 00000000..41c77631
--- /dev/null
+++ b/1.1.x/test/etap/180-http-proxy.t
@@ -0,0 +1,383 @@
+#!/usr/bin/env escript
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(req, {method=get, path="", headers=[], body="", opts=[]}).
+
+server() ->
+ lists:concat([
+ "http://127.0.0.1:",
+ mochiweb_socket_server:get(couch_httpd, port),
+ "/_test/"
+ ]).
+
+proxy() ->
+ "http://127.0.0.1:" ++ integer_to_list(test_web:get_port()) ++ "/".
+
+external() -> "https://www.google.com/".
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(61),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag("Test died abnormally: ~p", [Other]),
+ etap:bail("Bad return value.")
+ end,
+ ok.
+
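+% check_request installs Remote as the assertion to be run inside the
+% test_web stub server, sends Req through CouchDB's proxy handler, and
+% then applies the Local fun to the ibrowse response.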
+check_request(Name, Req, Remote, Local) ->
+ case Remote of
+ no_remote -> ok;
+ _ -> test_web:set_assert(Remote)
+ end,
+ Url = case proplists:lookup(url, Req#req.opts) of
+ none -> server() ++ Req#req.path;
+ {url, DestUrl} -> DestUrl
+ end,
+ Opts = [{headers_as_is, true} | Req#req.opts],
+    Resp = ibrowse:send_req(
+ Url, Req#req.headers, Req#req.method, Req#req.body, Opts
+ ),
+ %etap:diag("ibrowse response: ~p", [Resp]),
+ case Local of
+ no_local -> ok;
+ _ -> etap:fun_is(Local, Resp, Name)
+ end,
+ case {Remote, Local} of
+ {no_remote, _} ->
+ ok;
+ {_, no_local} ->
+ ok;
+ _ ->
+ etap:is(test_web:check_last(), was_ok, Name ++ " - request handled")
+ end,
+ Resp.
+
+test() ->
+ couch_server_sup:start_link([
+ test_util:source_file("test/etap/180-http-proxy.ini") |
+ test_util:config_files()
+ ]),
+ ibrowse:start(),
+ crypto:start(),
+
+ % start the test_web server on a random port
+ test_web:start_link(),
+ Url = lists:concat([
+ "{couch_httpd_proxy, handle_proxy_req, <<\"http://127.0.0.1:",
+ test_web:get_port(),
+ "/\">>}"
+ ]),
+ couch_config:set("httpd_global_handlers", "_test", Url, false),
+
+ % let couch_httpd restart
+ timer:sleep(100),
+
+ test_basic(),
+ test_alternate_status(),
+ test_trailing_slash(),
+ test_passes_header(),
+ test_passes_host_header(),
+ test_passes_header_back(),
+ test_rewrites_location_headers(),
+ test_doesnt_rewrite_external_locations(),
+ test_rewrites_relative_location(),
+ test_uses_same_version(),
+ test_passes_body(),
+ test_passes_eof_body_back(),
+ test_passes_chunked_body(),
+ test_passes_chunked_body_back(),
+
+ test_connect_error(),
+
+ ok.
+
+test_basic() ->
+ Remote = fun(Req) ->
+ 'GET' = Req:get(method),
+ "/" = Req:get(path),
+ 0 = Req:get(body_length),
+ <<>> = Req:recv_body(),
+ {ok, {200, [{"Content-Type", "text/plain"}], "ok"}}
+ end,
+ Local = fun({ok, "200", _, "ok"}) -> true; (_) -> false end,
+ check_request("Basic proxy test", #req{}, Remote, Local).
+
+test_alternate_status() ->
+ Remote = fun(Req) ->
+ "/alternate_status" = Req:get(path),
+ {ok, {201, [], "ok"}}
+ end,
+ Local = fun({ok, "201", _, "ok"}) -> true; (_) -> false end,
+ Req = #req{path="alternate_status"},
+ check_request("Alternate status", Req, Remote, Local).
+
+test_trailing_slash() ->
+ Remote = fun(Req) ->
+ "/trailing_slash/" = Req:get(path),
+ {ok, {200, [], "ok"}}
+ end,
+ Local = fun({ok, "200", _, "ok"}) -> true; (_) -> false end,
+ Req = #req{path="trailing_slash/"},
+ check_request("Trailing slash", Req, Remote, Local).
+
+test_passes_header() ->
+ Remote = fun(Req) ->
+ "/passes_header" = Req:get(path),
+ "plankton" = Req:get_header_value("X-CouchDB-Ralph"),
+ {ok, {200, [], "ok"}}
+ end,
+ Local = fun({ok, "200", _, "ok"}) -> true; (_) -> false end,
+ Req = #req{
+ path="passes_header",
+ headers=[{"X-CouchDB-Ralph", "plankton"}]
+ },
+ check_request("Passes header", Req, Remote, Local).
+
+test_passes_host_header() ->
+ Remote = fun(Req) ->
+ "/passes_host_header" = Req:get(path),
+ "www.google.com" = Req:get_header_value("Host"),
+ {ok, {200, [], "ok"}}
+ end,
+ Local = fun({ok, "200", _, "ok"}) -> true; (_) -> false end,
+ Req = #req{
+ path="passes_host_header",
+ headers=[{"Host", "www.google.com"}]
+ },
+ check_request("Passes host header", Req, Remote, Local).
+
+test_passes_header_back() ->
+ Remote = fun(Req) ->
+ "/passes_header_back" = Req:get(path),
+ {ok, {200, [{"X-CouchDB-Plankton", "ralph"}], "ok"}}
+ end,
+ Local = fun
+ ({ok, "200", Headers, "ok"}) ->
+ lists:member({"X-CouchDB-Plankton", "ralph"}, Headers);
+ (_) ->
+ false
+ end,
+ Req = #req{path="passes_header_back"},
+ check_request("Passes header back", Req, Remote, Local).
+
+test_rewrites_location_headers() ->
+ etap:diag("Testing location header rewrites."),
+ do_rewrite_tests([
+ {"Location", proxy() ++ "foo/bar", server() ++ "foo/bar"},
+ {"Content-Location", proxy() ++ "bing?q=2", server() ++ "bing?q=2"},
+ {"Uri", proxy() ++ "zip#frag", server() ++ "zip#frag"},
+ {"Destination", proxy(), server()}
+ ]).
+
+test_doesnt_rewrite_external_locations() ->
+ etap:diag("Testing no rewrite of external locations."),
+ do_rewrite_tests([
+ {"Location", external() ++ "search", external() ++ "search"},
+ {"Content-Location", external() ++ "s?q=2", external() ++ "s?q=2"},
+ {"Uri", external() ++ "f#f", external() ++ "f#f"},
+ {"Destination", external() ++ "f?q=2#f", external() ++ "f?q=2#f"}
+ ]).
+
+test_rewrites_relative_location() ->
+ etap:diag("Testing relative rewrites."),
+ do_rewrite_tests([
+ {"Location", "/foo", server() ++ "foo"},
+ {"Content-Location", "bar", server() ++ "bar"},
+ {"Uri", "/zing?q=3", server() ++ "zing?q=3"},
+ {"Destination", "bing?q=stuff#yay", server() ++ "bing?q=stuff#yay"}
+ ]).
+
+do_rewrite_tests(Tests) ->
+ lists:foreach(fun({Header, Location, Url}) ->
+ do_rewrite_test(Header, Location, Url)
+ end, Tests).
+
+do_rewrite_test(Header, Location, Url) ->
+ Remote = fun(Req) ->
+ "/rewrite_test" = Req:get(path),
+ {ok, {302, [{Header, Location}], "ok"}}
+ end,
+ Local = fun
+ ({ok, "302", Headers, "ok"}) ->
+ etap:is(
+ couch_util:get_value(Header, Headers),
+ Url,
+ "Header rewritten correctly."
+ ),
+ true;
+ (_) ->
+ false
+ end,
+ Req = #req{path="rewrite_test"},
+ Label = "Rewrite test for ",
+ check_request(Label ++ Header, Req, Remote, Local).
+
+test_uses_same_version() ->
+ Remote = fun(Req) ->
+ "/uses_same_version" = Req:get(path),
+ {1, 0} = Req:get(version),
+ {ok, {200, [], "ok"}}
+ end,
+ Local = fun({ok, "200", _, "ok"}) -> true; (_) -> false end,
+ Req = #req{
+ path="uses_same_version",
+ opts=[{http_vsn, {1, 0}}]
+ },
+ check_request("Uses same version", Req, Remote, Local).
+
+test_passes_body() ->
+ Remote = fun(Req) ->
+ 'PUT' = Req:get(method),
+ "/passes_body" = Req:get(path),
+ <<"Hooray!">> = Req:recv_body(),
+ {ok, {201, [], "ok"}}
+ end,
+ Local = fun({ok, "201", _, "ok"}) -> true; (_) -> false end,
+ Req = #req{
+ method=put,
+ path="passes_body",
+ body="Hooray!"
+ },
+ check_request("Passes body", Req, Remote, Local).
+
+test_passes_eof_body_back() ->
+ BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
+ Remote = fun(Req) ->
+ 'GET' = Req:get(method),
+ "/passes_eof_body" = Req:get(path),
+ {raw, {200, [{"Connection", "close"}], BodyChunks}}
+ end,
+ Local = fun({ok, "200", _, "foobarbazinga"}) -> true; (_) -> false end,
+ Req = #req{path="passes_eof_body"},
+ check_request("Passes eof body", Req, Remote, Local).
+
+test_passes_chunked_body() ->
+ BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
+ Remote = fun(Req) ->
+ 'POST' = Req:get(method),
+ "/passes_chunked_body" = Req:get(path),
+ RecvBody = fun
+ ({Length, Chunk}, [Chunk | Rest]) ->
+ Length = size(Chunk),
+ Rest;
+ ({0, []}, []) ->
+ ok
+ end,
+ ok = Req:stream_body(1024*1024, RecvBody, BodyChunks),
+ {ok, {201, [], "ok"}}
+ end,
+ Local = fun({ok, "201", _, "ok"}) -> true; (_) -> false end,
+ Req = #req{
+ method=post,
+ path="passes_chunked_body",
+ headers=[{"Transfer-Encoding", "chunked"}],
+ body=mk_chunked_body(BodyChunks)
+ },
+ check_request("Passes chunked body", Req, Remote, Local).
+
+test_passes_chunked_body_back() ->
+ Name = "Passes chunked body back",
+ Remote = fun(Req) ->
+ 'GET' = Req:get(method),
+ "/passes_chunked_body_back" = Req:get(path),
+ BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
+ {chunked, {200, [{"Transfer-Encoding", "chunked"}], BodyChunks}}
+ end,
+ Req = #req{
+ path="passes_chunked_body_back",
+ opts=[{stream_to, self()}]
+ },
+
+ Resp = check_request(Name, Req, Remote, no_local),
+
+ etap:fun_is(
+ fun({ibrowse_req_id, _}) -> true; (_) -> false end,
+ Resp,
+ "Received an ibrowse request id."
+ ),
+ {_, ReqId} = Resp,
+
+ % Grab headers from response
+ receive
+ {ibrowse_async_headers, ReqId, "200", Headers} ->
+ etap:is(
+ proplists:get_value("Transfer-Encoding", Headers),
+ "chunked",
+ "Response included the Transfer-Encoding: chunked header"
+ ),
+ ibrowse:stream_next(ReqId)
+ after 1000 ->
+ throw({error, timeout})
+ end,
+
+ % Check body received
+ % TODO: When we upgrade to ibrowse >= 2.0.0 this check needs to
+ % check that the chunks returned are what we sent from the
+ % Remote test.
+ etap:diag("TODO: UPGRADE IBROWSE"),
+ etap:is(recv_body(ReqId, []), <<"foobarbazinga">>, "Decoded chunked body."),
+
+ % Check test_web server.
+ etap:is(test_web:check_last(), was_ok, Name ++ " - request handled").
+
+test_connect_error() ->
+ Local = fun({ok, "500", _Headers, _Body}) -> true; (_) -> false end,
+ Url = lists:concat([
+ "http://127.0.0.1:",
+ mochiweb_socket_server:get(couch_httpd, port),
+ "/_error"
+ ]),
+ Req = #req{opts=[{url, Url}]},
+ check_request("Connect error", Req, no_remote, Local).
+
+
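+% Hand-rolled HTTP/1.1 chunked transfer encoding: each chunk is its hex
+% size, CRLF, the bytes, CRLF, with a zero-length chunk terminating.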
+mk_chunked_body(Chunks) ->
+ mk_chunked_body(Chunks, []).
+
+mk_chunked_body([], Acc) ->
+ iolist_to_binary(lists:reverse(Acc, "0\r\n\r\n"));
+mk_chunked_body([Chunk | Rest], Acc) ->
+ Size = to_hex(size(Chunk)),
+ mk_chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]).
+
+to_hex(Val) ->
+ to_hex(Val, []).
+
+to_hex(0, Acc) ->
+ Acc;
+to_hex(Val, Acc) ->
+ to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
+
+hex_char(V) when V < 10 -> $0 + V;
+hex_char(V) -> $A + V - 10.
+
+recv_body(ReqId, Acc) ->
+ receive
+ {ibrowse_async_response, ReqId, Data} ->
+ recv_body(ReqId, [Data | Acc]);
+ {ibrowse_async_response_end, ReqId} ->
+ iolist_to_binary(lists:reverse(Acc));
+ Else ->
+ throw({error, unexpected_mesg, Else})
+ after 5000 ->
+ throw({error, timeout})
+ end.
diff --git a/1.1.x/test/etap/200-view-group-no-db-leaks.t b/1.1.x/test/etap/200-view-group-no-db-leaks.t
new file mode 100755
index 00000000..9c77f1a8
--- /dev/null
+++ b/1.1.x/test/etap/200-view-group-no-db-leaks.t
@@ -0,0 +1,264 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(user_ctx, {
+ name = null,
+ roles = [],
+ handler
+}).
+
+-define(LATEST_DISK_VERSION, 5).
+
+-record(db_header,
+ {disk_version = ?LATEST_DISK_VERSION,
+ update_seq = 0,
+ unused = 0,
+ fulldocinfo_by_id_btree_state = nil,
+ docinfo_by_seq_btree_state = nil,
+ local_docs_btree_state = nil,
+ purge_seq = 0,
+ purged_docs = nil,
+ security_ptr = nil,
+ revs_limit = 1000
+}).
+
+-record(db, {
+ main_pid = nil,
+ update_pid = nil,
+ compactor_pid = nil,
+ instance_start_time, % number of microsecs since jan 1 1970 as a binary string
+ fd,
+ fd_ref_counter,
+ header = #db_header{},
+ committed_update_seq,
+ fulldocinfo_by_id_btree,
+ docinfo_by_seq_btree,
+ local_docs_btree,
+ update_seq,
+ name,
+ filepath,
+ validate_doc_funs = [],
+ security = [],
+ security_ptr = nil,
+ user_ctx = #user_ctx{},
+ waiting_delayed_commit = nil,
+ revs_limit = 1000,
+ fsync_options = [],
+ is_sys_db = false
+}).
+
+test_db_name() -> <<"couch_test_view_group_db_leaks">>.
+ddoc_name() -> <<"foo">>.
+
+main(_) ->
+ test_util:init_code_path(),
+
+ etap:plan(18),
+ case (catch test()) of
+ ok ->
+ etap:end_tests();
+ Other ->
+ etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+ etap:bail(Other)
+ end,
+ ok.
+
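+% The view group must share the database's fd ref counter instead of
+% holding its own file descriptor; otherwise compaction and deletion
+% would leak the old database file.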
+test() ->
+ couch_server_sup:start_link(test_util:config_files()),
+ timer:sleep(1000),
+ put(addr, couch_config:get("httpd", "bind_address", "127.0.0.1")),
+ put(port, integer_to_list(mochiweb_socket_server:get(couch_httpd, port))),
+ application:start(inets),
+
+ delete_db(),
+ create_db(),
+
+ create_docs(),
+ create_design_doc(),
+
+ ViewGroup = couch_view:get_group_server(
+ test_db_name(), <<"_design/", (ddoc_name())/binary>>),
+ etap:is(is_pid(ViewGroup), true, "got view group pid"),
+ etap:is(is_process_alive(ViewGroup), true, "view group pid is alive"),
+
+ query_view(),
+ check_db_ref_count(),
+ etap:is(is_process_alive(ViewGroup), true, "view group pid is alive"),
+
+ create_new_doc(<<"doc1000">>),
+ query_view(),
+ check_db_ref_count(),
+ etap:is(is_process_alive(ViewGroup), true, "view group pid is alive"),
+
+ Ref1 = get_db_ref_counter(),
+ compact_db(),
+ check_db_ref_count(),
+ Ref2 = get_db_ref_counter(),
+ etap:isnt(Ref1, Ref2, "DB ref counter changed"),
+    etap:is(is_process_alive(Ref1), false, "old DB ref counter is not alive"),
+ etap:is(is_process_alive(ViewGroup), true, "view group pid is alive"),
+
+ compact_view_group(),
+ check_db_ref_count(),
+ Ref3 = get_db_ref_counter(),
+ etap:is(Ref3, Ref2, "DB ref counter didn't change"),
+ etap:is(is_process_alive(ViewGroup), true, "view group pid is alive"),
+
+ create_new_doc(<<"doc1001">>),
+ query_view(),
+ check_db_ref_count(),
+ etap:is(is_process_alive(ViewGroup), true, "view group pid is alive"),
+
+ MonRef = erlang:monitor(process, ViewGroup),
+ ok = couch_server:delete(test_db_name(), []),
+ receive
+ {'DOWN', MonRef, _, _, _} ->
+ etap:diag("view group is dead after DB deletion")
+ after 5000 ->
+ etap:bail("view group did not die after DB deletion")
+ end,
+
+ ok = timer:sleep(1000),
+ delete_db(),
+ couch_server_sup:stop(),
+ ok.
+
+admin_user_ctx() ->
+ {user_ctx, #user_ctx{roles=[<<"_admin">>]}}.
+
+create_db() ->
+ {ok, Db} = couch_db:create(test_db_name(), [admin_user_ctx()]),
+ ok = couch_db:close(Db).
+
+delete_db() ->
+ couch_server:delete(test_db_name(), [admin_user_ctx()]).
+
+compact_db() ->
+ {ok, Db} = couch_db:open_int(test_db_name(), []),
+ ok = couch_db:start_compact(Db),
+ ok = couch_db:close(Db),
+ wait_db_compact_done(10).
+
+wait_db_compact_done(0) ->
+ etap:bail("DB compaction failed to finish.");
+wait_db_compact_done(N) ->
+ {ok, Db} = couch_db:open_int(test_db_name(), []),
+ ok = couch_db:close(Db),
+ case is_pid(Db#db.compactor_pid) of
+ false ->
+ ok;
+ true ->
+ ok = timer:sleep(500),
+ wait_db_compact_done(N - 1)
+ end.
+
+compact_view_group() ->
+ ok = couch_view_compactor:start_compact(test_db_name(), ddoc_name()),
+ wait_view_compact_done(10).
+
+wait_view_compact_done(0) ->
+ etap:bail("View group compaction failed to finish.");
+wait_view_compact_done(N) ->
+ {ok, {{_, Code, _}, _Headers, Body}} = http:request(
+ get,
+ {db_url() ++ "/_design/" ++ binary_to_list(ddoc_name()) ++ "/_info", []},
+ [],
+ [{sync, true}]),
+ case Code of
+ 200 -> ok;
+ _ -> etap:bail("Invalid view group info.")
+ end,
+ {Info} = couch_util:json_decode(Body),
+ {IndexInfo} = couch_util:get_value(<<"view_index">>, Info),
+ CompactRunning = couch_util:get_value(<<"compact_running">>, IndexInfo),
+ case CompactRunning of
+ false ->
+ ok;
+ true ->
+ ok = timer:sleep(500),
+ wait_view_compact_done(N - 1)
+ end.
+
+get_db_ref_counter() ->
+ {ok, #db{fd_ref_counter = Ref} = Db} = couch_db:open_int(test_db_name(), []),
+ ok = couch_db:close(Db),
+ Ref.
+
+check_db_ref_count() ->
+ {ok, #db{fd_ref_counter = Ref} = Db} = couch_db:open_int(test_db_name(), []),
+ ok = couch_db:close(Db),
+ etap:is(couch_ref_counter:count(Ref), 2,
+ "DB ref counter is only held by couch_db and couch_db_updater"),
+ ok.
+
+create_docs() ->
+ {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
+ Doc1 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc1">>},
+ {<<"value">>, 1}
+ ]}),
+ Doc2 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc2">>},
+ {<<"value">>, 2}
+ ]}),
+ Doc3 = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"doc3">>},
+ {<<"value">>, 3}
+ ]}),
+ {ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db).
+
+create_design_doc() ->
+ {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
+ DDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, <<"_design/", (ddoc_name())/binary>>},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>, {[
+ {<<"bar">>, {[
+ {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+ ]}}
+ ]}}
+ ]}),
+ {ok, _} = couch_db:update_docs(Db, [DDoc]),
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db).
+
+create_new_doc(Id) ->
+ {ok, Db} = couch_db:open(test_db_name(), [admin_user_ctx()]),
+ Doc666 = couch_doc:from_json_obj({[
+ {<<"_id">>, Id},
+ {<<"value">>, 999}
+ ]}),
+ {ok, _} = couch_db:update_docs(Db, [Doc666]),
+ couch_db:ensure_full_commit(Db),
+ couch_db:close(Db).
+
+db_url() ->
+ "http://" ++ get(addr) ++ ":" ++ get(port) ++ "/" ++
+ binary_to_list(test_db_name()).
+
+query_view() ->
+ {ok, {{_, Code, _}, _Headers, _Body}} = http:request(
+ get,
+ {db_url() ++ "/_design/" ++ binary_to_list(ddoc_name()) ++
+ "/_view/bar", []},
+ [],
+ [{sync, true}]),
+ etap:is(Code, 200, "got view response"),
+ ok.
diff --git a/1.1.x/test/etap/Makefile.am b/1.1.x/test/etap/Makefile.am
new file mode 100644
index 00000000..9ba3fcfa
--- /dev/null
+++ b/1.1.x/test/etap/Makefile.am
@@ -0,0 +1,88 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+noinst_SCRIPTS = run
+noinst_DATA = test_util.beam test_web.beam
+
+noinst_PROGRAMS = test_cfg_register
+test_cfg_register_SOURCES = test_cfg_register.c
+test_cfg_register_CFLAGS = -D_BSD_SOURCE
+
+%.beam: %.erl
+ $(ERLC) $<
+
+run: run.tpl
+ sed -e "s|%abs_top_srcdir%|@abs_top_srcdir@|g" \
+ -e "s|%abs_top_builddir%|@abs_top_builddir@|g" > \
+ $@ < $<
+ chmod +x $@
+
+CLEANFILES = run *.beam
+DISTCLEANFILES = temp.*
+
+EXTRA_DIST = \
+ run.tpl \
+ test_web.erl \
+ random_port.ini \
+ 001-load.t \
+ 002-icu-driver.t \
+ 010-file-basics.t \
+ 011-file-headers.t \
+ 020-btree-basics.t \
+ 021-btree-reductions.t \
+ 030-doc-from-json.t \
+ 031-doc-to-json.t \
+ 040-util.t \
+ 041-uuid-gen-seq.ini \
+ 041-uuid-gen-utc.ini \
+ 041-uuid-gen.t \
+ 050-stream.t \
+ 060-kt-merging.t \
+ 061-kt-missing-leaves.t \
+ 062-kt-remove-leaves.t \
+ 063-kt-get-leaves.t \
+ 064-kt-counting.t \
+ 065-kt-stemming.t \
+ 070-couch-db.t \
+ 080-config-get-set.t \
+ 081-config-override.1.ini \
+ 081-config-override.2.ini \
+ 081-config-override.t \
+ 082-config-register.t \
+ 083-config-no-files.t \
+ 090-task-status.t \
+ 100-ref-counter.t \
+ 110-replication-httpc.t \
+ 111-replication-changes-feed.t \
+ 112-replication-missing-revs.t \
+ 113-replication-attachment-comp.t \
+ 120-stats-collect.t \
+ 121-stats-aggregates.cfg \
+ 121-stats-aggregates.ini \
+ 121-stats-aggregates.t \
+ 130-attachments-md5.t \
+ 140-attachment-comp.t \
+ 150-invalid-view-seq.t \
+ 160-vhosts.t \
+ 170-os-daemons.es \
+ 170-os-daemons.t \
+ 171-os-daemons-config.es \
+ 171-os-daemons-config.t \
+ 172-os-daemon-errors.1.es \
+ 172-os-daemon-errors.2.es \
+ 172-os-daemon-errors.3.es \
+ 172-os-daemon-errors.4.es \
+ 172-os-daemon-errors.t \
+ 173-os-daemon-cfg-register.t \
+ 180-http-proxy.ini \
+ 180-http-proxy.t \
+ 200-view-group-no-db-leaks.t
diff --git a/1.1.x/test/etap/random_port.ini b/1.1.x/test/etap/random_port.ini
new file mode 100644
index 00000000..ada3c13d
--- /dev/null
+++ b/1.1.x/test/etap/random_port.ini
@@ -0,0 +1,19 @@
+; Licensed to the Apache Software Foundation (ASF) under one
+; or more contributor license agreements. See the NOTICE file
+; distributed with this work for additional information
+; regarding copyright ownership. The ASF licenses this file
+; to you under the Apache License, Version 2.0 (the
+; "License"); you may not use this file except in compliance
+; with the License. You may obtain a copy of the License at
+;
+; http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing,
+; software distributed under the License is distributed on an
+; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+; KIND, either express or implied. See the License for the
+; specific language governing permissions and limitations
+; under the License.
+
+[httpd]
+port = 0
diff --git a/1.1.x/test/etap/run.tpl b/1.1.x/test/etap/run.tpl
new file mode 100644
index 00000000..faf0f456
--- /dev/null
+++ b/1.1.x/test/etap/run.tpl
@@ -0,0 +1,29 @@
+#!/bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+SRCDIR="%abs_top_srcdir%"
+BUILDIR="%abs_top_builddir%"
+
+export ERL_FLAGS="$ERL_FLAGS -pa $BUILDIR/test/etap/"
+
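+# With arguments, run just the named test scripts; with none, hand the
+# whole etap suite to prove(1).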
+if test $# -gt 0; then
+ while [ $# -gt 0 ]; do
+ $1
+ shift
+ done
+else
+ prove $SRCDIR/test/etap/*.t
+fi
diff --git a/1.1.x/test/etap/test_cfg_register.c b/1.1.x/test/etap/test_cfg_register.c
new file mode 100644
index 00000000..7161eb55
--- /dev/null
+++ b/1.1.x/test/etap/test_cfg_register.c
@@ -0,0 +1,34 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
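+// On boot this helper prints two "register" messages so that
+// couch_os_daemons will restart it when section "s1" or the key
+// "s2"/"k" changes; it then waits for a line (or EOF) on stdin.
+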
+#include <stdio.h>
+
+int
+main(int argc, const char * argv[])
+{
+ char c = '\0';
+ size_t num = 1;
+
+ fprintf(stdout, "[\"register\", \"s1\"]\n");
+ fprintf(stdout, "[\"register\", \"s2\", \"k\"]\n");
+ fflush(stdout);
+
+ while(c != '\n' && num > 0) {
+ num = fread(&c, 1, 1, stdin);
+ }
+
+    return 0;
+}
diff --git a/1.1.x/test/etap/test_util.erl.in b/1.1.x/test/etap/test_util.erl.in
new file mode 100644
index 00000000..460b0293
--- /dev/null
+++ b/1.1.x/test/etap/test_util.erl.in
@@ -0,0 +1,42 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_util).
+
+-export([init_code_path/0]).
+-export([source_file/1, build_file/1, config_files/0]).
+
+srcdir() ->
+ "@abs_top_srcdir@".
+
+builddir() ->
+ "@abs_top_builddir@".
+
+init_code_path() ->
+ Paths = ["etap", "couchdb", "erlang-oauth", "ibrowse", "mochiweb"],
+ lists:foreach(fun(Name) ->
+ code:add_patha(filename:join([builddir(), "src", Name]))
+ end, Paths).
+
+source_file(Name) ->
+ filename:join([srcdir(), Name]).
+
+build_file(Name) ->
+ filename:join([builddir(), Name]).
+
+config_files() ->
+ [
+ build_file("etc/couchdb/default_dev.ini"),
+ build_file("etc/couchdb/local_dev.ini"),
+ source_file("test/etap/random_port.ini")
+ ].
+
diff --git a/1.1.x/test/etap/test_web.erl b/1.1.x/test/etap/test_web.erl
new file mode 100644
index 00000000..ed78651f
--- /dev/null
+++ b/1.1.x/test/etap/test_web.erl
@@ -0,0 +1,102 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_web).
+-behaviour(gen_server).
+
+-export([start_link/0, loop/1, get_port/0, set_assert/1, check_last/0]).
+-export([init/1, terminate/2, code_change/3]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+
+-define(SERVER, test_web_server).
+-define(HANDLER, test_web_handler).
+
+start_link() ->
+ gen_server:start({local, ?HANDLER}, ?MODULE, [], []),
+ mochiweb_http:start([
+ {name, ?SERVER},
+ {loop, {?MODULE, loop}},
+ {port, 0}
+ ]).
+
+loop(Req) ->
+ %etap:diag("Handling request: ~p", [Req]),
+ case gen_server:call(?HANDLER, {check_request, Req}) of
+ {ok, RespInfo} ->
+ {ok, Req:respond(RespInfo)};
+ {raw, {Status, Headers, BodyChunks}} ->
+ Resp = Req:start_response({Status, Headers}),
+ lists:foreach(fun(C) -> Resp:send(C) end, BodyChunks),
+ erlang:put(mochiweb_request_force_close, true),
+ {ok, Resp};
+ {chunked, {Status, Headers, BodyChunks}} ->
+ Resp = Req:respond({Status, Headers, chunked}),
+ timer:sleep(500),
+ lists:foreach(fun(C) -> Resp:write_chunk(C) end, BodyChunks),
+ Resp:write_chunk([]),
+ {ok, Resp};
+ {error, Reason} ->
+ etap:diag("Error: ~p", [Reason]),
+ Body = lists:flatten(io_lib:format("Error: ~p", [Reason])),
+ {ok, Req:respond({200, [], Body})}
+ end.
+
+get_port() ->
+ mochiweb_socket_server:get(?SERVER, port).
+
+set_assert(Fun) ->
+ ok = gen_server:call(?HANDLER, {set_assert, Fun}).
+
+check_last() ->
+ gen_server:call(?HANDLER, last_status).
+
+init(_) ->
+ {ok, nil}.
+
+terminate(_Reason, _State) ->
+ ok.
+
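+% The gen_server state is the pending assertion fun; it becomes was_ok
+% or not_ok once a request has been checked, and nil after the status
+% is read back via last_status.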
+handle_call({check_request, Req}, _From, State) when is_function(State, 1) ->
+ Resp2 = case (catch State(Req)) of
+ {ok, Resp} -> {reply, {ok, Resp}, was_ok};
+ {raw, Resp} -> {reply, {raw, Resp}, was_ok};
+ {chunked, Resp} -> {reply, {chunked, Resp}, was_ok};
+ Error -> {reply, {error, Error}, not_ok}
+ end,
+ Req:cleanup(),
+ Resp2;
+handle_call({check_request, _Req}, _From, _State) ->
+ {reply, {error, no_assert_function}, not_ok};
+handle_call(last_status, _From, State) when is_atom(State) ->
+ {reply, State, nil};
+handle_call(last_status, _From, State) ->
+ {reply, {error, not_checked}, State};
+handle_call({set_assert, Fun}, _From, nil) ->
+ {reply, ok, Fun};
+handle_call({set_assert, _}, _From, State) ->
+ {reply, {error, assert_function_set}, State};
+handle_call(Msg, _From, State) ->
+ {reply, {ignored, Msg}, State}.
+
+handle_cast(Msg, State) ->
+ etap:diag("Ignoring cast message: ~p", [Msg]),
+ {noreply, State}.
+
+handle_info(Msg, State) ->
+ etap:diag("Ignoring info message: ~p", [Msg]),
+ {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/1.1.x/test/javascript/Makefile.am b/1.1.x/test/javascript/Makefile.am
new file mode 100644
index 00000000..71f9ae62
--- /dev/null
+++ b/1.1.x/test/javascript/Makefile.am
@@ -0,0 +1,25 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+EXTRA_DIST = \
+ cli_runner.js \
+ couch_http.js \
+ run.tpl
+
+noinst_SCRIPTS = run
+CLEANFILES = run
+
+run: run.tpl
+ sed -e "s|%abs_top_srcdir%|$(abs_top_srcdir)|" \
+ -e "s|%abs_top_builddir%|$(abs_top_builddir)|" \
+ < $< > $@
+ chmod +x $@
diff --git a/1.1.x/test/javascript/cli_runner.js b/1.1.x/test/javascript/cli_runner.js
new file mode 100644
index 00000000..cdbe2e73
--- /dev/null
+++ b/1.1.x/test/javascript/cli_runner.js
@@ -0,0 +1,54 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var console = {
+ log: function(arg) {
+ var msg = (arg.toString()).replace(/\n/g, "\n ");
+ print("# " + msg);
+ }
+};
+
+function T(arg1, arg2) {
+ if(!arg1) {
+ throw((arg2 ? arg2 : arg1).toString());
+ }
+}
+
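+// Emit TAP output ("ok N name" / "not ok N name") so the suite can be
+// driven by prove, mirroring the etap tests.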
+function runTestConsole(num, name, func) {
+ try {
+ func();
+ print("ok " + num + " " + name);
+ } catch(e) {
+    var msg = e.toString();
+ msg = msg.replace(/\n/g, "\n ");
+ print("not ok " + num + " " + name + " " + msg);
+ }
+}
+
+function runAllTestsConsole() {
+ var numTests = 0;
+ for(var t in couchTests) { numTests += 1; }
+ print("1.." + numTests);
+ var testId = 0;
+ for(var t in couchTests) {
+ testId += 1;
+ runTestConsole(testId, t, couchTests[t]);
+ }
+}
+
+try {
+ runAllTestsConsole();
+} catch (e) {
+ p("# " + e.toString());
+}
diff --git a/1.1.x/test/javascript/couch_http.js b/1.1.x/test/javascript/couch_http.js
new file mode 100644
index 00000000..5f4716d2
--- /dev/null
+++ b/1.1.x/test/javascript/couch_http.js
@@ -0,0 +1,65 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
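+// Adapt the native CouchHTTP object (presumably couchjs's cURL-backed
+// HTTP binding) to the XMLHttpRequest-style interface couch.js expects:
+// resolve relative URLs against base_url, drop headers cURL manages
+// itself, and parse raw response headers into an object.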
+(function() {
+  if(typeof(CouchHTTP) != "undefined") {
+    CouchHTTP.prototype.base_url = "http://127.0.0.1:5984";
+
+ CouchHTTP.prototype.open = function(method, url, async) {
+ if(!/^\s*http:\/\//.test(url)) {
+ if(/^[^\/]/.test(url)) {
+ url = this.base_url + "/" + url;
+ } else {
+ url = this.base_url + url;
+ }
+ }
+
+ return this._open(method, url, async);
+ };
+
+ CouchHTTP.prototype.setRequestHeader = function(name, value) {
+ // Drop content-length headers because cURL will set it for us
+ // based on body length
+ if(name.toLowerCase().replace(/^\s+|\s+$/g, '') != "content-length") {
+ this._setRequestHeader(name, value);
+ }
+    };
+
+ CouchHTTP.prototype.send = function(body) {
+ this._send(body || "");
+ var headers = {};
+ this._headers.forEach(function(hdr) {
+ var pair = hdr.split(":");
+ var name = pair.shift();
+ headers[name] = pair.join(":").replace(/^\s+|\s+$/g, "");
+ });
+ this.headers = headers;
+ };
+
+ CouchHTTP.prototype.getResponseHeader = function(name) {
+ for(var hdr in this.headers) {
+ if(hdr.toLowerCase() == name.toLowerCase()) {
+ return this.headers[hdr];
+ }
+ }
+ return null;
+ };
+ }
+})();
+
+CouchDB.urlPrefix = "";
+CouchDB.newXhr = function() {
+ return new CouchHTTP();
+};
diff --git a/1.1.x/test/javascript/run.tpl b/1.1.x/test/javascript/run.tpl
new file mode 100644
index 00000000..c5abe6e7
--- /dev/null
+++ b/1.1.x/test/javascript/run.tpl
@@ -0,0 +1,30 @@
+#!/bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+SRC_DIR=%abs_top_srcdir%
+SCRIPT_DIR=$SRC_DIR/share/www/script
+JS_TEST_DIR=$SRC_DIR/test/javascript
+
+COUCHJS=%abs_top_builddir%/src/couchdb/priv/couchjs
+
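+# Concatenate the browser test suite with the CLI adapters below and
+# pipe the combined script into couchjs ("-" is presumably its
+# read-from-stdin flag).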
+cat $SCRIPT_DIR/json2.js \
+ $SCRIPT_DIR/sha1.js \
+ $SCRIPT_DIR/oauth.js \
+ $SCRIPT_DIR/couch.js \
+ $SCRIPT_DIR/couch_test_runner.js \
+ $SCRIPT_DIR/couch_tests.js \
+ $SCRIPT_DIR/test/*.js \
+ $JS_TEST_DIR/couch_http.js \
+ $JS_TEST_DIR/cli_runner.js \
+ | $COUCHJS -
diff --git a/1.1.x/test/view_server/Makefile.am b/1.1.x/test/view_server/Makefile.am
new file mode 100644
index 00000000..11e7feb4
--- /dev/null
+++ b/1.1.x/test/view_server/Makefile.am
@@ -0,0 +1,15 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+EXTRA_DIST = \
+ query_server_spec.rb \
+ run_native_process.es
diff --git a/1.1.x/test/view_server/query_server_spec.rb b/1.1.x/test/view_server/query_server_spec.rb
new file mode 100644
index 00000000..de1df5c1
--- /dev/null
+++ b/1.1.x/test/view_server/query_server_spec.rb
@@ -0,0 +1,824 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# to run (requires ruby and rspec):
+# spec test/view_server/query_server_spec.rb -f specdoc --color
+#
+# environment options:
+# QS_TRACE=true
+# shows full output from the query server
+# QS_LANG=lang
+# run tests on the query server (for now, one of: js, erlang)
+#
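+# example invocations (assuming the rspec 1.x "spec" runner is installed):
+#   QS_LANG=js spec test/view_server/query_server_spec.rb -f specdoc --color
+#   QS_TRACE=true QS_LANG=erlang spec test/view_server/query_server_spec.rb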
+
+COUCH_ROOT = "#{File.dirname(__FILE__)}/../.." unless defined?(COUCH_ROOT)
+LANGUAGE = ENV["QS_LANG"] || "js"
+
+puts "Running query server specs for #{LANGUAGE} query server"
+
+require 'spec'
+require 'json'
+
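+# OSProcessRunner drives a query server over its line protocol: each
+# command is one JSON array written to the server's stdin, each reply is
+# one line of JSON on its stdout, and "log" replies are echoed (under
+# QS_TRACE) and skipped.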
+class OSProcessRunner
+ def self.run
+ trace = ENV["QS_TRACE"] || false
+ puts "launching #{run_command}" if trace
+ if block_given?
+ IO.popen(run_command, "r+") do |io|
+ qs = QueryServerRunner.new(io, trace)
+ yield qs
+ end
+ else
+ io = IO.popen(run_command, "r+")
+ QueryServerRunner.new(io, trace)
+ end
+ end
+ def initialize io, trace = false
+ @qsio = io
+ @trace = trace
+ end
+ def close
+ @qsio.close
+ end
+ def reset!
+ run(["reset"])
+ end
+ def add_fun(fun)
+ run(["add_fun", fun])
+ end
+ def teach_ddoc(ddoc)
+ run(["ddoc", "new", ddoc_id(ddoc), ddoc])
+ end
+ def ddoc_run(ddoc, fun_path, args)
+ run(["ddoc", ddoc_id(ddoc), fun_path, args])
+ end
+ def ddoc_id(ddoc)
+ d_id = ddoc["_id"]
+ raise 'ddoc must have _id' unless d_id
+ d_id
+ end
+ def get_chunks
+ resp = jsgets
+ raise "not a chunk" unless resp.first == "chunks"
+ return resp[1]
+ end
+ def run json
+ rrun json
+ jsgets
+ end
+ def rrun json
+ line = json.to_json
+ puts "run: #{line}" if @trace
+ @qsio.puts line
+ end
+ def rgets
+ resp = @qsio.gets
+ puts "got: #{resp}" if @trace
+ resp
+ end
+ def jsgets
+ resp = rgets
+ # err = @qserr.gets
+ # puts "err: #{err}" if err
+ if resp
+ begin
+ rj = JSON.parse("[#{resp.chomp}]")[0]
+ rescue JSON::ParserError
+ puts "JSON ERROR (dump under trace mode)"
+ # puts resp.chomp
+ while resp = rgets
+ # puts resp.chomp
+ end
+ end
+      if rj.is_a?(Array)
+ if rj[0] == "log"
+ log = rj[1]
+ puts "log: #{log}" if @trace
+ rj = jsgets
+ end
+ end
+ rj
+ else
+ raise "no response"
+ end
+ end
+end
+
+class QueryServerRunner < OSProcessRunner
+
+ COMMANDS = {
+ "js" => "#{COUCH_ROOT}/bin/couchjs_dev #{COUCH_ROOT}/share/server/main.js",
+ "erlang" => "#{COUCH_ROOT}/test/view_server/run_native_process.es"
+ }
+
+ def self.run_command
+ COMMANDS[LANGUAGE]
+ end
+end
+
+class ExternalRunner < OSProcessRunner
+ def self.run_command
+ "#{COUCH_ROOT}/src/couchdb/couchjs #{COUCH_ROOT}/share/server/echo.js"
+ end
+end
+
+# we could organize this into a design document per language.
+# that would make testing future languages really easy.
+
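+# Each test function is keyed by name and provided per language; the
+# specs below select the variant for LANGUAGE at runtime.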
+functions = {
+ "emit-twice" => {
+ "js" => %{function(doc){emit("foo",doc.a); emit("bar",doc.a)}},
+ "erlang" => <<-ERLANG
+ fun({Doc}) ->
+ A = couch_util:get_value(<<"a">>, Doc, null),
+ Emit(<<"foo">>, A),
+ Emit(<<"bar">>, A)
+ end.
+ ERLANG
+ },
+ "emit-once" => {
+ "js" => <<-JS,
+ function(doc){
+ emit("baz",doc.a)
+ }
+ JS
+ "erlang" => <<-ERLANG
+ fun({Doc}) ->
+ A = couch_util:get_value(<<"a">>, Doc, null),
+ Emit(<<"baz">>, A)
+ end.
+ ERLANG
+ },
+ "reduce-values-length" => {
+ "js" => %{function(keys, values, rereduce) { return values.length; }},
+ "erlang" => %{fun(Keys, Values, ReReduce) -> length(Values) end.}
+ },
+ "reduce-values-sum" => {
+ "js" => %{function(keys, values, rereduce) { return sum(values); }},
+ "erlang" => %{fun(Keys, Values, ReReduce) -> lists:sum(Values) end.}
+ },
+ "validate-forbidden" => {
+ "js" => <<-JS,
+ function(newDoc, oldDoc, userCtx) {
+ if(newDoc.bad)
+ throw({forbidden:"bad doc"}); "foo bar";
+ }
+ JS
+ "erlang" => <<-ERLANG
+ fun({NewDoc}, _OldDoc, _UserCtx) ->
+ case couch_util:get_value(<<"bad">>, NewDoc) of
+ undefined -> 1;
+ _ -> {[{forbidden, <<"bad doc">>}]}
+ end
+ end.
+ ERLANG
+ },
+ "show-simple" => {
+ "js" => <<-JS,
+ function(doc, req) {
+ log("ok");
+ return [doc.title, doc.body].join(' - ');
+ }
+ JS
+ "erlang" => <<-ERLANG
+ fun({Doc}, Req) ->
+ Title = couch_util:get_value(<<"title">>, Doc),
+ Body = couch_util:get_value(<<"body">>, Doc),
+ Resp = <<Title/binary, " - ", Body/binary>>,
+ {[{<<"body">>, Resp}]}
+ end.
+ ERLANG
+ },
+ "show-headers" => {
+ "js" => <<-JS,
+ function(doc, req) {
+ var resp = {"code":200, "headers":{"X-Plankton":"Rusty"}};
+ resp.body = [doc.title, doc.body].join(' - ');
+ return resp;
+ }
+ JS
+ "erlang" => <<-ERLANG
+ fun({Doc}, Req) ->
+ Title = couch_util:get_value(<<"title">>, Doc),
+ Body = couch_util:get_value(<<"body">>, Doc),
+ Resp = <<Title/binary, " - ", Body/binary>>,
+ {[
+ {<<"code">>, 200},
+ {<<"headers">>, {[{<<"X-Plankton">>, <<"Rusty">>}]}},
+ {<<"body">>, Resp}
+ ]}
+ end.
+ ERLANG
+ },
+ "show-sends" => {
+ "js" => <<-JS,
+ function(head, req) {
+ start({headers:{"Content-Type" : "text/plain"}});
+ send("first chunk");
+ send('second "chunk"');
+ return "tail";
+ };
+ JS
+ "erlang" => <<-ERLANG
+ fun(Head, Req) ->
+ Resp = {[
+ {<<"headers">>, {[{<<"Content-Type">>, <<"text/plain">>}]}}
+ ]},
+ Start(Resp),
+ Send(<<"first chunk">>),
+ Send(<<"second \\\"chunk\\\"">>),
+ <<"tail">>
+ end.
+ ERLANG
+ },
+ "show-while-get-rows" => {
+ "js" => <<-JS,
+ function(head, req) {
+ send("first chunk");
+ send(req.q);
+ var row;
+ log("about to getRow " + typeof(getRow));
+ while(row = getRow()) {
+ send(row.key);
+ };
+ return "tail";
+ };
+ JS
+ "erlang" => <<-ERLANG,
+ fun(Head, {Req}) ->
+ Send(<<"first chunk">>),
+ Send(couch_util:get_value(<<"q">>, Req)),
+ Fun = fun({Row}, _) ->
+ Send(couch_util:get_value(<<"key">>, Row)),
+ {ok, nil}
+ end,
+ {ok, _} = FoldRows(Fun, nil),
+ <<"tail">>
+ end.
+ ERLANG
+ },
+ "show-while-get-rows-multi-send" => {
+ "js" => <<-JS,
+ function(head, req) {
+ send("bacon");
+ var row;
+ log("about to getRow " + typeof(getRow));
+ while(row = getRow()) {
+ send(row.key);
+ send("eggs");
+ };
+ return "tail";
+ };
+ JS
+ "erlang" => <<-ERLANG,
+ fun(Head, Req) ->
+ Send(<<"bacon">>),
+ Fun = fun({Row}, _) ->
+ Send(couch_util:get_value(<<"key">>, Row)),
+ Send(<<"eggs">>),
+ {ok, nil}
+ end,
+ FoldRows(Fun, nil),
+ <<"tail">>
+ end.
+ ERLANG
+ },
+ "list-simple" => {
+ "js" => <<-JS,
+ function(head, req) {
+ send("first chunk");
+ send(req.q);
+ var row;
+ while(row = getRow()) {
+ send(row.key);
+ };
+ return "early";
+ };
+ JS
+ "erlang" => <<-ERLANG,
+ fun(Head, {Req}) ->
+ Send(<<"first chunk">>),
+ Send(couch_util:get_value(<<"q">>, Req)),
+ Fun = fun({Row}, _) ->
+ Send(couch_util:get_value(<<"key">>, Row)),
+ {ok, nil}
+ end,
+ FoldRows(Fun, nil),
+ <<"early">>
+ end.
+ ERLANG
+ },
+ "list-chunky" => {
+ "js" => <<-JS,
+ function(head, req) {
+ send("first chunk");
+ send(req.q);
+ var row, i=0;
+ while(row = getRow()) {
+ send(row.key);
+ i += 1;
+ if (i > 2) {
+ return('early tail');
+ }
+ };
+ };
+ JS
+ "erlang" => <<-ERLANG,
+ fun(Head, {Req}) ->
+ Send(<<"first chunk">>),
+ Send(couch_util:get_value(<<"q">>, Req)),
+ Fun = fun
+ ({Row}, Count) when Count < 2 ->
+ Send(couch_util:get_value(<<"key">>, Row)),
+ {ok, Count+1};
+ ({Row}, Count) when Count == 2 ->
+ Send(couch_util:get_value(<<"key">>, Row)),
+ {stop, <<"early tail">>}
+ end,
+ {ok, Tail} = FoldRows(Fun, 0),
+ Tail
+ end.
+ ERLANG
+ },
+ "list-old-style" => {
+ "js" => <<-JS,
+ function(head, req, foo, bar) {
+ return "stuff";
+ }
+ JS
+ "erlang" => <<-ERLANG,
+ fun(Head, Req, Foo, Bar) ->
+ <<"stuff">>
+ end.
+ ERLANG
+ },
+ "list-capped" => {
+ "js" => <<-JS,
+ function(head, req) {
+ send("bacon")
+ var row, i = 0;
+ while(row = getRow()) {
+ send(row.key);
+ i += 1;
+ if (i > 2) {
+ return('early');
+ }
+ };
+ }
+ JS
+ "erlang" => <<-ERLANG,
+ fun(Head, Req) ->
+ Send(<<"bacon">>),
+ Fun = fun
+ ({Row}, Count) when Count < 2 ->
+ Send(couch_util:get_value(<<"key">>, Row)),
+ {ok, Count+1};
+ ({Row}, Count) when Count == 2 ->
+ Send(couch_util:get_value(<<"key">>, Row)),
+ {stop, <<"early">>}
+ end,
+ {ok, Tail} = FoldRows(Fun, 0),
+ Tail
+ end.
+ ERLANG
+ },
+ "list-raw" => {
+ "js" => <<-JS,
+ function(head, req) {
+ // log(this.toSource());
+ // log(typeof send);
+ send("first chunk");
+ send(req.q);
+ var row;
+ while(row = getRow()) {
+ send(row.key);
+ };
+ return "tail";
+ };
+ JS
+ "erlang" => <<-ERLANG,
+ fun(Head, {Req}) ->
+ Send(<<"first chunk">>),
+ Send(couch_util:get_value(<<"q">>, Req)),
+ Fun = fun({Row}, _) ->
+ Send(couch_util:get_value(<<"key">>, Row)),
+ {ok, nil}
+ end,
+ FoldRows(Fun, nil),
+ <<"tail">>
+ end.
+ ERLANG
+ },
+ "filter-basic" => {
+ "js" => <<-JS,
+ function(doc, req) {
+ if (doc.good) {
+ return true;
+ }
+ }
+ JS
+ "erlang" => <<-ERLANG,
+ fun({Doc}, Req) ->
+ couch_util:get_value(<<"good">>, Doc)
+ end.
+ ERLANG
+ },
+ "update-basic" => {
+ "js" => <<-JS,
+ function(doc, req) {
+ doc.world = "hello";
+ var resp = [doc, "hello doc"];
+ return resp;
+ }
+ JS
+ "erlang" => <<-ERLANG,
+ fun({Doc}, Req) ->
+ Doc2 = [{<<"world">>, <<"hello">>}|Doc],
+ [{Doc2}, {[{<<"body">>, <<"hello doc">>}]}]
+ end.
+ ERLANG
+ },
+ "error" => {
+ "js" => <<-JS,
+ function() {
+ throw(["error","error_key","testing"]);
+ }
+ JS
+ "erlang" => <<-ERLANG
+ fun(A, B) ->
+ throw([<<"error">>,<<"error_key">>,<<"testing">>])
+ end.
+ ERLANG
+ },
+ "fatal" => {
+ "js" => <<-JS,
+ function() {
+ throw(["fatal","error_key","testing"]);
+ }
+ JS
+ "erlang" => <<-ERLANG
+ fun(A, B) ->
+ throw([<<"fatal">>,<<"error_key">>,<<"testing">>])
+ end.
+ ERLANG
+ }
+}
+
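+# Build a minimal design doc holding fun_str at fun_path, e.g.
+# make_ddoc(["shows","simple"], src) yields
+# {"_id"=>"foo", "shows"=>{"simple"=>src}}.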
+def make_ddoc(fun_path, fun_str)
+ doc = {"_id"=>"foo"}
+ d = doc
+ while p = fun_path.shift
+ l = p
+ if !fun_path.empty?
+ d[p] = {}
+ d = d[p]
+ end
+ end
+ d[l] = fun_str
+ doc
+end
+
+describe "query server normal case" do
+ before(:all) do
+ `cd #{COUCH_ROOT} && make`
+ @qs = QueryServerRunner.run
+ end
+ after(:all) do
+ @qs.close
+ end
+ it "should reset" do
+ @qs.run(["reset"]).should == true
+ end
+ it "should not erase ddocs on reset" do
+ @fun = functions["show-simple"][LANGUAGE]
+ @ddoc = make_ddoc(["shows","simple"], @fun)
+ @qs.teach_ddoc(@ddoc)
+ @qs.run(["reset"]).should == true
+ @qs.ddoc_run(@ddoc,
+ ["shows","simple"],
+ [{:title => "Best ever", :body => "Doc body"}, {}]).should ==
+ ["resp", {"body" => "Best ever - Doc body"}]
+ end
+
+ it "should run map funs" do
+ @qs.reset!
+ @qs.run(["add_fun", functions["emit-twice"][LANGUAGE]]).should == true
+ @qs.run(["add_fun", functions["emit-once"][LANGUAGE]]).should == true
+ rows = @qs.run(["map_doc", {:a => "b"}])
+ rows[0][0].should == ["foo", "b"]
+ rows[0][1].should == ["bar", "b"]
+ rows[1][0].should == ["baz", "b"]
+ end
+ describe "reduce" do
+ before(:all) do
+ @fun = functions["reduce-values-length"][LANGUAGE]
+ @qs.reset!
+ end
+ it "should reduce" do
+ kvs = (0...10).collect{|i|[i,i*2]}
+ @qs.run(["reduce", [@fun], kvs]).should == [true, [10]]
+ end
+ end
+ describe "rereduce" do
+ before(:all) do
+ @fun = functions["reduce-values-sum"][LANGUAGE]
+ @qs.reset!
+ end
+ it "should rereduce" do
+ vs = (0...10).collect{|i|i}
+ @qs.run(["rereduce", [@fun], vs]).should == [true, [45]]
+ end
+ end
+
+ describe "design docs" do
+ before(:all) do
+ @ddoc = {
+ "_id" => "foo"
+ }
+ @qs.reset!
+ end
+ it "should learn design docs" do
+ @qs.teach_ddoc(@ddoc).should == true
+ end
+ end
+
+ # it "should validate"
+ describe "validation" do
+ before(:all) do
+ @fun = functions["validate-forbidden"][LANGUAGE]
+ @ddoc = make_ddoc(["validate_doc_update"], @fun)
+ @qs.teach_ddoc(@ddoc)
+ end
+ it "should allow good updates" do
+ @qs.ddoc_run(@ddoc,
+ ["validate_doc_update"],
+ [{"good" => true}, {}, {}]).should == 1
+ end
+ it "should reject invalid updates" do
+ @qs.ddoc_run(@ddoc,
+ ["validate_doc_update"],
+ [{"bad" => true}, {}, {}]).should == {"forbidden"=>"bad doc"}
+ end
+ end
+
+ describe "show" do
+ before(:all) do
+ @fun = functions["show-simple"][LANGUAGE]
+ @ddoc = make_ddoc(["shows","simple"], @fun)
+ @qs.teach_ddoc(@ddoc)
+ end
+ it "should show" do
+ @qs.ddoc_run(@ddoc,
+ ["shows","simple"],
+ [{:title => "Best ever", :body => "Doc body"}, {}]).should ==
+ ["resp", {"body" => "Best ever - Doc body"}]
+ end
+ end
+
+ describe "show with headers" do
+ before(:all) do
+ # TODO we can make real ddocs up there.
+ @fun = functions["show-headers"][LANGUAGE]
+ @ddoc = make_ddoc(["shows","headers"], @fun)
+ @qs.teach_ddoc(@ddoc)
+ end
+ it "should show headers" do
+ @qs.ddoc_run(
+ @ddoc,
+ ["shows","headers"],
+ [{:title => "Best ever", :body => "Doc body"}, {}]
+ ).
+ should == ["resp", {"code"=>200,"headers" => {"X-Plankton"=>"Rusty"}, "body" => "Best ever - Doc body"}]
+ end
+ end
+
+ describe "recoverable error" do
+ before(:all) do
+ @fun = functions["error"][LANGUAGE]
+ @ddoc = make_ddoc(["shows","error"], @fun)
+ @qs.teach_ddoc(@ddoc)
+ end
+ it "should not exit" do
+ @qs.ddoc_run(@ddoc, ["shows","error"],
+ [{"foo"=>"bar"}, {"q" => "ok"}]).
+ should == ["error", "error_key", "testing"]
+ # still running
+ @qs.run(["reset"]).should == true
+ end
+ end
+
+ describe "changes filter" do
+ before(:all) do
+ @fun = functions["filter-basic"][LANGUAGE]
+ @ddoc = make_ddoc(["filters","basic"], @fun)
+ @qs.teach_ddoc(@ddoc)
+ end
+ it "should only return true for good docs" do
+ @qs.ddoc_run(@ddoc,
+ ["filters","basic"],
+ [[{"key"=>"bam", "good" => true}, {"foo" => "bar"}, {"good" => true}], {"req" => "foo"}]
+ ).
+ should == [true, [true, false, true]]
+ end
+ end
+
+ describe "update" do
+ before(:all) do
+ # in another patch we can remove this duplication
+ # by setting up the design doc for each language ahead of time.
+ @fun = functions["update-basic"][LANGUAGE]
+ @ddoc = make_ddoc(["updates","basic"], @fun)
+ @qs.teach_ddoc(@ddoc)
+ end
+ it "should return a doc and a resp body" do
+ up, doc, resp = @qs.ddoc_run(@ddoc,
+ ["updates","basic"],
+ [{"foo" => "gnarly"}, {"method" => "POST"}]
+ )
+ up.should == "up"
+ doc.should == {"foo" => "gnarly", "world" => "hello"}
+ resp["body"].should == "hello doc"
+ end
+ end
+
+# end
+# LIST TESTS
+# __END__
+
+ describe "ddoc list" do
+ before(:all) do
+ @ddoc = {
+ "_id" => "foo",
+ "lists" => {
+ "simple" => functions["list-simple"][LANGUAGE],
+ "headers" => functions["show-sends"][LANGUAGE],
+ "rows" => functions["show-while-get-rows"][LANGUAGE],
+ "buffer-chunks" => functions["show-while-get-rows-multi-send"][LANGUAGE],
+ "chunky" => functions["list-chunky"][LANGUAGE]
+ }
+ }
+ @qs.teach_ddoc(@ddoc)
+ end
+
+ describe "example list" do
+ it "should run normal" do
+ @qs.ddoc_run(@ddoc,
+ ["lists","simple"],
+ [{"foo"=>"bar"}, {"q" => "ok"}]
+ ).should == ["start", ["first chunk", "ok"], {"headers"=>{}}]
+ @qs.run(["list_row", {"key"=>"baz"}]).should == ["chunks", ["baz"]]
+ @qs.run(["list_row", {"key"=>"bam"}]).should == ["chunks", ["bam"]]
+ @qs.run(["list_row", {"key"=>"foom"}]).should == ["chunks", ["foom"]]
+ @qs.run(["list_row", {"key"=>"fooz"}]).should == ["chunks", ["fooz"]]
+ @qs.run(["list_row", {"key"=>"foox"}]).should == ["chunks", ["foox"]]
+ @qs.run(["list_end"]).should == ["end" , ["early"]]
+ end
+ end
+
+ describe "headers" do
+ it "should do headers proper" do
+ @qs.ddoc_run(@ddoc, ["lists","headers"],
+ [{"total_rows"=>1000}, {"q" => "ok"}]
+ ).should == ["start", ["first chunk", 'second "chunk"'],
+ {"headers"=>{"Content-Type"=>"text/plain"}}]
+ @qs.rrun(["list_end"])
+ @qs.jsgets.should == ["end", ["tail"]]
+ end
+ end
+
+ describe "with rows" do
+ it "should list em" do
+ @qs.ddoc_run(@ddoc, ["lists","rows"],
+ [{"foo"=>"bar"}, {"q" => "ok"}]).
+ should == ["start", ["first chunk", "ok"], {"headers"=>{}}]
+ @qs.rrun(["list_row", {"key"=>"baz"}])
+ @qs.get_chunks.should == ["baz"]
+ @qs.rrun(["list_row", {"key"=>"bam"}])
+ @qs.get_chunks.should == ["bam"]
+ @qs.rrun(["list_end"])
+ @qs.jsgets.should == ["end", ["tail"]]
+ end
+ it "should work with zero rows" do
+ @qs.ddoc_run(@ddoc, ["lists","rows"],
+ [{"foo"=>"bar"}, {"q" => "ok"}]).
+ should == ["start", ["first chunk", "ok"], {"headers"=>{}}]
+ @qs.rrun(["list_end"])
+ @qs.jsgets.should == ["end", ["tail"]]
+ end
+ end
+
+ describe "should buffer multiple chunks sent for a single row." do
+ it "should should buffer em" do
+ @qs.ddoc_run(@ddoc, ["lists","buffer-chunks"],
+ [{"foo"=>"bar"}, {"q" => "ok"}]).
+ should == ["start", ["bacon"], {"headers"=>{}}]
+ @qs.rrun(["list_row", {"key"=>"baz"}])
+ @qs.get_chunks.should == ["baz", "eggs"]
+ @qs.rrun(["list_row", {"key"=>"bam"}])
+ @qs.get_chunks.should == ["bam", "eggs"]
+ @qs.rrun(["list_end"])
+ @qs.jsgets.should == ["end", ["tail"]]
+ end
+ end
+ it "should end after 2" do
+ @qs.ddoc_run(@ddoc, ["lists","chunky"],
+ [{"foo"=>"bar"}, {"q" => "ok"}]).
+ should == ["start", ["first chunk", "ok"], {"headers"=>{}}]
+
+ @qs.run(["list_row", {"key"=>"baz"}]).
+ should == ["chunks", ["baz"]]
+
+ @qs.run(["list_row", {"key"=>"bam"}]).
+ should == ["chunks", ["bam"]]
+
+ @qs.run(["list_row", {"key"=>"foom"}]).
+ should == ["end", ["foom", "early tail"]]
+    # here's where the JS query server has to discard the quit properly
+ @qs.run(["reset"]).
+ should == true
+ end
+ end
+ end
+
+
+
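+# After a fatal error the query server process must be gone: a further
+# command should either get no response (the reader sees EOF) or raise
+# Errno::EPIPE when writing to the closed pipe.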
+def should_have_exited qs
+ begin
+ qs.run(["reset"])
+ "raise before this (except Erlang)".should == true
+ rescue RuntimeError => e
+ e.message.should == "no response"
+ rescue Errno::EPIPE
+ true.should == true
+ end
+end
+
+describe "query server that exits" do
+ before(:each) do
+ @qs = QueryServerRunner.run
+ @ddoc = {
+ "_id" => "foo",
+ "lists" => {
+ "capped" => functions["list-capped"][LANGUAGE],
+ "raw" => functions["list-raw"][LANGUAGE]
+ },
+ "shows" => {
+ "fatal" => functions["fatal"][LANGUAGE]
+ }
+ }
+ @qs.teach_ddoc(@ddoc)
+ end
+ after(:each) do
+ @qs.close
+ end
+
+ describe "only goes to 2 list" do
+ it "should exit if erlang sends too many rows" do
+ @qs.ddoc_run(@ddoc, ["lists","capped"],
+ [{"foo"=>"bar"}, {"q" => "ok"}]).
+ should == ["start", ["bacon"], {"headers"=>{}}]
+ @qs.run(["list_row", {"key"=>"baz"}]).should == ["chunks", ["baz"]]
+ @qs.run(["list_row", {"key"=>"foom"}]).should == ["chunks", ["foom"]]
+ @qs.run(["list_row", {"key"=>"fooz"}]).should == ["end", ["fooz", "early"]]
+ e = @qs.run(["list_row", {"key"=>"foox"}])
+ e[0].should == "error"
+ e[1].should == "unknown_command"
+ should_have_exited @qs
+ end
+ end
+
+ describe "raw list" do
+ it "should exit if it gets a non-row in the middle" do
+ @qs.ddoc_run(@ddoc, ["lists","raw"],
+ [{"foo"=>"bar"}, {"q" => "ok"}]).
+ should == ["start", ["first chunk", "ok"], {"headers"=>{}}]
+ e = @qs.run(["reset"])
+ e[0].should == "error"
+ e[1].should == "list_error"
+ should_have_exited @qs
+ end
+ end
+
+ describe "fatal error" do
+ it "should exit" do
+ @qs.ddoc_run(@ddoc, ["shows","fatal"],
+ [{"foo"=>"bar"}, {"q" => "ok"}]).
+ should == ["error", "error_key", "testing"]
+ should_have_exited @qs
+ end
+ end
+end
+
+describe "thank you for using the tests" do
+ it "for more info run with QS_TRACE=true or see query_server_spec.rb file header" do
+ end
+end
\ No newline at end of file
diff --git a/1.1.x/test/view_server/run_native_process.es b/1.1.x/test/view_server/run_native_process.es
new file mode 100755
index 00000000..fcf16d75
--- /dev/null
+++ b/1.1.x/test/view_server/run_native_process.es
@@ -0,0 +1,59 @@
+#! /usr/bin/env escript
+
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
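+% Drive couch_native_process over the query-server line protocol:
+% read one JSON command per line from stdin, hand it to the process,
+% and write each response back as a single line of JSON on stdout.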
+read() ->
+ case io:get_line('') of
+ eof -> stop;
+ Data -> couch_util:json_decode(Data)
+ end.
+
+send(Data) when is_binary(Data) ->
+ send(binary_to_list(Data));
+send(Data) when is_list(Data) ->
+ io:format(Data ++ "\n", []).
+
+write(Data) ->
+ % log("~p", [Data]),
+ case (catch couch_util:json_encode(Data)) of
+ % when testing, this is what prints your errors
+ {json_encode, Error} -> write({[{<<"error">>, Error}]});
+ Json -> send(Json)
+ end.
+
+% log(Mesg) ->
+% log(Mesg, []).
+% log(Mesg, Params) ->
+% io:format(standard_error, Mesg, Params).
+% jlog(Mesg) ->
+% write([<<"log">>, list_to_binary(io_lib:format("~p",[Mesg]))]).
+
+loop(Pid) ->
+ case read() of
+ stop -> ok;
+ Json ->
+ case (catch couch_native_process:prompt(Pid, Json)) of
+ {error, Reason} ->
+ ok = write([error, Reason, Reason]);
+ Resp ->
+ ok = write(Resp),
+ loop(Pid)
+ end
+ end.
+
+main([]) ->
+ code:add_pathz("src/couchdb"),
+ code:add_pathz("src/mochiweb"),
+ {ok, Pid} = couch_native_process:start_link(),
+ loop(Pid).
+
diff --git a/1.1.x/utils/Makefile.am b/1.1.x/utils/Makefile.am
new file mode 100644
index 00000000..379c1eb0
--- /dev/null
+++ b/1.1.x/utils/Makefile.am
@@ -0,0 +1,42 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+noinst_SCRIPTS = run
+
+CLEANFILES = $(noinst_SCRIPTS)
+
+transform = @program_transform_name@
+couchdb_command_name = `echo couchdb | sed '$(transform)'`
+
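+## Build a developer "run" script from the couchdb template, pointing it
+## at the in-tree binaries, dev ini files, and Erlang code paths rather
+## than the installed locations.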
+run: ../bin/couchdb.tpl
+ sed -e "s|%ERL%|$(ERL)|g" \
+ -e "s|%ICU_CONFIG%|$(ICU_CONFIG)|g" \
+ -e "s|%bindir%|$(abs_top_builddir)/bin|g" \
+ -e "s|%defaultini%|default_dev.ini|g" \
+ -e "s|%localini%|local_dev.ini|g" \
+ -e "s|%localerlanglibdir%|foo \
+ -pa $(abs_top_builddir)\/src\/couchdb \
+ -pa $(abs_top_builddir)\/src\/erlang-oauth \
+ -pa $(abs_top_builddir)\/src\/ibrowse \
+ -pa $(abs_top_builddir)\/src\/mochiweb|g" \
+ -e "s|%localconfdir%|$(abs_top_builddir)/etc/couchdb|g" \
+ -e "s|%localstatelogdir%|$(abs_top_builddir)/tmp/log|g" \
+ -e "s|%localstatelibdir%|$(abs_top_builddir)/tmp/lib|g" \
+ -e "s|%localstatedir%|$(abs_top_builddir)/tmp|g" \
+ -e "s|%bug_uri%|@bug_uri@|g" \
+ -e "s|%package_author_address%|@package_author_address@|g" \
+ -e "s|%package_author_name%|@package_author_name@|g" \
+ -e "s|%package_name%|@package_name@|g" \
+ -e "s|%version%|@version@|g" \
+ -e "s|%couchdb_command_name%|$(couchdb_command_name)|g" > \
+ $@ < $<
+ chmod +x $@
diff --git a/1.1.x/var/Makefile.am b/1.1.x/var/Makefile.am
new file mode 100644
index 00000000..901ab2b7
--- /dev/null
+++ b/1.1.x/var/Makefile.am
@@ -0,0 +1,23 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
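+## Create the runtime state directories at install time, preferring
+## Automake's $(mkdir_p) and falling back to plain "mkdir -p" when the
+## variable is empty.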
+install-data-hook:
+ if test ! "$(mkdir_p)" = ""; then \
+ $(mkdir_p) "$(DESTDIR)$(localstatelibdir)"; \
+ $(mkdir_p) "$(DESTDIR)$(localstatelogdir)"; \
+ $(mkdir_p) "$(DESTDIR)$(localstaterundir)"; \
+ else \
+ echo "WARNING: You may have to create these directories by hand."; \
+ mkdir -p "$(DESTDIR)$(localstatelibdir)"; \
+ mkdir -p "$(DESTDIR)$(localstatelogdir)"; \
+ mkdir -p "$(DESTDIR)$(localstaterundir)"; \
+ fi