summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChristopher Lenz <cmlenz@apache.org>2008-03-28 23:32:19 +0000
committerChristopher Lenz <cmlenz@apache.org>2008-03-28 23:32:19 +0000
commit544a38dd45f6a58d34296c6c768afd086eb2ac70 (patch)
treec84cc02340b06aae189cff0dbfaee698f273f1f5
parent804cbbe033b8e7a3e8d7058aaf31bdf69ef18ac5 (diff)
Imported trunk.
git-svn-id: https://svn.apache.org/repos/asf/incubator/couchdb/trunk@642432 13f79535-47bb-0310-9956-ffa450edef68
-rw-r--r--AUTHORS25
-rw-r--r--BUGS12
-rw-r--r--Makefile.am60
-rw-r--r--NEWS75
-rw-r--r--NOTICE50
-rw-r--r--README343
-rw-r--r--THANKS21
-rw-r--r--acinclude.m4.in31
-rw-r--r--authors.xml24
-rw-r--r--bin/Makefile.am79
-rw-r--r--bin/couchdb.tpl.in340
-rw-r--r--bin/couchjs.tpl.in103
-rwxr-xr-xbootstrap325
-rw-r--r--build-contrib/ac_check_icu.m4_2007-09-12.gzbin0 -> 946 bytes
-rw-r--r--build-contrib/apache_2.txt.gzbin0 -> 3952 bytes
-rw-r--r--build-contrib/config.guess_2007-09-12.gzbin0 -> 13530 bytes
-rw-r--r--build-contrib/config.sub_2007-09-12.gzbin0 -> 9685 bytes
-rw-r--r--configure.ac242
-rw-r--r--etc/Makefile.am128
-rw-r--r--etc/conf/mime.types462
-rw-r--r--etc/couch.ini.tpl.in19
-rw-r--r--etc/couch_httpd.conf.tpl.in11
-rw-r--r--etc/default/Makefile.am13
-rw-r--r--etc/default/couchdb.tpl.in10
-rw-r--r--etc/init/Makefile.am13
-rwxr-xr-xetc/init/couchdb.tpl.in174
-rw-r--r--etc/launchd/Makefile.am13
-rw-r--r--etc/launchd/org.apache.couchdb.plist.tpl.in29
-rw-r--r--etc/logrotate.d/Makefile.am13
-rw-r--r--etc/logrotate.d/couchdb.tpl.in9
-rw-r--r--share/Makefile.am56
-rw-r--r--share/server/main.js165
-rw-r--r--share/www/browse/_create_database.html33
-rw-r--r--share/www/browse/_create_document.html31
-rw-r--r--share/www/browse/_delete_database.html27
-rw-r--r--share/www/browse/_delete_document.html26
-rw-r--r--share/www/browse/_save_view_as.html35
-rw-r--r--share/www/browse/database.html151
-rw-r--r--share/www/browse/document.html88
-rw-r--r--share/www/browse/index.html65
-rw-r--r--share/www/couch_tests.html71
-rw-r--r--share/www/favicon.icobin0 -> 9326 bytes
-rw-r--r--share/www/image/add.gifbin0 -> 520 bytes
-rw-r--r--share/www/image/apply.gifbin0 -> 652 bytes
-rw-r--r--share/www/image/bg.pngbin0 -> 743 bytes
-rw-r--r--share/www/image/cancel.gifbin0 -> 659 bytes
-rw-r--r--share/www/image/delete-mini.gifbin0 -> 257 bytes
-rw-r--r--share/www/image/delete.gifbin0 -> 544 bytes
-rw-r--r--share/www/image/grippie.gifbin0 -> 75 bytes
-rw-r--r--share/www/image/hgrad.gifbin0 -> 118 bytes
-rw-r--r--share/www/image/load.gifbin0 -> 538 bytes
-rw-r--r--share/www/image/logo.pngbin0 -> 2738 bytes
-rw-r--r--share/www/image/order-asc.gifbin0 -> 195 bytes
-rw-r--r--share/www/image/order-desc.gifbin0 -> 187 bytes
-rw-r--r--share/www/image/path.gifbin0 -> 363 bytes
-rw-r--r--share/www/image/run-mini.gifbin0 -> 236 bytes
-rw-r--r--share/www/image/run.gifbin0 -> 489 bytes
-rw-r--r--share/www/image/running.gifbin0 -> 548 bytes
-rw-r--r--share/www/image/save.gifbin0 -> 537 bytes
-rw-r--r--share/www/image/spinner.gifbin0 -> 10819 bytes
-rw-r--r--share/www/image/test_failure.gifbin0 -> 114 bytes
-rw-r--r--share/www/image/test_success.gifbin0 -> 185 bytes
-rw-r--r--share/www/image/thead-key.gifbin0 -> 77 bytes
-rw-r--r--share/www/image/thead.gifbin0 -> 51 bytes
-rw-r--r--share/www/image/toggle-collapse.gifbin0 -> 176 bytes
-rw-r--r--share/www/image/toggle-expand.gifbin0 -> 181 bytes
-rw-r--r--share/www/image/twisty.gifbin0 -> 160 bytes
-rw-r--r--share/www/index.html95
-rw-r--r--share/www/replicator.html148
-rw-r--r--share/www/script/browse.js632
-rw-r--r--share/www/script/couch.js205
-rw-r--r--share/www/script/couch_tests.js1027
-rw-r--r--share/www/script/jquery.cookies.js47
-rw-r--r--share/www/script/jquery.dialog.js92
-rw-r--r--share/www/script/jquery.js3408
-rw-r--r--share/www/script/jquery.resizer.js55
-rw-r--r--share/www/script/jquery.suggest.js129
-rw-r--r--share/www/script/json2.js159
-rw-r--r--share/www/script/pprint.js60
-rw-r--r--share/www/script/shell.js700
-rw-r--r--share/www/shell.html79
-rw-r--r--share/www/style/layout.css405
-rw-r--r--src/Makefile.am15
-rw-r--r--src/couch_inets/Makefile.am177
-rw-r--r--src/couch_inets/couch_inets.app84
-rw-r--r--src/couch_inets/ftp.erl1597
-rw-r--r--src/couch_inets/ftp_internal.hrl19
-rw-r--r--src/couch_inets/ftp_progress.erl125
-rw-r--r--src/couch_inets/ftp_response.erl190
-rw-r--r--src/couch_inets/ftp_sup.erl57
-rw-r--r--src/couch_inets/http.erl396
-rw-r--r--src/couch_inets/http_base_64.erl126
-rw-r--r--src/couch_inets/http_chunk.erl289
-rw-r--r--src/couch_inets/http_cookie.erl389
-rw-r--r--src/couch_inets/http_internal.hrl105
-rw-r--r--src/couch_inets/http_request.erl278
-rw-r--r--src/couch_inets/http_response.erl206
-rw-r--r--src/couch_inets/http_transport.erl291
-rw-r--r--src/couch_inets/http_uri.erl113
-rw-r--r--src/couch_inets/http_util.erl171
-rw-r--r--src/couch_inets/httpc_handler.erl953
-rw-r--r--src/couch_inets/httpc_internal.hrl87
-rw-r--r--src/couch_inets/httpc_manager.erl475
-rw-r--r--src/couch_inets/httpc_request.erl193
-rw-r--r--src/couch_inets/httpc_response.erl320
-rw-r--r--src/couch_inets/httpc_sup.erl70
-rw-r--r--src/couch_inets/httpd.erl516
-rw-r--r--src/couch_inets/httpd.hrl78
-rw-r--r--src/couch_inets/httpd_acceptor.erl155
-rw-r--r--src/couch_inets/httpd_acceptor_sup.erl84
-rw-r--r--src/couch_inets/httpd_cgi.erl122
-rw-r--r--src/couch_inets/httpd_conf.erl720
-rw-r--r--src/couch_inets/httpd_esi.erl106
-rw-r--r--src/couch_inets/httpd_example.erl143
-rw-r--r--src/couch_inets/httpd_instance_sup.erl193
-rw-r--r--src/couch_inets/httpd_manager.erl829
-rw-r--r--src/couch_inets/httpd_misc_sup.erl89
-rw-r--r--src/couch_inets/httpd_request.erl337
-rw-r--r--src/couch_inets/httpd_request_handler.erl516
-rw-r--r--src/couch_inets/httpd_response.erl377
-rw-r--r--src/couch_inets/httpd_script_env.erl141
-rw-r--r--src/couch_inets/httpd_socket.erl62
-rw-r--r--src/couch_inets/httpd_sup.erl137
-rw-r--r--src/couch_inets/httpd_util.erl718
-rw-r--r--src/couch_inets/inets.erl34
-rw-r--r--src/couch_inets/inets_app.erl28
-rw-r--r--src/couch_inets/inets_internal.hrl27
-rw-r--r--src/couch_inets/inets_sup.erl106
-rw-r--r--src/couch_inets/mod_actions.erl92
-rw-r--r--src/couch_inets/mod_alias.erl180
-rw-r--r--src/couch_inets/mod_auth.erl784
-rw-r--r--src/couch_inets/mod_auth.hrl27
-rw-r--r--src/couch_inets/mod_auth_dets.erl228
-rw-r--r--src/couch_inets/mod_auth_mnesia.erl282
-rw-r--r--src/couch_inets/mod_auth_plain.erl295
-rw-r--r--src/couch_inets/mod_auth_server.erl374
-rw-r--r--src/couch_inets/mod_browser.erl247
-rw-r--r--src/couch_inets/mod_cgi.erl331
-rw-r--r--src/couch_inets/mod_dir.erl281
-rw-r--r--src/couch_inets/mod_disk_log.erl396
-rw-r--r--src/couch_inets/mod_esi.erl432
-rw-r--r--src/couch_inets/mod_get.erl125
-rw-r--r--src/couch_inets/mod_head.erl73
-rw-r--r--src/couch_inets/mod_htaccess.erl1075
-rw-r--r--src/couch_inets/mod_include.erl594
-rw-r--r--src/couch_inets/mod_log.erl253
-rw-r--r--src/couch_inets/mod_range.erl416
-rw-r--r--src/couch_inets/mod_responsecontrol.erl301
-rw-r--r--src/couch_inets/mod_security.erl294
-rw-r--r--src/couch_inets/mod_security_server.erl628
-rw-r--r--src/couch_inets/mod_trace.erl87
-rw-r--r--src/couch_inets/tftp.erl310
-rw-r--r--src/couch_inets/tftp.hrl47
-rw-r--r--src/couch_inets/tftp_binary.erl181
-rw-r--r--src/couch_inets/tftp_engine.erl1121
-rw-r--r--src/couch_inets/tftp_file.erl338
-rw-r--r--src/couch_inets/tftp_lib.erl418
-rw-r--r--src/couch_inets/tftp_sup.erl81
-rw-r--r--src/couchdb/Makefile.am97
-rw-r--r--src/couchdb/cjson.erl567
-rw-r--r--src/couchdb/couch.app.tpl.in29
-rw-r--r--src/couchdb/couch_btree.erl590
-rw-r--r--src/couchdb/couch_db.erl757
-rw-r--r--src/couchdb/couch_db.hrl56
-rw-r--r--src/couchdb/couch_db_update_notifier.erl66
-rw-r--r--src/couchdb/couch_doc.erl199
-rw-r--r--src/couchdb/couch_erl_driver.c160
-rw-r--r--src/couchdb/couch_event_sup.erl69
-rw-r--r--src/couchdb/couch_file.erl323
-rw-r--r--src/couchdb/couch_ft_query.erl78
-rw-r--r--src/couchdb/couch_js.c452
-rw-r--r--src/couchdb/couch_key_tree.erl139
-rw-r--r--src/couchdb/couch_log.erl130
-rw-r--r--src/couchdb/couch_query_servers.erl206
-rw-r--r--src/couchdb/couch_rep.erl308
-rw-r--r--src/couchdb/couch_server.erl215
-rw-r--r--src/couchdb/couch_server_sup.erl185
-rw-r--r--src/couchdb/couch_stream.erl252
-rw-r--r--src/couchdb/couch_util.erl316
-rw-r--r--src/couchdb/couch_view.erl616
-rw-r--r--src/couchdb/mod_couch.erl891
-rw-r--r--src/fulltext/lucene/CouchConfig.java62
-rw-r--r--src/fulltext/lucene/CouchDbDirFilter.java30
-rw-r--r--src/fulltext/lucene/LuceneIndexer.java355
-rw-r--r--src/fulltext/lucene/LuceneSearcher.java90
-rw-r--r--src/fulltext/lucene/readme.txt41
-rw-r--r--var/Makefile.am25
187 files changed, 40452 insertions, 0 deletions
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 00000000..3c9cb906
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,25 @@
+Apache CouchDB Authors
+======================
+
+Project Lead
+------------
+
+Apache CouchDB was originally developed by Damien Katz <damien_katz@yahoo.com>
+and he remains the project lead.
+
+Contributors
+------------
+
+A number of people have contributed directly to Apache CouchDB by writing
+documentation or developing software. A list of these people is included below.
+
+ * William Beh <willbeh@gmail.com>
+ * Jan Lehnardt <janlehnardt@gmail.com>
+ * Christopher Lenz <cmlenz@gmx.de>
+ * Dirk Schalge <dirk@epd-me.net>
+ * Noah Slater <nslater@bytesexual.org>
+
+See Also
+--------
+
+For a list of other credits see the `THANKS' file.
diff --git a/BUGS b/BUGS
new file mode 100644
index 00000000..5c7405e0
--- /dev/null
+++ b/BUGS
@@ -0,0 +1,12 @@
+Apache CouchDB Bugs
+===================
+
+Web
+---
+
+Report bugs via the web at <http://groups.google.com/group/couchdb>.
+
+Email
+-----
+
+Report bugs via email to <couchdb@googlegroups.com>.
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 00000000..aa07c7e7
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,60 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+datarootdir = @prefix@/share
+
+SUBDIRS = bin etc src share var
+
+pkgdoc_DATA = AUTHORS.gz BUGS.gz changelog.gz NEWS.gz README.gz THANKS.gz
+
+CLEANFILES = $(pkgdoc_DATA)
+
+DISTCLEANFILES = $(pkgdoc_DATA)
+
+EXTRA_DIST = AUTHORS BUGS ChangeLog NEWS README THANKS
+
+AUTHORS.gz: $(top_srcdir)/AUTHORS
+ -gzip -9 < $< > $@
+
+BUGS.gz: $(top_srcdir)/BUGS
+ -gzip -9 < $< > $@
+
+changelog.gz: $(top_srcdir)/ChangeLog
+ -gzip -9 < $< > $@
+
+NEWS.gz: $(top_srcdir)/NEWS
+ -gzip -9 < $< > $@
+
+README.gz: $(top_srcdir)/README
+ -gzip -9 < $< > $@
+
+THANKS.gz: $(top_srcdir)/THANKS
+ -gzip -9 < $< > $@
+
+local-clean: maintainer-clean
+ @echo "This command is intended for maintainers to use;"
+ @echo "it deletes files that may need special tools to rebuild."
+ rm -f COPYING
+ rm -f ChangeLog
+ rm -f INSTALL
+ rm -f acinclude.m4
+ rm -f aclocal.m4
+ rm -fr build-aux
+ rm -f config.h.in*
+ rm -f configure
+ rm -f configure~
+ rm -f couchdb-$(version).tar.gz
+ rm -f couchdb-$(version).zip
+ rm -fr m4
+ find . -name Makefile.in | grep -v 'src/js/fdlibm' | xargs rm
+
+.PHONY: local-clean
diff --git a/NEWS b/NEWS
new file mode 100644
index 00000000..32f7b813
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,75 @@
+Apache CouchDB News
+===================
+
+Version 0.7.3a1
+---------------
+
+ * Changed core licensing to the Apache License 2.0.
+ * Improved handling of ICU shared libraries.
+ * Apache CouchDB can automatically respawn following a server crash.
+ * Apache CouchDB no longer refuses to start with a stale PID file.
+ * System logrotate configuration provided.
+ * Miscellaneous improvements to the `couchdb' command.
+
+Version 0.7.2
+-------------
+
+ * Small changes to build process and couchdb command.
+ * CouchDB's official port is now 5984 TCP/UDP instead of 8888.
+
+Version 0.7.1
+-------------
+
+ * Small compatibility issue with Firefox 3 fixed.
+
+Version 0.7.0
+-------------
+
+ * CouchDB has been rewritten to use the GNU build system for portability.
+ * The built-in database browsing tool has been rewritten to provide a much
+ nicer interface for interacting directly with CouchDB from your web browser.
+ * XML and Fabric have been replaced with JSON and JavaScript for data
+ transport and View definitions.
+
+Version 0.6.0
+-------------
+
+ * A replication facility is now available.
+ * CouchPeek can now create, delete and view documents.
+ * Building from source is easier and less error prone.
+
+Version 0.5.0
+-------------
+
+ * A built-in CouchPeek utility.
+ * A full install kit buildable from a single command.
+ * A new GNU/Linux version is available. An OS X version is coming soon.
+
+Version 0.4.0
+-------------
+
+ * Non-existent variables are now nil lists.
+ * Couch error codes and messages are no longer sent in the HTTP fields,
+ instead they are exclusively returned in the XML body. This is to avoid HTTP
+ header parsing problems with oddly formed error messages.
+ * Returned error messages are now logged at the server at the `info' level to
+ make general debugging easier.
+ * Fixed a problem where big table builds caused timeout errors.
+ * Lots of changes in the low level machinery. Most formulas will continue to
+ function the same.
+ * Added full compiler support for extended characters in formula source.
+ * Support for Perl/Ruby like regular expressions.
+ * Added `total_rows' and `result_start' attributes to tables.
+
+Version 0.3.0
+-------------
+
+ * CouchDB now fully supports Unicode and locale specific collation via the ICU
+ library, both in the Fabric engine and computed tables.
+ * The `in' operator has been added to Fabric.
+ * The `startdoc' query string variable specifies the starting document to use if
+ there are multiple rows with identical startkeys.
+ * The `skip' query string variable specifies the number of rows to skip before
+ returning results. The `skip' value must be a positive integer. If used with
+ a `count' variable the skipped rows aren't counted as output.
+ * Various changes to the output XML format.
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 00000000..49a11755
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,50 @@
+Apache CouchDB Notice
+=====================
+
+Core Software
+-------------
+
+ * Copyright (C) 2008 Damien Katz <damien_katz@yahoo.com>
+ * Copyright (C) 2008 Jan Lehnardt <jan@php.net>
+ * Copyright (C) 2008 Noah Slater <nslater@bytesexual.org>
+
+Developed at The Apache Software Foundation <http://www.apache.org/>.
+
+Supporting Software
+-------------------
+
+m4/ac_check_icu.m4
+~~~~~~~~~~~~~~~~~~
+
+ * Copyright (C) 2005 Akos Maroy <darkeye@tyrell.hu>
+
+share/www/script/jquery.js
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ * Copyright (C) 2008 John Resig
+
+share/www/script/json2.js
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This file is in the public domain.
+
+src/couch_inets
+~~~~~~~~~~~~~~~
+
+ * Copyright (C) 1999 Ericsson Utvecklings AB
+
+src/js
+~~~~~~
+
+ * Copyright (C) 1999 Netscape Communications Corporation
+
+src/js/editline
+~~~~~~~~~~~~~~~
+
+ * Copyright (C) 1993 Simmule Turner
+ * Copyright (C) 1993 Rich Salz
+
+src/js/fdlibm
+~~~~~~~~~~~~~
+
+ * Copyright (C) 1993 by Sun Microsystems, Inc
diff --git a/README b/README
new file mode 100644
index 00000000..9cbcd95d
--- /dev/null
+++ b/README
@@ -0,0 +1,343 @@
+Apache CouchDB Readme
+=====================
+
+Documentation
+-------------
+
+Visit the wiki <http://www.couchdbwiki.com/> for more documentation.
+
+Building From Subversion
+------------------------
+
+Dependencies
+~~~~~~~~~~~~
+
+To build Apache CouchDB from checkout you need some of the following installed:
+
+ * GNU Automake (>=1.6.3) <http://www.gnu.org/software/automake/> (required)
+ * GNU Autoconf (>=2.59) <http://www.gnu.org/software/autoconf/> (required)
+ * GNU Libtool <http://www.gnu.org/software/libtool/> (required)
+ * svn2cl <http://ch.tudelft.nl/~arthur/svn2cl/> (optional)
+ * xsltproc <http://xmlsoft.org/XSLT/xsltproc2.html> (optional)
+ * help2man <http://www.gnu.org/software/help2man/> (optional)
+
+If you are running a Debian GNU/Linux system (or a derivative such as Ubuntu
+GNU/Linux) you can install these dependencies using the `apt-get' command:
+
+--------------------------------------------------------------------------------
+apt-get install automake autoconf libtool subversion-tools help2man
+--------------------------------------------------------------------------------
+
+If you are running OS X and have MacPorts <http://www.macports.org/> installed
+you can install some of these dependencies by using the `port' command:
+
+--------------------------------------------------------------------------------
+port install automake autoconf libtool help2man
+--------------------------------------------------------------------------------
+
+Note: OS X users should already have Automake, Autoconf and Libtool installed.
+
+Note: MacPorts users will need to install svn2cl by hand.
+
+Bootstrapping
+~~~~~~~~~~~~~
+
+Note: You must repeat this step every time you update your Subversion checkout.
+
+Bootstrap the pristine source by running the following command:
+
+--------------------------------------------------------------------------------
+./bootstrap
+--------------------------------------------------------------------------------
+
+You can use the `-C' option to generate a dummy `ChangeLog' file.
+
+Installation And First Run
+--------------------------
+
+You will need the following installed:
+
+ * Erlang OTP <http://erlang.org/> (required)
+ * ICU <http://icu.sourceforge.net/> (required)
+ * Mozilla SpiderMonkey <http://www.mozilla.org/js/spidermonkey/> (required)
+ * GNU Make <http://www.gnu.org/software/make/> (required)
+ * GNU Compiler Collection <http://gcc.gnu.org/> (required)
+
+UNIX-like Operating Systems (inc. OS X)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Dependencies
+^^^^^^^^^^^^
+
+Debian-based (inc. Ubuntu) Systems
+++++++++++++++++++++++++++++++++++
+
+If you are running a Debian GNU/Linux system (or a derivative such as Ubuntu
+GNU/Linux) you can install the dependencies using the `apt-get' command:
+
+--------------------------------------------------------------------------------
+apt-get install build-essential erlang libicu36 libicu36-dev libmozjs-dev
+--------------------------------------------------------------------------------
+
+If you get an error regarding the `libicu36' or `libicu36-dev' be sure to check
+the version used by your distribution (using `apt-cache search libicu') and
+install those packages instead.
+
+OS X
+++++
+
+To install GNU Make and the GNU Compiler Collection on OS X you should install
+the Xcode Tools metapackage by running the following command:
+
+--------------------------------------------------------------------------------
+open /Applications/Installers/Xcode\ Tools/XcodeTools.mpkg
+--------------------------------------------------------------------------------
+
+We recommend that you satisfy the other dependencies by installing MacPorts
+<http://www.macports.org/> and running the following command:
+
+--------------------------------------------------------------------------------
+port install icu erlang spidermonkey
+--------------------------------------------------------------------------------
+
+Note: Don't forget to open a new terminal after you have installed MacPorts
+as it updates your PATH and you will not be able to run the `port' command
+without the effects of this change.
+
+To update your `locate' database you may want to run the following command:
+
+--------------------------------------------------------------------------------
+sudo /usr/libexec/locate.updatedb
+--------------------------------------------------------------------------------
+
+Installing
+^^^^^^^^^^
+
+Once you have satisfied dependencies you should run the following command:
+
+--------------------------------------------------------------------------------
+./configure
+--------------------------------------------------------------------------------
+
+Note: Apache CouchDB is installed into `/usr/local' by default. If you want to
+change where Apache CouchDB is installed (or where to find Erlang) be sure to
+read the output from running the `./configure --help' command.
+
+Note: All the examples assume you have installed into `/usr/local'.
+
+If everything was successful you should see the following message:
+
+--------------------------------------------------------------------------------
+You have configured Apache CouchDB. Time to relax.
+--------------------------------------------------------------------------------
+
+Relax.
+
+To install Apache CouchDB you should then run the following command:
+
+--------------------------------------------------------------------------------
+make && sudo make install
+--------------------------------------------------------------------------------
+
+If you do not wish to be prompted to overwrite any existing Apache CouchDB
+configuration files you should run the following command:
+
+--------------------------------------------------------------------------------
+sudo make && yes | sudo make install
+--------------------------------------------------------------------------------
+
+Note: Use of the `sudo' command is only required if you are installing into a
+system owned directory. You do not need to do this if you are installing
+elsewhere, such as your home directory.
+
+More options can be found by reading the `INSTALL' file.
+
+Security Considerations
+^^^^^^^^^^^^^^^^^^^^^^^
+
+It is not advisable to run Apache CouchDB as the superuser. We strongly
+recommend that you create a specific user to run Apache CouchDB and own the
+data/log directories.
+
+You can use whatever tool your system provides to create a new `couchdb' user.
+
+On many UNIX-like systems you can run the following command:
+
+--------------------------------------------------------------------------------
+adduser couchdb
+--------------------------------------------------------------------------------
+
+OS X provides the standard Accounts option from the System Preferences
+application or you can optionally use the Workgroup Manager application which
+can be downloaded as part of the Server Admin Tools
+<http://www.apple.com/support/downloads/serveradmintools1047.html>.
+
+You should set the home directory of the `couchdb' user to
+`/usr/local/var/lib/couchdb' which is the Apache CouchDB database directory.
+
+Make sure to change the ownership of the Apache CouchDB data directories by
+running the following commands:
+
+--------------------------------------------------------------------------------
+chown -R couchdb /usr/local/var/lib/couchdb
+chown -R couchdb /usr/local/var/log/couchdb
+--------------------------------------------------------------------------------
+
+Running Manually
+^^^^^^^^^^^^^^^^
+
+To start the Apache CouchDB server you should run the following command:
+
+--------------------------------------------------------------------------------
+sudo -u couchdb couchdb
+--------------------------------------------------------------------------------
+
+This uses the `sudo' command to run the `couchdb' command as the `couchdb' user.
+
+When Apache CouchDB starts it should eventually display the following message:
+
+--------------------------------------------------------------------------------
+Apache CouchDB has started. Time to relax.
+--------------------------------------------------------------------------------
+
+Relax.
+
+To check that everything has worked point your web browser to
+<http://localhost:5984/_utils/index.html> and run the test suite.
+
+OS X
+++++
+
+If you get errors when running Apache CouchDB that look like the following:
+
+--------------------------------------------------------------------------------
+dyld: Library not loaded: libicuuc.38.dy
+--------------------------------------------------------------------------------
+
+You should make sure that your `~/.profile' file contains the following line:
+
+--------------------------------------------------------------------------------
+export DYLD_LIBRARY_PATH=/opt/local/lib:$DYLD_LIBRARY_PATH
+--------------------------------------------------------------------------------
+
+This should have been added for you by MacPorts but may be missing.
+
+Running as a Daemon
+^^^^^^^^^^^^^^^^^^^
+
+Note: These instructions assume you have created the `couchdb' user. See the
+specific system information included below to learn how to reconfigure this.
+
+Note: If any of these methods report a failure you can run the `couchdb'
+command manually to see the error messages it is displaying.
+
+The `/usr/local/etc/logrotate.d/couchdb' file is provided as a logrotate
+configuration that you can use to rotate Apache CouchDB's logs.
+
+SysV/BSD-style Systems
+++++++++++++++++++++++
+
+Depending on your system the `couchdb' init script will be installed into a
+directory called `init.d' (for SysV-style systems) or `rc.d' (for BSD-style
+systems). These examples use the `[init.d|rc.d]' notation to indicate this.
+
+You can control the Apache CouchDB daemon by running the following command:
+
+--------------------------------------------------------------------------------
+/usr/local/etc/[init.d|rc.d]/couchdb [start|stop|restart|force-reload|status]
+--------------------------------------------------------------------------------
+
+If you wish to configure how the init script works, such as which user to run
+Apache CouchDB as, you must edit the `/usr/local/etc/default/couchdb' file as
+appropriate. If you are running the init script as a non-superuser you need to
+remove the line with the `COUCHDB_USER' setting.
+
+If you wish the Apache CouchDB daemon to run as a system service you need to
+the `/usr/local/etc/[init.d|rc.d]/couchdb' script into your system wide
+`/etc/[init.d|rc.d]' directory and update your system configuration as
+appropriate. Consult your system documentation for more information.
+
+If you are running a Debian GNU/Linux system (or a derivative such as Ubuntu
+GNU/Linux) you can configure your system using the following command:
+
+--------------------------------------------------------------------------------
+sudo update-rc.d couchdb defaults
+--------------------------------------------------------------------------------
+
+OS X
+++++
+
+You can use the `launchctl' command to control the Apache CouchDB daemon.
+
+To load the launchd configuration you must run the following command:
+
+--------------------------------------------------------------------------------
+sudo launchctl load /usr/local/Library/LaunchDaemons/org.apache.couchdb
+--------------------------------------------------------------------------------
+
+You can stop the Apache CouchDB daemon by running the following command:
+
+--------------------------------------------------------------------------------
+sudo launchctl unload /usr/local/Library/LaunchDaemons/org.apache.couchdb
+--------------------------------------------------------------------------------
+
+If you wish to change the launchd configuration, such as which user to run
+Apache CouchDB as, you must edit the
+`/usr/local/Library/LaunchDaemons/org.apache.couchdb.plist' file as
+appropriate.
+
+If you wish the Apache CouchDB daemon to run as a system service you need to
+copy the `/usr/local/Library/LaunchDaemons/org.apache.couchdb.plist' file into
+your system wide `/Library/LaunchDaemons' directory.
+
+Windows
+~~~~~~~
+
+Windows documentation is incomplete. Please submit suggestions.
+
+Development and Distribution
+----------------------------
+
+Reconfiguring the Build System
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you have edited any of the files used by the build system, such as the
+`Makefile.am' files, you will need to reconfigure your source.
+
+To reconfigure the source run the following command from the root directory:
+
+--------------------------------------------------------------------------------
+autoreconf && ./configure
+--------------------------------------------------------------------------------
+
+Checking In Changes
+~~~~~~~~~~~~~~~~~~~
+
+If your source directory has been configured or built you will need to clean
+the generated files before checking into the repository by running the
+following command:
+
+--------------------------------------------------------------------------------
+make local-clean
+--------------------------------------------------------------------------------
+
+If everything was successful you should now have a pristine checkout.
+
+Preparing For Distribution
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To build the source for distribution you should then run the following command:
+
+--------------------------------------------------------------------------------
+./configure && make distcheck
+--------------------------------------------------------------------------------
+
+If everything was successful you should see a `zip' file and/or a `tar.gz' file
+sitting in the root directory ready for distribution.
+
+Release Checklist
+~~~~~~~~~~~~~~~~~
+
+ 1. Update this file with important information.
+ 2. Update the `NEWS' file with change information.
+ 3. Update the `acinclude.m4' file with version information.
diff --git a/THANKS b/THANKS
new file mode 100644
index 00000000..3a9d3cb8
--- /dev/null
+++ b/THANKS
@@ -0,0 +1,21 @@
+Apache CouchDB Thanks
+=====================
+
+Credits
+-------
+
+Apache CouchDB was originally developed by Damien Katz <damien_katz@yahoo.com>
+and a number of other contributors. Many people further contributed to Apache
+CouchDB by reporting problems, suggesting various improvements or submitting
+changes. A list of these people is included below.
+
+ * Benoit Chesneau <bchesneau@gmail.com>
+ * Till Klampaeckel <till@klampaeckel.de>
+ * Roger Leigh <rleigh@debian.org>
+ * Sam Ruby <rubys@intertwingly.net>
+ * Carlos Valiente <superdupont@gmail.com>
+
+See Also
+--------
+
+For a list of authors see the `AUTHORS' file.
diff --git a/acinclude.m4.in b/acinclude.m4.in
new file mode 100644
index 00000000..c537bf90
--- /dev/null
+++ b/acinclude.m4.in
@@ -0,0 +1,31 @@
+dnl Licensed under the Apache License, Version 2.0 (the "License"); you may not
+dnl use this file except in compliance with the License. You may obtain a
+dnl copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+dnl WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+dnl License for the specific language governing permissions and limitations
+dnl under the License.
+
+m4_define([LOCAL_PACKAGE_AUTHOR_NAME], [Damien Katz])
+m4_define([LOCAL_PACKAGE_AUTHOR_ADDRESS], [damien_katz@yahoo.com])
+m4_define([LOCAL_PACKAGE_IDENTIFIER], [couchdb])
+m4_define([LOCAL_PACKAGE_NAME], [Apache CouchDB])
+
+m4_define([LOCAL_VERSION_MAJOR], [0])
+m4_define([LOCAL_VERSION_MINOR], [7])
+m4_define([LOCAL_VERSION_REVISION], [3])
+m4_define([LOCAL_VERSION_STAGE], [a])
+m4_define([LOCAL_VERSION_RELEASE], [%release%])
+m4_define([LOCAL_VERSION_PRIMARY],
+ [LOCAL_VERSION_MAJOR.LOCAL_VERSION_MINOR.LOCAL_VERSION_REVISION])
+m4_define([LOCAL_VERSION_SECONDARY],
+ [LOCAL_VERSION_STAGE[]LOCAL_VERSION_RELEASE])
+m4_define([LOCAL_VERSION],
+ [LOCAL_VERSION_PRIMARY[]LOCAL_VERSION_SECONDARY])
+
+m4_define([LOCAL_LIST_ADDRESS], [couchdb@googlegroups.com])
+m4_define([LOCAL_LIST_URI], [http://groups.google.com/group/couchdb])
diff --git a/authors.xml b/authors.xml
new file mode 100644
index 00000000..0e77cfbf
--- /dev/null
+++ b/authors.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="utf-8"?>
+<authors>
+ <author uid="willbeh">
+ William Beh &lt;willbeh@gmail.com&gt;
+ </author>
+ <author uid="damienkatz">
+ Damien Katz &lt;damien_katz@yahoo.com&gt;
+ </author>
+ <author uid="JanLehnardt">
+ Jan Lehnardt &lt;janlehnardt@gmail.com&gt;
+ </author>
+ <author uid="cmlenz">
+ Christopher Lenz &lt;cmlenz@gmx.de&gt;
+ </author>
+ <author uid="DirkSchalge">
+ Dirk Schalge &lt;dirk@epd-me.net&gt;
+ </author>
+ <author uid="nslater">
+ Noah Slater &lt;nslater@bytesexual.org&gt;
+ </author>
+ <author uid="nslater@bytesexual.org">
+ Noah Slater &lt;nslater@bytesexual.org&gt;
+ </author>
+</authors>
diff --git a/bin/Makefile.am b/bin/Makefile.am
new file mode 100644
index 00000000..c01d960e
--- /dev/null
+++ b/bin/Makefile.am
@@ -0,0 +1,79 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+datarootdir = @prefix@/share
+
+bin_SCRIPTS = couchdb couchjs
+
+if HELP2MAN
+dist_man1_MANS = couchdb.1 couchjs.1
+endif
+
+CLEANFILES = $(bin_SCRIPTS) $(dist_man1_MANS)
+
+transform = @program_transform_name@
+couchdb_command_name = `echo couchdb | sed '$(transform)'`
+couchjs_command_name = `echo couchjs | sed '$(transform)'`
+
+couchdb: couchdb.tpl
+ sed -e "s|%ERL%|$(ERL)|g" \
+ -e "s|%ICU_CONFIG%|$(ICU_CONFIG)|g" \
+ -e "s|%bindir%|@bindir@|g" \
+ -e "s|%erlangbindir%|@erlangbindir@|g" \
+ -e "s|%erlanglibdir%|@erlanglibdir@|g" \
+ -e "s|%pkgconfdir%|@pkgconfdir@|g" \
+ -e "s|%pkgstatelogdir%|@pkgstatelogdir@|g" \
+ -e "s|%pkgstatelibdir%|@pkgstatelibdir@|g" \
+ -e "s|%localstatedir%|@localstatedir@|g" \
+ -e "s|%list_address%|@list_address@|g" \
+ -e "s|%list_uri%|@list_uri@|g" \
+ -e "s|%package_author_address%|@package_author_address@|g" \
+ -e "s|%package_author_name%|@package_author_name@|g" \
+ -e "s|%package_name%|@package_name@|g" \
+ -e "s|%version%|@version@|g" \
+ -e "s|%couchdb_command_name%|$(couchdb_command_name)|g" > \
+ $@ < $<
+ chmod +x $@
+
+couchjs: couchjs.tpl
+ sed -e "s|%libbindir%|@libbindir@|g" \
+ -e "s|%list_address%|@list_address@|g" \
+ -e "s|%list_uri%|@list_uri@|g" \
+ -e "s|%package_author_address%|@package_author_address@|g" \
+ -e "s|%package_author_name%|@package_author_name@|g" \
+ -e "s|%package_name%|@package_name@|g" \
+ -e "s|%version%|@version@|g" \
+ -e "s|%couchjs_command_name%|$(couchjs_command_name)|g" > \
+ $@ < $<
+ chmod +x $@
+
+HELP2MAN_OPTION=--no-info --help-option="-h" --version-option="-V"
+
+# XXX: Because the scripts are made at build time for the user we need to
+# XXX: depend on the original templates so as not to cause the rebuilding of
+# XXX: the man pages.
+
+couchdb.1: couchdb.tpl.in
+ touch $@
+ if test -x "$(HELP2MAN_EXECUTABLE)"; then \
+ $(MAKE) -f Makefile couchdb; \
+ $(HELP2MAN_EXECUTABLE) $(HELP2MAN_OPTION) \
+ --name="Apache CouchDB database server" ./couchdb --output $@; \
+ fi
+
+couchjs.1: couchjs.tpl.in
+ touch $@
+ if test -x "$(HELP2MAN_EXECUTABLE)"; then \
+ $(MAKE) -f Makefile couchjs; \
+ $(HELP2MAN_EXECUTABLE) $(HELP2MAN_OPTION) \
+ --name="Apache CouchDB JavaScript interpreter" ./couchjs --output $@; \
+ fi
diff --git a/bin/couchdb.tpl.in b/bin/couchdb.tpl.in
new file mode 100644
index 00000000..a29edc47
--- /dev/null
+++ b/bin/couchdb.tpl.in
@@ -0,0 +1,340 @@
+#! /bin/sh -e
+
+# @configure_input@
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+SCRIPT_OK=0
+SCRIPT_ERROR=1
+
+INTERACTIVE_BOOLEAN=false
+BACKGROUND_BOOLEAN=false
+KILL_BOOLEAN=false
+SHUTDOWN_BOOLEAN=false
+RECURSED_BOOLEAN=false
+
+RESPAWN_TIMEOUT=0
+
+LIB_DIRECTORY=%pkgstatelibdir%
+LOG_DIRECTORY=%pkgstatelogdir%
+
+INI_FILE=%pkgconfdir%/couch.ini
+PID_FILE=%localstatedir%/run/couchdb.pid
+
+STDOUT_FILE=couchdb.stdout
+STDERR_FILE=couchdb.stderr
+
+HEART_COMMAND="%bindir%/%couchdb_command_name% -k"
+HEART_BEAT_TIMEOUT=11
+
+basename=$(basename $0)
+
+display_version () {
+ # Display version and copyright information.
+
+ cat << EOF
+$basename - %package_name% %version%
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+EOF
+ }
+
+display_help () {
+ # Display a short description of the script's behaviour.
+
+ cat << EOF
+Usage: $basename [OPTION]
+
+The $basename command runs the %package_name% server.
+
+The exit status is 0 for success or 1 for failure.
+
+The \`-s' option will exit 0 for running and 1 for not running.
+
+Options:
+
+ -h display a short help message and exit
+ -V display version information and exit
+ -c FILE set the configuration FILE (defaults to $INI_FILE)
+ -i use the interactive Erlang shell
+ -b spawn as a background process
+ -p FILE set the background PID FILE (defaults to $PID_FILE)
+ -r SECONDS respawn background process after SECONDS (defaults to no respawn)
+ -o FILE redirect background stdout to FILE (defaults to $STDOUT_FILE)
+ -e FILE redirect background stderr to FILE (defaults to $STDERR_FILE)
+ -s display the status of the background process
+ -k kill the background process, will respawn if needed
+ -d shutdown the background process
+
+Report bugs via the web at <%list_uri%>.
+
+Report bugs via email to <%list_address%>.
+EOF
+}
+
+display_error () {
+ # Display a short instruction referring users to further documentation.
+
+ if test -n "$1"; then
+ echo $1 >&2
+ fi
+ echo >&2
+ echo "Try \`"$basename" -h' for more information." >&2
+ exit $SCRIPT_ERROR
+}
+
+_load_configuration () {
+ # Load ini configuration and overwrite default variables.
+
+ dbrootdir_ini_value=$(awk -F = "/^DbRootDir=/{print \$2}" < $INI_FILE)
+ if test -n "$dbrootdir_ini_value"; then
+ LIB_DIRECTORY=$dbrootdir_ini_value
+ fi
+ logfile_ini_value=$(awk -F = "/^LogFile=/{print \$2}" < $INI_FILE)
+ if test -n "$logfile_ini_value"; then
+ LOG_DIRECTORY=$(dirname $logfile_ini_value)
+ fi
+}
+
+_get_pid () {
+ if test -f $PID_FILE; then
+ PID=$(cat $PID_FILE)
+ fi
+ if test -z "$PID"; then
+ # Resort to searching for the running process manually.
+ PID=$(ps auxww | grep erlang | grep -- "couch_server:start()" | \
+ grep -v grep | awk "{print \$2}")
+ fi
+ echo $PID
+}
+
+check_status () {
+ # Check the status of the running Apache CouchDB process.
+
+ PID=$(_get_pid)
+ if test -n "$PID"; then
+ if kill -0 $PID 2> /dev/null; then
+ echo "Apache CouchDB is running as process $PID. Time to relax."
+ return $SCRIPT_OK
+ else
+ echo >&2 << EOF
+Apache CouchDB is not running but a stale PID file exists: $PID_FILE
+EOF
+ fi
+ else
+ echo "Apache CouchDB is not running." >&2
+ fi
+ return $SCRIPT_ERROR
+}
+
+check_environment () {
+ # Check the environment for common problems to prevent nasty backtraces.
+
+ message_prefix="Apache CouchDB needs write permission on the"
+ if test ! -w $LIB_DIRECTORY; then
+ echo "$message_prefix data directory: $LIB_DIRECTORY" >&2
+ exit $SCRIPT_ERROR
+ fi
+ if test ! -w $LOG_DIRECTORY; then
+ echo "$message_prefix log directory: $LOG_DIRECTORY" >&2
+ exit $SCRIPT_ERROR
+ fi
+ message_prefix="Apache CouchDB needs write permission on data file"
+ for file in $(find $LIB_DIRECTORY -type f); do
+ if test ! -w $file; then
+ echo "$message_prefix: $file" >&2
+ exit $SCRIPT_ERROR
+ fi
+ done
+ message_prefix="Apache CouchDB needs write permission on log file"
+ for file in $(find $LOG_DIRECTORY -type f); do
+ if test ! -w $file; then
+ echo "$message_prefix: $file" >&2
+ exit $SCRIPT_ERROR
+ fi
+ done
+ if test "$BACKGROUND_BOOLEAN" = "true"; then
+ touch $PID_FILE 2> /dev/null || true
+ touch $STDOUT_FILE 2> /dev/null || true
+ touch $STDERR_FILE 2> /dev/null || true
+ message_prefix="Apache CouchDB needs write permission on the"
+ if test ! -w $PID_FILE; then
+ echo "$message_prefix PID file: $PID_FILE" >&2
+ exit $SCRIPT_ERROR
+ fi
+ if test ! -w $STDOUT_FILE; then
+ echo "$message_prefix STDOUT file: $STDOUT_FILE" >&2
+ exit $SCRIPT_ERROR
+ fi
+ if test ! -w $STDERR_FILE; then
+ echo "$message_prefix STDERR file: $STDERR_FILE" >&2
+ exit $SCRIPT_ERROR
+ fi
+ message_prefix="Apache CouchDB needs a regular"
+ if ! echo 2> /dev/null >> $PID_FILE; then
+ echo "$message_prefix PID file: $PID_FILE" >&2
+ exit $SCRIPT_ERROR
+ fi
+ if ! echo 2> /dev/null >> $STDOUT_FILE; then
+ echo "$message_prefix STDOUT file: $STDOUT_FILE" >&2
+ exit $SCRIPT_ERROR
+ fi
+ if ! echo 2> /dev/null >> $STDERR_FILE; then
+ echo "$message_prefix STDERR file: $STDERR_FILE" >&2
+ exit $SCRIPT_ERROR
+ fi
+ fi
+}
+
+start_couchdb () {
+ # Start Apache CouchDB with the Erlang interpreter.
+
+ if test ! "$RECURSED_BOOLEAN" = "true"; then
+ if check_status 2> /dev/null; then
+ exit $SCRIPT_OK
+ fi
+ check_environment
+ fi
+ interactive_option="+Bd -noinput"
+ if test "$INTERACTIVE_BOOLEAN" = "true"; then
+ interactive_option=""
+ fi
+ if test "$BACKGROUND_BOOLEAN" = "true"; then
+ touch $PID_FILE
+ interactive_option="+Bd -noinput"
+ fi
+ command="`%ICU_CONFIG% --invoke` \
+ %ERL% $interactive_option -sasl errlog_type error \
+ -pa %erlanglibdir%/couch-%version%/ebin \
+ %erlanglibdir%/couch_inets-4.7.5/ebin \
+ -eval \"application:load(couch)\" \
+ -eval \"couch_server:start(), receive done -> done end.\" \
+ -couchini $INI_FILE"
+ if test "$BACKGROUND_BOOLEAN" = "true" \
+ -a "$RECURSED_BOOLEAN" = "false"; then
+ # Recursively call and detach a duplicate couchdb command.
+ $0 -c $INI_FILE -b -r $RESPAWN_TIMEOUT -p $PID_FILE \
+ -o $STDOUT_FILE -e $STDERR_FILE -R &
+ echo "Apache CouchDB has started. Time to relax."
+ else
+ if test "$RECURSED_BOOLEAN" = "true"; then
+ while true; do
+ export HEART_COMMAND
+ export HEART_BEAT_TIMEOUT
+ $(eval $command -pidfile $PID_FILE -heart \
+ > $STDOUT_FILE 2> $STDERR_FILE) || true
+ PID=$(_get_pid)
+ if test -n "$PID"; then
+ if kill -0 $PID 2> /dev/null; then
+ # Found an existing process, do not respawn.
+ return $SCRIPT_ERROR
+ fi
+ # Stale PID file, we should respawn.
+ else
+ # No PID file, do not respawn.
+ return $SCRIPT_OK
+ fi
+ if test "$RESPAWN_TIMEOUT" = "0"; then
+ # RESPAWN_TIMEOUT is zero, do not respawn.
+ return $SCRIPT_OK
+ fi
+ if test "$RESPAWN_TIMEOUT" != "1"; then
+ plural_ending="s"
+ fi
+ cat << EOF
+Apache CouchDB crashed, restarting in $RESPAWN_TIMEOUT second$plural_ending.
+EOF
+ sleep $RESPAWN_TIMEOUT
+ done
+ else
+ eval exec $command
+ fi
+ fi
+}
+
+stop_couchdb () {
+ # Send SIGHUP to the running Apache CouchDB process.
+
+ PID=$(_get_pid)
+ if test -n "$PID"; then
+ # Clean up PID file or leave stale for respawn.
+ if test "$1" = "false"; then
+ echo > $PID_FILE
+ fi
+ if kill -0 $PID 2> /dev/null; then
+ if kill -1 $PID 2> /dev/null; then
+ if test "$1" = "false"; then
+ echo "Apache CouchDB has been shutdown."
+ else
+ echo "Apache CouchDB has been killed."
+ fi
+ return $SCRIPT_OK
+ else
+ echo "Apache CouchDB could not be killed." >&2
+ return $SCRIPT_ERROR
+ fi
+ if test "$1" = "false"; then
+ echo "Stale PID file exists but Apache CouchDB is not running."
+ else
+ echo "Stale PID file existed but Apache CouchDB is not running."
+ fi
+ fi
+ else
+ echo "Apache CouchDB is not running."
+ fi
+}
+
+parse_script_option_list () {
+ # Parse the script option list and take the appropriate action.
+
+ if ! argument_list=$(getopt hVc:ibp:r:Ro:e:skd $@); then
+ display_error
+ fi
+ eval set -- "$argument_list"
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ -h) shift; display_help; exit $SCRIPT_OK;;
+ -V) shift; display_version; exit $SCRIPT_OK;;
+ -c) shift; INI_FILE=$1; shift;;
+ -i) shift; INTERACTIVE_BOOLEAN=true;;
+ -b) shift; BACKGROUND_BOOLEAN=true;;
+ -r) shift; RESPAWN_TIMEOUT=$1; shift;;
+ -R) shift; RECURSED_BOOLEAN=true;;
+ -p) shift; PID_FILE=$1; shift;;
+ -o) shift; STDOUT_FILE=$1; shift;;
+ -e) shift; STDERR_FILE=$1; shift;;
+ -s) shift; check_status; exit $SCRIPT_OK;;
+ -k) shift; KILL_BOOLEAN=true;;
+ -d) shift; SHUTDOWN_BOOLEAN=true;;
+ --) shift; break;;
+ *) display_error "Unknown option: $1" >&2;;
+ esac
+ done
+ _load_configuration
+ if test "$KILL_BOOLEAN" = "true" -o "$SHUTDOWN_BOOLEAN" = "true"; then
+ stop_couchdb $KILL_BOOLEAN
+ else
+ start_couchdb
+ fi
+}
+
+parse_script_option_list $@
diff --git a/bin/couchjs.tpl.in b/bin/couchjs.tpl.in
new file mode 100644
index 00000000..634940f2
--- /dev/null
+++ b/bin/couchjs.tpl.in
@@ -0,0 +1,103 @@
+#! /bin/sh -e
+
+# @configure_input@
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+SCRIPT_OK=0
+SCRIPT_ERROR=1
+
+DEFAULT_VERSION=170
+
+basename=$(basename $0)
+
+display_version () {
+ # Display version and copyright information.
+
+ cat << EOF
+$basename - %package_name% %version%
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+EOF
+}
+
+display_help () {
+ # Display a short description of the script's behaviour.
+
+ cat << EOF
+Usage: $basename [FILE]
+
+The $basename command runs the %package_name% JavaScript interpreter.
+
+The exit status is 0 for success or 1 for failure.
+
+Options:
+
+ -h display a short help message and exit
+ -V display version information and exit
+
+Report bugs via the web at <%list_uri%>.
+
+Report bugs via email to <%list_address%>.
+EOF
+}
+
+display_error () {
+ # Display a short instruction referring users to further documentation.
+
+ if test -n "$1"; then
+ echo $1 >&2
+ fi
+ echo >&2
+ echo "Try \`"$basename" -h' for more information." >&2
+ exit $SCRIPT_ERROR
+}
+
+run_couchjs () {
+ # Run the JavaScript interpreter shell with options.
+
+ exec %libbindir%/%couchjs_command_name% $@
+}
+
+parse_script_option_list () {
+ # Parse the script option list and take the appropriate action.
+
+ if ! argument_list=$(getopt hV $@); then
+ display_error
+ fi
+ eval set -- "$argument_list"
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ -h) shift; display_help; exit $SCRIPT_OK;;
+ -V) shift; display_version; exit $SCRIPT_OK;;
+ --) shift; break;;
+ *) break;;
+ esac
+ done
+ option_list=$(echo $@ | sed 's/--//')
+ if test -z "$option_list"; then
+ display_error "You must specify a FILE."
+ fi
+ run_couchjs $option_list
+}
+
+parse_script_option_list $@
diff --git a/bootstrap b/bootstrap
new file mode 100755
index 00000000..21137381
--- /dev/null
+++ b/bootstrap
@@ -0,0 +1,325 @@
+#! /bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# Bootstrap the pristine source ready for distribution.
+
+SCRIPT_OK=0
+SCRIPT_ERROR=1
+
+CHANGELOG_PREDICATE=true
+
+ACINCLUDE_FILE="acinclude.m4"
+ACINCLUDE_IN_FILE="acinclude.m4.in"
+AC_CHECK_ICU_COMPRESSED_FILE="build-contrib/ac_check_icu.m4_2007-09-12.gz"
+AUTHORS_FILE="authors.xml"
+BUILD_AUX_DIRECTORY="build-aux"
+CHANGELOG_FILE="ChangeLog"
+CONFIG_GUESS_COMPRESSED_FILE="build-contrib/config.guess_2007-09-12.gz"
+CONFIG_GUESS_FILE="build-aux/config.guess"
+CONFIG_SUB_COMPRESSED_FILE="build-contrib/config.sub_2007-09-12.gz"
+CONFIG_SUB_FILE="build-aux/config.sub"
+COPYING_FILE="COPYING"
+LICENSE_COMPRESSED_FILE="build-contrib/apache_2.txt.gz"
+M4_DIRECTORY="m4"
+M4_AC_CHECK_ICU_FILE="m4/ac_check_icu.m4"
+REPOSITORY_URI="http://couchdb.googlecode.com/svn/trunk/"
+
+ACLOCAL_EXECUTABLE=$(which aclocal || true)
+AUTOCONF_EXECUTABLE=$(which autoconf || true)
+AUTOHEADER_EXECUTABLE=$(which autoheader || true)
+AUTOMAKE_EXECUTABLE=$(which automake || true)
+GLIBTOOLIZE_EXECUTABLE=$(which glibtoolize || true)
+LIBTOOLIZE_EXECUTABLE=$(which libtoolize || true)
+SVN_EXECUTABLE=$(which svn || true)
+SVN2CL_EXECUTABLE=$(which svn2cl || true)
+
+basename=$(basename $0)
+
+extract_configuration_variable () {
+ # Extract variables from the local M4 configuration.
+
+ variable_name=$1
+ temporary_file=$(mktemp)
+ if [ $? -ne 0 ]; then
+ echo "Error: Unable to create a temporary file."
+ exit $SCRIPT_ERROR
+ fi
+ echo "changequote(\`[', \`]')" > $temporary_file
+ sed "s/m4_//" < $ACINCLUDE_IN_FILE >> $temporary_file
+ echo $variable_name >> $temporary_file
+ m4 $temporary_file | grep -v "^$"
+ rm -f $temporary_file
+}
+
+display_version () {
+ # Display version and copyright information.
+
+ package_name=$(extract_configuration_variable LOCAL_PACKAGE_NAME)
+ version=$(extract_configuration_variable LOCAL_VERSION)
+ cat << EOF
+$basename - $package_name $version
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+EOF
+}
+
+display_help () {
+ # Display a short description of the script's behaviour.
+
+ list_address=$(extract_configuration_variable LOCAL_LIST_ADDRESS)
+ list_uri=$(extract_configuration_variable LOCAL_LIST_URI)
+ cat << EOF
+Usage: $basename [OPTION]...
+
+The $basename script bootstraps the pristine source so that it can be built.
+
+The exit status is 0 for success or 1 for failure.
+
+Options:
+
+ -h display a short help message and exit
+ -v display version information and exit
+ -C do not generate a ChangeLog from Subversion
+
+Environment variables:
+
+ REPOSITORY_REVISION manual override for revision information
+
+Report bugs via the web at <$list_uri>.
+
+Report bugs via email to <$list_address>.
+EOF
+}
+
+display_error () {
+ # Display a short instruction referring users to further documentation.
+
+ if test -n "$1"; then
+ echo $1 >&2
+ fi
+ echo >&2
+ echo "Try \`"$basename" -h' for more information." >&2
+ exit $SCRIPT_ERROR
+}
+
+generate_dummy_changelog () {
+ # Generate a dummy ChangLog file.
+
+ echo "Warning: Generating a dummy ChangeLog file."
+ touch $CHANGELOG_FILE
+}
+
+check_svn_environment () {
+ # Check the Subversion environment for sanity.
+
+ if test -z "$SVN_EXECUTABLE"; then
+ echo "Warning: Unable to find the svn command."
+ return $SCRIPT_ERROR
+ fi
+ if test -n "$REPOSITORY_REVISION"; then
+ return
+ fi
+ if test -n "$($SVN_EXECUTABLE info . 2> /dev/null)"; then
+ SVN_CHECKOUT_BOOLEAN="true"
+ fi > /dev/null
+ if test "$SVN_CHECKOUT_BOOLEAN" != "true"; then
+ echo "Warning: Unable to determine checkout information."
+ fi
+}
+
+generate_changelog () {
+ # Generate the ChangLog file using the Subversion repository.
+
+ if test "$SVN_CHECKOUT_BOOLEAN" = "true"; then
+ SVN2CL_COMMAND_ARGUMENT="."
+ fi
+ if test -n "$REPOSITORY_REVISION"; then
+ SVN2CL_COMMAND_ARGUMENT="--revision $REPOSITORY_REVISION:1 $REPOSITORY_URI"
+ fi
+ if test -z "$SVN2CL_COMMAND_ARGUMENT"; then
+ return
+ fi
+ if test ! -x "$SVN2CL_EXECUTABLE"; then
+ echo "Warning: Unable to find the svn2cl command."
+ generate_dummy_changelog
+ else
+ echo "Generating \`"$CHANGELOG_FILE"' (may take a while)"
+ $SVN2CL_EXECUTABLE --authors=$AUTHORS_FILE $SVN2CL_COMMAND_ARGUMENT
+ fi
+}
+
+generate_acinclude () {
+ # Generate the acinclude.m4 file using the Subversion repository.
+
+ release_code=$(sed -e "s/\[//g" -e "s/\]//g" -e "s/(//g" -e "s/)//g" \
+ < $ACINCLUDE_IN_FILE | awk "/LOCAL_VERSION_STAGE, /{print \$2}")
+ repository_boolean=false
+ if test -n "$REPOSITORY_REVISION" -o "$SVN_CHECKOUT_BOOLEAN" = "true"; then
+ repository_boolean=true
+ fi
+ if test -z "$release_code" -o "$repository_boolean" = "false"; then
+ sed "s/%release%//" < $ACINCLUDE_IN_FILE > $ACINCLUDE_FILE
+ else
+ if test "$SVN_CHECKOUT_BOOLEAN" = "true"; then
+ revision_number=$($SVN_EXECUTABLE info . | \
+ awk "/Revision:/{print \$2}")
+ fi
+ if test -n "$REPOSITORY_REVISION"; then
+ revision_number="$REPOSITORY_REVISION"
+ fi
+ sed "s/%release%/$revision_number/" \
+ < $ACINCLUDE_IN_FILE > $ACINCLUDE_FILE
+ fi
+}
+
+process_file_collection () {
+ # Process files required to complete the bootstrap.
+
+ echo "Installing \`"$COPYING_FILE"'"
+ gzip --decompress --stdout $LICENSE_COMPRESSED_FILE > $COPYING_FILE
+ echo "Installing \`"$BUILD_AUX_DIRECTORY"'"
+ mkdir -p $BUILD_AUX_DIRECTORY
+ echo "Installing \`"$CONFIG_GUESS_FILE"'"
+ gzip --decompress --stdout \
+ $CONFIG_GUESS_COMPRESSED_FILE > $CONFIG_GUESS_FILE
+ echo "Installing \`"$CONFIG_SUB_FILE"'"
+ gzip --decompress --stdout \
+ $CONFIG_SUB_COMPRESSED_FILE > $CONFIG_SUB_FILE
+ echo "Installing \`"$M4_DIRECTORY"'"
+ mkdir -p $M4_DIRECTORY
+ echo "Installing \`"$M4_AC_CHECK_ICU_FILE"'"
+ gzip --decompress --stdout \
+ $AC_CHECK_ICU_COMPRESSED_FILE > $M4_AC_CHECK_ICU_FILE
+}
+
+run_aclocal () {
+ # Run the correct version of aclocal.
+
+ if test -x "$ACLOCAL_EXECUTABLE"; then
+ echo "Running $ACLOCAL_EXECUTABLE"
+ $ACLOCAL_EXECUTABLE -I m4
+ else
+ echo "Can't find aclocal"
+ exit $SCRIPT_ERROR
+ fi
+}
+
+run_libtoolize () {
+ # Run the correct version of libtoolize.
+
+ if test -x "$LIBTOOLIZE_EXECUTABLE"; then
+ echo "Running $LIBTOOLIZE_EXECUTABLE"
+ $LIBTOOLIZE_EXECUTABLE -f -c
+ else
+ if test -x "`which $GLIBTOOLIZE_EXECUTABLE`"; then
+ echo "Running $GLIBTOOLIZE_EXECUTABLE"
+ $GLIBTOOLIZE_EXECUTABLE -f -c
+ else
+ echo "Can't find libtoolize or glibtoolize"
+ exit $SCRIPT_ERROR
+ fi
+ fi
+}
+
+run_autoheader () {
+ # Run the correct version of autoheader.
+
+ if test -x "$AUTOHEADER_EXECUTABLE"; then
+ echo "Running $AUTOHEADER_EXECUTABLE"
+ $AUTOHEADER_EXECUTABLE -f
+ else
+ echo "Can't find autoheader"
+ exit $SCRIPT_ERROR
+ fi
+}
+
+run_automake () {
+ # Run the correct version of automake.
+
+ AUTOMAKE_OPTION_COLLECTION=""
+ if test -x "$AUTOMAKE_EXECUTABLE"; then
+ echo "Running $AUTOMAKE_EXECUTABLE"
+ $AUTOMAKE_EXECUTABLE -f -c -a --gnits
+ else
+ echo "Can't find automake"
+ exit $SCRIPT_ERROR
+ fi
+}
+
+run_autoconf () {
+ # Run the correct version of autoconf.
+
+ if test -x "$AUTOCONF_EXECUTABLE"; then
+ echo "Running $AUTOCONF_EXECUTABLE"
+ $AUTOCONF_EXECUTABLE -f
+ else
+ echo "Can't find autoconf"
+ exit $SCRIPT_ERROR
+ fi
+}
+
+run_command_collection () {
+ # Run commands required to complete the bootstrap.
+
+ run_libtoolize
+ run_aclocal
+ run_autoheader
+ run_automake
+ run_autoconf
+ cat << EOF
+
+You have bootstrapped Apache CouchDB. Time to relax.
+
+Run \`./configure' to configure the source before you install.
+EOF
+}
+
+parse_script_option_list () {
+ # Parse the script option list and take the appropriate action.
+
+ if ! argument_list=$(getopt vhC $@); then
+ display_error
+ fi
+ eval set -- "$argument_list"
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ -v) shift; display_version; exit $SCRIPT_OK;;
+ -h) shift; display_help; exit $SCRIPT_OK;;
+ -C) shift; CHANGELOG_PREDICATE=false;;
+ --) shift; break;;
+ *) display_error "Unknown option: $1" >&2;;
+ esac
+ done
+ cd $(dirname $0)
+ process_file_collection
+ check_svn_environment || true
+ if test "$CHANGELOG_PREDICATE" = "true"; then
+ generate_changelog
+ else
+ generate_dummy_changelog
+ fi
+ generate_acinclude
+ run_command_collection
+}
+
+parse_script_option_list $@
diff --git a/build-contrib/ac_check_icu.m4_2007-09-12.gz b/build-contrib/ac_check_icu.m4_2007-09-12.gz
new file mode 100644
index 00000000..917feaff
--- /dev/null
+++ b/build-contrib/ac_check_icu.m4_2007-09-12.gz
Binary files differ
diff --git a/build-contrib/apache_2.txt.gz b/build-contrib/apache_2.txt.gz
new file mode 100644
index 00000000..da76637c
--- /dev/null
+++ b/build-contrib/apache_2.txt.gz
Binary files differ
diff --git a/build-contrib/config.guess_2007-09-12.gz b/build-contrib/config.guess_2007-09-12.gz
new file mode 100644
index 00000000..9e8f905d
--- /dev/null
+++ b/build-contrib/config.guess_2007-09-12.gz
Binary files differ
diff --git a/build-contrib/config.sub_2007-09-12.gz b/build-contrib/config.sub_2007-09-12.gz
new file mode 100644
index 00000000..5f06c9ee
--- /dev/null
+++ b/build-contrib/config.sub_2007-09-12.gz
Binary files differ
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 00000000..16f0ae45
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,242 @@
+dnl Licensed under the Apache License, Version 2.0 (the "License"); you may not
+dnl use this file except in compliance with the License. You may obtain a
+dnl copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+dnl WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+dnl License for the specific language governing permissions and limitations
+dnl under the License.
+
+m4_include([m4/ac_check_icu.m4])
+
+AC_INIT([LOCAL_PACKAGE_NAME], [LOCAL_VERSION], [LOCAL_LIST_ADDRESS],
+ [LOCAL_PACKAGE_IDENTIFIER])
+
+AC_PREREQ([2.59])
+
+AC_CONFIG_SRCDIR([ChangeLog])
+AC_CONFIG_AUX_DIR([build-aux])
+
+AM_CONFIG_HEADER([config.h])
+
+AM_INIT_AUTOMAKE([1.6.3 gnu check-news -Wno-portability])
+
+AC_GNU_SOURCE
+AC_ENABLE_SHARED
+AC_DISABLE_STATIC
+
+AC_PROG_CC
+AC_PROG_LIBTOOL
+AC_PROG_LN_S
+
+AC_MSG_CHECKING([for pthread_create in -lpthread])
+
+original_LIBS="$LIBS"
+LIBS="-lpthread $original_LIBS"
+
+AC_TRY_LINK([#include<pthread.h>],
+ [pthread_create((void *)0, (void *)0, (void *)0, (void *)0)],
+ [pthread=yes], [pthread=no])
+
+if test x${pthread} = xyes; then
+ AC_MSG_RESULT([yes])
+else
+ LIBS="$original_LIBS"
+ AC_MSG_RESULT([no])
+fi
+
+AC_ARG_WITH([erlang], [AC_HELP_STRING([--with-erlang=PATH],
+ [set PATH to the Erlang include directory])], [
+ ERLANG_FLAGS="-I$withval"
+], [
+ ERLANG_FLAGS="-I${libdir}/erlang/usr/include"
+ ERLANG_FLAGS="$ERLANG_FLAGS -I/usr/lib/erlang/usr/include"
+ ERLANG_FLAGS="$ERLANG_FLAGS -I/usr/local/lib/erlang/usr/include"
+ ERLANG_FLAGS="$ERLANG_FLAGS -I/opt/local/lib/erlang/usr/include"
+])
+
+AC_ARG_WITH([js-include], [AC_HELP_STRING([--with-js-include=PATH],
+ [set PATH to the SpiderMonkey include directory])], [
+ JS_INCLUDE_FLAGS="-I$withval"
+], [
+ JS_INCLUDE_FLAGS="-I/usr/include/js"
+ JS_INCLUDE_FLAGS="$JS_INCLUDE_FLAGS -I/usr/include/mozjs"
+ JS_INCLUDE_FLAGS="$JS_INCLUDE_FLAGS -I/usr/local/include/js"
+ JS_INCLUDE_FLAGS="$JS_INCLUDE_FLAGS -I/opt/local/include/js"
+])
+
+AC_ARG_WITH([js-lib], [AC_HELP_STRING([--with-js-lib=PATH],
+ [set PATH to the SpiderMonkey library directory])],
+ [JS_LIB_FLAGS="-L$withval"], [])
+
+# XP_UNIX required for jsapi.h and has been tested to work on Linux and Darwin.
+LIB_FLAGS="-L/usr/local/lib -L/opt/local/lib $JS_LIB_FLAGS"
+LIBS="$LIB_FLAGS $LIBS"
+FLAGS="$LIB_FLAGS $ERLANG_FLAGS $JS_INCLUDE_FLAGS -DXP_UNIX $FLAGS"
+CPPFLAGS="$FLAGS $CPPFLAGS"
+LDFLAGS="$FLAGS $LDFLAGS"
+
+AC_CHECK_LIB([js], [JS_NewContext], [], [
+ AC_CHECK_LIB([mozjs], [JS_NewContext], [], [
+ AC_MSG_ERROR([Could not find the js library.
+
+Is the Mozilla SpiderMonkey library installed?])])])
+
+AC_CHECK_HEADER([jsapi.h], [], [
+ AC_MSG_ERROR([Could not find the jsapi header.
+
+Are Mozilla SpiderMonkey headers installed?])])
+
+AC_CHECK_ICU([3])
+
+ICU_LOCAL_CFLAGS=`$ICU_CONFIG --cppflags-searchpath`
+ICU_LOCAL_LDFLAGS=`$ICU_CONFIG --ldflags-searchpath`
+
+AC_SUBST(ICU_CONFIG)
+AC_SUBST(ICU_LOCAL_CFLAGS)
+AC_SUBST(ICU_LOCAL_LDFLAGS)
+
+AC_PATH_PROG([ERL], [erl])
+
+if test x${ERL} = x; then
+ AC_MSG_ERROR([Could not find the `erl' executable. Is Erlang installed?])
+fi
+
+AC_PATH_PROG([ERLC], [erlc])
+
+if test x${ERLC} = x; then
+ AC_MSG_ERROR([Could not find the `erlc' executable. Is Erlang installed?])
+fi
+
+AC_CHECK_HEADER([erl_driver.h], [], [
+ AC_MSG_ERROR([Could not find the `erl_driver.h' header.
+
+Are the Erlang headers installed? Use the `--with-erlang' option to specify the
+path to the Erlang include directory.])])
+
+AC_PATH_PROG([HELP2MAN_EXECUTABLE], [help2man])
+if test x${HELP2MAN_EXECUTABLE} = x; then
+ AC_MSG_WARN([You will be unable to regenerate any man pages.])
+fi
+
+use_init=yes
+use_launchd=yes
+
+AC_ARG_ENABLE([init], [AC_HELP_STRING([--disable-init],
+ [don't install init script where applicable])], [
+ use_init=$enableval
+], [])
+
+AC_ARG_ENABLE([launchd], [AC_HELP_STRING([--disable-launchd],
+ [don't install launchd configuration where applicable])], [
+ use_launchd=$enableval
+], [])
+
+init_enabled=false
+launchd_enabled=false
+
+if test "$use_init" = "yes"; then
+ AC_MSG_CHECKING(location of init directory)
+ if test -d /etc/rc.d; then
+ init_enabled=true
+ AC_SUBST([initdir], ['${sysconfdir}/rc.d'])
+ AC_MSG_RESULT(${initdir})
+ else
+ if test -d /etc/init.d; then
+ init_enabled=true
+ AC_SUBST([initdir], ['${sysconfdir}/init.d'])
+ AC_MSG_RESULT(${initdir})
+ else
+ AC_MSG_RESULT(not found)
+ fi
+ fi
+fi
+
+if test "$use_launchd" = "yes"; then
+ AC_MSG_CHECKING(location of launchd directory)
+ if test -d /Library/LaunchDaemons; then
+ init_enabled=false
+ launchd_enabled=true
+ AC_SUBST([launchddir], ['${prefix}/Library/LaunchDaemons'])
+ AC_MSG_RESULT(${launchddir})
+ else
+ AC_MSG_RESULT(not found)
+ fi
+fi
+
+AC_ARG_VAR([ERL_EXECUTABLE], [path to the `erl' executable])
+AC_ARG_VAR([ERLC_EXECUTABLE], [path to the `erlc' executable])
+AC_ARG_VAR([HELP2MAN_EXECUTABLE], [path to the `help2man' program])
+
+if test -n "$HELP2MAN_EXECUTABLE"; then
+ help2man_enabled=true
+else
+ if test -f "$srcdir/bin/couchdb.1" -a -f "$srcdir/bin/couchjs.1"; then
+ help2man_enabled=true
+ else
+ help2man_enabled=false
+ fi
+fi
+
+AM_CONDITIONAL([INIT], [test x${init_enabled} = xtrue])
+AM_CONDITIONAL([LAUNCHD], [test x${launchd_enabled} = xtrue])
+AM_CONDITIONAL([HELP2MAN], [test x${help2man_enabled} = xtrue])
+
+AC_SUBST([package_author_name], ["LOCAL_PACKAGE_AUTHOR_NAME"])
+AC_SUBST([package_author_address], ["LOCAL_PACKAGE_AUTHOR_ADDRESS"])
+AC_SUBST([package_identifier], ["LOCAL_PACKAGE_IDENTIFIER"])
+AC_SUBST([package_name], ["LOCAL_PACKAGE_NAME"])
+
+AC_SUBST([version], ["LOCAL_VERSION"])
+AC_SUBST([version_major], ["LOCAL_VERSION_MAJOR"])
+AC_SUBST([version_minor], ["LOCAL_VERSION_MINOR"])
+AC_SUBST([version_revision], ["LOCAL_VERSION_REVISION"])
+AC_SUBST([version_stage], ["LOCAL_VERSION_STAGE"])
+AC_SUBST([version_release], ["LOCAL_VERSION_RELEASE"])
+
+AC_SUBST([list_address], ["LOCAL_LIST_ADDRESS"])
+AC_SUBST([list_uri], ["LOCAL_LIST_URI"])
+
+AC_SUBST([pkgconfdir], [${sysconfdir}/${PACKAGE_TARNAME}])
+AC_SUBST([pkgdatadir], [${datadir}/${PACKAGE_TARNAME}])
+AC_SUBST([pkgdocdir], [${datadir}/doc/${PACKAGE_TARNAME}])
+AC_SUBST([pkglibdir], [${libdir}/${PACKAGE_TARNAME}])
+AC_SUBST([pkgstatelibdir], [${localstatedir}/lib/${PACKAGE_TARNAME}])
+AC_SUBST([pkgstatelogdir], [${localstatedir}/log/${PACKAGE_TARNAME}])
+AC_SUBST([libbindir], [${pkglibdir}/bin])
+AC_SUBST([erlangbindir], [${pkglibdir}/erlang/bin])
+AC_SUBST([erlanglibdir], [${pkglibdir}/erlang/lib])
+
+AC_REVISION([LOCAL_VERSION])
+
+AC_CONFIG_FILES([Makefile])
+AC_CONFIG_FILES([bin/couchjs.tpl])
+AC_CONFIG_FILES([bin/couchdb.tpl])
+AC_CONFIG_FILES([bin/Makefile])
+AC_CONFIG_FILES([etc/couch_httpd.conf.tpl])
+AC_CONFIG_FILES([etc/couch.ini.tpl])
+AC_CONFIG_FILES([etc/default/couchdb.tpl])
+AC_CONFIG_FILES([etc/default/Makefile])
+AC_CONFIG_FILES([etc/init/couchdb.tpl])
+AC_CONFIG_FILES([etc/init/Makefile])
+AC_CONFIG_FILES([etc/launchd/org.apache.couchdb.plist.tpl])
+AC_CONFIG_FILES([etc/launchd/Makefile])
+AC_CONFIG_FILES([etc/logrotate.d/couchdb.tpl])
+AC_CONFIG_FILES([etc/logrotate.d/Makefile])
+AC_CONFIG_FILES([etc/Makefile])
+AC_CONFIG_FILES([share/Makefile])
+AC_CONFIG_FILES([src/couch_inets/Makefile])
+AC_CONFIG_FILES([src/couchdb/couch.app.tpl])
+AC_CONFIG_FILES([src/couchdb/Makefile])
+AC_CONFIG_FILES([src/Makefile])
+AC_CONFIG_FILES([var/Makefile])
+
+AC_OUTPUT
+
+echo
+echo "You have configured Apache CouchDB. Time to relax."
+echo
+echo "Run \`make && make install' to install."
diff --git a/etc/Makefile.am b/etc/Makefile.am
new file mode 100644
index 00000000..5ee51393
--- /dev/null
+++ b/etc/Makefile.am
@@ -0,0 +1,128 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+datarootdir = @prefix@/share
+
+couchprivlibdir = $(erlanglibdir)/couch-$(version)/priv/lib
+
+SUBDIRS = default init launchd logrotate.d
+
+default_sysconf_data = logrotate.d/couchdb.dist
+
+pkgconf_DATA = couch.ini.dist couch_httpd.conf.dist
+
+nobase_dist_pkgconf_DATA = conf/mime.types
+
+if INIT
+init_sysconf_data = default/couchdb.dist
+init_DATA = init/couchdb.dist
+endif
+
+if LAUNCHD
+launchd_DATA = launchd/org.apache.couchdb.plist.dist
+endif
+
+nobase_sysconf_DATA = $(default_sysconf_data) $(init_sysconf_data)
+
+CLEANFILES = $(init_DATA) $(nobase_sysconf_DATA) $(launchd_DATA) $(pkgconf_DATA)
+
+transform = @program_transform_name@
+couchdb_command_name = `echo couchdb | sed '$(transform)'`
+couchjs_command_name = `echo couchjs | sed '$(transform)'`
+
+couch.ini.dist: couch.ini.tpl
+ sed -e "s|%bindir%|@bindir@|g" \
+ -e "s|%pkgconfdir%|@pkgconfdir@|g" \
+ -e "s|%pkgdatadir%|@pkgdatadir@|g" \
+ -e "s|%pkgstatelibdir%|@pkgstatelibdir@|g" \
+ -e "s|%pkgstatelogdir%|@pkgstatelogdir@|g" \
+ -e "s|%couchprivlibdir%|$(couchprivlibdir)|g" \
+ -e "s|%couchjs_command_name%|$(couchjs_command_name)|g" \
+ < $< > $@
+
+couch_httpd.conf.dist: couch_httpd.conf.tpl
+ sed -e "s|%pkgconfdir%|@pkgconfdir@|g" \
+ -e "s|%pkgdatadir%|@pkgdatadir@|g" \
+ -e "s|%pkgstatelogdir%|@pkgstatelogdir@|g" \
+ < $< > $@
+
+default/couchdb.dist: default/couchdb.tpl
+ if test "$(mkdir_p)"; then \
+ $(mkdir_p) default; \
+ else \
+ if test ! -d default; then \
+ mkdir default; \
+ fi \
+ fi
+ sed -e "s|%configure_input%|$@. Generated from $< by configure.|" \
+ -e "s|%pkgconfdir%|@pkgconfdir@|g" \
+ -e "s|%sysconfdir%|@sysconfdir@|" \
+ -e "s|%localstatedir%|@localstatedir@|" \
+ < $< > $@
+
+init/couchdb.dist: init/couchdb.tpl
+ if test "$(mkdir_p)"; then \
+ $(mkdir_p) init; \
+ else \
+ if test ! -d init; then \
+ mkdir init; \
+ fi \
+ fi
+ sed -e "s|%configure_input%|$@. Generated from $< by configure.|" \
+ -e "s|%bindir%|@bindir@|" \
+ -e "s|%sysconfdir%|@sysconfdir@|" \
+ -e "s|%erlangbindir%|@erlangbindir@|" \
+ -e "s|%couchdb_command_name%|$(couchdb_command_name)|" \
+ < $< > $@
+
+logrotate.d/couchdb.dist: logrotate.d/couchdb.tpl
+ sed -e "s|%pkgstatelogdir%|@pkgstatelogdir@|g" < $< > $@
+
+launchd/org.apache.couchdb.plist.dist: launchd/org.apache.couchdb.plist.tpl
+ if test "$(mkdir_p)"; then \
+ $(mkdir_p) launchd; \
+ else \
+ if test ! -d launchd; then \
+ mkdir launchd; \
+ fi \
+ fi
+ sed -e "s|%configure_input%|$@. Generated from $< by configure.|" \
+ -e "s|%bindir%|@bindir@|" \
+ -e "s|%couchdb_command_name%|$(couchdb_command_name)|" \
+ < $< > $@
+
+install-data-hook:
+ if test $(init_DATA); then \
+ chmod 755 "$(DESTDIR)$(initdir)/couchdb"; \
+ mv -i "$(DESTDIR)$(sysconfdir)/default/couchdb.dist" \
+ "$(DESTDIR)$(sysconfdir)/default/couchdb"; \
+ mv -i "$(DESTDIR)$(initdir)/couchdb.dist" \
+ "$(DESTDIR)$(initdir)/couchdb"; \
+ fi
+ if test $(launchd_DATA); then \
+ mv -i "$(DESTDIR)$(launchddir)/org.apache.couchdb.plist.dist" \
+ "$(DESTDIR)$(launchddir)/org.apache.couchdb.plist"; \
+ fi
+ mv -i "$(DESTDIR)/$(sysconfdir)/logrotate.d/couchdb.dist" \
+ "$(DESTDIR)/$(sysconfdir)/logrotate.d/couchdb"
+ mv -i "$(DESTDIR)/$(pkgconfdir)/couch.ini.dist" \
+ "$(DESTDIR)/$(pkgconfdir)/couch.ini"
+ mv -i "$(DESTDIR)/$(pkgconfdir)/couch_httpd.conf.dist" \
+ "$(DESTDIR)/$(pkgconfdir)/couch_httpd.conf"
+
+uninstall-hook:
+ rm -f $(DESTDIR)$(initdir)/couchdb
+ rm -f $(DESTDIR)$(launchddir)/org.apache.couchdb.plist
+ rm -f $(DESTDIR)$(sysconfdir)/default/couchdb
+ rm -f $(DESTDIR)$(sysconfdir)/logrotate.d/couchdb
+ rm -f $(DESTDIR)/$(pkgconfdir)/couch.ini
+ rm -f $(DESTDIR)/$(pkgconfdir)/couch_httpd.conf
diff --git a/etc/conf/mime.types b/etc/conf/mime.types
new file mode 100644
index 00000000..9a08ba23
--- /dev/null
+++ b/etc/conf/mime.types
@@ -0,0 +1,462 @@
+# This is a comment. I love comments.
+
+# MIME type Extension
+application/EDI-Consent
+application/EDI-X12
+application/EDIFACT
+application/activemessage
+application/andrew-inset ez
+application/applefile
+application/atomicmail
+application/batch-SMTP
+application/beep+xml
+application/cals-1840
+application/commonground
+application/cybercash
+application/dca-rft
+application/dec-dx
+application/dvcs
+application/eshop
+application/http
+application/hyperstudio
+application/iges
+application/index
+application/index.cmd
+application/index.obj
+application/index.response
+application/index.vnd
+application/iotp
+application/ipp
+application/isup
+application/font-tdpfr
+application/mac-binhex40 hqx
+application/mac-compactpro cpt
+application/macwriteii
+application/marc
+application/mathematica
+application/mathematica-old
+application/msword doc
+application/news-message-id
+application/news-transmission
+application/ocsp-request
+application/ocsp-response
+application/octet-stream bin dms lha lzh exe class so dll
+application/oda oda
+application/parityfec
+application/pdf pdf
+application/pgp-encrypted
+application/pgp-keys
+application/pgp-signature
+application/pkcs10
+application/pkcs7-mime
+application/pkcs7-signature
+application/pkix-cert
+application/pkix-crl
+application/pkixcmp
+application/postscript ai eps ps
+application/prs.alvestrand.titrax-sheet
+application/prs.cww
+application/prs.nprend
+application/qsig
+application/remote-printing
+application/riscos
+application/rtf
+application/sdp
+application/set-payment
+application/set-payment-initiation
+application/set-registration
+application/set-registration-initiation
+application/sgml
+application/sgml-open-catalog
+application/sieve
+application/slate
+application/smil smi smil
+application/timestamp-query
+application/timestamp-reply
+application/vemmi
+application/vnd.3M.Post-it-Notes
+application/vnd.FloGraphIt
+application/vnd.accpac.simply.aso
+application/vnd.accpac.simply.imp
+application/vnd.acucobol
+application/vnd.aether.imp
+application/vnd.anser-web-certificate-issue-initiation
+application/vnd.anser-web-funds-transfer-initiation
+application/vnd.audiograph
+application/vnd.businessobjects
+application/vnd.bmi
+application/vnd.canon-cpdl
+application/vnd.canon-lips
+application/vnd.claymore
+application/vnd.commerce-battelle
+application/vnd.commonspace
+application/vnd.comsocaller
+application/vnd.contact.cmsg
+application/vnd.cosmocaller
+application/vnd.cups-postscript
+application/vnd.cups-raster
+application/vnd.cups-raw
+application/vnd.ctc-posml
+application/vnd.cybank
+application/vnd.dna
+application/vnd.dpgraph
+application/vnd.dxr
+application/vnd.ecdis-update
+application/vnd.ecowin.chart
+application/vnd.ecowin.filerequest
+application/vnd.ecowin.fileupdate
+application/vnd.ecowin.series
+application/vnd.ecowin.seriesrequest
+application/vnd.ecowin.seriesupdate
+application/vnd.enliven
+application/vnd.epson.esf
+application/vnd.epson.msf
+application/vnd.epson.quickanime
+application/vnd.epson.salt
+application/vnd.epson.ssf
+application/vnd.ericsson.quickcall
+application/vnd.eudora.data
+application/vnd.fdf
+application/vnd.ffsns
+application/vnd.framemaker
+application/vnd.fsc.weblaunch
+application/vnd.fujitsu.oasys
+application/vnd.fujitsu.oasys2
+application/vnd.fujitsu.oasys3
+application/vnd.fujitsu.oasysgp
+application/vnd.fujitsu.oasysprs
+application/vnd.fujixerox.ddd
+application/vnd.fujixerox.docuworks
+application/vnd.fujixerox.docuworks.binder
+application/vnd.fut-misnet
+application/vnd.grafeq
+application/vnd.groove-account
+application/vnd.groove-identity-message
+application/vnd.groove-injector
+application/vnd.groove-tool-message
+application/vnd.groove-tool-template
+application/vnd.groove-vcard
+application/vnd.hhe.lesson-player
+application/vnd.hp-HPGL
+application/vnd.hp-PCL
+application/vnd.hp-PCLXL
+application/vnd.hp-hpid
+application/vnd.hp-hps
+application/vnd.httphone
+application/vnd.hzn-3d-crossword
+application/vnd.ibm.afplinedata
+application/vnd.ibm.MiniPay
+application/vnd.ibm.modcap
+application/vnd.informix-visionary
+application/vnd.intercon.formnet
+application/vnd.intertrust.digibox
+application/vnd.intertrust.nncp
+application/vnd.intu.qbo
+application/vnd.intu.qfx
+application/vnd.irepository.package+xml
+application/vnd.is-xpr
+application/vnd.japannet-directory-service
+application/vnd.japannet-jpnstore-wakeup
+application/vnd.japannet-payment-wakeup
+application/vnd.japannet-registration
+application/vnd.japannet-registration-wakeup
+application/vnd.japannet-setstore-wakeup
+application/vnd.japannet-verification
+application/vnd.japannet-verification-wakeup
+application/vnd.koan
+application/vnd.lotus-1-2-3
+application/vnd.lotus-approach
+application/vnd.lotus-freelance
+application/vnd.lotus-notes
+application/vnd.lotus-organizer
+application/vnd.lotus-screencam
+application/vnd.lotus-wordpro
+application/vnd.mcd
+application/vnd.mediastation.cdkey
+application/vnd.meridian-slingshot
+application/vnd.mif mif
+application/vnd.minisoft-hp3000-save
+application/vnd.mitsubishi.misty-guard.trustweb
+application/vnd.mobius.daf
+application/vnd.mobius.dis
+application/vnd.mobius.msl
+application/vnd.mobius.plc
+application/vnd.mobius.txf
+application/vnd.motorola.flexsuite
+application/vnd.motorola.flexsuite.adsi
+application/vnd.motorola.flexsuite.fis
+application/vnd.motorola.flexsuite.gotap
+application/vnd.motorola.flexsuite.kmr
+application/vnd.motorola.flexsuite.ttc
+application/vnd.motorola.flexsuite.wem
+application/vnd.mozilla.xul+xml
+application/vnd.ms-artgalry
+application/vnd.ms-asf
+application/vnd.ms-excel xls
+application/vnd.ms-lrm
+application/vnd.ms-powerpoint ppt
+application/vnd.ms-project
+application/vnd.ms-tnef
+application/vnd.ms-works
+application/vnd.mseq
+application/vnd.msign
+application/vnd.music-niff
+application/vnd.musician
+application/vnd.netfpx
+application/vnd.noblenet-directory
+application/vnd.noblenet-sealer
+application/vnd.noblenet-web
+application/vnd.novadigm.EDM
+application/vnd.novadigm.EDX
+application/vnd.novadigm.EXT
+application/vnd.osa.netdeploy
+application/vnd.palm
+application/vnd.pg.format
+application/vnd.pg.osasli
+application/vnd.powerbuilder6
+application/vnd.powerbuilder6-s
+application/vnd.powerbuilder7
+application/vnd.powerbuilder7-s
+application/vnd.powerbuilder75
+application/vnd.powerbuilder75-s
+application/vnd.previewsystems.box
+application/vnd.publishare-delta-tree
+application/vnd.pvi.ptid1
+application/vnd.pwg-xhtml-print+xml
+application/vnd.rapid
+application/vnd.s3sms
+application/vnd.seemail
+application/vnd.shana.informed.formdata
+application/vnd.shana.informed.formtemplate
+application/vnd.shana.informed.interchange
+application/vnd.shana.informed.package
+application/vnd.sss-cod
+application/vnd.sss-dtf
+application/vnd.sss-ntf
+application/vnd.street-stream
+application/vnd.svd
+application/vnd.swiftview-ics
+application/vnd.triscape.mxs
+application/vnd.trueapp
+application/vnd.truedoc
+application/vnd.tve-trigger
+application/vnd.ufdl
+application/vnd.uplanet.alert
+application/vnd.uplanet.alert-wbxml
+application/vnd.uplanet.bearer-choice-wbxml
+application/vnd.uplanet.bearer-choice
+application/vnd.uplanet.cacheop
+application/vnd.uplanet.cacheop-wbxml
+application/vnd.uplanet.channel
+application/vnd.uplanet.channel-wbxml
+application/vnd.uplanet.list
+application/vnd.uplanet.list-wbxml
+application/vnd.uplanet.listcmd
+application/vnd.uplanet.listcmd-wbxml
+application/vnd.uplanet.signal
+application/vnd.vcx
+application/vnd.vectorworks
+application/vnd.vidsoft.vidconference
+application/vnd.visio
+application/vnd.vividence.scriptfile
+application/vnd.wap.sic
+application/vnd.wap.slc
+application/vnd.wap.wbxml wbxml
+application/vnd.wap.wmlc wmlc
+application/vnd.wap.wmlscriptc wmlsc
+application/vnd.webturbo
+application/vnd.wrq-hp3000-labelled
+application/vnd.wt.stf
+application/vnd.xara
+application/vnd.xfdl
+application/vnd.yellowriver-custom-menu
+application/whoispp-query
+application/whoispp-response
+application/wita
+application/wordperfect5.1
+application/x-bcpio bcpio
+application/x-cdlink vcd
+application/x-chess-pgn pgn
+application/x-compress
+application/x-cpio cpio
+application/x-csh csh
+application/x-director dcr dir dxr
+application/x-dvi dvi
+application/x-futuresplash spl
+application/x-gtar gtar
+application/x-gzip
+application/x-hdf hdf
+application/x-javascript js
+application/x-koan skp skd skt skm
+application/x-latex latex
+application/x-netcdf nc cdf
+application/x-sh sh
+application/x-shar shar
+application/x-shockwave-flash swf
+application/x-stuffit sit
+application/x-sv4cpio sv4cpio
+application/x-sv4crc sv4crc
+application/x-tar tar
+application/x-tcl tcl
+application/x-tex tex
+application/x-texinfo texinfo texi
+application/x-troff t tr roff
+application/x-troff-man man
+application/x-troff-me me
+application/x-troff-ms ms
+application/x-ustar ustar
+application/x-wais-source src
+application/x400-bp
+application/xml
+application/xml-dtd
+application/xml-external-parsed-entity
+application/zip zip
+audio/32kadpcm
+audio/basic au snd
+audio/g.722.1
+audio/l16
+audio/midi mid midi kar
+audio/mp4a-latm
+audio/mpa-robust
+audio/mpeg mpga mp2 mp3
+audio/parityfec
+audio/prs.sid
+audio/telephone-event
+audio/tone
+audio/vnd.cisco.nse
+audio/vnd.cns.anp1
+audio/vnd.cns.inf1
+audio/vnd.digital-winds
+audio/vnd.everad.plj
+audio/vnd.lucent.voice
+audio/vnd.nortel.vbk
+audio/vnd.nuera.ecelp4800
+audio/vnd.nuera.ecelp7470
+audio/vnd.nuera.ecelp9600
+audio/vnd.octel.sbc
+audio/vnd.qcelp
+audio/vnd.rhetorex.32kadpcm
+audio/vnd.vmx.cvsd
+audio/x-aiff aif aiff aifc
+audio/x-mpegurl m3u
+audio/x-pn-realaudio ram rm
+audio/x-pn-realaudio-plugin rpm
+audio/x-realaudio ra
+audio/x-wav wav
+chemical/x-pdb pdb
+chemical/x-xyz xyz
+image/bmp bmp
+image/cgm
+image/g3fax
+image/gif gif
+image/ief ief
+image/jpeg jpeg jpg jpe
+image/naplps
+image/png png
+image/prs.btif
+image/prs.pti
+image/tiff tiff tif
+image/vnd.cns.inf2
+image/vnd.dwg
+image/vnd.dxf
+image/vnd.fastbidsheet
+image/vnd.fpx
+image/vnd.fst
+image/vnd.fujixerox.edmics-mmr
+image/vnd.fujixerox.edmics-rlc
+image/vnd.mix
+image/vnd.net-fpx
+image/vnd.svf
+image/vnd.wap.wbmp wbmp
+image/vnd.xiff
+image/x-cmu-raster ras
+image/x-portable-anymap pnm
+image/x-portable-bitmap pbm
+image/x-portable-graymap pgm
+image/x-portable-pixmap ppm
+image/x-rgb rgb
+image/x-xbitmap xbm
+image/x-xpixmap xpm
+image/x-xwindowdump xwd
+message/delivery-status
+message/disposition-notification
+message/external-body
+message/http
+message/news
+message/partial
+message/rfc822
+message/s-http
+model/iges igs iges
+model/mesh msh mesh silo
+model/vnd.dwf
+model/vnd.flatland.3dml
+model/vnd.gdl
+model/vnd.gs-gdl
+model/vnd.gtw
+model/vnd.mts
+model/vnd.vtu
+model/vrml wrl vrml
+multipart/alternative
+multipart/appledouble
+multipart/byteranges
+multipart/digest
+multipart/encrypted
+multipart/form-data
+multipart/header-set
+multipart/mixed
+multipart/parallel
+multipart/related
+multipart/report
+multipart/signed
+multipart/voice-message
+text/calendar
+text/css css
+text/directory
+text/enriched
+text/html html htm
+text/parityfec
+text/plain asc txt
+text/prs.lines.tag
+text/rfc822-headers
+text/richtext rtx
+text/rtf rtf
+text/sgml sgml sgm
+text/tab-separated-values tsv
+text/t140
+text/uri-list
+text/vnd.DMClientScript
+text/vnd.IPTC.NITF
+text/vnd.IPTC.NewsML
+text/vnd.abc
+text/vnd.curl
+text/vnd.flatland.3dml
+text/vnd.fly
+text/vnd.fmi.flexstor
+text/vnd.in3d.3dml
+text/vnd.in3d.spot
+text/vnd.latex-z
+text/vnd.motorola.reflex
+text/vnd.ms-mediapackage
+text/vnd.wap.si
+text/vnd.wap.sl
+text/vnd.wap.wml wml
+text/vnd.wap.wmlscript wmls
+text/x-setext etx
+text/x-server-parsed-html shtml
+text/xml xml xsl
+text/xml-external-parsed-entity
+video/mp4v-es
+video/mpeg mpeg mpg mpe
+video/parityfec
+video/pointer
+video/quicktime qt mov
+video/vnd.fvt
+video/vnd.motorola.video
+video/vnd.motorola.videop
+video/vnd.mpegurl mxu
+video/vnd.mts
+video/vnd.nokia.interleaved-multimedia
+video/vnd.vivo
+video/x-msvideo avi
+video/x-sgi-movie movie
+x-conference/x-cooltalk ice
diff --git a/etc/couch.ini.tpl.in b/etc/couch.ini.tpl.in
new file mode 100644
index 00000000..81b47f7f
--- /dev/null
+++ b/etc/couch.ini.tpl.in
@@ -0,0 +1,19 @@
+; @configure_input@
+
+[Couch]
+
+ConsoleStartupMsg=Apache CouchDB is starting.
+
+DbRootDir=%pkgstatelibdir%
+
+LogFile=%pkgstatelogdir%/couch.log
+
+HttpConfigFile=%pkgconfdir%/couch_httpd.conf
+
+UtilDriverDir=%couchprivlibdir%
+
+LogLevel=info
+
+[Couch Query Servers]
+
+text/javascript=%bindir%/%couchjs_command_name% %pkgdatadir%/server/main.js
diff --git a/etc/couch_httpd.conf.tpl.in b/etc/couch_httpd.conf.tpl.in
new file mode 100644
index 00000000..e4d27493
--- /dev/null
+++ b/etc/couch_httpd.conf.tpl.in
@@ -0,0 +1,11 @@
+# @configure_input@
+
+Port 5984
+#BindAddress 127.0.0.1
+ServerName localhost
+SocketType ip_comm
+Modules mod_couch mod_get mod_log
+ServerRoot %pkgconfdir%
+DocumentRoot %pkgdatadir%/www
+ErrorLog %pkgstatelogdir%/http_error.log
+TransferLog %pkgstatelogdir%/http_access.log
diff --git a/etc/default/Makefile.am b/etc/default/Makefile.am
new file mode 100644
index 00000000..4953462c
--- /dev/null
+++ b/etc/default/Makefile.am
@@ -0,0 +1,13 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+## This file intentionally left blank.
diff --git a/etc/default/couchdb.tpl.in b/etc/default/couchdb.tpl.in
new file mode 100644
index 00000000..fadef1a7
--- /dev/null
+++ b/etc/default/couchdb.tpl.in
@@ -0,0 +1,10 @@
+# @configure_input@
+
+# Sourced by init script for configuration.
+
+COUCHDB_USER=couchdb
+COUCHDB_INI_FILE=%pkgconfdir%/couch.ini
+COUCHDB_PID_FILE=%localstatedir%/run/couchdb.pid
+COUCHDB_STDOUT_FILE=/dev/null
+COUCHDB_STDERR_FILE=/dev/null
+COUCHDB_RESPAWN_TIMEOUT=5
diff --git a/etc/init/Makefile.am b/etc/init/Makefile.am
new file mode 100644
index 00000000..4953462c
--- /dev/null
+++ b/etc/init/Makefile.am
@@ -0,0 +1,13 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+## This file intentionally left blank.
diff --git a/etc/init/couchdb.tpl.in b/etc/init/couchdb.tpl.in
new file mode 100755
index 00000000..fbeafa08
--- /dev/null
+++ b/etc/init/couchdb.tpl.in
@@ -0,0 +1,174 @@
+#!/bin/sh -e
+
+# @configure_input@
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+### BEGIN INIT INFO
+# Provides: couchdb
+# Required-Start: $local_fs $remote_fs
+# Required-Stop: $local_fs $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Apache CouchDB init script
+# Description: Apache CouchDB init script for the database server.
+### END INIT INFO
+
+SCRIPT_OK=0
+SCRIPT_ERROR=1
+
+DESCRIPTION="database server"
+NAME=couchdb
+SCRIPT_NAME=$(basename $0)
+COUCHDB=%bindir%/%couchdb_command_name%
+CONFIGURATION_FILE=%sysconfdir%/default/couchdb
+LSB_LIBRARY=/lib/lsb/init-functions
+
+if test ! -x $COUCHDB; then
+ exit $SCRIPT_ERROR
+fi
+
+if test -r $CONFIGURATION_FILE; then
+ . $CONFIGURATION_FILE
+fi
+
+log_daemon_msg () {
+ # Dummy function to be replaced by LSB library.
+
+ echo $@
+}
+
+log_end_msg () {
+ # Dummy function to be replaced by LSB library.
+
+ if test "$1" != "0"; then
+ echo "Error with $DESCRIPTION: $NAME"
+ fi
+ return $1
+}
+
+if test -r $LSB_LIBRARY; then
+ . $LSB_LIBRARY
+fi
+
+start_couchdb () {
+ # Start Apache CouchDB as a background process.
+
+ command="$COUCHDB -b"
+ if test -n "$COUCHDB_INI_FILE"; then
+ command="$command -c $COUCHDB_INI_FILE"
+ fi
+ if test -n "$COUCHDB_PID_FILE"; then
+ command="$command -p $COUCHDB_PID_FILE"
+ fi
+ if test -n "$COUCHDB_STDOUT_FILE"; then
+ command="$command -o $COUCHDB_STDOUT_FILE"
+ fi
+ if test -n "$COUCHDB_STDERR_FILE"; then
+ command="$command -e $COUCHDB_STDERR_FILE"
+ fi
+ if test -n "$COUCHDB_RESPAWN_TIMEOUT"; then
+ command="$command -r $COUCHDB_RESPAWN_TIMEOUT"
+ fi
+ if test -n "$COUCHDB_USER"; then
+ if test -n "$COUCHDB_PID_FILE"; then
+ touch $COUCHDB_PID_FILE
+ chown $COUCHDB_USER $COUCHDB_PID_FILE
+ fi
+ if su $COUCHDB_USER -c "$command" > /dev/null; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ else
+ if $command > /dev/null; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ fi
+}
+
+stop_couchdb () {
+ # Stop the running Apache CouchDB process.
+
+ command="$COUCHDB -d"
+ if test -n "$COUCHDB_PID_FILE"; then
+ command="$command -p $COUCHDB_PID_FILE"
+ fi
+ if test -n "$COUCHDB_USER"; then
+ if su $COUCHDB_USER -c "$command" > /dev/null; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ else
+ if $command > /dev/null; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ fi
+}
+
+display_status () {
+ # Display the status of the running Apache CouchDB process.
+
+ $COUCHDB -s
+}
+
+parse_script_option_list () {
+ # Parse arguments passed to the script and take appropriate action.
+
+ case "$1" in
+ start)
+ log_daemon_msg "Starting $DESCRIPTION" $NAME
+ if start_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESCRIPTION" $NAME
+ if stop_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ restart|force-reload)
+ log_daemon_msg "Restarting $DESCRIPTION" $NAME
+ if stop_couchdb; then
+ if start_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ status)
+ display_status
+ ;;
+ *)
+ cat << EOF >&2
+Usage: $SCRIPT_NAME {start|stop|restart|force-reload|status}
+EOF
+ exit $SCRIPT_ERROR
+ ;;
+ esac
+}
+
+parse_script_option_list $@
diff --git a/etc/launchd/Makefile.am b/etc/launchd/Makefile.am
new file mode 100644
index 00000000..4953462c
--- /dev/null
+++ b/etc/launchd/Makefile.am
@@ -0,0 +1,13 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+## This file intentionally left blank.
diff --git a/etc/launchd/org.apache.couchdb.plist.tpl.in b/etc/launchd/org.apache.couchdb.plist.tpl.in
new file mode 100644
index 00000000..ac0c89fd
--- /dev/null
+++ b/etc/launchd/org.apache.couchdb.plist.tpl.in
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN"
+ "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<!-- @configure_input@ -->
+<plist version="1.0">
+ <dict>
+ <key>Label</key>
+ <string>org.apache.couchdb</string>
+ <key>EnvironmentVariables</key>
+ <dict>
+ <key>HOME</key>
+ <string>~</string>
+ <key>DYLD_LIBRARY_PATH</key>
+ <string>/opt/local/lib:$DYLD_LIBRARY_PATH</string>
+ </dict>
+ <key>ProgramArguments</key>
+ <array>
+ <string>%bindir%/%couchdb_command_name%</string>
+ </array>
+ <key>UserName</key>
+ <string>couchdb</string>
+ <key>RunAtLoad</key>
+ <true/>
+ <key>KeepAlive</key>
+ <true/>
+ <key>OnDemand</key>
+ <false/>
+ </dict>
+</plist>
diff --git a/etc/logrotate.d/Makefile.am b/etc/logrotate.d/Makefile.am
new file mode 100644
index 00000000..4953462c
--- /dev/null
+++ b/etc/logrotate.d/Makefile.am
@@ -0,0 +1,13 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+## This file intentionally left blank.
diff --git a/etc/logrotate.d/couchdb.tpl.in b/etc/logrotate.d/couchdb.tpl.in
new file mode 100644
index 00000000..9bef5b2f
--- /dev/null
+++ b/etc/logrotate.d/couchdb.tpl.in
@@ -0,0 +1,9 @@
+%pkgstatelogdir%/*.log {
+ weekly
+ rotate 10
+ copytruncate
+ delaycompress
+ compress
+ notifempty
+ missingok
+}
diff --git a/share/Makefile.am b/share/Makefile.am
new file mode 100644
index 00000000..afd37f14
--- /dev/null
+++ b/share/Makefile.am
@@ -0,0 +1,56 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+datarootdir = @prefix@/share
+
+nobase_dist_pkgdata_DATA = \
+ server/main.js \
+ www/browse/database.html \
+ www/browse/document.html \
+ www/browse/index.html \
+ www/couch_tests.html \
+ www/favicon.ico \
+ www/image/add.gif \
+ www/image/apply.gif \
+ www/image/bg.png \
+ www/image/cancel.gif \
+ www/image/delete-mini.gif \
+ www/image/delete.gif \
+ www/image/hgrad.gif \
+ www/image/load.gif \
+ www/image/logo.png \
+ www/image/order-asc.gif \
+ www/image/order-desc.gif \
+ www/image/path.gif \
+ www/image/run-mini.gif \
+ www/image/run.gif \
+ www/image/running.gif \
+ www/image/save.gif \
+ www/image/test_failure.gif \
+ www/image/test_success.gif \
+ www/image/thead.gif \
+ www/image/thead-key.gif \
+ www/image/toggle-collapse.gif \
+ www/image/toggle-expand.gif \
+ www/image/twisty.gif \
+ www/index.html \
+ www/replicator.html \
+ www/script/browse.js \
+ www/script/couch.js \
+ www/script/couch_tests.js \
+ www/script/jquery.js \
+ www/script/jquery.cookies.js \
+ www/script/json2.js \
+ www/script/pprint.js \
+ www/script/shell.js \
+ www/shell.html \
+ www/style/layout.css
diff --git a/share/server/main.js b/share/server/main.js
new file mode 100644
index 00000000..91e13742
--- /dev/null
+++ b/share/server/main.js
@@ -0,0 +1,165 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var cmd;
+var map_funs = []; // The map functions to compute against documents
+var map_results = [];
+
+try {
+ var sandbox = evalcx('');
+ sandbox.map = function(key, value) {
+ map_results.push([key, value]);
+ }
+} catch (e) {
+ // fallback for older versions of spidermonkey that don't have evalcx
+ var sandbox = null;
+ map = function(key, value) {
+ map_results.push([key, value]);
+ }
+}
+
+// Commands are in the form of json arrays:
+// ["commandname",..optional args...]\n
+//
+// Responses are json values followed by a new line ("\n")
+
+while (cmd = eval(readline())) {
+ switch (cmd[0]) {
+ case "reset":
+ // clear the map_functions and run gc
+ map_funs = [];
+ gc();
+ print("true"); // indicates success
+ break;
+ case "add_fun":
+ // The second arg is a string that will compile to a function.
+ // and then we add it to map_functions array
+ try {
+ var functionObject = sandbox ? evalcx(cmd[1], sandbox) : eval(cmd[1]);
+ } catch (err) {
+ print(toJSON({error: {id: "map_compilation_error",
+ reason: err.toString() + " (" + toJSON(cmd[1]) + ")"}}));
+ break;
+ }
+ if (typeof(functionObject) == "function") {
+ map_funs.push(functionObject);
+ print("true");
+ } else {
+ print(toJSON({error: "map_compilation_error",
+ reason: "expression does not eval to a function. (" + cmd[1] + ")"}));
+ }
+ break;
+ case "map_doc":
+ // The second arg is a document. We compute all the map functions against
+ // it.
+ //
+ // Each function can output multiple keys value, pairs for each document
+ //
+ // Example output of map_doc after three functions set by add_fun cmds:
+ // [
+ // [["Key","Value"]], <- fun 1 returned 1 key value
+ // [], <- fun 2 returned 0 key values
+ // [["Key1","Value1"],["Key2","Value2"]],<- fun 3 returned 2 key values
+ // ]
+ //
+ var doc = cmd[1];
+ seal(doc); // seal to prevent map functions from changing doc
+ var buf = [];
+ for (var i = 0; i < map_funs.length; i++) {
+ map_results = [];
+ try {
+ map_funs[i](doc);
+ buf.push(map_results.filter(function(pair) {
+ return pair[0] !== undefined && pair[1] !== undefined;
+ }));
+ } catch (err) {
+ if (err == "fatal_error") {
+ // Only if it's a "fatal_error" do we exit. What's a fatal error?
+ // That's for the query to decide.
+ //
+ // This will make it possible for queries to completely error out,
+ // by catching their own local exception and rethrowing a
+ // fatal_error. But by default if they don't do error handling we
+ // just eat the exception and carry on.
+ print(toJSON({error: "map_runtime_error",
+ reason: "function raised fatal exception"}));
+ quit();
+ }
+ print(toJSON({log: "function raised exception (" + err + ")"}));
+ buf.push([]);
+ }
+ }
+ print(toJSON(buf));
+ break;
+ default:
+ print(toJSON({error: "query_server_error",
+ reason: "unknown command '" + cmd[0] + "'"}));
+ quit();
+ }
+}
+
+function toJSON(val) {
+ if (typeof(val) == "undefined") {
+ throw new TypeError("Cannot encode undefined value as JSON");
+ }
+ var subs = {'\b': '\\b', '\t': '\\t', '\n': '\\n', '\f': '\\f',
+ '\r': '\\r', '"' : '\\"', '\\': '\\\\'};
+ if (typeof(val) == "xml") { // E4X support
+ val = val.toXMLString();
+ }
+ return {
+ "Array": function(v) {
+ var buf = [];
+ for (var i = 0; i < v.length; i++) {
+ buf.push(toJSON(v[i]));
+ }
+ return "[" + buf.join(",") + "]";
+ },
+ "Boolean": function(v) {
+ return v.toString();
+ },
+ "Date": function(v) {
+ var f = function(n) { return n < 10 ? '0' + n : n }
+ return '"' + v.getUTCFullYear() + '-' +
+ f(v.getUTCMonth() + 1) + '-' +
+ f(v.getUTCDate()) + 'T' +
+ f(v.getUTCHours()) + ':' +
+ f(v.getUTCMinutes()) + ':' +
+ f(v.getUTCSeconds()) + 'Z"';
+ },
+ "Number": function(v) {
+ return isFinite(v) ? v.toString() : "null";
+ },
+ "Object": function(v) {
+ if (v === null) return "null";
+ var buf = [];
+ for (var k in v) {
+ if (!v.hasOwnProperty(k) || typeof(k) !== "string" || v[k] === undefined) {
+ continue;
+ }
+ buf.push(toJSON(k, val) + ": " + toJSON(v[k]));
+ }
+ return "{" + buf.join(",") + "}";
+ },
+ "String": function(v) {
+ if (/["\\\x00-\x1f]/.test(v)) {
+ v = v.replace(/([\x00-\x1f\\"])/g, function(a, b) {
+ var c = subs[b];
+ if (c) return c;
+ c = b.charCodeAt();
+ return '\\u00' + Math.floor(c / 16).toString(16) + (c % 16).toString(16);
+ });
+ }
+ return '"' + v + '"';
+ }
+ }[val != null ? val.constructor.name : "Object"](val);
+}
diff --git a/share/www/browse/_create_database.html b/share/www/browse/_create_database.html
new file mode 100644
index 00000000..5c909a7a
--- /dev/null
+++ b/share/www/browse/_create_database.html
@@ -0,0 +1,33 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Create New Database</h2>
+ <fieldset>
+ <p class="help">
+ Please enter the name of the database. Note that only lowercase
+ characters (<tt>a-z</tt>), digits (<tt>0-9</tt>), or any of the
+ characters <tt>_</tt>, <tt>$</tt>, <tt>(</tt>, <tt>)</tt>, <tt>+</tt>,
+ <tt>-</tt>, and <tt>/</tt> are allowed.
+ </p>
+ <table summary=""><tbody><tr>
+ <th><label>Database Name:</label></th>
+ <td><input type="text" name="name" size="24"></td>
+ </tr></table>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Create</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/share/www/browse/_create_document.html b/share/www/browse/_create_document.html
new file mode 100644
index 00000000..fc9eef02
--- /dev/null
+++ b/share/www/browse/_create_document.html
@@ -0,0 +1,31 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Create New Document</h2>
+ <fieldset>
+ <p class="help">
+ Please enter a unique ID of the document, or leave the field empty to get
+ an auto-generated ID.
+ </p>
+ <table summary=""><tbody><tr>
+ <th><label>Document ID:</label></th>
+ <td><input type="text" name="docid" size="32"></td>
+ </tr></table>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Create</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/share/www/browse/_delete_database.html b/share/www/browse/_delete_database.html
new file mode 100644
index 00000000..16be8f30
--- /dev/null
+++ b/share/www/browse/_delete_database.html
@@ -0,0 +1,27 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Delete Database</h2>
+ <fieldset>
+ <p class="help">
+ Are you sure you want to delete this database? Note that this is an
+ <strong>irreversible operation</strong>!
+ </p>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Delete</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/share/www/browse/_delete_document.html b/share/www/browse/_delete_document.html
new file mode 100644
index 00000000..6b5497a4
--- /dev/null
+++ b/share/www/browse/_delete_document.html
@@ -0,0 +1,26 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post">
+ <h2>Delete Document</h2>
+ <fieldset>
+ <p class="help">
+ Are you sure you want to delete this document?
+ </p>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Delete</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/share/www/browse/_save_view_as.html b/share/www/browse/_save_view_as.html
new file mode 100644
index 00000000..a7f10785
--- /dev/null
+++ b/share/www/browse/_save_view_as.html
@@ -0,0 +1,35 @@
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<form action="" method="post" id="view-save-as" onload="initForm(this)">
+ <h2>Save View As&hellip;</h2>
+ <fieldset>
+ <p class="help">
+ You can save this function code as a permanent view in the database. Just
+ enter or select the design document and the name of the view below. Note
+ that if you choose an existing view, it will be overwritten!
+ </p>
+ <table summary=""><tbody><tr>
+ <th><label for="input_docid">Design Document:</label></th>
+ <td><tt>_design/</tt><input type="text" id="input_docid" name="docid" size="20"></td>
+ </tr><tr>
+      <th><label for="input_name">View Name:</label></th>
+ <td><input type="text" id="input_name" name="name" size="30"></td>
+ </tr></table>
+ </fieldset>
+ <div class="buttons">
+ <button type="submit">Save</button>
+ <button type="button" class="cancel">Cancel</button>
+ </div>
+</form>
diff --git a/share/www/browse/database.html b/share/www/browse/database.html
new file mode 100644
index 00000000..33041bd7
--- /dev/null
+++ b/share/www/browse/database.html
@@ -0,0 +1,151 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Browse Database</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="../style/layout.css" type="text/css">
+ <script src="../script/json2.js"></script>
+ <script src="../script/jquery.js"></script>
+ <script src="../script/jquery.cookies.js"></script>
+ <script src="../script/jquery.dialog.js"></script>
+ <script src="../script/jquery.resizer.js"></script>
+ <script src="../script/jquery.suggest.js"></script>
+ <script src="../script/couch.js"></script>
+ <script src="../script/browse.js"></script>
+ <script src="../script/pprint.js"></script>
+ <script>
+ var page = new CouchDatabasePage();
+ if (window != parent) {
+ parent.updateNavigation(location.pathname, location.search.split("/", 2)[0]);
+ }
+
+ $(document).ready(function() {
+ $("h1 strong").text(page.dbName);
+ $("#viewcode label").click(function() {
+ $("#viewcode").toggleClass("expanded");
+ });
+ $("#viewcode button.run").click(function() {
+ page.updateDocumentListing();
+ });
+ $("#viewcode button.revert").click(function() {
+ page.revertViewChanges();
+ });
+ $("#viewcode button.save").click(function() {
+ page.saveViewChanges();
+ });
+ $("#viewcode button.saveas").click(function() {
+ page.saveViewAs();
+ });
+ $("#viewcode textarea").resizable({grippie: $("#viewcode .bottom")});
+
+ // Restore preferences/state from cookies
+ var query = $.cookies.get(page.dbName + ".query");
+ if (query) $("#viewcode textarea").val(query);
+ var desc = $.cookies.get(page.dbName + ".desc");
+ if (desc) $("#documents thead th.key").addClass("desc");
+ var rowsPerPage = $.cookies.get(page.dbName + ".perpage");
+ if (rowsPerPage) $("#perpage").val(rowsPerPage);
+
+ page.populateViewsMenu();
+ page.populateViewEditor();
+ page.updateDocumentListing();
+
+ $("#switch select").change(function() {
+ var viewName = $(this).val();
+ if (!viewName) $.cookies.remove(page.dbName + ".view");
+ location.href = "?" + page.dbName + (viewName ? "/" + viewName : "");
+ });
+ $("#documents thead th.key").click(function() {
+ $(this).toggleClass("desc");
+ page.updateDocumentListing();
+ });
+ $("#perpage").change(function() {
+ page.updateDocumentListing();
+ $.cookies.set(page.dbName + ".perpage", this.value);
+ });
+ $("#toolbar button.add").click(page.addDocument);
+ $("#toolbar button.delete").click(page.deleteDatabase);
+ });
+ </script>
+ </head>
+
+ <body>
+ <h1>
+ <a href="index.html">Overview</a>
+ <strong>?</strong>
+ </h1>
+ <div id="wrap">
+ <div id="switch">
+ <label>Select view: <select>
+ <option value="">All documents</option>
+ <option value="_design_docs">Design documents</option>
+ <option value="_temp_view">Custom query…</option>
+ </select></label>
+ </div>
+ <ul id="toolbar">
+ <li><button class="add">Create Document …</button></li>
+ <li><button class="delete">Delete Database</button></li>
+ </ul>
+
+ <div id="viewcode" style="display: none">
+ <div class="top">
+ <a id="designdoc-link"></a>
+ <label for="viewcode_textarea">View Function</label>
+ </div>
+ <textarea id="viewcode_textarea" rows="5" cols="79" spellcheck="false" wrap="off">function(doc) {
+ map(null, doc);
+}</textarea>
+ <div class="bottom">
+ <button class="save" type="button" disabled>Save</button>
+ <button class="saveas" type="button">Save As…</button>
+ <button class="revert" type="button" disabled>Revert</button>
+ <button class="run" type="button">Run</button>
+ </div>
+ </div>
+
+ <table id="documents" class="listing" cellspacing="0">
+ <caption>Documents</caption>
+ <thead>
+ <tr>
+ <th class="key"><div>Key</div></th>
+ <th class="value">Value</th>
+ </tr>
+ </thead>
+ <tbody class="content">
+ </tbody>
+ <tbody class="footer">
+ <tr>
+ <td colspan="4">
+ <div id="paging">
+ <a class="prev">← Previous Page</a> |
+ <label>Rows per page: <select id="perpage">
+ <option selected>10</option>
+ <option>25</option>
+ <option>50</option>
+ <option>100</option>
+ </select></label> |
+ <a class="next">Next Page →</a>
+ </div>
+ <span></span>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+
+ </div>
+ </body>
+</html>
diff --git a/share/www/browse/document.html b/share/www/browse/document.html
new file mode 100644
index 00000000..f7876539
--- /dev/null
+++ b/share/www/browse/document.html
@@ -0,0 +1,88 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>View Document</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="../style/layout.css" type="text/css">
+ <script src="../script/json2.js"></script>
+ <script src="../script/jquery.js"></script>
+ <script src="../script/jquery.cookies.js"></script>
+ <script src="../script/jquery.dialog.js"></script>
+ <script src="../script/jquery.resizer.js"></script>
+ <script src="../script/couch.js"></script>
+ <script src="../script/browse.js"></script>
+ <script src="../script/pprint.js"></script>
+ <script>
+ var page = new CouchDocumentPage();
+ if (window != parent) {
+ var dbLink = $("h1 a.dbname").get(0);
+ parent.updateNavigation(
+ location.pathname.replace(/document\.html/, "database.html"),
+ location.search.split("/")[0]
+ );
+ }
+
+ $(function() {
+ $("h1 a.dbname").text(page.dbName).attr("href", "database.html?" + page.dbName);
+ $("h1 strong").text(page.doc._id);
+ page.updateFieldListing();
+ $("#toolbar button.save").click(page.saveDocument);
+ $("#toolbar button.add").click(page.addField);
+ $("#toolbar button.delete").click(page.deleteDocument);
+ });
+ </script>
+ </head>
+
+ <body>
+ <h1>
+ <a href="index.html">Overview</a>
+ <a class="dbname" href="#">?</a>
+ <strong>?</strong>
+ </h1>
+ <div id="wrap">
+ <ul id="toolbar">
+ <li><button class="save">Save Document</button></li>
+ <li><button class="add">Add Field</button></li>
+ <li><button class="delete">Delete Document</button></li>
+ </ul>
+
+ <table id="fields" class="listing" cellspacing="0">
+ <col class="field"><col class="value">
+ <caption>Fields</caption>
+ <thead>
+ <tr>
+ <th>Field</th>
+ <th>Value</th>
+ </tr>
+ </thead>
+ <tbody class="content">
+ </tbody>
+ <tbody class="footer">
+ <tr>
+ <td colspan="2">
+ <div id="paging">
+ <a class="prev">← Previous Version</a> | <a class="next">Next Version →</a>
+ </div>
+ <span></span>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+
+ </div>
+ </body>
+</html>
diff --git a/share/www/browse/index.html b/share/www/browse/index.html
new file mode 100644
index 00000000..bc901dcf
--- /dev/null
+++ b/share/www/browse/index.html
@@ -0,0 +1,65 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Welcome</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="../style/layout.css" type="text/css">
+ <script src="../script/json2.js"></script>
+ <script src="../script/jquery.js"></script>
+ <script src="../script/jquery.cookies.js"></script>
+ <script src="../script/jquery.dialog.js"></script>
+ <script src="../script/couch.js"></script>
+ <script src="../script/browse.js"></script>
+ <script>
+ var page = new CouchIndexPage();
+ $(document).ready(function() {
+ page.updateDatabaseListing();
+ $("#toolbar button.add").click(function() {
+ page.addDatabase();
+ });
+ if (window != parent) parent.updateNavigation();
+ });
+ </script>
+ </head>
+ <body>
+ <h1><strong>Overview</strong></h1>
+ <div id="wrap">
+ <ul id="toolbar">
+ <li><button class="add">Create Database …</button></li>
+ </ul>
+
+ <table class="listing" id="databases" cellspacing="0">
+ <caption>Databases</caption>
+ <thead>
+ <tr>
+ <th>Name</th>
+ <th>Number of Documents</th>
+ <th>Update Seq</th>
+ </tr>
+ </thead>
+ <tbody class="content">
+ </tbody>
+ <tbody class="footer">
+ <tr>
+ <td colspan="3"></td>
+ </tr>
+ </tbody>
+ </table>
+
+ </div>
+ </body>
+</html>
diff --git a/share/www/couch_tests.html b/share/www/couch_tests.html
new file mode 100644
index 00000000..a1e4ee6e
--- /dev/null
+++ b/share/www/couch_tests.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Test Suite</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css" type="text/css">
+ <script src="script/json2.js"></script>
+ <script src="script/jquery.js"></script>
+ <script src="script/couch.js"></script>
+ <script src="script/pprint.js"></script>
+ <script>
+ $(document).ready(function() {
+ $("#toolbar button.load").click(function() {
+ location.reload(true);
+ });
+ });
+ </script>
+ <script src="script/couch_tests.js"></script>
+ <script>
+ $(document).ready(function() {
+ updateTestsListing();
+ $("#toolbar button.run").click(runAllTests);
+ if (window != parent) parent.updateNavigation();
+ });
+ </script>
+ </head>
+ <body>
+ <h1>
+ <a href="browse/index.html">Overview</a>
+ <strong>Test Suite</strong>
+ </h1>
+ <div id="wrap">
+ <ul id="toolbar">
+ <li><button class="run">Run All</button></li>
+ <li><button class="load">Reload</button></li>
+ </ul>
+ <table class="listing" id="tests" cellspacing="0">
+ <caption>Tests</caption>
+ <thead>
+ <tr>
+ <th class="name">Name</th>
+ <th class="status">Status</th>
+ <th class="duration">Elapsed Time</th>
+ <th class="details">Details</th>
+ </tr>
+ </thead>
+ <tbody class="content">
+ </tbody>
+ <tbody class="footer">
+ <tr>
+ <td colspan="4"></td>
+ </tr>
+ </tbody>
+ </table>
+ </div>
+ </body>
+</html>
diff --git a/share/www/favicon.ico b/share/www/favicon.ico
new file mode 100644
index 00000000..34bfaa86
--- /dev/null
+++ b/share/www/favicon.ico
Binary files differ
diff --git a/share/www/image/add.gif b/share/www/image/add.gif
new file mode 100644
index 00000000..9a65fc80
--- /dev/null
+++ b/share/www/image/add.gif
Binary files differ
diff --git a/share/www/image/apply.gif b/share/www/image/apply.gif
new file mode 100644
index 00000000..63de0d53
--- /dev/null
+++ b/share/www/image/apply.gif
Binary files differ
diff --git a/share/www/image/bg.png b/share/www/image/bg.png
new file mode 100644
index 00000000..844add78
--- /dev/null
+++ b/share/www/image/bg.png
Binary files differ
diff --git a/share/www/image/cancel.gif b/share/www/image/cancel.gif
new file mode 100644
index 00000000..4329076e
--- /dev/null
+++ b/share/www/image/cancel.gif
Binary files differ
diff --git a/share/www/image/delete-mini.gif b/share/www/image/delete-mini.gif
new file mode 100644
index 00000000..b2090067
--- /dev/null
+++ b/share/www/image/delete-mini.gif
Binary files differ
diff --git a/share/www/image/delete.gif b/share/www/image/delete.gif
new file mode 100644
index 00000000..0e58475f
--- /dev/null
+++ b/share/www/image/delete.gif
Binary files differ
diff --git a/share/www/image/grippie.gif b/share/www/image/grippie.gif
new file mode 100644
index 00000000..a8807896
--- /dev/null
+++ b/share/www/image/grippie.gif
Binary files differ
diff --git a/share/www/image/hgrad.gif b/share/www/image/hgrad.gif
new file mode 100644
index 00000000..08aa80ca
--- /dev/null
+++ b/share/www/image/hgrad.gif
Binary files differ
diff --git a/share/www/image/load.gif b/share/www/image/load.gif
new file mode 100644
index 00000000..e06762e9
--- /dev/null
+++ b/share/www/image/load.gif
Binary files differ
diff --git a/share/www/image/logo.png b/share/www/image/logo.png
new file mode 100644
index 00000000..cbe991cd
--- /dev/null
+++ b/share/www/image/logo.png
Binary files differ
diff --git a/share/www/image/order-asc.gif b/share/www/image/order-asc.gif
new file mode 100644
index 00000000..d2a237ae
--- /dev/null
+++ b/share/www/image/order-asc.gif
Binary files differ
diff --git a/share/www/image/order-desc.gif b/share/www/image/order-desc.gif
new file mode 100644
index 00000000..1043b499
--- /dev/null
+++ b/share/www/image/order-desc.gif
Binary files differ
diff --git a/share/www/image/path.gif b/share/www/image/path.gif
new file mode 100644
index 00000000..98545488
--- /dev/null
+++ b/share/www/image/path.gif
Binary files differ
diff --git a/share/www/image/run-mini.gif b/share/www/image/run-mini.gif
new file mode 100644
index 00000000..6256ef1d
--- /dev/null
+++ b/share/www/image/run-mini.gif
Binary files differ
diff --git a/share/www/image/run.gif b/share/www/image/run.gif
new file mode 100644
index 00000000..386ee04a
--- /dev/null
+++ b/share/www/image/run.gif
Binary files differ
diff --git a/share/www/image/running.gif b/share/www/image/running.gif
new file mode 100644
index 00000000..b7924420
--- /dev/null
+++ b/share/www/image/running.gif
Binary files differ
diff --git a/share/www/image/save.gif b/share/www/image/save.gif
new file mode 100644
index 00000000..fb713ca7
--- /dev/null
+++ b/share/www/image/save.gif
Binary files differ
diff --git a/share/www/image/spinner.gif b/share/www/image/spinner.gif
new file mode 100644
index 00000000..d84f6537
--- /dev/null
+++ b/share/www/image/spinner.gif
Binary files differ
diff --git a/share/www/image/test_failure.gif b/share/www/image/test_failure.gif
new file mode 100644
index 00000000..2a873b24
--- /dev/null
+++ b/share/www/image/test_failure.gif
Binary files differ
diff --git a/share/www/image/test_success.gif b/share/www/image/test_success.gif
new file mode 100644
index 00000000..6df8bae2
--- /dev/null
+++ b/share/www/image/test_success.gif
Binary files differ
diff --git a/share/www/image/thead-key.gif b/share/www/image/thead-key.gif
new file mode 100644
index 00000000..42a43b58
--- /dev/null
+++ b/share/www/image/thead-key.gif
Binary files differ
diff --git a/share/www/image/thead.gif b/share/www/image/thead.gif
new file mode 100644
index 00000000..1587b1f2
--- /dev/null
+++ b/share/www/image/thead.gif
Binary files differ
diff --git a/share/www/image/toggle-collapse.gif b/share/www/image/toggle-collapse.gif
new file mode 100644
index 00000000..f0979304
--- /dev/null
+++ b/share/www/image/toggle-collapse.gif
Binary files differ
diff --git a/share/www/image/toggle-expand.gif b/share/www/image/toggle-expand.gif
new file mode 100644
index 00000000..03fa8360
--- /dev/null
+++ b/share/www/image/toggle-expand.gif
Binary files differ
diff --git a/share/www/image/twisty.gif b/share/www/image/twisty.gif
new file mode 100644
index 00000000..5ba57a1a
--- /dev/null
+++ b/share/www/image/twisty.gif
Binary files differ
diff --git a/share/www/index.html b/share/www/index.html
new file mode 100644
index 00000000..8616989a
--- /dev/null
+++ b/share/www/index.html
@@ -0,0 +1,95 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Apache CouchDB: Futon Utility Client</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css" type="text/css">
+ <style type="text/css">
+ html { height: 100%; overflow: hidden; }
+ body { background: #fff url(image/bg.png) 100% 0 repeat-y;
+ padding: 0 185px 0 0; height: 100%; overflow: hidden;
+ }
+ * html body { padding-right: 210px; }
+ iframe { background: transparent; border: none; width: 100%; height: 100%; }
+ </style>
+ <script src="script/json2.js"></script>
+ <script src="script/jquery.js"></script>
+ <script src="script/couch.js"></script>
+ <script>
+ function updateDatabaseList() {
+ var dbs = CouchDB.allDbs();
+ $("#dbs").empty();
+ for (var i = 0; i < dbs.length; i++) {
+ var dbname = dbs[i];
+ $("#dbs").append("<li><a href='browse/database.html?" + dbname +
+ "' target='content'>" + dbname + "</a></li>");
+ }
+ }
+ function updateNavigation(path, queryString) {
+ function fixupPath(path) { // hack for IE/Win
+ return (path.charAt(0) != "/") ? ("/" + path) : path;
+ }
+ if (path == null) {
+ var l = frames["content"].location;
+ path = l.pathname;
+ if (queryString == null) {
+ queryString = l.search;
+ }
+ }
+ var href = fixupPath(path + queryString);
+ $("#nav li li a").each(function() {
+ if (fixupPath(this.pathname) + this.search == href) {
+ $(this).parent("li").addClass("selected");
+ } else {
+ $(this).parent("li").removeClass("selected");
+ }
+ });
+ $("#nav>li").each(function() {
+ if ($(this).is(":has(li.selected)")) {
+ $(this).addClass("selected");
+ } else {
+ $(this).removeClass("selected");
+ }
+ });
+ }
+ $(function() {
+ $("#version").text(CouchDB.getVersion());
+ updateDatabaseList();
+ });
+ </script>
+ </head>
+ <body>
+ <a href="browse/index.html" target="content">
+ <img id="logo" src="image/logo.png" width="170" height="151" alt="Apache CouchDB: Relax">
+ </a>
+ <ul id="nav">
+ <li><span>Tools</span><ul>
+ <li><a href="browse/index.html" target="content">Overview</a></li>
+ <li><a href="shell.html" target="content">Javascript Shell</a></li>
+ <li><a href="replicator.html" target="content">Replicator</a></li>
+ <li><a href="couch_tests.html" target="content">Test Suite</a></li>
+ </ul></li>
+ <li><span>Databases</span>
+ <ul id="dbs"></ul>
+ </li>
+ </ul>
+ <div id="footer">Futon on Apache CouchDB <span id="version">?</span></div>
+ <div id="view">
+      <iframe name="content" src="browse/index.html" allowtransparency="true"></iframe>
+ </div>
+ </body>
+</html>
diff --git a/share/www/replicator.html b/share/www/replicator.html
new file mode 100644
index 00000000..36dc3d17
--- /dev/null
+++ b/share/www/replicator.html
@@ -0,0 +1,148 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en">
+ <head>
+ <title>Replicator</title>
+ <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+ <link rel="stylesheet" href="style/layout.css" type="text/css">
+ <script src="script/json2.js"></script>
+ <script src="script/jquery.js"></script>
+ <script src="script/couch.js"></script>
+ <script src="script/pprint.js"></script>
+ <script>
+ $(document).ready(function() {
+ if (window !== parent) parent.updateNavigation();
+
+ $("fieldset input[type=radio]").click(function() {
+ var radio = this;
+ var fieldset = $(this).parents("fieldset").get(0);
+ $("input[type=text]", fieldset).each(function() {
+ this.disabled = radio.value == "local";
+ if (!this.disabled) this.focus();
+ });
+ $("select", fieldset).each(function() {
+ this.disabled = radio.value == "remote";
+ if (!this.disabled) this.focus();
+ });
+ });
+
+ var allDbs = CouchDB.allDbs();
+ $("fieldset select").each(function() {
+ for (var i = 0; i < allDbs.length; i++) {
+ $("<option></option>").text(allDbs[i]).appendTo(this);
+ }
+ this.selectedIndex = 0;
+ });
+
+ $("button#swap").click(function() {
+ var fromName = $("#source select").val();
+ $("#source select").val($("#target select").val());
+ $("#target select").val(fromName);
+
+ var fromUrl = $("#source input[type=text]").val();
+ $("#source input[type=text]").val($("#target input[type=text]").val());
+ $("#target input[type=text]").val(fromUrl);
+
+ var fromType = $("#source input[type=radio]").filter(function() {
+ return this.checked;
+ }).val();
+ var toType = $("#target input[type=radio]").filter(function() {
+ return this.checked;
+ }).val();
+ $("#source input[value=" + toType + "]").click();
+ $("#target input[value=" + fromType + "]").click();
+
+ $("#replicate").get(0).focus();
+ return false;
+ });
+
+ $("button#replicate").click(function() {
+ $("#records tbody.content").empty();
+ var source = $("#from_local")[0].checked ? $("#from_name").val() : $("#from_url").val();
+ var target = $("#to_local")[0].checked ? $("#to_name").val() : $("#to_url").val();
+ try {
+ var results = CouchDB.replicate(source, target);
+ } catch (e) {
+ alert(e.reason);
+ return;
+ }
+ for (var i = 0; i < results.history.length; i++) {
+ var record = results.history[i];
+ $("<tr><th></th><td class='seq'></td>" +
+ "<td class='read'></td><td class='copied'></td></tr>")
+ .find("th").text(record.start_time).end()
+ .find("td.seq").text(record.start_last_seq + "–" + record.end_last_seq).end()
+ .find("td.read").text(record.docs_read + " (" + record.read_errors + " errors)").end()
+ .find("td.copied").text(record.docs_copied + " (" + record.copy_errors + " errors)").end()
+ .appendTo("#records tbody.content");
+ }
+ $("#records tbody tr").removeClass("odd").filter(":odd").addClass("odd");
+ $("#records tbody.footer td").text("Replication session " + results.session_id);
+ });
+ });
+ </script>
+ </head>
+ <body>
+ <h1>
+ <a href="browse/index.html">Overview</a>
+ <strong>Replicator</strong>
+ </h1>
+ <div id="wrap">
+
+ <form id="replicator">
+ <fieldset id="source">
+ <legend>Replicate changes from:</legend>
+ <p>
+ <label><input type="radio" id="from_local" name="from_type" value="local" checked> Local</label>
+ <label>database: <select id="from_name" name="from_name"></select></label>
+ </p><p>
+ <label><input type="radio" id="from_to_remote" name="from_type" value="remote"> Remote</label>
+ <label>database: <input type="text" id="from_url" name="from_url" size="30" value="http://" disabled></label>
+ </p>
+ </fieldset>
+ <p class="swap"><button id="swap" tabindex="99">⇄</button></p>
+ <fieldset id="target">
+ <legend>to:</legend>
+ <p>
+ <label><input type="radio" id="to_local" name="to_type" value="local" checked> Local</label>
+ <label>database: <select id="to_name" name="to_name"></select></label>
+ </p><p>
+ <label><input type="radio" id="to_remote" name="to_type" value="remote"> Remote</label>
+ <label>database: <input type="text" id="to_url" name="to_url" size="30" value="http://" disabled></label>
+ </p>
+ </fieldset>
+ <p class="actions">
+ <button id="replicate" type="button">Replicate</button>
+ </p>
+ </form>
+
+ <table id="records" class="listing">
+ <caption>Replication History</caption>
+ <thead><tr>
+ <th>When</th>
+ <th>Sequences</th>
+ <th>Documents read</th>
+ <th>Documents copied</th>
+ </tr></thead>
+ <tbody class="content"></tbody>
+ <tbody class="footer"><tr>
+ <td colspan="4">No replication</td>
+ </tr></tbody>
+ </table>
+
+ </div>
+ </body>
+</html>
diff --git a/share/www/script/browse.js b/share/www/script/browse.js
new file mode 100644
index 00000000..c1127608
--- /dev/null
+++ b/share/www/script/browse.js
@@ -0,0 +1,632 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+/*
+ * Page class for browse/index.html
+ */
+function CouchIndexPage() {
+ page = this;
+
+ this.addDatabase = function() {
+ $.showDialog("_create_database.html", {
+ submit: function(data) {
+ if (!data.name || data.name.length == 0) {
+ return {name: "Please enter a name."};
+ }
+ try {
+ new CouchDB(data.name).createDb();
+ } catch (e) {
+ return {name: e.reason};
+ }
+ if (window !== parent) parent.setTimeout("updateDatabaseList()", 500);
+ window.open("database.html?" + data.name, "content");
+ }
+ });
+ return false;
+ }
+
+ this.updateDatabaseListing = function() {
+ var allDbs = CouchDB.allDbs();
+ for (var i = 0; i < allDbs.length; i++) {
+ var dbName = allDbs[i];
+ var info = new CouchDB(dbName).info();
+ $("#databases tbody.content").append(
+ "<tr><th><a href='database.html?" + dbName + "'>" +
+ dbName + "</a></th><td>" + info.doc_count +"</td><td>" +
+ info.update_seq + "</td></tr>");
+ $("#databases tbody tr:odd").addClass("odd");
+ $("#databases tbody.footer tr td").text(allDbs.length + " database(s)");
+ }
+ }
+
+}
+
+/*
+ * Page class for browse/database.html
+ */
+function CouchDatabasePage() {
+ var urlParts = location.search.substr(1).split("/");
+ var dbName = urlParts.shift();
+ var viewName = (urlParts.length > 0) ? urlParts.join("/") : null;
+ if (!viewName) {
+ viewName = $.cookies.get(dbName + ".view") || "";
+ } else {
+ $.cookies.set(dbName + ".view", viewName);
+ }
+ var db = new CouchDB(dbName);
+
+ this.dbName = dbName;
+ this.viewName = viewName;
+ this.db = db;
+ this.isDirty = false;
+ page = this;
+
+ this.addDocument = function() {
+ $.showDialog("_create_document.html", {
+ submit: function(data) {
+ try {
+ var result = db.save(data.docid ? {_id: data.docid} : {});
+ } catch (err) {
+ return {docid: err.reason};
+ }
+ location.href = "document.html?" + dbName + "/" + result.id;
+ }
+ });
+ }
+
+ this.deleteDatabase = function() {
+ $.showDialog("_delete_database.html", {
+ submit: function() {
+ db.deleteDb();
+ location.href = "index.html";
+ if (window !== null) {
+ parent.$("#dbs li").filter(function(index) {
+ return $("a", this).text() == dbName;
+ }).remove();
+ }
+ }
+ });
+ }
+
+ this.populateViewEditor = function() {
+ if (viewName.match(/^_design\//)) {
+ page.revertViewChanges();
+ var dirtyTimeout = null;
+ function updateDirtyState() {
+ clearTimeout(dirtyTimeout);
+ dirtyTimeout = setTimeout(function() {
+ var buttons = $("#viewcode button.save, #viewcode button.revert");
+ page.isDirty = $("#viewcode textarea").val() != page.storedViewCode;
+ if (page.isDirty) {
+ buttons.removeAttr("disabled");
+ } else {
+ buttons.attr("disabled", "disabled");
+ }
+ }, 100);
+ }
+ $("#viewcode textarea").bind("input", updateDirtyState);
+ if ($.browser.msie) { // sorry, browser detection
+ $("#viewcode textarea").get(0).onpropertychange = updateDirtyState
+ } else if ($.browser.safari) {
+ $("#viewcode textarea").bind("paste", updateDirtyState)
+ .bind("change", updateDirtyState)
+ .bind("keydown", updateDirtyState)
+ .bind("keypress", updateDirtyState)
+ .bind("keyup", updateDirtyState)
+ .bind("textInput", updateDirtyState);
+ }
+ }
+ }
+
+ this.populateViewsMenu = function() {
+ var designDocs = db.allDocs({startkey: "_design/", endkey: "_design/ZZZ"});
+ $("#switch select").each(function() {
+ this.options.length = 3;
+ for (var i = 0; i < designDocs.rows.length; i++) {
+ var doc = db.open(designDocs.rows[i].id);
+ var optGroup = $("<optgroup></optgroup>").attr("label", doc._id.substr(8));
+ for (var name in doc.views) {
+ if (!doc.views.hasOwnProperty(name)) continue;
+ $("<option></option>").attr("value", doc._id + "/" + name).text(name)
+ .appendTo(optGroup);
+ }
+ optGroup.appendTo(this);
+ }
+ this.autocomplete = false;
+ });
+ }
+
+ this.revertViewChanges = function() {
+ if (!page.storedViewCode) {
+ var viewNameParts = viewName.split("/");
+ var designDocId = viewNameParts[1];
+ var localViewName = viewNameParts[2];
+ var designDoc = db.open(["_design", designDocId].join("/"));
+ if (designDoc) {
+ page.storedViewCode = designDoc.views[localViewName];
+ }
+ }
+ $("#viewcode textarea").val(page.storedViewCode);
+ page.isDirty = false;
+ $("#viewcode button.revert, #viewcode button.save").attr("disabled", "disabled");
+ }
+
+ this.saveViewAs = function() {
+ if (viewName && /^_design/.test(viewName)) {
+ var viewNameParts = viewName.split("/");
+ var designDocId = viewNameParts[1];
+ var localViewName = viewNameParts[2];
+ } else {
+ var designDocId = "", localViewName = ""
+ }
+ $.showDialog("_save_view_as.html", {
+ load: function(elem) {
+ $("#input_docid", elem).val(designDocId).suggest(function(text, callback) {
+ var matches = [];
+ var docs = db.allDocs({
+ count: 10, startkey: "_design/" + text,
+ endkey: "_design/" + text + "ZZZZ"
+ });
+ for (var i = 0; i < docs.rows.length; i++) {
+ matches[i] = docs.rows[i].id.substr(8);
+ }
+ callback(matches);
+ });
+ $("#input_name", elem).val(localViewName).suggest(function(text, callback) {
+ var matches = [];
+ try {
+ var doc = db.open("_design/" + $("#input_docid").val());
+ } catch (err) {
+ return;
+ }
+ if (!doc || !doc.views) return;
+ for (var viewName in doc.views) {
+ if (!doc.views.hasOwnProperty(viewName) || !viewName.match("^" + text)) {
+ continue;
+ }
+ matches.push(viewName);
+ }
+ callback(matches);
+ });
+ },
+ submit: function(data) {
+ if (!data.docid || !data.name) {
+ var errors = {};
+ if (!data.docid) errors.docid = "Please enter a document ID";
+ if (!data.name) errors.name = "Please enter a view name";
+ return errors;
+ }
+ var viewCode = $("#viewcode textarea").val();
+ var docId = ["_design", data.docid].join("/");
+ var designDoc = db.open(docId);
+ if (!designDoc) designDoc = {_id: docId, language: "text/javascript"};
+ if (designDoc.views === undefined) designDoc.views = {};
+ designDoc.views[data.name] = viewCode;
+ db.save(designDoc);
+ page.isDirty = false;
+ location.href = "database.html?" + dbName + "/" + designDoc._id + "/" + data.name;
+ }
+ });
+ }
+
+ this.saveViewChanges = function() {
+ var viewNameParts = viewName.split("/");
+ var designDocId = viewNameParts[1];
+ var localViewName = viewNameParts[2];
+ var designDoc = db.open(["_design", designDocId].join("/"));
+ var viewCode = $("#viewcode textarea").val();
+ designDoc.views[localViewName] = viewCode;
+ db.save(designDoc);
+ page.isDirty = false;
+ $("#viewcode button.revert, #viewcode button.save").attr("disabled", "disabled");
+ }
+
+ this.updateDesignDocLink = function() {
+ if (viewName && /^_design/.test(viewName)) {
+ var docId = "_design/" + viewName.split("/")[1];
+ $("#designdoc-link").attr("href", "document.html?" + dbName + "/" + docId).text(docId);
+ } else {
+ $("#designdoc-link").removeAttr("href").text("");
+ }
+ }
+
+ this.updateDocumentListing = function(options) {
+ if (options === undefined) options = {};
+ if (options.count === undefined) {
+ options.count = parseInt($("#perpage").val(), 10);
+ }
+ if ($("#documents thead th.key").is(".desc")) {
+ options.descending = true;
+ $.cookies.set(dbName + ".desc", "1");
+ } else {
+ if (options.descending !== undefined) delete options.descending;
+ $.cookies.remove(dbName + ".desc");
+ }
+ $("#paging a").unbind();
+ $("#documents tbody.content").empty();
+ this.updateDesignDocLink();
+
+ var result = null;
+ if (!viewName) {
+ $("#switch select").get(0).selectedIndex = 0;
+ result = db.allDocs(options);
+ } else {
+ $("#switch select").each(function() {
+ for (var i = 0; i < this.options.length; i++) {
+ if (this.options[i].value == viewName) {
+ this.selectedIndex = i;
+ break;
+ }
+ }
+ });
+ docs = [];
+ if (viewName == "_temp_view") {
+ $("#viewcode").show().addClass("expanded");
+ var query = $("#viewcode textarea").val();
+ $.cookies.set(db.name + ".query", query);
+ try {
+ result = db.query(query, options);
+ } catch (e) {
+ alert(e.reason ? e.reason : e.message);
+ return;
+ }
+ } else if (viewName == "_design_docs") {
+ options.startkey = options.descending ? "_design/ZZZZ" : "_design/";
+ options.endkey = options.descending ? "_design/" : "_design/ZZZZ";
+ result = db.allDocs(options);
+ } else {
+ $("#viewcode").show();
+ var currentViewCode = $("#viewcode textarea").val();
+ if (currentViewCode != page.storedViewCode) {
+ result = db.query(currentViewCode, options);
+ } else {
+ result = db.view(viewName.substr(8), options);
+ }
+ }
+ }
+ if (result.offset === undefined) {
+ result.offset = 0;
+ }
+ if (result.offset > 0) {
+ $("#paging a.prev").attr("href", "#" + (result.offset - options.count)).click(function() {
+ var firstDoc = result.rows[0];
+ page.updateDocumentListing({
+ startkey: firstDoc.key !== undefined ? firstDoc.key : null,
+ startkey_docid: firstDoc.id,
+ skip: 1,
+ count: -options.count
+ });
+ return false;
+ });
+ } else {
+ $("#paging a.prev").removeAttr("href");
+ }
+ if (result.total_rows - result.offset > options.count) {
+ $("#paging a.next").attr("href", "#" + (result.offset + options.count)).click(function() {
+ var lastDoc = result.rows[result.rows.length - 1];
+ page.updateDocumentListing({
+ startkey: lastDoc.key !== undefined ? lastDoc.key : null,
+ startkey_docid: lastDoc.id,
+ skip: 1,
+ count: options.count
+ });
+ return false;
+ });
+ } else {
+ $("#paging a.next").removeAttr("href");
+ }
+
+ for (var i = 0; i < result.rows.length; i++) {
+ var row = result.rows[i];
+ var tr = $("<tr></tr>");
+ var key = row.key;
+ $("<td class='key'><a href='document.html?" + db.name + "/" + row.id + "'>" +
+ "<em></em><br><span class='docid'>ID:&nbsp;" + row.id +
+ "</span></a></td>").find("em").text(
+ key !== null ? prettyPrintJSON(key, 0, "") : "null"
+ ).end().appendTo(tr);
+ var value = row.value;
+ $("<td class='value'></td>").text(
+ value !== null ? prettyPrintJSON(value, 0, "") : "null"
+ ).appendTo(tr).dblclick(function() {
+ location.href = this.previousSibling.firstChild.href;
+ });
+ tr.appendTo("#documents tbody.content");
+ }
+
+ $("#documents tbody tr:odd").addClass("odd");
+ $("#documents tbody.footer td span").text(
+ "Showing " + Math.min(result.total_rows, result.offset + 1) + "-" +
+ (result.offset + result.rows.length) + " of " + result.total_rows +
+ " document" + (result.total_rows != 1 ? "s" : ""));
+ }
+
+ window.onbeforeunload = function() {
+ $("#switch select").val(viewName);
+ if (page.isDirty) {
+ return "You've made changes to the view code that have not been " +
+ "saved yet.";
+ }
+ }
+
+}
+
+/*
+ * Page class for browse/document.html
+ */
+function CouchDocumentPage() {
+ var urlParts = location.search.substr(1).split("/");
+ var dbName = urlParts.shift();
+ var idParts = urlParts.join("/").split("@", 2);
+ var docId = idParts[0];
+ var docRev = (idParts.length > 1) ? idParts[1] : null;
+ var db = new CouchDB(dbName);
+ var doc = db.open(docId, {revs_info: true});
+ var revs = doc._revs_info;
+ delete doc._revs_info;
+ if (docRev != null) {
+ try {
+ doc = db.open(docId, {rev: docRev});
+ } catch (e) {
+ alert("The requested revision was not found. " +
+ "You will be redirected back to the latest revision.");
+ location.href = "?" + dbName + "/" + docId;
+ return;
+ }
+ }
+
+ this.dbName = dbName;
+ this.db = db;
+ this.doc = doc;
+ this.isDirty = false;
+ page = this;
+
+ this.addField = function() {
+ var fieldName = "unnamed";
+ var fieldIdx = 1;
+ while (doc.hasOwnProperty(fieldName)) {
+ fieldName = "unnamed " + fieldIdx++;
+ }
+ doc[fieldName] = null;
+ var row = _addRowForField(fieldName);
+ page.isDirty = true;
+ _editKey(row.find("th"), fieldName);
+ }
+
+ this.updateFieldListing = function() {
+ $("#fields tbody.content").empty();
+ var propNames = [];
+ for (var prop in doc) {
+ if (!doc.hasOwnProperty(prop)) continue;
+ propNames.push(prop);
+ }
+ // Order properties alphabetically, but put internal fields first
+ propNames.sort(function(a, b) {
+ var a0 = a.charAt(0), b0 = b.charAt(0);
+ if (a0 == "_" && b0 != "_") {
+ return -1;
+ } else if (a0 != "_" && b0 == "_") {
+ return 1;
+ } else {
+ return a < b ? -1 : a != b ? 1 : 0;
+ }
+ });
+ for (var pi = 0; pi < propNames.length; pi++) {
+ _addRowForField(propNames[pi]);
+ }
+ if (revs.length > 1) {
+ var currentIndex = 0;
+ for (var i = 0; i < revs.length; i++) {
+ if (revs[i].rev == doc._rev) {
+ currentIndex = i;
+ break;
+ }
+ }
+ if (currentIndex < revs.length - 1) {
+ var prevRev = revs[currentIndex + 1].rev;
+ $("#paging a.prev").attr("href", "?" + dbName + "/" + docId + "@" + prevRev);
+ }
+ if (currentIndex > 0) {
+ var nextRev = revs[currentIndex - 1].rev;
+ $("#paging a.next").attr("href", "?" + dbName + "/" + docId + "@" + nextRev);
+ }
+ $("#fields tbody.footer td span").text("Showing revision " +
+ (revs.length - currentIndex) + " of " + revs.length);
+ }
+ }
+
+ this.deleteDocument = function() {
+ $.showDialog("_delete_document.html", {
+ submit: function() {
+ db.deleteDoc(doc);
+ location.href = "database.html?" + dbName;
+ }
+ });
+ }
+
+ this.saveDocument = function() {
+ try {
+ db.save(doc);
+ } catch (e) {
+ alert(e.reason);
+ return;
+ }
+ page.isDirty = false;
+ location.href = "?" + dbName + "/" + docId;
+ }
+
+ window.onbeforeunload = function() {
+ if (page.isDirty) {
+ return "You've made changes to this document that have not been " +
+ "saved yet.";
+ }
+ }
+
+ function _addRowForField(fieldName) {
+ var value = _renderValue(doc[fieldName]);
+ var row = $("<tr><th></th><td></td></tr>")
+ .find("th").append($("<b></b>").text(fieldName)).dblclick(function() {
+ _editKey(this, $(this).text());
+ }).end()
+ .find("td").append(value).dblclick(function() {
+ _editValue(this, $(this).prev("th").text());
+ }).end()
+ .appendTo("#fields tbody.content");
+ if (fieldName != "_id" && fieldName != "_rev") {
+ row.find("th, td").attr("title", "Double click to edit");
+ _initKey(row, fieldName);
+ _initValue(value);
+ }
+ $("#fields tbody tr").removeClass("odd").filter(":odd").addClass("odd");
+ return row;
+ }
+
+ function _editKey(cell, fieldName) {
+ if (fieldName == "_id" || fieldName == "_rev") return;
+ var th = $(cell);
+ th.empty();
+ var input = $("<input type='text' spellcheck='false'>");
+ input.dblclick(function() { return false; }).keydown(function(evt) {
+ switch (evt.keyCode) {
+ case 13: applyChange(); break;
+ case 27: cancelChange(); break;
+ }
+ });
+ var tools = $("<div class='tools'></div>");
+ function applyChange() {
+ input.nextAll().remove();
+ var newName = input.val();
+ if (!newName.length || newName == fieldName) {
+ cancelChange();
+ return;
+ }
+ doc[newName] = doc[fieldName];
+ delete doc[fieldName];
+ th.children().remove();
+ th.append($("<b></b>").text(newName));
+ _initKey(th.parent("tr"), fieldName);
+ page.isDirty = true;
+ }
+ function cancelChange() {
+ th.children().remove();
+ th.append($("<b></b>").text(fieldName));
+ _initKey(th.parent("tr"), fieldName);
+ }
+
+ $("<button type='button' class='apply'></button>").click(function() {
+ applyChange();
+ }).appendTo(tools);
+ $("<button type='button' class='cancel'></button>").click(function() {
+ cancelChange();
+ }).appendTo(tools);
+ tools.appendTo(th);
+ input.val(fieldName).appendTo(th);
+ input.each(function() { this.focus(); this.select(); });
+ }
+
+ function _editValue(cell, fieldName) {
+ if (fieldName == "_id" || fieldName == "_rev") return;
+ var td = $(cell);
+ var value = doc[fieldName];
+ var needsTextarea = $("dl", td).length > 0 || $("code", td).text().length > 60;
+ td.empty();
+ if (needsTextarea) {
+ var input = $("<textarea rows='8' cols='40' spellcheck='false'></textarea>");
+ } else {
+ var input = $("<input type='text' spellcheck='false'>");
+ }
+ input.dblclick(function() { return false; }).keydown(function(evt) {
+ switch (evt.keyCode) {
+ case 13: if (!needsTextarea) applyChange(); break;
+ case 27: cancelChange(); break;
+ }
+ });
+ var tools = $("<div class='tools'></div>");
+ function applyChange() {
+ input.nextAll().remove();
+ try {
+ var newValue = input.val() || "null";
+ if (newValue == doc[fieldName]) {
+ cancelChange();
+ return;
+ }
+ doc[fieldName] = JSON.parse(newValue);
+ td.children().remove();
+ page.isDirty = true;
+ var value = _renderValue(doc[fieldName]);
+ td.append(value);
+ _initValue(value);
+ } catch (err) {
+ input.addClass("invalid");
+ var msg = err.message;
+ if (msg == "parseJSON") {
+ msg = "Please enter a valid JSON value (for example, \"string\").";
+ }
+ $("<div class='error'></div>").text(msg).insertAfter(input);
+ }
+ }
+ function cancelChange() {
+ td.children().remove();
+ var value = _renderValue(doc[fieldName]);
+ td.append(value);
+ _initValue(value);
+ }
+
+ $("<button type='button' class='apply'></button>").click(function() {
+ applyChange();
+ }).appendTo(tools);
+ $("<button type='button' class='cancel'></button>").click(function() {
+ cancelChange();
+ }).appendTo(tools);
+ tools.appendTo(td);
+ input.val(prettyPrintJSON(value)).appendTo(td);
+ input.each(function() { this.focus(); this.select(); });
+ if (needsTextarea) input.resizable();
+ }
+
+ function _initKey(row, fieldName) {
+ if (fieldName != "_id" && fieldName != "_rev") {
+ $("<button type='button' class='delete' title='Delete field'></button>").click(function() {
+ delete doc[fieldName];
+ row.remove();
+ page.isDirty = true;
+ $("#fields tbody tr").removeClass("odd").filter(":odd").addClass("odd");
+ }).prependTo(row.find("th"));
+ }
+ }
+
+ function _initValue(value) {
+ value.find("dd").filter(":has(dl)").hide().prev("dt").addClass("collapsed");
+ value.find("dd").not(":has(dl)").addClass("inline").prev().addClass("inline");
+ value.find("dt.collapsed").click(function() {
+ $(this).toggleClass("collapsed").next().toggle();
+ });
+ }
+
+ function _renderValue(value) {
+ var type = typeof(value);
+ if (type == "object" && value !== null) {
+ var list = $("<dl></dl>");
+ for (var i in value) {
+ if (!value.hasOwnProperty(i)) continue;
+ $("<dt></dt>").text(i).appendTo(list);
+ $("<dd></dd>").append(_renderValue(value[i])).appendTo(list);
+ }
+ return list;
+ } else {
+ return $("<code></code>").addClass(type).text(
+ value !== null ? JSON.stringify(value) : "null"
+ );
+ }
+ }
+
+}
diff --git a/share/www/script/couch.js b/share/www/script/couch.js
new file mode 100644
index 00000000..9815882a
--- /dev/null
+++ b/share/www/script/couch.js
@@ -0,0 +1,205 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// A simple class to represent a database. Uses XMLHttpRequest to interface with
+// the CouchDB server.
+
+function CouchDB(name) {
+ this.name = name
+ this.uri = "/" + encodeURIComponent(name) + "/";
+ request = CouchDB.request;
+
+ // Creates the database on the server
+ this.createDb = function() {
+ var req = request("PUT", this.uri);
+ var result = JSON.parse(req.responseText);
+ if (req.status != 201)
+ throw result;
+ return result;
+ }
+
+ // Deletes the database on the server
+ this.deleteDb = function() {
+ var req = request("DELETE", this.uri);
+ if (req.status == 404)
+ return false;
+ var result = JSON.parse(req.responseText);
+ if (req.status != 202)
+ throw result;
+ return result;
+ }
+
+ // Save a document to the database
+ this.save = function(doc, options) {
+ var req;
+ if (doc._id == undefined)
+ req = request("POST", this.uri + encodeOptions(options), {
+ body: JSON.stringify(doc)
+ });
+ else
+ req = request("PUT", this.uri + encodeURIComponent(doc._id) + encodeOptions(options), {
+ body: JSON.stringify(doc)
+ });
+ var result = JSON.parse(req.responseText);
+ if (req.status != 201)
+ throw result;
+ // set the _id and _rev members on the input object, for caller convenience.
+ doc._id = result.id;
+ doc._rev = result.rev;
+ return result;
+ }
+
+ // Open a document from the database
+ this.open = function(docId, options) {
+ var req = request("GET", this.uri + encodeURIComponent(docId) + encodeOptions(options));
+ if (req.status == 404)
+ return null;
+ var result = JSON.parse(req.responseText);
+ if (req.status != 200)
+ throw result;
+ return result;
+ }
+
+ // Deletes a document from the database
+ this.deleteDoc = function(doc) {
+ var req = request("DELETE", this.uri + encodeURIComponent(doc._id) + "?rev=" + doc._rev);
+ var result = JSON.parse(req.responseText);
+ if (req.status != 202)
+ throw result;
+ doc._rev = result.rev; //record rev in input document
+ doc._deleted = true;
+ return result;
+ }
+
+ this.bulkSave = function(docs, options) {
+ var req = request("POST", this.uri + "_bulk_docs" + encodeOptions(options), {
+ body: JSON.stringify({"docs": docs})
+ });
+ var result = JSON.parse(req.responseText);
+ if (req.status != 201)
+ throw result;
+ return result;
+ }
+
+ // Applies the map function to the contents of database and returns the results.
+ this.query = function(mapFun, options) {
+ if (typeof(mapFun) != "string")
+ mapFun = mapFun.toSource ? mapFun.toSource() : "(" + mapFun.toString() + ")";
+ var req = request("POST", this.uri + "_temp_view" + encodeOptions(options), {
+ headers: {"Content-Type": "text/javascript"},
+ body: mapFun
+ });
+ var result = JSON.parse(req.responseText);
+ if (req.status != 200)
+ throw result;
+ return result;
+ }
+
+ this.view = function(viewname, options) {
+ var req = request("GET", this.uri + "_view/" + viewname + encodeOptions(options));
+ if (req.status == 404)
+ return null;
+ var result = JSON.parse(req.responseText);
+ if (req.status != 200)
+ throw result;
+ return result;
+ }
+
+ // gets information about the database
+ this.info = function() {
+ var req = request("GET", this.uri);
+ var result = JSON.parse(req.responseText);
+ if (req.status != 200)
+ throw result;
+ return result;
+ }
+
+ this.allDocs = function(options) {
+ var req = request("GET", this.uri + "_all_docs" + encodeOptions(options));
+ var result = JSON.parse(req.responseText);
+ if (req.status != 200)
+ throw result;
+ return result;
+ }
+
+ // Convert an options object to a URL query string.
+ // ex: {key:'value',key2:'value2'} becomes '?key="value"&key2="value2"'
+ function encodeOptions(options) {
+ var buf = []
+ if (typeof(options) == "object" && options !== null) {
+ for (var name in options) {
+ if (!options.hasOwnProperty(name)) continue;
+ var value = options[name];
+ if (name == "key" || name == "startkey" || name == "endkey") {
+ value = toJSON(value);
+ }
+ buf.push(encodeURIComponent(name) + "=" + encodeURIComponent(value));
+ }
+ }
+ if (!buf.length) {
+ return "";
+ }
+ return "?" + buf.join("&");
+ }
+
+ function toJSON(obj) {
+ return obj !== null ? JSON.stringify(obj) : null;
+ }
+}
+
+CouchDB.allDbs = function() {
+ var req = CouchDB.request("GET", "/_all_dbs");
+ var result = JSON.parse(req.responseText);
+ if (req.status != 200)
+ throw result;
+ return result;
+}
+
+CouchDB.getVersion = function() {
+ var req = CouchDB.request("GET", "/");
+ var result = JSON.parse(req.responseText);
+ if (req.status != 200)
+ throw result;
+ return result.version;
+}
+
+CouchDB.replicate = function(source, target) {
+ var req = CouchDB.request("POST", "/_replicate", {
+ body: JSON.stringify({source: source, target: target})
+ });
+ var result = JSON.parse(req.responseText);
+ if (req.status != 200)
+ throw result;
+ return result;
+}
+
+CouchDB.request = function(method, uri, options) {
+ options = options || {};
+ var req = null;
+ if (typeof(XMLHttpRequest) != "undefined") {
+ req = new XMLHttpRequest();
+ } else if (typeof(ActiveXObject) != "undefined") {
+ req = new ActiveXObject("Microsoft.XMLHTTP");
+ } else {
+ throw new Error("No XMLHTTPRequest support detected");
+ }
+ req.open(method, uri, false);
+ if (options.headers) {
+ var headers = options.headers;
+ for (var headerName in headers) {
+ if (!headers.hasOwnProperty(headerName)) continue;
+ req.setRequestHeader(headerName, headers[headerName]);
+ }
+ }
+ req.send(options.body || null);
+ return req;
+}
diff --git a/share/www/script/couch_tests.js b/share/www/script/couch_tests.js
new file mode 100644
index 00000000..bd159310
--- /dev/null
+++ b/share/www/script/couch_tests.js
@@ -0,0 +1,1027 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var tests = {
+
+ // Do some basic tests.
+ basics: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // Get the database info, check the doc_count
+ T(db.info().doc_count == 0);
+
+ // create a document and save it to the database
+ var doc = {_id:"0",a:1,b:1};
+ var result = db.save(doc);
+
+ T(result.ok==true); // return object has an ok member with a value true
+ T(result.id); // the _id of the document is set.
+ T(result.rev); // the revision id of the document is set.
+
+ // Verify the input doc is now set with the doc id and rev
+ // (for caller convenience).
+ T(doc._id == result.id && doc._rev == result.rev);
+
+ var id = result.id; // save off the id for later
+
+ // Create some more documents.
+ // Notice the use of the ok member on the return result.
+ T(db.save({_id:"1",a:2,b:4}).ok);
+ T(db.save({_id:"2",a:3,b:9}).ok);
+ T(db.save({_id:"3",a:4,b:16}).ok);
+
+ // Check the database doc count
+ T(db.info().doc_count == 4);
+
+ // Check the all docs
+ var results = db.allDocs();
+ var rows = results.rows;
+
+ for(var i=0; i < rows.length; i++) {
+ T(rows[i].id >= "0" && rows[i].id <= "4");
+ }
+
+ // Test a simple map function
+
+ // create a map function that selects all documents whose "a" member
+ // has a value of 4, and then returns the document's b value.
+ var mapFunction = function(doc){
+ if(doc.a==4)
+ map(null, doc.b);
+ };
+
+ results = db.query(mapFunction);
+
+ // verify only one document found and the result value (doc.b).
+ T(results.total_rows == 1 && results.rows[0].value == 16);
+
+ // reopen document we saved earlier
+ existingDoc = db.open(id);
+
+ T(existingDoc.a==1);
+
+ //modify and save
+ existingDoc.a=4;
+ db.save(existingDoc);
+
+ // redo the map query
+ results = db.query(mapFunction);
+
+ // the modified document should now be in the results.
+ T(results.total_rows == 2);
+
+ // write 2 more documents
+ T(db.save({a:3,b:9}).ok);
+ T(db.save({a:4,b:16}).ok);
+
+ results = db.query(mapFunction);
+
+ // 1 more document should now be in the result.
+ T(results.total_rows == 3);
+ T(db.info().doc_count == 6);
+
+ // delete a document
+ T(db.deleteDoc(existingDoc).ok);
+
+ // make sure we can't open the doc
+ T(db.open(existingDoc._id) == null);
+
+ results = db.query(mapFunction);
+
+ // 1 less document should now be in the results.
+ T(results.total_rows == 2);
+ T(db.info().doc_count == 5);
+ },
+
+ // Do some edit conflict detection tests
+ conflicts: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // create a doc and save
+ var doc = {_id:"foo",a:1,b:1};
+ T(db.save(doc).ok);
+
+ // reopen
+ var doc2 = db.open(doc._id);
+
+ // ensure the revisions are the same
+ T(doc._id == doc2._id && doc._rev == doc2._rev);
+
+ // edit the documents.
+ doc.a = 2;
+ doc2.a = 3;
+
+ // save one document
+ T(db.save(doc).ok);
+
+ // save the other document
+ try {
+ db.save(doc2); // this should generate a conflict exception
+ T("no save conflict 1" && false); // we shouldn't hit here
+ } catch (e) {
+ T(e.error == "conflict");
+ }
+
+ // Now clear out the _rev member and save. This indicates this document is
+ // new, not based on an existing revision.
+ doc2._rev = undefined;
+ try {
+ db.save(doc2); // this should generate a conflict exception
+ T("no save conflict 2" && false); // we shouldn't hit here
+ } catch (e) {
+ T(e.error == "conflict");
+ }
+
+ // Now delete the document from the database
+ T(db.deleteDoc(doc).ok);
+
+ T(db.save(doc2).ok); // we can save a new document over a deletion without
+ // knowing the deletion rev.
+ },
+
+ recreate_doc: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // First create a new document with the ID "foo", and delete it again
+ var doc = {_id: "foo", a: "bar", b: 42};
+ T(db.save(doc).ok);
+ T(db.deleteDoc(doc).ok);
+
+ // Now create a new document with the same ID, save it, and then modify it
+ // This should work fine, but currently results in a conflict error, at
+ // least "sometimes"
+ for (var i = 0; i < 10; i++) {
+ doc = {_id: "foo"};
+ T(db.save(doc).ok);
+ doc = db.open("foo");
+ doc.a = "baz";
+ try {
+ T(db.save(doc).ok);
+ } finally {
+ // And now, we can't even delete the document anymore :/
+ T(db.deleteDoc(doc).rev != undefined);
+ }
+ }
+ },
+
+ // test saving a semi-large quantity of documents and do some view queries.
+ lots_of_docs: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // keep number lowish for now to keep tests fast. Crank up manually to
+ // really test.
+ var numDocsToCreate = 500;
+
+ var docs = makeDocs(numDocsToCreate);
+ T(db.bulkSave(docs).ok);
+
+ // query all documents, and return the doc.integer member as a key.
+ results = db.query(function(doc){ map(doc.integer, null) });
+
+ T(results.total_rows == numDocsToCreate);
+
+ // validate the keys are ordered ascending
+ for(var i=0; i<numDocsToCreate; i++) {
+ T(results.rows[i].key==i);
+ }
+
+ // do the query again, but with descending output
+ results = db.query(function(doc){ map(doc.integer, null) }, {descending:true});
+
+ T(results.total_rows == numDocsToCreate);
+
+ // validate the keys are ordered descending
+ for(var i=0; i<numDocsToCreate; i++) {
+ T(results.rows[numDocsToCreate-1-i].key==i);
+ }
+ },
+
+ multiple_rows: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var nc = {_id:"NC", cities:["Charlotte", "Raleigh"]};
+ var ma = {_id:"MA", cities:["Boston", "Lowell", "Worcester", "Cambridge", "Springfield"]};
+ var fl = {_id:"FL", cities:["Miami", "Tampa", "Orlando", "Springfield"]};
+
+ T(db.save(nc).ok);
+ T(db.save(ma).ok);
+ T(db.save(fl).ok);
+
+ var generateListOfCitiesAndState = "function(doc) {" +
+ " for (var i = 0; i < doc.cities.length; i++)" +
+ " map(doc.cities[i] + \", \" + doc._id, null);" +
+ "}";
+
+ var results = db.query(generateListOfCitiesAndState);
+ var rows = results.rows;
+
+ T(rows[0].key == "Boston, MA");
+ T(rows[1].key == "Cambridge, MA");
+ T(rows[2].key == "Charlotte, NC");
+ T(rows[3].key == "Lowell, MA");
+ T(rows[4].key == "Miami, FL");
+ T(rows[5].key == "Orlando, FL");
+ T(rows[6].key == "Raleigh, NC");
+ T(rows[7].key == "Springfield, FL");
+ T(rows[8].key == "Springfield, MA");
+ T(rows[9].key == "Tampa, FL");
+ T(rows[10].key == "Worcester, MA");
+
+ // add another city to NC
+ nc.cities.push("Wilmington");
+ T(db.save(nc).ok);
+
+ var results = db.query(generateListOfCitiesAndState);
+ var rows = results.rows;
+
+ T(rows[0].key == "Boston, MA");
+ T(rows[1].key == "Cambridge, MA");
+ T(rows[2].key == "Charlotte, NC");
+ T(rows[3].key == "Lowell, MA");
+ T(rows[4].key == "Miami, FL");
+ T(rows[5].key == "Orlando, FL");
+ T(rows[6].key == "Raleigh, NC");
+ T(rows[7].key == "Springfield, FL");
+ T(rows[8].key == "Springfield, MA");
+ T(rows[9].key == "Tampa, FL");
+ T(rows[10].key == "Wilmington, NC");
+ T(rows[11].key == "Worcester, MA");
+
+ // now delete MA
+ T(db.deleteDoc(ma).ok);
+
+ var results = db.query(generateListOfCitiesAndState);
+ var rows = results.rows;
+
+ T(rows[0].key == "Charlotte, NC");
+ T(rows[1].key == "Miami, FL");
+ T(rows[2].key == "Orlando, FL");
+ T(rows[3].key == "Raleigh, NC");
+ T(rows[4].key == "Springfield, FL");
+ T(rows[5].key == "Tampa, FL");
+ T(rows[6].key == "Wilmington, NC");
+ },
+
+ large_docs: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var longtext = "0123456789\n";
+
+ for (var i=0; i<10; i++) {
+ longtext = longtext + longtext
+ }
+ T(db.save({"longtest":longtext}).ok);
+ T(db.save({"longtest":longtext}).ok);
+ T(db.save({"longtest":longtext}).ok);
+ T(db.save({"longtest":longtext}).ok);
+
+ // query all documents, emitting the doc.longtest member as the value.
+ results = db.query(function(doc){
+ map(null, doc.longtest);
+ });
+ },
+
+ utf8: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var texts = [];
+
+ texts[0] = "1. Ascii: hello"
+ texts[1] = "2. Russian: На берегу пустынных волн"
+ texts[2] = "3. Math: ∮ E⋅da = Q, n → ∞, ∑ f(i) = ∏ g(i),"
+ texts[3] = "4. Geek: STARGΛ̊TE SG-1"
+ texts[4] = "5. Braille: ⡌⠁⠧⠑ ⠼⠁⠒ ⡍⠜⠇⠑⠹⠰⠎ ⡣⠕⠌"
+
+ // check that we can save and reload with full fidelity
+ for (var i=0; i<texts.length; i++) {
+ T(db.save({_id:i.toString(), text:texts[i]}).ok);
+ }
+
+ for (var i=0; i<texts.length; i++) {
+ T(db.open(i.toString()).text == texts[i]);
+ }
+
+ // check that views and key collation don't blow up
+ var rows = db.query(function(doc) { map(null, doc.text) }).rows;
+ for (var i=0; i<texts.length; i++) {
+ T(rows[i].value == texts[i]);
+ }
+ },
+
+ attachments: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var binAttDoc = {
+ _id:"bin_doc",
+ _attachments:{
+ "foo.txt": {
+ "content-type":"text/plain",
+ "data": "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ }
+
+ T(db.save(binAttDoc).ok);
+
+ var xhr = CouchDB.request("GET", "/test_suite_db/bin_doc/foo.txt");
+ T(xhr.responseText == "This is a base64 encoded text")
+ T(xhr.getResponseHeader("content-type") == "text/plain")
+ },
+
+ content_negotiation: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+ var xhr;
+
+ xhr = CouchDB.request("GET", "/test_suite_db/");
+ T(xhr.getResponseHeader("Content-Type") == "text/plain;charset=utf-8");
+
+ xhr = CouchDB.request("GET", "/test_suite_db/", {
+ headers: {"Accept": "text/html;text/plain;*/*"}
+ });
+ T(xhr.getResponseHeader("Content-Type") == "text/plain;charset=utf-8");
+
+ xhr = CouchDB.request("GET", "/test_suite_db/", {
+ headers: {"Accept": "application/json"}
+ });
+ T(xhr.getResponseHeader("Content-Type") == "application/json");
+ },
+
+ design_docs: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var numDocs = 50;
+
+ var designDoc = {
+ _id:"_design/test",
+ language: "text/javascript",
+ views: {
+ all_docs: "function(doc) { map(doc.integer, null) }",
+ no_docs: "function(doc) {}",
+ single_doc: "function(doc) { if (doc._id == \"1\") { map(1, null) }}"
+ }
+ }
+ T(db.save(designDoc).ok);
+
+ T(db.bulkSave(makeDocs(numDocs)).ok);
+
+ for (var loop = 0; loop < 2; loop++) {
+ var rows = db.view("test/all_docs").rows
+ for (var i=0; i < numDocs; i++) {
+ T(rows[i].key == i);
+ }
+ T(db.view("test/no_docs").total_rows == 0)
+ T(db.view("test/single_doc").total_rows == 1)
+ restartServer();
+ }
+
+ T(db.deleteDoc(designDoc).ok);
+ T(db.open(designDoc._id) == null);
+ T(db.view("test/no_docs") == null);
+
+ restartServer();
+ T(db.open(designDoc._id) == null);
+ T(db.view("test/no_docs") == null);
+ },
+
+ view_collation: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ // NOTE, the values are already in their correct sort order. Consider this
+ // a specification of collation of json types.
+
+ var values = []
+
+ // special values sort before all other types
+ values.push(null)
+ values.push(false)
+ values.push(true)
+
+ // then numbers
+ values.push(1)
+ values.push(2)
+ values.push(3.0)
+ values.push(4)
+
+ // then text, case sensitive
+ values.push("a")
+ values.push("A")
+ values.push("aa")
+ values.push("b")
+ values.push("B")
+ values.push("ba")
+ values.push("bb")
+
+ // then arrays. compared element by element until different.
+ // Longer arrays sort after their prefixes
+ values.push(["a"])
+ values.push(["b"])
+ values.push(["b","c"])
+ values.push(["b","c", "a"])
+ values.push(["b","d"])
+ values.push(["b","d", "e"])
+
+ // then objects, compared key by key until different.
+ // Larger objects sort after their subset objects.
+ values.push({a:1})
+ values.push({a:2})
+ values.push({b:1})
+ values.push({b:2})
+ values.push({b:2, a:1}) // Member order does matter for collation.
+ // CouchDB preserves member order
+ // but doesn't require that clients will.
+ // (this test might fail if used with a js engine
+ // that doesn't preserve order)
+ values.push({b:2, c:2})
+
+ for (var i=0; i<values.length; i++) {
+ db.save({_id:(i).toString(), foo:values[i]});
+ }
+
+ var queryFun = function(doc) { map(doc.foo, null); }
+ var rows = db.query(queryFun).rows;
+ for (i=0; i<values.length; i++) {
+ T(equals(rows[i].key, values[i]))
+ }
+
+ // everything has collated correctly. Now to check the descending output
+ rows = db.query(queryFun, {descending:true}).rows
+ for (i=0; i<values.length; i++) {
+ T(equals(rows[i].key, values[values.length - 1 -i]))
+ }
+
+ // now check the key query args
+ for (i=1; i<values.length; i++) {
+ var queryOptions = {key:values[i]}
+ rows = db.query(queryFun, queryOptions).rows;
+ T(rows.length == 1 && equals(rows[0].key, values[i]))
+ }
+ },
+
+ view_conflicts: function(debug) {
+ var dbA = new CouchDB("test_suite_db_a");
+ dbA.deleteDb();
+ dbA.createDb();
+ var dbB = new CouchDB("test_suite_db_b");
+ dbB.deleteDb();
+ dbB.createDb();
+ if (debug) debugger;
+
+ var docA = {_id: "foo", bar: 42};
+ T(dbA.save(docA).ok);
+ CouchDB.replicate(dbA.name, dbB.name);
+
+ var docB = dbB.open("foo");
+ docB.bar = 43;
+ dbB.save(docB);
+ docA.bar = 41;
+ dbA.save(docA);
+ CouchDB.replicate(dbA.name, dbB.name);
+
+ var doc = dbB.open("foo", {conflicts: true});
+ T(doc._conflicts.length == 1);
+ var conflictRev = doc._conflicts[0];
+ if (doc.bar == 41) { // A won
+ T(conflictRev == docB._rev);
+ } else { // B won
+ T(doc.bar == 43);
+ T(conflictRev == docA._rev);
+ }
+
+ var results = dbB.query(function(doc) {
+ if (doc._conflicts) {
+ map(doc._id, doc._conflicts);
+ }
+ });
+ T(results.rows[0].value[0] == conflictRev);
+ },
+
+ view_pagination: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(100);
+ T(db.bulkSave(docs).ok);
+
+ var queryFun = function(doc) { map(doc.integer, null) };
+ var i;
+
+ // page through the view ascending and going forward
+ for (i = 0; i < docs.length; i += 10) {
+ var queryResults = db.query(queryFun, {startkey:i, startkey_docid:i, count:10});
+ T(queryResults.rows.length == 10)
+ T(queryResults.total_rows == docs.length)
+ T(queryResults.offset == i)
+ var j;
+ for (j = 0; j < 10;j++) {
+ T(queryResults.rows[j].key == i + j);
+ }
+ }
+
+ // page through the view ascending and going backward
+ for (i = docs.length - 1; i >= 0; i -= 10) {
+ var queryResults = db.query(queryFun, {startkey:i, startkey_docid:i,
+ count:-10})
+ T(queryResults.rows.length == 10)
+ T(queryResults.total_rows == docs.length)
+ T(queryResults.offset == i - 9)
+ var j;
+ for (j = 0; j < 10;j++) {
+ T(queryResults.rows[j].key == i - 9 + j);
+ }
+ }
+
+ // page through the view descending and going forward
+ for (i = docs.length - 1; i >= 0; i -= 10) {
+ var queryResults = db.query(queryFun, {startkey:i, startkey_docid:i,
+ descending:true, count:10})
+ T(queryResults.rows.length == 10)
+ T(queryResults.total_rows == docs.length)
+ T(queryResults.offset == docs.length - i - 1)
+ var j;
+ for (j = 0; j < 10; j++) {
+ T(queryResults.rows[j].key == i - j);
+ }
+ }
+
+ // page through the view descending and going backward
+ for (i = 0; i < docs.length; i += 10) {
+ var queryResults = db.query(queryFun, {startkey:i, startkey_docid:i,
+ descending:true, count:-10});
+ T(queryResults.rows.length == 10)
+ T(queryResults.total_rows == docs.length)
+ T(queryResults.offset == docs.length - i - 10)
+ var j;
+ for (j = 0; j < 10; j++) {
+ T(queryResults.rows[j].key == i + 9 - j);
+ }
+ }
+ },
+
+ view_sandboxing: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(2);
+ T(db.bulkSave(docs).ok);
+
+ // make sure that attempting to change the document throws an error
+ var results = db.query(function(doc) {
+ doc._id = "foo";
+ map(null, doc);
+ });
+ T(results.total_rows == 0);
+
+ // make sure that a view cannot invoke interpreter internals such as the
+ // garbage collector
+ var results = db.query(function(doc) {
+ gc();
+ map(null, doc);
+ });
+ T(results.total_rows == 0);
+
+ // make sure that a view cannot access the map_funs array defined and used
+ // by the view server
+ var results = db.query(function(doc) { map_funs.push(1); map(null, doc) });
+ T(results.total_rows == 0);
+
+ // make sure that a view cannot access the map_results array defined and used
+ // by the view server
+ var results = db.query(function(doc) { map_results.push(1); map(null, doc) });
+ T(results.total_rows == 0);
+ },
+
+ view_xml: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ db.save({content: "<doc><title id='xml'>Testing XML</title></doc>"});
+ db.save({content: "<doc><title id='e4x'>Testing E4X</title></doc>"});
+
+ var results = db.query(
+ "function(doc) {\n" +
+ " var xml = new XML(doc.content);\n" +
+ " map(xml.title.text(), null);\n" +
+ "}");
+ T(results.total_rows == 2);
+ T(results.rows[0].key == "Testing E4X");
+ T(results.rows[1].key == "Testing XML");
+
+ var results = db.query(
+ "function(doc) {\n" +
+ " var xml = new XML(doc.content);\n" +
+ " map(xml.title.@id, null);\n" +
+ "}");
+ T(results.total_rows == 2);
+ T(results.rows[0].key == "e4x");
+ T(results.rows[1].key == "xml");
+ },
+
+ replication: function(debug) {
+ if (debug) debugger;
+ var dbPairs = [
+ {source:"test_suite_db_a",
+ target:"test_suite_db_b"},
+ {source:"test_suite_db_a",
+ target:"http://localhost:5984/test_suite_db_b"},
+ {source:"http://localhost:5984/test_suite_db_a",
+ target:"test_suite_db_b"},
+ {source:"http://localhost:5984/test_suite_db_a",
+ target:"http://localhost:5984/test_suite_db_b"}
+ ]
+ var dbA = new CouchDB("test_suite_db_a");
+ var dbB = new CouchDB("test_suite_db_b");
+ var numDocs = 10;
+ var xhr;
+ for (var testPair = 0; testPair < dbPairs.length; testPair++) {
+ var A = dbPairs[testPair].source
+ var B = dbPairs[testPair].target
+
+ dbA.deleteDb();
+ dbA.createDb();
+ dbB.deleteDb();
+ dbB.createDb();
+
+ var docs = makeDocs(numDocs);
+ T(dbA.bulkSave(docs).ok);
+
+ T(CouchDB.replicate(A, B).ok);
+
+ for (var j = 0; j < numDocs; j++) {
+ docA = dbA.open("" + j);
+ docB = dbB.open("" + j);
+ T(docA._rev == docB._rev);
+ }
+
+ // now check binary attachments
+ var binDoc = {
+ _id:"bin_doc",
+ _attachments:{
+ "foo.txt": {
+ "type":"base64",
+ "data": "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ }
+ }
+
+ dbA.save(binDoc);
+
+ T(CouchDB.replicate(A, B).ok);
+ T(CouchDB.replicate(B, A).ok);
+
+ xhr = CouchDB.request("GET", "/test_suite_db_a/bin_doc/foo.txt");
+ T(xhr.responseText == "This is a base64 encoded text")
+
+ xhr = CouchDB.request("GET", "/test_suite_db_b/bin_doc/foo.txt");
+ T(xhr.responseText == "This is a base64 encoded text")
+
+ dbA.save({_id:"foo1",value:"a"});
+
+ T(CouchDB.replicate(A, B).ok);
+ T(CouchDB.replicate(B, A).ok);
+
+ docA = dbA.open("foo1");
+ docB = dbB.open("foo1");
+ T(docA._rev == docB._rev);
+
+ dbA.deleteDoc(docA);
+
+ T(CouchDB.replicate(A, B).ok);
+ T(CouchDB.replicate(B, A).ok);
+
+ T(dbA.open("foo1") == null);
+ T(dbB.open("foo1") == null);
+
+ dbA.save({_id:"foo",value:"a"});
+ dbB.save({_id:"foo",value:"b"});
+
+ T(CouchDB.replicate(A, B).ok);
+ T(CouchDB.replicate(B, A).ok);
+
+ // open documents and include the conflict meta data
+ docA = dbA.open("foo", {conflicts: true});
+ docB = dbB.open("foo", {conflicts: true});
+
+ // make sure the same rev is in each db
+ T(docA._rev === docB._rev);
+
+ // make sure the conflicts are the same in each db
+ T(docA._conflicts[0] === docB._conflicts[0]);
+
+ // delete a conflict.
+ dbA.deleteDoc({_id:"foo", _rev:docA._conflicts[0]});
+
+ // replicate the change
+ T(CouchDB.replicate(A, B).ok);
+
+ // open documents and include the conflict meta data
+ docA = dbA.open("foo", {conflicts: true});
+ docB = dbB.open("foo", {conflicts: true});
+
+ // We should have no conflicts this time
+ T(docA._conflicts === undefined)
+ T(docB._conflicts === undefined);
+ }
+ },
+
+ etags_head: function(debug) {
+ var db = new CouchDB("test_suite_db");
+ db.deleteDb();
+ db.createDb();
+ if (debug) debugger;
+
+ var xhr;
+
+ // create a new doc
+ xhr = CouchDB.request("PUT", "/test_suite_db/1", {
+ body: "{}"
+ });
+ T(xhr.status == 201);
+
+ // extract the ETag header values
+ var etag = xhr.getResponseHeader("etag")
+
+ // get the doc and verify the headers match
+ xhr = CouchDB.request("GET", "/test_suite_db/1");
+ T(etag == xhr.getResponseHeader("etag"));
+
+ // 'head' the doc and verify the headers match
+ xhr = CouchDB.request("HEAD", "/test_suite_db/1", {
+ headers: {"if-none-match": "s"}
+ });
+ T(etag == xhr.getResponseHeader("etag"));
+
+ // replace a doc
+ xhr = CouchDB.request("PUT", "/test_suite_db/1", {
+ body: "{}",
+ headers: {"if-match": etag}
+ });
+ T(xhr.status == 201);
+
+ // extract the new ETag value
+ var etagOld= etag;
+ etag = xhr.getResponseHeader("etag")
+
+ // fail to replace a doc
+ xhr = CouchDB.request("PUT", "/test_suite_db/1", {
+ body: "{}"
+ });
+ T(xhr.status == 412)
+
+ // verify get w/Etag
+ xhr = CouchDB.request("GET", "/test_suite_db/1", {
+ headers: {"if-none-match": etagOld}
+ });
+ T(xhr.status == 200);
+ xhr = CouchDB.request("GET", "/test_suite_db/1", {
+ headers: {"if-none-match": etag}
+ });
+ T(xhr.status == 304);
+
+ // fail to delete a doc
+ xhr = CouchDB.request("DELETE", "/test_suite_db/1", {
+ headers: {"if-match": etagOld}
+ });
+ T(xhr.status == 412);
+
+ //now do it for real
+ xhr = CouchDB.request("DELETE", "/test_suite_db/1", {
+ headers: {"if-match": etag}
+ });
+ T(xhr.status == 202)
+ }
+
+};
+
+function makeDocs(n, templateDoc) {
+ var templateDocSrc = templateDoc ? templateDoc.toSource() : "{}"
+ var docs = []
+ for(var i=0; i<n; i++) {
+ var newDoc = eval("(" + templateDocSrc + ")");
+ newDoc._id = (i).toString();
+ newDoc.integer = i
+ newDoc.string = (i).toString();
+ docs.push(newDoc)
+ }
+ return docs;
+}
+
+// *********************** Test Framework of Sorts ************************* //
+
+function patchTest(fun) {
+ var source = fun.toString();
+ var output = "";
+ var i = 0;
+ var testMarker = "T("
+ while (i < source.length) {
+ var testStart = source.indexOf(testMarker, i);
+ if (testStart == -1) {
+ output = output + source.substring(i, source.length);
+ break;
+ }
+ var testEnd = source.indexOf(");", testStart);
+ var testCode = source.substring(testStart + testMarker.length, testEnd);
+ output += source.substring(i, testStart) + "T(" + testCode + "," + JSON.stringify(testCode);
+ i = testEnd;
+ }
+ try {
+ return eval("(" + output + ")");
+ } catch (e) {
+ return null;
+ }
+}
+
+function runAllTests() {
+ var rows = $("#tests tbody.content tr");
+ $("td", rows).html("&nbsp;");
+ $("td.status", rows).removeClass("error").removeClass("failure").removeClass("success").text("not run");
+ var offset = 0;
+ function runNext() {
+ if (offset < rows.length) {
+ var row = rows.get(offset);
+ runTest($("th button", row).get(0), function() {
+ offset += 1;
+ setTimeout(runNext, 1000);
+ });
+ }
+ }
+ runNext();
+}
+
+var numFailures = 0;
+var currentRow = null;
+
+function runTest(button, callback, debug) {
+ if (currentRow != null) {
+ alert("Can not run multiple tests simultaneously.");
+ return;
+ }
+ var row = currentRow = $(button).parents("tr").get(0);
+ $("td.status", row).removeClass("error").removeClass("failure").removeClass("success");
+ $("td", row).html("&nbsp;");
+ var testFun = tests[row.id];
+ function run() {
+ numFailures = 0;
+ var start = new Date().getTime();
+ try {
+ if (debug == undefined || !debug) {
+ testFun = patchTest(testFun) || testFun;
+ }
+ testFun(debug);
+ var status = numFailures > 0 ? "failure" : "success";
+ } catch (e) {
+ var status = "error";
+ if ($("td.details ol", row).length == 0) {
+ $("<ol></ol>").appendTo($("td.details", row));
+ }
+ $("<li><b>Exception raised:</b> <code class='error'></code></li>")
+ .find("code").text(JSON.stringify(e)).end()
+ .appendTo($("td.details ol", row));
+ if (debug) {
+ currentRow = null;
+ throw e;
+ }
+ }
+ if ($("td.details ol", row).length) {
+ $("<a href='#'>Run with debugger</a>").click(function() {
+ runTest(this, undefined, true);
+ }).prependTo($("td.details ol", row));
+ }
+ var duration = new Date().getTime() - start;
+ $("td.status", row).removeClass("running").addClass(status).text(status);
+ $("td.duration", row).text(duration + "ms");
+ updateTestsFooter();
+ currentRow = null;
+ if (callback) callback();
+ }
+ $("td.status", row).addClass("running").text("running…");
+ setTimeout(run, 100);
+}
+
+function showSource(cell) {
+ var name = $(cell).text();
+ var win = window.open("", name, "width=700,height=500,resizable=yes,scrollbars=yes");
+ win.document.title = name;
+ $("<pre></pre>").text(tests[name].toString()).appendTo(win.document.body).fadeIn();
+}
+
+function updateTestsListing() {
+ for (var name in tests) {
+ if (!tests.hasOwnProperty(name)) continue;
+ var testFunction = tests[name];
+ var row = $("<tr><th></th><td></td><td></td><td></td></tr>")
+ .find("th").text(name).attr("title", "Show source").click(function() {
+ showSource(this);
+ }).end()
+ .find("td:nth(0)").addClass("status").text("not run").end()
+ .find("td:nth(1)").addClass("duration").html("&nbsp;").end()
+ .find("td:nth(2)").addClass("details").html("&nbsp;").end();
+ $("<button type='button' class='run' title='Run test'></button>").click(function() {
+ this.blur();
+ runTest(this);
+ return false;
+ }).prependTo(row.find("th"));
+ row.attr("id", name).appendTo("#tests tbody.content");
+ }
+ $("#tests tr").removeClass("odd").filter(":odd").addClass("odd");
+ updateTestsFooter();
+}
+
+function updateTestsFooter() {
+ var tests = $("#tests tbody.content tr td.status");
+ var testsRun = tests.not(":contains('not run')");
+ var testsFailed = testsRun.not(".success");
+ $("#tests tbody.footer td").text(testsRun.length + " of " + tests.length +
+ " test(s) run, " + testsFailed.length + " failures");
+}
+
+// Use T to perform a test that returns false on failure and if the test fails,
+// display the line that failed.
+// Example:
+// T(MyValue==1);
+function T(arg1, arg2) {
+ if (!arg1) {
+ if (currentRow) {
+ if ($("td.details ol", currentRow).length == 0) {
+ $("<ol></ol>").appendTo($("td.details", currentRow));
+ }
+ $("<li><b>Assertion failed:</b> <code class='failure'></code></li>")
+ .find("code").text((arg2 != null ? arg2 : arg1).toString()).end()
+ .appendTo($("td.details ol", currentRow));
+ }
+ numFailures += 1
+ }
+}
+
+function equals(a,b) {
+ if (a === b) return true;
+ try {
+ return repr(a) === repr(b);
+ } catch (e) {
+ return false;
+ }
+}
+
+function repr(val) {
+ if (val === undefined) {
+ return null;
+ } else if (val === null) {
+ return "null";
+ } else {
+ return JSON.stringify(val);
+ }
+}
+
+function restartServer() {
+ var xhr = CouchDB.request("POST", "/_restart");
+ do {
+ xhr = CouchDB.request("GET", "/");
+ } while(xhr.status != 200);
+}
diff --git a/share/www/script/jquery.cookies.js b/share/www/script/jquery.cookies.js
new file mode 100644
index 00000000..a2817461
--- /dev/null
+++ b/share/www/script/jquery.cookies.js
@@ -0,0 +1,47 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+(function($) {
+ $.cookies = $.cookies || {}
+ $.fn.extend($.cookies, {
+
+ /* Return the value of a cookie. */
+ get: function(name) {
+ var nameEq = name + "=";
+ var parts = document.cookie.split(';');
+ for (var i = 0; i < parts.length; i++) {
+ var part = parts[i].replace(/^\s+/, "");
+ if (part.indexOf(nameEq) == 0) {
+ return unescape(part.substring(nameEq.length, part.length));
+ }
+ }
+ return null;
+ },
+
+ /* Create or update a cookie. */
+ set: function(name, value, days) {
+ var expires = "";
+ if (days) {
+ var date = new Date();
+ date.setTime(date.getTime() + (days * 24*60*60*1000));
+ expires = "; expires=" + date.toGMTString();
+ }
+ document.cookie = name + "=" + escape(value) + expires;
+ },
+
+ /* Remove a cookie. */
+ remove: function(name) {
+ $.cookies.set(name, "", -1);
+ }
+
+ });
+})(jQuery);
diff --git a/share/www/script/jquery.dialog.js b/share/www/script/jquery.dialog.js
new file mode 100644
index 00000000..dc8f9c5c
--- /dev/null
+++ b/share/www/script/jquery.dialog.js
@@ -0,0 +1,92 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+(function($) {
+
+ $.fn.centerBox = function() {
+ return this.each(function() {
+ var s = this.style;
+ s.left = (($(window).width() - $(this).width()) / 2) + "px";
+ s.top = (($(window).height() - $(this).height()) / 2) + "px";
+ });
+ }
+
+ $.showDialog = function(url, options) {
+ options = options || {};
+ options.load = options.load || function() {};
+ options.cancel = options.cancel || function() {};
+ options.validate = options.validate || function() { return true };
+ options.submit = options.submit || function() {};
+
+ var overlay = $('<div id="overlay" style="z-index:1001"></div>')
+ .css("opacity", "0");
+ var dialog = $('<div id="dialog" style="z-index:1002;position:fixed;display:none;"></div>');
+ if ($.browser.msie) {
+ var frame = $('<iframe id="overlay-frame" style="z-index:1000;border:none;margin:0;padding:0;position:absolute;width:100%;height:100%;top:0;left:0" src="javascript:false"></iframe>')
+ .css("opacity", "0").appendTo(document.body);
+ if (parseInt($.browser.version)<7) {
+ dialog.css("position", "absolute");
+ overlay.css("position", "absolute");
+ $("html,body").css({width: "100%", height: "100%"});
+ }
+ }
+ overlay.appendTo(document.body).fadeTo(100, 0.6);
+ dialog.appendTo(document.body).addClass("loading").centerBox().fadeIn(400);
+
+ $(document).keydown(function(e) {
+ if (e.keyCode == 27) dismiss(); // dismiss on escape key
+ });
+ function dismiss() {
+ dialog.fadeOut("fast", function() {
+ $("#dialog, #overlay, #overlay-frame").remove();
+ });
+ $(document).unbind("keydown");
+ }
+ overlay.click(function() { dismiss(); });
+
+ function showError(name, message) {
+ var input = dialog.find(":input[name=" + name + "]");
+ input.addClass("error").next("div.error").remove();
+ $('<div class="error"></div>').text(message).insertAfter(input);
+ }
+
+ $.get(url, function(html) {
+ $(html).appendTo(dialog);
+ dialog.removeClass("loading").addClass("loaded").centerBox().each(function() {
+ options.load(dialog.children()[0]);
+ $(":input:first", dialog).each(function() { this.focus() });
+ $("button.cancel", dialog).click(function() { // dismiss on cancel
+ dismiss();
+ options.cancel();
+ });
+ $("form", dialog).submit(function(e) { // invoke callback on submit
+ e.preventDefault();
+ dialog.find("div.error").remove().end().find(".error").removeClass("error");
+ var data = {};
+ $.each($("form :input", dialog).serializeArray(), function(i, field) {
+ data[field.name] = field.value;
+ });
+        var errors = options.submit(data) || {};
+        var errorCount = 0;
+        for (var name in errors) {
+          errorCount += 1;
+          showError(name, errors[name]);
+        }
+        // dismiss only when the submit callback reported no field errors
+        if (errorCount == 0) dismiss();
+ return false;
+ });
+ });
+ });
+ }
+
+})(jQuery);
diff --git a/share/www/script/jquery.js b/share/www/script/jquery.js
new file mode 100644
index 00000000..2e43a823
--- /dev/null
+++ b/share/www/script/jquery.js
@@ -0,0 +1,3408 @@
+(function(){
+/*
+ * jQuery 1.2.3 - New Wave Javascript
+ *
+ * Copyright (c) 2008 John Resig (jquery.com)
+ * Dual licensed under the MIT (MIT-LICENSE.txt)
+ * and GPL (GPL-LICENSE.txt) licenses.
+ *
+ * $Date: 2008-02-06 00:21:25 -0500 (Wed, 06 Feb 2008) $
+ * $Rev: 4663 $
+ */
+
+// Map over jQuery in case of overwrite
+if ( window.jQuery )
+ var _jQuery = window.jQuery;
+
+var jQuery = window.jQuery = function( selector, context ) {
+ // The jQuery object is actually just the init constructor 'enhanced'
+ return new jQuery.prototype.init( selector, context );
+};
+
+// Map over the $ in case of overwrite
+if ( window.$ )
+ var _$ = window.$;
+
+// Map the jQuery namespace to the '$' one
+window.$ = jQuery;
+
+// A simple way to check for HTML strings or ID strings
+// (both of which we optimize for)
+var quickExpr = /^[^<]*(<(.|\s)+>)[^>]*$|^#(\w+)$/;
+
+// Is it a simple selector
+var isSimple = /^.[^:#\[\.]*$/;
+
+jQuery.fn = jQuery.prototype = {
+ init: function( selector, context ) {
+ // Make sure that a selection was provided
+ selector = selector || document;
+
+ // Handle $(DOMElement)
+ if ( selector.nodeType ) {
+ this[0] = selector;
+ this.length = 1;
+ return this;
+
+ // Handle HTML strings
+ } else if ( typeof selector == "string" ) {
+ // Are we dealing with HTML string or an ID?
+ var match = quickExpr.exec( selector );
+
+ // Verify a match, and that no context was specified for #id
+ if ( match && (match[1] || !context) ) {
+
+ // HANDLE: $(html) -> $(array)
+ if ( match[1] )
+ selector = jQuery.clean( [ match[1] ], context );
+
+ // HANDLE: $("#id")
+ else {
+ var elem = document.getElementById( match[3] );
+
+ // Make sure an element was located
+ if ( elem )
+ // Handle the case where IE and Opera return items
+ // by name instead of ID
+ if ( elem.id != match[3] )
+ return jQuery().find( selector );
+
+ // Otherwise, we inject the element directly into the jQuery object
+ else {
+ this[0] = elem;
+ this.length = 1;
+ return this;
+ }
+
+ else
+ selector = [];
+ }
+
+ // HANDLE: $(expr, [context])
+ // (which is just equivalent to: $(content).find(expr)
+ } else
+ return new jQuery( context ).find( selector );
+
+ // HANDLE: $(function)
+ // Shortcut for document ready
+ } else if ( jQuery.isFunction( selector ) )
+ return new jQuery( document )[ jQuery.fn.ready ? "ready" : "load" ]( selector );
+
+ return this.setArray(
+ // HANDLE: $(array)
+ selector.constructor == Array && selector ||
+
+ // HANDLE: $(arraylike)
+ // Watch for when an array-like object, contains DOM nodes, is passed in as the selector
+ (selector.jquery || selector.length && selector != window && !selector.nodeType && selector[0] != undefined && selector[0].nodeType) && jQuery.makeArray( selector ) ||
+
+ // HANDLE: $(*)
+ [ selector ] );
+ },
+
+ // The current version of jQuery being used
+ jquery: "1.2.3",
+
+ // The number of elements contained in the matched element set
+ size: function() {
+ return this.length;
+ },
+
+ // The number of elements contained in the matched element set
+ length: 0,
+
+ // Get the Nth element in the matched element set OR
+ // Get the whole matched element set as a clean array
+ get: function( num ) {
+ return num == undefined ?
+
+ // Return a 'clean' array
+ jQuery.makeArray( this ) :
+
+ // Return just the object
+ this[ num ];
+ },
+
+ // Take an array of elements and push it onto the stack
+ // (returning the new matched element set)
+ pushStack: function( elems ) {
+ // Build a new jQuery matched element set
+ var ret = jQuery( elems );
+
+ // Add the old object onto the stack (as a reference)
+ ret.prevObject = this;
+
+ // Return the newly-formed element set
+ return ret;
+ },
+
+ // Force the current matched set of elements to become
+ // the specified array of elements (destroying the stack in the process)
+ // You should use pushStack() in order to do this, but maintain the stack
+ setArray: function( elems ) {
+ // Resetting the length to 0, then using the native Array push
+ // is a super-fast way to populate an object with array-like properties
+ this.length = 0;
+ Array.prototype.push.apply( this, elems );
+
+ return this;
+ },
+
+ // Execute a callback for every element in the matched set.
+ // (You can seed the arguments with an array of args, but this is
+ // only used internally.)
+ each: function( callback, args ) {
+ return jQuery.each( this, callback, args );
+ },
+
+ // Determine the position of an element within
+ // the matched set of elements
+ index: function( elem ) {
+ var ret = -1;
+
+ // Locate the position of the desired element
+ this.each(function(i){
+ if ( this == elem )
+ ret = i;
+ });
+
+ return ret;
+ },
+
+ attr: function( name, value, type ) {
+ var options = name;
+
+ // Look for the case where we're accessing a style value
+ if ( name.constructor == String )
+ if ( value == undefined )
+ return this.length && jQuery[ type || "attr" ]( this[0], name ) || undefined;
+
+ else {
+ options = {};
+ options[ name ] = value;
+ }
+
+ // Check to see if we're setting style values
+ return this.each(function(i){
+ // Set all the styles
+ for ( name in options )
+ jQuery.attr(
+ type ?
+ this.style :
+ this,
+ name, jQuery.prop( this, options[ name ], type, i, name )
+ );
+ });
+ },
+
+ // Get/set a computed CSS property; delegates to attr() with type "curCSS".
+ css: function( key, value ) {
+ // ignore negative width and height values
+ if ( (key == 'width' || key == 'height') && parseFloat(value) < 0 )
+ value = undefined;
+ return this.attr( key, value, "curCSS" );
+ },
+
+ // With an argument: replace the contents with a single text node.
+ // Without: concatenate the text of all descendant text nodes
+ // (comment nodes, nodeType 8, are skipped).
+ text: function( text ) {
+ if ( typeof text != "object" && text != null )
+ return this.empty().append( (this[0] && this[0].ownerDocument || document).createTextNode( text ) );
+
+ var ret = "";
+
+ jQuery.each( text || this, function(){
+ jQuery.each( this.childNodes, function(){
+ if ( this.nodeType != 8 )
+ ret += this.nodeType != 1 ?
+ this.nodeValue :
+ jQuery.fn.text( [ this ] );
+ });
+ });
+
+ return ret;
+ },
+
+ // Wrap a single structure (html) around ALL matched elements at once:
+ // the clone of `html` is inserted before the first element, then the
+ // whole matched set is appended at its innermost descendant.
+ wrapAll: function( html ) {
+ if ( this[0] )
+ // The elements to wrap the target around
+ jQuery( html, this[0].ownerDocument )
+ .clone()
+ .insertBefore( this[0] )
+ .map(function(){
+ var elem = this;
+
+ // Descend to the deepest first-child — the insertion point
+ while ( elem.firstChild )
+ elem = elem.firstChild;
+
+ return elem;
+ })
+ .append(this);
+
+ return this;
+ },
+
+ // Wrap a structure around the CONTENTS of each matched element.
+ wrapInner: function( html ) {
+ return this.each(function(){
+ jQuery( this ).contents().wrapAll( html );
+ });
+ },
+
+ // Wrap a structure around EACH matched element individually.
+ wrap: function( html ) {
+ return this.each(function(){
+ jQuery( this ).wrapAll( html );
+ });
+ },
+
+ // Insert content at the end of each matched element (element nodes only).
+ append: function() {
+ return this.domManip(arguments, true, false, function(elem){
+ if (this.nodeType == 1)
+ this.appendChild( elem );
+ });
+ },
+
+ // Insert content at the beginning of each matched element.
+ prepend: function() {
+ return this.domManip(arguments, true, true, function(elem){
+ if (this.nodeType == 1)
+ this.insertBefore( elem, this.firstChild );
+ });
+ },
+
+ // Insert content before each matched element (as a sibling).
+ before: function() {
+ return this.domManip(arguments, false, false, function(elem){
+ this.parentNode.insertBefore( elem, this );
+ });
+ },
+
+ // Insert content after each matched element (as a sibling).
+ after: function() {
+ return this.domManip(arguments, false, true, function(elem){
+ this.parentNode.insertBefore( elem, this.nextSibling );
+ });
+ },
+
+ // Step back to the previous set on the pushStack() chain,
+ // or an empty set if there is none.
+ end: function() {
+ return this.prevObject || jQuery( [] );
+ },
+
+ // Run a selector in the context of every matched element and push the
+ // combined result. Duplicates are only pruned when the selector can
+ // match the same node from two contexts (sibling/ancestor combinators).
+ find: function( selector ) {
+ var elems = jQuery.map(this, function(elem){
+ return jQuery.find( selector, elem );
+ });
+
+ return this.pushStack( /[^+>] [^+>]/.test( selector ) || selector.indexOf("..") > -1 ?
+ jQuery.unique( elems ) :
+ elems );
+ },
+
+ // Deep-copy the matched elements; pass events === true to also copy
+ // the jQuery-bound event handlers onto the clones.
+ clone: function( events ) {
+ // Do the clone
+ var ret = this.map(function(){
+ if ( jQuery.browser.msie && !jQuery.isXMLDoc(this) ) {
+ // IE copies events bound via attachEvent when
+ // using cloneNode. Calling detachEvent on the
+ // clone will also remove the events from the original
+ // In order to get around this, we use innerHTML.
+ // Unfortunately, this means some modifications to
+ // attributes in IE that are actually only stored
+ // as properties will not be copied (such as the
+ // name attribute on an input).
+ var clone = this.cloneNode(true),
+ container = document.createElement("div");
+ container.appendChild(clone);
+ return jQuery.clean([container.innerHTML])[0];
+ } else
+ return this.cloneNode(true);
+ });
+
+ // Need to set the expando to null on the cloned set if it exists
+ // removeData doesn't work here, IE removes it from the original as well
+ // this is primarily for IE but the data expando shouldn't be copied over in any browser
+ var clone = ret.find("*").andSelf().each(function(){
+ if ( this[ expando ] != undefined )
+ this[ expando ] = null;
+ });
+
+ // Copy the events from the original to the clone
+ if ( events === true )
+ this.find("*").andSelf().each(function(i){
+ // Text nodes carry no events
+ if (this.nodeType == 3)
+ return;
+ var events = jQuery.data( this, "events" );
+
+ for ( var type in events )
+ for ( var handler in events[ type ] )
+ jQuery.event.add( clone[ i ], type, events[ type ][ handler ], events[ type ][ handler ].data );
+ });
+
+ // Return the cloned set
+ return ret;
+ },
+
+ // Reduce the set to elements matching `selector`, which may be a
+ // selector expression or a predicate function (called with the index).
+ filter: function( selector ) {
+ return this.pushStack(
+ jQuery.isFunction( selector ) &&
+ jQuery.grep(this, function(elem, i){
+ return selector.call( elem, i );
+ }) ||
+
+ jQuery.multiFilter( selector, this ) );
+ },
+
+ // Remove elements matching `selector` (a selector string, an element,
+ // or an array-like collection of elements) from the set.
+ not: function( selector ) {
+ if ( selector.constructor == String )
+ // test special case where just one selector is passed in
+ if ( isSimple.test( selector ) )
+ return this.pushStack( jQuery.multiFilter( selector, this, true ) );
+ else
+ selector = jQuery.multiFilter( selector, this );
+
+ // Heuristic: treat as array-like when it has a length, a defined last
+ // entry, and is not itself a DOM node
+ var isArrayLike = selector.length && selector[selector.length - 1] !== undefined && !selector.nodeType;
+ return this.filter(function() {
+ return isArrayLike ? jQuery.inArray( this, selector ) < 0 : this != selector;
+ });
+ },
+
+ // Add more elements (selector string, element, or collection) to the set.
+ add: function( selector ) {
+ return !selector ? this : this.pushStack( jQuery.merge(
+ this.get(),
+ selector.constructor == String ?
+ jQuery( selector ).get() :
+ selector.length != undefined && (!selector.nodeName || jQuery.nodeName(selector, "form")) ?
+ selector : [selector] ) );
+ },
+
+ // True if at least one matched element satisfies the selector.
+ is: function( selector ) {
+ return selector ?
+ jQuery.multiFilter( selector, this ).length > 0 :
+ false;
+ },
+
+ // True if any matched element carries the given class name.
+ hasClass: function( selector ) {
+ return this.is( "." + selector );
+ },
+
+ // Get the value of the first matched form element, or set the value
+ // of every matched form element. Multi-selects return an array;
+ // setting with an array (de)selects options / checks radios+checkboxes.
+ val: function( value ) {
+ if ( value == undefined ) {
+
+ if ( this.length ) {
+ var elem = this[0];
+
+ // We need to handle select boxes special
+ if ( jQuery.nodeName( elem, "select" ) ) {
+ var index = elem.selectedIndex,
+ values = [],
+ options = elem.options,
+ one = elem.type == "select-one";
+
+ // Nothing was selected
+ if ( index < 0 )
+ return null;
+
+ // Loop through all the selected options
+ for ( var i = one ? index : 0, max = one ? index + 1 : options.length; i < max; i++ ) {
+ var option = options[ i ];
+
+ if ( option.selected ) {
+ // Get the specific value for the option
+ // (IE reports option.value even when no value attribute was written)
+ value = jQuery.browser.msie && !option.attributes.value.specified ? option.text : option.value;
+
+ // We don't need an array for one selects
+ if ( one )
+ return value;
+
+ // Multi-Selects return an array
+ values.push( value );
+ }
+ }
+
+ return values;
+
+ // Everything else, we just grab the value
+ } else
+ return (this[0].value || "").replace(/\r/g, "");
+
+ }
+
+ return undefined;
+ }
+
+ return this.each(function(){
+ // Only element nodes can carry form values
+ if ( this.nodeType != 1 )
+ return;
+
+ if ( value.constructor == Array && /radio|checkbox/.test( this.type ) )
+ this.checked = (jQuery.inArray(this.value, value) >= 0 ||
+ jQuery.inArray(this.name, value) >= 0);
+
+ else if ( jQuery.nodeName( this, "select" ) ) {
+ var values = value.constructor == Array ?
+ value :
+ [ value ];
+
+ jQuery( "option", this ).each(function(){
+ this.selected = (jQuery.inArray( this.value, values ) >= 0 ||
+ jQuery.inArray( this.text, values ) >= 0);
+ });
+
+ if ( !values.length )
+ this.selectedIndex = -1;
+
+ } else
+ this.value = value;
+ });
+ },
+
+ // Get the innerHTML of the first element (null for an empty set),
+ // or replace the contents of every element with `value`.
+ html: function( value ) {
+ return value == undefined ?
+ (this.length ?
+ this[0].innerHTML :
+ null) :
+ this.empty().append( value );
+ },
+
+ // Replace each matched element with the given content.
+ replaceWith: function( value ) {
+ return this.after( value ).remove();
+ },
+
+ // Reduce the set to the single element at index i.
+ eq: function( i ) {
+ return this.slice( i, i + 1 );
+ },
+
+ // Array.prototype.slice over the matched set, pushed as a new set.
+ slice: function() {
+ return this.pushStack( Array.prototype.slice.apply( this, arguments ) );
+ },
+
+ // Map each element through callback(index, elem) into a new set.
+ map: function( callback ) {
+ return this.pushStack( jQuery.map(this, function(elem, i){
+ return callback.call( elem, i, elem );
+ }));
+ },
+
+ // Union of the current set with the previous set on the chain.
+ andSelf: function() {
+ return this.add( this.prevObject );
+ },
+
+ // Namespaced data get/set: "key.ns" fires getData/setData events
+ // suffixed with the namespace; falls back to jQuery.data storage.
+ data: function( key, value ){
+ var parts = key.split(".");
+ parts[1] = parts[1] ? "." + parts[1] : "";
+
+ if ( value == null ) {
+ // Let bound handlers supply the value first
+ var data = this.triggerHandler("getData" + parts[1] + "!", [parts[0]]);
+
+ if ( data == undefined && this.length )
+ data = jQuery.data( this[0], key );
+
+ // Fall back from "key.ns" to plain "key" when nothing was found
+ return data == null && parts[1] ?
+ this.data( parts[0] ) :
+ data;
+ } else
+ return this.trigger("setData" + parts[1] + "!", [parts[0], value]).each(function(){
+ jQuery.data( this, key, value );
+ });
+ },
+
+ // Remove stored data under `key` from every matched element.
+ removeData: function( key ){
+ return this.each(function(){
+ jQuery.removeData( this, key );
+ });
+ },
+
+ // Core worker for append/prepend/before/after.
+ // args: content arguments; table: redirect <tr> insertion into <tbody>;
+ // reverse: reverse the cleaned nodes (for prepend/after); callback:
+ // does the actual insertion with `this` bound to the target node.
+ domManip: function( args, table, reverse, callback ) {
+ // With multiple targets the nodes must be cloned for each one
+ var clone = this.length > 1, elems;
+
+ return this.each(function(){
+ if ( !elems ) {
+ elems = jQuery.clean( args, this.ownerDocument );
+
+ if ( reverse )
+ elems.reverse();
+ }
+
+ var obj = this;
+
+ // Inserting a <tr> into a <table>: target (or create) its <tbody>
+ if ( table && jQuery.nodeName( this, "table" ) && jQuery.nodeName( elems[0], "tr" ) )
+ obj = this.getElementsByTagName("tbody")[0] || this.appendChild( this.ownerDocument.createElement("tbody") );
+
+ var scripts = jQuery( [] );
+
+ jQuery.each(elems, function(){
+ var elem = clone ?
+ jQuery( this ).clone( true )[0] :
+ this;
+
+ // execute all scripts after the elements have been injected
+ if ( jQuery.nodeName( elem, "script" ) ) {
+ scripts = scripts.add( elem );
+ } else {
+ // Remove any inner scripts for later evaluation
+ if ( elem.nodeType == 1 )
+ scripts = scripts.add( jQuery( "script", elem ).remove() );
+
+ // Inject the elements into the document
+ callback.call( obj, elem );
+ }
+ });
+
+ scripts.each( evalScript );
+ });
+ }
+};
+
+// Give the init function the jQuery prototype for later instantiation
+jQuery.prototype.init.prototype = jQuery.prototype;
+
+// Execute a <script> element collected by domManip: fetch external
+// scripts synchronously via jQuery.ajax, eval inline ones globally,
+// then detach the node from the document.
+function evalScript( i, elem ) {
+ if ( elem.src )
+ jQuery.ajax({
+ url: elem.src,
+ async: false,
+ dataType: "script"
+ });
+
+ else
+ jQuery.globalEval( elem.text || elem.textContent || elem.innerHTML || "" );
+
+ if ( elem.parentNode )
+ elem.parentNode.removeChild( elem );
+}
+
+// Merge the contents of one or more objects into the first.
+// extend(target, ...srcs) merges into target; extend(obj) merges into
+// `this` (jQuery or jQuery.fn); extend(true, target, ...) merges deeply.
+// Returns the modified target.
+jQuery.extend = jQuery.fn.extend = function() {
+ // copy reference to target object
+ var target = arguments[0] || {}, i = 1, length = arguments.length, deep = false, options;
+
+ // Handle a deep copy situation
+ if ( target.constructor == Boolean ) {
+ deep = target;
+ target = arguments[1] || {};
+ // skip the boolean and the target
+ i = 2;
+ }
+
+ // Handle case when target is a string or something (possible in deep copy)
+ if ( typeof target != "object" && typeof target != "function" )
+ target = {};
+
+ // extend jQuery itself if only one argument is passed
+ if ( length == 1 ) {
+ target = this;
+ i = 0;
+ }
+
+ for ( ; i < length; i++ )
+ // Only deal with non-null/undefined values
+ if ( (options = arguments[ i ]) != null )
+ // Extend the base object
+ for ( var name in options ) {
+ // Prevent never-ending loop
+ if ( target === options[ name ] )
+ continue;
+
+ // Recurse if we're merging object values
+ // (DOM nodes are copied by reference, never merged)
+ if ( deep && options[ name ] && typeof options[ name ] == "object" && target[ name ] && !options[ name ].nodeType )
+ target[ name ] = jQuery.extend( target[ name ], options[ name ] );
+
+ // Don't bring in undefined values
+ else if ( options[ name ] != undefined )
+ target[ name ] = options[ name ];
+
+ }
+
+ // Return the modified object
+ return target;
+};
+
+// Per-page-load unique expando property name used by the data cache,
+// plus the element id counter and the surrogate store for `window`
+// (window itself cannot safely carry the expando).
+var expando = "jQuery" + (new Date()).getTime(), uuid = 0, windowData = {};
+
+// exclude the following css properties to add px
+var exclude = /z-?index|font-?weight|opacity|zoom|line-?height/i;
+
+jQuery.extend({
+ // Relinquish control of the $ (and, with deep, jQuery) global,
+ // restoring whatever values they held before jQuery loaded.
+ noConflict: function( deep ) {
+ window.$ = _$;
+
+ if ( deep )
+ window.jQuery = _jQuery;
+
+ return jQuery;
+ },
+
+ // See test/unit/core.js for details concerning this function.
+ isFunction: function( fn ) {
+ return !!fn && typeof fn != "string" && !fn.nodeName &&
+ fn.constructor != Array && /function/i.test( fn + "" );
+ },
+
+ // check if an element is in a (or is an) XML document
+ isXMLDoc: function( elem ) {
+ return elem.documentElement && !elem.body ||
+ elem.tagName && elem.ownerDocument && !elem.ownerDocument.body;
+ },
+
+ // Evaluates a script in a global context
+ globalEval: function( data ) {
+ data = jQuery.trim( data );
+
+ if ( data ) {
+ // Inspired by code by Andrea Giammarchi
+ // http://webreflection.blogspot.com/2007/08/global-scope-evaluation-and-dom.html
+ var head = document.getElementsByTagName("head")[0] || document.documentElement,
+ script = document.createElement("script");
+
+ script.type = "text/javascript";
+ // IE executes script via .text; others need an appended text node
+ if ( jQuery.browser.msie )
+ script.text = data;
+ else
+ script.appendChild( document.createTextNode( data ) );
+
+ head.appendChild( script );
+ head.removeChild( script );
+ }
+ },
+
+ // Case-insensitive tag-name comparison.
+ nodeName: function( elem, name ) {
+ return elem.nodeName && elem.nodeName.toUpperCase() == name.toUpperCase();
+ },
+
+ // Central data store, keyed by the per-element uuid (see expando above).
+ cache: {},
+
+ // Low-level data access: with no name returns the element's cache id,
+ // with a name gets (or, with data, sets) the named cache entry.
+ data: function( elem, name, data ) {
+ elem = elem == window ?
+ windowData :
+ elem;
+
+ var id = elem[ expando ];
+
+ // Compute a unique ID for the element
+ if ( !id )
+ id = elem[ expando ] = ++uuid;
+
+ // Only generate the data cache if we're
+ // trying to access or manipulate it
+ if ( name && !jQuery.cache[ id ] )
+ jQuery.cache[ id ] = {};
+
+ // Prevent overriding the named cache with undefined values
+ if ( data != undefined )
+ jQuery.cache[ id ][ name ] = data;
+
+ // Return the named cache data, or the ID for the element
+ return name ?
+ jQuery.cache[ id ][ name ] :
+ id;
+ },
+
+ // Remove one named entry (or, with no name, all data and the expando)
+ // for an element.
+ removeData: function( elem, name ) {
+ elem = elem == window ?
+ windowData :
+ elem;
+
+ var id = elem[ expando ];
+
+ // If we want to remove a specific section of the element's data
+ if ( name ) {
+ if ( jQuery.cache[ id ] ) {
+ // Remove the section of cache data
+ delete jQuery.cache[ id ][ name ];
+
+ // If we've removed all the data, remove the element's cache
+ name = "";
+
+ for ( name in jQuery.cache[ id ] )
+ break;
+
+ if ( !name )
+ jQuery.removeData( elem );
+ }
+
+ // Otherwise, we want to remove all of the element's data
+ } else {
+ // Clean up the element expando
+ try {
+ delete elem[ expando ];
+ } catch(e){
+ // IE has trouble directly removing the expando
+ // but it's ok with using removeAttribute
+ if ( elem.removeAttribute )
+ elem.removeAttribute( expando );
+ }
+
+ // Completely remove the data cache
+ delete jQuery.cache[ id ];
+ }
+ },
+
+ // Generic iterator over objects (by key) or array-likes (by index).
+ // Returning false from the callback stops the iteration.
+ // args is for internal usage only
+ each: function( object, callback, args ) {
+ if ( args ) {
+ if ( object.length == undefined ) {
+ for ( var name in object )
+ if ( callback.apply( object[ name ], args ) === false )
+ break;
+ } else
+ for ( var i = 0, length = object.length; i < length; i++ )
+ if ( callback.apply( object[ i ], args ) === false )
+ break;
+
+ // A special, fast, case for the most common use of each
+ } else {
+ if ( object.length == undefined ) {
+ for ( var name in object )
+ if ( callback.call( object[ name ], name, object[ name ] ) === false )
+ break;
+ } else
+ for ( var i = 0, length = object.length, value = object[0];
+ i < length && callback.call( value, i, value ) !== false; value = object[++i] ){}
+ }
+
+ return object;
+ },
+
+ // Normalize a value before assignment: resolve function values, and
+ // append "px" to bare numbers for CSS properties that take lengths.
+ prop: function( elem, value, type, i, name ) {
+ // Handle executable functions
+ if ( jQuery.isFunction( value ) )
+ value = value.call( elem, i );
+
+ // Handle passing in a number to a CSS property
+ // (the `exclude` regexp lists unitless properties)
+ return value && value.constructor == Number && type == "curCSS" && !exclude.test( name ) ?
+ value + "px" :
+ value;
+ },
+
+ className: {
+ // internal only, use addClass("class")
+ add: function( elem, classNames ) {
+ jQuery.each((classNames || "").split(/\s+/), function(i, className){
+ if ( elem.nodeType == 1 && !jQuery.className.has( elem.className, className ) )
+ elem.className += (elem.className ? " " : "") + className;
+ });
+ },
+
+ // internal only, use removeClass("class")
+ // With classNames undefined the whole className is cleared.
+ remove: function( elem, classNames ) {
+ if (elem.nodeType == 1)
+ elem.className = classNames != undefined ?
+ jQuery.grep(elem.className.split(/\s+/), function(className){
+ return !jQuery.className.has( classNames, className );
+ }).join(" ") :
+ "";
+ },
+
+ // internal only, use is(".class")
+ // Accepts either an element or a raw className string as `elem`.
+ has: function( elem, className ) {
+ return jQuery.inArray( className, (elem.className || elem).toString().split(/\s+/) ) > -1;
+ }
+ },
+
+ // A method for quickly swapping in/out CSS properties to get correct calculations
+ swap: function( elem, options, callback ) {
+ var old = {};
+ // Remember the old values, and insert the new ones
+ for ( var name in options ) {
+ old[ name ] = elem.style[ name ];
+ elem.style[ name ] = options[ name ];
+ }
+
+ callback.call( elem );
+
+ // Revert the old values
+ for ( var name in options )
+ elem.style[ name ] = old[ name ];
+ },
+
+ // Computed style lookup; width/height are returned as content-box
+ // pixel numbers (offset size minus padding and border), measured via
+ // swap() when the element is hidden.
+ css: function( elem, name, force ) {
+ if ( name == "width" || name == "height" ) {
+ var val, props = { position: "absolute", visibility: "hidden", display:"block" }, which = name == "width" ? [ "Left", "Right" ] : [ "Top", "Bottom" ];
+
+ // Measure offset size and subtract padding + border on both sides
+ function getWH() {
+ val = name == "width" ? elem.offsetWidth : elem.offsetHeight;
+ var padding = 0, border = 0;
+ jQuery.each( which, function() {
+ padding += parseFloat(jQuery.curCSS( elem, "padding" + this, true)) || 0;
+ border += parseFloat(jQuery.curCSS( elem, "border" + this + "Width", true)) || 0;
+ });
+ val -= Math.round(padding + border);
+ }
+
+ if ( jQuery(elem).is(":visible") )
+ getWH();
+ else
+ // Hidden elements report 0; briefly show them off-screen to measure
+ jQuery.swap( elem, props, getWH );
+
+ return Math.max(0, val);
+ }
+
+ return jQuery.curCSS( elem, name, force );
+ },
+
+ // Read a single current style value, working around IE opacity
+ // filters, an Opera display bug, and Safari's broken computed styles
+ // on display:none subtrees. With force, skip inline style values.
+ curCSS: function( elem, name, force ) {
+ var ret;
+
+ // A helper method for determining if an element's values are broken
+ // (Safari returns empty computed styles inside display:none subtrees)
+ function color( elem ) {
+ if ( !jQuery.browser.safari )
+ return false;
+
+ var ret = document.defaultView.getComputedStyle( elem, null );
+ return !ret || ret.getPropertyValue("color") == "";
+ }
+
+ // We need to handle opacity special in IE
+ if ( name == "opacity" && jQuery.browser.msie ) {
+ ret = jQuery.attr( elem.style, "opacity" );
+
+ return ret == "" ?
+ "1" :
+ ret;
+ }
+ // Opera sometimes will give the wrong display answer, this fixes it, see #2037
+ if ( jQuery.browser.opera && name == "display" ) {
+ var save = elem.style.outline;
+ elem.style.outline = "0 solid black";
+ elem.style.outline = save;
+ }
+
+ // Make sure we're using the right name for getting the float value
+ if ( name.match( /float/i ) )
+ name = styleFloat;
+
+ if ( !force && elem.style && elem.style[ name ] )
+ ret = elem.style[ name ];
+
+ else if ( document.defaultView && document.defaultView.getComputedStyle ) {
+
+ // Only "float" is needed here
+ if ( name.match( /float/i ) )
+ name = "float";
+
+ // getPropertyValue expects the hyphenated CSS name
+ name = name.replace( /([A-Z])/g, "-$1" ).toLowerCase();
+
+ var getComputedStyle = document.defaultView.getComputedStyle( elem, null );
+
+ if ( getComputedStyle && !color( elem ) )
+ ret = getComputedStyle.getPropertyValue( name );
+
+ // If the element isn't reporting its values properly in Safari
+ // then some display: none elements are involved
+ else {
+ var swap = [], stack = [];
+
+ // Locate all of the parent display: none elements
+ for ( var a = elem; a && color(a); a = a.parentNode )
+ stack.unshift(a);
+
+ // Go through and make them visible, but in reverse
+ // (It would be better if we knew the exact display type that they had)
+ for ( var i = 0; i < stack.length; i++ )
+ if ( color( stack[ i ] ) ) {
+ swap[ i ] = stack[ i ].style.display;
+ stack[ i ].style.display = "block";
+ }
+
+ // Since we flip the display style, we have to handle that
+ // one special, otherwise get the value
+ ret = name == "display" && swap[ stack.length - 1 ] != null ?
+ "none" :
+ ( getComputedStyle && getComputedStyle.getPropertyValue( name ) ) || "";
+
+ // Finally, revert the display styles back
+ for ( var i = 0; i < swap.length; i++ )
+ if ( swap[ i ] != null )
+ stack[ i ].style.display = swap[ i ];
+ }
+
+ // We should always get a number back from opacity
+ if ( name == "opacity" && ret == "" )
+ ret = "1";
+
+ } else if ( elem.currentStyle ) {
+ // IE path: currentStyle uses camelCase property names
+ var camelCase = name.replace(/\-(\w)/g, function(all, letter){
+ return letter.toUpperCase();
+ });
+
+ ret = elem.currentStyle[ name ] || elem.currentStyle[ camelCase ];
+
+ // From the awesome hack by Dean Edwards
+ // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291
+
+ // If we're not dealing with a regular pixel number
+ // but a number that has a weird ending, we need to convert it to pixels
+ if ( !/^\d+(px)?$/i.test( ret ) && /^\d/.test( ret ) ) {
+ // Remember the original values
+ var style = elem.style.left, runtimeStyle = elem.runtimeStyle.left;
+
+ // Put in the new values to get a computed value out
+ elem.runtimeStyle.left = elem.currentStyle.left;
+ elem.style.left = ret || 0;
+ ret = elem.style.pixelLeft + "px";
+
+ // Revert the changed values
+ elem.style.left = style;
+ elem.runtimeStyle.left = runtimeStyle;
+ }
+ }
+
+ return ret;
+ },
+
+ // Convert an array of mixed input (HTML strings, numbers, nodes,
+ // node lists) into a flat array of DOM nodes created in `context`.
+ clean: function( elems, context ) {
+ var ret = [];
+ context = context || document;
+ // !context.createElement fails in IE with an error but returns typeof 'object'
+ if (typeof context.createElement == 'undefined')
+ context = context.ownerDocument || context[0] && context[0].ownerDocument || document;
+
+ jQuery.each(elems, function(i, elem){
+ if ( !elem )
+ return;
+
+ if ( elem.constructor == Number )
+ elem = elem.toString();
+
+ // Convert html string into DOM nodes
+ if ( typeof elem == "string" ) {
+ // Fix "XHTML"-style tags in all browsers
+ // (self-closing non-void tags become open/close pairs)
+ elem = elem.replace(/(<(\w+)[^>]*?)\/>/g, function(all, front, tag){
+ return tag.match(/^(abbr|br|col|img|input|link|meta|param|hr|area|embed)$/i) ?
+ all :
+ front + "></" + tag + ">";
+ });
+
+ // Trim whitespace, otherwise indexOf won't work as expected
+ var tags = jQuery.trim( elem ).toLowerCase(), div = context.createElement("div");
+
+ // Some fragments (options, table parts, …) are only valid inside
+ // a specific parent; wrap[0] counts how deep to descend afterwards
+ var wrap =
+ // option or optgroup
+ !tags.indexOf("<opt") &&
+ [ 1, "<select multiple='multiple'>", "</select>" ] ||
+
+ !tags.indexOf("<leg") &&
+ [ 1, "<fieldset>", "</fieldset>" ] ||
+
+ tags.match(/^<(thead|tbody|tfoot|colg|cap)/) &&
+ [ 1, "<table>", "</table>" ] ||
+
+ !tags.indexOf("<tr") &&
+ [ 2, "<table><tbody>", "</tbody></table>" ] ||
+
+ // <thead> matched above
+ (!tags.indexOf("<td") || !tags.indexOf("<th")) &&
+ [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ] ||
+
+ !tags.indexOf("<col") &&
+ [ 2, "<table><tbody></tbody><colgroup>", "</colgroup></table>" ] ||
+
+ // IE can't serialize <link> and <script> tags normally
+ jQuery.browser.msie &&
+ [ 1, "div<div>", "</div>" ] ||
+
+ [ 0, "", "" ];
+
+ // Go to html and back, then peel off extra wrappers
+ div.innerHTML = wrap[1] + elem + wrap[2];
+
+ // Move to the right depth
+ while ( wrap[0]-- )
+ div = div.lastChild;
+
+ // Remove IE's autoinserted <tbody> from table fragments
+ if ( jQuery.browser.msie ) {
+
+ // String was a <table>, *may* have spurious <tbody>
+ var tbody = !tags.indexOf("<table") && tags.indexOf("<tbody") < 0 ?
+ div.firstChild && div.firstChild.childNodes :
+
+ // String was a bare <thead> or <tfoot>
+ wrap[1] == "<table>" && tags.indexOf("<tbody") < 0 ?
+ div.childNodes :
+ [];
+
+ for ( var j = tbody.length - 1; j >= 0 ; --j )
+ if ( jQuery.nodeName( tbody[ j ], "tbody" ) && !tbody[ j ].childNodes.length )
+ tbody[ j ].parentNode.removeChild( tbody[ j ] );
+
+ // IE completely kills leading whitespace when innerHTML is used
+ if ( /^\s/.test( elem ) )
+ div.insertBefore( context.createTextNode( elem.match(/^\s*/)[0] ), div.firstChild );
+
+ }
+
+ elem = jQuery.makeArray( div.childNodes );
+ }
+
+ if ( elem.length === 0 && (!jQuery.nodeName( elem, "form" ) && !jQuery.nodeName( elem, "select" )) )
+ return;
+
+ // forms and selects have a length of their own — push them whole
+ if ( elem[0] == undefined || jQuery.nodeName( elem, "form" ) || elem.options )
+ ret.push( elem );
+
+ else
+ ret = jQuery.merge( ret, elem );
+
+ });
+
+ return ret;
+ },
+
+ // Low-level attribute/property get/set on a single element; when
+ // passed elem.style instead of an element it operates on style
+ // properties (including the IE alpha-filter opacity emulation).
+ attr: function( elem, name, value ) {
+ // don't set attributes on text and comment nodes
+ if (!elem || elem.nodeType == 3 || elem.nodeType == 8)
+ return undefined;
+
+ // jQuery.props maps HTML attribute names to DOM property names;
+ // XML documents get no such remapping
+ var fix = jQuery.isXMLDoc( elem ) ?
+ {} :
+ jQuery.props;
+
+ // Safari mis-reports the default selected property of a hidden option
+ // Accessing the parent's selectedIndex property fixes it
+ if ( name == "selected" && jQuery.browser.safari )
+ elem.parentNode.selectedIndex;
+
+ // Certain attributes only work when accessed via the old DOM 0 way
+ if ( fix[ name ] ) {
+ if ( value != undefined )
+ elem[ fix[ name ] ] = value;
+
+ return elem[ fix[ name ] ];
+
+ } else if ( jQuery.browser.msie && name == "style" )
+ return jQuery.attr( elem.style, "cssText", value );
+
+ else if ( value == undefined && jQuery.browser.msie && jQuery.nodeName( elem, "form" ) && (name == "action" || name == "method") )
+ return elem.getAttributeNode( name ).nodeValue;
+
+ // IE elem.getAttribute passes even for style
+ else if ( elem.tagName ) {
+
+ if ( value != undefined ) {
+ // We can't allow the type property to be changed (since it causes problems in IE)
+ if ( name == "type" && jQuery.nodeName( elem, "input" ) && elem.parentNode )
+ throw "type property can't be changed";
+
+ // convert the value to a string (all browsers do this but IE) see #1070
+ elem.setAttribute( name, "" + value );
+ }
+
+ // getAttribute(name, 2) makes IE return the literal href/src
+ // instead of the resolved absolute URL
+ if ( jQuery.browser.msie && /href|src/.test( name ) && !jQuery.isXMLDoc( elem ) )
+ return elem.getAttribute( name, 2 );
+
+ return elem.getAttribute( name );
+
+ // elem is actually elem.style ... set the style
+ } else {
+ // IE actually uses filters for opacity
+ if ( name == "opacity" && jQuery.browser.msie ) {
+ if ( value != undefined ) {
+ // IE has trouble with opacity if it does not have layout
+ // Force it by setting the zoom level
+ elem.zoom = 1;
+
+ // Set the alpha filter to set the opacity
+ elem.filter = (elem.filter || "").replace( /alpha\([^)]*\)/, "" ) +
+ (parseFloat( value ).toString() == "NaN" ? "" : "alpha(opacity=" + value * 100 + ")");
+ }
+
+ return elem.filter && elem.filter.indexOf("opacity=") >= 0 ?
+ (parseFloat( elem.filter.match(/opacity=([^)]*)/)[1] ) / 100).toString() :
+ "";
+ }
+
+ // style properties use camelCase names
+ name = name.replace(/-([a-z])/ig, function(all, letter){
+ return letter.toUpperCase();
+ });
+
+ if ( value != undefined )
+ elem[ name ] = value;
+
+ return elem[ name ];
+ }
+ },
+
+ // Strip leading and trailing whitespace; null/undefined become "".
+ trim: function( text ) {
+ return (text || "").replace( /^\s+|\s+$/g, "" );
+ },
+
+ // Copy an array-like object into a true Array.
+ makeArray: function( array ) {
+ var ret = [];
+
+ // Need to use typeof to fight Safari childNodes crashes
+ // NOTE(review): typeof never yields "array" in JavaScript, so the
+ // else branch below appears unreachable — the index loop always runs.
+ if ( typeof array != "array" )
+ for ( var i = 0, length = array.length; i < length; i++ )
+ ret.push( array[ i ] );
+ else
+ ret = array.slice( 0 );
+
+ return ret;
+ },
+
+ // First index of elem in array (loose == comparison), or -1.
+ inArray: function( elem, array ) {
+ for ( var i = 0, length = array.length; i < length; i++ )
+ if ( array[ i ] == elem )
+ return i;
+
+ return -1;
+ },
+
+ // Append the contents of `second` onto `first` (mutates and returns first).
+ merge: function( first, second ) {
+ // We have to loop this way because IE & Opera overwrite the length
+ // expando of getElementsByTagName
+
+ // Also, we need to make sure that the correct elements are being returned
+ // (IE returns comment nodes in a '*' query)
+ if ( jQuery.browser.msie ) {
+ for ( var i = 0; second[ i ]; i++ )
+ if ( second[ i ].nodeType != 8 )
+ first.push( second[ i ] );
+
+ } else
+ for ( var i = 0; second[ i ]; i++ )
+ first.push( second[ i ] );
+
+ return first;
+ },
+
+ // De-duplicate an array of elements using their data-cache ids;
+ // on any failure (e.g. non-element entries) returns the input as-is.
+ unique: function( array ) {
+ var ret = [], done = {};
+
+ try {
+
+ for ( var i = 0, length = array.length; i < length; i++ ) {
+ var id = jQuery.data( array[ i ] );
+
+ if ( !done[ id ] ) {
+ done[ id ] = true;
+ ret.push( array[ i ] );
+ }
+ }
+
+ } catch( e ) {
+ ret = array;
+ }
+
+ return ret;
+ },
+
+ // Filter elems by callback(elem, i); inv inverts the test.
+ grep: function( elems, callback, inv ) {
+ var ret = [];
+
+ // Go through the array, only saving the items
+ // that pass the validator function
+ for ( var i = 0, length = elems.length; i < length; i++ )
+ if ( !inv && callback( elems[ i ], i ) || inv && !callback( elems[ i ], i ) )
+ ret.push( elems[ i ] );
+
+ return ret;
+ },
+
+ // Map elems through callback(elem, i); null/undefined results are
+ // dropped and array results are flattened one level.
+ map: function( elems, callback ) {
+ var ret = [];
+
+ // Go through the array, translating each of the items to their
+ // new value (or values).
+ for ( var i = 0, length = elems.length; i < length; i++ ) {
+ var value = callback( elems[ i ], i );
+
+ if ( value !== null && value != undefined ) {
+ if ( value.constructor != Array )
+ value = [ value ];
+
+ ret = ret.concat( value );
+ }
+ }
+
+ return ret;
+ }
+});
+
+var userAgent = navigator.userAgent.toLowerCase();
+
+// Figure out what browser is being used
+// (user-agent sniffing; safari here really means any WebKit engine)
+jQuery.browser = {
+ version: (userAgent.match( /.+(?:rv|it|ra|ie)[\/: ]([\d.]+)/ ) || [])[1],
+ safari: /webkit/.test( userAgent ),
+ opera: /opera/.test( userAgent ),
+ msie: /msie/.test( userAgent ) && !/opera/.test( userAgent ),
+ mozilla: /mozilla/.test( userAgent ) && !/(compatible|webkit)/.test( userAgent )
+};
+
+// Name of the CSS float style property, which differs per engine
+var styleFloat = jQuery.browser.msie ?
+ "styleFloat" :
+ "cssFloat";
+
+jQuery.extend({
+ // Check to see if the W3C box model is being used
+ boxModel: !jQuery.browser.msie || document.compatMode == "CSS1Compat",
+
+ // Attribute-name → DOM-property-name remapping used by jQuery.attr
+ props: {
+ "for": "htmlFor",
+ "class": "className",
+ "float": styleFloat,
+ cssFloat: styleFloat,
+ styleFloat: styleFloat,
+ innerHTML: "innerHTML",
+ className: "className",
+ value: "value",
+ disabled: "disabled",
+ checked: "checked",
+ readonly: "readOnly",
+ selected: "selected",
+ maxlength: "maxLength",
+ selectedIndex: "selectedIndex",
+ defaultValue: "defaultValue",
+ tagName: "tagName",
+ nodeName: "nodeName"
+ }
+});
+
+// Generate the DOM-traversal methods (parent, next, siblings, …):
+// each entry maps one element to its related node(s); the generated
+// jQuery.fn method maps the whole set, optionally filters by selector,
+// de-duplicates and pushes the result.
+jQuery.each({
+ parent: function(elem){return elem.parentNode;},
+ parents: function(elem){return jQuery.dir(elem,"parentNode");},
+ next: function(elem){return jQuery.nth(elem,2,"nextSibling");},
+ prev: function(elem){return jQuery.nth(elem,2,"previousSibling");},
+ nextAll: function(elem){return jQuery.dir(elem,"nextSibling");},
+ prevAll: function(elem){return jQuery.dir(elem,"previousSibling");},
+ siblings: function(elem){return jQuery.sibling(elem.parentNode.firstChild,elem);},
+ children: function(elem){return jQuery.sibling(elem.firstChild);},
+ contents: function(elem){return jQuery.nodeName(elem,"iframe")?elem.contentDocument||elem.contentWindow.document:jQuery.makeArray(elem.childNodes);}
+}, function(name, fn){
+ jQuery.fn[ name ] = function( selector ) {
+ var ret = jQuery.map( this, fn );
+
+ if ( selector && typeof selector == "string" )
+ ret = jQuery.multiFilter( selector, ret );
+
+ return this.pushStack( jQuery.unique( ret ) );
+ };
+});
+
+// Generate the reversed insertion methods: x.appendTo(y) is
+// jQuery(y).append(x), and likewise for the other four pairs.
+jQuery.each({
+ appendTo: "append",
+ prependTo: "prepend",
+ insertBefore: "before",
+ insertAfter: "after",
+ replaceAll: "replaceWith"
+}, function(name, original){
+ jQuery.fn[ name ] = function() {
+ var args = arguments;
+
+ return this.each(function(){
+ for ( var i = 0, length = args.length; i < length; i++ )
+ jQuery( args[ i ] )[ original ]( this );
+ });
+ };
+});
+
+// Generate per-element manipulation methods: each function below runs
+// once per matched element (via this.each with the caller's arguments).
+jQuery.each({
+ removeAttr: function( name ) {
+ // Clear the DOM-0 property first, then the attribute itself
+ jQuery.attr( this, name, "" );
+ if (this.nodeType == 1)
+ this.removeAttribute( name );
+ },
+
+ addClass: function( classNames ) {
+ jQuery.className.add( this, classNames );
+ },
+
+ removeClass: function( classNames ) {
+ jQuery.className.remove( this, classNames );
+ },
+
+ toggleClass: function( classNames ) {
+ jQuery.className[ jQuery.className.has( this, classNames ) ? "remove" : "add" ]( this, classNames );
+ },
+
+ // Detach the element (if it matches the optional selector), clearing
+ // bound events and data on it and its descendants first.
+ remove: function( selector ) {
+ if ( !selector || jQuery.filter( selector, [ this ] ).r.length ) {
+ // Prevent memory leaks
+ jQuery( "*", this ).add(this).each(function(){
+ jQuery.event.remove(this);
+ jQuery.removeData(this);
+ });
+ if (this.parentNode)
+ this.parentNode.removeChild( this );
+ }
+ },
+
+ empty: function() {
+ // Remove element nodes and prevent memory leaks
+ jQuery( ">*", this ).remove();
+
+ // Remove any remaining nodes
+ while ( this.firstChild )
+ this.removeChild( this.firstChild );
+ }
+}, function(name, fn){
+ jQuery.fn[ name ] = function(){
+ return this.each( fn, arguments );
+ };
+});
+
+jQuery.each([ "Height", "Width" ], function(i, name){
+ var type = name.toLowerCase();
+
+ jQuery.fn[ type ] = function( size ) {
+ // Get window width or height
+ return this[0] == window ?
+ // Opera reports document.body.client[Width/Height] properly in both quirks and standards
+ jQuery.browser.opera && document.body[ "client" + name ] ||
+
+ // Safari reports inner[Width/Height] just fine (Mozilla and Opera include scroll bar widths)
+ jQuery.browser.safari && window[ "inner" + name ] ||
+
+ // Everyone else use document.documentElement or document.body depending on Quirks vs Standards mode
+ document.compatMode == "CSS1Compat" && document.documentElement[ "client" + name ] || document.body[ "client" + name ] :
+
+ // Get document width or height
+ this[0] == document ?
+ // Either scroll[Width/Height] or offset[Width/Height], whichever is greater
+ Math.max(
+ Math.max(document.body["scroll" + name], document.documentElement["scroll" + name]),
+ Math.max(document.body["offset" + name], document.documentElement["offset" + name])
+ ) :
+
+ // Get or set width or height on the element
+ size == undefined ?
+ // Get width or height on the element
+ (this.length ? jQuery.css( this[0], type ) : null) :
+
+ // Set the width or height on the element (default to pixels if value is unitless)
+ this.css( type, size.constructor == String ? size : size + "px" );
+ };
+});
+
+var chars = jQuery.browser.safari && parseInt(jQuery.browser.version) < 417 ?
+ "(?:[\\w*_-]|\\\\.)" :
+ "(?:[\\w\u0128-\uFFFF*_-]|\\\\.)",
+ quickChild = new RegExp("^>\\s*(" + chars + "+)"),
+ quickID = new RegExp("^(" + chars + "+)(#)(" + chars + "+)"),
+ quickClass = new RegExp("^([#.]?)(" + chars + "*)");
+
+// Selector predicate table used by jQuery.filter. Each entry maps a
+// selector prefix ("" = tag, "#" = id, ":" = pseudo) to a predicate
+// called as fn(a, i, m, r): a = candidate element, i = its index in the
+// current result set, m = the regex match for the token, r = the whole
+// result set. Truthy return keeps the element.
+jQuery.extend({
+	expr: {
+		"": function(a,i,m){return m[2]=="*"||jQuery.nodeName(a,m[2]);},
+		"#": function(a,i,m){return a.getAttribute("id")==m[2];},
+		":": {
+			// Position Checks (m[3] coerced to number via "-0")
+			lt: function(a,i,m){return i<m[3]-0;},
+			gt: function(a,i,m){return i>m[3]-0;},
+			nth: function(a,i,m){return m[3]-0==i;},
+			eq: function(a,i,m){return m[3]-0==i;},
+			first: function(a,i){return i==0;},
+			last: function(a,i,m,r){return i==r.length-1;},
+			even: function(a,i){return i%2==0;},
+			odd: function(a,i){return i%2;},
+
+			// Child Checks
+			"first-child": function(a){return a.parentNode.getElementsByTagName("*")[0]==a;},
+			"last-child": function(a){return jQuery.nth(a.parentNode.lastChild,1,"previousSibling")==a;},
+			"only-child": function(a){return !jQuery.nth(a.parentNode.lastChild,2,"previousSibling");},
+
+			// Parent Checks
+			parent: function(a){return a.firstChild;},
+			empty: function(a){return !a.firstChild;},
+
+			// Text Check
+			contains: function(a,i,m){return (a.textContent||a.innerText||jQuery(a).text()||"").indexOf(m[3])>=0;},
+
+			// Visibility (also treats input[type=hidden] as hidden)
+			visible: function(a){return "hidden"!=a.type&&jQuery.css(a,"display")!="none"&&jQuery.css(a,"visibility")!="hidden";},
+			hidden: function(a){return "hidden"==a.type||jQuery.css(a,"display")=="none"||jQuery.css(a,"visibility")=="hidden";},
+
+			// Form attributes
+			enabled: function(a){return !a.disabled;},
+			disabled: function(a){return a.disabled;},
+			checked: function(a){return a.checked;},
+			selected: function(a){return a.selected||jQuery.attr(a,"selected");},
+
+			// Form elements
+			text: function(a){return "text"==a.type;},
+			radio: function(a){return "radio"==a.type;},
+			checkbox: function(a){return "checkbox"==a.type;},
+			file: function(a){return "file"==a.type;},
+			password: function(a){return "password"==a.type;},
+			submit: function(a){return "submit"==a.type;},
+			image: function(a){return "image"==a.type;},
+			reset: function(a){return "reset"==a.type;},
+			button: function(a){return "button"==a.type||jQuery.nodeName(a,"button");},
+			input: function(a){return /input|select|textarea|button/i.test(a.nodeName);},
+
+			// :has()
+			has: function(a,i,m){return jQuery.find(m[3],a).length;},
+
+			// :header
+			header: function(a){return /h\d/i.test(a.nodeName);},
+
+			// :animated — element has a running animation timer
+			animated: function(a){return jQuery.grep(jQuery.timers,function(fn){return a==fn.elem;}).length;}
+		}
+	},
+
+	// The regular expressions that power the parsing engine; tried in
+	// order by jQuery.filter, and their match arrays are handed to the
+	// jQuery.expr predicates above.
+	parse: [
+		// Match: [@value='test'], [@foo]
+		/^(\[) *@?([\w-]+) *([!*$^~=]*) *('?"?)(.*?)\4 *\]/,
+
+		// Match: :contains('foo')
+		/^(:)([\w-]+)\("?'?(.*?(\(.*?\))?[^(]*?)"?'?\)/,
+
+		// Match: :even, :last-child, #id, .class
+		new RegExp("^([:.#]*)(" + chars + "+)")
+	],
+
+	// Apply jQuery.filter across a comma-separated selector expression,
+	// consuming one branch per iteration. Results are merged (OR) across
+	// branches; when `not` is set, each branch further narrows `elems`.
+	multiFilter: function( expr, elems, not ) {
+		var old, cur = [];
+
+		// Stop when the expression is exhausted or stops shrinking
+		// (i.e. an unparsable remainder), to avoid an infinite loop.
+		while ( expr && expr != old ) {
+			old = expr;
+			var f = jQuery.filter( expr, elems, not );
+			expr = f.t.replace(/^\s*,\s*/, "" );
+			cur = not ? elems = f.r : jQuery.merge( cur, f.r );
+		}
+
+		return cur;
+	},
+
+	// Core selector engine: resolve selector string `t` against `context`
+	// (default: document) and return an array of matched elements.
+	// Works by repeatedly consuming one token from the front of `t`
+	// (child/sibling combinators, comma, tag#id, #id/.class/tag) and
+	// narrowing/expanding `ret`; anything left over is handed to
+	// jQuery.filter. Comma branches are accumulated into `done`.
+	find: function( t, context ) {
+		// Quickly handle non-string expressions
+		if ( typeof t != "string" )
+			return [ t ];
+
+		// check to make sure context is a DOM element or a document
+		if ( context && context.nodeType != 1 && context.nodeType != 9)
+			return [ ];
+
+		// Set the correct context (if none is provided)
+		context = context || document;
+
+		// Initialize the search
+		var ret = [context], done = [], last, nodeName;
+
+		// Continue while a selector expression exists, and while
+		// we're no longer looping upon ourselves
+		while ( t && last != t ) {
+			var r = [];
+			last = t;
+
+			t = jQuery.trim(t);
+
+			var foundToken = false;
+
+			// An attempt at speeding up child selectors that
+			// point to a specific element tag
+			var re = quickChild;
+			var m = re.exec(t);
+
+			if ( m ) {
+				nodeName = m[1].toUpperCase();
+
+				// Perform our own iteration and filter
+				for ( var i = 0; ret[i]; i++ )
+					for ( var c = ret[i].firstChild; c; c = c.nextSibling )
+						if ( c.nodeType == 1 && (nodeName == "*" || c.nodeName.toUpperCase() == nodeName) )
+							r.push( c );
+
+				ret = r;
+				t = t.replace( re, "" );
+				if ( t.indexOf(" ") == 0 ) continue;
+				foundToken = true;
+			} else {
+				// Generic combinator: > (child), + (adjacent), ~ (sibling)
+				re = /^([>+~])\s*(\w*)/i;
+
+				if ( (m = re.exec(t)) != null ) {
+					r = [];
+
+					// `merge` de-dupes ~ results via jQuery.data ids
+					var merge = {};
+					nodeName = m[2].toUpperCase();
+					m = m[1];
+
+					for ( var j = 0, rl = ret.length; j < rl; j++ ) {
+						var n = m == "~" || m == "+" ? ret[j].nextSibling : ret[j].firstChild;
+						for ( ; n; n = n.nextSibling )
+							if ( n.nodeType == 1 ) {
+								var id = jQuery.data(n);
+
+								if ( m == "~" && merge[id] ) break;
+
+								if (!nodeName || n.nodeName.toUpperCase() == nodeName ) {
+									if ( m == "~" ) merge[id] = true;
+									r.push( n );
+								}
+
+								if ( m == "+" ) break;
+							}
+					}
+
+					ret = r;
+
+					// And remove the token
+					t = jQuery.trim( t.replace( re, "" ) );
+					foundToken = true;
+				}
+			}
+
+			// See if there's still an expression, and that we haven't already
+			// matched a token
+			if ( t && !foundToken ) {
+				// Handle multiple expressions
+				if ( !t.indexOf(",") ) {
+					// Clean the result set
+					if ( context == ret[0] ) ret.shift();
+
+					// Merge the result sets
+					done = jQuery.merge( done, ret );
+
+					// Reset the context
+					r = ret = [context];
+
+					// Touch up the selector string
+					t = " " + t.substr(1,t.length);
+
+				} else {
+					// Optimize for the case nodeName#idName
+					var re2 = quickID;
+					var m = re2.exec(t);
+
+					// Re-organize the results, so that they're consistent
+					if ( m ) {
+						m = [ 0, m[2], m[3], m[1] ];
+
+					} else {
+						// Otherwise, do a traditional filter check for
+						// ID, class, and element selectors
+						re2 = quickClass;
+						m = re2.exec(t);
+					}
+
+					// Unescape backslash-escaped characters in the name
+					m[2] = m[2].replace(/\\/g, "");
+
+					var elem = ret[ret.length-1];
+
+					// Try to do a global search by ID, where we can
+					if ( m[1] == "#" && elem && elem.getElementById && !jQuery.isXMLDoc(elem) ) {
+						// Optimization for HTML document case
+						var oid = elem.getElementById(m[2]);
+
+						// Do a quick check for the existence of the actual ID attribute
+						// to avoid selecting by the name attribute in IE
+						// also check to insure id is a string to avoid selecting an element with the name of 'id' inside a form
+						if ( (jQuery.browser.msie||jQuery.browser.opera) && oid && typeof oid.id == "string" && oid.id != m[2] )
+							oid = jQuery('[@id="'+m[2]+'"]', elem)[0];
+
+						// Do a quick check for node name (where applicable) so
+						// that div#foo searches will be really fast
+						ret = r = oid && (!m[3] || jQuery.nodeName(oid, m[3])) ? [oid] : [];
+					} else {
+						// We need to find all descendant elements
+						for ( var i = 0; ret[i]; i++ ) {
+							// Grab the tag name being searched for
+							var tag = m[1] == "#" && m[3] ? m[3] : m[1] != "" || m[0] == "" ? "*" : m[2];
+
+							// Handle IE7 being really dumb about <object>s
+							if ( tag == "*" && ret[i].nodeName.toLowerCase() == "object" )
+								tag = "param";
+
+							r = jQuery.merge( r, ret[i].getElementsByTagName( tag ));
+						}
+
+						// It's faster to filter by class and be done with it
+						if ( m[1] == "." )
+							r = jQuery.classFilter( r, m[2] );
+
+						// Same with ID filtering
+						if ( m[1] == "#" ) {
+							var tmp = [];
+
+							// Try to find the element with the ID
+							for ( var i = 0; r[i]; i++ )
+								if ( r[i].getAttribute("id") == m[2] ) {
+									tmp = [ r[i] ];
+									break;
+								}
+
+							r = tmp;
+						}
+
+						ret = r;
+					}
+
+					t = t.replace( re2, "" );
+				}
+
+			}
+
+			// If a selector string still exists
+			if ( t ) {
+				// Attempt to filter it
+				var val = jQuery.filter(t,r);
+				ret = r = val.r;
+				t = jQuery.trim(val.t);
+			}
+		}
+
+		// An error occurred with the selector;
+		// just return an empty set instead
+		if ( t )
+			ret = [];
+
+		// Remove the root context
+		if ( ret && context == ret[0] )
+			ret.shift();
+
+		// And combine the results
+		done = jQuery.merge( done, ret );
+
+		return done;
+	},
+
+	// Keep (or, when `not` is set, drop) the elements of `r` whose
+	// className contains class `m`. Space-padding both sides makes the
+	// indexOf test match whole class names only.
+	classFilter: function(r,m,not){
+		m = " " + m + " ";
+		var tmp = [];
+		for ( var i = 0; r[i]; i++ ) {
+			var pass = (" " + r[i].className + " ").indexOf( m ) >= 0;
+			if ( !not && pass || not && !pass )
+				tmp.push( r[i] );
+		}
+		return tmp;
+	},
+
+	// Filter element array `r` by selector expression `t` (optionally
+	// inverted via `not`). Consumes one parse-able token per loop using
+	// the jQuery.parse regexes, with fast paths for :not(), .class,
+	// [attr...] and :nth-child(); everything else dispatches through the
+	// jQuery.expr predicate table. Returns { r: elements, t: leftover }.
+	filter: function(t,r,not) {
+		var last;
+
+		// Look for common filter expressions
+		while ( t && t != last ) {
+			last = t;
+
+			var p = jQuery.parse, m;
+
+			for ( var i = 0; p[i]; i++ ) {
+				m = p[i].exec( t );
+
+				if ( m ) {
+					// Remove what we just matched
+					t = t.substring( m[0].length );
+
+					m[2] = m[2].replace(/\\/g, "");
+					break;
+				}
+			}
+
+			if ( !m )
+				break;
+
+			// :not() is a special case that can be optimized by
+			// keeping it out of the expression list
+			if ( m[1] == ":" && m[2] == "not" )
+				// optimize if only one selector found (most common case)
+				// NOTE(review): `isSimple` is defined elsewhere in this file.
+				r = isSimple.test( m[3] ) ?
+					jQuery.filter(m[3], r, true).r :
+					jQuery( r ).not( m[3] );
+
+			// We can get a big speed boost by filtering by class here
+			else if ( m[1] == "." )
+				r = jQuery.classFilter(r, m[2], not);
+
+			else if ( m[1] == "[" ) {
+				// Attribute filter: m[2]=name, m[3]=operator, m[5]=value
+				var tmp = [], type = m[3];
+
+				for ( var i = 0, rl = r.length; i < rl; i++ ) {
+					var a = r[i], z = a[ jQuery.props[m[2]] || m[2] ];
+
+					if ( z == null || /href|src|selected/.test(m[2]) )
+						z = jQuery.attr(a,m[2]) || '';
+
+					// "^ not" inverts the whole comparison when `not` is set
+					if ( (type == "" && !!z ||
+						 type == "=" && z == m[5] ||
+						 type == "!=" && z != m[5] ||
+						 type == "^=" && z && !z.indexOf(m[5]) ||
+						 type == "$=" && z.substr(z.length - m[5].length) == m[5] ||
+						 (type == "*=" || type == "~=") && z.indexOf(m[5]) >= 0) ^ not )
+						tmp.push( a );
+				}
+
+				r = tmp;
+
+			// We can get a speed boost by handling nth-child here
+			} else if ( m[1] == ":" && m[2] == "nth-child" ) {
+				var merge = {}, tmp = [],
+					// parse equations like 'even', 'odd', '5', '2n', '3n+2', '4n-1', '-n+6'
+					test = /(-?)(\d*)n((?:\+|-)?\d*)/.exec(
+						m[3] == "even" && "2n" || m[3] == "odd" && "2n+1" ||
+						!/\D/.test(m[3]) && "0n+" + m[3] || m[3]),
+					// calculate the numbers (first)n+(last) including if they are negative
+					first = (test[1] + (test[2] || 1)) - 0, last = test[3] - 0;
+
+				// loop through all the elements left in the jQuery object
+				for ( var i = 0, rl = r.length; i < rl; i++ ) {
+					var node = r[i], parentNode = node.parentNode, id = jQuery.data(parentNode);
+
+					// Number each element child of this parent once
+					// (cached per parent via the `merge` map)
+					if ( !merge[id] ) {
+						var c = 1;
+
+						for ( var n = parentNode.firstChild; n; n = n.nextSibling )
+							if ( n.nodeType == 1 )
+								n.nodeIndex = c++;
+
+						merge[id] = true;
+					}
+
+					var add = false;
+
+					if ( first == 0 ) {
+						if ( node.nodeIndex == last )
+							add = true;
+					} else if ( (node.nodeIndex - last) % first == 0 && (node.nodeIndex - last) / first >= 0 )
+						add = true;
+
+					if ( add ^ not )
+						tmp.push( node );
+				}
+
+				r = tmp;
+
+			// Otherwise, find the expression to execute
+			} else {
+				var fn = jQuery.expr[ m[1] ];
+				if ( typeof fn == "object" )
+					fn = fn[ m[2] ];
+
+				if ( typeof fn == "string" )
+					fn = eval("false||function(a,i){return " + fn + ";}");
+
+				// Execute it against the current filter
+				r = jQuery.grep( r, function(elem, i){
+					return fn(elem, i, m, r);
+				}, not );
+			}
+		}
+
+		// Return an array of filtered elements (r)
+		// and the modified expression string (t)
+		return { r: r, t: t };
+	},
+
+	// Collect all element nodes reachable from `elem` by repeatedly
+	// following property `dir` (e.g. "parentNode", "nextSibling"),
+	// stopping at the document node.
+	dir: function( elem, dir ){
+		var matched = [];
+		var cur = elem[dir];
+		while ( cur && cur != document ) {
+			if ( cur.nodeType == 1 )
+				matched.push( cur );
+			cur = cur[dir];
+		}
+		return matched;
+	},
+
+	// Walk from `cur` along property `dir`, returning the `result`-th
+	// element node encountered (1-based; defaults to the first).
+	// Used by the :last-child / :only-child predicates above.
+	nth: function(cur,result,dir,elem){
+		result = result || 1;
+		var num = 0;
+
+		for ( ; cur; cur = cur[dir] )
+			if ( cur.nodeType == 1 && ++num == result )
+				break;
+
+		return cur;
+	},
+
+	// Gather all element-node siblings starting at `n` (inclusive),
+	// optionally excluding the node `elem` itself.
+	sibling: function( n, elem ) {
+		var r = [];
+
+		for ( ; n; n = n.nextSibling ) {
+			if ( n.nodeType == 1 && (!elem || n != elem) )
+				r.push( n );
+		}
+
+		return r;
+	}
+});
+
+/*
+ * A number of helper functions used for managing events.
+ * Many of the ideas behind this code originated from
+ * Dean Edwards' addEvent library.
+ */
+jQuery.event = {
+
+	// Bind an event to an element
+	// Original by Dean Edwards
+	// elem: target node; types: space-separated, optionally namespaced
+	// ("click.ns") event names; handler: callback; data: optional value
+	// exposed to the handler (stored on a wrapper function).
+	add: function(elem, types, handler, data) {
+		// Text (3) and comment (8) nodes can't carry events
+		if ( elem.nodeType == 3 || elem.nodeType == 8 )
+			return;
+
+		// For whatever reason, IE has trouble passing the window object
+		// around, causing it to be cloned in the process
+		if ( jQuery.browser.msie && elem.setInterval != undefined )
+			elem = window;
+
+		// Make sure that the function being executed has a unique ID
+		// (used as the key in the per-type handler map below)
+		if ( !handler.guid )
+			handler.guid = this.guid++;
+
+		// if data is passed, bind to handler
+		if( data != undefined ) {
+			// Create temporary function pointer to original handler
+			var fn = handler;
+
+			// Create unique handler function, wrapped around original handler
+			handler = function() {
+				// Pass arguments and context to original handler
+				return fn.apply(this, arguments);
+			};
+
+			// Store data in unique handler
+			handler.data = data;
+
+			// Set the guid of unique handler to the same of original handler, so it can be removed
+			handler.guid = fn.guid;
+		}
+
+		// Init the element's event structure. A single shared `handle`
+		// dispatcher per element is registered with the DOM; it fans out
+		// to the individual handlers via jQuery.event.handle.
+		var events = jQuery.data(elem, "events") || jQuery.data(elem, "events", {}),
+			handle = jQuery.data(elem, "handle") || jQuery.data(elem, "handle", function(){
+				// returned undefined or false
+				var val;
+
+				// Handle the second event of a trigger and when
+				// an event is called after a page has unloaded
+				if ( typeof jQuery == "undefined" || jQuery.event.triggered )
+					return val;
+
+				val = jQuery.event.handle.apply(arguments.callee.elem, arguments);
+
+				return val;
+			});
+		// Add elem as a property of the handle function
+		// This is to prevent a memory leak with non-native
+		// event in IE.
+		handle.elem = elem;
+
+		// Handle multiple events separated by a space
+		// jQuery(...).bind("mouseover mouseout", fn);
+		jQuery.each(types.split(/\s+/), function(index, type) {
+			// Namespaced event handlers
+			var parts = type.split(".");
+			type = parts[0];
+			handler.type = parts[1];
+
+			// Get the current list of functions bound to this event
+			var handlers = events[type];
+
+			// Init the event handler queue
+			if (!handlers) {
+				handlers = events[type] = {};
+
+				// Check for a special event handler
+				// Only use addEventListener/attachEvent if the special
+				// events handler returns false
+				if ( !jQuery.event.special[type] || jQuery.event.special[type].setup.call(elem) === false ) {
+					// Bind the global event handler to the element
+					if (elem.addEventListener)
+						elem.addEventListener(type, handle, false);
+					else if (elem.attachEvent)
+						elem.attachEvent("on" + type, handle);
+				}
+			}
+
+			// Add the function to the element's handler list
+			handlers[handler.guid] = handler;
+
+			// Keep track of which events have been used, for global triggering
+			jQuery.event.global[type] = true;
+		});
+
+		// Nullify elem to prevent memory leaks in IE
+		elem = null;
+	},
+
+	// Monotonic id source for handler.guid
+	guid: 1,
+	// Set of event types ever bound anywhere, for global trigger()
+	global: {},
+
+	// Detach an event or set of events from an element.
+	// types may be: undefined (remove everything), a ".namespace"
+	// string, a space-separated type list, or an event object.
+	remove: function(elem, types, handler) {
+		// don't do events on text and comment nodes
+		if ( elem.nodeType == 3 || elem.nodeType == 8 )
+			return;
+
+		var events = jQuery.data(elem, "events"), ret, index;
+
+		if ( events ) {
+			// Unbind all events for the element
+			if ( types == undefined || (typeof types == "string" && types.charAt(0) == ".") )
+				for ( var type in events )
+					this.remove( elem, type + (types || "") );
+			else {
+				// types is actually an event object here
+				if ( types.type ) {
+					handler = types.handler;
+					types = types.type;
+				}
+
+				// Handle multiple events separated by a space
+				// jQuery(...).unbind("mouseover mouseout", fn);
+				jQuery.each(types.split(/\s+/), function(index, type){
+					// Namespaced event handlers
+					var parts = type.split(".");
+					type = parts[0];
+
+					if ( events[type] ) {
+						// remove the given handler for the given type
+						if ( handler )
+							delete events[type][handler.guid];
+
+						// remove all handlers for the given type
+						else
+							for ( handler in events[type] )
+								// Handle the removal of namespaced events
+								if ( !parts[1] || events[type][handler].type == parts[1] )
+									delete events[type][handler];
+
+						// remove generic event handler if no more handlers exist
+						for ( ret in events[type] ) break;
+						if ( !ret ) {
+							if ( !jQuery.event.special[type] || jQuery.event.special[type].teardown.call(elem) === false ) {
+								if (elem.removeEventListener)
+									elem.removeEventListener(type, jQuery.data(elem, "handle"), false);
+								else if (elem.detachEvent)
+									elem.detachEvent("on" + type, jQuery.data(elem, "handle"));
+							}
+							ret = null;
+							delete events[type];
+						}
+					}
+				});
+			}
+
+			// Remove the expando if it's no longer used
+			for ( ret in events ) break;
+			if ( !ret ) {
+				var handle = jQuery.data( elem, "handle" );
+				if ( handle ) handle.elem = null;
+				jQuery.removeData( elem, "events" );
+				jQuery.removeData( elem, "handle" );
+			}
+		}
+	},
+
+	// Trigger event `type` with extra `data` on `elem` (or globally on
+	// every bound element when elem is omitted). donative=false skips
+	// the native elem.type() call; `extra` is an additional callback
+	// whose return value, if any, overrides the handlers' return value.
+	// A "type!" suffix marks the trigger exclusive (skips namespaced
+	// handlers in jQuery.event.handle).
+	trigger: function(type, data, elem, donative, extra) {
+		// Clone the incoming data, if any
+		data = jQuery.makeArray(data || []);
+
+		if ( type.indexOf("!") >= 0 ) {
+			type = type.slice(0, -1);
+			var exclusive = true;
+		}
+
+		// Handle a global trigger
+		if ( !elem ) {
+			// Only trigger if we've ever bound an event for it
+			if ( this.global[type] )
+				jQuery("*").add([window, document]).trigger(type, data);
+
+		// Handle triggering a single element
+		} else {
+			// don't do events on text and comment nodes
+			if ( elem.nodeType == 3 || elem.nodeType == 8 )
+				return undefined;
+
+			var val, ret, fn = jQuery.isFunction( elem[ type ] || null ),
+				// Check to see if we need to provide a fake event, or not
+				event = !data[0] || !data[0].preventDefault;
+
+			// Pass along a fake event
+			if ( event )
+				data.unshift( this.fix({ type: type, target: elem }) );
+
+			// Enforce the right trigger type
+			data[0].type = type;
+			if ( exclusive )
+				data[0].exclusive = true;
+
+			// Trigger the event
+			if ( jQuery.isFunction( jQuery.data(elem, "handle") ) )
+				val = jQuery.data(elem, "handle").apply( elem, data );
+
+			// Handle triggering native .onfoo handlers
+			if ( !fn && elem["on"+type] && elem["on"+type].apply( elem, data ) === false )
+				val = false;
+
+			// Extra functions don't get the custom event object
+			if ( event )
+				data.shift();
+
+			// Handle triggering of extra function
+			if ( extra && jQuery.isFunction( extra ) ) {
+				// call the extra function and tack the current return value on the end for possible inspection
+				ret = extra.apply( elem, val == null ? data : data.concat( val ) );
+				// if anything is returned, give it precedence and have it overwrite the previous value
+				if (ret !== undefined)
+					val = ret;
+			}
+
+			// Trigger the native events (except for clicks on links)
+			// `triggered` guards the shared dispatcher against re-entry
+			if ( fn && donative !== false && val !== false && !(jQuery.nodeName(elem, 'a') && type == "click") ) {
+				this.triggered = true;
+				try {
+					elem[ type ]();
+				// prevent IE from throwing an error for some hidden elements
+				} catch (e) {}
+			}
+
+			this.triggered = false;
+		}
+
+		return val;
+	},
+
+	// Per-element dispatcher: normalize the event, then invoke every
+	// handler registered for its type (filtered by namespace), with
+	// `this` bound to the element. A handler returning false calls
+	// preventDefault + stopPropagation. Returns the last non-false
+	// handler return value.
+	handle: function(event) {
+		// returned undefined or false
+		var val;
+
+		// Empty object is for triggered events with no data
+		event = jQuery.event.fix( event || window.event || {} );
+
+		// Namespaced event handlers
+		var parts = event.type.split(".");
+		event.type = parts[0];
+
+		var handlers = jQuery.data(this, "events") && jQuery.data(this, "events")[event.type], args = Array.prototype.slice.call( arguments, 1 );
+		args.unshift( event );
+
+		for ( var j in handlers ) {
+			var handler = handlers[j];
+			// Pass in a reference to the handler function itself
+			// So that we can later remove it
+			args[0].handler = handler;
+			args[0].data = handler.data;
+
+			// Filter the functions by class (namespace); exclusive
+			// triggers only run handlers matching the namespace
+			if ( !parts[1] && !event.exclusive || handler.type == parts[1] ) {
+				var ret = handler.apply( this, args );
+
+				if ( val !== false )
+					val = ret;
+
+				if ( ret === false ) {
+					event.preventDefault();
+					event.stopPropagation();
+				}
+			}
+		}
+
+		// Clean up added properties in IE to prevent memory leak
+		if (jQuery.browser.msie)
+			event.target = event.preventDefault = event.stopPropagation =
+				event.handler = event.data = null;
+
+		return val;
+	},
+
+	// Normalize a (possibly native, possibly synthetic) event object
+	// into a writable clone with cross-browser preventDefault /
+	// stopPropagation, target, relatedTarget, pageX/Y, which and metaKey.
+	fix: function(event) {
+		// store a copy of the original event object
+		// and clone to set read-only properties
+		var originalEvent = event;
+		event = jQuery.extend({}, originalEvent);
+
+		// add preventDefault and stopPropagation since
+		// they will not work on the clone
+		event.preventDefault = function() {
+			// if preventDefault exists run it on the original event
+			if (originalEvent.preventDefault)
+				originalEvent.preventDefault();
+			// otherwise set the returnValue property of the original event to false (IE)
+			originalEvent.returnValue = false;
+		};
+		event.stopPropagation = function() {
+			// if stopPropagation exists run it on the original event
+			if (originalEvent.stopPropagation)
+				originalEvent.stopPropagation();
+			// otherwise set the cancelBubble property of the original event to true (IE)
+			originalEvent.cancelBubble = true;
+		};
+
+		// Fix target property, if necessary
+		if ( !event.target )
+			event.target = event.srcElement || document; // Fixes #1925 where srcElement might not be defined either
+
+		// check if target is a textnode (safari)
+		if ( event.target.nodeType == 3 )
+			event.target = originalEvent.target.parentNode;
+
+		// Add relatedTarget, if necessary
+		if ( !event.relatedTarget && event.fromElement )
+			event.relatedTarget = event.fromElement == event.target ? event.toElement : event.fromElement;
+
+		// Calculate pageX/Y if missing and clientX/Y available
+		if ( event.pageX == null && event.clientX != null ) {
+			var doc = document.documentElement, body = document.body;
+			event.pageX = event.clientX + (doc && doc.scrollLeft || body && body.scrollLeft || 0) - (doc.clientLeft || 0);
+			event.pageY = event.clientY + (doc && doc.scrollTop || body && body.scrollTop || 0) - (doc.clientTop || 0);
+		}
+
+		// Add which for key events
+		if ( !event.which && ((event.charCode || event.charCode === 0) ? event.charCode : event.keyCode) )
+			event.which = event.charCode || event.keyCode;
+
+		// Add metaKey to non-Mac browsers (use ctrl for PC's and Meta for Macs)
+		if ( !event.metaKey && event.ctrlKey )
+			event.metaKey = event.ctrlKey;
+
+		// Add which for click: 1 == left; 2 == middle; 3 == right
+		// Note: button is not normalized, so don't use it
+		if ( !event.which && event.button )
+			event.which = (event.button & 1 ? 1 : ( event.button & 2 ? 3 : ( event.button & 4 ? 2 : 0 ) ));
+
+		return event;
+	},
+
+	// Special-event hooks consulted by add/remove: setup/teardown
+	// returning false means "also attach/detach the native listener".
+	// mouseenter/mouseleave are emulated on top of mouseover/mouseout
+	// (except in IE, which supports them natively — setup returns false).
+	special: {
+		ready: {
+			setup: function() {
+				// Make sure the ready event is setup
+				bindReady();
+				return;
+			},
+
+			teardown: function() { return; }
+		},
+
+		mouseenter: {
+			setup: function() {
+				if ( jQuery.browser.msie ) return false;
+				jQuery(this).bind("mouseover", jQuery.event.special.mouseenter.handler);
+				return true;
+			},
+
+			teardown: function() {
+				if ( jQuery.browser.msie ) return false;
+				jQuery(this).unbind("mouseover", jQuery.event.special.mouseenter.handler);
+				return true;
+			},
+
+			handler: function(event) {
+				// If we actually just moused on to a sub-element, ignore it
+				if ( withinElement(event, this) ) return true;
+				// Execute the right handlers by setting the event type to mouseenter
+				arguments[0].type = "mouseenter";
+				return jQuery.event.handle.apply(this, arguments);
+			}
+		},
+
+		mouseleave: {
+			setup: function() {
+				if ( jQuery.browser.msie ) return false;
+				jQuery(this).bind("mouseout", jQuery.event.special.mouseleave.handler);
+				return true;
+			},
+
+			teardown: function() {
+				if ( jQuery.browser.msie ) return false;
+				jQuery(this).unbind("mouseout", jQuery.event.special.mouseleave.handler);
+				return true;
+			},
+
+			handler: function(event) {
+				// If we actually just moused on to a sub-element, ignore it
+				if ( withinElement(event, this) ) return true;
+				// Execute the right handlers by setting the event type to mouseleave
+				arguments[0].type = "mouseleave";
+				return jQuery.event.handle.apply(this, arguments);
+			}
+		}
+	}
+};
+
+// Public event API on jQuery objects: bind/one/unbind/trigger/
+// triggerHandler plus the toggle, hover and ready conveniences.
+jQuery.fn.extend({
+	// "unload" is routed through one() so its handlers fire only once
+	bind: function( type, data, fn ) {
+		return type == "unload" ? this.one(type, data, fn) : this.each(function(){
+			jQuery.event.add( this, type, fn || data, fn && data );
+		});
+	},
+
+	// Bind a handler that unbinds itself after its first invocation
+	one: function( type, data, fn ) {
+		return this.each(function(){
+			jQuery.event.add( this, type, function(event) {
+				jQuery(this).unbind(event);
+				return (fn || data).apply( this, arguments);
+			}, fn && data);
+		});
+	},
+
+	unbind: function( type, fn ) {
+		return this.each(function(){
+			jQuery.event.remove( this, type, fn );
+		});
+	},
+
+	trigger: function( type, data, fn ) {
+		return this.each(function(){
+			jQuery.event.trigger( type, data, this, true, fn );
+		});
+	},
+
+	// Like trigger, but only on the first element, without native
+	// fallback, and returning the handler result instead of `this`
+	triggerHandler: function( type, data, fn ) {
+		if ( this[0] )
+			return jQuery.event.trigger( type, data, this[0], false, fn );
+		return undefined;
+	},
+
+	// Alternate between the two passed click handlers on each click
+	toggle: function() {
+		// Save reference to arguments for access in closure
+		var args = arguments;
+
+		return this.click(function(event) {
+			// Figure out which function to execute
+			this.lastToggle = 0 == this.lastToggle ? 1 : 0;
+
+			// Make sure that clicks stop
+			event.preventDefault();
+
+			// and execute the function
+			return args[this.lastToggle].apply( this, arguments ) || false;
+		});
+	},
+
+	hover: function(fnOver, fnOut) {
+		return this.bind('mouseenter', fnOver).bind('mouseleave', fnOut);
+	},
+
+	// Run fn when the DOM is ready (immediately if it already is)
+	ready: function(fn) {
+		// Attach the listeners
+		bindReady();
+
+		// If the DOM is already ready
+		if ( jQuery.isReady )
+			// Execute the function immediately
+			fn.call( document, jQuery );
+
+		// Otherwise, remember the function for later
+		else
+			// Add the function to the wait list
+			jQuery.readyList.push( function() { return fn.call(this, jQuery); } );
+
+		return this;
+	}
+});
+
+// DOM-ready bookkeeping: runs every queued readyList callback exactly
+// once, then fires the "ready" event. Safe to call multiple times.
+jQuery.extend({
+	isReady: false,
+	readyList: [],
+	// Handle when the DOM is ready
+	ready: function() {
+		// Make sure that the DOM is not already loaded
+		if ( !jQuery.isReady ) {
+			// Remember that the DOM is ready
+			jQuery.isReady = true;
+
+			// If there are functions bound, to execute
+			if ( jQuery.readyList ) {
+				// Execute all of them
+				jQuery.each( jQuery.readyList, function(){
+					this.apply( document );
+				});
+
+				// Reset the list of functions
+				jQuery.readyList = null;
+			}
+
+			// Trigger any bound ready events
+			jQuery(document).triggerHandler("ready");
+		}
+	}
+});
+
+// Guard so the ready listeners below are installed at most once
+var readyBound = false;
+
+// Install browser-specific DOM-ready detection, with window.onload as
+// the universal fallback; all paths end in jQuery.ready().
+function bindReady(){
+	if ( readyBound ) return;
+	readyBound = true;
+
+	// Mozilla, Opera (see further below for it) and webkit nightlies currently support this event
+	if ( document.addEventListener && !jQuery.browser.opera)
+		// Use the handy event callback
+		document.addEventListener( "DOMContentLoaded", jQuery.ready, false );
+
+	// If IE is used and is not in a frame
+	// Continually check to see if the document is ready
+	if ( jQuery.browser.msie && window == top ) (function(){
+		if (jQuery.isReady) return;
+		try {
+			// If IE is used, use the trick by Diego Perini
+			// http://javascript.nwbox.com/IEContentLoaded/
+			document.documentElement.doScroll("left");
+		} catch( error ) {
+			setTimeout( arguments.callee, 0 );
+			return;
+		}
+		// and execute any waiting functions
+		jQuery.ready();
+	})();
+
+	// Opera: additionally wait until all stylesheets are enabled
+	if ( jQuery.browser.opera )
+		document.addEventListener( "DOMContentLoaded", function () {
+			if (jQuery.isReady) return;
+			for (var i = 0; i < document.styleSheets.length; i++)
+				if (document.styleSheets[i].disabled) {
+					setTimeout( arguments.callee, 0 );
+					return;
+				}
+			// and execute any waiting functions
+			jQuery.ready();
+		}, false);
+
+	// Safari: poll readyState and wait for all stylesheets to register
+	if ( jQuery.browser.safari ) {
+		var numStyles;
+		(function(){
+			if (jQuery.isReady) return;
+			if ( document.readyState != "loaded" && document.readyState != "complete" ) {
+				setTimeout( arguments.callee, 0 );
+				return;
+			}
+			if ( numStyles === undefined )
+				numStyles = jQuery("style, link[rel=stylesheet]").length;
+			if ( document.styleSheets.length != numStyles ) {
+				setTimeout( arguments.callee, 0 );
+				return;
+			}
+			// and execute any waiting functions
+			jQuery.ready();
+		})();
+	}
+
+	// A fallback to window.onload, that will always work
+	jQuery.event.add( window, "load", jQuery.ready );
+}
+
+jQuery.each( ("blur,focus,load,resize,scroll,unload,click,dblclick," +
+ "mousedown,mouseup,mousemove,mouseover,mouseout,change,select," +
+ "submit,keydown,keypress,keyup,error").split(","), function(i, name){
+
+ // Handle event binding
+ jQuery.fn[name] = function(fn){
+ return fn ? this.bind(name, fn) : this.trigger(name);
+ };
+});
+
+// Checks if an event happened on an element within another element
+// Used in jQuery.event.special.mouseenter and mouseleave handlers
+var withinElement = function(event, elem) {
+	// Check if mouse(over|out) are still within the same parent element
+	var parent = event.relatedTarget;
+	// Traverse up the tree; the try/catch absorbs IE "permission
+	// denied" errors on cross-frame nodes by treating them as `elem`
+	while ( parent && parent != elem ) try { parent = parent.parentNode; } catch(error) { parent = elem; }
+	// Return true if we actually just moused on to a sub-element
+	return parent == elem;
+};
+
+// Prevent memory leaks in IE
+// And prevent errors on refresh with events like mouseover in other browsers
+// Window isn't included so as not to unbind existing unload events
+jQuery(window).bind("unload", function() {
+	// Unbind everything on every element plus the document itself
+	jQuery("*").add(document).unbind();
+});
+jQuery.fn.extend({
+	// Load a remote document into the matched elements. With a function
+	// argument, binds a "load" event handler instead. The url may carry
+	+// a trailing " selector" to inject only matching fragments. A params
+	// object switches the request to POST; params may also be the
+	// callback. callback(responseText, status, xhr) runs per element.
+	load: function( url, params, callback ) {
+		if ( jQuery.isFunction( url ) )
+			return this.bind("load", url);
+
+		// Split off an optional " selector" suffix from the url
+		var off = url.indexOf(" ");
+		if ( off >= 0 ) {
+			var selector = url.slice(off, url.length);
+			url = url.slice(0, off);
+		}
+
+		callback = callback || function(){};
+
+		// Default to a GET request
+		var type = "GET";
+
+		// If the second parameter was provided
+		if ( params )
+			// If it's a function
+			if ( jQuery.isFunction( params ) ) {
+				// We assume that it's the callback
+				callback = params;
+				params = null;
+
+			// Otherwise, build a param string
+			} else {
+				params = jQuery.param( params );
+				type = "POST";
+			}
+
+		var self = this;
+
+		// Request the remote document
+		jQuery.ajax({
+			url: url,
+			type: type,
+			dataType: "html",
+			data: params,
+			complete: function(res, status){
+				// If successful, inject the HTML into all the matched elements
+				if ( status == "success" || status == "notmodified" )
+					// See if a selector was specified
+					self.html( selector ?
+						// Create a dummy div to hold the results
+						jQuery("<div/>")
+							// inject the contents of the document in, removing the scripts
+							// to avoid any 'Permission Denied' errors in IE
+							.append(res.responseText.replace(/<script(.|\s)*?\/script>/g, ""))
+
+							// Locate the specified elements
+							.find(selector) :
+
+						// If not, just inject the full result
+						res.responseText );
+
+				self.each( callback, [res.responseText, status, res] );
+			}
+		});
+		return this;
+	},
+
+	// Encode this form (or form elements) as a query string
+	serialize: function() {
+		return jQuery.param(this.serializeArray());
+	},
+	// Collect successful form controls as [{name, value}, ...];
+	// multi-valued controls (e.g. select-multiple) yield one entry
+	// per value.
+	serializeArray: function() {
+		return this.map(function(){
+			return jQuery.nodeName(this, "form") ?
+				jQuery.makeArray(this.elements) : this;
+		})
+		.filter(function(){
+			// Only named, enabled controls of submittable kinds
+			return this.name && !this.disabled &&
+				(this.checked || /select|textarea/i.test(this.nodeName) ||
+					/text|hidden|password/i.test(this.type));
+		})
+		.map(function(i, elem){
+			var val = jQuery(this).val();
+			return val == null ? null :
+				val.constructor == Array ?
+					jQuery.map( val, function(val, i){
+						return {name: elem.name, value: val};
+					}) :
+					{name: elem.name, value: val};
+		}).get();
+	}
+});
+
+// Attach a bunch of functions for handling common AJAX events;
+// each generated method is a thin .bind() wrapper for its event.
+jQuery.each( "ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess,ajaxSend".split(","), function(i,o){
+	jQuery.fn[o] = function(f){
+		return this.bind(o, f);
+	};
+});
+
+// Timestamp seed for unique JSONP callback names ("jsonp" + jsc++)
+var jsc = (new Date).getTime();
+
+jQuery.extend({
+ get: function( url, data, callback, type ) {
+ // shift arguments if data argument was ommited
+ if ( jQuery.isFunction( data ) ) {
+ callback = data;
+ data = null;
+ }
+
+ return jQuery.ajax({
+ type: "GET",
+ url: url,
+ data: data,
+ success: callback,
+ dataType: type
+ });
+ },
+
+ getScript: function( url, callback ) {
+ return jQuery.get(url, null, callback, "script");
+ },
+
+ getJSON: function( url, data, callback ) {
+ return jQuery.get(url, data, callback, "json");
+ },
+
+ post: function( url, data, callback, type ) {
+ if ( jQuery.isFunction( data ) ) {
+ callback = data;
+ data = {};
+ }
+
+ return jQuery.ajax({
+ type: "POST",
+ url: url,
+ data: data,
+ success: callback,
+ dataType: type
+ });
+ },
+
+ ajaxSetup: function( settings ) {
+ jQuery.extend( jQuery.ajaxSettings, settings );
+ },
+
+ ajaxSettings: {
+ global: true,
+ type: "GET",
+ timeout: 0,
+ contentType: "application/x-www-form-urlencoded",
+ processData: true,
+ async: true,
+ data: null,
+ username: null,
+ password: null,
+ accepts: {
+ xml: "application/xml, text/xml",
+ html: "text/html",
+ script: "text/javascript, application/javascript",
+ json: "application/json, text/javascript",
+ text: "text/plain",
+ _default: "*/*"
+ }
+ },
+
+ // Last-Modified header cache for next request
+ lastModified: {},
+
+ ajax: function( s ) {
+ var jsonp, jsre = /=\?(&|$)/g, status, data;
+
+ // Extend the settings, but re-extend 's' so that it can be
+ // checked again later (in the test suite, specifically)
+ s = jQuery.extend(true, s, jQuery.extend(true, {}, jQuery.ajaxSettings, s));
+
+ // convert data if not already a string
+ if ( s.data && s.processData && typeof s.data != "string" )
+ s.data = jQuery.param(s.data);
+
+ // Handle JSONP Parameter Callbacks
+ if ( s.dataType == "jsonp" ) {
+ if ( s.type.toLowerCase() == "get" ) {
+ if ( !s.url.match(jsre) )
+ s.url += (s.url.match(/\?/) ? "&" : "?") + (s.jsonp || "callback") + "=?";
+ } else if ( !s.data || !s.data.match(jsre) )
+ s.data = (s.data ? s.data + "&" : "") + (s.jsonp || "callback") + "=?";
+ s.dataType = "json";
+ }
+
+ // Build temporary JSONP function
+ if ( s.dataType == "json" && (s.data && s.data.match(jsre) || s.url.match(jsre)) ) {
+ jsonp = "jsonp" + jsc++;
+
+ // Replace the =? sequence both in the query string and the data
+ if ( s.data )
+ s.data = (s.data + "").replace(jsre, "=" + jsonp + "$1");
+ s.url = s.url.replace(jsre, "=" + jsonp + "$1");
+
+ // We need to make sure
+ // that a JSONP style response is executed properly
+ s.dataType = "script";
+
+ // Handle JSONP-style loading
+ window[ jsonp ] = function(tmp){
+ data = tmp;
+ success();
+ complete();
+ // Garbage collect
+ window[ jsonp ] = undefined;
+ try{ delete window[ jsonp ]; } catch(e){}
+ if ( head )
+ head.removeChild( script );
+ };
+ }
+
+ if ( s.dataType == "script" && s.cache == null )
+ s.cache = false;
+
+ if ( s.cache === false && s.type.toLowerCase() == "get" ) {
+ var ts = (new Date()).getTime();
+ // try replacing _= if it is there
+ var ret = s.url.replace(/(\?|&)_=.*?(&|$)/, "$1_=" + ts + "$2");
+ // if nothing was replaced, add timestamp to the end
+ s.url = ret + ((ret == s.url) ? (s.url.match(/\?/) ? "&" : "?") + "_=" + ts : "");
+ }
+
+ // If data is available, append data to url for get requests
+ if ( s.data && s.type.toLowerCase() == "get" ) {
+ s.url += (s.url.match(/\?/) ? "&" : "?") + s.data;
+
+ // IE likes to send both get and post data, prevent this
+ s.data = null;
+ }
+
+ // Watch for a new set of requests
+ if ( s.global && ! jQuery.active++ )
+ jQuery.event.trigger( "ajaxStart" );
+
+ // If we're requesting a remote document
+ // and trying to load JSON or Script with a GET
+ if ( (!s.url.indexOf("http") || !s.url.indexOf("//")) && s.dataType == "script" && s.type.toLowerCase() == "get" ) {
+ var head = document.getElementsByTagName("head")[0];
+ var script = document.createElement("script");
+ script.src = s.url;
+ if (s.scriptCharset)
+ script.charset = s.scriptCharset;
+
+ // Handle Script loading
+ if ( !jsonp ) {
+ var done = false;
+
+ // Attach handlers for all browsers
+ script.onload = script.onreadystatechange = function(){
+ if ( !done && (!this.readyState ||
+ this.readyState == "loaded" || this.readyState == "complete") ) {
+ done = true;
+ success();
+ complete();
+ head.removeChild( script );
+ }
+ };
+ }
+
+ head.appendChild(script);
+
+ // We handle everything using the script element injection
+ return undefined;
+ }
+
+ var requestDone = false;
+
+ // Create the request object; Microsoft failed to properly
+ // implement the XMLHttpRequest in IE7, so we use the ActiveXObject when it is available
+ var xml = window.ActiveXObject ? new ActiveXObject("Microsoft.XMLHTTP") : new XMLHttpRequest();
+
+ // Open the socket
+ xml.open(s.type, s.url, s.async, s.username, s.password);
+
+ // Need an extra try/catch for cross domain requests in Firefox 3
+ try {
+ // Set the correct header, if data is being sent
+ if ( s.data )
+ xml.setRequestHeader("Content-Type", s.contentType);
+
+ // Set the If-Modified-Since header, if ifModified mode.
+ if ( s.ifModified )
+ xml.setRequestHeader("If-Modified-Since",
+ jQuery.lastModified[s.url] || "Thu, 01 Jan 1970 00:00:00 GMT" );
+
+ // Set header so the called script knows that it's an XMLHttpRequest
+ xml.setRequestHeader("X-Requested-With", "XMLHttpRequest");
+
+ // Set the Accepts header for the server, depending on the dataType
+ xml.setRequestHeader("Accept", s.dataType && s.accepts[ s.dataType ] ?
+ s.accepts[ s.dataType ] + ", */*" :
+ s.accepts._default );
+ } catch(e){}
+
+ // Allow custom headers/mimetypes
+ if ( s.beforeSend )
+ s.beforeSend(xml);
+
+ if ( s.global )
+ jQuery.event.trigger("ajaxSend", [xml, s]);
+
+ // Wait for a response to come back
+ var onreadystatechange = function(isTimeout){
+ // The transfer is complete and the data is available, or the request timed out
+ if ( !requestDone && xml && (xml.readyState == 4 || isTimeout == "timeout") ) {
+ requestDone = true;
+
+ // clear poll interval
+ if (ival) {
+ clearInterval(ival);
+ ival = null;
+ }
+
+ status = isTimeout == "timeout" && "timeout" ||
+ !jQuery.httpSuccess( xml ) && "error" ||
+ s.ifModified && jQuery.httpNotModified( xml, s.url ) && "notmodified" ||
+ "success";
+
+ if ( status == "success" ) {
+ // Watch for, and catch, XML document parse errors
+ try {
+ // process the data (runs the xml through httpData regardless of callback)
+ data = jQuery.httpData( xml, s.dataType );
+ } catch(e) {
+ status = "parsererror";
+ }
+ }
+
+ // Make sure that the request was successful or notmodified
+ if ( status == "success" ) {
+ // Cache Last-Modified header, if ifModified mode.
+ var modRes;
+ try {
+ modRes = xml.getResponseHeader("Last-Modified");
+ } catch(e) {} // swallow exception thrown by FF if header is not available
+
+ if ( s.ifModified && modRes )
+ jQuery.lastModified[s.url] = modRes;
+
+ // JSONP handles its own success callback
+ if ( !jsonp )
+ success();
+ } else
+ jQuery.handleError(s, xml, status);
+
+ // Fire the complete handlers
+ complete();
+
+ // Stop memory leaks
+ if ( s.async )
+ xml = null;
+ }
+ };
+
+ if ( s.async ) {
+ // don't attach the handler to the request, just poll it instead
+ var ival = setInterval(onreadystatechange, 13);
+
+ // Timeout checker
+ if ( s.timeout > 0 )
+ setTimeout(function(){
+ // Check to see if the request is still happening
+ if ( xml ) {
+ // Cancel the request
+ xml.abort();
+
+ if( !requestDone )
+ onreadystatechange( "timeout" );
+ }
+ }, s.timeout);
+ }
+
+ // Send the data
+ try {
+ xml.send(s.data);
+ } catch(e) {
+ jQuery.handleError(s, xml, null, e);
+ }
+
+ // firefox 1.5 doesn't fire statechange for sync requests
+ if ( !s.async )
+ onreadystatechange();
+
+ function success(){
+ // If a local callback was specified, fire it and pass it the data
+ if ( s.success )
+ s.success( data, status );
+
+ // Fire the global callback
+ if ( s.global )
+ jQuery.event.trigger( "ajaxSuccess", [xml, s] );
+ }
+
+ function complete(){
+ // Process result
+ if ( s.complete )
+ s.complete(xml, status);
+
+ // The request was completed
+ if ( s.global )
+ jQuery.event.trigger( "ajaxComplete", [xml, s] );
+
+ // Handle the global AJAX counter
+ if ( s.global && ! --jQuery.active )
+ jQuery.event.trigger( "ajaxStop" );
+ }
+
+ // return XMLHttpRequest to allow aborting the request etc.
+ return xml;
+ },
+
+ handleError: function( s, xml, status, e ) {
+ // If a local callback was specified, fire it
+ if ( s.error ) s.error( xml, status, e );
+
+ // Fire the global callback
+ if ( s.global )
+ jQuery.event.trigger( "ajaxError", [xml, s, e] );
+ },
+
+ // Counter for holding the number of active queries
+ active: 0,
+
+ // Determines if an XMLHttpRequest was successful or not
+ httpSuccess: function( r ) {
+ try {
+ // IE error sometimes returns 1223 when it should be 204 so treat it as success, see #1450
+ return !r.status && location.protocol == "file:" ||
+ ( r.status >= 200 && r.status < 300 ) || r.status == 304 || r.status == 1223 ||
+ jQuery.browser.safari && r.status == undefined;
+ } catch(e){}
+ return false;
+ },
+
+ // Determines if an XMLHttpRequest returns NotModified
+ httpNotModified: function( xml, url ) {
+ try {
+ var xmlRes = xml.getResponseHeader("Last-Modified");
+
+ // Firefox always returns 200. check Last-Modified date
+ return xml.status == 304 || xmlRes == jQuery.lastModified[url] ||
+ jQuery.browser.safari && xml.status == undefined;
+ } catch(e){}
+ return false;
+ },
+
+ httpData: function( r, type ) {
+ var ct = r.getResponseHeader("content-type");
+ var xml = type == "xml" || !type && ct && ct.indexOf("xml") >= 0;
+ var data = xml ? r.responseXML : r.responseText;
+
+ if ( xml && data.documentElement.tagName == "parsererror" )
+ throw "parsererror";
+
+ // If the type is "script", eval it in global context
+ if ( type == "script" )
+ jQuery.globalEval( data );
+
+ // Get the JavaScript object, if JSON is used.
+ if ( type == "json" )
+ data = eval("(" + data + ")");
+
+ return data;
+ },
+
+ // Serialize an array of form elements or a set of
+ // key/values into a query string
+ param: function( a ) {
+ var s = [];
+
+ // If an array was passed in, assume that it is an array
+ // of form elements
+ if ( a.constructor == Array || a.jquery )
+ // Serialize the form elements
+ jQuery.each( a, function(){
+ s.push( encodeURIComponent(this.name) + "=" + encodeURIComponent( this.value ) );
+ });
+
+ // Otherwise, assume that it's an object of key/value pairs
+ else
+ // Serialize the key/values
+ for ( var j in a )
+ // If the value is an array then the key names need to be repeated
+ if ( a[j] && a[j].constructor == Array )
+ jQuery.each( a[j], function(){
+ s.push( encodeURIComponent(j) + "=" + encodeURIComponent( this ) );
+ });
+ else
+ s.push( encodeURIComponent(j) + "=" + encodeURIComponent( a[j] ) );
+
+ // Return the resulting serialization
+ return s.join("&").replace(/%20/g, "+");
+ }
+
+});
+jQuery.fn.extend({
+ show: function(speed,callback){
+ return speed ?
+ this.animate({
+ height: "show", width: "show", opacity: "show"
+ }, speed, callback) :
+
+ this.filter(":hidden").each(function(){
+ this.style.display = this.oldblock || "";
+ if ( jQuery.css(this,"display") == "none" ) {
+ var elem = jQuery("<" + this.tagName + " />").appendTo("body");
+ this.style.display = elem.css("display");
+ // handle an edge condition where css is - div { display:none; } or similar
+ if (this.style.display == "none")
+ this.style.display = "block";
+ elem.remove();
+ }
+ }).end();
+ },
+
+ hide: function(speed,callback){
+ return speed ?
+ this.animate({
+ height: "hide", width: "hide", opacity: "hide"
+ }, speed, callback) :
+
+ this.filter(":visible").each(function(){
+ this.oldblock = this.oldblock || jQuery.css(this,"display");
+ this.style.display = "none";
+ }).end();
+ },
+
+ // Save the old toggle function
+ _toggle: jQuery.fn.toggle,
+
+ toggle: function( fn, fn2 ){
+ return jQuery.isFunction(fn) && jQuery.isFunction(fn2) ?
+ this._toggle( fn, fn2 ) :
+ fn ?
+ this.animate({
+ height: "toggle", width: "toggle", opacity: "toggle"
+ }, fn, fn2) :
+ this.each(function(){
+ jQuery(this)[ jQuery(this).is(":hidden") ? "show" : "hide" ]();
+ });
+ },
+
+ slideDown: function(speed,callback){
+ return this.animate({height: "show"}, speed, callback);
+ },
+
+ slideUp: function(speed,callback){
+ return this.animate({height: "hide"}, speed, callback);
+ },
+
+ slideToggle: function(speed, callback){
+ return this.animate({height: "toggle"}, speed, callback);
+ },
+
+ fadeIn: function(speed, callback){
+ return this.animate({opacity: "show"}, speed, callback);
+ },
+
+ fadeOut: function(speed, callback){
+ return this.animate({opacity: "hide"}, speed, callback);
+ },
+
+ fadeTo: function(speed,to,callback){
+ return this.animate({opacity: to}, speed, callback);
+ },
+
+ animate: function( prop, speed, easing, callback ) {
+ var optall = jQuery.speed(speed, easing, callback);
+
+ return this[ optall.queue === false ? "each" : "queue" ](function(){
+ if ( this.nodeType != 1)
+ return false;
+
+ var opt = jQuery.extend({}, optall);
+ var hidden = jQuery(this).is(":hidden"), self = this;
+
+ for ( var p in prop ) {
+ if ( prop[p] == "hide" && hidden || prop[p] == "show" && !hidden )
+ return jQuery.isFunction(opt.complete) && opt.complete.apply(this);
+
+ if ( p == "height" || p == "width" ) {
+ // Store display property
+ opt.display = jQuery.css(this, "display");
+
+ // Make sure that nothing sneaks out
+ opt.overflow = this.style.overflow;
+ }
+ }
+
+ if ( opt.overflow != null )
+ this.style.overflow = "hidden";
+
+ opt.curAnim = jQuery.extend({}, prop);
+
+ jQuery.each( prop, function(name, val){
+ var e = new jQuery.fx( self, opt, name );
+
+ if ( /toggle|show|hide/.test(val) )
+ e[ val == "toggle" ? hidden ? "show" : "hide" : val ]( prop );
+ else {
+ var parts = val.toString().match(/^([+-]=)?([\d+-.]+)(.*)$/),
+ start = e.cur(true) || 0;
+
+ if ( parts ) {
+ var end = parseFloat(parts[2]),
+ unit = parts[3] || "px";
+
+ // We need to compute starting value
+ if ( unit != "px" ) {
+ self.style[ name ] = (end || 1) + unit;
+ start = ((end || 1) / e.cur(true)) * start;
+ self.style[ name ] = start + unit;
+ }
+
+ // If a +=/-= token was provided, we're doing a relative animation
+ if ( parts[1] )
+ end = ((parts[1] == "-=" ? -1 : 1) * end) + start;
+
+ e.custom( start, end, unit );
+ } else
+ e.custom( start, val, "" );
+ }
+ });
+
+ // For JS strict compliance
+ return true;
+ });
+ },
+
+ queue: function(type, fn){
+ if ( jQuery.isFunction(type) || ( type && type.constructor == Array )) {
+ fn = type;
+ type = "fx";
+ }
+
+ if ( !type || (typeof type == "string" && !fn) )
+ return queue( this[0], type );
+
+ return this.each(function(){
+ if ( fn.constructor == Array )
+ queue(this, type, fn);
+ else {
+ queue(this, type).push( fn );
+
+ if ( queue(this, type).length == 1 )
+ fn.apply(this);
+ }
+ });
+ },
+
+ stop: function(clearQueue, gotoEnd){
+ var timers = jQuery.timers;
+
+ if (clearQueue)
+ this.queue([]);
+
+ this.each(function(){
+ // go in reverse order so anything added to the queue during the loop is ignored
+ for ( var i = timers.length - 1; i >= 0; i-- )
+ if ( timers[i].elem == this ) {
+ if (gotoEnd)
+ // force the next step to be the last
+ timers[i](true);
+ timers.splice(i, 1);
+ }
+ });
+
+ // start the next in the queue if the last step wasn't forced
+ if (!gotoEnd)
+ this.dequeue();
+
+ return this;
+ }
+
+});
+
+var queue = function( elem, type, array ) {
+ if ( !elem )
+ return undefined;
+
+ type = type || "fx";
+
+ var q = jQuery.data( elem, type + "queue" );
+
+ if ( !q || array )
+ q = jQuery.data( elem, type + "queue",
+ array ? jQuery.makeArray(array) : [] );
+
+ return q;
+};
+
+jQuery.fn.dequeue = function(type){
+ type = type || "fx";
+
+ return this.each(function(){
+ var q = queue(this, type);
+
+ q.shift();
+
+ if ( q.length )
+ q[0].apply( this );
+ });
+};
+
+jQuery.extend({
+
+ speed: function(speed, easing, fn) {
+ var opt = speed && speed.constructor == Object ? speed : {
+ complete: fn || !fn && easing ||
+ jQuery.isFunction( speed ) && speed,
+ duration: speed,
+ easing: fn && easing || easing && easing.constructor != Function && easing
+ };
+
+ opt.duration = (opt.duration && opt.duration.constructor == Number ?
+ opt.duration :
+ { slow: 600, fast: 200 }[opt.duration]) || 400;
+
+ // Queueing
+ opt.old = opt.complete;
+ opt.complete = function(){
+ if ( opt.queue !== false )
+ jQuery(this).dequeue();
+ if ( jQuery.isFunction( opt.old ) )
+ opt.old.apply( this );
+ };
+
+ return opt;
+ },
+
+ easing: {
+ linear: function( p, n, firstNum, diff ) {
+ return firstNum + diff * p;
+ },
+ swing: function( p, n, firstNum, diff ) {
+ return ((-Math.cos(p*Math.PI)/2) + 0.5) * diff + firstNum;
+ }
+ },
+
+ timers: [],
+ timerId: null,
+
+ fx: function( elem, options, prop ){
+ this.options = options;
+ this.elem = elem;
+ this.prop = prop;
+
+ if ( !options.orig )
+ options.orig = {};
+ }
+
+});
+
+jQuery.fx.prototype = {
+
+ // Simple function for setting a style value
+ update: function(){
+ if ( this.options.step )
+ this.options.step.apply( this.elem, [ this.now, this ] );
+
+ (jQuery.fx.step[this.prop] || jQuery.fx.step._default)( this );
+
+ // Set display property to block for height/width animations
+ if ( this.prop == "height" || this.prop == "width" )
+ this.elem.style.display = "block";
+ },
+
+ // Get the current size
+ cur: function(force){
+ if ( this.elem[this.prop] != null && this.elem.style[this.prop] == null )
+ return this.elem[ this.prop ];
+
+ var r = parseFloat(jQuery.css(this.elem, this.prop, force));
+ return r && r > -10000 ? r : parseFloat(jQuery.curCSS(this.elem, this.prop)) || 0;
+ },
+
+ // Start an animation from one number to another
+ custom: function(from, to, unit){
+ this.startTime = (new Date()).getTime();
+ this.start = from;
+ this.end = to;
+ this.unit = unit || this.unit || "px";
+ this.now = this.start;
+ this.pos = this.state = 0;
+ this.update();
+
+ var self = this;
+ function t(gotoEnd){
+ return self.step(gotoEnd);
+ }
+
+ t.elem = this.elem;
+
+ jQuery.timers.push(t);
+
+ if ( jQuery.timerId == null ) {
+ jQuery.timerId = setInterval(function(){
+ var timers = jQuery.timers;
+
+ for ( var i = 0; i < timers.length; i++ )
+ if ( !timers[i]() )
+ timers.splice(i--, 1);
+
+ if ( !timers.length ) {
+ clearInterval( jQuery.timerId );
+ jQuery.timerId = null;
+ }
+ }, 13);
+ }
+ },
+
+ // Simple 'show' function
+ show: function(){
+ // Remember where we started, so that we can go back to it later
+ this.options.orig[this.prop] = jQuery.attr( this.elem.style, this.prop );
+ this.options.show = true;
+
+ // Begin the animation
+ this.custom(0, this.cur());
+
+ // Make sure that we start at a small width/height to avoid any
+ // flash of content
+ if ( this.prop == "width" || this.prop == "height" )
+ this.elem.style[this.prop] = "1px";
+
+ // Start by showing the element
+ jQuery(this.elem).show();
+ },
+
+ // Simple 'hide' function
+ hide: function(){
+ // Remember where we started, so that we can go back to it later
+ this.options.orig[this.prop] = jQuery.attr( this.elem.style, this.prop );
+ this.options.hide = true;
+
+ // Begin the animation
+ this.custom(this.cur(), 0);
+ },
+
+ // Each step of an animation
+ step: function(gotoEnd){
+ var t = (new Date()).getTime();
+
+ if ( gotoEnd || t > this.options.duration + this.startTime ) {
+ this.now = this.end;
+ this.pos = this.state = 1;
+ this.update();
+
+ this.options.curAnim[ this.prop ] = true;
+
+ var done = true;
+ for ( var i in this.options.curAnim )
+ if ( this.options.curAnim[i] !== true )
+ done = false;
+
+ if ( done ) {
+ if ( this.options.display != null ) {
+ // Reset the overflow
+ this.elem.style.overflow = this.options.overflow;
+
+ // Reset the display
+ this.elem.style.display = this.options.display;
+ if ( jQuery.css(this.elem, "display") == "none" )
+ this.elem.style.display = "block";
+ }
+
+ // Hide the element if the "hide" operation was done
+ if ( this.options.hide )
+ this.elem.style.display = "none";
+
+ // Reset the properties, if the item has been hidden or shown
+ if ( this.options.hide || this.options.show )
+ for ( var p in this.options.curAnim )
+ jQuery.attr(this.elem.style, p, this.options.orig[p]);
+ }
+
+ // If a callback was provided, execute it
+ if ( done && jQuery.isFunction( this.options.complete ) )
+ // Execute the complete function
+ this.options.complete.apply( this.elem );
+
+ return false;
+ } else {
+ var n = t - this.startTime;
+ this.state = n / this.options.duration;
+
+ // Perform the easing function, defaults to swing
+ this.pos = jQuery.easing[this.options.easing || (jQuery.easing.swing ? "swing" : "linear")](this.state, n, 0, 1, this.options.duration);
+ this.now = this.start + ((this.end - this.start) * this.pos);
+
+ // Perform the next step of the animation
+ this.update();
+ }
+
+ return true;
+ }
+
+};
+
+jQuery.fx.step = {
+ scrollLeft: function(fx){
+ fx.elem.scrollLeft = fx.now;
+ },
+
+ scrollTop: function(fx){
+ fx.elem.scrollTop = fx.now;
+ },
+
+ opacity: function(fx){
+ jQuery.attr(fx.elem.style, "opacity", fx.now);
+ },
+
+ _default: function(fx){
+ fx.elem.style[ fx.prop ] = fx.now + fx.unit;
+ }
+};
+// The Offset Method
+// Originally By Brandon Aaron, part of the Dimension Plugin
+// http://jquery.com/plugins/project/dimensions
+jQuery.fn.offset = function() {
+ var left = 0, top = 0, elem = this[0], results;
+
+ if ( elem ) with ( jQuery.browser ) {
+ var parent = elem.parentNode,
+ offsetChild = elem,
+ offsetParent = elem.offsetParent,
+ doc = elem.ownerDocument,
+ safari2 = safari && parseInt(version) < 522 && !/adobeair/i.test(userAgent),
+ fixed = jQuery.css(elem, "position") == "fixed";
+
+ // Use getBoundingClientRect if available
+ if ( elem.getBoundingClientRect ) {
+ var box = elem.getBoundingClientRect();
+
+ // Add the document scroll offsets
+ add(box.left + Math.max(doc.documentElement.scrollLeft, doc.body.scrollLeft),
+ box.top + Math.max(doc.documentElement.scrollTop, doc.body.scrollTop));
+
+ // IE adds the HTML element's border, by default it is medium which is 2px
+ // IE 6 and 7 quirks mode the border width is overwritable by the following css html { border: 0; }
+ // IE 7 standards mode, the border is always 2px
+ // This border/offset is typically represented by the clientLeft and clientTop properties
+ // However, in IE6 and 7 quirks mode the clientLeft and clientTop properties are not updated when overwriting it via CSS
+ // Therefore this method will be off by 2px in IE while in quirksmode
+ add( -doc.documentElement.clientLeft, -doc.documentElement.clientTop );
+
+ // Otherwise loop through the offsetParents and parentNodes
+ } else {
+
+ // Initial element offsets
+ add( elem.offsetLeft, elem.offsetTop );
+
+ // Get parent offsets
+ while ( offsetParent ) {
+ // Add offsetParent offsets
+ add( offsetParent.offsetLeft, offsetParent.offsetTop );
+
+ // Mozilla and Safari > 2 does not include the border on offset parents
+ // However Mozilla adds the border for table or table cells
+ // Mozilla and Safari > 2 do not include the border on offset parents
+ border( offsetParent );
+
+ // Add the document scroll offsets if position is fixed on any offsetParent
+ if ( !fixed && jQuery.css(offsetParent, "position") == "fixed" )
+ fixed = true;
+
+ // Set offsetChild to previous offsetParent unless it is the body element
+ offsetChild = /^body$/i.test(offsetParent.tagName) ? offsetChild : offsetParent;
+ // Get next offsetParent
+ offsetParent = offsetParent.offsetParent;
+ }
+
+ // Get parent scroll offsets
+ while ( parent && parent.tagName && !/^body|html$/i.test(parent.tagName) ) {
+ // Remove parent scroll UNLESS that parent is inline or a table to work around Opera inline/table scrollLeft/Top bug
+ if ( !/^inline|table.*$/i.test(jQuery.css(parent, "display")) )
+ // Subtract parent scroll offsets
+ add( -parent.scrollLeft, -parent.scrollTop );
+
+ // Mozilla does not add the border for a parent that has overflow != visible
+ if ( mozilla && jQuery.css(parent, "overflow") != "visible" )
+ border( parent );
+
+ // Get next parent
+ parent = parent.parentNode;
+ }
+
+ // Safari <= 2 doubles body offsets with a fixed position element/offsetParent or absolutely positioned offsetChild
+ // Mozilla doubles body offsets with a non-absolutely positioned offsetChild
+ if ( (safari2 && (fixed || jQuery.css(offsetChild, "position") == "absolute")) ||
+ (mozilla && jQuery.css(offsetChild, "position") != "absolute") )
+ add( -doc.body.offsetLeft, -doc.body.offsetTop );
+
+ // Add the document scroll offsets if position is fixed
+ if ( fixed )
+ add(Math.max(doc.documentElement.scrollLeft, doc.body.scrollLeft),
+ Math.max(doc.documentElement.scrollTop, doc.body.scrollTop));
+ }
+
+ // Return an object with top and left properties
+ results = { top: top, left: left };
+ }
+
+ function border(elem) {
+ add( jQuery.curCSS(elem, "borderLeftWidth", true), jQuery.curCSS(elem, "borderTopWidth", true) );
+ }
+
+ function add(l, t) {
+ left += parseInt(l) || 0;
+ top += parseInt(t) || 0;
+ }
+
+ return results;
+};
+})();
diff --git a/share/www/script/jquery.resizer.js b/share/www/script/jquery.resizer.js
new file mode 100644
index 00000000..8ebd0c66
--- /dev/null
+++ b/share/www/script/jquery.resizer.js
@@ -0,0 +1,55 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+(function($) {
+
+ $.fn.resizable = function(options) {
+ if ($.browser.safari && parseInt($.browser.version) >= 522)
+ return this; // safari3 and later provides textarea resizing natively
+ options = options || {};
+ options.grippie = options.grippie || null;
+ options.minHeight = options.minHeight || 32;
+ options.maxHeight = options.maxHeight || null;
+
+ return this.each(function() {
+ var grippie = options.grippie;
+ if (!grippie) grippie = $("<div></div>").appendTo(this.parentNode);
+ grippie.addClass("grippie");
+ var elem = $(this);
+ grippie.mousedown(function(e) {
+ var pos = e.screenY;
+ var height = elem.height();
+ $(document)
+ .mousemove(function(e) {
+ var offset = e.screenY - pos;
+ if (offset) {
+ var newHeight = height + offset;
+ if (newHeight >= options.minHeight &&
+ (!options.maxHeight || newHeight <= options.maxHeight)) {
+ elem.height(newHeight);
+ height = newHeight;
+ }
+ pos = e.screenY;
+ }
+ document.onselectstart = function() { return false }; // for IE
+ return false;
+ })
+ .one("mouseup", function() {
+ $(document).unbind("mousemove");
+ document.onselectstart = null; // for IE
+ });
+ return false;
+ });
+ });
+ }
+
+})(jQuery);
diff --git a/share/www/script/jquery.suggest.js b/share/www/script/jquery.suggest.js
new file mode 100644
index 00000000..4bb8b2d0
--- /dev/null
+++ b/share/www/script/jquery.suggest.js
@@ -0,0 +1,129 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+(function($) {
+
+ suggest = function(elem, options) {
+ var timer = null;
+ var prevVal = null;
+
+ var input = $(elem).attr("autocomplete", "off");
+ var offset = input.offset();
+ var dropdown = $('<ul style="z-index: 10000"></ul>')
+ .addClass(options.dropdownClass).appendTo("body").css({
+ top: (offset.top + elem.offsetHeight) + "px",
+ left: offset.left + "px",
+ minWidth: input.css("width")
+ });
+
+ input
+ .blur(function() {
+ setTimeout(function() { dropdown.hide() }, 200);
+ })
+ .keydown(function(e) {
+ if (timer) clearTimeout(timer);
+ if ($.inArray(e.keyCode, [38, 40]) != -1 ||
+ (dropdown.is(":visible") && (e.keyCode == 27 ||
+ ($.inArray(e.keyCode, [9, 13]) != -1 && getSelection())))) {
+ e.preventDefault(); e.stopPropagation();
+ switch(e.keyCode) {
+ case 38: // up
+ moveUp();
+ break;
+ case 40: // down
+ moveDown();
+ break;
+ case 9: // tab
+ case 13: // return
+ commit();
+ break;
+ case 27: // escape
+ dropdown.hide();
+ break;
+ }
+ return false;
+ } else {
+ timer = setTimeout(function() { suggest() }, options.delay);
+ }
+ });
+
+ function suggest(force) {
+ var newVal = $.trim(input.val());
+ if (force || newVal != prevVal) {
+ if (force || newVal.length >= options.minChars) {
+ options.callback($.trim(input.val()), function(items) {
+ show(items);
+ });
+ } else {
+ dropdown.hide();
+ }
+ prevVal = newVal;
+ }
+ }
+
+ function show(items) {
+ if (!items) return;
+ if (!items.length) { dropdown.hide(); return; }
+ var html = [];
+ for (var i = 0; i < items.length; i++) {
+ html.push('<li>' + items[i] + '</li>');
+ }
+ dropdown.html(html.join("")).slideDown("fast");
+ dropdown.children('li').click(function(e) {
+ $(this).addClass("selected");
+ commit();
+ });
+ }
+
+ function commit() {
+ var sel = getSelection();
+ if (sel) {
+ prevVal = sel.text();
+ input.val(prevVal);
+ dropdown.hide();
+ }
+ if (timer) clearTimeout(timer)
+ }
+
+ function getSelection() {
+ if (!dropdown.is(":visible")) return null;
+ var sel = dropdown.children("li.selected");
+ return sel.length ? sel : null;
+ }
+
+ function moveDown() {
+ if (!dropdown.is(":visible")) suggest(true);
+ var sel = getSelection();
+ if (sel) sel.removeClass("selected").next().addClass("selected");
+ else dropdown.children("li:first-child").addClass("selected");
+ }
+
+ function moveUp() {
+ if (!dropdown.is(":visible")) suggest(true);
+ var sel = getSelection();
+ if (sel) sel.removeClass("selected").prev().addClass("selected");
+ else dropdown.children("li:last-child").addClass("selected");
+ }
+ }
+
+ $.fn.suggest = function(callback, options) {
+ options = options || {};
+ options.callback = callback;
+ options.delay = options.delay || 100;
+ options.dropdownClass = options.dropdownClass || "suggest-dropdown";
+ options.minChars = options.minChars || 1;
+ return this.each(function() {
+ suggest(this, options);
+ });
+ };
+
+})(jQuery);
diff --git a/share/www/script/json2.js b/share/www/script/json2.js
new file mode 100644
index 00000000..e412ed20
--- /dev/null
+++ b/share/www/script/json2.js
@@ -0,0 +1,159 @@
+/*
+ json2.js
+ 2008-03-14
+
+ Public Domain
+
+ No warranty expressed or implied. Use at your own risk.
+
+ See http://www.JSON.org/js.html
+
+ This is a reference implementation. You are free to copy, modify, or
+ redistribute.
+
+ Use your own copy. It is extremely unwise to load third party
+ code into your pages.
+*/
+
+if (!this.JSON) {
+
+ JSON = function () {
+
+ function f(n) { // Format integers to have at least two digits.
+ return n < 10 ? '0' + n : n;
+ }
+
+ Date.prototype.toJSON = function () {
+ return this.getUTCFullYear() + '-' +
+ f(this.getUTCMonth() + 1) + '-' +
+ f(this.getUTCDate()) + 'T' +
+ f(this.getUTCHours()) + ':' +
+ f(this.getUTCMinutes()) + ':' +
+ f(this.getUTCSeconds()) + 'Z';
+ };
+
+
+ var m = { // table of character substitutions
+ '\b': '\\b',
+ '\t': '\\t',
+ '\n': '\\n',
+ '\f': '\\f',
+ '\r': '\\r',
+ '"' : '\\"',
+ '\\': '\\\\'
+ };
+
+ function stringify(value, whitelist) {
+ var a, // The array holding the partial texts.
+ i, // The loop counter.
+ k, // The member key.
+ l, // Length.
+ r = /["\\\x00-\x1f\x7f-\x9f]/g,
+ v; // The member value.
+
+ switch (typeof value) {
+ case 'string':
+
+ return r.test(value) ?
+ '"' + value.replace(r, function (a) {
+ var c = m[a];
+ if (c) {
+ return c;
+ }
+ c = a.charCodeAt();
+ return '\\u00' + Math.floor(c / 16).toString(16) +
+ (c % 16).toString(16);
+ }) + '"' :
+ '"' + value + '"';
+
+ case 'number':
+
+ return isFinite(value) ? String(value) : 'null';
+
+ case 'boolean':
+ case 'null':
+ return String(value);
+
+ case 'object':
+
+ if (!value) {
+ return 'null';
+ }
+
+ if (typeof value.toJSON === 'function') {
+ return stringify(value.toJSON());
+ }
+ a = [];
+ if (typeof value.length === 'number' &&
+ !(value.propertyIsEnumerable('length'))) {
+
+ l = value.length;
+ for (i = 0; i < l; i += 1) {
+ a.push(stringify(value[i], whitelist) || 'null');
+ }
+
+ return '[' + a.join(',') + ']';
+ }
+ if (whitelist) {
+ l = whitelist.length;
+ for (i = 0; i < l; i += 1) {
+ k = whitelist[i];
+ if (typeof k === 'string') {
+ v = stringify(value[k], whitelist);
+ if (v) {
+ a.push(stringify(k) + ':' + v);
+ }
+ }
+ }
+ } else {
+
+ for (k in value) {
+ if (typeof k === 'string') {
+ v = stringify(value[k], whitelist);
+ if (v) {
+ a.push(stringify(k) + ':' + v);
+ }
+ }
+ }
+ }
+
+ return '{' + a.join(',') + '}';
+ }
+ }
+
+ return {
+ stringify: stringify,
+ parse: function (text, filter) {
+ var j;
+
+ function walk(k, v) {
+ var i, n;
+ if (v && typeof v === 'object') {
+ for (i in v) {
+ if (Object.prototype.hasOwnProperty.apply(v, [i])) {
+ n = walk(i, v[i]);
+ if (n !== undefined) {
+ v[i] = n;
+ } else {
+ delete v[i];
+ }
+ }
+ }
+ }
+ return filter(k, v);
+ }
+
+ if (/^[\],:{}\s]*$/.test(text.replace(/\\["\\\/bfnrtu]/g, '@').
+replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, ']').
+replace(/(?:^|:|,)(?:\s*\[)+/g, ''))) {
+
+ j = eval('(' + text + ')');
+
+ return typeof filter === 'function' ? walk('', j) : j;
+ }
+
+ throw new SyntaxError('parseJSON');
+ }
+ };
+ }();
+} \ No newline at end of file
diff --git a/share/www/script/pprint.js b/share/www/script/pprint.js
new file mode 100644
index 00000000..981db0f2
--- /dev/null
+++ b/share/www/script/pprint.js
@@ -0,0 +1,60 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+/* JSON pretty printing */
+
+function prettyPrintJSON(val, indent, linesep, depth) {
+ indent = indent != null ? indent : 4;
+ linesep = linesep != null ? linesep : "\n";
+ depth = depth != null ? depth : 1;
+ var propsep = linesep.length ? "," + linesep : ", ";
+ var tab = [];
+ for (var i = 0; i < indent * depth; i++) tab.push("");
+ tab = tab.join(" ");
+ switch (typeof val) {
+ case "boolean":
+ case "number":
+ case "string":
+ return JSON.stringify(val);
+ case "object": {
+ if (val === null) return "null";
+ if (val.constructor == Date) return JSON.stringify(val);
+ var buf = [];
+ if (val.constructor == Array) {
+ buf.push("[");
+ for (var index = 0; index < val.length; index++) {
+ buf.push(index > 0 ? propsep : linesep);
+ buf.push(
+ tab, prettyPrintJSON(val[index], indent, linesep, depth + 1)
+ );
+ }
+ if (index >= 0) buf.push(linesep, tab.substr(indent));
+ buf.push("]");
+ } else {
+ buf.push("{");
+ var index = 0;
+ for (var key in val) {
+ if (!val.hasOwnProperty(key)) continue;
+ buf.push(index > 0 ? propsep : linesep);
+ buf.push(
+ tab, JSON.stringify(key), ": ",
+ prettyPrintJSON(val[key], indent, linesep, depth + 1)
+ );
+ index++;
+ }
+ if (index >= 0) buf.push(linesep, tab.substr(indent));
+ buf.push("}");
+ }
+ return buf.join("");
+ }
+ }
+}
diff --git a/share/www/script/shell.js b/share/www/script/shell.js
new file mode 100644
index 00000000..f4c6aff5
--- /dev/null
+++ b/share/www/script/shell.js
@@ -0,0 +1,700 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy
+// of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var
+histList = [""],
+histPos = 0,
+_scope = {},
+_win, // a top-level context
+question,
+_in,
+_out,
+tooManyMatches = null,
+lastError = null;
+
+function refocus()
+{
+ _in.blur(); // Needed for Mozilla to scroll correctly.
+ _in.focus();
+}
+
+function init()
+{
+ _in = document.getElementById("input");
+ _out = document.getElementById("output");
+
+ _win = window;
+
+ if (opener && !opener.closed)
+ {
+ println("Using bookmarklet version of shell: commands will run in opener's context.", "message");
+ _win = opener;
+ }
+
+ initTarget();
+
+ recalculateInputHeight();
+ refocus();
+}
+
+function initTarget()
+{
+ _win.Shell = window;
+ _win.print = shellCommands.print;
+}
+
+
+// Unless the user is selected something, refocus the textbox.
+// (requested by caillon, brendan, asa)
+function keepFocusInTextbox(e)
+{
+ var g = e.srcElement ? e.srcElement : e.target; // IE vs. standard
+
+ while (!g.tagName)
+ g = g.parentNode;
+ var t = g.tagName.toUpperCase();
+ if (t=="A" || t=="INPUT")
+ return;
+
+ if (window.getSelection) {
+ // Mozilla
+ if (String(window.getSelection()))
+ return;
+ }
+ else if (document.getSelection) {
+ // Opera? Netscape 4?
+ if (document.getSelection())
+ return;
+ }
+ else {
+ // IE
+ if ( document.selection.createRange().text )
+ return;
+ }
+
+ refocus();
+}
+
+function inputKeydown(e) {
+ // Use onkeydown because IE doesn't support onkeypress for arrow keys
+
+ //alert(e.keyCode + " ^ " + e.keycode);
+
+ if (e.shiftKey && e.keyCode == 13) { // shift-enter
+ // don't do anything; allow the shift-enter to insert a line break as normal
+ } else if (e.keyCode == 13) { // enter
+ // execute the input on enter
+ try { go(); } catch(er) { alert(er); };
+ setTimeout(function() { _in.value = ""; }, 0); // can't preventDefault on input, so clear it later
+ } else if (e.keyCode == 38) { // up
+ // go up in history if at top or ctrl-up
+ if (e.ctrlKey || caretInFirstLine(_in))
+ hist(true);
+ } else if (e.keyCode == 40) { // down
+ // go down in history if at end or ctrl-down
+ if (e.ctrlKey || caretInLastLine(_in))
+ hist(false);
+ } else if (e.keyCode == 9) { // tab
+ tabcomplete();
+ setTimeout(function() { refocus(); }, 0); // refocus because tab was hit
+ } else { }
+
+ setTimeout(recalculateInputHeight, 0);
+
+ //return true;
+};
+
+function caretInFirstLine(textbox)
+{
+ // IE doesn't support selectionStart/selectionEnd
+ if (textbox.selectionStart == undefined)
+ return true;
+
+ var firstLineBreak = textbox.value.indexOf("\n");
+
+ return ((firstLineBreak == -1) || (textbox.selectionStart <= firstLineBreak));
+}
+
+function caretInLastLine(textbox)
+{
+ // IE doesn't support selectionStart/selectionEnd
+ if (textbox.selectionEnd == undefined)
+ return true;
+
+ var lastLineBreak = textbox.value.lastIndexOf("\n");
+
+ return (textbox.selectionEnd > lastLineBreak);
+}
+
+function recalculateInputHeight()
+{
+ var rows = _in.value.split(/\n/).length
+ + 1 // prevent scrollbar flickering in Mozilla
+ + (window.opera ? 1 : 0); // leave room for scrollbar in Opera
+
+ if (_in.rows != rows) // without this check, it is impossible to select text in Opera 7.60 or Opera 8.0.
+ _in.rows = rows;
+}
+
+function println(s, type)
+{
+ if((s=String(s)))
+ {
+ var newdiv = document.createElement("div");
+ newdiv.appendChild(document.createTextNode(s));
+ newdiv.className = type;
+ _out.appendChild(newdiv);
+ return newdiv;
+ }
+}
+
+function printWithRunin(h, s, type)
+{
+ var div = println(s, type);
+ var head = document.createElement("strong");
+ head.appendChild(document.createTextNode(h + ": "));
+ div.insertBefore(head, div.firstChild);
+}
+
+
+var shellCommands =
+{
+load : function load(url)
+{
+ var s = _win.document.createElement("script");
+ s.type = "text/javascript";
+ s.src = url;
+ _win.document.getElementsByTagName("head")[0].appendChild(s);
+ println("Loading " + url + "...", "message");
+},
+
+clear : function clear()
+{
+ var CHILDREN_TO_PRESERVE = 3;
+ while (_out.childNodes[CHILDREN_TO_PRESERVE])
+ _out.removeChild(_out.childNodes[CHILDREN_TO_PRESERVE]);
+},
+
+print : function print(s) { println(s, "print"); },
+
+// the normal function, "print", shouldn't return a value
+// (suggested by brendan; later noticed it was a problem when showing others)
+pr : function pr(s)
+{
+ shellCommands.print(s); // need to specify shellCommands so it doesn't try window.print()!
+ return s;
+},
+
+props : function props(e, onePerLine)
+{
+ if (e === null) {
+ println("props called with null argument", "error");
+ return;
+ }
+
+ if (e === undefined) {
+ println("props called with undefined argument", "error");
+ return;
+ }
+
+ var ns = ["Methods", "Fields", "Unreachables"];
+ var as = [[], [], []]; // array of (empty) arrays of arrays!
+ var p, j, i; // loop variables, several used multiple times
+
+ var protoLevels = 0;
+
+ for (p = e; p; p = p.__proto__)
+ {
+ for (i=0; i<ns.length; ++i)
+ as[i][protoLevels] = [];
+ ++protoLevels;
+ }
+
+ for(var a in e)
+ {
+ // Shortcoming: doesn't check that VALUES are the same in object and prototype.
+
+ var protoLevel = -1;
+ try
+ {
+ for (p = e; p && (a in p); p = p.__proto__)
+ ++protoLevel;
+ }
+ catch(er) { protoLevel = 0; } // "in" operator throws when param to props() is a string
+
+ var type = 1;
+ try
+ {
+ if ((typeof e[a]) == "function")
+ type = 0;
+ }
+ catch (er) { type = 2; }
+
+ as[type][protoLevel].push(a);
+ }
+
+ function times(s, n) { return n ? s + times(s, n-1) : ""; }
+
+ for (j=0; j<protoLevels; ++j)
+ for (i=0;i<ns.length;++i)
+ if (as[i][j].length)
+ printWithRunin(
+ ns[i] + times(" of prototype", j),
+ (onePerLine ? "\n\n" : "") + as[i][j].sort().join(onePerLine ? "\n" : ", ") + (onePerLine ? "\n\n" : ""),
+ "propList"
+ );
+},
+
+blink : function blink(node)
+{
+ if (!node) throw("blink: argument is null or undefined.");
+ if (node.nodeType == null) throw("blink: argument must be a node.");
+ if (node.nodeType == 3) throw("blink: argument must not be a text node");
+ if (node.documentElement) throw("blink: argument must not be the document object");
+
+ function setOutline(o) {
+ return function() {
+ if (node.style.outline != node.style.bogusProperty) {
+ // browser supports outline (Firefox 1.1 and newer, CSS3, Opera 8).
+ node.style.outline = o;
+ }
+ else if (node.style.MozOutline != node.style.bogusProperty) {
+ // browser supports MozOutline (Firefox 1.0.x and older)
+ node.style.MozOutline = o;
+ }
+ else {
+ // browser only supports border (IE). border is a fallback because it moves things around.
+ node.style.border = o;
+ }
+ }
+ }
+
+ function focusIt(a) {
+ return function() {
+ a.focus();
+ }
+ }
+
+ if (node.ownerDocument) {
+ var windowToFocusNow = (node.ownerDocument.defaultView || node.ownerDocument.parentWindow); // Moz vs. IE
+ if (windowToFocusNow)
+ setTimeout(focusIt(windowToFocusNow.top), 0);
+ }
+
+ for(var i=1;i<7;++i)
+ setTimeout(setOutline((i%2)?'3px solid red':'none'), i*100);
+
+ setTimeout(focusIt(window), 800);
+ setTimeout(focusIt(_in), 810);
+},
+
+scope : function scope(sc)
+{
+ if (!sc) sc = {};
+ _scope = sc;
+ println("Scope is now " + sc + ". If a variable is not found in this scope, window will also be searched. New variables will still go on window.", "message");
+},
+
+mathHelp : function mathHelp()
+{
+ printWithRunin("Math constants", "E, LN2, LN10, LOG2E, LOG10E, PI, SQRT1_2, SQRT2", "propList");
+ printWithRunin("Math methods", "abs, acos, asin, atan, atan2, ceil, cos, exp, floor, log, max, min, pow, random, round, sin, sqrt, tan", "propList");
+},
+
+ans : undefined
+};
+
+
+function hist(up)
+{
+ // histList[0] = first command entered, [1] = second, etc.
+ // type something, press up --> thing typed is now in "limbo"
+ // (last item in histList) and should be reachable by pressing
+ // down again.
+
+ var L = histList.length;
+
+ if (L == 1)
+ return;
+
+ if (up)
+ {
+ if (histPos == L-1)
+ {
+ // Save this entry in case the user hits the down key.
+ histList[histPos] = _in.value;
+ }
+
+ if (histPos > 0)
+ {
+ histPos--;
+ // Use a timeout to prevent up from moving cursor within new text
+ // Set to nothing first for the same reason
+ setTimeout(
+ function() {
+ _in.value = '';
+ _in.value = histList[histPos];
+ var caretPos = _in.value.length;
+ if (_in.setSelectionRange)
+ _in.setSelectionRange(caretPos, caretPos);
+ },
+ 0
+ );
+ }
+ }
+ else // down
+ {
+ if (histPos < L-1)
+ {
+ histPos++;
+ _in.value = histList[histPos];
+ }
+ else if (histPos == L-1)
+ {
+ // Already on the current entry: clear but save
+ if (_in.value)
+ {
+ histList[histPos] = _in.value;
+ ++histPos;
+ _in.value = "";
+ }
+ }
+ }
+}
+
+function tabcomplete()
+{
+ /*
+ * Working backwards from s[from], find the spot
+ * where this expression starts. It will scan
+ * until it hits a mismatched ( or a space,
+ * but it skips over quoted strings.
+ * If stopAtDot is true, stop at a '.'
+ */
+ function findbeginning(s, from, stopAtDot)
+ {
+ /*
+ * Complicated function.
+ *
+ * Return true if s[i] == q BUT ONLY IF
+ * s[i-1] is not a backslash.
+ */
+ function equalButNotEscaped(s,i,q)
+ {
+ if(s.charAt(i) != q) // not equal go no further
+ return false;
+
+ if(i==0) // beginning of string
+ return true;
+
+ if(s.charAt(i-1) == '\\') // escaped?
+ return false;
+
+ return true;
+ }
+
+ var nparens = 0;
+ var i;
+ for(i=from; i>=0; i--)
+ {
+ if(s.charAt(i) == ' ')
+ break;
+
+ if(stopAtDot && s.charAt(i) == '.')
+ break;
+
+ if(s.charAt(i) == ')')
+ nparens++;
+ else if(s.charAt(i) == '(')
+ nparens--;
+
+ if(nparens < 0)
+ break;
+
+ // skip quoted strings
+ if(s.charAt(i) == '\'' || s.charAt(i) == '\"')
+ {
+ //dump("skipping quoted chars: ");
+ var quot = s.charAt(i);
+ i--;
+ while(i >= 0 && !equalButNotEscaped(s,i,quot)) {
+ //dump(s.charAt(i));
+ i--;
+ }
+ //dump("\n");
+ }
+ }
+ return i;
+ }
+
+ // XXX should be used more consistently (instead of using selectionStart/selectionEnd throughout code)
+ // XXX doesn't work in IE, even though it contains IE-specific code
+ function getcaretpos(inp)
+ {
+ if(inp.selectionEnd != null)
+ return inp.selectionEnd;
+
+ if(inp.createTextRange)
+ {
+ var docrange = _win.Shell.document.selection.createRange();
+ var inprange = inp.createTextRange();
+ if (inprange.setEndPoint)
+ {
+ inprange.setEndPoint('EndToStart', docrange);
+ return inprange.text.length;
+ }
+ }
+
+ return inp.value.length; // sucks, punt
+ }
+
+ function setselectionto(inp,pos)
+ {
+ if(inp.selectionStart) {
+ inp.selectionStart = inp.selectionEnd = pos;
+ }
+ else if(inp.createTextRange) {
+ var docrange = _win.Shell.document.selection.createRange();
+ var inprange = inp.createTextRange();
+ inprange.move('character',pos);
+ inprange.select();
+ }
+ else { // err...
+ /*
+ inp.select();
+ if(_win.Shell.document.getSelection())
+ _win.Shell.document.getSelection() = "";
+ */
+ }
+ }
+ // get position of cursor within the input box
+ var caret = getcaretpos(_in);
+
+ if(caret) {
+ //dump("----\n");
+ var dotpos, spacepos, complete, obj;
+ //dump("caret pos: " + caret + "\n");
+ // see if there's a dot before here
+ dotpos = findbeginning(_in.value, caret-1, true);
+ //dump("dot pos: " + dotpos + "\n");
+ if(dotpos == -1 || _in.value.charAt(dotpos) != '.') {
+ dotpos = caret;
+//dump("changed dot pos: " + dotpos + "\n");
+ }
+
+ // look backwards for a non-variable-name character
+ spacepos = findbeginning(_in.value, dotpos-1, false);
+ //dump("space pos: " + spacepos + "\n");
+ // get the object we're trying to complete on
+ if(spacepos == dotpos || spacepos+1 == dotpos || dotpos == caret)
+ {
+ // try completing function args
+ if(_in.value.charAt(dotpos) == '(' ||
+ (_in.value.charAt(spacepos) == '(' && (spacepos+1) == dotpos))
+ {
+ var fn,fname;
+ var from = (_in.value.charAt(dotpos) == '(') ? dotpos : spacepos;
+ spacepos = findbeginning(_in.value, from-1, false);
+
+ fname = _in.value.substr(spacepos+1,from-(spacepos+1));
+ //dump("fname: " + fname + "\n");
+ try {
+ with(_win.Shell._scope)
+ with(_win)
+ with(Shell.shellCommands)
+ fn = eval(fname);
+ }
+ catch(er) {
+ //dump('fn is not a valid object\n');
+ return;
+ }
+ if(fn == undefined) {
+ //dump('fn is undefined');
+ return;
+ }
+ if(fn instanceof Function)
+ {
+ // Print function definition, including argument names, but not function body
+ if(!fn.toString().match(/function .+?\(\) +\{\n +\[native code\]\n\}/))
+ println(fn.toString().match(/function .+?\(.*?\)/), "tabcomplete");
+ }
+
+ return;
+ }
+ else
+ obj = _win;
+ }
+ else
+ {
+ var objname = _in.value.substr(spacepos+1,dotpos-(spacepos+1));
+ //dump("objname: |" + objname + "|\n");
+ try {
+ with(_win.Shell._scope)
+ with(_win)
+ obj = eval(objname);
+ }
+ catch(er) {
+ printError(er);
+ return;
+ }
+ if(obj == undefined) {
+ // sometimes this is tabcomplete's fault, so don't print it :(
+ // e.g. completing from "print(document.getElements"
+ // println("Can't complete from null or undefined expression " + objname, "error");
+ return;
+ }
+ }
+ //dump("obj: " + obj + "\n");
+ // get the thing we're trying to complete
+ if(dotpos == caret)
+ {
+ if(spacepos+1 == dotpos || spacepos == dotpos)
+ {
+ // nothing to complete
+ //dump("nothing to complete\n");
+ return;
+ }
+
+ complete = _in.value.substr(spacepos+1,dotpos-(spacepos+1));
+ }
+ else {
+ complete = _in.value.substr(dotpos+1,caret-(dotpos+1));
+ }
+ //dump("complete: " + complete + "\n");
+ // ok, now look at all the props/methods of this obj
+ // and find ones starting with 'complete'
+ var matches = [];
+ var bestmatch = null;
+ for(var a in obj)
+ {
+ //a = a.toString();
+ //XXX: making it lowercase could help some cases,
+ // but screws up my general logic.
+ if(a.substr(0,complete.length) == complete) {
+ matches.push(a);
+ ////dump("match: " + a + "\n");
+ // if no best match, this is the best match
+ if(bestmatch == null)
+ {
+ bestmatch = a;
+ }
+ else {
+ // the best match is the longest common string
+ function min(a,b){ return ((a<b)?a:b); }
+ var i;
+ for(i=0; i< min(bestmatch.length, a.length); i++)
+ {
+ if(bestmatch.charAt(i) != a.charAt(i))
+ break;
+ }
+ bestmatch = bestmatch.substr(0,i);
+ ////dump("bestmatch len: " + i + "\n");
+ }
+ ////dump("bestmatch: " + bestmatch + "\n");
+ }
+ }
+ bestmatch = (bestmatch || "");
+ ////dump("matches: " + matches + "\n");
+ var objAndComplete = (objname || obj) + "." + bestmatch;
+ //dump("matches.length: " + matches.length + ", tooManyMatches: " + tooManyMatches + ", objAndComplete: " + objAndComplete + "\n");
+ if(matches.length > 1 && (tooManyMatches == objAndComplete || matches.length <= 10)) {
+
+ printWithRunin("Matches: ", matches.join(', '), "tabcomplete");
+ tooManyMatches = null;
+ }
+ else if(matches.length > 10)
+ {
+ println(matches.length + " matches. Press tab again to see them all", "tabcomplete");
+ tooManyMatches = objAndComplete;
+ }
+ else {
+ tooManyMatches = null;
+ }
+ if(bestmatch != "")
+ {
+ var sstart;
+ if(dotpos == caret) {
+ sstart = spacepos+1;
+ }
+ else {
+ sstart = dotpos+1;
+ }
+ _in.value = _in.value.substr(0, sstart)
+ + bestmatch
+ + _in.value.substr(caret);
+ setselectionto(_in,caret + (bestmatch.length - complete.length));
+ }
+ }
+}
+
+function printQuestion(q)
+{
+ println(q, "input");
+}
+
+function printAnswer(a)
+{
+ if (a !== undefined) {
+ println(a, "normalOutput");
+ shellCommands.ans = a;
+ }
+}
+
+function printError(er)
+{
+ var lineNumberString;
+
+ lastError = er; // for debugging the shell
+ if (er.name)
+ {
+ // lineNumberString should not be "", to avoid a very wacky bug in IE 6.
+ lineNumberString = (er.lineNumber != undefined) ? (" on line " + er.lineNumber + ": ") : ": ";
+ println(er.name + lineNumberString + er.message, "error"); // Because IE doesn't have error.toString.
+ }
+ else
+ println(er, "error"); // Because security errors in Moz /only/ have toString.
+}
+
+function go(s)
+{
+ _in.value = question = s ? s : _in.value;
+
+ if (question == "")
+ return;
+
+ histList[histList.length-1] = question;
+ histList[histList.length] = "";
+ histPos = histList.length - 1;
+
+ // Unfortunately, this has to happen *before* the JavaScript is run, so that
+ // print() output will go in the right place.
+ _in.value='';
+ recalculateInputHeight();
+ printQuestion(question);
+
+ if (_win.closed) {
+ printError("Target window has been closed.");
+ return;
+ }
+
+ try { ("Shell" in _win) }
+ catch(er) {
+ printError("The JavaScript Shell cannot access variables in the target window. The most likely reason is that the target window now has a different page loaded and that page has a different hostname than the original page.");
+ return;
+ }
+
+ if (!("Shell" in _win))
+ initTarget(); // silent
+
+ // Evaluate Shell.question using _win's eval (this is why eval isn't in the |with|, IIRC).
+ _win.location.href = "javascript:try{ Shell.printAnswer(eval('with(Shell._scope) with(Shell.shellCommands) {' + Shell.question + String.fromCharCode(10) + '}')); } catch(er) { Shell.printError(er); }; setTimeout(Shell.refocus, 0); void 0";
+}
diff --git a/share/www/shell.html b/share/www/shell.html
new file mode 100644
index 00000000..ebc4a130
--- /dev/null
+++ b/share/www/shell.html
@@ -0,0 +1,79 @@
+<!DOCTYPE html>
+<!--
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+-->
+<html lang="en" onclick="keepFocusInTextbox(event)">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <title>JavaScript Shell 1.4</title>
+ <link rel="stylesheet" href="style/layout.css" type="text/css">
+ <script src="script/shell.js"></script>
+ <!-- for http://ted.mielczarek.org/code/mozilla/extensiondev/ -->
+ <script type="text/javascript" src="chrome://extensiondev/content/rdfhistory.js"></script>
+ <script type="text/javascript" src="chrome://extensiondev/content/chromeShellExtras.js"></script>
+ <script type='text/javascript' src='script/json2.js'></script>
+ <script type='text/javascript' src='script/couch.js'></script>
+ <style type="text/css">
+ #help { border-bottom: 1px solid #eee; color: #666; margin: 0 0 1em; }
+ /* Preserve line breaks, but wrap too if browser supports it */
+ #output { white-space: pre; white-space: -moz-pre-wrap; }
+ form { margin: 0; padding: 0; }
+ #input { background: transparent; border: none;
+ border-left: 2px solid #999; font: bold 110% monospace;
+ margin: .5em 0 0 -12px; overflow: auto; padding: 0 0 0 10px;
+ width: 100%;
+ }
+ .input { color: #000; font: bold 110% monospace; margin-top: .5em; }
+ .normalOutput, .print, .error, .propList, .message, .tabcomplete {
+ background: #e4e4e4; margin: 0 1em; padding: 2px;
+ }
+ .normalOutput { color: #666; font-family: monospace; }
+ .print { color: #666; }
+ .error { color: #f00; background: #f9e4e4; }
+ .propList { color: #0a0; }
+ .message { color: #0d0; }
+ .tabcomplete { color: #d0d; }
+ </style>
+ </head>
+ <body onload="init()">
+ <h1>
+ <a href="browse/index.html">Overview</a>
+ <strong>Javascript Shell</strong>
+ </h1>
+ <div id="wrap">
+ <div id="help">
+ <p>
+ Features: auto-completion of property names using Tab, multiline input
+ using Shift+Enter, input history with (Ctrl+)Up/Down,
+ <a accesskey="M" href="javascript:go('scope(Math); mathHelp();');" title="Accesskey: M">Math</a>,
+ <a accesskey="H" href="http://www.squarefree.com/shell/?ignoreReferrerFrom=shell1.4" title="Accesskey: H" target="_blank">help</a>
+ <br>
+ Values and functions: ans, print(string),
+ <code><a accesskey="P" href="javascript:go('props(ans)')" title="Accesskey: P">props(object)</a></code>,
+ <code><a accesskey="B" href="javascript:go('blink(ans)')" title="Accesskey: B">blink(node)</a></code>,
+ <code><a accesskey="C" href="javascript:go('clear()')" title="Accesskey: C">clear()</a></code>,
+ <code>load(scriptURL)</code>, <code>scope(object)</code>
+ </p>
+ </div>
+ <div id="output"></div>
+ <div>
+ <textarea id="input" class="input" wrap="off" onkeydown="inputKeydown(event)" rows="1"></textarea>
+ </div>
+
+ </div>
+ </body>
+ <script>
+ if (parent !== window) parent.updateNavigation();
+ </script>
+</html>
diff --git a/share/www/style/layout.css b/share/www/style/layout.css
new file mode 100644
index 00000000..37bf0c79
--- /dev/null
+++ b/share/www/style/layout.css
@@ -0,0 +1,405 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+/* General styles */
+
+html, body { background: transparent; color: #000;
+ font: normal 90% Arial,Helvetica,sans-serif; margin: 0;
+}
+:link, :visited { color: #ba1e16; text-decoration: none; }
+:link img, :visited img { border: none; }
+
+h1 { background: #666 url(../image/hgrad.gif) 100% 0 repeat-y;
+ border-bottom: 1px solid #333; color: #999;
+ font: 125% normal Arial,Helvetica,sans-serif;
+ line-height: 1.8em; margin: 0 0 1em; padding: 0 0 0 1em;
+}
+h1 :link, h1 :visited, h1 strong { padding: .4em .5em; }
+h1 :link, h1 :visited {
+ background: url(../image/path.gif) 100% 50% no-repeat; padding-right: 2em;
+}
+h1 :link, h1 :visited { color: #bbb; cursor: pointer;
+ text-shadow: #333 2px 2px 1px;
+}
+h1 strong { color: #fff; text-shadow: #000 2px 2px 4px; }
+
+hr { border: 1px solid #999; border-width: 1px 0 0; }
+dl dt { font-weight: bold; }
+code, tt { font-family: "DejaVu Sans Mono",Monaco,monospace; }
+
+button { font-size: 100%; -webkit-appearance: square-button; }
+input, select, textarea { background: #fff; border: 1px solid;
+ border-color: #999 #ddd #ddd #999; margin: 0; padding: 1px;
+}
+textarea { font-family: "DejaVu Sans Mono",Monaco,monospace; font-size: 100%; }
+fieldset { border: none; font-size: 95%; margin: 0; padding: .2em 0 0; }
+fieldset legend { color: #666; font-weight: bold; padding: 0; }
+fieldset input, fieldset select { font-size: 95%; }
+fieldset p { margin: .4em; }
+
+/* Tabular listings */
+
+table.listing { border-collapse: separate; border-spacing: 0;
+ border: 1px solid #a7a7a7; clear: both; width: 100%;
+}
+table.listing caption { display: none; }
+table.listing th, table.listing td { padding: .2em .5em; }
+table.listing thead th { background: #dadada url(../image/thead.gif) repeat-x;
+ border: 1px solid #a7a7a7; border-width: 0 0 1px 1px; color: #333;
+ font-size: 95%; font-weight: normal; text-align: left;
+ text-shadow: #999 2px 1px 2px; text-transform: capitalize;
+ white-space: nowrap;
+}
+table.listing thead th:first-child { border-left: none; }
+table.listing thead th.key {
+ background: #a7afb6 url(../image/thead-key.gif) 0 0 repeat-x;
+ padding-top: 2px;
+}
+table.listing thead th.key div {
+ background: url(../image/order-asc.gif) 100% 3px no-repeat; cursor: pointer;
+}
+table.listing thead th.desc div {
+ background-image: url(../image/order-desc.gif);
+}
+table.listing tbody tr th, table.listing tbody tr td { background: #feffea; }
+table.listing tbody tr.odd th, table.listing tbody tr.odd td,
+table.listing tbody.odd tr th, table.listing tbody.odd tr td {
+ background: #fff;
+}
+table.listing tbody th, table.listing tbody td {
+ border-left: 1px solid #d9d9d9; padding: .4em .5em; vertical-align: top;
+}
+table.listing tbody th:first-child, table.listing tbody td:first-child {
+ border-left: none;
+}
+table.listing tbody th { text-align: left; }
+table.listing tbody th :link, table.listing tbody th :visited {
+ display: block;
+}
+table.listing tbody.content th button {
+ background: transparent no-repeat; border: none; cursor: pointer;
+ float: left; margin: 0 5px 0 -20px; padding: 0; width: 15px; height: 15px;
+}
+table.listing tbody.content th button:hover { background-position: -15px 0; }
+table.listing tbody.footer tr td { background: #e9e9e9;
+ border-top: 1px solid #a7a7a7; color: #999; font-size: 90%;
+ line-height: 1.8em;
+}
+table.listing tbody.footer #paging { float: right; }
+table.listing tbody.footer #paging a,
+table.listing tbody.footer #paging label {
+ padding: 0 .5em;
+}
+table.listing tbody.footer #paging label { color: #666; }
+table.listing tbody.footer #paging select { font-size: 90%; padding: 0; }
+
+/* Resizer grippies */
+
+div.grippie { background: #e9e9e9 url(../image/grippie.gif) 50% 50% no-repeat;
+ border: 1px solid #aaa; border-top: none; cursor: row-resize;
+ min-height: 10px;
+}
+
+/* Suggest results */
+
+ul.suggest-dropdown { border: 1px solid #999; background-color: #eee;
+ padding: 0; margin: 0; list-style: none; opacity: .85; position: absolute;
+ z-index: 10000; display: none; -webkit-box-shadow: 2px 2px 10px #333;
+}
+ul.suggest-dropdown li { padding: 2px 5px; white-space: nowrap; color: #101010;
+ text-align: left;
+}
+ul.suggest-dropdown li.selected { cursor: pointer; background: Highlight;
+ color: HighlightText;
+}
+
+/* Logo & Navigation */
+
+#wrap { padding: 0 20px 3em; }
+
+#logo { position: absolute; top: 20px; right: 20px; }
+
+#nav { color: #333; font-size: 110%; font-weight: bold; list-style: none;
+ margin: 0; overflow: auto; padding: 0; position: absolute; top: 185px;
+ bottom: 20px; right: 0; width: 210px;
+}
+#nav ul { list-style: none; margin: 0; padding: 0; }
+#nav li { color: #999; margin: 5px 0 0; padding: 3px 0; }
+#nav li span { padding: 0 20px; }
+#nav li.selected { background: #e9e9e9; }
+#nav li li { font-size: 90%; font-weight: normal; margin: 0;
+ padding: 2px 20px 2px 40px;
+}
+#nav li li:hover { background: #e4e4e4; }
+#nav li.selected li:hover { background: #d7d7d7; }
+#nav li li :link, #nav li li :visited { color: #333; display: block;
+ text-decoration: none;
+}
+#nav li li :link:hover, #nav li li :visited:hover { color: #000; }
+#nav li li :link:focus, #nav li li :visited:focus { outline: none; }
+#nav li li.selected { background: #aaa !important; border-top: 1px solid #999;
+ color: #fff; padding-top: 1px;
+}
+#nav li li.selected :link, #nav li li.selected :visited { color: #fff; }
+#nav li li.selected :link:hover, #nav li li.selected :visited:hover {
+ color: #fff;
+}
+
+#footer { background: #ddd; border-top: 1px solid #bbb; color: #000;
+ font-size: 80%; opacity: .7; padding: 5px 10px; position: absolute; right: 0;
+ bottom: 0; height: 10px; width: 190px; text-align: right;
+}
+#view { position: absolute; left: 0; right: 210px; top: 0; bottom: 0;
+ height: 100%;
+}
+
+/* Toolbar */
+
+#toolbar { font-size: 90%; line-height: 16px; list-style: none;
+ margin: 0 0 .5em; padding: 5px 5px 5px 3px;
+}
+#toolbar li { display: inline; }
+#toolbar button { background: transparent 2px 2px no-repeat; border: none;
+ color: #666; margin: 0; padding: 2px 1em 2px 22px; cursor: pointer;
+ font-size: 95%; font-weight: bold; line-height: 16px;
+}
+#toolbar button:hover { background-position: 2px -30px; color: #000; }
+#toolbar button:active { background-position: 2px -62px; color: #000; }
+#toolbar button.add { background-image: url(../image/add.gif); }
+#toolbar button.delete { background-image: url(../image/delete.gif); }
+#toolbar button.edit { background-image: url(../image/edit.gif); }
+#toolbar button.load { background-image: url(../image/load.gif); }
+#toolbar button.run { background-image: url(../image/run.gif); }
+#toolbar button.save { background-image: url(../image/save.gif); }
+
+/* Dialogs */
+
+#overlay { background: #bbb; cursor: wait; position: fixed; width: 100%;
+ height: 100%; top: 0; left: 0;
+}
+*html #overlay { position: absolute;
+ width: expression(document.body.clientWidth + 'px');
+ height: expression(document.body.clientHeight + 'px');
+}
+#dialog { background: #333 url(../image/spinner.gif) 50% 50% no-repeat;
+ color: #f4f4f4; overflow: hidden; opacity: .95; max-width: 33em;
+ padding: 1em 1em 0; -moz-border-radius: 7px; -webkit-border-radius: 7px;
+ -webkit-box-shadow: 4px 4px 6px #333;
+}
+*html #dialog { width: 33em; }
+#dialog.loading { width: 220px; height: 80px; }
+#dialog.loaded { background-image: none; }
+#dialog h2 { background: #666; border-top: 1px solid #555;
+ border-bottom: 1px solid #777; color: #ccc; font-size: 110%;
+ font-weight: bold; margin: 0 -2em; padding: .35em 2em;
+}
+#dialog fieldset { background: #222; border-top: 1px solid #111;
+ margin: 0 0 1em; padding: .5em 1em 1em;
+ -moz-border-radius-bottomleft: 7px; -moz-border-radius-bottomright: 7px;
+ -webkit-border-bottom-left-radius: 7px;
+ -webkit-border-bottom-right-radius: 7px;
+}
+#dialog p.help { color: #bbb; margin: 0 0 1em; }
+#dialog fieldset table { margin-top: 1em; }
+#dialog fieldset th, #dialog fieldset td { padding: .5em;
+ vertical-align: top;
+}
+#dialog fieldset th { color: #999; font-weight: bold;
+ text-align: right;
+}
+#dialog fieldset input { background: #e9e9e9; vertical-align: middle; }
+#dialog fieldset input.error { background: #f9e4e4; }
+#dialog fieldset div.error { padding-top: .3em; color: #b33; }
+#dialog .buttons { padding: 0 .5em .5em; text-align: right; }
+#dialog .buttons button { background: #444; border: 1px solid #aaa;
+ color: #ddd; cursor: pointer; font-size: 90%; font-weight: normal;
+ margin: 0 0 0 5px; padding: .2em 2em; -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
+#dialog .buttons button[type=submit] { font-weight: bold; }
+#dialog .buttons button:hover { background: #555; }
+#dialog .buttons button:active { background: #333; color: #fff; }
+
+/* View selector */
+
+#switch { color: #666; float: right; font-size: 90%; font-weight: bold;
+ line-height: 16px; padding: 5px;
+}
+#switch select { font-size: 90%; }
+
+/* View function editing */
+
+#viewcode { background: #fff; border: 1px solid;
+ border-color: #999 #ddd #ddd #999; margin: 0 0 1em; padding: 0 .5em;
+}
+#viewcode div { background-color: #e9e9e9; border: 1px solid;
+ border-color: #ddd #ddd #e9e9e9 #ddd; color: #333; margin: 0 -.5em;
+ padding: 0 .5em 2px;
+}
+#viewcode .top { color: #aaa; font-size: 95%; }
+#viewcode .top a { float: right; font-size: 90%; line-height: 1.4em;
+ padding: 2px 2px 0 0;
+}
+#viewcode .top a:link, #viewcode .top a:visited { color: #999; }
+#viewcode label { background: url(../image/twisty.gif) 0 3px no-repeat;
+ color: #666; cursor: pointer; display: block; padding: 2px 0 0 15px;
+}
+#viewcode div.bottom, #viewcode textarea { display: none; }
+#viewcode textarea { border: none; color: #333; max-width: 100%;
+ min-height: 50px; padding: .5em 0; width: 100%;
+}
+#viewcode div.bottom { border-bottom: none; padding: 1px 3px; }
+#viewcode div.bottom button { font-size: 90%; margin: 0 1em 0 0;
+ padding-left: 2em; padding-right: 2em;
+}
+*html #viewcode div.bottom button { padding: 0 .5em; }
+*+html #viewcode div.bottom button { padding: 0 .5em; }
+#viewcode div.bottom button.revert, #viewcode div.bottom button.save,
+#viewcode div.bottom button.saveas {
+ float: right; margin: 0 0 0 1em;
+}
+#viewcode div.bottom button.save { font-weight: bold; }
+#viewcode.expanded label { background-position: 0 -96px; }
+#viewcode.expanded textarea, #viewcode.expanded div.bottom { display: block; }
+
+/* Documents table */
+
+#documents thead th { width: 50%; }
+#documents tbody.content td { color: #666;
+ font: normal 11px "DejaVu Sans Mono",Monaco,monospace;
+}
+#documents tbody.content td.key { color: #333; }
+#documents tbody.content td.key a { display: block; }
+#documents tbody.content td.key em { font-style: normal; }
+#documents tbody.content td.key span.docid { color: #999;
+ font: normal 10px Arial,Helvetica,sans-serif;
+}
+#documents tbody.content td.value { font-size: 10px; }
+
+/* Document fields table */
+
+#fields { table-layout: fixed; }
+#fields col.field { width: 33%; }
+#fields tbody.content th { padding-left: 25px; padding-right: 48px; }
+#fields tbody.content th button {
+ background-image: url(../image/delete-mini.gif);
+}
+#fields tbody.content th b { display: block; padding: 2px; }
+#fields tbody.content td { padding-left: 14px; padding-right: 48px; }
+#fields tbody.content td code { color: #999; display: block; font-size: 11px;
+ padding: 2px;
+}
+#fields tbody.content td code.string { color: #393; }
+#fields tbody.content td code.number, #fields tbody.content td code.boolean {
+ color: #339;
+}
+#fields tbody.content td dl { margin: 0; padding: 0; }
+#fields tbody.content td dt {
+ background: transparent url(../image/toggle-collapse.gif) 0 3px no-repeat;
+ clear: left; cursor: pointer; line-height: 1em; margin-left: -12px;
+ padding-left: 14px;
+}
+#fields tbody.content td dd { line-height: 1em; margin: 0;
+ padding: 0 0 0 1em;
+}
+#fields tbody.content td dt.collapsed {
+ background-image: url(../image/toggle-expand.gif);
+}
+#fields tbody.content td dt.inline { background-image: none; cursor: default;
+ float: left; margin-left: 0; padding-left: 2px; padding-right: .5em;
+ padding-top: 2px;
+}
+#fields tbody.content input, #fields tbody.content textarea {
+ background: #fff; border: 1px solid; border-color: #999 #ddd #ddd #999;
+ margin: 0; padding: 1px; width: 100%;
+}
+#fields tbody.content th input { font-family: inherit; font-size: inherit;
+ font-weight: bold;
+}
+#fields tbody.content td input, #fields tbody.content td textarea {
+  font: normal 10px "DejaVu Sans Mono",Monaco,monospace;
+}
+#fields tbody.content input.invalid,
+#fields tbody.content textarea.invalid {
+ background: #f9f4f4; border-color: #b66 #ebb #ebb #b66;
+}
+#fields tbody.content div.tools { margin: 2px 2px 0; float: right;
+ margin-right: -45px;
+}
+#fields tbody.content div.tools button { background: transparent 0 0 no-repeat;
+ border: none; cursor: pointer; display: block; float: left; margin: 0 .2em;
+ width: 11px; height: 11px;
+}
+#fields tbody.content div.tools button:hover { background-position: 0 -22px; }
+#fields tbody.content div.tools button:active { background-position: 0 -44px; }
+#fields tbody.content div.tools button.apply {
+ background-image: url(../image/apply.gif);
+}
+#fields tbody.content div.tools button.cancel {
+ background-image: url(../image/cancel.gif);
+}
+#fields tbody.content div.error { color: #d33; }
+
+/* Test suite */
+
+#tests { table-layout: fixed; }
+#tests thead th.name { width: 20%; }
+#tests thead th.status { padding-left: 20px; width: 10em; }
+#tests thead th.duration { text-align: right; width: 7em; }
+#tests tbody.content th { cursor: help; padding-left: 25px;
+ white-space: nowrap;
+}
+#tests tbody.content th button {
+ background-image: url(../image/run-mini.gif);
+}
+#tests tbody.content td.duration { text-align: right; width: 6em; }
+#tests tbody.content td.status { background-position: 5px 8px;
+ background-repeat: no-repeat; color: #999; padding-left: 20px;
+}
+#tests tbody.content td.details { width: 50%; overflow: auto; }
+#tests tbody.content td.details a { border-bottom: 1px dashed #ccc;
+ color: #999; float: right; font-size: 85%;
+}
+#tests tbody.content td.details ol { color: #999; margin: 0;
+ padding: 0 0 0 1.5em;
+}
+#tests tbody.content td.details ol b { color: #333; font-weight: normal; }
+#tests tbody.content td.details ol code { color: #c00; font-size: 100%; }
+#tests tbody.content td.details ol code.error { white-space: pre; }
+#tests tbody.content td.running {
+ background-image: url(../image/running.gif); color: #333;
+}
+#tests tbody.content td.success {
+ background-image: url(../image/test_success.gif); color: #060;
+}
+#tests tbody.content td.error, #tests tbody.content td.failure {
+ background-image: url(../image/test_failure.gif); color: #c00;
+}
+
+/* Replication */
+
+form#replicator { background: #f4f4f4; border: 1px solid;
+ border-color: #999 #ccc #ccc #999; margin: .5em 1em 1.5em; padding: .5em;
+ -moz-border-radius: 7px; -webkit-border-radius: 7px;
+}
+form#replicator fieldset { float: left; padding: 1px; }
+form#replicator p.swap { float: left; margin: 2em 0 0; padding: 1px 1em; }
+form#replicator p.swap button { background: transparent; border: none;
+ color: #666; cursor: pointer; font-size: 150%;
+}
+form#replicator p.swap button:hover { color: #000; }
+form#replicator p.actions { padding: 1px; clear: left; margin: 0;
+ text-align: right;
+}
diff --git a/src/Makefile.am b/src/Makefile.am
new file mode 100644
index 00000000..7d156f74
--- /dev/null
+++ b/src/Makefile.am
@@ -0,0 +1,15 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+datarootdir = @prefix@/share
+
+SUBDIRS = couch_inets couchdb
diff --git a/src/couch_inets/Makefile.am b/src/couch_inets/Makefile.am
new file mode 100644
index 00000000..bd11093a
--- /dev/null
+++ b/src/couch_inets/Makefile.am
@@ -0,0 +1,177 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+datarootdir = @prefix@/share
+
+couchinetsebindir = $(erlanglibdir)/couch_inets-4.7.5/ebin
+couchinetsincludedir = $(erlanglibdir)/couch_inets-4.7.5/include
+
+couchinets_file_collection = \
+ ftp.erl \
+ ftp_progress.erl \
+ ftp_response.erl \
+ ftp_sup.erl \
+ http_base_64.erl \
+ httpc_handler.erl \
+ http_chunk.erl \
+ httpc_manager.erl \
+ http_cookie.erl \
+ httpc_request.erl \
+ httpc_response.erl \
+ httpc_sup.erl \
+ httpd_acceptor.erl \
+ httpd_acceptor_sup.erl \
+ httpd_cgi.erl \
+ httpd_conf.erl \
+ httpd.erl \
+ httpd_esi.erl \
+ httpd_example.erl \
+ httpd_instance_sup.erl \
+ httpd_manager.erl \
+ httpd_misc_sup.erl \
+ httpd_request.erl \
+ httpd_request_handler.erl \
+ httpd_response.erl \
+ httpd_script_env.erl \
+ httpd_socket.erl \
+ httpd_sup.erl \
+ httpd_util.erl \
+ http.erl \
+ http_request.erl \
+ http_response.erl \
+ http_transport.erl \
+ http_uri.erl \
+ http_util.erl \
+ inets_app.erl \
+ inets.erl \
+ inets_sup.erl \
+ mod_actions.erl \
+ mod_alias.erl \
+ mod_auth_dets.erl \
+ mod_auth.erl \
+ mod_auth_mnesia.erl \
+ mod_auth_plain.erl \
+ mod_auth_server.erl \
+ mod_browser.erl \
+ mod_cgi.erl \
+ mod_dir.erl \
+ mod_disk_log.erl \
+ mod_esi.erl \
+ mod_get.erl \
+ mod_head.erl \
+ mod_htaccess.erl \
+ mod_include.erl \
+ mod_log.erl \
+ mod_range.erl \
+ mod_responsecontrol.erl \
+ mod_security.erl \
+ mod_security_server.erl \
+ mod_trace.erl \
+ tftp_binary.erl \
+ tftp_engine.erl \
+ tftp.erl \
+ tftp_file.erl \
+ tftp_lib.erl \
+ tftp_sup.erl
+
+couchinetsebin_static_file = couch_inets.app
+
+couchinetsebin_generated_file_list = \
+ ftp.beam \
+ ftp_progress.beam \
+ ftp_response.beam \
+ ftp_sup.beam \
+ http_base_64.beam \
+ http.beam \
+ httpc_handler.beam \
+ http_chunk.beam \
+ httpc_manager.beam \
+ http_cookie.beam \
+ httpc_request.beam \
+ httpc_response.beam \
+ httpc_sup.beam \
+ httpd_acceptor.beam \
+ httpd_acceptor_sup.beam \
+ httpd.beam \
+ httpd_cgi.beam \
+ httpd_conf.beam \
+ httpd_esi.beam \
+ httpd_example.beam \
+ httpd_instance_sup.beam \
+ httpd_manager.beam \
+ httpd_misc_sup.beam \
+ httpd_request.beam \
+ httpd_request_handler.beam \
+ httpd_response.beam \
+ httpd_script_env.beam \
+ httpd_socket.beam \
+ httpd_sup.beam \
+ httpd_util.beam \
+ http_request.beam \
+ http_response.beam \
+ http_transport.beam \
+ http_uri.beam \
+ http_util.beam \
+ inets_app.beam \
+ inets.beam \
+ inets_sup.beam \
+ mod_actions.beam \
+ mod_alias.beam \
+ mod_auth.beam \
+ mod_auth_dets.beam \
+ mod_auth_mnesia.beam \
+ mod_auth_plain.beam \
+ mod_auth_server.beam \
+ mod_browser.beam \
+ mod_cgi.beam \
+ mod_dir.beam \
+ mod_disk_log.beam \
+ mod_esi.beam \
+ mod_get.beam \
+ mod_head.beam \
+ mod_htaccess.beam \
+ mod_include.beam \
+ mod_log.beam \
+ mod_range.beam \
+ mod_responsecontrol.beam \
+ mod_security.beam \
+ mod_security_server.beam \
+ mod_trace.beam \
+ tftp.beam \
+ tftp_binary.beam \
+ tftp_engine.beam \
+ tftp_file.beam \
+ tftp_lib.beam \
+ tftp_sup.beam
+
+couchinetsinclude_DATA = \
+ ftp_internal.hrl \
+ httpc_internal.hrl \
+ httpd.hrl \
+ http_internal.hrl \
+ inets_internal.hrl \
+ mod_auth.hrl \
+ tftp.hrl
+
+couchinetsebin_DATA = \
+ $(couchinetsebin_static_file) \
+ $(couchinetsebin_generated_file_list)
+
+EXTRA_DIST = \
+ $(couchinets_file_collection) \
+ $(couchinetsinclude_DATA) \
+ $(couchinetsebin_static_file)
+
+CLEANFILES = $(couchinetsebin_generated_file_list)
+
+%.beam: %.erl
+ erlc $<
diff --git a/src/couch_inets/couch_inets.app b/src/couch_inets/couch_inets.app
new file mode 100644
index 00000000..cdbd0f9b
--- /dev/null
+++ b/src/couch_inets/couch_inets.app
@@ -0,0 +1,84 @@
+{application,couch_inets,
+ [{description,"Couch INETS"},
+ {vsn,"4.7.5"},
+ {modules,[
+ inets,
+ inets_sup,
+ inets_app,
+
+ %% FTP
+ ftp,
+ ftp_progress,
+ ftp_response,
+ ftp_sup,
+
+ %% HTTP client:
+ http,
+ httpc_handler,
+ httpc_manager,
+ httpc_request,
+ httpc_response,
+ httpc_sup,
+ http_cookie,
+ http_uri,
+
+ %% HTTP used by both client and server
+ http_base_64,
+ http_chunk,
+ http_request,
+ http_response,
+ http_transport,
+ http_util,
+
+ %% HTTP server:
+ httpd,
+ httpd_acceptor,
+ httpd_acceptor_sup,
+ httpd_cgi,
+ httpd_conf,
+ httpd_esi,
+ httpd_example,
+ httpd_instance_sup,
+ httpd_manager,
+ httpd_misc_sup,
+ httpd_request,
+ httpd_request_handler,
+ httpd_response,
+ httpd_script_env,
+ httpd_socket,
+ httpd_sup,
+ httpd_util,
+ mod_actions,
+ mod_alias,
+ mod_auth,
+ mod_auth_dets,
+ mod_auth_mnesia,
+ mod_auth_plain,
+ mod_auth_server,
+ mod_browser,
+ mod_cgi,
+ mod_dir,
+ mod_disk_log,
+ mod_esi,
+ mod_get,
+ mod_head,
+ mod_htaccess,
+ mod_include,
+ mod_log,
+ mod_range,
+ mod_responsecontrol,
+ mod_security,
+ mod_security_server,
+ mod_trace,
+
+ %% TFTP
+ tftp,
+ tftp_binary,
+ tftp_engine,
+ tftp_file,
+ tftp_lib,
+ tftp_sup
+ ]},
+ {registered,[inets_sup, httpc_manager]},
+ {applications,[kernel,stdlib]},
+ {mod,{inets_app,[]}}]}.
diff --git a/src/couch_inets/ftp.erl b/src/couch_inets/ftp.erl
new file mode 100644
index 00000000..92483943
--- /dev/null
+++ b/src/couch_inets/ftp.erl
@@ -0,0 +1,1597 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%% Description: This module implements an ftp client, RFC 959.
+%% It also supports ipv6 RFC 2428.
+
+-module(ftp).
+
+-behaviour(gen_server).
+
+%% API - Client interface
+-export([cd/2, close/1, delete/2, formaterror/1,
+ lcd/2, lpwd/1, ls/1, ls/2,
+ mkdir/2, nlist/1, nlist/2,
+ open/1, open/2, open/3, force_active/1,
+ pwd/1, quote/2,
+ recv/2, recv/3, recv_bin/2,
+ recv_chunk_start/2, recv_chunk/1,
+ rename/3, rmdir/2,
+ send/2, send/3, send_bin/3,
+ send_chunk_start/2, send_chunk/2, send_chunk_end/1,
+ type/2, user/3, user/4, account/2,
+ append/3, append/2, append_bin/3,
+ append_chunk/2, append_chunk_end/1, append_chunk_start/2]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2,
+ handle_info/2, terminate/2, code_change/3]).
+
+%% supervisor callbacks
+-export([start_link_sup/1]).
+
+-include("ftp_internal.hrl").
+
+%% Constants used in internal state definition
+-define(CONNECTION_TIMEOUT, 60*1000).
+-define(DEFAULT_MODE, passive).
+
+%% Internal Constants
+-define(FTP_PORT, 21).
+-define(FILE_BUFSIZE, 4096).
+
+%% Internal state
+-record(state, {
+ csock = undefined, % socket() - Control connection socket
+ dsock = undefined, % socket() - Data connection socket
+ verbose = false, % boolean()
+ ldir = undefined, % string() - Current local directory
+ type = ftp_server_default, % atom() - binary | ascii
+ chunk = false, % boolean() - Receiving data chunks
+ mode = ?DEFAULT_MODE, % passive | active
+ timeout = ?CONNECTION_TIMEOUT, % integer()
+ %% Data received so far on the data connection
+ data = <<>>, % binary()
+ %% Data received so far on the control connection
+ %% {BinStream, AccLines}. If a binary sequence
+ %% ends with ?CR then keep it in the binary to
+ %% be able to detect if the next received byte is ?LF
+ %% and hence the end of the response is reached!
+ ctrl_data = {<<>>, [], start}, % {binary(), [bytes()], LineStatus}
+ %% pid() - Client pid (note not the same as "From")
+ owner = undefined,
+ client = undefined, % "From" to be used in gen_server:reply/2
+ %% Function that activated a connection and maybe some
+ %% data needed further on.
+ caller = undefined, % term()
+ ip_v6_disabled, % boolean()
+ progress = ignore % ignore | pid()
+ }).
+
+%%%=========================================================================
+%%% API - CLIENT FUNCTIONS
+%%%=========================================================================
+%%--------------------------------------------------------------------------
+%% open(Host, <Port>, <Flags>) -> {ok, Pid} | {error, ehost}
+%% Host = string(),
+%% Port = integer(),
+%% Flags = [Flag],
+%% Flag = verbose | debug | trace
+%%
+%% Description: Start an ftp client and connect to a host.
+%%--------------------------------------------------------------------------
+%% The only option was the host in textual form
+open({option_list, Options})->
+ ensure_started(),
+ Flags = key_search(flags, Options, []),
+ {ok, Pid} = ftp_sup:start_child([[[{client, self()}, Flags], []]]),
+ call(Pid, {open, ip_comm, Options}, pid);
+
+%% The only option was the tuple form of the ip-number
+open(Host) when tuple(Host) ->
+ open(Host, ?FTP_PORT, []);
+
+%% Host is the string form of the hostname
+open(Host)->
+ open(Host, ?FTP_PORT, []).
+
+open(Host, Port) when integer(Port) ->
+ open(Host, Port, []);
+
+open(Host, Flags) when list(Flags) ->
+ open(Host, ?FTP_PORT, Flags).
+
+open(Host, Port, Flags) when integer(Port), list(Flags) ->
+ ensure_started(),
+ {ok, Pid} = ftp_sup:start_child([[[{client, self()}, Flags], []]]),
+ Opts = [{host, Host}, {port, Port}| Flags],
+ call(Pid, {open, ip_comm, Opts}, pid).
+
+%%--------------------------------------------------------------------------
+%% user(Pid, User, Pass, <Acc>) -> ok | {error, euser} | {error, econn}
+%% | {error, eacct}
+%% Pid = pid(),
+%% User = Pass = Acc = string()
+%%
+%% Description: Login with or without a supplied account name.
+%%--------------------------------------------------------------------------
+user(Pid, User, Pass) ->
+ call(Pid, {user, User, Pass}, atom).
+
+user(Pid, User, Pass, Acc) ->
+ call(Pid, {user, User, Pass, Acc}, atom).
+
+%%--------------------------------------------------------------------------
+%% account(Pid, Acc) -> ok | {error, eacct}
+%% Pid = pid()
+%% Acc= string()
+%%
+%% Description: Set a user Account.
+%%--------------------------------------------------------------------------
+account(Pid, Acc) ->
+ call(Pid, {account, Acc}, atom).
+
+%%--------------------------------------------------------------------------
+%% pwd(Pid) -> {ok, Dir} | {error, elogin} | {error, econn}
+%% Pid = pid()
+%% Dir = string()
+%%
+%% Description: Get the current working directory at remote server.
+%%--------------------------------------------------------------------------
+pwd(Pid) ->
+ call(Pid, pwd, ctrl).
+
+%%--------------------------------------------------------------------------
+%% lpwd(Pid) -> {ok, Dir} | {error, elogin}
+%% Pid = pid()
+%% Dir = string()
+%%
+%% Description: Get the current working directory at local server.
+%%--------------------------------------------------------------------------
+lpwd(Pid) ->
+ call(Pid, lpwd, string).
+
+%%--------------------------------------------------------------------------
+%% cd(Pid, Dir) -> ok | {error, epath} | {error, elogin} | {error, econn}
+%% Pid = pid()
+%% Dir = string()
+%%
+%% Description: Change current working directory at remote server.
+%%--------------------------------------------------------------------------
+cd(Pid, Dir) ->
+ call(Pid, {cd, Dir}, atom).
+
+%%--------------------------------------------------------------------------
+%% lcd(Pid, Dir) -> ok | {error, epath}
+%% Pid = pid()
+%% Dir = string()
+%%
+%% Description: Change current working directory for the local client.
+%%--------------------------------------------------------------------------
+lcd(Pid, Dir) ->
+ call(Pid, {lcd, Dir}, string).
+
+%%--------------------------------------------------------------------------
+%% ls(Pid, <Dir>) -> {ok, Listing} | {error, epath} | {error, elogin} |
+%% {error, econn}
+%% Pid = pid()
+%% Dir = string()
+%% Listing = string()
+%%
+%% Description: List the contents of current directory (ls/1) or
+%% directory Dir (ls/2) at remote server.
+%%--------------------------------------------------------------------------
+ls(Pid) ->
+ ls(Pid, "").
+ls(Pid, Dir) ->
+ call(Pid, {dir, long, Dir}, string).
+
+%%--------------------------------------------------------------------------
+%% nlist(Pid, <Dir>) -> {ok, Listing} | {error, epath} | {error, elogin} |
+%% {error, econn}
+%% Pid = pid()
+%% Dir = string()
+%%
+%% Description: List the contents of current directory (nlist/1) or directory
+%%              Dir (nlist/2) at remote server. The returned list is a stream
+%% of file names.
+%%--------------------------------------------------------------------------
+nlist(Pid) ->
+ nlist(Pid, "").
+nlist(Pid, Dir) ->
+ call(Pid, {dir, short, Dir}, string).
+
+%%--------------------------------------------------------------------------
+%% rename(Pid, CurrFile, NewFile) -> ok | {error, epath} | {error, elogin}
+%% | {error, econn}
+%% Pid = pid()
+%% CurrFile = NewFile = string()
+%%
+%% Description: Rename a file at remote server.
+%%--------------------------------------------------------------------------
+rename(Pid, CurrFile, NewFile) ->
+ call(Pid, {rename, CurrFile, NewFile}, string).
+
+%%--------------------------------------------------------------------------
+%% delete(Pid, File) -> ok | {error, epath} | {error, elogin} |
+%% {error, econn}
+%% Pid = pid()
+%% File = string()
+%%
+%% Description: Remove file at remote server.
+%%--------------------------------------------------------------------------
+delete(Pid, File) ->
+ call(Pid, {delete, File}, string).
+
+%%--------------------------------------------------------------------------
+%% mkdir(Pid, Dir) -> ok | {error, epath} | {error, elogin} | {error, econn}
+%% Pid = pid(),
+%% Dir = string()
+%%
+%% Description: Make directory at remote server.
+%%--------------------------------------------------------------------------
+mkdir(Pid, Dir) ->
+ call(Pid, {mkdir, Dir}, atom).
+
+%%--------------------------------------------------------------------------
+%% rmdir(Pid, Dir) -> ok | {error, epath} | {error, elogin} | {error, econn}
+%% Pid = pid(),
+%% Dir = string()
+%%
+%% Description: Remove directory at remote server.
+%%--------------------------------------------------------------------------
+rmdir(Pid, Dir) ->
+ call(Pid, {rmdir, Dir}, atom).
+
+%%--------------------------------------------------------------------------
+%% type(Pid, Type) -> ok | {error, etype} | {error, elogin} | {error, econn}
+%% Pid = pid()
+%% Type = ascii | binary
+%%
+%% Description: Set transfer type.
+%%--------------------------------------------------------------------------
+type(Pid, Type) ->
+ call(Pid, {type, Type}, atom).
+
+%%--------------------------------------------------------------------------
+%% recv(Pid, RemoteFileName <LocalFileName>) -> ok | {error, epath} |
+%% {error, elogin} | {error, econn}
+%% Pid = pid()
+%% RemoteFileName = LocalFileName = string()
+%%
+%% Description: Transfer file from remote server.
+%%--------------------------------------------------------------------------
+recv(Pid, RemotFileName) ->
+ recv(Pid, RemotFileName, RemotFileName).
+
+recv(Pid, RemotFileName, LocalFileName) ->
+ call(Pid, {recv, RemotFileName, LocalFileName}, atom).
+
+%%--------------------------------------------------------------------------
+%% recv_bin(Pid, RemoteFile) -> {ok, Bin} | {error, epath} | {error, elogin}
+%% | {error, econn}
+%% Pid = pid()
+%% RemoteFile = string()
+%% Bin = binary()
+%%
+%% Description: Transfer file from remote server into binary.
+%%--------------------------------------------------------------------------
+recv_bin(Pid, RemoteFile) ->
+ call(Pid, {recv_bin, RemoteFile}, bin).
+
+%%--------------------------------------------------------------------------
+%% recv_chunk_start(Pid, RemoteFile) -> ok | {error, elogin} | {error, epath}
+%% | {error, econn}
+%% Pid = pid()
+%% RemoteFile = string()
+%%
+%% Description: Start receive of chunks of remote file.
+%%--------------------------------------------------------------------------
+recv_chunk_start(Pid, RemoteFile) ->
+ call(Pid, {recv_chunk_start, RemoteFile}, atom).
+
+%%--------------------------------------------------------------------------
+%% recv_chunk(Pid) -> ok | {ok, Bin} | {error, Reason}
+%% Pid = pid()
+%%
+%%
+%% Description: Transfer file from remote server into binary in chunks
+%%--------------------------------------------------------------------------
+recv_chunk(Pid) ->
+ call(Pid, recv_chunk, atom).
+
+%%--------------------------------------------------------------------------
+%% send(Pid, LocalFileName <RemotFileName>) -> ok | {error, epath}
+%% | {error, elogin}
+%% | {error, econn}
+%% Pid = pid()
+%% LocalFileName = RemotFileName = string()
+%%
+%% Description: Transfer file to remote server.
+%%--------------------------------------------------------------------------
+send(Pid, LocalFileName) ->
+ send(Pid, LocalFileName, LocalFileName).
+
+send(Pid, LocalFileName, RemotFileName) ->
+ call(Pid, {send, LocalFileName, RemotFileName}, atom).
+
+%%--------------------------------------------------------------------------
+%% send_bin(Pid, Bin, RemoteFile) -> ok | {error, epath} | {error, elogin}
+%% | {error, enotbinary} | {error, econn}
+%% Pid = pid()
+%% Bin = binary()
+%% RemoteFile = string()
+%%
+%% Description: Transfer a binary to a remote file.
+%%--------------------------------------------------------------------------
+send_bin(Pid, Bin, RemoteFile) when binary(Bin) ->
+ call(Pid, {send_bin, Bin, RemoteFile}, atom);
+send_bin(_Pid, _Bin, _RemoteFile) ->
+ {error, enotbinary}.
+
+%%--------------------------------------------------------------------------
+%% send_chunk_start(Pid, RemoteFile) -> ok | {error, elogin} | {error, epath}
+%% | {error, econn}
+%% Pid = pid()
+%% RemoteFile = string()
+%%
+%% Description: Start transfer of chunks to remote file.
+%%--------------------------------------------------------------------------
+send_chunk_start(Pid, RemoteFile) ->
+ call(Pid, {send_chunk_start, RemoteFile}, atom).
+
+%%--------------------------------------------------------------------------
+%% append_chunk_start(Pid, RemoteFile) -> ok | {error, elogin} |
+%% {error, epath} | {error, econn}
+%% Pid = pid()
+%% RemoteFile = string()
+%%
+%% Description: Start append chunks of data to remote file.
+%%--------------------------------------------------------------------------
+append_chunk_start(Pid, RemoteFile) ->
+ call(Pid, {append_chunk_start, RemoteFile}, atom).
+
+%%--------------------------------------------------------------------------
+%% send_chunk(Pid, Bin) -> ok | {error, elogin} | {error, enotbinary}
+%% | {error, echunk} | {error, econn}
+%% Pid = pid()
+%% Bin = binary().
+%%
+%% Purpose: Send chunk to remote file.
+%%--------------------------------------------------------------------------
+send_chunk(Pid, Bin) when binary(Bin) ->
+ call(Pid, {transfer_chunk, Bin}, atom);
+send_chunk(_Pid, _Bin) ->
+ {error, enotbinary}.
+
+%%--------------------------------------------------------------------------
+%% append_chunk(Pid, Bin) -> ok | {error, elogin} | {error, enotbinary}
+%% | {error, echunk} | {error, econn}
+%% Pid = pid()
+%% Bin = binary()
+%%
+%% Description: Append chunk to remote file.
+%%--------------------------------------------------------------------------
+append_chunk(Pid, Bin) when binary(Bin) ->
+ call(Pid, {transfer_chunk, Bin}, atom);
+append_chunk(_Pid, _Bin) ->
+ {error, enotbinary}.
+
+%%--------------------------------------------------------------------------
+%% send_chunk_end(Pid) -> ok | {error, elogin} | {error, echunk}
+%% | {error, econn}
+%% Pid = pid()
+%%
+%% Description: End sending of chunks to remote file.
+%%--------------------------------------------------------------------------
+send_chunk_end(Pid) ->
+ call(Pid, chunk_end, atom).
+
+%%--------------------------------------------------------------------------
+%% append_chunk_end(Pid) -> ok | {error, elogin} | {error, echunk}
+%% | {error, econn}
+%% Pid = pid()
+%%
+%% Description: End appending of chunks to remote file.
+%%--------------------------------------------------------------------------
+append_chunk_end(Pid) ->
+ call(Pid, chunk_end, atom).
+
+%%--------------------------------------------------------------------------
+%% append(Pid, LocalFileName, RemotFileName) -> ok | {error, epath}
+%% | {error, elogin} | {error, econn}
+%% Pid = pid()
+%% LocalFileName = RemotFileName = string()
+%%
+%% Description: Append the local file to the remote file
+%%--------------------------------------------------------------------------
+append(Pid, LocalFileName) ->
+ append(Pid, LocalFileName, LocalFileName).
+
+append(Pid, LocalFileName, RemotFileName) ->
+ call(Pid, {append, LocalFileName, RemotFileName}, atom).
+
+%%--------------------------------------------------------------------------
+%% append_bin(Pid, Bin, RemoteFile) -> ok | {error, epath} | {error, elogin}
+%% | {error, enotbinary} | {error, econn}
+%% Pid = pid()
+%% Bin = binary()
+%% RemoteFile = string()
+%%
+%% Purpose: Append a binary to a remote file.
+%%--------------------------------------------------------------------------
+append_bin(Pid, Bin, RemoteFile) when binary(Bin) ->
+ call(Pid, {append_bin, Bin, RemoteFile}, atom);
+append_bin(_Pid, _Bin, _RemoteFile) ->
+ {error, enotbinary}.
+
+%%--------------------------------------------------------------------------
+%% quote(Pid, Cmd) -> ok
+%% Pid = pid()
+%% Cmd = string()
+%%
+%% Description: Send arbitrary ftp command.
+%%--------------------------------------------------------------------------
+quote(Pid, Cmd) when list(Cmd) ->
+ call(Pid, {quote, Cmd}, atom).
+
+%%--------------------------------------------------------------------------
+%% close(Pid) -> ok
+%% Pid = pid()
+%%
+%% Description: End the ftp session.
+%%--------------------------------------------------------------------------
+close(Pid) ->
+ cast(Pid, close),
+ ok.
+
+%%--------------------------------------------------------------------------
+%% force_active(Pid) -> ok
+%% Pid = pid()
+%%
+%% Description: Force connection to use active mode.
+%%--------------------------------------------------------------------------
+force_active(Pid) ->
+ error_logger:info_report("This function is deprecated use the mode flag "
+ "to open/[1,2,3] instead", []),
+ call(Pid, force_active, atom).
+
+%%--------------------------------------------------------------------------
+%% formaterror(Tag) -> string()
+%% Tag = atom() | {error, atom()}
+%%
+%% Description: Return diagnostics.
+%%--------------------------------------------------------------------------
+formaterror(Tag) ->
+ ftp_response:error_string(Tag).
+
+%%%========================================================================
+%%% gen_server callback functions
+%%%========================================================================
+
+%%-------------------------------------------------------------------------
+%% init(Args) -> {ok, State} | {ok, State, Timeout} | {stop, Reason}
+%% Description: Initiates the erlang process that manages a ftp connection.
+%%-------------------------------------------------------------------------
+%% gen_server init: traps exits so terminate/2 runs on crashes, monitors
+%% the client so the connection dies with its owner, and optionally sets
+%% up dbg tracing before building the initial #state{}.
+init([{client, ClientPid}, Flags]) ->
+ process_flag(trap_exit, true),
+ erlang:monitor(process, ClientPid),
+ inet_db:start(),
+ {ok, LDir} = file:get_cwd(),
+ %% debug => trace exported functions (tp); trace => also locals (tpl).
+ State = case is_debug(Flags) or is_trace(Flags) of
+ true ->
+ dbg:tracer(),
+ dbg:p(all, [call]),
+ case is_debug(Flags) of
+ true ->
+ dbg:tp(ftp, [{'_', [], [{return_trace}]}]),
+ dbg:tp(ftp_response, [{'_', [],
+ [{return_trace}]}]),
+ dbg:tp(ftp_progress, [{'_', [],
+ [{return_trace}]}]);
+ false -> %trace
+ dbg:tpl(ftp, [{'_', [], [{return_trace}]}]),
+ dbg:tpl(ftp_response, [{'_', [],
+ [{return_trace}]}]),
+ dbg:tpl(ftp_progress, [{'_', [],
+ [{return_trace}]}])
+ end,
+ #state{ldir = LDir};
+ false ->
+ case is_verbose(Flags) of
+ true ->
+ #state{verbose = true, ldir = LDir};
+ false ->
+ #state{ldir = LDir}
+ end
+ end,
+ %% Low priority: the ftp process is I/O bound and should not starve
+ %% the client application.
+ process_flag(priority, low),
+ {ok, State#state{owner = ClientPid,
+ ip_v6_disabled = is_ipv6_disabled(Flags)}}.
+
+
+%%--------------------------------------------------------------------------
+%% handle_call(Request, From, State) -> {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} |
+%% Description: Handle incoming requests.
+%%-------------------------------------------------------------------------
+%% Only the process that opened the connection may issue requests; the
+%% request tuple always carries the caller's pid as its first element.
+handle_call({Pid, _}, _, #state{owner = Owner} = State) when Owner =/= Pid ->
+ {reply, {error, not_connection_owner}, State};
+
+%% Open the control connection. host is mandatory; mode/port/timeout are
+%% validated against the defaults before the TCP setup is started.
+handle_call({_, {open, ip_comm, Opts}}, From, State) ->
+ case key_search(host, Opts, undefined) of
+ undefined ->
+ {stop, normal, {error, ehost}, State};
+ Host ->
+ IsPosInt = fun(Int) when is_integer(Int), Int > 0 ->
+ true;
+ (_) ->
+ false
+ end,
+
+ IsModeAtom = fun(active) ->
+ true;
+ (passive) ->
+ true;
+ (_) ->
+ false
+ end,
+
+ Mode = check_option(IsModeAtom,
+ key_search(mode, Opts, ?DEFAULT_MODE),
+ ?DEFAULT_MODE),
+ Port = check_option(IsPosInt, key_search(port, Opts, ?FTP_PORT),
+ ?FTP_PORT),
+ Timeout = check_option(IsPosInt, key_search(timeout, Opts,
+ ?CONNECTION_TIMEOUT),
+ ?CONNECTION_TIMEOUT),
+ ProgressOptions = key_search(progress, Opts, ignore),
+
+ setup_ctrl_connection(Host, Port, Timeout,
+ State#state{client = From, mode = Mode,
+ progress =
+ progress(ProgressOptions)})
+ end;
+
+handle_call({_, force_active}, _, State) ->
+ {reply, ok, State#state{mode = active}};
+
+handle_call({_, {user, User, Password}}, From, State) ->
+ handle_user(User, Password, "", State#state{client = From});
+
+handle_call({_, {user, User, Password, Acc}}, From, State) ->
+ handle_user(User, Password, Acc, State#state{client = From});
+
+handle_call({_, {account, Acc}}, From, State)->
+ handle_user_account(Acc, State#state{client = From});
+
+%% The chunk = false match on the clauses below means ordinary commands
+%% are refused during a chunked transfer - the chunk = true catch-all
+%% near the end of this function replies {error, echunk}.
+handle_call({_, pwd}, From, #state{chunk = false} = State) ->
+ send_ctrl_message(State, mk_cmd("PWD", [])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{client = From, caller = pwd}};
+
+handle_call({_, lpwd}, From, #state{ldir = LDir} = State) ->
+ {reply, {ok, LDir}, State#state{client = From}};
+
+handle_call({_, {cd, Dir}}, From, #state{chunk = false}
+ = State) ->
+ send_ctrl_message(State, mk_cmd("CWD ~s", [Dir])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{client = From, caller = cd}};
+
+%% Local cd: purely client side, no server round trip.
+handle_call({_,{lcd, Dir}}, _From, #state{ldir = LDir0} = State) ->
+ LDir = filename:absname(Dir, LDir0),
+ case file:read_file_info(LDir) of %% FIX better check that LDir is a dir.
+ {ok, _ } ->
+ {reply, ok, State#state{ldir = LDir}};
+ _ ->
+ {reply, {error, epath}, State}
+ end;
+
+handle_call({_, {dir, Len, Dir}}, {_Pid, _} = From,
+ #state{chunk = false} = State) ->
+ setup_data_connection(State#state{caller = {dir, Dir, Len},
+ client = From});
+%% Renaming is a two-step exchange: RNFR now, RNTO after the server's
+%% intermediate reply (see handle_ctrl_result's rename clauses).
+handle_call({_, {rename, CurrFile, NewFile}}, From,
+ #state{chunk = false} = State) ->
+ send_ctrl_message(State, mk_cmd("RNFR ~s", [CurrFile])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = {rename, NewFile}, client = From}};
+
+handle_call({_, {delete, File}}, {_Pid, _} = From,
+ #state{chunk = false} = State) ->
+ send_ctrl_message(State, mk_cmd("DELE ~s", [File])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{client = From}};
+
+handle_call({_, {mkdir, Dir}}, From, #state{chunk = false} = State) ->
+ send_ctrl_message(State, mk_cmd("MKD ~s", [Dir])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{client = From}};
+
+handle_call({_,{rmdir, Dir}}, From, #state{chunk = false} = State) ->
+ send_ctrl_message(State, mk_cmd("RMD ~s", [Dir])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{client = From}};
+
+handle_call({_,{type, Type}}, From, #state{chunk = false}
+ = State) ->
+ case Type of
+ ascii ->
+ send_ctrl_message(State, mk_cmd("TYPE A", [])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = type, type = ascii,
+ client = From}};
+ binary ->
+ send_ctrl_message(State, mk_cmd("TYPE I", [])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = type, type = binary,
+ client = From}};
+ _ ->
+ {reply, {error, etype}, State}
+ end;
+
+%% Receive into a local file: the file is opened before the data
+%% connection is set up so an unwritable path fails fast with epath.
+handle_call({_,{recv, RemoteFile, LocalFile}}, From,
+ #state{chunk = false, ldir = LocalDir} = State) ->
+ progress_report({remote_file, RemoteFile}, State),
+ NewLocalFile = filename:absname(LocalFile, LocalDir),
+
+ case file_open(NewLocalFile, write) of
+ {ok, Fd} ->
+ setup_data_connection(State#state{client = From,
+ caller =
+ {recv_file,
+ RemoteFile, Fd}});
+ {error, _What} ->
+ {reply, {error, epath}, State}
+ end;
+
+handle_call({_, {recv_bin, RemoteFile}}, From, #state{chunk = false} =
+ State) ->
+ setup_data_connection(State#state{caller = {recv_bin, RemoteFile},
+ client = From});
+
+handle_call({_,{recv_chunk_start, RemoteFile}}, From, #state{chunk = false}
+ = State) ->
+ setup_data_connection(State#state{caller = {start_chunk_transfer,
+ "RETR", RemoteFile},
+ client = From});
+
+%% Clause order matters: the chunk = false case must come first so a
+%% recv_chunk without a preceding recv_chunk_start gets a clear error.
+handle_call({_, recv_chunk}, _, #state{chunk = false} = State) ->
+ {reply, {error, "ftp:recv_chunk_start/2 not called"}, State};
+
+handle_call({_, recv_chunk}, From, #state{chunk = true} = State) ->
+ activate_data_connection(State),
+ {noreply, State#state{client = From, caller = recv_chunk}};
+
+handle_call({_, {send, LocalFile, RemoteFile}}, From,
+ #state{chunk = false, ldir = LocalDir} = State) ->
+ progress_report({local_file, filename:absname(LocalFile, LocalDir)},
+ State),
+ setup_data_connection(State#state{caller = {transfer_file,
+ {"STOR",
+ LocalFile, RemoteFile}},
+ client = From});
+handle_call({_, {append, LocalFile, RemoteFile}}, From,
+ #state{chunk = false} = State) ->
+ setup_data_connection(State#state{caller = {transfer_file,
+ {"APPE",
+ LocalFile, RemoteFile}},
+ client = From});
+handle_call({_, {send_bin, Bin, RemoteFile}}, From,
+ #state{chunk = false} = State) ->
+ setup_data_connection(State#state{caller = {transfer_data,
+ {"STOR", Bin, RemoteFile}},
+ client = From});
+handle_call({_,{append_bin, Bin, RemoteFile}}, From,
+ #state{chunk = false} = State) ->
+ setup_data_connection(State#state{caller = {transfer_data,
+ {"APPE", Bin, RemoteFile}},
+ client = From});
+handle_call({_, {send_chunk_start, RemoteFile}}, From, #state{chunk = false}
+ = State) ->
+ setup_data_connection(State#state{caller = {start_chunk_transfer,
+ "STOR", RemoteFile},
+ client = From});
+handle_call({_, {append_chunk_start, RemoteFile}}, From, #state{chunk = false}
+ = State) ->
+ setup_data_connection(State#state{caller = {start_chunk_transfer,
+ "APPE", RemoteFile},
+ client = From});
+%% During a chunked upload raw data goes straight out on the data socket.
+handle_call({_, {transfer_chunk, Bin}}, _, #state{chunk = true} = State) ->
+ send_data_message(State, Bin),
+ {reply, ok, State};
+
+handle_call({_, chunk_end}, From, #state{chunk = true} = State) ->
+ close_data_connection(State),
+ activate_ctrl_connection(State),
+ {noreply, State#state{client = From, dsock = undefined,
+ caller = end_chunk_transfer, chunk = false}};
+
+handle_call({_, {quote, Cmd}}, From, #state{chunk = false} = State) ->
+ send_ctrl_message(State, mk_cmd(Cmd, [])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{client = From, caller = quote}};
+
+%% Any other request while a chunk transfer is active is rejected.
+handle_call(_, _, #state{chunk = true} = State) ->
+ {reply, {error, echunk}, State};
+
+%% Catch all - This can only happen if the application programmer writes
+%% really bad code that violates the API.
+handle_call(Request, _Timeout, State) ->
+ {stop, {'API_violation_connection_closed', Request},
+ {error, {connection_terminated, 'API_violation'}}, State}.
+
+%%--------------------------------------------------------------------------
+%% handle_cast(Request, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State}
+%% Description: Handles cast messages.
+%%-------------------------------------------------------------------------
+%% Orderly shutdown requested by the owner: QUIT is sent, both sockets
+%% are closed and the process stops normally.
+handle_cast({Pid, close}, #state{owner = Pid} = State) ->
+ send_ctrl_message(State, mk_cmd("QUIT", [])),
+ close_ctrl_connection(State),
+ close_data_connection(State),
+ {stop, normal, State#state{csock = undefined, dsock = undefined}};
+
+%% Close attempts from any other process are logged and ignored.
+%% (error_logger:info_report/2 is (Type, Report); use info_msg/2 for a
+%% format string with arguments.)
+handle_cast({Pid, close}, State) ->
+ error_logger:info_msg("A non-owner process ~p tried to close an "
+ "ftp connection: ~n", [Pid]),
+ {noreply, State};
+
+%% Catch all - This can only happen if the application programmer writes
+%% really bad code that violates the API.
+%% (Reason atom spelling fixed to match the handle_call catch-all.)
+handle_cast(Msg, State) ->
+ {stop, {'API_violation_connection_closed', Msg}, State}.
+
+%%--------------------------------------------------------------------------
+%% handle_info(Msg, State) -> {noreply, State} | {noreply, State, Timeout} |
+%% {stop, Reason, State}
+%% Description: Handles tcp messages from the ftp-server.
+%% Note: The order of the function clauses is significant.
+%%--------------------------------------------------------------------------
+
+%% The open timeout (set by setup_ctrl_connection/4) expired before the
+%% server answered: give up.
+handle_info(timeout, #state{caller = open} = State) ->
+ {stop, timeout, State};
+
+handle_info(timeout, State) ->
+ {noreply, State};
+
+%%% Data socket messages %%%
+handle_info({tcp, Socket, Data},
+ #state{dsock = Socket,
+ caller = {recv_file, Fd}} = State) ->
+ file_write(binary_to_list(Data), Fd),
+ progress_report({binary, Data}, State),
+ activate_data_connection(State),
+ {noreply, State};
+
+handle_info({tcp, Socket, Data}, #state{dsock = Socket, client = From,
+ caller = recv_chunk}
+ = State) ->
+ gen_server:reply(From, {ok, Data}),
+ {noreply, State#state{client = undefined, data = <<>>}};
+
+%% Default data-socket clause: accumulate the payload in State#state.data.
+handle_info({tcp, Socket, Data}, #state{dsock = Socket} = State) ->
+ activate_data_connection(State),
+ {noreply, State#state{data = <<(State#state.data)/binary,
+ Data/binary>>}};
+
+handle_info({tcp_closed, Socket}, #state{dsock = Socket,
+ caller = {recv_file, Fd}}
+ = State) ->
+ file_close(Fd),
+ progress_report({transfer_size, 0}, State),
+ activate_ctrl_connection(State),
+ {noreply, State#state{dsock = undefined, data = <<>>}};
+
+handle_info({tcp_closed, Socket}, #state{dsock = Socket, client = From,
+ caller = recv_chunk}
+ = State) ->
+ gen_server:reply(From, ok),
+ {noreply, State#state{dsock = undefined, client = undefined,
+ data = <<>>, caller = undefined,
+ chunk = false}};
+
+handle_info({tcp_closed, Socket}, #state{dsock = Socket, caller = recv_bin,
+ data = Data} = State) ->
+ activate_ctrl_connection(State),
+ {noreply, State#state{dsock = undefined, data = <<>>,
+ caller = {recv_bin, Data}}};
+
+handle_info({tcp_closed, Socket}, #state{dsock = Socket, data = Data,
+ caller = {handle_dir_result, Dir}}
+ = State) ->
+ activate_ctrl_connection(State),
+ {noreply, State#state{dsock = undefined,
+ caller = {handle_dir_result, Dir, Data},
+% data = <<?CR,?LF>>}};
+ data = <<>>}};
+
+handle_info({tcp_error, Socket, Reason}, #state{dsock = Socket,
+ client = From} = State) ->
+ gen_server:reply(From, {error, Reason}),
+ close_data_connection(State),
+ {noreply, State#state{dsock = undefined, client = undefined,
+ data = <<>>, caller = undefined, chunk = false}};
+
+%%% Ctrl socket messages %%%
+%% Control replies may arrive split across TCP segments; parse_lines/3
+%% reassembles them and {continue, _} means "wait for more bytes".
+handle_info({tcp, Socket, Data}, #state{csock = Socket,
+ verbose = Verbose,
+ caller = Caller,
+ client = From,
+ ctrl_data = {CtrlData, AccLines,
+ LineStatus}}
+ = State) ->
+ case ftp_response:parse_lines(<<CtrlData/binary, Data/binary>>,
+ AccLines, LineStatus) of
+ {ok, Lines, NextMsgData} ->
+ verbose(Lines, Verbose, 'receive'),
+ CtrlResult = ftp_response:interpret(Lines),
+ case Caller of
+ quote ->
+ gen_server:reply(From, string:tokens(Lines, [?CR, ?LF])),
+ {noreply, State#state{client = undefined,
+ caller = undefined,
+ ctrl_data = {NextMsgData, [],
+ start}}};
+ _ ->
+ handle_ctrl_result(CtrlResult,
+ State#state{ctrl_data =
+ {NextMsgData, [], start}})
+ end;
+ {continue, NewCtrlData} ->
+ activate_ctrl_connection(State),
+ {noreply, State#state{ctrl_data = NewCtrlData}}
+ end;
+
+handle_info({tcp_closed, Socket}, #state{csock = Socket}) ->
+ %% If the server closes the control channel it is
+ %% the expected behavior that connection process terminates.
+ exit(normal); %% User will get error message from terminate/2
+
+%% error_logger:error_report/2 is (Type, Report); error_msg/2 is the
+%% correct call for a format string with arguments.
+handle_info({tcp_error, Socket, Reason}, _) ->
+ error_logger:error_msg("tcp_error on socket: ~p for reason: ~p~n",
+ [Socket, Reason]),
+ %% If tcp does not work the only option is to terminate,
+ %% this is the expected behavior under these circumstances.
+ exit(normal); %% User will get error message from terminate/2
+
+%% Monitor messages - if the process owning the ftp connection goes
+%% down there is no point in continuing.
+handle_info({'DOWN', _Ref, _Type, _Process, normal}, State) ->
+ {stop, normal, State#state{client = undefined}};
+
+handle_info({'DOWN', _Ref, _Type, _Process, shutdown}, State) ->
+ {stop, normal, State#state{client = undefined}};
+
+handle_info({'DOWN', _Ref, _Type, _Process, timeout}, State) ->
+ {stop, normal, State#state{client = undefined}};
+
+handle_info({'DOWN', _Ref, _Type, Process, Reason}, State) ->
+ {stop, {stopped, {'EXIT', Process, Reason}},
+ State#state{client = undefined}};
+
+%% The args to info_msg/2 must be a list - the original passed a bare
+%% Reason, which breaks the ~p formatting.
+handle_info({'EXIT', Pid, Reason}, #state{progress = Pid} = State) ->
+ error_logger:info_msg("Progress reporting stopped for reason ~p~n",
+ [Reason]),
+ {noreply, State#state{progress = ignore}};
+
+%% Catch all - throws away unknown messages (This could happen by "accident"
+%% so we do not want to crash, but we make a log entry as it is an
+%% unwanted behaviour.)
+handle_info(Info, State) ->
+ error_logger:info_msg("ftp : ~p : Unexpected message: ~p\n",
+ [self(), Info]),
+ {noreply, State}.
+
+%%--------------------------------------------------------------------------
+%% terminate/2 and code_change/3
+%%--------------------------------------------------------------------------
+terminate(normal, State) ->
+ %% If terminate reason =/= normal the progress reporting process will
+ %% be killed by the exit signal.
+ progress_report(stop, State),
+ do_termiante({error, econn}, State);
+terminate(Reason, State) ->
+ %% error_logger:error_report/2 is (Type, Report); error_msg/2 is the
+ %% correct call for a format string with arguments.
+ error_logger:error_msg("Ftp connection closed due to: ~p~n", [Reason]),
+ do_termiante({error, eclosed}, State).
+
+%% Common termination path: close both connections and reply ErrorMsg to
+%% any client still waiting on a call.
+%% NOTE(review): "termiante" is a typo for "terminate"; renaming must be
+%% coordinated with both call sites in terminate/2 above.
+do_termiante(ErrorMsg, State) ->
+ close_data_connection(State),
+ close_ctrl_connection(State),
+ case State#state.client of
+ undefined ->
+ ok;
+ From ->
+ gen_server:reply(From, ErrorMsg)
+ end,
+ ok.
+
+%% No state record transformation is needed between code versions.
+code_change(_, State, _) ->
+ {ok, State}.
+
+%%%=========================================================================
+%% Start/stop
+%%%=========================================================================
+%%--------------------------------------------------------------------------
+%% start_link_sup([Args, Options]) -> {ok, Pid} | {error, Reason}
+%%
+%% Description: Callback function for the ftp supervisor. It is called
+%% : when open/[1,3] calls ftp_sup:start_child/1 to start an
+%% : instance of the ftp process.
+%%--------------------------------------------------------------------------
+%% Supervisor entry point: starts this module as a linked gen_server
+%% with the Args/Options built by ftp_sup.
+start_link_sup([Args, Options]) ->
+ gen_server:start_link(?MODULE, Args, Options).
+
+%%% Stop functionality is handled by close/1
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+
+%%--------------------------------------------------------------------------
+%%% Help functions to handle_call and/or handle_ctrl_result
+%%--------------------------------------------------------------------------
+%% User handling
+%% First login step: send USER; the password and optional account are
+%% stashed in the caller field for the follow-up PASS/ACCT steps.
+handle_user(User, Password, Acc, State) ->
+ Cmd = mk_cmd("USER ~s", [User]),
+ send_ctrl_message(State, Cmd),
+ activate_ctrl_connection(State),
+ NewState = State#state{caller = {handle_user, Password, Acc}},
+ {noreply, NewState}.
+
+%% Second login step: send PASS; a non-empty Acc is kept around in case
+%% the server asks for an ACCT command next.
+handle_user_passwd(Password, Acc, State) ->
+ Cmd = mk_cmd("PASS ~s", [Password]),
+ send_ctrl_message(State, Cmd),
+ activate_ctrl_connection(State),
+ NewState = State#state{caller = {handle_user_passwd, Acc}},
+ {noreply, NewState}.
+
+%% Final login step: send ACCT and wait for the server's verdict.
+handle_user_account(Acc, State) ->
+ Cmd = mk_cmd("ACCT ~s", [Acc]),
+ send_ctrl_message(State, Cmd),
+ activate_ctrl_connection(State),
+ NewState = State#state{caller = handle_user_account},
+ {noreply, NewState}.
+
+%%--------------------------------------------------------------------------
+%% handle_ctrl_result
+%%--------------------------------------------------------------------------
+%%--------------------------------------------------------------------------
+%% Handling of control connection setup
+%% Dispatch on (interpreted ftp reply, current caller). Clause order is
+%% significant throughout: specific caller patterns must precede the
+%% generic {Status, _} fallbacks for the same caller.
+handle_ctrl_result({pos_compl, _}, #state{caller = open, client = From}
+ = State) ->
+ gen_server:reply(From, {ok, self()}),
+ {noreply, State#state{client = undefined,
+ caller = undefined }};
+handle_ctrl_result({_, Lines}, #state{caller = open} = State) ->
+ ctrl_result_response(econn, State, {error, Lines});
+
+%%--------------------------------------------------------------------------
+%% Data connection setup active mode
+handle_ctrl_result({pos_compl, _Lines},
+ #state{mode = active,
+ caller = {setup_data_connection,
+ {LSock, Caller}}} = State) ->
+ handle_caller(State#state{caller = Caller, dsock = {lsock, LSock}});
+
+handle_ctrl_result({Status, Lines},
+ #state{mode = active,
+ caller = {setup_data_connection, {LSock, _}}}
+ = State) ->
+ close_connection(LSock),
+ ctrl_result_response(Status, State, {error, Lines});
+
+%% Data connection setup passive mode
+%% EPSV reply (RFC 2428): the port number is the second-to-last
+%% "|"-delimited field; the data connection goes to the same peer
+%% address as the control connection.
+handle_ctrl_result({pos_compl, Lines}, #state{mode = passive,
+ ip_v6_disabled = false,
+ client=From,
+ caller =
+ {setup_data_connection,
+ Caller},
+ csock = CSock,
+ timeout = Timeout}
+ = State) ->
+ [_, PortStr | _] = lists:reverse(string:tokens(Lines, "|")),
+ {ok, {IP, _}} = inet:peername(CSock),
+ case connect(IP, list_to_integer(PortStr), Timeout, State) of
+ {_,{ok, Socket}} ->
+ handle_caller(State#state{caller = Caller, dsock = Socket});
+ {_,{error,Reason}} ->
+ gen_server:reply(From,{error,Reason}),
+ {noreply,State#state{client = undefined, caller = undefined}}
+ end;
+
+%% PASV reply (RFC 959): host and port are encoded as six comma
+%% separated integers inside parentheses, port = P1 * 256 + P2.
+handle_ctrl_result({pos_compl, Lines},
+ #state{mode = passive, ip_v6_disabled = true,
+ client=From,
+ caller = {setup_data_connection, Caller},
+ timeout = Timeout} = State) ->
+
+ {_, [?LEFT_PAREN | Rest]} =
+ lists:splitwith(fun(?LEFT_PAREN) -> false; (_) -> true end, Lines),
+ {NewPortAddr, _} =
+ lists:splitwith(fun(?RIGHT_PAREN) -> false; (_) -> true end, Rest),
+ [A1, A2, A3, A4, P1, P2] = lists:map(fun(X) -> list_to_integer(X) end,
+ string:tokens(NewPortAddr, [$,])),
+ case connect({A1, A2, A3, A4}, (P1 * 256) + P2, Timeout, State) of
+ {_,{ok,Socket}} ->
+ handle_caller(State#state{caller = Caller, dsock = Socket});
+ {_,{error,Reason}} ->
+ gen_server:reply(From,{error,Reason}),
+ {noreply,State#state{client = undefined, caller = undefined}}
+ end;
+
+%% FTP server does not support passive mode try to fallback on active mode
+handle_ctrl_result(_, #state{mode = passive, caller = {setup_data_connection,
+ Caller}} = State) ->
+ setup_data_connection(State#state{mode = active, caller = Caller});
+
+%%--------------------------------------------------------------------------
+%% User handling
+handle_ctrl_result({pos_interm, _}, #state{caller =
+ {handle_user, PassWord, Acc}}
+ = State) ->
+ handle_user_passwd(PassWord, Acc, State);
+handle_ctrl_result({Status, _},
+ #state{caller = {handle_user, _, _}} = State) ->
+ ctrl_result_response(Status, State, {error, euser});
+
+%% Accounts
+handle_ctrl_result({pos_interm_acct, _}, #state{caller =
+ {handle_user_passwd, Acc}} =
+ State) when Acc =/= "" ->
+ handle_user_account(Acc, State);
+handle_ctrl_result({Status, _},
+ #state{caller = {handle_user_passwd, _}} = State) ->
+ ctrl_result_response(Status, State, {error, euser});
+
+%%--------------------------------------------------------------------------
+%% Print current working directory
+handle_ctrl_result({pos_compl, Lines}, #state{caller = pwd,
+ client = From} = State) ->
+ Dir = pwd_result(Lines),
+ gen_server:reply(From, {ok, Dir}),
+ {noreply, State#state{client = undefined, caller = undefined}};
+
+%%--------------------------------------------------------------------------
+%% Directory listing
+handle_ctrl_result({pos_prel, _}, #state{caller = {dir, Dir}} = State) ->
+ NewState = accept_data_connection(State),
+ activate_data_connection(NewState),
+ {noreply, NewState#state{caller = {handle_dir_result, Dir}}};
+
+handle_ctrl_result({pos_compl, _}, #state{caller = {handle_dir_result, Dir,
+ Data}, client = From}
+ = State) ->
+ case Dir of
+ "" -> % Current directory
+ gen_server:reply(From, {ok, Data}),
+ {noreply, State#state{client = undefined,
+ caller = undefined}};
+ _ ->
+ %% If there is only one line it might be a directory with on
+ %% file but it might be an error message that the directory
+ %% was not found. So in this case we have to endure a little
+ %% overhead to be able to give a good return value. Alas not
+ %% all ftp implementations behave the same and returning
+ %% an error string is allowed by the FTP RFC.
+ case lists:dropwhile(fun(?CR) -> false;(_) -> true end,
+ binary_to_list(Data)) of
+ L when L == [?CR, ?LF]; L == [] ->
+ send_ctrl_message(State, mk_cmd("PWD", [])),
+ activate_ctrl_connection(State),
+ {noreply,
+ State#state{caller = {handle_dir_data, Dir, Data}}};
+ _ ->
+ gen_server:reply(From, {ok, Data}),
+ {noreply, State#state{client = undefined,
+ caller = undefined}}
+ end
+ end;
+
+%% Disambiguation dance: remember where we were (PWD), cd into Dir to
+%% see whether it exists, then cd back (the three phases below).
+handle_ctrl_result({pos_compl, Lines},
+ #state{caller = {handle_dir_data, Dir, DirData}} =
+ State) ->
+ OldDir = pwd_result(Lines),
+ send_ctrl_message(State, mk_cmd("CWD ~s", [Dir])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = {handle_dir_data_second_phase, OldDir,
+ DirData}}};
+handle_ctrl_result({Status, _},
+ #state{caller = {handle_dir_data, _, _}} = State) ->
+ ctrl_result_response(Status, State, {error, epath});
+
+handle_ctrl_result(S={_Status, _},
+ #state{caller = {handle_dir_result, _, _}} = State) ->
+ %% OTP-5731, macosx
+ ctrl_result_response(S, State, {error, epath});
+
+handle_ctrl_result({pos_compl, _},
+ #state{caller = {handle_dir_data_second_phase, OldDir,
+ DirData}} = State) ->
+ send_ctrl_message(State, mk_cmd("CWD ~s", [OldDir])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = {handle_dir_data_third_phase, DirData}}};
+handle_ctrl_result({Status, _},
+ #state{caller = {handle_dir_data_second_phase, _, _}}
+ = State) ->
+ ctrl_result_response(Status, State, {error, epath});
+handle_ctrl_result(_, #state{caller = {handle_dir_data_third_phase, DirData},
+ client = From} = State) ->
+ gen_server:reply(From, {ok, DirData}),
+ {noreply, State#state{client = undefined, caller = undefined}};
+
+handle_ctrl_result({Status, _}, #state{caller = cd} = State) ->
+ ctrl_result_response(Status, State, {error, epath});
+
+handle_ctrl_result(Status={epath, _}, #state{caller = {dir,_}} = State) ->
+ ctrl_result_response(Status, State, {error, epath});
+
+%%--------------------------------------------------------------------------
+%% File renaming
+handle_ctrl_result({pos_interm, _}, #state{caller = {rename, NewFile}}
+ = State) ->
+ send_ctrl_message(State, mk_cmd("RNTO ~s", [NewFile])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = rename_second_phase}};
+
+handle_ctrl_result({Status, _},
+ #state{caller = {rename, _}} = State) ->
+ ctrl_result_response(Status, State, {error, epath});
+
+handle_ctrl_result({Status, _},
+ #state{caller = rename_second_phase} = State) ->
+ ctrl_result_response(Status, State, {error, epath});
+
+%%--------------------------------------------------------------------------
+%% File handling - recv_bin
+handle_ctrl_result({pos_prel, _}, #state{caller = recv_bin} = State) ->
+ NewState = accept_data_connection(State),
+ activate_data_connection(NewState),
+ {noreply, NewState};
+
+handle_ctrl_result({pos_compl, _}, #state{caller = {recv_bin, Data},
+ client = From} = State) ->
+ gen_server:reply(From, {ok, Data}),
+ close_data_connection(State),
+ {noreply, State#state{client = undefined, caller = undefined}};
+
+handle_ctrl_result({Status, _}, #state{caller = recv_bin} = State) ->
+ close_data_connection(State),
+ ctrl_result_response(Status, State#state{dsock = undefined},
+ {error, epath});
+
+handle_ctrl_result({Status, _}, #state{caller = {recv_bin, _}} = State) ->
+ close_data_connection(State),
+ ctrl_result_response(Status, State#state{dsock = undefined},
+ {error, epath});
+%%--------------------------------------------------------------------------
+%% File handling - start_chunk_transfer
+handle_ctrl_result({pos_prel, _}, #state{client = From,
+ caller = start_chunk_transfer}
+ = State) ->
+ NewState = accept_data_connection(State),
+ gen_server:reply(From, ok),
+ {noreply, NewState#state{chunk = true, client = undefined,
+ caller = undefined}};
+%%--------------------------------------------------------------------------
+%% File handling - recv_file
+handle_ctrl_result({pos_prel, _}, #state{caller = {recv_file, _}} = State) ->
+ NewState = accept_data_connection(State),
+ activate_data_connection(NewState),
+ {noreply, NewState};
+
+handle_ctrl_result({Status, _}, #state{caller = {recv_file, Fd}} = State) ->
+ file_close(Fd),
+ close_data_connection(State),
+ ctrl_result_response(Status, State#state{dsock = undefined},
+ {error, epath});
+%%--------------------------------------------------------------------------
+%% File handling - transfer_*
+handle_ctrl_result({pos_prel, _}, #state{caller = {transfer_file, Fd}}
+ = State) ->
+ NewState = accept_data_connection(State),
+ send_file(Fd, NewState);
+
+handle_ctrl_result({pos_prel, _}, #state{caller = {transfer_data, Bin}}
+ = State) ->
+ NewState = accept_data_connection(State),
+ send_data_message(NewState, Bin),
+ close_data_connection(NewState),
+ activate_ctrl_connection(NewState),
+ {noreply, NewState#state{caller = transfer_data_second_phase,
+ dsock = undefined}};
+%%--------------------------------------------------------------------------
+%% Default
+handle_ctrl_result({Status, Lines}, #state{client = From} = State)
+ when From =/= undefined ->
+ ctrl_result_response(Status, State, {error, Lines}).
+
+%%--------------------------------------------------------------------------
+%% Help functions to handle_ctrl_result
+%%--------------------------------------------------------------------------
+%% Map a control result onto a gen_server reply: success -> ok, fatal
+%% statuses -> {error, Status} plus process stop, everything else ->
+%% the caller-supplied ErrorMsg.
+ctrl_result_response(pos_compl, #state{client = From} = State, _) ->
+ gen_server:reply(From, ok),
+ {noreply, State#state{client = undefined, caller = undefined}};
+
+ctrl_result_response(Status, #state{client = From} = State, _) when
+Status == etnospc; Status == epnospc; Status == efnamena; Status == econn ->
+%Status == etnospc; Status == epnospc; Status == econn ->
+ gen_server:reply(From, {error, Status}),
+%% {stop, normal, {error, Status}, State#state{client = undefined}};
+ {stop, normal, State#state{client = undefined}};
+
+ctrl_result_response(_, #state{client = From} = State, ErrorMsg) ->
+ gen_server:reply(From, ErrorMsg),
+ {noreply, State#state{client = undefined, caller = undefined}}.
+
+%%--------------------------------------------------------------------------
+%% Once the data connection is agreed on, issue the command the caller
+%% originally asked for (dispatch on the caller field).
+handle_caller(#state{caller = {dir, Dir, Len}} = State) ->
+ Cmd = case Len of
+ short -> "NLST";
+ long -> "LIST"
+ end,
+ case Dir of
+ "" ->
+ send_ctrl_message(State, mk_cmd(Cmd, ""));
+ _ ->
+ send_ctrl_message(State, mk_cmd(Cmd ++ " ~s", [Dir]))
+ end,
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = {dir, Dir}}};
+
+handle_caller(#state{caller = {recv_bin, RemoteFile}} = State) ->
+ send_ctrl_message(State, mk_cmd("RETR ~s", [RemoteFile])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = recv_bin}};
+
+handle_caller(#state{caller = {start_chunk_transfer, Cmd, RemoteFile}} =
+ State) ->
+ send_ctrl_message(State, mk_cmd("~s ~s", [Cmd, RemoteFile])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = start_chunk_transfer}};
+
+handle_caller(#state{caller = {recv_file, RemoteFile, Fd}} = State) ->
+ send_ctrl_message(State, mk_cmd("RETR ~s", [RemoteFile])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = {recv_file, Fd}}};
+
+%% The local file is opened here, after the data connection setup, so a
+%% bad local path is reported as epath without leaking the transfer.
+handle_caller(#state{caller = {transfer_file, {Cmd, LocalFile, RemoteFile}},
+ ldir = LocalDir, client = From} = State) ->
+ case file_open(filename:absname(LocalFile, LocalDir), read) of
+ {ok, Fd} ->
+ send_ctrl_message(State, mk_cmd("~s ~s", [Cmd, RemoteFile])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = {transfer_file, Fd}}};
+ {error, _} ->
+ gen_server:reply(From, {error, epath}),
+ {noreply, State#state{client = undefined, caller = undefined,
+ dsock = undefined}}
+ end;
+
+handle_caller(#state{caller = {transfer_data, {Cmd, Bin, RemoteFile}}} =
+ State) ->
+ send_ctrl_message(State, mk_cmd("~s ~s", [Cmd, RemoteFile])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = {transfer_data, Bin}}}.
+
+%% ----------- FTP SERVER COMMUNICATION -------------------------
+
+%% Connect to FTP server at Host (default is TCP port 21)
+%% in order to establish a control connection.
+%% Establish the control connection. The remaining part of the caller's
+%% Timeout (minus the time spent in connect/4) is installed as the
+%% gen_server timeout guarding the server's greeting.
+setup_ctrl_connection(Host, Port, Timeout, State)->
+ MsTime = millisec_time(),
+ case connect(Host, Port, Timeout, State) of
+ {Ipv, {ok, CSock}} ->
+ NewState =
+ case Ipv of
+ ipv4 ->
+ State#state{csock = CSock, ip_v6_disabled = true};
+ ipv6 ->
+ State#state{csock = CSock}
+ end,
+ activate_ctrl_connection(NewState),
+ case Timeout - (millisec_time() - MsTime) of
+ Timeout2 when (Timeout2 >= 0) ->
+ {noreply, NewState#state{caller = open}, Timeout2};
+ _ ->
+ %% Oops: the budget is already spent - simulate the
+ %% timeout by messaging ourselves immediately.
+ self() ! timeout,
+ {noreply, NewState#state{caller = open}}
+ end;
+ {_,{error, _}} ->
+ gen_server:reply(State#state.client, {error, ehost}),
+ {stop, normal, State#state{client = undefined}}
+ end.
+
+%% Active mode: listen locally and tell the server where to connect -
+%% EPRT (RFC 2428) for an IPv6 control socket, classic PORT for IPv4.
+setup_data_connection(#state{mode = active,
+ caller = Caller,
+ csock = CSock} = State) ->
+ IntToString = fun(Element) -> integer_to_list(Element) end,
+
+ case (catch inet:sockname(CSock)) of
+ {ok, {{_, _, _, _, _, _, _, _} = IP, _}} ->
+ {ok, LSock} =
+ gen_tcp:listen(0, [{ip, IP}, {active, false},
+ inet6, binary, {packet, 0}]),
+ {ok, Port} = inet:port(LSock),
+ Cmd = mk_cmd("EPRT |2|~s:~s:~s:~s:~s:~s:~s:~s|~s|",
+ lists:map(IntToString,
+ tuple_to_list(IP) ++ [Port])),
+ send_ctrl_message(State, Cmd),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = {setup_data_connection,
+ {LSock, Caller}}}};
+ {ok, {{_,_,_,_} = IP, _}} ->
+ {ok, LSock} = gen_tcp:listen(0, [{ip, IP}, {active, false},
+ binary, {packet, 0}]),
+ {ok, Port} = inet:port(LSock),
+ {IP1, IP2, IP3, IP4} = IP,
+ %% PORT encodes the port as two bytes: high, low.
+ {Port1, Port2} = {Port div 256, Port rem 256},
+ send_ctrl_message(State,
+ mk_cmd("PORT ~w,~w,~w,~w,~w,~w",
+ [IP1, IP2, IP3, IP4, Port1, Port2])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = {setup_data_connection,
+ {LSock, Caller}}}}
+ end;
+
+%% Passive mode: ask the server for an endpoint - EPSV when IPv6 is
+%% enabled, PASV otherwise; the reply is parsed in handle_ctrl_result.
+setup_data_connection(#state{mode = passive, ip_v6_disabled = false,
+ caller = Caller} = State) ->
+ send_ctrl_message(State, mk_cmd("EPSV", [])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = {setup_data_connection, Caller}}};
+
+setup_data_connection(#state{mode = passive, ip_v6_disabled = true,
+ caller = Caller} = State) ->
+ send_ctrl_message(State, mk_cmd("PASV", [])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = {setup_data_connection, Caller}}}.
+
+connect(Host = {_,_,_,_}, Port, TimeOut, _) ->
+ {ipv4, gen_tcp:connect(Host, Port,[binary, {packet, 0}, {active, false}] ,
+ TimeOut)};
+connect(Host = {_,_,_,_,_,_,_,_}, Port, TimeOut,
+ #state{ip_v6_disabled = false}) ->
+ {ipv6, gen_tcp:connect(Host, Port,
+ [binary, {packet, 0}, {active, false}, inet6],
+ TimeOut)};
+connect(Host, Port, TimeOut, #state{ip_v6_disabled = false}) ->
+ {Opts, NewHost, Ipv} =
+ case (inet:getaddr(Host, inet6)) of
+ %% If an ipv4-mapped ipv6 address is returned
+ %% use ipv4 directly as some ftp-servers do not
+ %% handle "ipv4-ipv6-compatibility" mode well!
+ {ok, IP = {0, 0, 0, 0, 0, 16#ffff, _, _}} ->
+ case inet:getaddr(Host, inet) of
+ {ok,NewIP} ->
+ {[binary, {packet, 0}, {active, false}], NewIP, ipv4};
+ _Error ->
+ {[binary, {packet, 0}, {active, false}, inet6],
+ IP,ipv6}
+ end;
+ {ok, IP} ->
+ {[binary, {packet, 0}, {active, false}, inet6], IP, ipv6};
+ {error, _} ->
+ {[binary, {packet, 0}, {active, false}], Host, ipv4}
+ end,
+ {Ipv, gen_tcp:connect(NewHost, Port, Opts, TimeOut)};
+
+connect(Host, Port, TimeOut, #state{ip_v6_disabled = true}) ->
+ Opts = [binary, {packet, 0}, {active, false}],
+ {ipv4, gen_tcp:connect(Host, Port, Opts, TimeOut)}.
+
+accept_data_connection(#state{mode = active,
+ dsock = {lsock, LSock}} = State) ->
+ {ok, Socket} = gen_tcp:accept(LSock),
+ gen_tcp:close(LSock),
+ State#state{dsock = Socket};
+
+accept_data_connection(#state{mode = passive} = State) ->
+ State.
+
+send_ctrl_message(#state{csock = Socket,verbose=Verbose}, Message) ->
+% io:format("Sending: ~p~n",[Message]),
+ verbose(lists:flatten(Message),Verbose,send),
+ send_message(Socket, Message).
+
+send_data_message(#state{dsock = Socket}, Message) ->
+ send_message(Socket, Message).
+
+send_message(Socket, Message) ->
+ case gen_tcp:send(Socket, Message) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ error_logger:error_report("gen_tcp:send/2 failed for "
+ "reason ~p~n", [Reason]),
+ %% If tcp does not work the only option is to terminate,
+ %% this is the expected behavior under these circumstances.
+ exit(normal) %% User will get error message from terminate/2
+ end.
+
+activate_ctrl_connection(#state{csock = Socket, ctrl_data = {<<>>, _, _}}) ->
+ activate_connection(Socket);
+activate_ctrl_connection(#state{csock = Socket}) ->
+ %% We have already received at least part of the next control message,
+ %% that has been saved in ctrl_data, process this first.
+ self() ! {tcp, Socket, <<>>}.
+
+activate_data_connection(#state{dsock = Socket}) ->
+ activate_connection(Socket).
+
+activate_connection(Socket) ->
+ inet:setopts(Socket, [{active, once}]).
+
+close_ctrl_connection(#state{csock = undefined}) ->
+ ok;
+close_ctrl_connection(#state{csock = Socket}) ->
+ close_connection(Socket).
+
+close_data_connection(#state{dsock = undefined}) ->
+ ok;
+close_data_connection(#state{dsock = {lsock, Socket}}) ->
+ close_connection(Socket);
+close_data_connection(#state{dsock = Socket}) ->
+ close_connection(Socket).
+
+close_connection(Socket) ->
+ gen_tcp:close(Socket).
+
+%% ------------ FILE HANDLING ----------------------------------------
+
+send_file(Fd, State) ->
+ case file_read(Fd) of
+ {ok, N, Bin} when N > 0->
+ send_data_message(State, Bin),
+ progress_report({binary, Bin}, State),
+ send_file(Fd, State);
+ {ok, _, _} ->
+ file_close(Fd),
+ close_data_connection(State),
+ progress_report({transfer_size, 0}, State),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = transfer_file_second_phase,
+ dsock = undefined}};
+ {error, Reason} ->
+ gen_server:reply(State#state.client, {error, Reason}),
+ {stop, normal, State#state{client = undefined}}
+ end.
+
+file_open(File, Option) ->
+ file:open(File, [raw, binary, Option]).
+
+file_close(Fd) ->
+ file:close(Fd).
+
+file_read(Fd) ->
+ case file:read(Fd, ?FILE_BUFSIZE) of
+ {ok, Bytes} ->
+ {ok, size(Bytes), Bytes};
+ eof ->
+ {ok, 0, []};
+ Other ->
+ Other
+ end.
+
+file_write(Bytes, Fd) ->
+ file:write(Fd, Bytes).
+
+%% -------------- MISC ----------------------------------------------
+
+call(GenServer, Msg, Format) ->
+ call(GenServer, Msg, Format, infinity).
+call(GenServer, Msg, Format, Timeout) ->
+
+ Result = (catch gen_server:call(GenServer, {self(), Msg}, Timeout)),
+
+ case Result of
+ {ok, Bin} when binary(Bin), Format == string ->
+ {ok, binary_to_list(Bin)};
+ {'EXIT', _} ->
+ {error, eclosed};
+ Result ->
+ Result
+ end.
+
+cast(GenServer, Msg) ->
+ gen_server:cast(GenServer, {self(), Msg}).
+
+mk_cmd(Fmt, Args) ->
+ [io_lib:format(Fmt, Args)| [?CR, ?LF]]. % Deep list ok.
+
+pwd_result(Lines) ->
+ {_, [?DOUBLE_QUOTE | Rest]} =
+ lists:splitwith(fun(?DOUBLE_QUOTE) -> false; (_) -> true end, Lines),
+ {Dir, _} =
+ lists:splitwith(fun(?DOUBLE_QUOTE) -> false; (_) -> true end, Rest),
+ Dir.
+
+is_verbose(Params) ->
+ check_param(verbose, Params).
+
+is_debug(Flags) ->
+ check_param(debug, Flags).
+
+is_trace(Flags) ->
+ check_param(trace, Flags).
+
+is_ipv6_disabled(Flags) ->
+ check_param(ip_v6_disabled, Flags).
+
+check_param(Param, Params) ->
+ lists:member(Param, Params).
+
+key_search(Key, List, Default)->
+ case lists:keysearch(Key, 1, List) of
+ {value, {_,Val}} ->
+ Val;
+ false ->
+ Default
+ end.
+
+check_option(Pred, Value, Default) ->
+ case Pred(Value) of
+ true ->
+ Value;
+ false ->
+ Default
+ end.
+
+verbose(Lines, true, Direction) ->
+ DirStr =
+ case Direction of
+ send ->
+ "Sending: ";
+ _ ->
+ "Receiving: "
+ end,
+ Str = string:strip(string:strip(Lines, right, ?LF), right, ?CR),
+ erlang:display(DirStr++Str);
+verbose(_, false,_) ->
+ ok.
+
+ensure_started() ->
+ %% Start of the inets application should really be handled by the
+ %% application using inets.
+ case application:start(inets) of
+ {error,{already_started,inets}} ->
+ ok;
+ {error,{{already_started, _}, % Started as an included application
+ {inets_app,start, _}}} ->
+ ok;
+ ok ->
+ error_logger:info_report("The inets application was not started."
+ " Has now been started as a temporary"
+ " application.")
+ end.
+
+progress(Options) ->
+ ftp_progress:start_link(Options).
+
+progress_report(_, #state{progress = ignore}) ->
+ ok;
+progress_report(stop, #state{progress = ProgressPid}) ->
+ ftp_progress:stop(ProgressPid);
+progress_report({binary, Data}, #state{progress = ProgressPid}) ->
+ ftp_progress:report(ProgressPid, {transfer_size, size(Data)});
+progress_report(Report, #state{progress = ProgressPid}) ->
+ ftp_progress:report(ProgressPid, Report).
+
+
+millisec_time() ->
+ {A,B,C} = erlang:now(),
+ A*1000000000+B*1000+(C div 1000).
diff --git a/src/couch_inets/ftp_internal.hrl b/src/couch_inets/ftp_internal.hrl
new file mode 100644
index 00000000..eb605af9
--- /dev/null
+++ b/src/couch_inets/ftp_internal.hrl
@@ -0,0 +1,19 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+
+-include("inets_internal.hrl").
diff --git a/src/couch_inets/ftp_progress.erl b/src/couch_inets/ftp_progress.erl
new file mode 100644
index 00000000..1b3c1072
--- /dev/null
+++ b/src/couch_inets/ftp_progress.erl
@@ -0,0 +1,125 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%% Description: This module implements a temporary process that
+%% performs progress reporting during file transfer, calling a user
+%% defined callback function. Its life span is as long as the ftp connection
+%% process that spawned it lives. The purpose of this process is to
+%% shield the ftp connection process from errors and time consuming operations
+%% in the user defined callback function.
+
+-module(ftp_progress).
+
+%% Internal API
+-export([start_link/1, report/2, stop/1]).
+
+%% Spawn export
+-export([init/1]).
+
+-include_lib("kernel/include/file.hrl").
+
+-record(progress, {
+ file, % string()
+ cb_module, % atom()
+ cb_function, % atom()
+ init_progress_term, % term()
+ current_progress_term % term()
+ }).
+
+%%%=========================================================================
+%%% Internal application API
+%%%=========================================================================
+%%--------------------------------------------------------------------------
+%% start_link(Options) -> ignore | pid()
+%% Options = ignore | {CBModule, CBFunction, InitProgressTerm}
+%%
+%% Description: Starts the progress report process unless progress reporting
+%% should not be performed.
+%%--------------------------------------------------------------------------
+start_link(ignore) ->
+ ignore;
+start_link(Options) ->
+ spawn_link(?MODULE, init, [Options]).
+
+%%--------------------------------------------------------------------------
+%% report_progress(Pid, Report) -> _
+%% Pid = pid()
+%% Report = {local_file, File} | {remote_file, File} |
+%% {transfer_size, Size}
+%% Size = integer()
+%%
+%% Description: Reports progress to the reporting process that calls the
+%% user defined callback function.
+%%--------------------------------------------------------------------------
+report(Pid, Report) ->
+ Pid ! {progress_report, Report}.
+
+%%--------------------------------------------------------------------------
+%% stop(Pid) -> _
+%% Pid = pid()
+%%
+%% Description: Stops the progress reporting process.
+%%--------------------------------------------------------------------------
+stop(Pid) ->
+ Pid ! stop.
+
+%%%=========================================================================
+%%% Internal functions
+%%%=========================================================================
+init(Options) ->
+ loop(progress(Options)).
+
+loop(Progress) ->
+ receive
+ {progress_report, Report} ->
+ NewProgress = report_progress(Report, Progress),
+ loop(NewProgress);
+ stop ->
+ ok
+ end.
+
+progress({CBModule, CBFunction, InitProgressTerm}) when is_atom(CBModule),
+ is_atom(CBFunction) ->
+ #progress{cb_module = CBModule,
+ cb_function = CBFunction,
+ init_progress_term = InitProgressTerm,
+ current_progress_term = InitProgressTerm}.
+
+report_progress({local_file, File}, Progress) ->
+ {ok, FileInfo} = file:read_file_info(File),
+ report_progress({file_size, FileInfo#file_info.size},
+ Progress#progress{file = File});
+
+report_progress({remote_file, File}, Progress) ->
+ report_progress({file_size, unknown}, Progress#progress{file = File});
+
+report_progress(Size, #progress{file = File,
+ cb_module = CBModule,
+ cb_function = CBFunction,
+ current_progress_term = Term,
+ init_progress_term = InitTerm} = Progress) ->
+
+ NewProgressTerm = CBModule:CBFunction(Term, File, Size),
+
+ case Size of
+ {transfer_size, 0} ->
+ %% Transfer is complete; reset initial values
+ Progress#progress{current_progress_term = InitTerm,
+ file = undefined};
+ _ ->
+ Progress#progress{current_progress_term = NewProgressTerm}
+ end.
diff --git a/src/couch_inets/ftp_response.erl b/src/couch_inets/ftp_response.erl
new file mode 100644
index 00000000..55e745c4
--- /dev/null
+++ b/src/couch_inets/ftp_response.erl
@@ -0,0 +1,190 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%% Description: This module implements handling of ftp server responses.
+
+-module(ftp_response).
+
+%% Internal API
+-export([parse_lines/3, interpret/1, error_string/1]).
+
+-include("ftp_internal.hrl").
+
+%% First group of reply code digits
+-define(POS_PREL, 1).
+-define(POS_COMPL, 2).
+-define(POS_INTERM, 3).
+-define(TRANS_NEG_COMPL, 4).
+-define(PERM_NEG_COMPL, 5).
+%% Second group of reply code digits
+-define(SYNTAX,0).
+-define(INFORMATION,1).
+-define(CONNECTION,2).
+-define(AUTH_ACC,3).
+-define(UNSPEC,4).
+-define(FILE_SYSTEM,5).
+
+%%%=========================================================================
+%%% INTERNAL API
+%%%=========================================================================
+
+%%--------------------------------------------------------------------------
+%% parse_lines(Data, AccLines, StatusCode) -> {ok, Lines} |
+%% {continue, {Data,
+%% AccLines, StatusCode}}
+%%
+%% Data = binary() - data received on the control connection from the
+%% ftp-server.
+%% AccLines = [string()]
+%% StatusCode = start | {byte(), byte(), byte()} | finish -
+%% Indicates where in the parsing process we are.
+%% start - (looking for the status code of the message)
+%% {byte(), byte(), byte()} - status code found, now
+%% looking for the last line indication.
+%% finish - now on the last line.
+%% Description: Parses a ftp control response message.
+%% "A reply is defined to contain the 3-digit code, followed by Space
+%% <SP>, followed by one line of text (where some maximum line length
+%% has been specified), and terminated by the Telnet end-of-line
+%% code (CRLF), or a so called multilined reply for example:
+%%
+%% 123-First line
+%% Second line
+%% 234 A line beginning with numbers
+%% 123 The last line
+%%
+%% The user-process then simply needs to search for the second
+%% occurrence of the same reply code, followed by <SP> (Space), at
+%% the beginning of a line, and ignore all intermediary lines. If
+%% an intermediary line begins with a 3-digit number, the Server
+%% will pad the front to avoid confusion.
+%%--------------------------------------------------------------------------
+
+%% Make sure we received the first 4 bytes so we know how to parse
+%% the FTP server response, i.e. is the response composed of one
+%% or multiple lines.
+parse_lines(Bin, Lines, start) when size(Bin) < 4 ->
+ {continue, {Bin, Lines, start}};
+%% Multiple lines exist
+parse_lines(<<C1, C2, C3, $-, Rest/binary>>, Lines, start) ->
+ parse_lines(Rest, [$-, C3, C2, C1 | Lines], {C1, C2, C3});
+%% Only one line exists
+parse_lines(<<C1, C2, C3, ?WHITE_SPACE, Bin/binary>>, Lines, start) ->
+ parse_lines(Bin, [?WHITE_SPACE, C3, C2, C1 | Lines], finish);
+
+%% Last line found
+parse_lines(<<C1, C2, C3, ?WHITE_SPACE, Rest/binary>>, Lines, {C1, C2, C3}) ->
+ parse_lines(Rest, [?WHITE_SPACE, C3, C2, C1 | Lines], finish);
+%% Potential end found wait for more data
+parse_lines(<<C1, C2, C3>> = Bin, Lines, {C1, C2, C3}) ->
+ {continue, {Bin, Lines, {C1, C2, C3}}};
+%% Intermediate line beginning with status code
+parse_lines(<<C1, C2, C3, Rest/binary>>, Lines, {C1, C2, C3}) ->
+ parse_lines(Rest, [C3, C2, C1 | Lines], {C1, C2, C3});
+
+%% Potential last line wait for more data
+parse_lines(<<C1, C2>> = Data, Lines, {C1, C2, _} = StatusCode) ->
+ {continue, {Data, Lines, StatusCode}};
+parse_lines(<<C1>> = Data, Lines, {C1, _, _} = StatusCode) ->
+ {continue, {Data, Lines, StatusCode}};
+parse_lines(<<>> = Data, Lines, {_,_,_} = StatusCode) ->
+ {continue, {Data, Lines, StatusCode}};
+%% Part of the multiple lines
+parse_lines(<<Octet, Rest/binary>>, Lines, {_,_, _} = StatusCode) ->
+ parse_lines(Rest, [Octet | Lines], StatusCode);
+
+%% End of FTP server response found
+parse_lines(<<?CR, ?LF>>, Lines, finish) ->
+ {ok, lists:reverse([?LF, ?CR | Lines]), <<>>};
+parse_lines(<<?CR, ?LF, Rest/binary>>, Lines, finish) ->
+ {ok, lists:reverse([?LF, ?CR | Lines]), Rest};
+
+%% Potential end found wait for more data
+parse_lines(<<?CR>> = Data, Lines, finish) ->
+ {continue, {Data, Lines, finish}};
+parse_lines(<<>> = Data, Lines, finish) ->
+ {continue, {Data, Lines, finish}};
+%% Part of last line
+parse_lines(<<Octet, Rest/binary>>, Lines, finish) ->
+ parse_lines(Rest, [Octet | Lines], finish).
+
+%%--------------------------------------------------------------------------
+%% interpret(Lines) -> {Status, Text}
+%% Lines = [byte(), byte(), byte() | Text] - ftp server response as
+%% returned by parse_lines/3
+%% Status = atom() (see interpret_status/3)
+%% Text = [string()]
+%%
+%% Description: Create nicer data to match on.
+%%--------------------------------------------------------------------------
+interpret([Didgit1, Didgit2, Didgit3 | Data]) ->
+ Code1 = Didgit1 - $0,
+ Code2 = Didgit2 - $0,
+ Code3 = Didgit3 - $0,
+ {interpret_status(Code1, Code2, Code3), Data}.
+
+%%--------------------------------------------------------------------------
+%% error_string(Error) -> string()
+%% Error = {error, term()} | term()
+%%
+%% Description: Translates error codes into strings intended for
+%% human interpretation.
+%%--------------------------------------------------------------------------
+error_string({error, Reason}) ->
+ error_string(Reason);
+
+error_string(echunk) -> "Synchronisation error during chunk sending.";
+error_string(eclosed) -> "Session has been closed.";
+error_string(econn) -> "Connection to remote server prematurely closed.";
+error_string(eexists) ->"File or directory already exists.";
+error_string(ehost) -> "Host not found, FTP server not found, "
+ "or connection rejected.";
+error_string(elogin) -> "User not logged in.";
+error_string(enotbinary) -> "Term is not a binary.";
+error_string(epath) -> "No such file or directory, already exists, "
+ "or permission denied.";
+error_string(etype) -> "No such type.";
+error_string(euser) -> "User name or password not valid.";
+error_string(etnospc) -> "Insufficient storage space in system.";
+error_string(epnospc) -> "Exceeded storage allocation "
+ "(for current directory or dataset).";
+error_string(efnamena) -> "File name not allowed.";
+error_string(Reason) ->
+ lists:flatten(io_lib:format("Unknown error: ~w", [Reason])).
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+
+%% Positive Preliminary Reply
+interpret_status(?POS_PREL,_,_) -> pos_prel;
+%% Positive Completion Reply
+interpret_status(?POS_COMPL,_,_) -> pos_compl;
+%% Positive Intermediate Reply, need account
+interpret_status(?POS_INTERM,?AUTH_ACC,2) -> pos_interm_acct;
+%% Positive Intermediate Reply
+interpret_status(?POS_INTERM,_,_) -> pos_interm;
+%% No storage area no action taken
+interpret_status(?TRANS_NEG_COMPL,?FILE_SYSTEM,2) -> etnospc;
+%% Temporary Error, no action taken
+interpret_status(?TRANS_NEG_COMPL,_,_) -> trans_neg_compl;
+%% Permanent disk space error, the user shall not try again
+interpret_status(?PERM_NEG_COMPL,?FILE_SYSTEM,0) -> epath;
+interpret_status(?PERM_NEG_COMPL,?FILE_SYSTEM,2) -> epnospc;
+interpret_status(?PERM_NEG_COMPL,?FILE_SYSTEM,3) -> efnamena;
+interpret_status(?PERM_NEG_COMPL,_,_) -> perm_neg_compl.
+
diff --git a/src/couch_inets/ftp_sup.erl b/src/couch_inets/ftp_sup.erl
new file mode 100644
index 00000000..c564fb0b
--- /dev/null
+++ b/src/couch_inets/ftp_sup.erl
@@ -0,0 +1,57 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%%----------------------------------------------------------------------
+%% Purpose: The top supervisor for the ftp hangs under inets_sup.
+%%----------------------------------------------------------------------
+-module(ftp_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0]).
+-export([start_child/1]).
+
+%% Supervisor callback
+-export([init/1]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+start_child(Args) ->
+ supervisor:start_child(?MODULE, Args).
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+init(_) ->
+ RestartStrategy = simple_one_for_one,
+ MaxR = 0,
+ MaxT = 3600,
+
+ Name = undefined, % As simple_one_for_one is used.
+ StartFunc = {ftp, start_link_sup, []},
+ Restart = temporary, % E.g. should not be restarted
+ Shutdown = 4000,
+ Modules = [ftp],
+ Type = worker,
+
+ ChildSpec = {Name, StartFunc, Restart, Shutdown, Type, Modules},
+ {ok, {{RestartStrategy, MaxR, MaxT}, [ChildSpec]}}.
diff --git a/src/couch_inets/http.erl b/src/couch_inets/http.erl
new file mode 100644
index 00000000..bdc7f73a
--- /dev/null
+++ b/src/couch_inets/http.erl
@@ -0,0 +1,396 @@
+% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%% This module is very loosely based on code initially developed by
+%% Johan Blom at Mobile Arts AB
+%% Description:
+%%% This version of the HTTP/1.1 client supports:
+%%% - RFC 2616 HTTP 1.1 client part
+%%% - RFC 2818 HTTP Over TLS
+
+-module(http).
+
+%% API
+-export([request/1, request/4, cancel_request/1, set_options/1,
+ verify_cookies/2, cookie_header/1]).
+
+-include("http_internal.hrl").
+-include("httpc_internal.hrl").
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+%%--------------------------------------------------------------------------
+%% request(Method, Request, HTTPOptions, Options) ->
+%% {ok, {StatusLine, Headers, Body}} | {ok, {Status, Body}} |
+%% {ok, RequestId} | {error,Reason} | {ok, {saved_as, FilePath}
+%%
+%% Method - atom() = head | get | put | post | trace | options| delete
+%% Request - {Url, Headers} | {Url, Headers, ContentType, Body}
+%% Url - string()
+%% HTTPOptions - [HttpOption]
+%% HTTPOption - {timeout, Time} | {ssl, SSLOptions} |
+%% {proxy_auth, {User, Password}}
+%% Ssloptions = [SSLOption]
+%% SSLOption = {verify, code()} | {depth, depth()} | {certfile, path()} |
+%% {keyfile, path()} | {password, string()} | {cacertfile, path()} |
+%% {ciphers, string()}
+%% Options - [Option]
+%% Option - {sync, Boolean} | {body_format, BodyFormat} |
+%% {full_result, Boolean} | {stream, To} |
+%% {headers_as_is, Boolean}
+%% StatusLine = {HTTPVersion, StatusCode, ReasonPhrase}
+%% HTTPVersion = string()
+%% StatusCode = integer()
+%% ReasonPhrase = string()
+%% Headers = [Header]
+%% Header = {Field, Value}
+%% Field = string()
+%% Value = string()
+%% Body = string() | binary() - HTML code
+%%
+%% Description: Sends an HTTP request. The function can be both
+%% synchronous and asynchronous; in the latter case the function will
+%% return {ok, RequestId} and later on a message will be sent to the
+%% calling process on the format {http, {RequestId, {StatusLine,
+%% Headers, Body}}} or {http, {RequestId, {error, Reason}}}
+%%--------------------------------------------------------------------------
+request(Url) ->
+ request(get, {Url, []}, [], []).
+
+request(Method, {Url, Headers}, HTTPOptions, Options)
+ when Method==options;Method==get;Method==head;Method==delete;Method==trace ->
+ case http_uri:parse(Url) of
+ {error,Reason} ->
+ {error,Reason};
+ ParsedUrl ->
+ handle_request(Method, Url, {ParsedUrl, Headers, [], []},
+ HTTPOptions, Options)
+ end;
+
+request(Method, {Url,Headers,ContentType,Body}, HTTPOptions, Options)
+ when Method==post;Method==put ->
+ case http_uri:parse(Url) of
+ {error,Reason} ->
+ {error,Reason};
+ ParsedUrl ->
+ handle_request(Method, Url,
+ {ParsedUrl, Headers, ContentType, Body},
+ HTTPOptions, Options)
+ end.
+
+%%--------------------------------------------------------------------------
+%% request(RequestId) -> ok
+%% RequestId - As returned by request/4
+%%
+%% Description: Cancels a HTTP-request.
+%%-------------------------------------------------------------------------
+cancel_request(RequestId) ->
+ ok = httpc_manager:cancel_request(RequestId),
+ receive
+ %% If the request was already fulfilled throw away the
+ %% answer as the request has been canceled.
+ {http, {RequestId, _}} ->
+ ok
+ after 0 ->
+ ok
+ end.
+
+%%--------------------------------------------------------------------------
+%% set_options(Options) ->
+%% Options - [Option]
+%% Option - {proxy, {Proxy, NoProxy}} | {max_sessions, MaxSessions} |
+%% {max_pipeline_length, MaxPipeline} |
+%% {pipeline_timeout, PipelineTimeout} | {cookies, CookieMode}
+%% | {ipv6, Ipv6Mode}
+%% Proxy - {Host, Port}
+%% NoProxy - [Domain | HostName | IPAddress]
+%% MaxSessions, MaxPipeline, PipelineTimeout = integer()
+%% CookieMode - enabled | disabled | verify
+%% Ipv6Mode - enabled | disabled
+%% Description: Informs the httpc_manager of the new settings.
+%%-------------------------------------------------------------------------
+set_options(Options) ->
+ ensure_started(no_scheme),
+ httpc_manager:set_options(Options).
+
+verify_cookies(SetCookieHeaders, Url) ->
+ {_, _, Host, Port, Path, _} = http_uri:parse(Url),
+ Cookies = http_cookie:cookies(SetCookieHeaders, Path, Host),
+ httpc_manager:store_cookies(Cookies, {Host, Port}),
+ ok.
+
+cookie_header(Url) ->
+ httpc_manager:cookies(Url).
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+handle_request(Method, Url, {{Scheme, UserInfo, Host, Port, Path, Query},
+ Headers, ContentType, Body}, HTTPOptions, Options) ->
+ HTTPRecordOptions = http_options(HTTPOptions, #http_options{}),
+
+ Sync = http_util:key1search(Options, sync, true),
+ NewHeaders = lists:map(fun({Key, Val}) ->
+ {http_util:to_lower(Key), Val} end,
+ Headers),
+ Stream = http_util:key1search(Options, stream, none),
+
+ case {Sync, Stream} of
+ {true, self} ->
+ {error, streaming_error};
+ _ ->
+ RecordHeaders = header_record(NewHeaders, #http_request_h{}, Host),
+ Request = #request{from = self(),
+ scheme = Scheme, address = {Host,Port},
+ path = Path, pquery = Query, method = Method,
+ headers = RecordHeaders,
+ content = {ContentType,Body},
+ settings = HTTPRecordOptions,
+ abs_uri = Url, userinfo = UserInfo,
+ stream = Stream,
+ headers_as_is =
+ headers_as_is(Headers, Options)},
+
+ ensure_started(Scheme),
+
+ case httpc_manager:request(Request) of
+ {ok, RequestId} ->
+ handle_answer(RequestId, Sync, Options);
+ {error, Reason} ->
+ {error, Reason}
+ end
+ end.
+
+handle_answer(RequestId, false, _) ->
+ {ok, RequestId};
+handle_answer(RequestId, true, Options) ->
+ receive
+ {http, {RequestId, saved_to_file}} ->
+ {ok, saved_to_file};
+ {http, {RequestId, Result = {_,_,_}}} ->
+ return_answer(Options, Result);
+ {http, {RequestId, {error, Reason}}} ->
+ {error, Reason}
+ end.
+
+return_answer(Options, {StatusLine, Headers, BinBody}) ->
+ Body =
+ case http_util:key1search(Options, body_format, string) of
+ string ->
+ binary_to_list(BinBody);
+ _ ->
+ BinBody
+ end,
+ case http_util:key1search(Options, full_result, true) of
+ true ->
+ {ok, {StatusLine, Headers, Body}};
+ false ->
+ {_, Status, _} = StatusLine,
+ {ok, {Status, Body}}
+ end.
+
+
+%% This option is a workaround for http servers that do not follow the
+%% http standard and have case-sensitive header parsing. Should only be
+%% used if there is no other way to communicate with the server or for
+%% testing purposes.
+headers_as_is(Headers, Options) ->
+ case http_util:key1search(Options, headers_as_is, false) of
+ false ->
+ [];
+ true ->
+ Headers
+ end.
+
+http_options([], Acc) ->
+ Acc;
+http_options([{timeout, Val} | Settings], Acc)
+ when is_integer(Val), Val >= 0->
+ http_options(Settings, Acc#http_options{timeout = Val});
+http_options([{timeout, infinity} | Settings], Acc) ->
+ http_options(Settings, Acc#http_options{timeout = infinity});
+http_options([{autoredirect, Val} | Settings], Acc)
+ when Val == true; Val == false ->
+ http_options(Settings, Acc#http_options{autoredirect = Val});
+http_options([{ssl, Val} | Settings], Acc) ->
+ http_options(Settings, Acc#http_options{ssl = Val});
+http_options([{relaxed, Val} | Settings], Acc)
+ when Val == true; Val == false ->
+ http_options(Settings, Acc#http_options{relaxed = Val});
+http_options([{proxy_auth, Val = {User, Passwd}} | Settings], Acc)
+ when is_list(User),
+ is_list(Passwd) ->
+ http_options(Settings, Acc#http_options{proxy_auth = Val});
+http_options([Option | Settings], Acc) ->
+ error_logger:info_report("Invalid option ignored ~p~n", [Option]),
+ http_options(Settings, Acc).
+
+%% header_record(KeyValueList, RequestHeaders, Host) -> #http_request_h{}
+%%
+%% Folds a list of {LowercaseFieldName, Value} string pairs into the
+%% #http_request_h{} record, one clause per known RFC 2616 request
+%% header field. Unrecognised headers are collected (in reverse order
+%% of appearance) in the record's `other' field. When the input list
+%% is exhausted, validate_headers/2 fills in required defaults.
+header_record([], RequestHeaders, Host) ->
+ validate_headers(RequestHeaders, Host);
+header_record([{"cache-control", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'cache-control' = Val},
+ Host);
+header_record([{"connection", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{connection = Val}, Host);
+header_record([{"date", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{date = Val}, Host);
+header_record([{"pragma", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{pragma = Val}, Host);
+header_record([{"trailer", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{trailer = Val}, Host);
+header_record([{"transfer-encoding", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest,
+ RequestHeaders#http_request_h{'transfer-encoding' = Val},
+ Host);
+header_record([{"upgrade", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{upgrade = Val}, Host);
+header_record([{"via", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{via = Val}, Host);
+header_record([{"warning", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{warning = Val}, Host);
+header_record([{"accept", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{accept = Val}, Host);
+header_record([{"accept-charset", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'accept-charset' = Val},
+ Host);
+header_record([{"accept-encoding", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'accept-encoding' = Val},
+ Host);
+header_record([{"accept-language", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'accept-language' = Val},
+ Host);
+header_record([{"authorization", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{authorization = Val},
+ Host);
+header_record([{"expect", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{expect = Val}, Host);
+header_record([{"from", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{from = Val}, Host);
+header_record([{"host", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{host = Val}, Host);
+header_record([{"if-match", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'if-match' = Val},
+ Host);
+header_record([{"if-modified-since", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest,
+ RequestHeaders#http_request_h{'if-modified-since' = Val},
+ Host);
+header_record([{"if-none-match", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'if-none-match' = Val},
+ Host);
+header_record([{"if-range", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'if-range' = Val},
+ Host);
+
+header_record([{"if-unmodified-since", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'if-unmodified-since'
+ = Val}, Host);
+header_record([{"max-forwards", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'max-forwards' = Val},
+ Host);
+header_record([{"proxy-authorization", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'proxy-authorization'
+ = Val}, Host);
+header_record([{"range", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{range = Val}, Host);
+header_record([{"referer", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{referer = Val}, Host);
+header_record([{"te", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{te = Val}, Host);
+header_record([{"user-agent", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'user-agent' = Val},
+ Host);
+header_record([{"allow", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{allow = Val}, Host);
+header_record([{"content-encoding", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest,
+ RequestHeaders#http_request_h{'content-encoding' = Val},
+ Host);
+header_record([{"content-language", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest,
+ RequestHeaders#http_request_h{'content-language' = Val},
+ Host);
+header_record([{"content-length", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'content-length' = Val},
+ Host);
+header_record([{"content-location", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest,
+ RequestHeaders#http_request_h{'content-location' = Val},
+ Host);
+header_record([{"content-md5", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'content-md5' = Val},
+ Host);
+header_record([{"content-range", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'content-range' = Val},
+ Host);
+header_record([{"content-type", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'content-type' = Val},
+ Host);
+header_record([{"expires", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{expires = Val}, Host);
+header_record([{"last-modified", Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{'last-modified' = Val},
+ Host);
+%% Anything not matched above is an extension header.
+header_record([{Key, Val} | Rest], RequestHeaders, Host) ->
+ header_record(Rest, RequestHeaders#http_request_h{
+ other = [{Key, Val} |
+ RequestHeaders#http_request_h.other]},
+ Host).
+
+%% Fill in defaults for mandatory request headers the caller did not
+%% supply: `te' defaults to the empty string and `host' to the target
+%% host (required for HTTP/1.1 requests).
+validate_headers(#http_request_h{te = undefined} = Hdrs, Host) ->
+ validate_headers(Hdrs#http_request_h{te = ""}, Host);
+validate_headers(#http_request_h{host = undefined} = Hdrs, Host) ->
+ validate_headers(Hdrs#http_request_h{host = Host}, Host);
+validate_headers(Hdrs, _Host) ->
+ Hdrs.
+
+%% ensure_started(Scheme) -> ok
+%%
+%% Makes sure the couch_inets application is running and, when Scheme
+%% is https, that the ssl application is running as well. If either
+%% application has to be started here an informational report is
+%% logged, since starting them is really the responsibility of the
+%% application embedding inets.
+ensure_started(Scheme) ->
+ %% Start of the inets application should really be handled by the
+ %% application using inets.
+ %% NOTE(review): any start error other than the two matched below
+ %% falls through as a case_clause crash - presumably intentional
+ %% fail-fast; confirm.
+ case application:start(couch_inets) of
+ {error,{already_started,couch_inets}} ->
+ ok;
+ {error, {{already_started,_}, % Started as an included application
+ {inets_app, start, _}}} ->
+ ok;
+ ok ->
+ error_logger:info_report("The inets application was not started."
+ " Has now been started as a temporary"
+ " application.")
+ end,
+
+ case Scheme of
+ https ->
+ %% Start of the ssl application should really be handled by the
+ %% application using inets.
+ case application:start(ssl) of
+ {error,{already_started,ssl}} ->
+ ok;
+ %% Started as an included application
+ {error, {{already_started,_},
+ {ssl_app, start, _}}} ->
+ ok;
+ ok ->
+ error_logger:info_report("The ssl application was not "
+ "started. Has now been started "
+ "as a temporary application.")
+ end;
+ _ ->
+ ok
+ end.
diff --git a/src/couch_inets/http_base_64.erl b/src/couch_inets/http_base_64.erl
new file mode 100644
index 00000000..00cd966b
--- /dev/null
+++ b/src/couch_inets/http_base_64.erl
@@ -0,0 +1,126 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%% Description: Implements base 64 encode and decode, see RFC2045.
+-module(http_base_64).
+
+-export([encode/1, decode/1]).
+
+-deprecated({'_', '_', next_major_release}).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+%%-------------------------------------------------------------------------
+%% encode(ASCII) -> Base64
+%% ASCII - string()
+%% Base64 - string()
+%%
+%% Description: Encodes a plain ASCII string into its base64 form
+%% (RFC 2045), padding with `=' as required.
+%%-------------------------------------------------------------------------
+encode(Plain) when is_list(Plain) ->
+ encode_base64_list(Plain).
+
+
+%%-------------------------------------------------------------------------
+%% decode(Base64) -> ASCII
+%% Base64 - string()
+%% ASCII - string()
+%%
+%% Description: Decodes a base64-encoded string back to plain ASCII;
+%% characters outside the base64 alphabet are skipped.
+%%-------------------------------------------------------------------------
+decode(Encoded) when is_list(Encoded) ->
+ decode_base64_list(sixtets(Encoded), []).
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+
+%% Base-64 encoding: consume the input three octets at a time, emitting
+%% four base64 digits per group; the one- and two-octet tails are
+%% padded with `='.
+encode_base64_list([]) ->
+ [];
+encode_base64_list([X]) ->
+ [int_to_b64(X bsr 2), int_to_b64((X band 3) bsl 4), $=, $=];
+encode_base64_list([X,Y]) ->
+ [int_to_b64(X bsr 2), int_to_b64(((X band 3) bsl 4) bor (Y bsr 4)),
+ int_to_b64((Y band 15) bsl 2), $=];
+encode_base64_list([X,Y,Z | Tail]) ->
+ %% Pack the three octets into one 24-bit group and slice off four
+ %% sixtets, most significant first.
+ Group = (X bsl 16) bor (Y bsl 8) bor Z,
+ [int_to_b64(Group bsr 18), int_to_b64((Group bsr 12) band 63),
+ int_to_b64((Group bsr 6) band 63), int_to_b64(Group band 63) |
+ encode_base64_list(Tail)].
+
+%% Map a 6-bit value (0..63) to its base64 digit character.
+int_to_b64(X) when X >= 0, X =< 25 -> X + $A;
+int_to_b64(X) when X >= 26, X =< 51 -> X - 26 + $a;
+int_to_b64(X) when X >= 52, X =< 61 -> X - 52 + $0;
+int_to_b64(62) -> $+;
+int_to_b64(63) -> $/.
+
+%% This version works by consuming groups of 4 input characters to create
+%% a group of 3 output characters, with the three special-cases for
+%% end-of-input first:
+
+%% decode_base64_list({Sixtets, Rest}, Acc) -> string()
+%%
+%% Consumes one group of four sixtets (as produced by sixtets/1) at a
+%% time, emitting up to three octets per group; the atom `pad' marks
+%% trailing `=' padding and selects the one- or two-octet end cases.
+%% NOTE(review): input whose final group has fewer than four sixtets,
+%% or padding anywhere except the last two positions, matches no
+%% clause and raises function_clause - confirm callers only pass
+%% well-formed base64.
+decode_base64_list({[],[]}, Acc) ->
+ lists:reverse(Acc);
+decode_base64_list({[Sixtet1,Sixtet2,pad,pad], []}, Acc) ->
+ Bits2x6 = (Sixtet1 bsl 18) bor (Sixtet2 bsl 12),
+ Octet1 = Bits2x6 bsr 16,
+ lists:reverse([Octet1 | Acc]);
+decode_base64_list({[Sixtet1,Sixtet2,Sixtet3,pad], []}, Acc) ->
+ Bits3x6 = (Sixtet1 bsl 18) bor (Sixtet2 bsl 12) bor (Sixtet3 bsl 6),
+ Octet1 = Bits3x6 bsr 16,
+ Octet2 = (Bits3x6 bsr 8) band 16#ff,
+ lists:reverse([Octet2, Octet1 | Acc]);
+decode_base64_list({[Sixtet1,Sixtet2,Sixtet3,Sixtet4],Rest}, Acc) when
+ Sixtet1 =/= pad,
+ Sixtet2 =/= pad,
+ Sixtet3 =/= pad,
+ Sixtet4 =/= pad ->
+ Bits4x6 =
+ (Sixtet1 bsl 18) bor (Sixtet2 bsl 12) bor (Sixtet3 bsl 6) bor Sixtet4,
+ Octet1 = Bits4x6 bsr 16,
+ Octet2 = (Bits4x6 bsr 8) band 16#ff,
+ Octet3 = Bits4x6 band 16#ff,
+ decode_base64_list(sixtets(Rest), [Octet3, Octet2, Octet1 | Acc]).
+
+%% Map one base64 character to its 6-bit value. `=' maps to the atom
+%% `pad' and any character outside the alphabet maps to `ignore'
+%% (skipped by sixtets/2).
+b64_to_int(X) when X >= $A, X =< $Z -> X - $A;
+b64_to_int(X) when X >= $a, X =< $z -> X - $a + 26;
+b64_to_int(X) when X >= $0, X =< $9 -> X - $0 + 52;
+b64_to_int($+) -> 62;
+b64_to_int($/) -> 63;
+b64_to_int($=) -> pad; % Padding will be removed by decode_base64_list/2
+b64_to_int(_) -> ignore. % Not in base 64 should be ignored
+
+%% Split the head of Str into a group of at most four 6-bit values,
+%% returning {Group, RemainingInput}. Characters outside the base64
+%% alphabet are silently skipped; `=' becomes the atom `pad'.
+sixtets(Str) ->
+ sixtets(Str, []).
+
+sixtets([], Acc) ->
+ {lists:reverse(Acc), []};
+sixtets(Rest, Acc) when length(Acc) =:= 4 ->
+ {lists:reverse(Acc), Rest};
+sixtets([Char | Tail], Acc) ->
+ case b64_to_int(Char) of
+ ignore ->
+ sixtets(Tail, Acc);
+ Int ->
+ sixtets(Tail, [Int | Acc])
+ end.
diff --git a/src/couch_inets/http_chunk.erl b/src/couch_inets/http_chunk.erl
new file mode 100644
index 00000000..462cfcc5
--- /dev/null
+++ b/src/couch_inets/http_chunk.erl
@@ -0,0 +1,289 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%% Description: Implements chunked transfer encoding see RFC2616 section
+%% 3.6.1
+-module(http_chunk).
+
+-include("http_internal.hrl").
+
+%% API
+-export([decode/3, decode/4, encode/1, encode_last/0, handle_headers/2]).
+%% Callback API - used for example if the chunkedbody is received a
+%% little at a time on a socket.
+-export([decode_size/1, ignore_extensions/1, decode_data/1, decode_trailer/1]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+%%-------------------------------------------------------------------------
+%% decode(ChunkedBody, MaxBodySize, MaxHeaderSize, <Stream>) ->
+%% {ok, {Headers, Body}} | {Module, Function, Args}
+%%
+%% Headers = ["Header:Value"]
+%% ChunkedBody = binary()
+%% MaxBodySize = integer()
+%% MaxHeaderSize = integer()
+%% Stream = {Code, Request} - if Request#request.stream =/= none
+%% and Code == 200 the side effect of sending each decode chunk to the
+%% client/file before the whole body is received will take place.
+%%
+%% Note: decode/4 should only be used from httpc_handler module.
+%% Otherwise use the side effect free decode/3.
+%%
+%% Description: Decodes a body encoded by the chunked transfer
+%% encoding. If the ChunkedBody is not complete it returns {Module,
+%% Function, Args} so that decoding can be continued when more of the
+%% data has been received by calling Module:Function([NewData | Args]).
+%%
+%% Note: In the case of pipelining a call to decode might contain data
+%% that belongs to the next request/response and will be returned as
+%% part of the body, hence functions calling http_chunk:decode must
+%% look at the returned content-length header to make sure that they
+%% split the actual body and data that possible should be passed along to
+%% the next pass in the loop.
+%%-------------------------------------------------------------------------
+decode(ChunkedBody, MaxBodySize, MaxHeaderSize) ->
+ decode(ChunkedBody, MaxBodySize, MaxHeaderSize, false).
+
+decode(ChunkedBody, MaxBodySize, MaxHeaderSize, Stream) ->
+ %% Note decode_size will call decode_data.
+ decode_size([ChunkedBody, <<>>, [],
+ {MaxBodySize, <<>>, 0, MaxHeaderSize, Stream}]).
+
+%%-------------------------------------------------------------------------
+%% encode(Chunk) -> EncodedChunk
+%%
+%% Chunk = binary() | list()
+%% EncodedChunk = binary() | iolist()
+%%
+%% Description: Wraps one body part in the chunked transfer encoding:
+%% the chunk size in hex, CRLF, the data, CRLF. The result mirrors the
+%% input format (binary in, binary out; list in, iolist out); both
+%% forms are acceptable when sending the data on a socket.
+%%-------------------------------------------------------------------------
+encode(Data) when is_binary(Data) ->
+ SizeHex = list_to_binary(http_util:integer_to_hexlist(size(Data))),
+ <<SizeHex/binary, ?CR, ?LF, Data/binary, ?CR, ?LF>>;
+encode(Data) when is_list(Data) ->
+ SizeHex = http_util:integer_to_hexlist(length(Data)),
+ [SizeHex, ?CR, ?LF, Data, ?CR, ?LF].
+
+%% The terminating zero-size chunk that ends a chunked body.
+encode_last() ->
+ <<$0, ?CR, ?LF, ?CR, ?LF >>.
+
+%%-------------------------------------------------------------------------
+%% handle_headers(HeaderRecord, ChunkedHeaders) -> NewHeaderRecord
+%%
+%% HeaderRecord = NewHeaderRecord = #http_request_h{} | #http_response_h{}
+%% ChunkedHeaders = ["Header:Value"] as returned by http_chunk:decode/3
+%%
+%% Description: Removes chunked from the header as we have now decoded
+%% the body, and adds a content-length header and any other headers
+%% found in the chunked trailer.
+%%-------------------------------------------------------------------------
+handle_headers(RequestHeaderRecord = #http_request_h{}, ChunkedHeaders) ->
+ NewHeaders = http_request:headers(ChunkedHeaders, RequestHeaderRecord),
+ %% NOTE(review): `--' removes characters, not a substring; for the
+ %% plain value "chunked" this yields "" as intended, but a compound
+ %% value would be mangled char-by-char - confirm only "chunked" can
+ %% appear here.
+ TransferEncoding =
+ case NewHeaders#http_request_h.'transfer-encoding' -- "chunked" of
+ "" ->
+ undefined;
+ Other ->
+ Other
+ end,
+ NewHeaders#http_request_h{'transfer-encoding' = TransferEncoding};
+
+handle_headers(ResponseHeaderRecord = #http_response_h{}, ChunkedHeaders) ->
+ NewHeaders = http_response:headers(ChunkedHeaders, ResponseHeaderRecord),
+ TransferEncoding =
+ case NewHeaders#http_response_h.'transfer-encoding' -- "chunked" of
+ "" ->
+ undefined;
+ Other ->
+ Other
+ end,
+ NewHeaders#http_response_h{'transfer-encoding' = TransferEncoding}.
+
+%% Functions that may be returned during the decoding process
+%% if the input data is incomplete. Each re-entry point prepends the
+%% newly received binary (Bin) to the data saved in the continuation
+%% and resumes the corresponding internal decoder.
+decode_size([Bin, Rest, HexList, Info]) ->
+ decode_size(<<Rest/binary, Bin/binary>>, HexList, Info).
+
+ignore_extensions([Bin, Rest, NextFunction]) ->
+ ignore_extensions(<<Rest/binary, Bin/binary>>, NextFunction).
+
+decode_data([Bin, ChunkSize, TotalChunk, Info]) ->
+ decode_data(ChunkSize, <<TotalChunk/binary, Bin/binary>>, Info).
+
+decode_trailer([Bin, Rest, Header, Headers, MaxHeaderSize, Body,
+ BodyLength]) ->
+ decode_trailer(<<Rest/binary, Bin/binary>>,
+ Header, Headers, MaxHeaderSize, Body, BodyLength).
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+%% decode_size(Data, HexList, Info)
+%%
+%% Accumulates the hex digits of the chunk-size line (in reverse, in
+%% HexList) until CRLF is seen; a size of zero means the last chunk,
+%% otherwise control passes to decode_data/3. A `;' introduces chunk
+%% extensions, which are skipped. Returns a {Module, Function, Args}
+%% continuation when the input is exhausted mid-line.
+decode_size(<<>>, HexList, Info) ->
+ {?MODULE, decode_size, [<<>>, HexList, Info]};
+decode_size(Data = <<?CR, ?LF, ChunkRest/binary>>, HexList,
+ {MaxBodySize, Body,
+ AccLength,
+ MaxHeaderSize, Stream}) ->
+ ChunkSize = http_util:hexlist_to_integer(lists:reverse(HexList)),
+ case ChunkSize of
+ 0 -> % Last chunk, there was no data
+ ignore_extensions(Data, {?MODULE, decode_trailer,
+ [<<>>, [],[], MaxHeaderSize,
+ Body,
+ integer_to_list(AccLength)]});
+ _ ->
+ %% Note decode_data may call decode_size again if there
+ %% is more than one chunk, hence here is where the last parameter
+ %% to this function comes in.
+ decode_data(ChunkSize, ChunkRest, {MaxBodySize, Body,
+ ChunkSize + AccLength ,
+ MaxHeaderSize, Stream})
+ end;
+decode_size(<<";", Rest/binary>>, HexList, Info) ->
+ %% Note ignore_extensions will call decode_size/1 again when
+ %% it ignored all extensions.
+ ignore_extensions(Rest, {?MODULE, decode_size, [<<>>, HexList, Info]});
+%% A lone CR could be the start of the terminating CRLF - wait for more.
+decode_size(<<?CR>> = Data, HexList, Info) ->
+ {?MODULE, decode_size, [Data, HexList, Info]};
+decode_size(<<Octet, Rest/binary>>, HexList, Info) ->
+ decode_size(Rest, [Octet | HexList], Info).
+
+%% "All applications MUST ignore chunk-extension extensions they
+%% do not understand.", see RFC 2616 Section 3.6.1 We don't
+%% understand any extension...
+%% Skips octets until CRLF, then hands the remaining data (including
+%% the CRLF) to the saved {Module, Function, Args} continuation.
+ignore_extensions(<<>>, NextFunction) ->
+ {?MODULE, ignore_extensions, [<<>>, NextFunction]};
+ignore_extensions(Data = <<?CR, ?LF, _ChunkRest/binary>>,
+ {Module, Function, Args}) ->
+ Module:Function([Data | Args]);
+%% A lone CR could be the start of CRLF - wait for more input.
+ignore_extensions(<<?CR>> = Data, NextFunction) ->
+ {?MODULE, ignore_extensions, [Data, NextFunction]};
+ignore_extensions(<<_Octet, Rest/binary>>, NextFunction) ->
+ ignore_extensions(Rest, NextFunction).
+
+%% decode_data(ChunkSize, TotalChunk, Info)
+%%
+%% Consumes ChunkSize octets of chunk data plus the trailing CRLF and
+%% then either (a) recognises the terminating "0" chunk and moves on to
+%% the trailer, (b) loops back to decode_size/3 for the next chunk, or
+%% (c) returns a continuation when the buffered data could still be the
+%% prefix of the last-chunk marker. Enforces MaxBodySize (unless
+%% `nolimit') by throwing {error, body_too_big}. Decoded data is pushed
+%% through stream/2 as it becomes available.
+decode_data(ChunkSize, TotalChunk,
+ Info = {MaxBodySize, BodySoFar, AccLength, MaxHeaderSize, Stream})
+ when ChunkSize =< size(TotalChunk) ->
+ case TotalChunk of
+ %% Potential last chunk - not enough data to decide yet.
+ <<_:ChunkSize/binary, ?CR, ?LF, "0">> ->
+ {?MODULE, decode_data, [ChunkSize, TotalChunk, Info]};
+ <<_:ChunkSize/binary, ?CR, ?LF, "0", ?CR>> ->
+ {?MODULE, decode_data, [ChunkSize, TotalChunk, Info]};
+ <<_:ChunkSize/binary, ?CR, ?LF>> ->
+ {?MODULE, decode_data, [ChunkSize, TotalChunk, Info]};
+ %% Last chunk
+ <<Data:ChunkSize/binary, ?CR, ?LF, "0", ";">> ->
+ %% Note ignore_extensions will call decode_trailer/1
+ %% once it ignored all extensions.
+ {NewBody, _} =
+ stream(<<BodySoFar/binary, Data/binary>>, Stream),
+ {?MODULE, ignore_extensions,
+ [<<>>,
+ {?MODULE, decode_trailer, [<<>>, [],[], MaxHeaderSize,
+ NewBody,
+ integer_to_list(AccLength)]}]};
+ <<Data:ChunkSize/binary, ?CR, ?LF, "0", ";", Rest/binary>> ->
+ %% Note ignore_extensions will call decode_trailer/1
+ %% once it ignored all extensions.
+ {NewBody, _} = stream(<<BodySoFar/binary, Data/binary>>, Stream),
+ ignore_extensions(Rest, {?MODULE, decode_trailer,
+ [<<>>, [],[], MaxHeaderSize,
+ NewBody,
+ integer_to_list(AccLength)]});
+ <<Data:ChunkSize/binary, ?CR, ?LF, "0", ?CR, ?LF>> ->
+ {NewBody, _} = stream(<<BodySoFar/binary, Data/binary>>, Stream),
+ {?MODULE, decode_trailer, [<<?CR, ?LF>>, [],[], MaxHeaderSize,
+ NewBody,
+ integer_to_list(AccLength)]};
+ <<Data:ChunkSize/binary, ?CR, ?LF, "0", ?CR, ?LF, Rest/binary>> ->
+ {NewBody,_}= stream(<<BodySoFar/binary, Data/binary>>, Stream),
+ decode_trailer(<<?CR, ?LF, Rest/binary>>, [],[], MaxHeaderSize,
+ NewBody,
+ integer_to_list(AccLength));
+ %% There are more chunks, so here we go again...
+ <<Data:ChunkSize/binary, ?CR, ?LF, Rest/binary>>
+ when (AccLength < MaxBodySize) or (MaxBodySize == nolimit) ->
+ {NewBody, NewStream} =
+ stream(<<BodySoFar/binary, Data/binary>>, Stream),
+ decode_size(Rest, [],
+ {MaxBodySize, NewBody,
+ AccLength, MaxHeaderSize, NewStream});
+ <<_:ChunkSize/binary, ?CR, ?LF, _/binary>> ->
+ throw({error, body_too_big});
+ _ ->
+ {?MODULE, decode_data, [ChunkSize, TotalChunk, Info]}
+ end;
+%% Not enough data buffered for this chunk yet - continuation.
+decode_data(ChunkSize, TotalChunk, Info) ->
+ {?MODULE, decode_data, [ChunkSize, TotalChunk, Info]}.
+
+%% decode_trailer(Data, CurrentHeader, Headers, MaxHeaderSize, Body,
+%% BodyLength)
+%%
+%% Parses the optional trailer headers after the last chunk. On the
+%% final blank line (CRLFCRLF) it returns {ok, {Headers, Body}} with a
+%% synthesized "content-length" header; any data after the trailer
+%% belongs to a pipelined message and is appended to Body for the
+%% caller to split off. Incomplete input yields a continuation.
+decode_trailer(<<>>, Header, Headers, MaxHeaderSize, Body, BodyLength) ->
+ {?MODULE, decode_trailer, [<<>>, Header, Headers, MaxHeaderSize, Body,
+ BodyLength]};
+
+%% Note: If Bin is not empty it is part of a pipelined request/response.
+decode_trailer(<<?CR,?LF,?CR,?LF, Bin/binary>>, [], [], _, Body, BodyLength) ->
+ {ok, {["content-length:" ++ BodyLength], <<Body/binary, Bin/binary>>}};
+decode_trailer(<<?CR,?LF,?CR,?LF, Bin/binary>>,
+ Header, Headers, MaxHeaderSize, Body, BodyLength) ->
+ NewHeaders = case Header of
+ [] ->
+ Headers;
+ _ ->
+ [lists:reverse(Header) | Headers]
+ end,
+ %% NOTE(review): this compares the *number* of trailer headers
+ %% against MaxHeaderSize, not their total byte size - confirm that
+ %% is the intended limit.
+ Length = length(NewHeaders),
+ case Length > MaxHeaderSize of
+ true ->
+ throw({error, {header_too_long, MaxHeaderSize,
+ MaxHeaderSize-Length}});
+ false ->
+ {ok, {["content-length:" ++ BodyLength | NewHeaders],
+ <<Body/binary, Bin/binary>>}}
+ end;
+%% The following partial CR/LF sequences could grow into the
+%% terminating CRLFCRLF - wait for more input.
+decode_trailer(<<?CR,?LF,?CR>> = Data, Header, Headers, MaxHeaderSize,
+ Body, BodyLength) ->
+ {?MODULE, decode_trailer, [Data, Header, Headers, MaxHeaderSize, Body,
+ BodyLength]};
+decode_trailer(<<?CR,?LF>> = Data, Header, Headers, MaxHeaderSize,
+ Body, BodyLength) ->
+ {?MODULE, decode_trailer, [Data, Header, Headers, MaxHeaderSize, Body,
+ BodyLength]};
+decode_trailer(<<?CR>> = Data, Header, Headers, MaxHeaderSize,
+ Body, BodyLength) ->
+ {?MODULE, decode_trailer, [Data, Header, Headers, MaxHeaderSize, Body,
+ BodyLength]};
+%% End of one trailer header line - bank it and start the next.
+decode_trailer(<<?CR, ?LF, Rest/binary>>, Header, Headers,
+ MaxHeaderSize, Body, BodyLength) ->
+ decode_trailer(Rest, [], [lists:reverse(Header) | Headers],
+ MaxHeaderSize, Body, BodyLength);
+
+decode_trailer(<<Octet, Rest/binary>>, Header, Headers, MaxHeaderSize, Body,
+ BodyLength) ->
+ decode_trailer(Rest, [Octet | Header], Headers, MaxHeaderSize,
+ Body, BodyLength).
+
+%% stream(BodyPart, Stream) -> {Body, NewStream}
+%%
+%% When Stream is false (decode/3, side-effect free) the body part is
+%% passed through unchanged. Otherwise it is handed to
+%% httpc_handler:stream/3, which may deliver it to the client/file as a
+%% side effect and returns what remains to be accumulated.
+stream(BodyPart, false) ->
+ {BodyPart, false};
+stream(BodyPart, {Code, Request}) ->
+ {NewBody, NewRequest} = httpc_handler:stream(BodyPart, Request, Code),
+ {NewBody, {Code, NewRequest}}.
diff --git a/src/couch_inets/http_cookie.erl b/src/couch_inets/http_cookie.erl
new file mode 100644
index 00000000..a8e68651
--- /dev/null
+++ b/src/couch_inets/http_cookie.erl
@@ -0,0 +1,389 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%% Description: Cookie handling according to RFC 2109
+
+-module(http_cookie).
+
+-include("httpc_internal.hrl").
+
+-export([header/4, cookies/3, open_cookie_db/1, close_cookie_db/1, insert/2]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+%% header(Scheme, Address, Path, CookieDb) -> {"cookie", CookieString}
+%%
+%% Builds the Cookie request header for a request to Host/Path; the
+%% string is empty when no stored cookie matches (secure cookies are
+%% only sent when Scheme is https).
+header(Scheme, {Host, _}, Path, CookieDb) ->
+ case lookup_cookies(Host, Path, CookieDb) of
+ [] ->
+ {"cookie", ""};
+ Cookies ->
+ {"cookie", cookies_to_string(Scheme, Cookies)}
+ end.
+
+%% cookies(Headers, RequestPath, RequestHost) -> [#http_cookie{}]
+%%
+%% Parses all set-cookie headers in Headers (using the request path and
+%% host as defaults for missing Path/Domain attributes) and keeps only
+%% the cookies that pass the RFC 2109 acceptance rules.
+cookies(Headers, RequestPath, RequestHost) ->
+ Cookies = parse_set_cookies(Headers, {RequestPath, RequestHost}),
+ accept_cookies(Cookies, RequestPath, RequestHost).
+
+%% open_cookie_db(Config) -> {DetsDb | undefined, EtsDb}
+%%
+%% Opens the cookie store: an ets table for session cookies and, when a
+%% persistent database is configured, a dets table (keyed on domain)
+%% for cookies that outlive the session. `undefined' in the first
+%% position means only session cookies are kept.
+open_cookie_db({{_, only_session_cookies}, SessionDbName}) ->
+ EtsDb = ets:new(SessionDbName, [protected, bag,
+ {keypos, #http_cookie.domain}]),
+ {undefined, EtsDb};
+
+open_cookie_db({{DbName, Dbdir}, SessionDbName}) ->
+ File = filename:join(Dbdir, atom_to_list(DbName)),
+ {ok, DetsDb} = dets:open_file(DbName, [{keypos, #http_cookie.domain},
+ {type, bag},
+ {file, File},
+ {ram_file, true}]),
+ EtsDb = ets:new(SessionDbName, [protected, bag,
+ {keypos, #http_cookie.domain}]),
+ {DetsDb, EtsDb}.
+
+%% Close (and for dets, flush) whichever tables open_cookie_db created.
+close_cookie_db({undefined, EtsDb}) ->
+ ets:delete(EtsDb);
+
+close_cookie_db({DetsDb, EtsDb}) ->
+ dets:close(DetsDb),
+ ets:delete(EtsDb).
+
+%% insert(Cookie, {DetsDb | undefined, EtsDb}) -> ok
+%%
+%% Stores a cookie, replacing any existing cookie with the same
+%% {domain, name, path}; a persistent cookie with max_age = 0 deletes
+%% the stored cookie instead (RFC 2109 expiry).
+%% NOTE(review): the [NewCookie] match assumes at most one existing
+%% cookie per {domain, name, path}; more than one match would crash
+%% with a case_clause - confirm the bag tables cannot hold duplicates
+%% for that key combination.
+%%
+%% If no persistent cookie database is defined we
+%% treat all cookies as if they were session cookies.
+insert(Cookie = #http_cookie{max_age = Int},
+ Dbs = {undefined, _}) when is_integer(Int) ->
+ insert(Cookie#http_cookie{max_age = session}, Dbs);
+
+insert(Cookie = #http_cookie{domain = Key, name = Name,
+ path = Path, max_age = session},
+ Db = {_, CookieDb}) ->
+ case ets:match_object(CookieDb, #http_cookie{domain = Key,
+ name = Name,
+ path = Path,
+ _ = '_'}) of
+ [] ->
+ ets:insert(CookieDb, Cookie);
+ [NewCookie] ->
+ delete(NewCookie, Db),
+ ets:insert(CookieDb, Cookie)
+ end,
+ ok;
+insert(#http_cookie{domain = Key, name = Name,
+ path = Path, max_age = 0},
+ Db = {CookieDb, _}) ->
+ case dets:match_object(CookieDb, #http_cookie{domain = Key,
+ name = Name,
+ path = Path,
+ _ = '_'}) of
+ [] ->
+ ok;
+ [NewCookie] ->
+ delete(NewCookie, Db)
+ end,
+ ok;
+insert(Cookie = #http_cookie{domain = Key, name = Name, path = Path},
+ Db = {CookieDb, _}) ->
+ case dets:match_object(CookieDb, #http_cookie{domain = Key,
+ name = Name,
+ path = Path,
+ _ = '_'}) of
+ [] ->
+ dets:insert(CookieDb, Cookie);
+ [NewCookie] ->
+ delete(NewCookie, Db),
+ dets:insert(CookieDb, Cookie)
+ end,
+ ok.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+%% lookup_cookies(Domain, Db) -> [#http_cookie{}]
+%%
+%% All cookies stored under exactly this domain key: session cookies
+%% from ets plus, when configured, persistent cookies from dets.
+lookup_cookies(Key, {undefined, Ets}) ->
+ ets:match_object(Ets, #http_cookie{domain = Key,
+ _ = '_'});
+lookup_cookies(Key, {Dets,Ets}) ->
+ SessionCookies = ets:match_object(Ets, #http_cookie{domain = Key,
+ _ = '_'}),
+ Cookies = dets:match_object(Dets, #http_cookie{domain = Key,
+ _ = '_'}),
+ Cookies ++ SessionCookies.
+
+%% Remove a single stored cookie: session cookies live in ets,
+%% persistent ones in dets.
+delete(Cookie = #http_cookie{max_age = session}, {_, CookieDb}) ->
+ ets:delete_object(CookieDb, Cookie);
+delete(Cookie, {CookieDb, _}) ->
+ dets:delete_object(CookieDb, Cookie).
+
+%% lookup_cookies(Host, Path, Db) -> [#http_cookie{}]
+%%
+%% Collects cookies for the exact host and - for real host names - for
+%% every parent domain (".example.com" style keys), discards expired
+%% ones via valid_cookies/3, and keeps only those whose path is a
+%% prefix of the request path.
+lookup_cookies(Host, Path, Db) ->
+ Cookies =
+ case http_util:is_hostname(Host) of
+ true ->
+ HostCookies = lookup_cookies(Host, Db),
+ [_| DomainParts] = string:tokens(Host, "."),
+ lookup_domain_cookies(DomainParts, Db, HostCookies);
+ false -> % IP address
+ lookup_cookies(Host, Db)
+ end,
+ ValidCookies = valid_cookies(Cookies, [], Db),
+ lists:filter(fun(Cookie) ->
+ lists:prefix(Cookie#http_cookie.path, Path)
+ end, ValidCookies).
+
+%% lookup_domain_cookies(DomainParts, Db, AccCookies) -> [#http_cookie{}]
+%%
+%% Walks up the domain hierarchy (e.g. for "a.b.example.com" it probes
+%% ".b.example.com" and ".example.com") collecting cookies stored under
+%% each parent-domain key.
+%% For instance if Host=localhost
+lookup_domain_cookies([], _, AccCookies) ->
+ lists:flatten(AccCookies);
+%% Top domains can not have cookies
+lookup_domain_cookies([_], _, AccCookies) ->
+ lists:flatten(AccCookies);
+lookup_domain_cookies([Next | DomainParts], CookieDb, AccCookies) ->
+ Domain = merge_domain_parts(DomainParts, [Next ++ "."]),
+ lookup_domain_cookies(DomainParts, CookieDb,
+ [lookup_cookies(Domain, CookieDb)
+ | AccCookies]).
+
+%% Rebuild a dotted domain string from its parts, with a leading ".".
+merge_domain_parts([Part], Merged) ->
+ lists:flatten(["." | lists:reverse([Part | Merged])]);
+merge_domain_parts([Part| Rest], Merged) ->
+ merge_domain_parts(Rest, [".", Part | Merged]).
+
+%% cookies_to_string(Scheme, Cookies) -> string()
+%%
+%% Renders the matching cookies as a Cookie header value, prefixed by a
+%% single "$Version=..." taken from the first cookie. Cookies are sent
+%% most-specific path first; secure cookies are skipped unless the
+%% scheme is https. If nothing but the version prefix remains the
+%% result is the empty string.
+cookies_to_string(Scheme, Cookies = [Cookie | _]) ->
+ Version = "$Version=" ++ Cookie#http_cookie.version ++ "; ",
+ cookies_to_string(Scheme, path_sort(Cookies), [Version]).
+
+cookies_to_string(_, [], CookieStrs) ->
+ case length(CookieStrs) of
+ 1 ->
+ "";
+ _ ->
+ lists:flatten(lists:reverse(CookieStrs))
+ end;
+
+cookies_to_string(https, [Cookie = #http_cookie{secure = true}| Cookies],
+ CookieStrs) ->
+ Str = case Cookies of
+ [] ->
+ cookie_to_string(Cookie);
+ _ ->
+ cookie_to_string(Cookie) ++ "; "
+ end,
+ cookies_to_string(https, Cookies, [Str | CookieStrs]);
+
+%% Secure cookies must not be sent over a non-https scheme.
+cookies_to_string(Scheme, [#http_cookie{secure = true}| Cookies],
+ CookieStrs) ->
+ cookies_to_string(Scheme, Cookies, CookieStrs);
+
+cookies_to_string(Scheme, [Cookie | Cookies], CookieStrs) ->
+ Str = case Cookies of
+ [] ->
+ cookie_to_string(Cookie);
+ _ ->
+ cookie_to_string(Cookie) ++ "; "
+ end,
+ cookies_to_string(Scheme, Cookies, [Str | CookieStrs]).
+
+%% Render one cookie as "Name=Value[; $Path=...][; $Domain=...]" for
+%% the Cookie request header; $Path/$Domain are attached only when the
+%% cookie carried them explicitly (not when they are runtime defaults).
+cookie_to_string(#http_cookie{name = Name, value = Value} = Cookie) ->
+ add_domain(add_path(Name ++ "=" ++ Value, Cookie), Cookie).
+
+add_path(Acc, #http_cookie{path_default = true}) ->
+ Acc;
+add_path(Acc, #http_cookie{path = Path}) ->
+ Acc ++ "; $Path=" ++ Path.
+
+add_domain(Acc, #http_cookie{domain_default = true}) ->
+ Acc;
+add_domain(Acc, #http_cookie{domain = Domain}) ->
+ Acc ++ "; $Domain=" ++ Domain.
+
+%% parse_set_cookies(Headers, {DefaultPath, DefaultDomain}) ->
+%% [#http_cookie{}]
+%%
+%% Extracts every "set-cookie" header and parses each comma-separated
+%% cookie into an #http_cookie{} record.
+%% NOTE(review): splitting on "," also splits Netscape-style Expires
+%% dates that contain a comma; fix_netscape_cookie/2 (defined
+%% elsewhere in this module) appears to re-join those - confirm.
+parse_set_cookies(OtherHeaders, DefaultPathDomain) ->
+ SetCookieHeaders = lists:foldl(fun({"set-cookie", Value}, Acc) ->
+ [string:tokens(Value, ",")| Acc];
+ (_, Acc) ->
+ Acc
+ end, [], OtherHeaders),
+
+ lists:flatten(lists:map(fun(CookieHeader) ->
+ NewHeader =
+ fix_netscape_cookie(CookieHeader,
+ []),
+ parse_set_cookie(NewHeader, [],
+ DefaultPathDomain) end,
+ SetCookieHeaders)).
+
+%% Parse one cookie string "Name=Value[; Attr[=Val]]..." into a record,
+%% filling in runtime defaults for missing path/domain attributes.
+parse_set_cookie([], AccCookies, _) ->
+ AccCookies;
+parse_set_cookie([CookieHeader | CookieHeaders], AccCookies,
+ Defaults = {DefaultPath, DefaultDomain}) ->
+ [CookieStr | Attributes] = case string:tokens(CookieHeader, ";") of
+ [CStr] ->
+ [CStr, ""];
+ [CStr | Attr] ->
+ [CStr, Attr]
+ end,
+ Pos = string:chr(CookieStr, $=),
+ Name = string:substr(CookieStr, 1, Pos - 1),
+ Value = string:substr(CookieStr, Pos + 1),
+ Cookie = #http_cookie{name = string:strip(Name),
+ value = string:strip(Value)},
+ NewAttributes = parse_set_cookie_attributes(Attributes),
+ TmpCookie = cookie_attributes(NewAttributes, Cookie),
+ %% Add runtime default values if necessary
+ NewCookie = domain_default(path_default(TmpCookie, DefaultPath),
+ DefaultDomain),
+ parse_set_cookie(CookieHeaders, [NewCookie | AccCookies], Defaults).
+
+%% parse_set_cookie_attributes(Attributes) -> [{Name, Value}]
+%%
+%% Splits each "Attr=Value" (or bare "secure") attribute string into a
+%% lowercase name / stripped value pair.
+%% NOTE(review): an attribute value containing `=' tokenizes into more
+%% than two parts and is discarded as {"dummy", ""} - confirm no such
+%% attribute values need to survive.
+parse_set_cookie_attributes([]) ->
+ [];
+parse_set_cookie_attributes([Attributes]) ->
+ lists:map(fun(Attr) ->
+ [AttrName, AttrValue] =
+ case string:tokens(Attr, "=") of
+ %% All attributes have the form
+ %% Name=Value except "secure"!
+ [Name] ->
+ [Name, ""];
+ [Name, Value] ->
+ [Name, Value];
+ %% Anything not expected will be
+ %% disregarded
+ _ ->
+ ["Dummy",""]
+ end,
+ {http_util:to_lower(string:strip(AttrName)),
+ string:strip(AttrValue)}
+ end, Attributes).
+
+cookie_attributes([], Cookie) ->
+ Cookie;
+cookie_attributes([{"comment", Value}| Attributes], Cookie) ->
+ cookie_attributes(Attributes,
+ Cookie#http_cookie{comment = Value});
+cookie_attributes([{"domain", Value}| Attributes], Cookie) ->
+ cookie_attributes(Attributes,
+ Cookie#http_cookie{domain = Value});
+cookie_attributes([{"max-age", Value}| Attributes], Cookie) ->
+ ExpireTime = cookie_expires(list_to_integer(Value)),
+ cookie_attributes(Attributes,
+ Cookie#http_cookie{max_age = ExpireTime});
+%% Backwards compatibility with netscape cookies
+cookie_attributes([{"expires", Value}| Attributes], Cookie) ->
+ Time = http_util:convert_netscapecookie_date(Value),
+ ExpireTime = calendar:datetime_to_gregorian_seconds(Time),
+ cookie_attributes(Attributes,
+ Cookie#http_cookie{max_age = ExpireTime});
+cookie_attributes([{"path", Value}| Attributes], Cookie) ->
+ cookie_attributes(Attributes,
+ Cookie#http_cookie{path = Value});
+cookie_attributes([{"secure", _}| Attributes], Cookie) ->
+ cookie_attributes(Attributes,
+ Cookie#http_cookie{secure = true});
+cookie_attributes([{"version", Value}| Attributes], Cookie) ->
+ cookie_attributes(Attributes,
+ Cookie#http_cookie{version = Value});
+%% Disregard unknown attributes.
+cookie_attributes([_| Attributes], Cookie) ->
+ cookie_attributes(Attributes, Cookie).
+
+domain_default(Cookie = #http_cookie{domain = undefined},
+ DefaultDomain) ->
+ Cookie#http_cookie{domain = DefaultDomain, domain_default = true};
+domain_default(Cookie, _) ->
+ Cookie.
+
+path_default(Cookie = #http_cookie{path = undefined},
+ DefaultPath) ->
+ Cookie#http_cookie{path = skip_right_most_slash(DefaultPath),
+ path_default = true};
+path_default(Cookie, _) ->
+ Cookie.
+
+%% Note: if the path is only / that / will be kept
+skip_right_most_slash("/") ->
+ "/";
+skip_right_most_slash(Str) ->
+ string:strip(Str, right, $/).
+
+accept_cookies(Cookies, RequestPath, RequestHost) ->
+ lists:filter(fun(Cookie) ->
+ accept_cookie(Cookie, RequestPath, RequestHost)
+ end, Cookies).
+
+accept_cookie(Cookie, RequestPath, RequestHost) ->
+ accept_path(Cookie, RequestPath) and accept_domain(Cookie, RequestHost).
+
+accept_path(#http_cookie{path = Path}, RequestPath) ->
+ lists:prefix(Path, RequestPath).
+
+accept_domain(#http_cookie{domain = RequestHost}, RequestHost) ->
+ true;
+
+accept_domain(#http_cookie{domain = Domain}, RequestHost) ->
+ HostCheck = case http_util:is_hostname(RequestHost) of
+ true ->
+ (lists:suffix(Domain, RequestHost) andalso
+ (not
+ lists:member($.,
+ string:substr(RequestHost, 1,
+ (length(RequestHost) -
+ length(Domain))))));
+ false ->
+ false
+ end,
+ HostCheck andalso (hd(Domain) == $.)
+ andalso (length(string:tokens(Domain, ".")) > 1).
+
+cookie_expires(0) ->
+ 0;
+cookie_expires(DeltaSec) ->
+ NowSec = calendar:datetime_to_gregorian_seconds({date(), time()}),
+ NowSec + DeltaSec.
+
+is_cookie_expired(#http_cookie{max_age = session}) ->
+ false;
+is_cookie_expired(#http_cookie{max_age = ExpireTime}) ->
+ NowSec = calendar:datetime_to_gregorian_seconds({date(), time()}),
+ ExpireTime - NowSec =< 0.
+
+valid_cookies([], Valid, _) ->
+ Valid;
+
+valid_cookies([Cookie | Cookies], Valid, Db) ->
+ case is_cookie_expired(Cookie) of
+ true ->
+ delete(Cookie, Db),
+ valid_cookies(Cookies, Valid, Db);
+ false ->
+ valid_cookies(Cookies, [Cookie | Valid], Db)
+ end.
+
+path_sort(Cookies)->
+ lists:reverse(lists:keysort(#http_cookie.path, Cookies)).
+
+
+%% Informally, the Set-Cookie response header comprises the token
+%% Set-Cookie:, followed by a comma-separated list of one or more
+%% cookies. A Netscape cookie's expires attribute may also contain a
+%% comma; in that case the header list will have been incorrectly split
+%% in parse_set_cookies/2. This function fixes that problem.
+fix_netscape_cookie([Cookie1, Cookie2 | Rest], Acc) ->
+ case regexp:match(Cookie1, "expires=") of
+ {_, _, _} ->
+ fix_netscape_cookie(Rest, [Cookie1 ++ Cookie2 | Acc]);
+ nomatch ->
+ fix_netscape_cookie([Cookie2 |Rest], [Cookie1| Acc])
+ end;
+fix_netscape_cookie([Cookie | Rest], Acc) ->
+ fix_netscape_cookie(Rest, [Cookie | Acc]);
+
+fix_netscape_cookie([], Acc) ->
+ Acc.
diff --git a/src/couch_inets/http_internal.hrl b/src/couch_inets/http_internal.hrl
new file mode 100644
index 00000000..2dda55a2
--- /dev/null
+++ b/src/couch_inets/http_internal.hrl
@@ -0,0 +1,105 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+
+-include("inets_internal.hrl").
+
+-define(HTTP_MAX_BODY_SIZE, nolimit).
+-define(HTTP_MAX_HEADER_SIZE, 10240).
+
+%%% Response headers
+-record(http_response_h,{
+%%% --- Standard "General" headers
+ 'cache-control',
+ connection,
+ date,
+ pragma,
+ trailer,
+ 'transfer-encoding',
+ upgrade,
+ via,
+ warning,
+%%% --- Standard "Response" headers
+ 'accept-ranges',
+ age,
+ etag,
+ location,
+ 'proxy-authenticate',
+ 'retry-after',
+ server,
+ vary,
+ 'www-authenticate',
+%%% --- Standard "Entity" headers
+ allow,
+ 'content-encoding',
+ 'content-language',
+ 'content-length' = "-1",
+ 'content-location',
+ 'content-md5',
+ 'content-range',
+ 'content-type',
+ expires,
+ 'last-modified',
+ other=[] % list() - Key/Value list with other headers
+ }).
+
+
+%%% Request headers
+-record(http_request_h,{
+%%% --- Standard "General" headers
+ 'cache-control',
+ connection = "keep-alive",
+ date,
+ pragma,
+ trailer,
+ 'transfer-encoding',
+ upgrade,
+ via,
+ warning,
+%%% --- Standard "Request" headers
+ accept,
+ 'accept-charset',
+ 'accept-encoding',
+ 'accept-language',
+ authorization,
+ expect,
+ from,
+ host,
+ 'if-match',
+ 'if-modified-since',
+ 'if-none-match',
+ 'if-range',
+ 'if-unmodified-since',
+ 'max-forwards',
+ 'proxy-authorization',
+ range,
+ referer,
+ te,
+ 'user-agent',
+%%% --- Standard "Entity" headers
+ allow,
+ 'content-encoding',
+ 'content-language',
+ 'content-length' = "0",
+ 'content-location',
+ 'content-md5',
+ 'content-range',
+ 'content-type',
+ expires,
+ 'last-modified',
+ other=[] % list() - Key/Value list with other headers
+ }).
diff --git a/src/couch_inets/http_request.erl b/src/couch_inets/http_request.erl
new file mode 100644
index 00000000..138dc338
--- /dev/null
+++ b/src/couch_inets/http_request.erl
@@ -0,0 +1,278 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+
+-module(http_request).
+
+-include("http_internal.hrl").
+
+-export([headers/2, http_headers/1, is_absolut_uri/1]).
+
+%%-------------------------------------------------------------------------
+%% headers(HeaderList, #http_request_h{}) -> #http_request_h{}
+%% HeaderList - ["HeaderField:Value"]
+%% HeaderField - string()
+%% Value - string()
+%%
+%% Description: Creates a http_request_h-record used internally to
+%% handle http-headers.
+%%-------------------------------------------------------------------------
+headers([], Headers) ->
+ Headers;
+headers([Header | Tail], Headers) ->
+ case lists:splitwith(fun($:) -> false; (_) -> true end, Header) of
+ {Key, [$: | Value]} ->
+ headers(Tail, headers(http_util:to_lower(string:strip(Key)),
+ string:strip(Value), Headers));
+ {_, []} ->
+ error_logger:error_report("Ignored invalid HTTP-header: ~p~n",
+ [Header]),
+ headers(Tail, Headers)
+ end.
+
+%%-------------------------------------------------------------------------
+%% headers(#http_request_h{}) -> HeaderList
+%% HeaderList - ["HeaderField:Value"]
+%% HeaderField - string()
+%% Value - string()
+%%
+%% Description: Creates a HTTP header string.
+%%-------------------------------------------------------------------------
+http_headers(Headers = #http_request_h{other = Other}) ->
+ HeaderFields = record_info(fields, http_request_h) -- [other],
+ HeaderStr = lists:foldl(fun(Key, Acc) ->
+ case key_value_str(Key, Headers) of
+ undefined ->
+ Acc;
+ Str ->
+ [Str | Acc]
+ end
+ end,
+ [], HeaderFields),
+
+ lists:flatten([HeaderStr | headers_other(Other, [])]).
+
+%%-------------------------------------------------------------------------
+%% is_absolut_uri(URI) -> true | false
+%% URI - string()
+%%
+%% Description: Checks if a URI is absolute or relative
+%%-------------------------------------------------------------------------
+is_absolut_uri("http://" ++ _) ->
+ true;
+is_absolut_uri("https://" ++ _) ->
+ true;
+is_absolut_uri(_) ->
+ false.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+
+%%% --- Request headers
+headers("accept", Value, Headers) ->
+ Headers#http_request_h{accept = Value};
+headers("accept-charset", Value, Headers) ->
+ Headers#http_request_h{'accept-charset' = Value};
+headers("accept-encoding", Value, Headers) ->
+ Headers#http_request_h{'accept-encoding' = Value};
+headers("accept-language", Value, Headers) ->
+ Headers#http_request_h{'accept-language' = Value};
+headers("authorization", Value, Headers) ->
+ Headers#http_request_h{authorization = Value};
+headers("expect", Value, Headers) ->
+ Headers#http_request_h{expect = Value};
+headers("from", Value, Headers) ->
+ Headers#http_request_h{from = Value};
+headers("host", Value, Headers) ->
+ Headers#http_request_h{host = Value};
+headers("if-match", Value, Headers) ->
+ Headers#http_request_h{'if-match' = Value};
+headers("if-modified-since", Value, Headers) ->
+ Headers#http_request_h{'if-modified-since' = Value};
+headers("if-none-match", Value, Headers) ->
+ Headers#http_request_h{'if-none-match' = Value};
+headers("if-range", Value, Headers) ->
+ Headers#http_request_h{'if-range' = Value};
+headers("if-unmodified-since", Value, Headers) ->
+ Headers#http_request_h{'if-unmodified-since' = Value};
+headers("max-forwards", Value, Headers) ->
+ Headers#http_request_h{'max-forwards' = Value};
+headers("proxy-authorization", Value, Headers) ->
+ Headers#http_request_h{'proxy-authorization' = Value};
+headers("range", Value, Headers) ->
+ Headers#http_request_h{range = Value};
+headers("referer", Value, Headers) ->
+ Headers#http_request_h{referer = Value};
+headers("te", Value, Headers) ->
+ Headers#http_request_h{te = Value};
+headers("user-agent", Value, Headers) ->
+ Headers#http_request_h{'user-agent' = Value};
+
+%% General-Headers
+headers("cache-control", Value, Headers) ->
+ Headers#http_request_h{'cache-control' = Value};
+headers("connection", Value, Headers) ->
+ Headers#http_request_h{connection = Value};
+headers("date", Value, Headers) ->
+ Headers#http_request_h{date = Value};
+headers("pragma", Value, Headers) ->
+ Headers#http_request_h{pragma = Value};
+headers("trailer", Value, Headers) ->
+ Headers#http_request_h{trailer = Value};
+headers("transfer-encoding", Value, Headers) ->
+ Headers#http_request_h{'transfer-encoding' = Value};
+headers("upgrade", Value, Headers) ->
+ Headers#http_request_h{upgrade = Value};
+headers("via", Value, Headers) ->
+ Headers#http_request_h{via = Value};
+headers("warning", Value, Headers) ->
+ Headers#http_request_h{warning = Value};
+
+%% Entity header
+headers("allow", Value, Headers) ->
+ Headers#http_request_h{allow = Value};
+headers("content-encoding", Value, Headers) ->
+ Headers#http_request_h{'content-encoding' = Value};
+headers("content-language", Value, Headers) ->
+ Headers#http_request_h{'content-language' = Value};
+headers("content-length", Value, Headers) ->
+ Headers#http_request_h{'content-length' = Value};
+headers("content-location", Value, Headers) ->
+ Headers#http_request_h{'content-location' = Value};
+headers("content-md5", Value, Headers) ->
+ Headers#http_request_h{'content-md5' = Value};
+headers("content-range", Value, Headers) ->
+ Headers#http_request_h{'content-range' = Value};
+headers("content-type", Value, Headers) ->
+ Headers#http_request_h{'content-type' = Value};
+headers("expires", Value, Headers) ->
+ Headers#http_request_h{expires = Value};
+headers("last-modified", Value, Headers) ->
+ Headers#http_request_h{'last-modified' = Value};
+headers(Key, Value, Headers) ->
+ Headers#http_request_h{other=
+ [{Key, Value} | Headers#http_request_h.other]}.
+
+key_value_str(Key = 'cache-control', Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.'cache-control');
+key_value_str(Key = connection, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.connection);
+key_value_str(Key = date, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.date);
+key_value_str(Key = pragma, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.pragma);
+key_value_str(Key = trailer, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.trailer);
+key_value_str(Key = 'transfer-encoding', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'transfer-encoding');
+key_value_str(Key = upgrade, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.upgrade);
+key_value_str(Key = via, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.via);
+key_value_str(Key = warning, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.warning);
+key_value_str(Key = accept, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.accept);
+key_value_str(Key = 'accept-charset', Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.'accept-charset');
+key_value_str(Key = 'accept-encoding', Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.'accept-encoding');
+key_value_str(Key = 'accept-language', Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.'accept-language');
+key_value_str(Key = authorization, Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.authorization);
+key_value_str(Key = expect, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.expect);
+key_value_str(Key = from, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.from);
+key_value_str(Key = host, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.host);
+key_value_str(Key = 'if-match', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'if-match');
+key_value_str(Key = 'if-modified-since', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'if-modified-since');
+key_value_str(Key = 'if-none-match', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'if-none-match');
+key_value_str(Key = 'if-range', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'if-range');
+key_value_str(Key = 'if-unmodified-since', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'if-unmodified-since');
+key_value_str(Key = 'max-forwards', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'max-forwards');
+key_value_str(Key = 'proxy-authorization', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'proxy-authorization');
+key_value_str(Key = range, Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.range);
+key_value_str(Key = referer, Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.referer);
+key_value_str(Key = te, Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.te);
+key_value_str(Key = 'user-agent', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'user-agent');
+key_value_str(Key = allow, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.allow);
+key_value_str(Key = 'content-encoding', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'content-encoding');
+key_value_str(Key = 'content-language', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'content-language');
+key_value_str(Key = 'content-length', Headers) ->
+ case Headers#http_request_h.'content-length' of
+ "0" ->
+ undefined;
+ _ ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'content-length')
+ end;
+key_value_str(Key = 'content-location', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'content-location');
+key_value_str(Key = 'content-md5', Headers) ->
+ key_value_str(atom_to_list(Key),
+ Headers#http_request_h.'content-md5');
+key_value_str(Key = 'content-range', Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.'content-range');
+key_value_str(Key = 'content-type', Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.'content-type');
+key_value_str(Key = expires, Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.expires);
+key_value_str(Key = 'last-modified', Headers) ->
+ key_value_str(atom_to_list(Key), Headers#http_request_h.'last-modified');
+key_value_str(_, undefined) ->
+ undefined;
+key_value_str(Key, Value) ->
+ Key ++ ": " ++ Value ++ ?CRLF.
+
+headers_other([], Headers) ->
+ Headers;
+headers_other([{Key,Value} | Rest], Headers) ->
+ Header = Key ++ ": " ++ Value ++ ?CRLF,
+ headers_other(Rest, [Header | Headers]).
diff --git a/src/couch_inets/http_response.erl b/src/couch_inets/http_response.erl
new file mode 100644
index 00000000..27e807eb
--- /dev/null
+++ b/src/couch_inets/http_response.erl
@@ -0,0 +1,206 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+
+-module(http_response).
+
+-include("http_internal.hrl").
+
+-export([headers/2, header_list/1]).
+
+%%-------------------------------------------------------------------------
+%% headers(HeaderList, #http_response_h{}) -> #http_response_h{}
+%% HeaderList - ["HeaderField:Value"]
+%% HeaderField - string()
+%% Value - string()
+%%
+%% Description: Creates a http_response_h-record used internally to
+%% handle http-headers.
+%%-------------------------------------------------------------------------
+headers([], Headers) ->
+ Headers;
+
+headers([Header | Tail], Headers) ->
+ {Key, [$: | Value]} =
+ lists:splitwith(fun($:) -> false; (_) -> true end, Header),
+ headers(Tail, headers(http_util:to_lower(string:strip(Key)),
+ string:strip(Value), Headers)).
+
+%%-------------------------------------------------------------------------
+%% headers(#http_response_h{}) -> HeaderList
+%% HeaderList - [{"HeaderField", Value"}]
+%% HeaderField - string()
+%% Value - string()
+%%
+%% Description: Creates a list of key value tuples from the #http_response_h
+%% record, to be returned to the application programmer. We
+%% do not wish to make the application programmer dependent on
+%% our records.
+%%-------------------------------------------------------------------------
+header_list(Headers) ->
+ HeaderFields = record_info(fields, http_response_h) -- [other],
+ HeaderList = lists:foldl(fun(Key, Acc) ->
+ case key_value_tuple(Key, Headers) of
+ undefined ->
+ Acc;
+ Tuple ->
+ [Tuple | Acc]
+ end
+ end,
+ [], HeaderFields),
+ lists:reverse(HeaderList) ++ Headers#http_response_h.other.
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+headers("cache-control", Value, Headers) ->
+ Headers#http_response_h{'cache-control'= Value};
+headers("connection", Value, Headers) ->
+ Headers#http_response_h{connection = Value};
+headers("date", Value, Headers) ->
+ Headers#http_response_h{date = Value};
+headers("pragma", Value, Headers) ->
+ Headers#http_response_h{pragma = Value};
+headers("trailer", Value, Headers) ->
+ Headers#http_response_h{trailer = Value};
+headers("transfer-encoding", Value, Headers) ->
+ Headers#http_response_h{'transfer-encoding' = Value};
+headers("upgrade", Value, Headers) ->
+ Headers#http_response_h{upgrade = Value};
+headers("via", Value, Headers) ->
+ Headers#http_response_h{via = Value};
+headers("warning", Value, Headers) ->
+ Headers#http_response_h{warning = Value};
+headers("accept-ranges", Value, Headers) ->
+ Headers#http_response_h{'accept-ranges' = Value};
+headers("age", Value, Headers) ->
+ Headers#http_response_h{age = Value};
+headers("etag", Value, Headers) ->
+ Headers#http_response_h{etag = Value};
+headers("location", Value, Headers) ->
+ Headers#http_response_h{location = Value};
+headers("proxy-authenticate", Value, Headers) ->
+ Headers#http_response_h{'proxy-authenticate' = Value};
+headers("retry-after", Value, Headers) ->
+ Headers#http_response_h{'retry-after' = Value};
+headers("server", Value, Headers) ->
+ Headers#http_response_h{server = Value};
+headers("vary", Value, Headers) ->
+ Headers#http_response_h{vary = Value};
+headers("www-authenticate", Value, Headers) ->
+ Headers#http_response_h{'www-authenticate' = Value};
+headers("allow", Value, Headers) ->
+ Headers#http_response_h{allow = Value};
+headers("content-encoding", Value, Headers) ->
+ Headers#http_response_h{'content-encoding' = Value};
+headers("content-language", Value, Headers) ->
+ Headers#http_response_h{'content-language' = Value};
+headers("content-length", Value, Headers) ->
+ Headers#http_response_h{'content-length' = Value};
+headers("content-location", Value, Headers) ->
+ Headers#http_response_h{'content-location' = Value};
+headers("content-md5", Value, Headers) ->
+ Headers#http_response_h{'content-md5' = Value};
+headers("content-range", Value, Headers) ->
+ Headers#http_response_h{'content-range' = Value};
+headers("content-type", Value, Headers) ->
+ Headers#http_response_h{'content-type' = Value};
+headers("expires", Value, Headers) ->
+ Headers#http_response_h{expires = Value};
+headers("last-modified", Value, Headers) ->
+ Headers#http_response_h{'last-modified' = Value};
+headers(Key, Value, Headers) ->
+ Headers#http_response_h{other=
+ [{Key, Value} | Headers#http_response_h.other]}.
+
+
+key_value_tuple(Key = 'cache-control', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'cache-control');
+key_value_tuple(Key = connection, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.connection);
+key_value_tuple(Key = date, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.date);
+key_value_tuple(Key = pragma, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.pragma);
+key_value_tuple(Key = trailer, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.trailer);
+key_value_tuple(Key ='transfer-encoding', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'transfer-encoding');
+key_value_tuple(Key = upgrade, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.upgrade);
+key_value_tuple(Key = via, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.via);
+key_value_tuple(Key = warning, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.warning);
+key_value_tuple(Key = 'accept-ranges', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'accept-ranges');
+key_value_tuple(Key = age, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.age);
+key_value_tuple(Key = etag, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.etag);
+key_value_tuple(Key = location, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.location);
+key_value_tuple(Key = 'proxy-authenticate', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'proxy-authenticate');
+key_value_tuple(Key = 'retry-after', Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.'retry-after');
+key_value_tuple(Key = server, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.server);
+key_value_tuple(Key = vary, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.vary);
+key_value_tuple(Key = 'www-authenticate', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'www-authenticate');
+key_value_tuple(Key = allow, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.allow);
+key_value_tuple(Key = 'content-encoding', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'content-encoding');
+key_value_tuple(Key = 'content-language', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'content-language');
+key_value_tuple(Key = 'content-length', Headers) ->
+ case Headers#http_response_h.'content-length' of
+ "-1" ->
+ undefined;
+ _ ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'content-length')
+ end;
+key_value_tuple(Key = 'content-location', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'content-location');
+key_value_tuple(Key = 'content-md5', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'content-md5');
+key_value_tuple(Key = 'content-range', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'content-range');
+key_value_tuple(Key = 'content-type', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'content-type');
+key_value_tuple(Key = expires, Headers) ->
+ key_value_tuple(atom_to_list(Key), Headers#http_response_h.expires);
+key_value_tuple(Key = 'last-modified', Headers) ->
+ key_value_tuple(atom_to_list(Key),
+ Headers#http_response_h.'last-modified');
+key_value_tuple(_, undefined) ->
+ undefined;
+key_value_tuple(Key, Value) ->
+ {Key, Value}.
diff --git a/src/couch_inets/http_transport.erl b/src/couch_inets/http_transport.erl
new file mode 100644
index 00000000..57787ef3
--- /dev/null
+++ b/src/couch_inets/http_transport.erl
@@ -0,0 +1,291 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%
+-module(http_transport).
+
+% Internal application API
+-export([start/1, connect/3, connect/4, listen/2, listen/3,
+ accept/2, accept/3, close/2,
+ send/3, controlling_process/3, setopts/3,
+ peername/2, resolve/0]).
+
+%%%=========================================================================
+%%% Internal application API
+%%%=========================================================================
+
+%%-------------------------------------------------------------------------
+%% start(SocketType) -> ok | {error, Reason}
+%% SocketType = ip_comm | {ssl, _}
+%%
+%% Description: Makes sure inet_db or ssl is started.
+%%-------------------------------------------------------------------------
+start(ip_comm) ->
+ case inet_db:start() of
+ {ok, _} ->
+ ok;
+ {error, {already_started, _}} ->
+ ok;
+ Error ->
+ Error
+ end;
+start({ssl, _}) ->
+ case ssl:start() of
+ ok ->
+ ok;
+ {ok, _} ->
+ ok;
+ {error, {already_started,_}} ->
+ ok;
+ Error ->
+ Error
+ end.
+
+%%-------------------------------------------------------------------------
+%% connect(SocketType, Address, IPV6, Timeout) ->
+%% {ok, Socket} | {error, Reason}
+%% SocketType = ip_comm | {ssl, SslConfig}
+%% Address = {Host, Port}
+%% IPV6 = disabled | enabled
+%% Socket = socket()
+%%
+%% Description: Connects to the Host and Port specified in HTTPRequest.
+%% uses ipv6 if possible.
+%%-------------------------------------------------------------------------
+connect(SocketType, Address, IPV6) ->
+ connect(SocketType, Address, IPV6, infinity).
+
+connect(ip_comm, {Host, Port}, enabled, Timeout) ->
+ {Opts, NewHost} =
+ case inet:getaddr(Host, inet6) of
+ {ok, IPAddr = {0, 0, 0, 0, 0, 16#ffff, _, _}} ->
+ case inet:getaddr(Host, inet) of
+ {ok,NewIP} ->
+ {[binary, {packet, 0}, {active, false},
+ {reuseaddr,true}], NewIP};
+ _Error ->
+ {[binary, {packet, 0}, {active, false},
+ {reuseaddr,true}, inet6], IPAddr}
+ end;
+ {ok, IPAddr} ->
+ {[binary, {packet, 0}, {active, false},
+ {reuseaddr,true}, inet6], IPAddr};
+ _ ->
+ {[binary, {packet, 0}, {active, false},
+ {reuseaddr,true}], Host}
+ end,
+ gen_tcp:connect(NewHost, Port, Opts, Timeout);
+
+connect(ip_comm, {Host, Port}, disabled, Timeout) ->
+ Opts = [binary, {packet, 0}, {active, false}, {reuseaddr,true}],
+ gen_tcp:connect(Host, Port, Opts, Timeout);
+
+connect({ssl, SslConfig}, {Host, Port}, _, Timeout) ->
+ Opts = [binary, {active, false}] ++ SslConfig,
+ ssl:connect(Host, Port, Opts, Timeout).
+
+%%-------------------------------------------------------------------------
+%% listen(SocketType, Port) -> {ok, Socket} | {error, Reason}
+%% SocketType = ip_comm | {ssl, SSLConfig}
+%% Port = integer()
+%% Socket = socket()
+%%
+%% Description: Sets up socket to listen on the port Port on the local
+%% host using either gen_tcp or ssl. In the gen_tcp case the port
+%% might already have been initiated by a wrapper-program and is
+%% given as an Fd that can be retrieved by init:get_argument. The
+%% reason for this is to enable an HTTP-server not running as root to use
+%% port 80.
+%%-------------------------------------------------------------------------
+listen(SocketType, Port) ->
+ listen(SocketType, undefined, Port).
+
+listen(ip_comm, Addr, Port) ->
+ FdName = list_to_atom("httpd_" ++ integer_to_list(Port)),
+ {NewPort, Opt} =
+ case init:get_argument(FdName) of
+ {ok, [[FdStr]]} ->
+ Fd = list_to_integer(FdStr),
+ {0,
+ sock_opt(ip_comm, Addr, [{backlog, 128},
+ {reuseaddr,true}, {fd,Fd}, {nodelay, true}])};
+ error ->
+ {Port,
+ sock_opt(ip_comm, Addr,
+ [{backlog, 128}, {reuseaddr, true}, {nodelay, true}])}
+ end,
+ gen_tcp:listen(NewPort, Opt);
+
+listen({ssl, SSLConfig} = Ssl, Addr, Port) ->
+ Opt = sock_opt(Ssl, Addr, SSLConfig),
+ ssl:listen(Port, Opt).
+
+%%-------------------------------------------------------------------------
+%% accept(SocketType, ListenSocket) -> {ok, Socket} | {error, Reason}
+%% accept(SocketType, ListenSocket, Timeout) -> ok | {error, Reason}
+%% SocketType = ip_comm | {ssl, SSLConfig}
+%% ListenSocket = socket()
+%% Timeout = infinity | integer() >= 0
+%% Socket = socket()
+%%
+%% Description: Accepts an incoming connection request on a listen socket,
+%% using either gen_tcp or ssl.
+%%-------------------------------------------------------------------------
+accept(SocketType, ListenSocket) ->
+ accept(SocketType, ListenSocket, infinity).
+accept(ip_comm, ListenSocket, Timeout) ->
+ gen_tcp:accept(ListenSocket, Timeout);
+accept({ssl,_SSLConfig}, ListenSocket, Timeout) ->
+ ssl:accept(ListenSocket, Timeout).
+
+%%-------------------------------------------------------------------------
+%% controlling_process(SocketType, Socket, NewOwner) -> ok | {error, Reason}
+%% SocketType = ip_comm | {ssl, _}
+%% Socket = socket()
+%% NewOwner = pid()
+%%
+%% Description: Assigns a new controlling process to Socket.
+%%-------------------------------------------------------------------------
+controlling_process(ip_comm, Socket, NewOwner) ->
+ gen_tcp:controlling_process(Socket, NewOwner);
+controlling_process({ssl, _}, Socket, NewOwner) ->
+ ssl:controlling_process(Socket, NewOwner).
+
+%%-------------------------------------------------------------------------
+%% setopts(SocketType, Socket, Options) -> ok | {error, Reason}
+%% SocketType = ip_comm | {ssl, _}
+%% Socket = socket()
+%% Options = list()
+%% Description: Sets one or more options for a socket, using either
+%% gen_tcp or ssl.
+%%-------------------------------------------------------------------------
+setopts(ip_comm, Socket, Options) ->
+ inet:setopts(Socket,Options);
+setopts({ssl, _}, Socket, Options) ->
+ ssl:setopts(Socket, Options).
+
+%%-------------------------------------------------------------------------
+%% send(RequestOrSocketType, Socket, Message) -> ok | {error, Reason}
+%% SocketType = ip_comm | {ssl, _}
+%% Socket = socket()
+%% Message = list() | binary()
+%% Description: Sends a packet on a socket, using either gen_tcp or ssl.
+%%-------------------------------------------------------------------------
+send(ip_comm, Socket, Message) ->
+ gen_tcp:send(Socket, Message);
+send({ssl, _}, Socket, Message) ->
+ ssl:send(Socket, Message).
+
+%%-------------------------------------------------------------------------
+%% close(SocketType, Socket) -> ok | {error, Reason}
+%% SocketType = ip_comm | {ssl, _}
+%% Socket = socket()
+%%
+%% Description: Closes a socket, using either gen_tcp or ssl.
+%%-------------------------------------------------------------------------
+close(ip_comm, Socket) ->
+ gen_tcp:close(Socket);
+close({ssl, _}, Socket) ->
+ ssl:close(Socket).
+
+%%-------------------------------------------------------------------------
+%% peername(SocketType, Socket) -> ok | {error, Reason}
+%% SocketType = ip_comm | {ssl, _}
+%% Socket = socket()
+%%
+%% Description: Returns the address and port for the other end of a
+%% connection, using either gen_tcp or ssl.
+%%-------------------------------------------------------------------------
+peername(ip_comm, Socket) ->
+ case inet:peername(Socket) of
+ {ok,{{A, B, C, D}, Port}} ->
+ PeerName = integer_to_list(A)++"."++integer_to_list(B)++"."++
+ integer_to_list(C)++"."++integer_to_list(D),
+ {Port, PeerName};
+ {ok,{{A, B, C, D, E, F, G, H}, Port}} ->
+ PeerName = http_util:integer_to_hexlist(A) ++ ":"++
+ http_util:integer_to_hexlist(B) ++ ":" ++
+ http_util:integer_to_hexlist(C) ++ ":" ++
+ http_util:integer_to_hexlist(D) ++ ":" ++
+ http_util:integer_to_hexlist(E) ++ ":" ++
+ http_util:integer_to_hexlist(F) ++ ":" ++
+ http_util:integer_to_hexlist(G) ++":"++
+ http_util:integer_to_hexlist(H),
+ {Port, PeerName};
+ {error, _} ->
+ {-1, "unknown"}
+ end;
+
+peername({ssl, _}, Socket) ->
+ case ssl:peername(Socket) of
+ {ok,{{A, B, C, D}, Port}} ->
+ PeerName = integer_to_list(A)++"."++integer_to_list(B)++"."++
+ integer_to_list(C)++"."++integer_to_list(D),
+ {Port, PeerName};
+ {error, _} ->
+ {-1, "unknown"}
+ end.
+
+%%-------------------------------------------------------------------------
+%% resolve() -> HostName
+%% HostName = string()
+%%
+%% Description: Returns the local hostname.
+%%-------------------------------------------------------------------------
+resolve() ->
+ {ok, Name} = inet:gethostname(),
+ Name.
+
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+
+%% Address any comes from directive: BindAddress "*"
+sock_opt(ip_comm, any = Addr, Opt) ->
+ sock_opt1([{ip, Addr} | Opt]);
+sock_opt(ip_comm, undefined, Opt) ->
+ sock_opt1(Opt);
+sock_opt(_, any = Addr, Opt) ->
+ sock_opt2([{ip, Addr} | Opt]);
+sock_opt(_, undefined, Opt) ->
+ sock_opt2(Opt);
+sock_opt(_, Addr, Opt) when size(Addr) == 4 ->
+ sock_opt2([{ip, Addr} | Opt]);
+sock_opt(ip_comm, Addr, Opt) ->
+ sock_opt2([inet6, {ip, Addr} | Opt]);
+sock_opt(_, Addr, Opt) ->
+ sock_opt2([{ip, Addr} | Opt]).
+
+sock_opt1(Opt) ->
+ case has_inet6_supported() of
+ yes ->
+ sock_opt2([inet6 | Opt]);
+ no ->
+ sock_opt2(Opt)
+ end.
+
+sock_opt2(Opt) ->
+ [{packet, 0}, {active, false} | Opt].
+
+has_inet6_supported() ->
+ case (catch inet:getaddr("localhost", inet6)) of
+ {ok, {0, 0, 0, 0, 0, 16#ffff, _, _}} ->
+ no;
+ {ok,_} -> yes;
+ _ ->
+ no
+ end.
diff --git a/src/couch_inets/http_uri.erl b/src/couch_inets/http_uri.erl
new file mode 100644
index 00000000..8e2be1a9
--- /dev/null
+++ b/src/couch_inets/http_uri.erl
@@ -0,0 +1,113 @@
+% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(http_uri).
+
+-export([parse/1]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+parse(AbsURI) ->
+ case parse_scheme(AbsURI) of
+ {error, Reason} ->
+ {error, Reason};
+ {Scheme, Rest} ->
+ case (catch parse_uri_rest(Scheme, Rest)) of
+ {UserInfo, Host, Port, Path, Query} ->
+ {Scheme, UserInfo, Host, Port, Path, Query};
+ _ ->
+ {error, {malformed_url, AbsURI}}
+ end
+ end.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+parse_scheme(AbsURI) ->
+ case split_uri(AbsURI, ":", {error, no_scheme}, 1, 1) of
+ {error, no_scheme} ->
+ {error, no_scheme};
+ {StrScheme, Rest} ->
+ case list_to_atom(http_util:to_lower(StrScheme)) of
+ Scheme when Scheme == http; Scheme == https ->
+ {Scheme, Rest};
+ Scheme ->
+ {error, {not_supported_scheme, Scheme}}
+ end
+ end.
+
+parse_uri_rest(Scheme, "//" ++ URIPart) ->
+
+ {Authority, PathQuery} =
+ case split_uri(URIPart, "/", URIPart, 1, 0) of
+ Split = {_, _} ->
+ Split;
+ URIPart ->
+ case split_uri(URIPart, "\\?", URIPart, 1, 0) of
+ Split = {_, _} ->
+ Split;
+ URIPart ->
+ {URIPart,""}
+ end
+ end,
+
+ {UserInfo, HostPort} = split_uri(Authority, "@", {"", Authority}, 1, 1),
+ {Host, Port} = parse_host_port(Scheme, HostPort),
+ {Path, Query} = parse_path_query(PathQuery),
+ {UserInfo, Host, Port, Path, Query}.
+
+
+parse_path_query(PathQuery) ->
+ {Path, Query} = split_uri(PathQuery, "\\?", {PathQuery, ""}, 1, 0),
+ {path(Path), Query}.
+
+
+parse_host_port(Scheme,"[" ++ HostPort) -> %ipv6
+ DefaultPort = default_port(Scheme),
+ {Host, ColonPort} = split_uri(HostPort, "\\]", {HostPort, ""}, 1, 1),
+ {_, Port} = split_uri(ColonPort, ":", {"", DefaultPort}, 0, 1),
+ {Host, int_port(Port)};
+
+parse_host_port(Scheme, HostPort) ->
+ DefaultPort = default_port(Scheme),
+ {Host, Port} = split_uri(HostPort, ":", {HostPort, DefaultPort}, 1, 1),
+ {Host, int_port(Port)}.
+
+split_uri(UriPart, SplitChar, NoMatchResult, SkipLeft, SkipRight) ->
+ case regexp:first_match(UriPart, SplitChar) of
+ {match, Match, _} ->
+ {string:substr(UriPart, 1, Match - SkipLeft),
+ string:substr(UriPart, Match + SkipRight, length(UriPart))};
+ nomatch ->
+ NoMatchResult
+ end.
+
+default_port(http) ->
+ 80;
+default_port(https) ->
+ 443.
+
+int_port(Port) when is_integer(Port) ->
+ Port;
+int_port(Port) when is_list(Port) ->
+ list_to_integer(Port).
+
+path("") ->
+ "/";
+path(Path) ->
+ Path.
diff --git a/src/couch_inets/http_util.erl b/src/couch_inets/http_util.erl
new file mode 100644
index 00000000..7396fec6
--- /dev/null
+++ b/src/couch_inets/http_util.erl
@@ -0,0 +1,171 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(http_util).
+
+-export([key1search/2, key1search/3,
+ to_upper/1, to_lower/1, convert_netscapecookie_date/1,
+ hexlist_to_integer/1, integer_to_hexlist/1,
+ convert_month/1, is_hostname/1]).
+
+%%%=========================================================================
+%%% Internal application API
+%%%=========================================================================
+key1search(TupleList,Key) ->
+ key1search(TupleList,Key,undefined).
+
+key1search(TupleList,Key,Undefined) ->
+ case lists:keysearch(Key,1,TupleList) of
+ {value,{Key,Value}} ->
+ Value;
+ false ->
+ Undefined
+ end.
+
+to_upper(Str) ->
+ to_upper(Str, []).
+
+to_upper([C|Cs], Acc) when C >= $a, C =< $z ->
+ to_upper(Cs, [C-($a-$A)| Acc]);
+to_upper([C|Cs], Acc) ->
+ to_upper(Cs, [C | Acc]);
+to_upper([], Acc) ->
+ lists:reverse(Acc).
+
+to_lower(Str) ->
+ to_lower(Str, []).
+to_lower([C|Cs], Acc) when C >= $A, C =< $Z ->
+ to_lower(Cs, [C+($a-$A)| Acc]);
+to_lower([C|Cs], Acc) ->
+ to_lower(Cs, [C| Acc]);
+to_lower([], Acc) ->
+ lists:reverse(Acc).
+
+convert_netscapecookie_date([_D,_A,_Y, $,, _SP,
+ D1,D2,_DA,
+ M,O,N,_DA,
+ Y1,Y2,Y3,Y4,_SP,
+ H1,H2,_Col,
+ M1,M2,_Col,
+ S1,S2|_Rest]) ->
+ Year=list_to_integer([Y1,Y2,Y3,Y4]),
+ Day=list_to_integer([D1,D2]),
+ Month=convert_month([M,O,N]),
+ Hour=list_to_integer([H1,H2]),
+ Min=list_to_integer([M1,M2]),
+ Sec=list_to_integer([S1,S2]),
+ {{Year,Month,Day},{Hour,Min,Sec}};
+
+convert_netscapecookie_date([_D,_A,_Y, _SP,
+ D1,D2,_DA,
+ M,O,N,_DA,
+ Y1,Y2,Y3,Y4,_SP,
+ H1,H2,_Col,
+ M1,M2,_Col,
+ S1,S2|_Rest]) ->
+ Year=list_to_integer([Y1,Y2,Y3,Y4]),
+ Day=list_to_integer([D1,D2]),
+ Month=convert_month([M,O,N]),
+ Hour=list_to_integer([H1,H2]),
+ Min=list_to_integer([M1,M2]),
+ Sec=list_to_integer([S1,S2]),
+ {{Year,Month,Day},{Hour,Min,Sec}}.
+
+hexlist_to_integer([])->
+ empty;
+%% When the string only contains one value it is easily done.
+%% 0-9
+hexlist_to_integer([Size]) when Size >= 48 , Size =< 57 ->
+ Size - 48;
+%% A-F
+hexlist_to_integer([Size]) when Size >= 65 , Size =< 70 ->
+ Size - 55;
+%% a-f
+hexlist_to_integer([Size]) when Size >= 97 , Size =< 102 ->
+ Size - 87;
+hexlist_to_integer([_Size]) ->
+ not_a_num;
+
+hexlist_to_integer(Size) ->
+ Len = string:span(Size, "1234567890abcdefABCDEF"),
+ hexlist_to_integer2(Size, 16 bsl (4 *(Len-2)),0).
+
+integer_to_hexlist(Num)->
+ integer_to_hexlist(Num, get_size(Num), []).
+
+convert_month("Jan") -> 1;
+convert_month("Feb") -> 2;
+convert_month("Mar") -> 3;
+convert_month("Apr") -> 4;
+convert_month("May") -> 5;
+convert_month("Jun") -> 6;
+convert_month("Jul") -> 7;
+convert_month("Aug") -> 8;
+convert_month("Sep") -> 9;
+convert_month("Oct") -> 10;
+convert_month("Nov") -> 11;
+convert_month("Dec") -> 12.
+
+is_hostname(Dest) ->
+ inet_parse:domain(Dest).
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+hexlist_to_integer2([],_Pos,Sum)->
+ Sum;
+hexlist_to_integer2([HexVal | HexString], Pos, Sum)
+ when HexVal >= 48, HexVal =< 57 ->
+ hexlist_to_integer2(HexString, Pos bsr 4, Sum + ((HexVal-48) * Pos));
+
+hexlist_to_integer2([HexVal | HexString], Pos, Sum)
+ when HexVal >= 65, HexVal =<70 ->
+ hexlist_to_integer2(HexString, Pos bsr 4, Sum + ((HexVal-55) * Pos));
+
+hexlist_to_integer2([HexVal | HexString], Pos, Sum)
+ when HexVal>=97, HexVal=<102 ->
+ hexlist_to_integer2(HexString, Pos bsr 4, Sum + ((HexVal-87) * Pos));
+
+hexlist_to_integer2(_AfterHexString, _Pos, Sum)->
+ Sum.
+
+integer_to_hexlist(Num, Pot, Res) when Pot<0 ->
+ convert_to_ascii([Num | Res]);
+
+integer_to_hexlist(Num,Pot,Res) ->
+ Position = (16 bsl (Pot*4)),
+ PosVal = Num div Position,
+ integer_to_hexlist(Num - (PosVal*Position), Pot-1, [PosVal | Res]).
+
+get_size(Num)->
+ get_size(Num, 0).
+
+get_size(Num, Pot) when Num < (16 bsl(Pot *4)) ->
+ Pot-1;
+
+get_size(Num, Pot) ->
+ get_size(Num, Pot+1).
+
+convert_to_ascii(RevesedNum) ->
+ convert_to_ascii(RevesedNum, []).
+
+convert_to_ascii([], Num)->
+ Num;
+convert_to_ascii([Num | Reversed], Number) when Num > -1, Num < 10 ->
+ convert_to_ascii(Reversed, [Num + 48 | Number]);
+convert_to_ascii([Num | Reversed], Number) when Num > 9, Num < 16 ->
+ convert_to_ascii(Reversed, [Num + 55 | Number]).
diff --git a/src/couch_inets/httpc_handler.erl b/src/couch_inets/httpc_handler.erl
new file mode 100644
index 00000000..8019b72b
--- /dev/null
+++ b/src/couch_inets/httpc_handler.erl
@@ -0,0 +1,953 @@
+% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+
+-module(httpc_handler).
+
+-behaviour(gen_server).
+
+-include("httpc_internal.hrl").
+-include("http_internal.hrl").
+
+%%--------------------------------------------------------------------
+%% Application API
+-export([start_link/2, send/2, cancel/2, stream/3]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(timers, {request_timers = [], % [ref()]
+ pipeline_timer % ref()
+ }).
+
+-record(state, {request, % #request{}
+ session, % #tcp_session{}
+ status_line, % {Version, StatusCode, ReasonPhrase}
+ headers, % #http_response_h{}
+ body, % binary()
+ mfa, % {Module, Function, Args}
+ pipeline = queue:new(),% queue()
+ status = new, % new | pipeline | close | ssl_tunnel
+ canceled = [], % [RequestId]
+ max_header_size = nolimit, % nolimit | integer()
+ max_body_size = nolimit, % nolimit | integer()
+ options, % #options{}
+ timers = #timers{} % #timers{}
+ }).
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start() -> {ok, Pid}
+%%
+%% Description: Starts a http-request handler process. Intended to be
+%% called by the httpc manager process.
+%%
+%% Note: Uses proc_lib and gen_server:enter_loop so that waiting
+%% for gen_tcp:connect to timeout in init/1 will not
+%% block the httpc manager process in odd cases such as trying to call
+%% a server that does not exist. (See OTP-6735) The only API function
+%% sending messages to the handler process that can be called before
+%% init has completed is cancel and that is not a problem! (Send and
+%% stream will not be called before the first request has been sent and
+%% the reply or part of it has arrived.)
+%%--------------------------------------------------------------------
+start_link(Request, ProxyOptions) ->
+ {ok, proc_lib:spawn_link(?MODULE, init, [[Request, ProxyOptions]])}.
+
+%%--------------------------------------------------------------------
+%% Function: send(Request, Pid) -> ok
+%% Request = #request{}
+%% Pid = pid() - the pid of the http-request handler process.
+%%
+%% Description: Uses this handlers session to send a request. Intended
+%% to be called by the httpc manager process.
+%%--------------------------------------------------------------------
+send(Request, Pid) ->
+ call(Request, Pid, 5000).
+
+%%--------------------------------------------------------------------
+%% Function: cancel(RequestId, Pid) -> ok
+%% RequestId = ref()
+%% Pid = pid() - the pid of the http-request handler process.
+%%
+%% Description: Cancels a request. Intended to be called by the httpc
+%% manager process.
+%%--------------------------------------------------------------------
+cancel(RequestId, Pid) ->
+ cast({cancel, RequestId}, Pid).
+
+%%--------------------------------------------------------------------
+%% Function: stream(BodyPart, Request, Code) -> _
+%% BodyPart = binary()
+%% Request = #request{}
+%% Code = integer()
+%%
+%% Description: Stream the HTTP body to the caller process (client)
+%% or to a file. Note that the data that has been streamed
+%% does not have to be saved. (We do not want to use up
+%% memory in vain.)
+%%--------------------------------------------------------------------
+%% Request should not be streamed
+stream(BodyPart, Request = #request{stream = none}, _) ->
+ {BodyPart, Request};
+
+stream(BodyPart, Request = #request{stream = self}, 200) -> % Stream to caller
+ httpc_response:send(Request#request.from,
+ {Request#request.id, stream, BodyPart}),
+ {<<>>, Request};
+
+stream(BodyPart, Request = #request{stream = Filename}, 200)
+ when is_list(Filename) -> % Stream to file
+ case file:open(Filename, [write, raw, append, delayed_write]) of
+ {ok, Fd} ->
+ stream(BodyPart, Request#request{stream = Fd}, 200);
+ {error, Reason} ->
+ exit({stream_to_file_failed, Reason})
+ end;
+
+stream(BodyPart, Request = #request{stream = Fd}, 200) -> % Stream to file
+ case file:write(Fd, BodyPart) of
+ ok ->
+ {<<>>, Request};
+ {error, Reason} ->
+ exit({stream_to_file_failed, Reason})
+ end;
+stream(BodyPart, Request,_) -> % only 200 responses can be streamed
+ {BodyPart, Request}.
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init([Request, Session]) -> {ok, State} |
+%% {ok, State, Timeout} | ignore |{stop, Reason}
+%%
+%% Description: Initiates the httpc_handler process
+%%
+%% Note: The init function may not fail, that will kill the
+%% httpc_manager process. We could make the httpc_manager more complex
+%% but we do not want that so errors will be handled by the process
+%% sending an init_error message to itself.
+%%--------------------------------------------------------------------
+init([Request, Options]) ->
+ process_flag(trap_exit, true),
+ handle_verbose(Options#options.verbose),
+ Address = handle_proxy(Request#request.address, Options#options.proxy),
+ {ok, State} =
+ case {Address /= Request#request.address, Request#request.scheme} of
+ {true, https} ->
+ Error = https_through_proxy_is_not_currently_supported,
+ self() ! {init_error,
+ Error, httpc_response:error(Request, Error)},
+ {ok, #state{request = Request, options = Options,
+ status = ssl_tunnel}};
+ %% This is what we should do if and when ssl supports
+ %% "socket upgrading"
+ %%send_ssl_tunnel_request(Address, Request,
+ %% #state{options = Options,
+ %% status = ssl_tunnel});
+ {_, _} ->
+ send_first_request(Address, Request, #state{options = Options})
+ end,
+ gen_server:enter_loop(?MODULE, [], State).
+
+%%--------------------------------------------------------------------
+%% Function: handle_call(Request, From, State) -> {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} | (terminate/2 is called)
+%% {stop, Reason, State} (terminate/2 is called)
+%% Description: Handling call messages
+%%--------------------------------------------------------------------
+handle_call(Request, _, State = #state{session = Session =
+ #tcp_session{socket = Socket},
+ timers = Timers,
+ options = Options}) ->
+ Address = handle_proxy(Request#request.address, Options#options.proxy),
+
+ case httpc_request:send(Address, Request, Socket) of
+ ok ->
+ %% Activate the request time out for the new request
+ NewState = activate_request_timeout(State#state{request =
+ Request}),
+
+ ClientClose = httpc_request:is_client_closing(
+ Request#request.headers),
+ case State#state.request of
+ #request{} -> %% Old request not yet finished
+ %% Make sure to use the new value of timers in state
+ NewTimers = NewState#state.timers,
+ NewPipeline = queue:in(Request, State#state.pipeline),
+ NewSession =
+ Session#tcp_session{pipeline_length =
+ %% Queue + current
+ queue:len(NewPipeline) + 1,
+ client_close = ClientClose},
+ httpc_manager:insert_session(NewSession),
+ {reply, ok, State#state{pipeline = NewPipeline,
+ session = NewSession,
+ timers = NewTimers}};
+ undefined ->
+ %% Note: tcp-message receiving has already been
+ %% activated by handle_pipeline/2. Also
+ %% the parsing-function #state.mfa is initiated
+ %% by handle_pipeline/2.
+ cancel_timer(Timers#timers.pipeline_timer,
+ timeout_pipeline),
+ NewSession =
+ Session#tcp_session{pipeline_length = 1,
+ client_close = ClientClose},
+ httpc_manager:insert_session(NewSession),
+ {reply, ok,
+ NewState#state{request = Request,
+ session = NewSession,
+ timers =
+ Timers#timers{pipeline_timer =
+ undefined}}}
+ end;
+ {error, Reason} ->
+ {reply, {pipline_failed, Reason}, State}
+ end.
+%%--------------------------------------------------------------------
+%% Function: handle_cast(Msg, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%% Description: Handling cast messages
+%%--------------------------------------------------------------------
+
+%% When the request in process has been canceled the handler process is
+%% stopped and the pipelined requests will be reissued. This is
+%% based on the assumption that it is probably cheaper to reissue the
+%% requests than to wait for a potentially large response that we then
+%% only throw away. This of course is not always true maybe we could
+%% do something smarter here?! If the request canceled is not
+%% the one handled right now the same effect will take place in
+%% handle_pipeline/2 when the canceled request is on turn.
+handle_cast({cancel, RequestId}, State = #state{request = Request =
+ #request{id = RequestId}}) ->
+ httpc_manager:request_canceled(RequestId),
+ {stop, normal,
+ State#state{canceled = [RequestId | State#state.canceled],
+ request = Request#request{from = answer_sent}}};
+handle_cast({cancel, RequestId}, State) ->
+ httpc_manager:request_canceled(RequestId),
+ {noreply, State#state{canceled = [RequestId | State#state.canceled]}}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info(Info, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%% Description: Handling all non call/cast messages
+%%--------------------------------------------------------------------
+handle_info({Proto, _Socket, Data}, State =
+ #state{mfa = {Module, Function, Args},
+ request = #request{method = Method,
+ stream = Stream} = Request,
+ session = Session, status_line = StatusLine})
+ when Proto == tcp; Proto == ssl; Proto == httpc_handler ->
+
+ case Module:Function([Data | Args]) of
+ {ok, Result} ->
+ handle_http_msg(Result, State);
+ {_, whole_body, _} when Method == head ->
+ handle_response(State#state{body = <<>>});
+ {Module, whole_body, [Body, Length]} ->
+ {_, Code, _} = StatusLine,
+ {NewBody, NewRequest} = stream(Body, Request, Code),
+ %% When we stream we will not keep the already
+ %% streamed data, that would be a waste of memory.
+ NewLength = case Stream of
+ none ->
+ Length;
+ _ ->
+ Length - size(Body)
+ end,
+ http_transport:setopts(socket_type(Session#tcp_session.scheme),
+ Session#tcp_session.socket,
+ [{active, once}]),
+ {noreply, State#state{mfa = {Module, whole_body,
+ [NewBody, NewLength]},
+ request = NewRequest}};
+ NewMFA ->
+ http_transport:setopts(socket_type(Session#tcp_session.scheme),
+ Session#tcp_session.socket,
+ [{active, once}]),
+ {noreply, State#state{mfa = NewMFA}}
+ end;
+
+%% The Server may close the connection to indicate that the
+%% whole body is now sent instead of sending a length
+%% indicator.
+handle_info({tcp_closed, _}, State = #state{mfa = {_, whole_body, Args}}) ->
+ handle_response(State#state{body = hd(Args)});
+handle_info({ssl_closed, _}, State = #state{mfa = {_, whole_body, Args}}) ->
+ handle_response(State#state{body = hd(Args)});
+
+%%% Server closes idle pipeline
+handle_info({tcp_closed, _}, State = #state{request = undefined}) ->
+ {stop, normal, State};
+handle_info({ssl_closed, _}, State = #state{request = undefined}) ->
+ {stop, normal, State};
+
+%%% Error cases
+handle_info({tcp_closed, _}, State) ->
+ {stop, session_remotly_closed, State};
+handle_info({ssl_closed, _}, State) ->
+ {stop, session_remotly_closed, State};
+handle_info({tcp_error, _, _} = Reason, State) ->
+ {stop, Reason, State};
+handle_info({ssl_error, _, _} = Reason, State) ->
+ {stop, Reason, State};
+
+%%% Timeouts
+%% Internally, to a request handling process, a request time out is
+%% seen as a canceled request.
+handle_info({timeout, RequestId}, State =
+ #state{request = Request = #request{id = RequestId}}) ->
+ httpc_response:send(Request#request.from,
+ httpc_response:error(Request,timeout)),
+ {stop, normal,
+ State#state{canceled = [RequestId | State#state.canceled],
+ request = Request#request{from = answer_sent}}};
+handle_info({timeout, RequestId}, State = #state{request = Request}) ->
+ httpc_response:send(Request#request.from,
+ httpc_response:error(Request,timeout)),
+ {noreply, State#state{canceled = [RequestId | State#state.canceled]}};
+
+handle_info(timeout_pipeline, State = #state{request = undefined}) ->
+ {stop, normal, State};
+
+%% Setting up the connection to the server somehow failed.
+handle_info({init_error, _, ClientErrMsg},
+ State = #state{request = Request}) ->
+ NewState = answer_request(Request, ClientErrMsg, State),
+ {stop, normal, NewState};
+
+%%% httpc_manager process dies.
+handle_info({'EXIT', _, _}, State = #state{request = undefined}) ->
+ {stop, normal, State};
+%%Try to finish the current request anyway,
+%% there is a fairly high probability that it can be done successfully.
+%% Then close the connection, hopefully a new manager is started that
+%% can retry requests in the pipeline.
+handle_info({'EXIT', _, _}, State) ->
+ {noreply, State#state{status = close}}.
+
+%%--------------------------------------------------------------------
+%% Function: terminate(Reason, State) -> _  (ignored by gen_server)
+%% Description: Shutdown the httpc_handler
+%%--------------------------------------------------------------------
+terminate(normal, #state{session = undefined}) ->
+    ok; %% Init error: there is no socket to be closed.
+terminate(normal, #state{request = Request,
+			 session = #tcp_session{id = undefined,
+						socket = Socket}}) ->
+    %% Init error sending, no session information has been setup but
+    %% there is a socket that needs closing.
+    http_transport:close(socket_type(Request), Socket);
+
+%% No request in progress: deregister the session, hand any queued
+%% pipeline requests back to the manager for retry, stop the pipeline
+%% idle timer and close the socket.
+terminate(_, State = #state{session = Session, request = undefined,
+			    timers = Timers}) ->
+    catch httpc_manager:delete_session(Session#tcp_session.id),
+
+    case queue:is_empty(State#state.pipeline) of
+	false ->
+	    retry_pipline(queue:to_list(State#state.pipeline), State);
+	true ->
+	    ok
+    end,
+    cancel_timer(Timers#timers.pipeline_timer, timeout_pipeline),
+    http_transport:close(socket_type(Session#tcp_session.scheme),
+			 Session#tcp_session.socket);
+
+%% A request was in progress: answer it (unless already answered),
+%% then recurse into the clause above for the common cleanup.
+terminate(Reason, State = #state{request = Request})->
+    NewState = case Request#request.from of
+		   answer_sent ->
+		       State;
+		   _ ->
+		       answer_request(Request,
+				      httpc_response:error(Request, Reason),
+				      State)
+	       end,
+    terminate(Reason, NewState#state{request = undefined}).
+
+
+%%--------------------------------------------------------------------
+%% Func: code_change(OldVsn, State, Extra) -> {ok, NewState}
+%% Purpose: Convert process state when code is changed. No state
+%% transformation is required between the supported versions, so the
+%% state is passed through unchanged.
+%%--------------------------------------------------------------------
+code_change(_OldVsn, CurrentState, _Extra) ->
+    {ok, CurrentState}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+%% Open a transport connection to Address and send the first request
+%% on it. On success the returned state carries a fully initialised
+%% #tcp_session{} and the response-parser MFA, with the socket armed
+%% for one message ({active, once}). On connect/send failure an
+%% {init_error, ...} message is posted to self() so that handle_info/2
+%% answers the client and stops the process.
+send_first_request(Address, Request, State) ->
+    Ipv6 = (State#state.options)#options.ipv6,
+    SocketType = socket_type(Request),
+    TimeOut = (Request#request.settings)#http_options.timeout,
+    case http_transport:connect(SocketType, Address, Ipv6, TimeOut) of
+	{ok, Socket} ->
+	    case httpc_request:send(Address, Request, Socket) of
+		ok ->
+		    ClientClose =
+			httpc_request:is_client_closing(
+			  Request#request.headers),
+		    Session =
+			#tcp_session{id = {Request#request.address, self()},
+				     scheme = Request#request.scheme,
+				     socket = Socket,
+				     client_close = ClientClose},
+		    TmpState = State#state{request = Request,
+					   session = Session,
+					   mfa =
+					   {httpc_response, parse,
+					    [State#state.max_header_size]},
+					   status_line = undefined,
+					   headers = undefined,
+					   body = undefined,
+					   status = new},
+		    http_transport:setopts(SocketType,
+					   Socket, [{active, once}]),
+		    NewState = activate_request_timeout(TmpState),
+		    {ok, NewState};
+		{error, Reason} ->
+		    %% Commented out in wait of ssl support to avoid
+		    %% dialyzer warning
+		    %%case State#state.status of
+		    %%	new -> % Called from init/1
+		    self() ! {init_error, error_sending,
+			      httpc_response:error(Request, Reason)},
+		    {ok, State#state{request = Request,
+				     session =
+				     #tcp_session{socket = Socket}}}
+		    %%ssl_tunnel -> % Not called from init/1
+		    %%  NewState =
+		    %%	answer_request(Request,
+		    %%httpc_response:error(Request,
+		    %%Reason),
+		    %%			   State),
+		    %%    {stop, normal, NewState}
+		    %% end
+	    end;
+	{error, Reason} ->
+	    %% Commented out in wait of ssl support to avoid
+	    %% dialyzer warning
+	    %% case State#state.status of
+	    %%	new -> % Called from init/1
+	    self() ! {init_error, error_connecting,
+		      httpc_response:error(Request, Reason)},
+	    {ok, State#state{request = Request}}
+	    %%	ssl_tunnel -> % Not called from init/1
+	    %%    NewState =
+	    %%	answer_request(Request,
+	    %%		       httpc_response:error(Request,
+	    %%					    Reason),
+	    %%		       State),
+	    %%    {stop, normal, NewState}
+	    %%end
+    end.
+
+%% Dispatch on what the response parser delivered: a full status line
+%% with headers (and the start of the body), trailing chunked-encoding
+%% headers, or a plain body continuation.
+handle_http_msg({Version, StatusCode, ReasonPharse, Headers, Body},
+		State = #state{request = Request}) ->
+
+    case Headers#http_response_h.'content-type' of
+        "multipart/byteranges" ++ _Param ->
+            exit(not_yet_implemented);
+        _ ->
+	    start_stream({{Version, StatusCode, ReasonPharse}, Headers},
+			 Request),
+            handle_http_body(Body,
+			     State#state{status_line = {Version,
+							StatusCode,
+							ReasonPharse},
+					 headers = Headers})
+    end;
+%% Trailing headers at the end of a chunked body.
+handle_http_msg({ChunkedHeaders, Body},
+		State = #state{headers = Headers}) ->
+    NewHeaders = http_chunk:handle_headers(Headers, ChunkedHeaders),
+    handle_response(State#state{headers = NewHeaders, body = Body});
+%% Plain body data; may be streamed to the caller.
+handle_http_msg(Body, State = #state{status_line = {_,Code, _}}) ->
+    {NewBody, NewRequest}= stream(Body, State#state.request, Code),
+    handle_response(State#state{body = NewBody, request = NewRequest}).
+
+%% A response to a HEAD request never carries a body.
+handle_http_body(<<>>, State = #state{request = #request{method = head}}) ->
+    handle_response(State#state{body = <<>>});
+
+%% Decide how the body is delimited: chunked transfer-encoding, some
+%% other (unsupported) transfer-encoding, or Content-Length.
+handle_http_body(Body, State = #state{headers = Headers, session = Session,
+				      max_body_size = MaxBodySize,
+				      status_line = {_,Code, _},
+				      request = Request}) ->
+    case Headers#http_response_h.'transfer-encoding' of
+        "chunked" ->
+	    case http_chunk:decode(Body, State#state.max_body_size,
+				   State#state.max_header_size,
+				   {Code, Request}) of
+		{Module, Function, Args} ->
+		    %% More data needed: re-arm the socket and wait.
+		    http_transport:setopts(socket_type(
+					   Session#tcp_session.scheme),
+					   Session#tcp_session.socket,
+					   [{active, once}]),
+		    {noreply, State#state{mfa =
+					  {Module, Function, Args}}};
+		{ok, {ChunkedHeaders, NewBody}} ->
+		    NewHeaders = http_chunk:handle_headers(Headers,
+							   ChunkedHeaders),
+		    handle_response(State#state{headers = NewHeaders,
+						body = NewBody})
+	    end;
+        %% is_list/1 replaces the deprecated list/1 guard BIF; any
+        %% transfer-encoding other than "chunked" is unsupported.
+        Encoding when is_list(Encoding) ->
+	    NewState = answer_request(Request,
+				      httpc_response:error(Request,
+							   unknown_encoding),
+				      State),
+	    {stop, normal, NewState};
+        _ ->
+            Length =
+		list_to_integer(Headers#http_response_h.'content-length'),
+            case ((Length =< MaxBodySize) or (MaxBodySize == nolimit)) of
+                true ->
+                    case httpc_response:whole_body(Body, Length) of
+                        %% The whole body is already received.
+                        {ok, Body} ->
+			    {NewBody, NewRequest}= stream(Body, Request, Code),
+			    handle_response(State#state{body = NewBody,
+							request = NewRequest});
+                        %% Need more data: store the continuation MFA.
+                        MFA ->
+			    http_transport:setopts(
+			      socket_type(Session#tcp_session.scheme),
+			      Session#tcp_session.socket,
+			      [{active, once}]),
+			    {noreply, State#state{mfa = MFA}}
+		    end;
+                false ->
+		    NewState =
+			answer_request(Request,
+				       httpc_response:error(Request,
+							    body_too_big),
+				       State),
+                    {stop, normal, NewState}
+            end
+    end.
+
+%%% Normally I do not comment out code, I throw it away. But this might
+%%% actually be used one day if ssl support is improved.
+%% handle_response(State = #state{status = ssl_tunnel,
+%% request = Request,
+%% options = Options,
+%% session = #tcp_session{socket = Socket,
+%% scheme = Scheme},
+%% status_line = {_, 200, _}}) ->
+%% %%% Insert code for upgrading the socket if and when ssl supports this.
+%% Address = handle_proxy(Request#request.address, Options#options.proxy),
+%% send_first_request(Address, Request, State);
+%% handle_response(State = #state{status = ssl_tunnel,
+%% request = Request}) ->
+%% NewState = answer_request(Request,
+%% httpc_response:error(Request,
+%% ssl_proxy_tunnel_failed),
+%% State),
+%% {stop, normal, NewState};
+
+%% First response on a new connection: decide whether the session can
+%% be kept alive / pipelined before processing it.
+handle_response(State = #state{status = new}) ->
+    handle_response(try_to_enable_pipline(State));
+
+handle_response(State = #state{request = Request,
+			       status = Status,
+			       session = Session,
+			       status_line = StatusLine,
+			       headers = Headers,
+			       body = Body,
+			       options = Options}) when Status =/= new ->
+
+    handle_cookies(Headers, Request, Options),
+    case httpc_response:result({StatusLine, Headers, Body}, Request) of
+	%% 100-continue
+	continue ->
+	    %% Send request body
+	    {_, RequestBody} = Request#request.content,
+	    http_transport:send(socket_type(Session#tcp_session.scheme),
+					    Session#tcp_session.socket,
+				RequestBody),
+	    %% Wait for next response
+	    http_transport:setopts(socket_type(Session#tcp_session.scheme),
+				   Session#tcp_session.socket,
+				   [{active, once}]),
+	    {noreply,
+	     State#state{mfa = {httpc_response, parse,
+				[State#state.max_header_size]},
+			 status_line = undefined,
+			 headers = undefined,
+			 body = undefined
+			}};
+	%% Ignore unexpected 100-continue response and receive the
+	%% actual response that the server will send right away.
+	{ignore, Data} ->
+	    NewState = State#state{mfa =
+				   {httpc_response, parse,
+				    [State#state.max_header_size]},
+				   status_line = undefined,
+				   headers = undefined,
+				   body = undefined},
+	    handle_info({httpc_handler, dummy, Data}, NewState);
+	%% On a redirect or retry the current request becomes
+	%% obsolete and the manager will create a new request
+	%% with the same id as the current.
+	{redirect, NewRequest, Data}->
+	    ok = httpc_manager:redirect_request(NewRequest),
+	    handle_pipeline(State#state{request = undefined}, Data);
+	{retry, TimeNewRequest, Data}->
+	    ok = httpc_manager:retry_request(TimeNewRequest),
+	    handle_pipeline(State#state{request = undefined}, Data);
+	%% A complete, final response: answer the client and move on
+	%% to the next pipelined request, if any.
+	{ok, Msg, Data} ->
+	    end_stream(StatusLine, Request),
+	    NewState = answer_request(Request, Msg, State),
+	    handle_pipeline(NewState, Data);
+	{stop, Msg} ->
+	    end_stream(StatusLine, Request),
+	    NewState = answer_request(Request, Msg, State),
+	    {stop, normal, NewState}
+    end.
+
+%% Cookie handling, governed by the 'cookies' option.
+handle_cookies(_,_, #options{cookies = disabled}) ->
+    ok;
+%% User wants to verify the cookies before they are stored,
+%% so the user will have to call a store command.
+handle_cookies(_,_, #options{cookies = verify}) ->
+    ok;
+%% Cookies enabled: parse any Set-Cookie headers out of the response
+%% and hand them to the manager for storage.
+handle_cookies(Headers, Request, #options{cookies = enabled}) ->
+    {Host, _ } = Request#request.address,
+    Cookies = http_cookie:cookies(Headers#http_response_h.other,
+				  Request#request.path, Host),
+    httpc_manager:store_cookies(Cookies, Request#request.address).
+
+%% Called after the current request is answered; decides what to do
+%% with the connection and any queued pipeline requests.
+%% This request could not be pipelined
+handle_pipeline(State = #state{status = close}, _) ->
+    {stop, normal, State};
+
+handle_pipeline(State = #state{status = pipeline, session = Session},
+		Data) ->
+    case queue:out(State#state.pipeline) of
+	{empty, _} ->
+	    %% The server may choose to terminate an idle pipeline
+	    %% in this case we want to receive the close message
+	    %% at once and not when trying to pipeline the next
+	    %% request.
+	    http_transport:setopts(socket_type(Session#tcp_session.scheme),
+				   Session#tcp_session.socket,
+				   [{active, once}]),
+	    %% If a pipeline that has been idle for some time is not
+	    %% closed by the server, the client may want to close it.
+	    NewState = activate_pipeline_timeout(State),
+	    NewSession = Session#tcp_session{pipeline_length = 0},
+	    httpc_manager:insert_session(NewSession),
+	    {noreply,
+	     NewState#state{request = undefined,
+			    mfa = {httpc_response, parse,
+				   [NewState#state.max_header_size]},
+			    status_line = undefined,
+			    headers = undefined,
+			    body = undefined
+			   }
+	    };
+	{{value, NextRequest}, Pipeline} ->
+	    case lists:member(NextRequest#request.id,
+			      State#state.canceled) of
+		true ->
+		    %% See comment for handle_cast({cancel, RequestId})
+		    {stop, normal,
+		     State#state{request =
+				 NextRequest#request{from = answer_sent}}};
+		false ->
+		    NewSession =
+			Session#tcp_session{pipeline_length =
+					    %% Queue + current
+					    queue:len(Pipeline) + 1},
+		    httpc_manager:insert_session(NewSession),
+		    NewState =
+			State#state{pipeline = Pipeline,
+				    request = NextRequest,
+				    mfa = {httpc_response, parse,
+					   [State#state.max_header_size]},
+				    status_line = undefined,
+				    headers = undefined,
+				    body = undefined},
+		    case Data of
+			<<>> ->
+			    http_transport:setopts(
+			      socket_type(Session#tcp_session.scheme),
+			      Session#tcp_session.socket,
+			      [{active, once}]),
+			    {noreply, NewState};
+			_ ->
+			    %% If we already received some bytes of
+			    %% the next response
+			    handle_info({httpc_handler, dummy, Data},
+					NewState)
+		    end
+	    end
+    end.
+
+%% Synchronous message to a handler process.
+call(Msg, HandlerPid, Timeout) ->
+    gen_server:call(HandlerPid, Msg, Timeout).
+
+%% Asynchronous (fire-and-forget) message to a handler process.
+cast(Msg, HandlerPid) ->
+    gen_server:cast(HandlerPid, Msg).
+
+%% Start a timer for the current request unless the configured
+%% timeout is infinity; the timer reference is stored so it can be
+%% canceled when the request is answered.
+%%
+%% Fix: the original built a brand new #timers{} record here, which
+%% silently reset the pipeline_timer field to undefined. Update the
+%% existing record instead so both timer fields are preserved.
+activate_request_timeout(State = #state{request = Request}) ->
+    Time = (Request#request.settings)#http_options.timeout,
+    case Time of
+	infinity ->
+	    State;
+	_ ->
+	    Ref = erlang:send_after(Time, self(),
+				    {timeout, Request#request.id}),
+	    Timers = State#state.timers,
+	    State#state{timers =
+			Timers#timers{request_timers =
+				      [{Request#request.id, Ref} |
+				       Timers#timers.request_timers]}}
+    end.
+
+%% Start the pipeline idle timer, unless disabled (infinity).
+%%
+%% Fix: the original created a fresh #timers{} record, wiping any
+%% outstanding request_timers; update the existing record instead.
+activate_pipeline_timeout(State = #state{options =
+					 #options{pipeline_timeout =
+						  infinity}}) ->
+    State;
+activate_pipeline_timeout(State = #state{options =
+					 #options{pipeline_timeout = Time},
+					 timers = Timers}) ->
+    Ref = erlang:send_after(Time, self(), timeout_pipeline),
+    State#state{timers = Timers#timers{pipeline_timer = Ref}}.
+
+%% HTTP/1.1 and later 1.x servers are assumed to support persistent
+%% connections; an HTTP/1.0 server only if it explicitly announces
+%% "Connection: keep-alive". N is the rest of the version string
+%% after "HTTP/1.", so hd(N) >= $1 matches minor versions 1 and up.
+is_pipeline_capable_server("HTTP/1." ++ N, _) when hd(N) >= $1 ->
+    true;
+is_pipeline_capable_server("HTTP/1.0",
+			   #http_response_h{connection = "keep-alive"}) ->
+    true;
+is_pipeline_capable_server(_,_) ->
+    false.
+
+%% The connection may be reused only if neither side has asked for it
+%% to be closed: the client via its request headers, the server via
+%% the response headers.
+is_keep_alive_connection(Headers, Session) ->
+    ClientClose = Session#tcp_session.client_close,
+    ServerClose = httpc_response:is_server_closing(Headers),
+    not (ClientClose or ServerClose).
+
+%% A session qualifies for pipelining when the server is pipeline
+%% capable, the connection is keep-alive and the current request
+%% method is idempotent. Qualifying sessions are registered with the
+%% manager so new requests can reuse them. (Note: 'and' evaluates
+%% both operands; all three checks always run.)
+try_to_enable_pipline(State = #state{session = Session,
+				     request = #request{method = Method},
+				     status_line = {Version, _, _},
+				     headers = Headers}) ->
+    case (is_pipeline_capable_server(Version, Headers)) and
+	(is_keep_alive_connection(Headers, Session)) and
+	(httpc_request:is_idempotent(Method)) of
+	true ->
+	    httpc_manager:insert_session(Session),
+	    State#state{status = pipeline};
+	false ->
+	    State#state{status = close}
+    end.
+
+%% Deliver Msg to the client that issued Request, cancel the
+%% request's timeout timer and remove it from the timer list. The
+%% request is marked answer_sent so it cannot be answered twice.
+answer_request(Request, Msg, State = #state{timers = Timers}) ->
+    httpc_response:send(Request#request.from, Msg),
+    RequestTimers = Timers#timers.request_timers,
+    TimerRef =
+	http_util:key1search(RequestTimers, Request#request.id, undefined),
+    Timer = {Request#request.id, TimerRef},
+    cancel_timer(TimerRef, {timeout, Request#request.id}),
+    State#state{request = Request#request{from = answer_sent},
+		timers =
+		Timers#timers{request_timers =
+			      lists:delete(Timer, RequestTimers)}}.
+%% Cancel a timer and flush a possibly already-delivered timeout
+%% message from the mailbox, so a late timeout is never processed
+%% after the timer was logically canceled.
+cancel_timer(undefined, _) ->
+    ok;
+cancel_timer(Timer, TimeoutMsg) ->
+    erlang:cancel_timer(Timer),
+    receive
+	TimeoutMsg ->
+	    ok
+    after 0 ->
+	    ok
+    end.
+
+%% Hand every request left in the pipeline back to the manager for
+%% retry (called from terminate/3 when the connection dies). If the
+%% hand-over fails, the waiting client is answered with an error.
+retry_pipline([], _) ->
+    ok;
+
+retry_pipline([Request |PipeLine],  State = #state{timers = Timers}) ->
+    NewState =
+	case (catch httpc_manager:retry_request(Request)) of
+	    ok ->
+		RequestTimers = Timers#timers.request_timers,
+		Timer = {_, TimerRef} =
+		    http_util:key1search(RequestTimers, Request#request.id,
+					 {undefined, undefined}),
+		cancel_timer(TimerRef, {timeout, Request#request.id}),
+		State#state{timers = Timers#timers{request_timers =
+					  lists:delete(Timer,
+						       RequestTimers)}};
+	    Error ->
+		%% Fix: answer_request/3 expects the #request{} record;
+		%% the original passed Request#request.from (a pid),
+		%% which made answer_request crash on this error path.
+		answer_request(Request,
+			       httpc_response:error(Request, Error), State)
+	    end,
+    retry_pipline(PipeLine, NewState).
+
+%%% Decide which {Host, Port} the connection should actually target:
+%%% the destination itself when no proxy is configured or the host is
+%%% on the no-proxy list, otherwise the proxy address.
+handle_proxy({_Host, _Port} = HostPort, {undefined, _NoProxy}) ->
+    HostPort;
+handle_proxy({Host, _Port} = HostPort, {Proxy, NoProxy}) ->
+    case is_no_proxy_dest(Host, NoProxy) of
+	true ->
+	    HostPort;
+	false ->
+	    Proxy
+    end.
+
+%% Walk the no-proxy list: entries of the form "*.domain" match any
+%% host whose name ends in that domain; other entries match either an
+%% exact host name or an address prefix.
+is_no_proxy_dest(_, []) ->
+    false;
+is_no_proxy_dest(Host, [ "*." ++ NoProxyDomain | NoProxyDests]) ->
+
+    case is_no_proxy_dest_domain(Host, NoProxyDomain) of
+	true ->
+	    true;
+	false ->
+	    is_no_proxy_dest(Host, NoProxyDests)
+    end;
+
+is_no_proxy_dest(Host, [NoProxyDest | NoProxyDests]) ->
+    %% Host names compare exactly; anything that is not a host name
+    %% is treated as an address prefix.
+    IsNoProxyDest = case http_util:is_hostname(NoProxyDest) of
+			true ->
+			    fun is_no_proxy_host_name/2;
+			false ->
+			    fun is_no_proxy_dest_address/2
+		    end,
+
+    case IsNoProxyDest(Host, NoProxyDest) of
+	true ->
+	    true;
+	false ->
+	    is_no_proxy_dest(Host, NoProxyDests)
+    end.
+
+%% True when the destination exactly matches a no-proxy host name.
+is_no_proxy_host_name(Host, NoProxyHost) ->
+    Host =:= NoProxyHost.
+
+%% True when the destination host ends in DomainPart, e.g.
+%% "www.example.com" matches the entry "*.example.com" (the caller
+%% has already stripped the leading "*.").
+is_no_proxy_dest_domain(Dest, DomainPart) ->
+    lists:suffix(DomainPart, Dest).
+
+%% True when the no-proxy entry is the whole destination address or a
+%% prefix of it (e.g. "134.138" matches "134.138.1.1"). The original
+%% exact-equality clause is subsumed: a string is a prefix of itself.
+is_no_proxy_dest_address(Dest, AddressPart) ->
+    lists:prefix(AddressPart, Dest).
+
+%% Map a request, or a bare scheme atom, onto the transport type used
+%% by http_transport: plain TCP (ip_comm) for http, {ssl, Opts} for
+%% https.
+socket_type(#request{scheme = http}) ->
+    ip_comm;
+socket_type(#request{scheme = https, settings = Settings}) ->
+    {ssl, Settings#http_options.ssl};
+socket_type(http) ->
+    ip_comm;
+socket_type(https) ->
+    {ssl, []}. %% Dummy ssl options; fine for e.g. setopts which does not use this value
+
+%% Announce the start of async streaming to the caller, but only when
+%% streaming to self was requested and the response status is 200.
+start_stream(_, #request{stream = none}) ->
+    ok;
+start_stream({{_, 200, _}, Headers}, Request = #request{stream = self}) ->
+    Msg = httpc_response:stream_start(Headers, Request),
+    httpc_response:send(Request#request.from, Msg);
+start_stream(_, _) ->
+    ok.
+
+%% Note the end stream message is handled by httpc_response and will
+%% be sent by answer_request
+end_stream(_, #request{stream = none}) ->
+    ok;
+end_stream(_, #request{stream = self}) ->
+    ok;
+%% Streaming to a file: close the file descriptor.
+%% NOTE(review): only a 200 response closes Fd; other status codes
+%% leave it open - presumably because only 200 opens the stream file,
+%% but worth confirming against the stream/3 implementation.
+end_stream({_,200,_}, #request{stream = Fd}) ->
+    case file:close(Fd) of
+	ok ->
+	    ok;
+	{error, enospc} -> % Could be due to delayed_write
+	    file:close(Fd)
+    end;
+end_stream(_, _) ->
+    ok.
+
+%% Enable increasing levels of dbg tracing of this process, driven by
+%% the 'verbose' option: message receives only, calls, or calls with
+%% local (non-exported) functions included.
+handle_verbose(verbose) ->
+    dbg:p(self(), [r]);
+handle_verbose(debug) ->
+    dbg:p(self(), [call]),
+    dbg:tp(?MODULE, [{'_', [], [{return_trace}]}]);
+handle_verbose(trace) ->
+    dbg:p(self(), [call]),
+    dbg:tpl(?MODULE, [{'_', [], [{return_trace}]}]);
+handle_verbose(_) ->
+    ok.
+
+%%% Normally I do not comment out code, I throw it away. But this might
+%%% actually be used one day if ssl support is improved.
+%% send_ssl_tunnel_request(Address, Request = #request{address = {Host, Port}},
+%% State) ->
+%% %% A ssl tunnel request is a special http request that looks like
+%% %% CONNECT host:port HTTP/1.1
+%% SslTunnelRequest = #request{method = connect, scheme = http,
+%% headers =
+%% #http_request_h{
+%% host = Host,
+%% address = Address,
+%% path = Host ++ ":",
+%% pquery = integer_to_list(Port),
+%% other = [{ "Proxy-Connection", "keep-alive"}]},
+%% Ipv6 = (State#state.options)#options.ipv6,
+%% SocketType = socket_type(SslTunnelRequest),
+%% case http_transport:connect(SocketType,
+%% SslTunnelRequest#request.address, Ipv6) of
+%% {ok, Socket} ->
+%% case httpc_request:send(Address, SslTunnelRequest, Socket) of
+%% ok ->
+%% Session = #tcp_session{id =
+%% {SslTunnelRequest#request.address,
+%% self()},
+%% scheme =
+%% SslTunnelRequest#request.scheme,
+%% socket = Socket},
+%% NewState = State#state{mfa =
+%% {httpc_response, parse,
+%% [State#state.max_header_size]},
+%% request = Request,
+%% session = Session},
+%% http_transport:setopts(socket_type(
+%% SslTunnelRequest#request.scheme),
+%% Socket,
+%% [{active, once}]),
+%% {ok, NewState};
+%% {error, Reason} ->
+%% self() ! {init_error, error_sending,
+%% httpc_response:error(Request, Reason)},
+%% {ok, State#state{request = Request,
+%% session = #tcp_session{socket =
+%% Socket}}}
+%% end;
+%% {error, Reason} ->
+%% self() ! {init_error, error_connecting,
+%% httpc_response:error(Request, Reason)},
+%% {ok, State#state{request = Request}}
+%% end.
diff --git a/src/couch_inets/httpc_internal.hrl b/src/couch_inets/httpc_internal.hrl
new file mode 100644
index 00000000..86a33cd3
--- /dev/null
+++ b/src/couch_inets/httpc_internal.hrl
@@ -0,0 +1,87 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+
+%% Default values for the client options below.
+-define(HTTP_REQUEST_TIMEOUT, infinity).
+-define(HTTP_PIPELINE_TIMEOUT, 0).
+-define(HTTP_PIPELINE_LENGTH, 2).
+-define(HTTP_MAX_TCP_SESSIONS, 2).
+-define(HTTP_MAX_REDIRECTS, 4).
+
+%%% HTTP Client per request settings
+-record(http_options,{
+	  %% Milliseconds before a request times out
+	  timeout = ?HTTP_REQUEST_TIMEOUT,
+	  %% bool() - True if automatic redirection on 30X responses.
+	  autoredirect = true,
+	  ssl = [], % Ssl socket options
+	  proxy_auth, % {User, Password} = {string(), string()}
+	  relaxed = false % bool() true if not strictly standard compliant
+	 }).
+
+%%% HTTP Client per profile setting. Currently there is only one profile.
+-record(options, {
+	  proxy = {undefined, []}, % {{ProxyHost, ProxyPort}, [NoProxy]},
+	  pipeline_timeout = ?HTTP_PIPELINE_TIMEOUT,
+	  max_pipeline_length = ?HTTP_PIPELINE_LENGTH,
+	  max_sessions = ?HTTP_MAX_TCP_SESSIONS,
+	  cookies = disabled, % enabled | disabled | verify
+	  ipv6 = enabled, % enabled | disabled
+	  verbose = false
+	 }).
+
+%%% All data associated to a specific HTTP request
+-record(request,{
+	  id, % ref() - Request Id
+	  from, % pid() - Caller
+	  redircount = 0,% Number of redirects made for this request
+	  scheme, % http | https
+	  address, % ({Host,Port}) Destination Host and Port
+	  path, % string() - Path of parsed URL
+	  pquery, % string() - Rest of parsed URL
+	  method, % atom() - HTTP request Method
+	  headers, % #http_request_h{}
+	  content, % {ContentType, Body} - Current HTTP request
+	  settings, % #http_options{} - User defined settings
+	  abs_uri, % string() ex: "http://www.erlang.org"
+	  userinfo, % string() - optional "<userinfo>@<host>:<port>"
+	  stream, % Boolean() - stream async reply?
+	  headers_as_is % Boolean() - workaround for servers that do
+	  %% not honor the http standard, can also be used for testing purposes.
+	 }).
+
+-record(tcp_session,{
+	  id, % {{Host, Port}, HandlerPid}
+	  client_close, % true | false
+	  scheme, % http (HTTP/TCP) | https (HTTP/SSL/TCP)
+	  socket, % Open socket, used by connection
+	  pipeline_length = 1 % Current length of pipeline
+	 }).
+
+-record(http_cookie,{
+	  domain,
+	  domain_default = false,
+	  name,
+	  value,
+	  comment,
+	  max_age = session,
+	  path,
+	  path_default = false,
+	  secure = false,
+	  version = "0"
+	 }).
+
+
diff --git a/src/couch_inets/httpc_manager.erl b/src/couch_inets/httpc_manager.erl
new file mode 100644
index 00000000..cf55827b
--- /dev/null
+++ b/src/couch_inets/httpc_manager.erl
@@ -0,0 +1,475 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+
+-module(httpc_manager).
+
+-behaviour(gen_server).
+
+-include("httpc_internal.hrl").
+-include("http_internal.hrl").
+
+%% Application API
+-export([start_link/1, request/1, cancel_request/1,
+ request_canceled/1, retry_request/1, redirect_request/1,
+ insert_session/1, delete_session/1, set_options/1, store_cookies/2,
+ cookies/1]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-record(state, {
+	  cancel = [],	 % [{RequestId, HandlerPid, ClientPid}]
+	  handler_db,    % ets() - Entry: {Requestid, HandlerPid, ClientPid}
+	  cookie_db,     % {ets(), dets()} - {session_cookie_db, cookie_db}
+	  options = #options{} % Current default options for new requests
+	 }).
+
+%%====================================================================
+%% Application API
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link({default, CookieDir}) -> {ok, Pid}
+%%
+%% Description: Starts the http request manager process. (Started by
+%% the inets supervisor.)
+%%--------------------------------------------------------------------
+start_link({default, CookieDir}) ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE,
+			  [{http_default_cookie_db, CookieDir}], []).
+
+%%--------------------------------------------------------------------
+%% Function: request(Request) -> {ok, RequestId} | {error, Reason}
+%%	Request = #request{}
+%%
+%% Description: Sends a request to the httpc manager process.
+%%--------------------------------------------------------------------
+request(Request) ->
+    call({request, Request}, infinity).
+
+%%--------------------------------------------------------------------
+%% Function: retry_request(Request) -> _
+%%	Request = #request{} | {Time, #request{}} (delayed retry,
+%%	see the retry_or_redirect_request cast clauses)
+%%
+%% Description: Resends a request to the httpc manager process, intended
+%% to be called by the httpc handler process if it has to terminate with
+%% a non empty pipeline.
+%%--------------------------------------------------------------------
+retry_request(Request) ->
+    cast({retry_or_redirect_request, Request}).
+
+%%--------------------------------------------------------------------
+%% Function: redirect_request(Request) -> _
+%%	Request = #request{}
+%%
+%% Description: Sends an automatic redirect request to the httpc
+%% manager process, intended to be called by the httpc handler process
+%% when the automatic redirect option is set.
+%%--------------------------------------------------------------------
+redirect_request(Request) ->
+    cast({retry_or_redirect_request, Request}).
+
+%%--------------------------------------------------------------------
+%% Function: cancel_request(RequestId) -> ok
+%%	RequestId - ref()
+%%
+%% Description: Cancels the request with <RequestId>. The call does
+%% not return until the handler has confirmed the cancellation (see
+%% the cancel_request / request_canceled server clauses).
+%%--------------------------------------------------------------------
+cancel_request(RequestId) ->
+    call({cancel_request, RequestId}, infinity).
+
+%%--------------------------------------------------------------------
+%% Function: request_canceled(RequestId) -> ok
+%%	RequestId - ref()
+%%
+%% Description: Confirms that a request has been canceled. Intended to
+%% be called by the httpc handler process.
+%%--------------------------------------------------------------------
+request_canceled(RequestId) ->
+    cast({request_canceled, RequestId}).
+
+%%--------------------------------------------------------------------
+%% Function: insert_session(Session) -> _
+%%	Session - #tcp_session{}
+%%
+%% Description: Inserts session information into the httpc manager table
+%% httpc_manager_session_db. Intended to be called by the httpc request
+%% handler process. (The table is created public in init/1, so handler
+%% processes write to it directly.)
+%%--------------------------------------------------------------------
+insert_session(Session) ->
+    ets:insert(httpc_manager_session_db, Session).
+
+%%--------------------------------------------------------------------
+%% Function: delete_session(SessionId) -> _
+%%	SessionId -  {{Host, Port}, HandlerPid}
+%%
+%% Description: Deletes session information from the httpc manager table
+%% httpc_manager_session_db. Intended to be called by the httpc request
+%% handler process.
+%%--------------------------------------------------------------------
+delete_session(SessionId) ->
+    ets:delete(httpc_manager_session_db, SessionId).
+
+%%--------------------------------------------------------------------
+%% Function: set_options(Options) -> ok
+%%
+%% Options = [Option]
+%% Option = {proxy, {Proxy, [NoProxy]}} | {max_pipeline_length, integer()} |
+%%          {max_sessions, integer()} | {pipeline_timeout, integer()}
+%% Proxy = {Host, Port}
+%% NoProxy - [Domain | HostName | IPAddress]
+%% Max - integer()
+%%
+%% Description: Sets the options to be used by the client; unspecified
+%% options keep their previous values (see the set_options cast clause).
+%%--------------------------------------------------------------------
+set_options(Options) ->
+    cast({set_options, Options}).
+
+%%--------------------------------------------------------------------
+%% Function: store_cookies(Cookies, Address) -> ok
+%%
+%% Cookies = [Cookie]
+%% Cookie = #http_cookie{}
+%%
+%% Description: Stores cookies from the server. An empty cookie list
+%% is a no-op and does not message the server at all.
+%%--------------------------------------------------------------------
+store_cookies([], _) ->
+    ok;
+store_cookies(Cookies, Address) ->
+    cast({store_cookies, {Cookies, Address}}).
+
+%%--------------------------------------------------------------------
+%% Function: cookies(Url) -> CookieHeaders
+%%
+%% Url = string()
+%%
+%% Description: Retrieves the cookie headers that apply to a request
+%% for Url, based on the stored cookie database.
+%%--------------------------------------------------------------------
+cookies(Url) ->
+    call({cookies, Url}, infinity).
+
+%%====================================================================
+%% gen_server callback functions
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init([CookiesConf | Options]) -> {ok, State} |
+%%                       {ok, State, Timeout} | ignore | {stop, Reason}
+%% Description: Initiates the httpc_manager process: creates the
+%% (public) session table, the per-request handler table and opens
+%% the cookie database.
+%%--------------------------------------------------------------------
+init([CookiesConf|_Options]) ->
+    process_flag(trap_exit, true),
+    ets:new(httpc_manager_session_db,
+	    [public, set, named_table, {keypos, #tcp_session.id}]),
+    {ok, #state{handler_db = ets:new(handler_db, [protected, set]),
+		cookie_db =
+		http_cookie:open_cookie_db({CookiesConf,
+					    http_session_cookie_db})
+	       }}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_call(Request, From, State) -> {reply, Reply, State} |
+%%                                      {reply, Reply, State, Timeout} |
+%%                                      {noreply, State}               |
+%%                                      {noreply, State, Timeout}      |
+%%                                      {stop, Reason, Reply, State} | (terminate/2 is called)
+%%                                      {stop, Reason, State} (terminate/2 is called)
+%% Description: Handling call messages
+%%--------------------------------------------------------------------
+%% Hand a new request over to a handler process; if handle_request/2
+%% crashes, report the error to the client before stopping.
+handle_call({request, Request}, _, State) ->
+    case (catch handle_request(Request, State)) of
+	{reply, Msg, NewState} ->
+	    {reply, Msg, NewState};
+	Error ->
+	    {stop, Error, httpc_response:error(Request, Error), State}
+    end;
+
+%% Cancellation is asynchronous towards the handler: the reply to the
+%% caller is deferred until the handler confirms via request_canceled/1.
+handle_call({cancel_request, RequestId}, From, State) ->
+    case ets:lookup(State#state.handler_db, RequestId) of
+	[] ->
+	    %% Nothing to cancel
+	    {reply, ok, State};
+	[{_, Pid, _}] ->
+	    httpc_handler:cancel(RequestId, Pid),
+	    {noreply, State#state{cancel =
+				  [{RequestId, Pid, From} |
+				   State#state.cancel]}}
+    end;
+
+%% Look up the cookie headers applicable to Url.
+handle_call({cookies, Url}, _, State) ->
+    case http_uri:parse(Url) of
+	{Scheme, _, Host, Port, Path, _} ->
+	    CookieHeaders =
+		http_cookie:header(Scheme, {Host, Port},
+				   Path, State#state.cookie_db),
+	    {reply, CookieHeaders, State};
+	Msg ->
+	    {reply, Msg, State}
+    end;
+
+handle_call(Msg, From, State) ->
+    %% Fix: error_logger:error_report/2 takes (Type, Report); the
+    %% original call treated the format string as the report type and
+    %% never formatted the ~p arguments. error_msg/2 formats them as
+    %% intended (and a missing space before "from:" is restored).
+    error_logger:error_msg("HTTPC_MANAGER received unknown call: ~p "
+			   "from: ~p~n", [Msg, From]),
+    {reply, {error, 'API_violation'}, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast(Msg, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%% Description: Handling cast messages
+%%--------------------------------------------------------------------
+%% gen_server callback: asynchronous messages.
+%% A retry/redirect tagged with a delay is re-issued after Time ms
+%% via retry_request/1 (scheduled with timer:apply_after/4).
+handle_cast({retry_or_redirect_request, {Time, Request}}, State) ->
+    {ok, _} = timer:apply_after(Time, ?MODULE, retry_request, [Request]),
+    {noreply, State};
+
+%% An immediate retry/redirect is dispatched like a fresh request;
+%% on failure the error is reported to the caller and the manager
+%% stops.
+handle_cast({retry_or_redirect_request, Request}, State) ->
+    case (catch handle_request(Request, State)) of
+	{reply, {ok, _}, NewState} ->
+	    {noreply, NewState};
+	Error ->
+	    httpc_response:error(Request, Error),
+	    {stop, Error, State}
+    end;
+
+%% A handler confirmed a cancellation: forget the request and reply
+%% ok to the caller parked in the cancel list by handle_call.
+handle_cast({request_canceled, RequestId}, State) ->
+    ets:delete(State#state.handler_db, RequestId),
+    case lists:keysearch(RequestId, 1, State#state.cancel) of
+	{value, Entry = {RequestId, _, From}} ->
+	    gen_server:reply(From, ok),
+	    {noreply,
+	     State#state{cancel = lists:delete(Entry, State#state.cancel)}};
+	_ ->
+	    {noreply, State}
+    end;
+%% Merge new option values over the old ones (key1search falls back
+%% to the old value for keys not present in Options) and adjust dbg
+%% tracing when the verbose level changed.
+handle_cast({set_options, Options}, State = #state{options = OldOptions}) ->
+    NewOptions =
+	#options{proxy =
+		 http_util:key1search(Options, proxy,
+				      OldOptions#options.proxy),
+		 pipeline_timeout =
+		 http_util:key1search(Options, pipeline_timeout,
+				      OldOptions#options.pipeline_timeout),
+		 max_pipeline_length =
+		 http_util:key1search(Options, max_pipeline_length,
+				      OldOptions#options.max_pipeline_length),
+		 max_sessions =
+		 http_util:key1search(Options, max_sessions,
+				      OldOptions#options.max_sessions),
+		 cookies = http_util:key1search(Options, cookies,
+						OldOptions#options.cookies),
+		 ipv6 = http_util:key1search(Options, ipv6,
+					     OldOptions#options.ipv6),
+		 verbose = http_util:key1search(Options, verbose,
+						OldOptions#options.verbose)
+		},
+    %% Unchanged level: nothing to do. Turned off: stop tracing.
+    %% Turned on from off: start a tracer. Changed level: restart.
+    case {OldOptions#options.verbose, NewOptions#options.verbose} of
+	{Same, Same} ->
+	    ok;
+	{_, false} ->
+	    dbg:stop();
+	{false, Level} ->
+	    dbg:tracer(),
+	    handle_verbose(Level);
+	{_, Level} ->
+	    dbg:stop(),
+	    dbg:tracer(),
+	    handle_verbose(Level)
+    end,
+
+    {noreply, State#state{options = NewOptions}};
+
+%% Cookie storage is a no-op when cookie support is disabled.
+handle_cast({store_cookies, _},
+	    State = #state{options = #options{cookies = disabled}}) ->
+    {noreply, State};
+
+handle_cast({store_cookies, {Cookies, _}}, State) ->
+    ok = do_store_cookies(Cookies, State),
+    {noreply, State};
+
+handle_cast(Msg, State) ->
+    error_logger:error_report("HTTPC_MANAGER recived unkown cast: ~p", [Msg]),
+    {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info(Info, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%% Description: Handling all non call/cast messages
+%%---------------------------------------------------------
+%% gen_server callback: non-call/cast messages.
+handle_info({'EXIT', _, _}, State) ->
+    %% Handled in DOWN
+    {noreply, State};
+%% A monitored handler process terminated: drop every request it was
+%% serving from the handler table.
+handle_info({'DOWN', _, _, Pid, _}, State) ->
+    ets:match_delete(State#state.handler_db, {'_', Pid, '_'}),
+
+    %% If there were any canceled requests handled by the process
+    %% that has now terminated, the cancellation can be viewed as
+    %% successful: reply ok to each waiting caller and drop the
+    %% entry from the cancel list.
+    NewCanceldList =
+	lists:foldl(fun(Entry = {_, HandlerPid, From}, Acc) ->
+			case HandlerPid of
+			    Pid ->
+				gen_server:reply(From, ok),
+				lists:delete(Entry, Acc);
+			    _ ->
+				Acc
+			end
+		end, State#state.cancel, State#state.cancel),
+    {noreply, State#state{cancel = NewCanceldList}};
+handle_info(Info, State) ->
+    error_logger:error_report("Unknown message in "
+			      "httpc_manager:handle_info ~p~n", [Info]),
+    {noreply, State}.
+%%--------------------------------------------------------------------
+%% Function: terminate(Reason, State) -> _ (ignored by gen_server)
+%% Description: Shutdown the httpc_handler
+%%--------------------------------------------------------------------
+%% gen_server callback: release the resources owned by the manager -
+%% the cookie database, the named session table and the handler table.
+terminate(_Reason, #state{cookie_db = CookieDb, handler_db = HandlerDb}) ->
+    http_cookie:close_cookie_db(CookieDb),
+    ets:delete(httpc_manager_session_db),
+    ets:delete(HandlerDb).
+
+%%--------------------------------------------------------------------
+%% Func: code_change(_OldVsn, State, Extra) -> {ok, NewState}
+%% Purpose: Convert process state when code is changed
+%%--------------------------------------------------------------------
+%% gen_server callback: no state conversion is needed between code
+%% versions - the state is returned unchanged.
+code_change(_, CurrentState, _) ->
+    {ok, CurrentState}.
+
+%%--------------------------------------------------------------------
+%% Internal functions
+%%--------------------------------------------------------------------
+%% Route a request: reuse an existing persistent session when one is
+%% available, otherwise start a new handler process. When the number
+%% of open sessions has reached max_sessions the new connection is
+%% marked "Connection: close" so it is not kept persistent.
+%% Replies {ok, RequestId} for later cancellation/streaming.
+%%
+%% Fix: the original guard read State#options.max_sessions, i.e. it
+%% accessed the #state record as if it were an #options record. In a
+%% guard that simply fails, so the {no_session, OpenSessions} clause
+%% never matched and every "no session" case fell through to the
+%% non-persistent branch. The options record must be fetched from
+%% the state first, as done everywhere else in this module.
+handle_request(Request, State) ->
+    NewRequest = handle_cookies(generate_request_id(Request), State),
+    case select_session(Request#request.method,
+			Request#request.address,
+			Request#request.scheme, State) of
+	{ok, HandlerPid} ->
+	    pipeline(NewRequest, HandlerPid, State);
+	no_connection ->
+	    start_handler(NewRequest, State);
+	{no_session, OpenSessions} when OpenSessions <
+	    (State#state.options)#options.max_sessions ->
+	    start_handler(NewRequest, State);
+	{no_session, _} ->
+	    %% Do not start any more persistent connections
+	    %% towards this server.
+	    NewHeaders =
+		(NewRequest#request.headers)#http_request_h{connection
+							    = "close"},
+	    start_handler(NewRequest#request{headers = NewHeaders}, State)
+    end,
+    {reply, {ok, NewRequest#request.id}, State}.
+
+%% Look for a reusable persistent session towards {Host, Port} over
+%% Scheme. Only idempotent methods may be pipelined onto an existing
+%% connection; everything else always gets its own connection.
+select_session(Method, HostPort, Scheme, #state{options =
+					       #options{max_pipeline_length =
+							Max}}) ->
+    case httpc_request:is_idempotent(Method) of
+	true ->
+	    %% Match open (not-closing, 'false') sessions to the same
+	    %% host/port and scheme; '$1' and '$2' bind per-session
+	    %% values consumed by select_session/2 as [Pid, Len]
+	    %% pairs (presumably handler pid and pipeline length -
+	    %% the #tcp_session record fields are defined elsewhere).
+	    Candidates = ets:match(httpc_manager_session_db,
+				   {'_', {HostPort, '$1'},
+				    false, Scheme, '_', '$2'}),
+	    select_session(Candidates, Max);
+	false ->
+	    no_connection
+	end.
+
+%% Pick the candidate session with the shortest pipeline, ignoring
+%% sessions whose pipeline already exceeds MaxPipeline. Candidates
+%% are [Pid, PipelineLength] lists as produced by ets:match/2.
+%% Returns {ok, Pid}, no_connection (no candidates at all) or
+%% {no_session, N} when all N candidates are too busy.
+select_session([], _) ->
+    no_connection;
+select_session(Candidates, MaxPipeline) ->
+    %% The reverse keeps the original fold-accumulation order so
+    %% that ties are broken identically by the stable keysort below.
+    Eligible = lists:reverse([{Pid, Len} || [Pid, Len] <- Candidates,
+					    Len =< MaxPipeline]),
+    case lists:keysort(2, Eligible) of
+	[] ->
+	    {no_session, length(Candidates)};
+	[{HandlerPid, _} | _] ->
+	    {ok, HandlerPid}
+    end.
+
+%% Try to pipeline Request onto an existing handler; if the send
+%% fails for any reason (e.g. a timeout) fall back to starting a
+%% fresh handler instead.
+pipeline(Request, HandlerPid, State) ->
+    case (catch httpc_handler:send(Request, HandlerPid)) of
+	ok ->
+	    %% Remember which handler now serves this request id.
+	    ets:insert(State#state.handler_db, {Request#request.id,
+						HandlerPid,
+						Request#request.from});
+	_ -> %timeout pipelining failed
+	    start_handler(Request, State)
+    end.
+
+%% Spawn a dedicated handler process for Request, record it in the
+%% handler table and monitor it so that crashed handlers are cleaned
+%% up in handle_info({'DOWN', ...}).
+start_handler(Request, #state{handler_db = HandlerDb, options = Options}) ->
+    {ok, HandlerPid} = httpc_handler:start_link(Request, Options),
+    ets:insert(HandlerDb,
+	       {Request#request.id, HandlerPid, Request#request.from}),
+    erlang:monitor(process, HandlerPid).
+
+%% Ensure the request carries an id. A fresh request (id =
+%% undefined) gets a new reference; an automatic redirect or a
+%% retried pipelined request keeps the id it already has.
+generate_request_id(#request{id = undefined} = Request) ->
+    Request#request{id = make_ref()};
+generate_request_id(Request) ->
+    Request.
+
+%% Attach the matching cookie header to the outgoing request, unless
+%% cookie support is disabled or no cookie matches the request URL.
+handle_cookies(Request, #state{options = #options{cookies = disabled}}) ->
+    Request;
+handle_cookies(Request = #request{scheme = Scheme, address = Address,
+				  path = Path, headers =
+				  Headers = #http_request_h{other = Other}},
+	       #state{cookie_db = Db}) ->
+    case http_cookie:header(Scheme, Address, Path, Db) of
+	{"cookie", ""} ->
+	    %% Empty cookie value: nothing matched, send unchanged.
+	    Request;
+	CookieHeader ->
+	    NewHeaders =
+		Headers#http_request_h{other = [CookieHeader | Other]},
+	    Request#request{headers = NewHeaders}
+    end.
+
+%% Insert every cookie into the cookie database. Each insert must
+%% return ok (the match crashes otherwise); returns ok when done.
+do_store_cookies(Cookies, #state{cookie_db = CookieDb}) ->
+    lists:foreach(fun(Cookie) ->
+			  ok = http_cookie:insert(Cookie, CookieDb)
+		  end, Cookies).
+
+%% Synchronous call to the locally registered manager process.
+call(Msg, Timeout) ->
+    gen_server:call(?MODULE, Msg, Timeout).
+
+%% Asynchronous message to the locally registered manager process.
+cast(Msg) ->
+    gen_server:cast(?MODULE, Msg).
+
+%% Set up dbg tracing of this process for the 'verbose' option:
+%% 'debug' traces calls to this module's exported functions (dbg:tp),
+%% 'trace' also traces local functions (dbg:tpl); any other level
+%% changes nothing.
+handle_verbose(debug) ->
+    dbg:p(self(), [call]),
+    dbg:tp(?MODULE, [{'_', [], [{return_trace}]}]);
+handle_verbose(trace) ->
+    dbg:p(self(), [call]),
+    dbg:tpl(?MODULE, [{'_', [], [{return_trace}]}]);
+handle_verbose(_) ->
+    ok.
+
diff --git a/src/couch_inets/httpc_request.erl b/src/couch_inets/httpc_request.erl
new file mode 100644
index 00000000..1c74dc7b
--- /dev/null
+++ b/src/couch_inets/httpc_request.erl
@@ -0,0 +1,193 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+
+-module(httpc_request).
+
+-include("http_internal.hrl").
+-include("httpc_internal.hrl").
+
+%% We will not make the change to use base64 in stdlib in inets just yet.
+%% it will be included in the next major release of inets.
+-compile({nowarn_deprecated_function, {http_base_64, encode, 1}}).
+
+%%% Internal API
+-export([send/3, is_idempotent/1, is_client_closing/1]).
+
+%%%=========================================================================
+%%% Internal application API
+%%%=========================================================================
+%%-------------------------------------------------------------------------
+%% send(MaybeProxy, Request, Socket) ->
+%% MaybeProxy - {Host, Port}
+%% Host = string()
+%% Port = integer()
+%% Request - #request{}
+%% Socket - socket()
+%% CookieSupport - enabled | disabled | verify
+%%
+%% Description: Composes and sends a HTTP-request.
+%%-------------------------------------------------------------------------
+%% Compose and transmit the HTTP/1.1 request line, headers and body
+%% on Socket. SendAddr is the address the socket is connected to:
+%% when it differs from the request's target address we are talking
+%% through a proxy and must send the absolute URI instead of the
+%% path, plus any proxy authorization header.
+send(SendAddr, #request{method = Method, scheme = Scheme,
+			path = Path, pquery = Query, headers = Headers,
+			content = Content, address = Address,
+			abs_uri = AbsUri, headers_as_is = HeadersAsIs,
+			settings = HttpOptions,
+			userinfo = UserInfo},
+     Socket) ->
+
+    %% Userinfo from the URL becomes a basic-auth Authorization header.
+    TmpHeaders = handle_user_info(UserInfo, Headers),
+
+    {TmpHeaders2, Body} = post_data(Method, TmpHeaders, Content, HeadersAsIs),
+
+    {NewHeaders, Uri} = case Address of
+			    SendAddr ->
+				{TmpHeaders2, Path ++ Query};
+			    _Proxy ->
+				TmpHeaders3 =
+				    handle_proxy(HttpOptions, TmpHeaders2),
+				{TmpHeaders3, AbsUri}
+			    end,
+
+    %% A plain list means the caller supplied literal headers
+    %% ("headers as is"); otherwise format the header record.
+    FinalHeaders = case NewHeaders of
+		       HeaderList when is_list(HeaderList) ->
+			   headers(HeaderList, []);
+		       _ ->
+			   http_request:http_headers(NewHeaders)
+		   end,
+
+    Message =
+	lists:append([method(Method), " ", Uri, " HTTP/1.1", ?CRLF,
+		      FinalHeaders, ?CRLF, Body]),
+
+    http_transport:send(socket_type(Scheme), Socket, Message).
+
+%%-------------------------------------------------------------------------
+%% is_idempotent(Method) ->
+%% Method = atom()
+%%
+%% Description: Checks if Method is considered idempotent.
+%%-------------------------------------------------------------------------
+
+%% In particular, the convention has been established that the GET and
+%% HEAD methods SHOULD NOT have the significance of taking an action
+%% other than retrieval. These methods ought to be considered "safe".
+%% Returns true for the HTTP methods RFC 2616 treats as safe or
+%% idempotent: GET/HEAD (retrieval only), PUT/DELETE (N identical
+%% requests have the effect of one), and OPTIONS/TRACE (no side
+%% effects). Every other method is considered non-idempotent.
+is_idempotent(Method) ->
+    lists:member(Method, [head, get, put, delete, trace, options]).
+
+%%-------------------------------------------------------------------------
+%% is_client_closing(Headers) ->
+%% Headers = #http_request_h{}
+%%
+%% Description: Checks if the client has supplied a "Connection: close" header.
+%%-------------------------------------------------------------------------
+%% True iff the client request headers carry "Connection: close".
+is_client_closing(Headers) ->
+    Headers#http_request_h.connection =:= "close".
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+%% Compute the request body and the entity headers.
+%% For POST/PUT: fill in content-type/content-length (unless the
+%% caller supplied literal headers via HeadersAsIs), and withhold
+%% the body when "Expect: 100-continue" is set - it is sent after
+%% the server's interim response. Every other method gets an empty
+%% body, with either the record headers or the as-is header list.
+post_data(Method, Headers, {ContentType, Body}, HeadersAsIs)
+  when Method == post; Method == put ->
+    ContentLength = body_length(Body),
+    NewBody = case Headers#http_request_h.expect of
+		  "100-continue" ->
+		      "";
+		  _ ->
+		      Body
+	      end,
+
+    NewHeaders = case HeadersAsIs of
+		     [] ->
+			 Headers#http_request_h{'content-type' =
+						ContentType,
+						'content-length' =
+						ContentLength};
+		     _ ->
+			 HeadersAsIs
+		 end,
+
+    {NewHeaders, NewBody};
+
+post_data(_, Headers, _, []) ->
+    {Headers, ""};
+post_data(_, _, _, HeadersAsIs = [_|_]) ->
+    {HeadersAsIs, ""}.
+
+%% Content-Length of the request body as a decimal string; accepts
+%% the body either as an iolist/string or as a binary.
+body_length(Body) when is_list(Body) ->
+    integer_to_list(length(Body));
+body_length(Body) when is_binary(Body) ->
+    integer_to_list(size(Body)).
+
+%% HTTP method token for the request line: the method atom
+%% upper-cased, e.g. get -> "GET".
+method(Method) ->
+    http_util:to_upper(atom_to_list(Method)).
+
+%% Transport selector for http_transport: plain TCP for http, ssl
+%% (with empty options) for https.
+socket_type(Scheme) ->
+    case Scheme of
+	http ->
+	    ip_comm;
+	https ->
+	    {ssl, []}
+    end.
+
+%% Format a [{Field, Value}] list into one flat "Field: Value\r\n..."
+%% string. The accumulator reverses the input order, exactly as the
+%% original implementation did.
+headers([], Formatted) ->
+    lists:flatten(Formatted);
+headers([{Field, Value} | Remaining], Formatted) ->
+    Line = Field ++ ": " ++ Value ++ ?CRLF,
+    headers(Remaining, [Line | Formatted]).
+
+%% Add a basic-auth Proxy-Authorization header when proxy
+%% credentials are configured. A plain list means the caller used
+%% the "headers as is" option, in which case nothing is added.
+handle_proxy(_, Headers) when is_list(Headers) ->
+    Headers; %% Headers as is option was specified
+handle_proxy(HttpOptions, Headers) ->
+    case HttpOptions#http_options.proxy_auth of
+	undefined ->
+	    Headers;
+	{User, Password} ->
+	    UserPasswd = http_base_64:encode(User ++ ":" ++ Password),
+	    Headers#http_request_h{'proxy-authorization' =
+				   "Basic " ++ UserPasswd}
+    end.
+
+%% Turn "user:password" URL userinfo into a basic-auth Authorization
+%% header. Userinfo that does not split into exactly two ":"
+%% separated tokens (e.g. a password itself containing ':') is
+%% silently ignored - pre-existing limitation.
+handle_user_info([], Headers) ->
+    Headers;
+handle_user_info(UserInfo, Headers) ->
+    case string:tokens(UserInfo, ":") of
+	[User, Passwd] ->
+	    UserPasswd = http_base_64:encode(User ++ ":" ++ Passwd),
+	    Headers#http_request_h{authorization = "Basic " ++ UserPasswd};
+	_ ->
+	    Headers
+    end.
diff --git a/src/couch_inets/httpc_response.erl b/src/couch_inets/httpc_response.erl
new file mode 100644
index 00000000..5b6244b6
--- /dev/null
+++ b/src/couch_inets/httpc_response.erl
@@ -0,0 +1,320 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+
+-module(httpc_response).
+
+-include("http_internal.hrl").
+-include("httpc_internal.hrl").
+
+%% API
+-export([parse/1, result/2, send/2, error/2, is_server_closing/1,
+ stream_start/2]).
+
+%% Callback API - used for example if the header/body is received a
+%% little at a time on a socket.
+-export([parse_version/1, parse_status_code/1, parse_reason_phrase/1,
+ parse_headers/1, whole_body/1, whole_body/2]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+%% Entry point of the response decoder: start parsing the received
+%% binary at the HTTP version token.
+parse([Bin, MaxHeaderSize]) ->
+    parse_version(Bin, [], MaxHeaderSize, []).
+
+%% Continuation callback: append newly received data to the body
+%% accumulated so far and re-check completeness.
+whole_body([Bin, Body, Length]) ->
+    whole_body(<<Body/binary, Bin/binary>>, Length).
+
+%% Functions that may be returned during the decoding process
+%% if the input data is incomplete.
+%% Continuation callbacks: each re-enters the corresponding /4
+%% parser with the newly received data, prepending it to any
+%% partially buffered token (Rest) where one was saved.
+parse_version([Bin, Version, MaxHeaderSize, Result]) ->
+    parse_version(Bin, Version, MaxHeaderSize, Result).
+
+parse_status_code([Bin, Code, MaxHeaderSize, Result]) ->
+    parse_status_code(Bin, Code, MaxHeaderSize, Result).
+
+parse_reason_phrase([Bin, Rest, Phrase, MaxHeaderSize, Result]) ->
+    parse_reason_phrase(<<Rest/binary, Bin/binary>>, Phrase,
+			MaxHeaderSize, Result).
+
+parse_headers([Bin, Rest,Header, Headers, MaxHeaderSize, Result]) ->
+    parse_headers(<<Rest/binary, Bin/binary>>, Header, Headers,
+		  MaxHeaderSize, Result).
+
+%% Decide whether the whole response body has arrived.
+%% Returns {ok, Body} when at least Length bytes are present
+%% (Length >= 0), otherwise a {Module, Function, Args} continuation
+%% so more data can be collected. A Length of -1 means the server
+%% signals end-of-body by closing the connection, so we always keep
+%% collecting in that case.
+whole_body(Body, Length) ->
+    Received = size(Body),
+    if
+	Received < Length, Received > 0 ->
+	    {?MODULE, whole_body, [Body, Length]};
+	Received >= Length, Length >= 0 ->
+	    %% Potential trailing garbage is thrown away later in
+	    %% format_response/1; it may also be the start of the
+	    %% next pipelined response.
+	    {ok, Body};
+	true ->
+	    %% Length == -1 (or nothing received yet).
+	    {?MODULE, whole_body, [Body, Length]}
+    end.
+
+%%-------------------------------------------------------------------------
+%% result(Response, Request) ->
+%% Response - {StatusLine, Headers, Body}
+%% Request - #request{}
+%% Session - #tcp_session{}
+%%
+%% Description: Checks the status code ...
+%%-------------------------------------------------------------------------
+%% Dispatch on the response status code: finish streaming 200s,
+%% handle 100-continue, follow redirects when autoredirect is on
+%% (GET/HEAD only), special-case 503 and other 5xx, and pass
+%% everything else through unchanged.
+result(Response = {{_,200,_}, _, _},
+       Request = #request{stream = Stream}) when Stream =/= none ->
+    stream_end(Response, Request);
+
+result(Response = {{_,100,_}, _, _}, Request) ->
+    status_continue(Response, Request);
+
+%% In redirect loop
+result(Response = {{_, Code, _}, _, _}, Request =
+       #request{redircount = Redirects,
+		settings = #http_options{autoredirect = true}})
+  when Code div 100 == 3, Redirects > ?HTTP_MAX_REDIRECTS ->
+    %% Too many redirects already followed - stop following and hand
+    %% the 3xx response to the caller as is.
+    transparent(Response, Request);
+
+%% multiple choices
+result(Response = {{_, 300, _}, _, _},
+       Request = #request{settings =
+			  #http_options{autoredirect =
+					true}}) ->
+    redirect(Response, Request);
+
+%% 301/302/303/307 are followed automatically only for the safe
+%% methods HEAD and GET.
+result(Response = {{_, Code, _}, _, _},
+       Request = #request{settings =
+			  #http_options{autoredirect = true},
+			  method = head}) when Code == 301;
+					       Code == 302;
+					       Code == 303;
+					       Code == 307 ->
+    redirect(Response, Request);
+result(Response = {{_, Code, _}, _, _},
+       Request = #request{settings =
+			  #http_options{autoredirect = true},
+			  method = get}) when Code == 301;
+					      Code == 302;
+					      Code == 303;
+					      Code == 307 ->
+    redirect(Response, Request);
+
+
+result(Response = {{_,503,_}, _, _}, Request) ->
+    status_service_unavailable(Response, Request);
+result(Response = {{_,Code,_}, _, _}, Request) when (Code div 100) == 5 ->
+    status_server_error_50x(Response, Request);
+
+result(Response, Request) ->
+    transparent(Response, Request).
+
+%% Deliver Msg to the process waiting for the response, tagged with
+%% 'http'.
+send(To, Msg) ->
+    To ! {http, Msg}.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+%% Accumulate the HTTP version token up to the first space, then
+%% move on to the status code. An exhausted binary yields a
+%% continuation tuple so parsing can resume when more data arrives.
+parse_version(<<>>, Version, MaxHeaderSize, Result) ->
+    {?MODULE, parse_version, [Version, MaxHeaderSize,Result]};
+parse_version(<<?SP, Rest/binary>>, Version, MaxHeaderSize, Result) ->
+    parse_status_code(Rest, [], MaxHeaderSize,
+		      [lists:reverse(Version) | Result]);
+parse_version(<<Octet, Rest/binary>>, Version, MaxHeaderSize, Result) ->
+    parse_version(Rest, [Octet | Version], MaxHeaderSize,Result).
+
+%% Accumulate the status-code digits up to the next space, convert
+%% to an integer, then move on to the reason phrase. An exhausted
+%% binary yields a continuation tuple.
+parse_status_code(<<>>, StatusCodeStr, MaxHeaderSize, Result) ->
+    {?MODULE, parse_status_code, [StatusCodeStr, MaxHeaderSize, Result]};
+parse_status_code(<<?SP, Rest/binary>>, StatusCodeStr,
+		  MaxHeaderSize, Result) ->
+    parse_reason_phrase(Rest, [], MaxHeaderSize,
+			[list_to_integer(lists:reverse(StatusCodeStr)) |
+			 Result]);
+parse_status_code(<<Octet, Rest/binary>>, StatusCodeStr,
+		  MaxHeaderSize,Result) ->
+    parse_status_code(Rest, [Octet | StatusCodeStr], MaxHeaderSize, Result).
+
+%% Accumulate the reason phrase up to the CRLF ending the status
+%% line, then start on the headers. A lone trailing CR is buffered
+%% in the continuation so a CRLF split across packets is handled.
+parse_reason_phrase(<<>>, Phrase, MaxHeaderSize, Result) ->
+    {?MODULE, parse_reason_phrase, [<<>>, Phrase, MaxHeaderSize,Result]};
+parse_reason_phrase(<<?CR, ?LF, Rest/binary>>, Phrase,
+		    MaxHeaderSize, Result) ->
+    parse_headers(Rest, [], [], MaxHeaderSize,
+		  [lists:reverse(Phrase) | Result]);
+parse_reason_phrase(<<?CR>> = Data, Phrase, MaxHeaderSize, Result) ->
+    {?MODULE, parse_reason_phrase, [Data, Phrase, MaxHeaderSize,Result]};
+parse_reason_phrase(<<Octet, Rest/binary>>, Phrase, MaxHeaderSize, Result) ->
+    parse_reason_phrase(Rest, [Octet | Phrase], MaxHeaderSize, Result).
+
+%% Accumulate header lines until the CRLFCRLF ending the header
+%% section, enforce MaxHeaderSize, build the #http_response_h record
+%% and hand back the remaining binary as (the start of) the body.
+%% Partial CR/LF sequences at a packet boundary are buffered in the
+%% continuation tuple.
+parse_headers(<<>>, Header, Headers, MaxHeaderSize, Result) ->
+    {?MODULE, parse_headers, [<<>>, Header, Headers, MaxHeaderSize, Result]};
+parse_headers(<<?CR,?LF,?CR,?LF,Body/binary>>, Header, Headers,
+	      MaxHeaderSize, Result) ->
+    HTTPHeaders = [lists:reverse(Header) | Headers],
+    Length = lists:foldl(fun(H, Acc) -> length(H) + Acc end,
+			   0, HTTPHeaders),
+    %% Note: when MaxHeaderSize is the atom 'nolimit' the first
+    %% comparison is already true (in Erlang term order any integer
+    %% is smaller than any atom); the explicit 'or' documents intent.
+    case ((Length =< MaxHeaderSize) or (MaxHeaderSize == nolimit)) of
+	true ->
+	    ResponseHeaderRcord =
+		http_response:headers(HTTPHeaders, #http_response_h{}),
+	    {ok, list_to_tuple(
+		   lists:reverse([Body, ResponseHeaderRcord | Result]))};
+	false ->
+	    %% NOTE(review): MaxHeaderSize-Length is negative here;
+	    %% the excess is arguably Length-MaxHeaderSize - confirm
+	    %% what consumers of this error term expect.
+	    throw({error, {header_too_long, MaxHeaderSize,
+			   MaxHeaderSize-Length}})
+    end;
+parse_headers(<<?CR,?LF,?CR>> = Data, Header, Headers,
+	      MaxHeaderSize, Result) ->
+    {?MODULE, parse_headers, [Data, Header, Headers, MaxHeaderSize, Result]};
+parse_headers(<<?CR,?LF>> = Data, Header, Headers,
+	      MaxHeaderSize, Result) ->
+    {?MODULE, parse_headers, [Data, Header, Headers, MaxHeaderSize, Result]};
+parse_headers(<<?CR,?LF, Octet, Rest/binary>>, Header, Headers,
+	      MaxHeaderSize, Result) ->
+    parse_headers(Rest, [Octet],
+		  [lists:reverse(Header) | Headers], MaxHeaderSize, Result);
+parse_headers(<<?CR>> = Data, Header, Headers,
+	      MaxHeaderSize, Result) ->
+    {?MODULE, parse_headers, [Data, Header, Headers, MaxHeaderSize, Result]};
+parse_headers(<<Octet, Rest/binary>>, Header, Headers,
+	      MaxHeaderSize, Result) ->
+    parse_headers(Rest, [Octet | Header], Headers, MaxHeaderSize, Result).
+
+
+%% RFC2616, Section 10.1.1
+%% Note:
+%% - Only act on the 100 status if the request included the
+%% "Expect:100-continue" header, otherwise just ignore this response.
+%% A 100 status is acted upon only when the request carried
+%% "Expect: 100-continue"; otherwise it is ignored and any body data
+%% is kept as the start of the real response.
+status_continue(_, #request{headers =
+			    #http_request_h{expect = "100-continue"}}) ->
+    continue;
+
+status_continue({_,_, Data}, _) ->
+    %% The data in the body in this case is actually part of the real
+    %% response sent after the "fake" 100-continue.
+    {ignore, Data}.
+
+%% RFC 2616: a 503 may carry a Retry-After header giving a delay in
+%% seconds. Retry automatically when the delay is small (at most two
+%% digits, i.e. 99 s or less); otherwise treat it like any other 5xx.
+%%
+%% Fix: Retry-After is expressed in seconds while the retry timer
+%% takes milliseconds; the original multiplied by 100, waiting only a
+%% tenth of the requested time. Multiply by 1000 instead.
+%% NOTE(review): an HTTP-date Retry-After value would crash
+%% list_to_integer/1 - pre-existing limitation, unchanged here.
+status_service_unavailable(Response = {_, Headers, _}, Request) ->
+    case Headers#http_response_h.'retry-after' of
+	undefined ->
+	    status_server_error_50x(Response, Request);
+	Time when length(Time) < 3 -> % Wait only 99 s or less
+	    NewTime = list_to_integer(Time) * 1000, % time in ms
+	    {_, Data} = format_response(Response),
+	    {retry, {NewTime, Request}, Data};
+	_ ->
+	    status_server_error_50x(Response, Request)
+    end.
+
+%% Any other 5xx: format the response and tell the handler to stop,
+%% delivering the formatted message to the requesting process.
+status_server_error_50x(Response, Request) ->
+    {Msg, _} = format_response(Response),
+    {stop, {Request#request.id, Msg}}.
+
+
+%% Follow a redirect response. Without a Location header the
+%% response is passed through unchanged. In relaxed mode a
+%% scheme-less Location is treated as a relative URI and retried
+%% after being made absolute; otherwise the rewritten request (with
+%% an incremented redirect count) is returned for re-dispatch.
+redirect(Response = {StatusLine, Headers, Body}, Request) ->
+    {_, Data} =  format_response(Response),
+    case Headers#http_response_h.location of
+	undefined ->
+	    transparent(Response, Request);
+	RedirUrl ->
+	    case http_uri:parse(RedirUrl) of
+		{error, no_scheme} when
+		(Request#request.settings)#http_options.relaxed ->
+		    %% Relative URI - absolutize it and try again.
+		    NewLocation = fix_relative_uri(Request, RedirUrl),
+		    redirect({StatusLine, Headers#http_response_h{
+					    location = NewLocation},
+			      Body}, Request);
+		{error, Reason} ->
+		    {ok, error(Request, Reason), Data};
+		%% Automatic redirection
+		{Scheme, _, Host, Port, Path, Query} ->
+		    NewHeaders =
+			(Request#request.headers)#http_request_h{host =
+								 Host},
+		    NewRequest =
+			Request#request{redircount =
+					Request#request.redircount+1,
+					scheme = Scheme,
+					headers = NewHeaders,
+					address = {Host,Port},
+					path = Path,
+					pquery = Query,
+					abs_uri =
+					atom_to_list(Scheme) ++ "://" ++
+					Host ++ ":" ++
+					integer_to_list(Port) ++
+					Path ++ Query},
+		    {redirect, NewRequest, Data}
+	    end
+    end.
+
+%%% Guessing that we received a relative URI, fix it to become an absoluteURI
+%% Guessing that we received a relative URI, fix it to become an
+%% absoluteURI.
+%% Fix: Port is an integer (cf. redirect/2, which formats the same
+%% value with integer_to_list/1); the original appended it directly
+%% to a string, producing an improper list instead of a URL string.
+fix_relative_uri(Request, RedirUrl) ->
+    {Server, Port} = Request#request.address,
+    Path = Request#request.path,
+    atom_to_list(Request#request.scheme) ++ "://" ++ Server ++ ":"
+	++ integer_to_list(Port) ++ Path ++ RedirUrl.
+
+%% Build the {RequestId, {error, Reason}} message delivered to the
+%% requesting process on failure.
+error(#request{id = Id}, Reason) ->
+    {Id, {error, Reason}}.
+
+%% Pass the response through to the caller without any status-code
+%% specific treatment.
+transparent(Response, Request) ->
+    {Msg, Data} = format_response(Response),
+    {ok, {Request#request.id, Msg}, Data}.
+
+%% First message of a streamed response: the response headers as a
+%% key/value list.
+stream_start(Headers, Request) ->
+    {Request#request.id, stream_start, http_response:header_list(Headers)}.
+
+%% Final message of a streamed response: 'stream_end' with the
+%% headers when streaming to the caller itself, 'saved_to_file'
+%% otherwise.
+stream_end(Response, Request = #request{stream = self}) ->
+    {{_, Headers, _}, Data} = format_response(Response),
+    {ok, {Request#request.id, stream_end, Headers}, Data};
+stream_end(Response, Request) ->
+    {_, Data} = format_response(Response),
+    {ok, {Request#request.id, saved_to_file}, Data}.
+
+%% True iff the server response carries "Connection: close", i.e.
+%% the server intends to terminate the session after this response.
+%% Fix: replaces the deprecated record/2 guard with the is_record/2
+%% guard BIF (the file already uses is_list/is_binary guard BIFs);
+%% a non-record argument still fails with function_clause, as before.
+is_server_closing(Headers) when is_record(Headers, http_response_h) ->
+    Headers#http_response_h.connection =:= "close".
+
+%% Split the received binary into the body belonging to this
+%% response (per Content-Length) and any surplus bytes, which belong
+%% to the next pipelined response. Returns {FormattedResponse, Rest}
+%% where the headers are converted to a key/value list.
+format_response({StatusLine, Headers, Body = <<>>}) ->
+    {{StatusLine, http_response:header_list(Headers), Body}, <<>>};
+
+format_response({StatusLine, Headers, Body}) ->
+    Length = list_to_integer(Headers#http_response_h.'content-length'),
+    {NewBody, Data} =
+	case Length of
+	    0 ->
+		{Body, <<>>};
+	    -1 -> % When no length indicator is provided
+		{Body, <<>>},
+	    Length when Length =< size(Body) ->
+		<<BodyThisReq:Length/binary, Next/binary>> = Body,
+		{BodyThisReq, Next};
+	    _ -> %% Connection prematurely ended.
+		{Body, <<>>}
+	end,
+    {{StatusLine, http_response:header_list(Headers), NewBody}, Data}.
+
diff --git a/src/couch_inets/httpc_sup.erl b/src/couch_inets/httpc_sup.erl
new file mode 100644
index 00000000..5583c6c8
--- /dev/null
+++ b/src/couch_inets/httpc_sup.erl
@@ -0,0 +1,70 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(httpc_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/1]).
+
+%% Supervisor callback
+-export([init/1]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+%% Start the http client supervisor, locally registered under this
+%% module's name. HttpcServices is the (possibly empty) list of
+%% configured client services, passed on to init/1.
+start_link(HttpcServices) ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, [HttpcServices]).
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+%% Supervisor callback. An empty argument list means no explicitly
+%% configured services; it is normalised to [[]] and re-dispatched
+%% so a default child spec is produced.
+init([]) ->
+    init([[]]);
+init([HttpcServices]) ->
+    %% one_for_one: restart only the crashed child, allowing at most
+    %% 10 restarts within 3600 seconds.
+    {ok, {{one_for_one, 10, 3600}, child_spec(HttpcServices, [])}}.
+
+%% Build the list of child specs. With no configured services at all
+%% a single default profile using session-only cookies is started;
+%% otherwise one child per configured httpc profile, dropping the
+%% empty {} specs that httpc_child_spec/2 returns for unsupported
+%% profiles.
+child_spec([], []) ->
+    [httpc_child_spec(default, only_session_cookies)];
+child_spec([], Acc) ->
+    Acc;
+child_spec([{httpc, {Profile, Dir}} | Rest], Acc) ->
+    case httpc_child_spec(Profile, Dir) of
+	{} ->
+	    child_spec(Rest, Acc);
+	Spec ->
+	    child_spec(Rest, [Spec | Acc])
+    end.
+
+%% Note: currently only one profile is supported, i.e. the default
+%% profile; any other profile yields {}, which child_spec/2 drops.
+%% The spec is {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+httpc_child_spec(default, Dir) ->
+    {httpc_manager,
+     {httpc_manager, start_link, [{default, Dir}]},
+     permanent, 4000, worker, [httpc_manager]};
+httpc_child_spec(_, _) ->
+    {}.
+
+
diff --git a/src/couch_inets/httpd.erl b/src/couch_inets/httpd.erl
new file mode 100644
index 00000000..830753c4
--- /dev/null
+++ b/src/couch_inets/httpd.erl
@@ -0,0 +1,516 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(httpd).
+
+-export([multi_start/1, multi_start_link/1,
+ start/0, start/1,
+ start_link/0, start_link/1,
+ start_child/0,start_child/1,
+ multi_stop/1,
+ stop/0,stop/1,stop/2,
+ stop_child/0,stop_child/1,stop_child/2,
+ multi_restart/1,
+ restart/0,restart/1,restart/2,
+ parse_query/1]).
+
+%% Optional start related stuff...
+-export([load/1, load_mime_types/1, start2/1, start_link2/1, stop2/1]).
+
+%% Management stuff
+-export([block/0,block/1,block/2,block/3,block/4,
+ unblock/0,unblock/1,unblock/2]).
+
+%% Debugging and status info stuff...
+-export([get_status/1,get_status/2,get_status/3,
+ get_admin_state/0,get_admin_state/1,get_admin_state/2,
+ get_usage_state/0,get_usage_state/1,get_usage_state/2]).
+
+-include("httpd.hrl").
+
+%% start/0,1 - start an httpd instance, stand-alone (not linked to
+%% the caller), from a configuration file.
+start() ->
+    start("/var/tmp/server_root/conf/5984.conf").
+
+start(ConfigFile) ->
+    httpd_instance_sup:start(ConfigFile).
+
+%% start_link/0,1 - start an httpd instance linked to the caller.
+start_link() ->
+    %% BUG FIX: this clause previously delegated to start/1, which
+    %% returned an UNLINKED instance despite the function's name.
+    start_link("/var/tmp/server_root/conf/5984.conf").
+
+start_link(ConfigFile) when is_list(ConfigFile) ->
+    httpd_instance_sup:start_link(ConfigFile).
+
+%% start2/1, start_link2/1 - start from an already-loaded config
+%% property list instead of a configuration file.
+start2(Config) when is_list(Config) ->
+    httpd_instance_sup:start2(Config).
+
+start_link2(Config) ->
+    httpd_instance_sup:start_link2(Config).
+
+%% stop/0,1,2 - stop a running instance identified by (default)
+%% port 5984, a pid, a config-file path, or an address/port pair.
+stop() ->
+ stop(5984).
+
+stop(Port) when is_integer(Port) ->
+ stop(undefined, Port);
+stop(Pid) when is_pid(Pid) ->
+ httpd_instance_sup:stop(Pid);
+stop(ConfigFile) when is_list(ConfigFile) ->
+ httpd_instance_sup:stop(ConfigFile).
+
+stop(Addr, Port) when is_integer(Port) ->
+ httpd_instance_sup:stop(Addr, Port).
+
+%% stop2/1 - counterpart of start2/1: stop by config property list.
+stop2(Config) when is_list(Config) ->
+ httpd_instance_sup:stop2(Config).
+
+%% start_child/0,1 - start an httpd instance as a child of the
+%% inets top supervisor (httpd_sup), defaulting to the stock
+%% configuration file.
+start_child() ->
+    start_child("/var/tmp/server_root/conf/5984.conf").
+
+start_child(ConfigFile) ->
+    httpd_sup:start_child(ConfigFile).
+
+%% stop_child/0,1,2 - stop a child started with start_child,
+%% identified by bind address and port (default port 5984).
+stop_child() ->
+    stop_child(5984).
+
+stop_child(Port) ->
+    stop_child(undefined, Port).
+
+stop_child(Addr, Port) when is_integer(Port) ->
+    %% Modernized guard: integer/1 is the deprecated old-style guard
+    %% test; is_integer/1 matches every other guard in this module.
+    httpd_sup:stop_child(Addr, Port).
+
+%% multi_start/1 - read a "multi" file (one config file per line,
+%% '#' comments allowed) and start one instance per entry.
+%% Returns {ok, Results} with one result per config file.
+multi_start(MultiConfigFile) ->
+ case read_multi_file(MultiConfigFile) of
+ {ok,ConfigFiles} ->
+ mstart(ConfigFiles);
+ Error ->
+ Error
+ end.
+
+%% Accumulate start/1 results in order; individual failures do not
+%% abort the remaining starts.
+mstart(ConfigFiles) ->
+ mstart(ConfigFiles,[]).
+mstart([],Results) ->
+ {ok,lists:reverse(Results)};
+mstart([H|T],Results) ->
+ Res = start(H),
+ mstart(T,[Res|Results]).
+
+%% multi_start_link/1 - as multi_start/1 but each instance is
+%% linked to the calling process.
+multi_start_link(MultiConfigFile) ->
+ case read_multi_file(MultiConfigFile) of
+ {ok,ConfigFiles} ->
+ mstart_link(ConfigFiles);
+ Error ->
+ Error
+ end.
+mstart_link(ConfigFiles) ->
+ mstart_link(ConfigFiles,[]).
+mstart_link([],Results) ->
+ {ok,lists:reverse(Results)};
+mstart_link([H|T],Results) ->
+ Res = start_link(H),
+ mstart_link(T,[Res|Results]).
+
+%% multi_stop/1 - stop every instance listed in the multi file;
+%% collects one stop/1 result per entry.
+multi_stop(MultiConfigFile) ->
+ case read_multi_file(MultiConfigFile) of
+ {ok,ConfigFiles} ->
+ mstop(ConfigFiles);
+ Error ->
+ Error
+ end.
+
+mstop(ConfigFiles) ->
+ mstop(ConfigFiles,[]).
+mstop([],Results) ->
+ {ok,lists:reverse(Results)};
+mstop([H|T],Results) ->
+ Res = stop(H),
+ mstop(T,[Res|Results]).
+
+%% multi_restart/1 - restart every instance listed in the multi
+%% file; collects one restart/1 result per entry.
+multi_restart(MultiConfigFile) ->
+ case read_multi_file(MultiConfigFile) of
+ {ok,ConfigFiles} ->
+ mrestart(ConfigFiles);
+ Error ->
+ Error
+ end.
+
+mrestart(ConfigFiles) ->
+ mrestart(ConfigFiles,[]).
+mrestart([],Results) ->
+ {ok,lists:reverse(Results)};
+mrestart([H|T],Results) ->
+ Res = restart(H),
+ mrestart(T,[Res|Results]).
+
+%% restart/0,1,2 - restart a running instance identified by the
+%% default port, a port, a config file, or an address/port pair.
+restart() -> restart(undefined,5984).
+
+restart(Port) when is_integer(Port) ->
+    restart(undefined,Port);
+restart(ConfigFile) when is_list(ConfigFile) ->
+    case get_addr_and_port(ConfigFile) of
+        {ok,Addr,Port} ->
+            restart(Addr,Port);
+        Error ->
+            Error
+    end.
+
+restart(Addr,Port) when is_integer(Port) ->
+    do_restart(Addr,Port).
+
+%% Resolve the registered manager name for Addr/Port and forward
+%% the restart request to it.
+do_restart(Addr,Port) when is_integer(Port) ->
+    Name = make_name(Addr,Port),
+    case whereis(Name) of
+        %% is_pid/1 replaces the deprecated old-style pid/1 guard,
+        %% consistent with the rest of this module.
+        Pid when is_pid(Pid) ->
+            httpd_manager:restart(Pid);
+        _ ->
+            {error,not_started}
+    end.
+
+
+%%% =========================================================
+%%% Function: block/0, block/1, block/2, block/3, block/4
+%%% block()
+%%% block(Port)
+%%% block(ConfigFile)
+%%% block(Addr,Port)
+%%% block(Port,Mode)
+%%% block(ConfigFile,Mode)
+%%% block(Addr,Port,Mode)
+%%% block(ConfigFile,Mode,Timeout)
+%%% block(Addr,Port,Mode,Timeout)
+%%%
+%%% Returns: ok | {error,Reason}
+%%%
+%%% Description: This function is used to block an HTTP server.
+%%% The blocking can be done in two ways,
+%%% disturbing or non-disturbing. Default is disturbing.
+%%% When a HTTP server is blocked, all requests are rejected
+%%% (status code 503).
+%%%
+%%% disturbing:
+%%% By performing a disturbing block, the server
+%%% is blocked forcefully and all ongoing requests
+%%% are terminated. No new connections are accepted.
+%%% If a timeout time is given then, on-going requests
+%%% are given this much time to complete before the
+%%% server is forcefully blocked. In this case no new
+%%%               connections are accepted.
+%%%
+%%% non-disturbing:
+%%%      A non-disturbing block is more graceful. No
+%%% new connections are accepted, but the ongoing
+%%% requests are allowed to complete.
+%%% If a timeout time is given, it waits this long before
+%%% giving up (the block operation is aborted and the
+%%% server state is once more not-blocked).
+%%%
+%%% Types: Port -> integer()
+%%% Addr -> {A,B,C,D} | string() | undefined
+%%% ConfigFile -> string()
+%%% Mode -> disturbing | non_disturbing
+%%% Timeout -> integer()
+%%%
+%% block/0 - block the default instance (port 5984), disturbing.
+block() -> block(undefined,5984,disturbing).
+
+%% block/1 - by port or by config file; mode defaults to disturbing.
+block(Port) when is_integer(Port) ->
+ block(undefined,Port,disturbing);
+
+block(ConfigFile) when is_list(ConfigFile) ->
+ case get_addr_and_port(ConfigFile) of
+ {ok,Addr,Port} ->
+ block(Addr,Port,disturbing);
+ Error ->
+ Error
+ end.
+
+%% block/2 - (Addr,Port), (Port,Mode) or (ConfigFile,Mode).
+block(Addr,Port) when is_integer(Port) ->
+ block(Addr,Port,disturbing);
+
+block(Port,Mode) when is_integer(Port), is_atom(Mode) ->
+ block(undefined,Port,Mode);
+
+block(ConfigFile,Mode) when is_list(ConfigFile), is_atom(Mode) ->
+ case get_addr_and_port(ConfigFile) of
+ {ok,Addr,Port} ->
+ block(Addr,Port,Mode);
+ Error ->
+ Error
+ end.
+
+
+%% block/3 - only the two valid modes are accepted; any other mode
+%% atom fails with a function_clause error by design.
+block(Addr,Port,disturbing) when is_integer(Port) ->
+ do_block(Addr,Port,disturbing);
+block(Addr,Port,non_disturbing) when is_integer(Port) ->
+ do_block(Addr,Port,non_disturbing);
+
+block(ConfigFile,Mode,Timeout) when is_list(ConfigFile), is_atom(Mode),
+ is_integer(Timeout) ->
+ case get_addr_and_port(ConfigFile) of
+ {ok,Addr,Port} ->
+ block(Addr,Port,Mode,Timeout);
+ Error ->
+ Error
+ end.
+
+
+%% block/4 - blocking with an explicit timeout (semantics of the
+%% timeout depend on the mode; see the module comment above).
+block(Addr,Port,non_disturbing,Timeout) when
+ is_integer(Port), is_integer(Timeout) ->
+ do_block(Addr,Port,non_disturbing,Timeout);
+block(Addr,Port,disturbing,Timeout) when is_integer(Port),
+ is_integer(Timeout) ->
+ do_block(Addr,Port,disturbing,Timeout).
+
+%% do_block/3,4 - resolve the registered manager process for
+%% Addr/Port and ask it to block, optionally with a timeout.
+%% Returns {error,not_started} when no manager is registered.
+do_block(Addr,Port,Mode) when is_integer(Port), is_atom(Mode) ->
+    Name = make_name(Addr,Port),
+    case whereis(Name) of
+        %% is_pid/1 replaces the deprecated old-style pid/1 guard,
+        %% consistent with the rest of this module.
+        Pid when is_pid(Pid) ->
+            httpd_manager:block(Pid,Mode);
+        _ ->
+            {error,not_started}
+    end.
+
+
+do_block(Addr,Port,Mode,Timeout) when is_integer(Port), is_atom(Mode) ->
+    Name = make_name(Addr,Port),
+    case whereis(Name) of
+        Pid when is_pid(Pid) ->
+            httpd_manager:block(Pid,Mode,Timeout);
+        _ ->
+            {error,not_started}
+    end.
+
+
+%%% =========================================================
+%%% Function: unblock/0, unblock/1, unblock/2
+%%% unblock()
+%%% unblock(Port)
+%%% unblock(ConfigFile)
+%%% unblock(Addr,Port)
+%%%
+%%% Description: This function is used to reverse a previous block
+%%% operation on the HTTP server.
+%%%
+%%% Types: Port -> integer()
+%%% Addr -> {A,B,C,D} | string() | undefined
+%%% ConfigFile -> string()
+%%%
+%% unblock/0,1,2 - reverse a previous block operation, identified
+%% by the default port, a port, a config file, or address+port.
+unblock() -> unblock(undefined,5984).
+unblock(Port) when is_integer(Port) -> unblock(undefined,Port);
+
+unblock(ConfigFile) when is_list(ConfigFile) ->
+    case get_addr_and_port(ConfigFile) of
+        {ok,Addr,Port} ->
+            unblock(Addr,Port);
+        Error ->
+            Error
+    end.
+
+unblock(Addr,Port) when is_integer(Port) ->
+    Name = make_name(Addr,Port),
+    case whereis(Name) of
+        %% is_pid/1 replaces the deprecated old-style pid/1 guard,
+        %% consistent with the rest of this module.
+        Pid when is_pid(Pid) ->
+            httpd_manager:unblock(Pid);
+        _ ->
+            {error,not_started}
+    end.
+
+%%% =========================================================
+%%% Function: get_admin_state/0, get_admin_state/1, get_admin_state/2
+%%% get_admin_state()
+%%% get_admin_state(Port)
+%%% get_admin_state(Addr,Port)
+%%%
+%%% Returns: {ok,State} | {error,Reason}
+%%%
+%%% Description: This function is used to retrieve the administrative
+%%% state of the HTTP server.
+%%%
+%%% Types: Port -> integer()
+%%% Addr -> {A,B,C,D} | string() | undefined
+%%% State -> unblocked | shutting_down | blocked
+%%% Reason -> term()
+%%%
+%% get_admin_state/0,1,2 - query the administrative state
+%% (unblocked | shutting_down | blocked) of a server instance.
+get_admin_state() -> get_admin_state(undefined,5984).
+get_admin_state(Port) when is_integer(Port) -> get_admin_state(undefined,Port);
+
+get_admin_state(ConfigFile) when is_list(ConfigFile) ->
+    case get_addr_and_port(ConfigFile) of
+        {ok,Addr,Port} ->
+            %% BUG FIX: this clause called unblock/2 (copy-paste
+            %% error), which UNBLOCKED the server instead of
+            %% querying its admin state.
+            get_admin_state(Addr,Port);
+        Error ->
+            Error
+    end.
+
+get_admin_state(Addr,Port) when is_integer(Port) ->
+    Name = make_name(Addr,Port),
+    case whereis(Name) of
+        Pid when is_pid(Pid) ->
+            httpd_manager:get_admin_state(Pid);
+        _ ->
+            {error,not_started}
+    end.
+
+
+
+%%% =========================================================
+%%% Function: get_usage_state/0, get_usage_state/1, get_usage_state/2
+%%% get_usage_state()
+%%% get_usage_state(Port)
+%%% get_usage_state(Addr,Port)
+%%%
+%%% Returns: {ok,State} | {error,Reason}
+%%%
+%%% Description: This function is used to retrieve the usage
+%%% state of the HTTP server.
+%%%
+%%% Types: Port -> integer()
+%%% Addr -> {A,B,C,D} | string() | undefined
+%%% State -> idle | active | busy
+%%% Reason -> term()
+%%%
+%% get_usage_state/0,1,2 - query the usage state
+%% (idle | active | busy) of a server instance.
+get_usage_state() -> get_usage_state(undefined,5984).
+get_usage_state(Port) when is_integer(Port) -> get_usage_state(undefined,Port);
+
+get_usage_state(ConfigFile) when is_list(ConfigFile) ->
+    case get_addr_and_port(ConfigFile) of
+        {ok,Addr,Port} ->
+            %% BUG FIX: this clause called unblock/2 (copy-paste
+            %% error), which UNBLOCKED the server instead of
+            %% querying its usage state.
+            get_usage_state(Addr,Port);
+        Error ->
+            Error
+    end.
+
+get_usage_state(Addr,Port) when is_integer(Port) ->
+    Name = make_name(Addr,Port),
+    case whereis(Name) of
+        Pid when is_pid(Pid) ->
+            httpd_manager:get_usage_state(Pid);
+        _ ->
+            {error,not_started}
+    end.
+
+
+
+%%% =========================================================
+%% Function: get_status(ConfigFile) -> Status
+%% get_status(Port) -> Status
+%% get_status(Addr,Port) -> Status
+%% get_status(Port,Timeout) -> Status
+%% get_status(Addr,Port,Timeout) -> Status
+%%
+%% Arguments: ConfigFile -> string()
+%% Configuration file from which Port and
+%% BindAddress will be extracted.
+%% Addr -> {A,B,C,D} | string()
+%% Bind Address of the http server
+%% Port -> integer()
+%% Port number of the http server
+%% Timeout -> integer()
+%% Timeout time for the call
+%%
+%% Returns: Status -> list()
+%%
+%% Description: This function is used when the caller runs in the
+%% same node as the http server or if calling with a
+%% program such as erl_call (see erl_interface).
+%%
+
+%% get_status/1 - by config file (resolving Addr/Port from it) or
+%% by port with the default 5-second call timeout.
+get_status(ConfigFile) when is_list(ConfigFile) ->
+ case get_addr_and_port(ConfigFile) of
+ {ok,Addr,Port} ->
+ get_status(Addr,Port);
+ Error ->
+ Error
+ end;
+
+get_status(Port) when is_integer(Port) ->
+ get_status(undefined,Port,5000).
+
+%% get_status/2 - (Port,Timeout) or (Addr,Port) with default timeout.
+get_status(Port,Timeout) when is_integer(Port), is_integer(Timeout) ->
+ get_status(undefined,Port,Timeout);
+
+get_status(Addr,Port) when is_list(Addr), is_integer(Port) ->
+ get_status(Addr,Port,5000).
+
+%% get_status/3 - ask the registered manager for its status list.
+%% NOTE: returns the bare atom not_started (not an error tuple)
+%% when the instance is down, unlike the other query functions.
+get_status(Addr,Port,Timeout) when is_integer(Port) ->
+ Name = make_name(Addr,Port),
+ case whereis(Name) of
+ Pid when is_pid(Pid) ->
+ httpd_manager:get_status(Pid,Timeout);
+ _ ->
+ not_started
+ end.
+
+%% load/1, load_mime_types/1 - thin wrappers around httpd_conf.
+load(ConfigFile) ->
+ httpd_conf:load(ConfigFile).
+
+load_mime_types(MimeTypesFile) ->
+ httpd_conf:load_mime_types(MimeTypesFile).
+
+%% parse_query/1 - split an application/x-www-form-urlencoded query
+%% string on '&' or ';' and decode each key=value pair into a
+%% [{Key,Value}] list.  NOTE(review): uses the old regexp module,
+%% which was later deprecated in OTP in favour of re.
+parse_query(String) ->
+ {ok, SplitString} = regexp:split(String,"[&;]"),
+ foreach(SplitString).
+
+%% Decode one key=value fragment per element: '+' becomes space,
+%% then both sides are percent-decoded.  Fragments without '=' as
+%% well as empty fragments are dropped via the {ok,_} fallthrough.
+foreach([]) ->
+ [];
+foreach([KeyValue|Rest]) ->
+ {ok, Plus2Space, _} = regexp:gsub(KeyValue,"[\+]"," "),
+ case regexp:split(Plus2Space,"=") of
+ {ok,[Key|Value]} ->
+ [{httpd_util:decode_hex(Key),
+ httpd_util:decode_hex(lists:flatten(Value))}|foreach(Rest)];
+ {ok,_} ->
+ foreach(Rest)
+ end.
+
+%% Load a config file and extract the bind address and port.
+%% NOTE(review): the fallback port here is 80, while every default
+%% elsewhere in this module is 5984 - confirm this is intentional.
+get_addr_and_port(ConfigFile) ->
+ case httpd_conf:load(ConfigFile) of
+ {ok,ConfigList} ->
+ Port = httpd_util:key1search(ConfigList,port,80),
+ Addr = httpd_util:key1search(ConfigList,bind_address),
+ {ok,Addr,Port};
+ Error ->
+ Error
+ end.
+
+
+%% Registered-name helper: the manager for Addr/Port is registered
+%% under the name produced by httpd_util:make_name("httpd",...).
+make_name(Addr,Port) ->
+ httpd_util:make_name("httpd",Addr,Port).
+
+
+%% Multi stuff
+%%
+
+%% Read a "multi" file: one config-file path per line.  Lines that
+%% are empty (after httpd_conf:clean/1) or start with '#' are
+%% skipped.  Returns {ok,[Line]} or the file:open error.
+read_multi_file(File) ->
+ read_mfile(file:open(File,read)).
+
+read_mfile({ok,Fd}) ->
+ read_mfile(read_line(Fd),Fd,[]);
+read_mfile(Error) ->
+ Error.
+
+%% Accumulate cleaned lines until eof, preserving file order.
+read_mfile(eof, _Fd, SoFar) ->
+ {ok,lists:reverse(SoFar)};
+read_mfile([$# | _Comment], Fd, SoFar) ->
+ read_mfile(read_line(Fd), Fd, SoFar);
+read_mfile([], Fd, SoFar) ->
+ read_mfile(read_line(Fd), Fd, SoFar);
+read_mfile(Line, Fd, SoFar) ->
+ read_mfile(read_line(Fd), Fd, [Line | SoFar]).
+
+%% Read one line and strip whitespace/newline via httpd_conf:clean/1.
+read_line(Fd) -> read_line1(io:get_line(Fd, [])).
+read_line1(eof) -> eof;
+read_line1(String) -> httpd_conf:clean(String).
+
+
diff --git a/src/couch_inets/httpd.hrl b/src/couch_inets/httpd.hrl
new file mode 100644
index 00000000..f9c74926
--- /dev/null
+++ b/src/couch_inets/httpd.hrl
@@ -0,0 +1,78 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+
+-include_lib("kernel/include/file.hrl").
+
+%% Server identification and protocol constants.
+-ifndef(SERVER_SOFTWARE).
+-define(SERVER_SOFTWARE,"inets/develop"). % Define in Makefile!
+-endif.
+-define(SERVER_PROTOCOL,"HTTP/1.1").
+%% Socket/file transfer tuning knobs.
+-define(SOCKET_CHUNK_SIZE,8192).
+-define(SOCKET_MAX_POLL,25).
+-define(FILE_CHUNK_SIZE,64*1024).
+-define(GATEWAY_INTERFACE,"CGI/1.1").
+%% Prefix an error reason string with the reporting module's name.
+-define(NICE(Reason),lists:flatten(atom_to_list(?MODULE)++": "++Reason)).
+%% Defaults for server-side-include (SSI) directive processing.
+-define(DEFAULT_CONTEXT,
+ [{errmsg,"[an error occurred while processing this directive]"},
+ {timefmt,"%A, %d-%b-%y %T %Z"},
+ {sizefmt,"abbrev"}]).
+
+
+%% Compile-time debug macros: each expands to an io:format call
+%% when the corresponding flag (inets_error/log/debug/cdebug) is
+%% defined, and to the no-op [] otherwise.
+-ifdef(inets_error).
+-define(ERROR(Format, Args), io:format("E(~p:~p:~p) : "++Format++"~n",
+ [self(),?MODULE,?LINE]++Args)).
+-else.
+-define(ERROR(F,A),[]).
+-endif.
+
+-ifdef(inets_log).
+-define(LOG(Format, Args), io:format("L(~p:~p:~p) : "++Format++"~n",
+ [self(),?MODULE,?LINE]++Args)).
+-else.
+-define(LOG(F,A),[]).
+-endif.
+
+-ifdef(inets_debug).
+-define(DEBUG(Format, Args), io:format("D(~p:~p:~p) : "++Format++"~n",
+ [self(),?MODULE,?LINE]++Args)).
+-else.
+-define(DEBUG(F,A),[]).
+-endif.
+
+-ifdef(inets_cdebug).
+-define(CDEBUG(Format, Args), io:format("C(~p:~p:~p) : "++Format++"~n",
+ [self(),?MODULE,?LINE]++Args)).
+-else.
+-define(CDEBUG(F,A),[]).
+-endif.
+
+
+%% Per-connection bootstrap data handed to request handlers.
+-record(init_data,{peername,resolve}).
+%% The "mod" record threaded through every httpd module callback;
+%% carries the socket, parsed request, config ETS table and the
+%% accumulated inter-module data list.
+-record(mod,{init_data,
+ data=[],
+ socket_type=ip_comm,
+ socket,
+ config_db,
+ method,
+ absolute_uri=[],
+ request_uri,
+ http_version,
+ request_line,
+ parsed_header=[],
+ entity_body,
+ connection}).
diff --git a/src/couch_inets/httpd_acceptor.erl b/src/couch_inets/httpd_acceptor.erl
new file mode 100644
index 00000000..9138912f
--- /dev/null
+++ b/src/couch_inets/httpd_acceptor.erl
@@ -0,0 +1,155 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(httpd_acceptor).
+
+-include("httpd.hrl").
+
+%% External API
+-export([start_link/5, start_link/6]).
+
+%% Other exports (for spawn's etc.)
+-export([acceptor/5, acceptor/6, acceptor/7]).
+
+
+%%
+%% External API
+%%
+
+%% start_link
+
+%% start_link/5,6 - spawn the acceptor via proc_lib so that
+%% init_ack/2 can report listen-socket setup success or failure
+%% back to the caller.  AcceptTimeout defaults to 15000 ms.
+start_link(Manager, SocketType, Addr, Port, ConfigDb) ->
+ start_link(Manager, SocketType, Addr, Port, ConfigDb, 15000).
+
+start_link(Manager, SocketType, Addr, Port, ConfigDb,AcceptTimeout) ->
+ Args = [self(), Manager, SocketType, Addr, Port, ConfigDb, AcceptTimeout],
+ proc_lib:start_link(?MODULE, acceptor, Args).
+
+%% acceptor/6,7 - process entry point: create the listen socket,
+%% acknowledge the parent, then enter the accept loop.  Errors
+%% thrown by do_init/3 are caught and reported via init_ack/2.
+acceptor(Parent, Manager, SocketType, Addr, Port, ConfigDb) ->
+ acceptor(Parent, Manager, SocketType, Addr, Port, ConfigDb, 15000).
+acceptor(Parent, Manager, SocketType, Addr, Port, ConfigDb, AcceptTimeout) ->
+ case (catch do_init(SocketType, Addr, Port)) of
+ {ok, ListenSocket} ->
+ proc_lib:init_ack(Parent, {ok, self()}),
+ acceptor(Manager, SocketType, ListenSocket, ConfigDb, AcceptTimeout);
+ Error ->
+ proc_lib:init_ack(Parent, Error),
+ error
+ end.
+
+%% Start the transport layer and open the listen socket.  Failures
+%% are thrown and caught by acceptor/7, which reports them to the
+%% parent via proc_lib:init_ack/2.
+do_init(SocketType, Addr, Port) ->
+ do_socket_start(SocketType),
+ ListenSocket = do_socket_listen(SocketType, Addr, Port),
+ {ok, ListenSocket}.
+
+
+do_socket_start(SocketType) ->
+ case http_transport:start(SocketType) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ throw({error, {socket_start_failed, Reason}})
+ end.
+
+
+do_socket_listen(SocketType, Addr, Port) ->
+ case http_transport:listen(SocketType, Addr, Port) of
+ {ok, ListenSocket} ->
+ ListenSocket;
+ {error, Reason} ->
+ throw({error, {listen, Reason}})
+ end.
+
+
+%% acceptor
+
+%% The accept loop: accept one connection (50 s accept timeout),
+%% hand it to a request handler, and tail-call back into the loop.
+%% ?MODULE:acceptor/5 is used (fully qualified) so a code upgrade
+%% takes effect on the next iteration.
+acceptor(Manager, SocketType, ListenSocket, ConfigDb, AcceptTimeout) ->
+ case (catch http_transport:accept(SocketType, ListenSocket, 50000)) of
+ {ok, Socket} ->
+ handle_connection(Manager, ConfigDb, AcceptTimeout, SocketType, Socket),
+ ?MODULE:acceptor(Manager, SocketType, ListenSocket, ConfigDb,AcceptTimeout);
+ {error, Reason} ->
+ handle_error(Reason, ConfigDb, SocketType),
+ ?MODULE:acceptor(Manager, SocketType, ListenSocket, ConfigDb, AcceptTimeout);
+ {'EXIT', Reason} ->
+ handle_error({'EXIT', Reason}, ConfigDb, SocketType),
+ ?MODULE:acceptor(Manager, SocketType, ListenSocket, ConfigDb, AcceptTimeout)
+ end.
+
+
+%% Start a request-handler process, transfer socket ownership to
+%% it, then notify it that the transfer is complete so it can
+%% start reading from the socket.
+handle_connection(Manager, ConfigDb, AcceptTimeout, SocketType, Socket) ->
+ {ok, Pid} = httpd_request_handler:start(Manager, ConfigDb, AcceptTimeout),
+ http_transport:controlling_process(SocketType, Socket, Pid),
+ httpd_request_handler:socket_ownership_transfered(Pid, SocketType, Socket).
+
+%% handle_error/3 - classify accept failures: transient resource
+%% exhaustion is waited out, benign conditions are ignored, the
+%% listen socket being closed externally exits the acceptor
+%% normally, and anything else is logged via accept_failed/3.
+handle_error(timeout, _, _) ->
+    ok;
+
+handle_error({enfile, _}, _, _) ->
+    %% Out of sockets...
+    sleep(200);
+
+handle_error(emfile, _, _) ->
+    %% Too many open files -> Out of sockets...
+    sleep(200);
+
+handle_error(closed, _, _) ->
+    %% BUG FIX: the first two adjacent string literals were
+    %% concatenated without a separating space, producing
+    %% "...closed bya third party." in the log.
+    error_logger:info_report("The httpd accept socket was closed by "
+                             "a third party. "
+                             "This will not have an impact on inets "
+                             "that will open a new accept socket and "
+                             "go on as nothing happened. It does however "
+                             "indicate that some other software is behaving "
+                             "badly."),
+    exit(normal);
+
+%% This will only happen when the client is terminated abnormally
+%% and is not a problem for the server, so we want
+%% to terminate normally so that we can restart without any
+%% error messages.
+handle_error(econnreset,_,_) ->
+    exit(normal);
+
+handle_error(econnaborted, _, _) ->
+    ok;
+
+handle_error(esslaccept, _, _) ->
+    %% The user has selected to cancel the installation of
+    %% the certificate. This is not a real error, so we do
+    %% not write an error message.
+    ok;
+
+handle_error({'EXIT', Reason}, ConfigDb, SocketType) ->
+    String = lists:flatten(io_lib:format("Accept exit: ~p", [Reason])),
+    accept_failed(SocketType, ConfigDb, String);
+
+handle_error(Reason, ConfigDb, SocketType) ->
+    String = lists:flatten(io_lib:format("Accept error: ~p", [Reason])),
+    accept_failed(SocketType, ConfigDb, String).
+
+
+%% Report an unrecoverable accept failure to the error logger and
+%% both logging modules, then crash the acceptor so its supervisor
+%% can restart it.
+accept_failed(SocketType, ConfigDb, String) ->
+ error_logger:error_report(String),
+ mod_log:error_log(SocketType, undefined, ConfigDb,
+ {0, "unknown"}, String),
+ mod_disk_log:error_log(SocketType, undefined, ConfigDb,
+ {0, "unknown"}, String),
+ exit({accept_failed, String}).
+
+%% Plain sleep via an empty receive with an after-timeout.
+sleep(T) -> receive after T -> ok end.
+
+
diff --git a/src/couch_inets/httpd_acceptor_sup.erl b/src/couch_inets/httpd_acceptor_sup.erl
new file mode 100644
index 00000000..a0656e0a
--- /dev/null
+++ b/src/couch_inets/httpd_acceptor_sup.erl
@@ -0,0 +1,84 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%%----------------------------------------------------------------------
+%% Purpose: The supervisor for acceptor processes in the http server,
+%% hangs under the httpd_instance_sup_<Addr>_<Port> supervisor.
+%%----------------------------------------------------------------------
+
+-module(httpd_acceptor_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/2, start_acceptor/4, start_acceptor/5, stop_acceptor/2]).
+
+%% Supervisor callback
+-export([init/1]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+%% Start the acceptor supervisor for one instance, registered
+%% under a name derived from Addr and Port.
+start_link(Addr, Port) ->
+ SupName = make_name(Addr, Port),
+ supervisor:start_link({local, SupName}, ?MODULE, []).
+
+%%----------------------------------------------------------------------
+%% Function: [start|stop]_acceptor/4,5
+%% Description: Starts/stops an acceptor worker (child) process
+%% under this supervisor.  AcceptTimeout defaults to 15000 ms.
+%%----------------------------------------------------------------------
+start_acceptor(SocketType, Addr, Port, ConfigDb) ->
+ start_acceptor(SocketType, Addr, Port, ConfigDb,15000).
+start_acceptor(SocketType, Addr, Port, ConfigDb, AcceptTimeout) ->
+ start_worker(httpd_acceptor, SocketType, Addr, Port,
+ ConfigDb, AcceptTimeout, self(), []).
+
+stop_acceptor(Addr, Port) ->
+ stop_worker(httpd_acceptor, Addr, Port).
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+%% Supervisor callback: children are added dynamically via
+%% start_acceptor, so the initial child list is empty.
+init(_) ->
+ Flags = {one_for_one, 500, 100},
+ Workers = [],
+ {ok, {Flags, Workers}}.
+
+%%%=========================================================================
+%%% Internal functions
+%%%=========================================================================
+
+%% Registered name of this supervisor for a given Addr/Port.
+make_name(Addr,Port) ->
+ httpd_util:make_name("httpd_acc_sup", Addr, Port).
+
+%% Add a permanent acceptor child; the child id {M,Addr,Port}
+%% makes one acceptor per instance addressable for termination.
+start_worker(M, SocketType, Addr, Port, ConfigDB, AcceptTimeout, Manager, Modules) ->
+ SupName = make_name(Addr, Port),
+ Args = [Manager, SocketType, Addr, Port, ConfigDB, AcceptTimeout],
+ Spec = {{M, Addr, Port},
+ {M, start_link, Args},
+ permanent, timer:seconds(1), worker, [M] ++ Modules},
+ supervisor:start_child(SupName, Spec).
+
+%% Terminate and then delete the child spec, so the same id can
+%% be reused by a later start_worker call.
+stop_worker(M, Addr, Port) ->
+ SupName = make_name(Addr, Port),
+ Name = {M, Addr, Port},
+ case supervisor:terminate_child(SupName, Name) of
+ ok ->
+ supervisor:delete_child(SupName, Name);
+ Error ->
+ Error
+ end.
diff --git a/src/couch_inets/httpd_cgi.erl b/src/couch_inets/httpd_cgi.erl
new file mode 100644
index 00000000..0e0d7f2b
--- /dev/null
+++ b/src/couch_inets/httpd_cgi.erl
@@ -0,0 +1,122 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(httpd_cgi).
+
+-export([parse_headers/1, handle_headers/1]).
+
+-include("inets_internal.hrl").
+
+%%%=========================================================================
+%%% Internal application API
+%%%=========================================================================
+
+%%--------------------------------------------------------------------------
+%% parse_headers([Bin, Data, Header, Headers]) -> {RevHeaders, Body} |
+%% {Module, Function, Args}
+%% Bin = Data = binary()
+%% Header = string() - Accumulator should be [] in first call
+%% Headers = [Header] - Accumulator should be [] in first call
+%% Body = string()
+%% RevHeaders = string() - Note CGI-headers not HTTP-headers
+%%
+%% Description: Parses "<<Bin/binary, Data/binary>>" returned from the
+%% CGI-script until it finds the end of the CGI-headers (at least one
+%% CGI-HeaderField must be supplied) then it returns the CGI-headers
+%% and maybe some body data. If {Module, Function, Args} is
+%% returned it means that more data needs to be collected from the
+%% cgi-script as the end of the headers was not yet found. When more
+%% data has been collected call Module:Function([NewData | Args]).
+%%
+%% NOTE: The headers are backwards and should
+%% be so, devide_and_reverse_headers will reverse them back after
+%% taking advantage of the fact that they were backwards.
+%%--------------------------------------------------------------------------
+%% Continuation entry point: re-joins previously buffered bytes
+%% (Bin) with newly collected data (Data) and resumes parsing with
+%% the accumulated current header and header list.
+parse_headers([Data, Bin, Header, Headers]) ->
+ parse_headers(<<Bin/binary, Data/binary>>, Header, Headers).
+
+%%--------------------------------------------------------------------------
+%% handle_headers(CGIHeaders) -> {ok, HTTPHeaders, StatusCode} |
+%% {proceed, AbsPath}
+%% CGIHeaders = [string()]
+%% HTTPHeaders = [{HeaderField, HeaderValue}]
+%% HeaderField = string()
+%% HeaderValue = string()
+%% StatusCode = integer()
+%%
+%% Description: Interprets CGI headers and creates HTTP headers and a
+%% appropriate HTTP status code. Note if a CGI location header is present
+%% the return value will be {proceed, AbsPath}
+%%--------------------------------------------------------------------------
+%% Interpret parsed CGI headers; status defaults to {200, "ok"}
+%% unless a Status or Location CGI header overrides it.
+handle_headers(CGIHeaders) ->
+ handle_headers(CGIHeaders, [], {200, "ok"}).
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+%% Binary scan for the CGI header terminator.  NOTE: clause order
+%% is load-bearing - the short <<>>/<<CR,LF>>/<<LF>> clauses must
+%% come first so that a buffer ending exactly at a potential
+%% terminator suspends (returns an {M,F,A} continuation) instead
+%% of being consumed as a header boundary prematurely.
+parse_headers(<<>>, Header, Headers) ->
+ {?MODULE, parse_headers, [<<>>, Header, Headers]};
+parse_headers(<<?CR,?LF>>, Header, Headers) ->
+ {?MODULE, parse_headers, [<<?CR,?LF>>, Header, Headers]};
+parse_headers(<<?LF>>, Header, Headers) ->
+ {?MODULE, parse_headers, [<<?LF>>, Header, Headers]};
+%% Blank line (CRLF CRLF or LF LF) terminates the header section;
+%% the remainder of the buffer is body data.
+parse_headers(<<?CR, ?LF, ?CR, ?LF, Rest/binary>>, Header, Headers) ->
+ {ok, {[lists:reverse([?LF, ?CR | Header]) | Headers], Rest}};
+parse_headers(<<?LF, ?LF, Rest/binary>>, Header, Headers) ->
+ {ok, {[lists:reverse([?LF | Header]) | Headers], Rest}};
+%% Line break ends the current header field; start accumulating
+%% the next one.  Header chars are accumulated in reverse.
+parse_headers(<<?CR, ?LF, Rest/binary>>, Header, Headers) ->
+ parse_headers(Rest, [], [lists:reverse([?LF, ?CR | Header]) | Headers]);
+parse_headers(<<?LF, Rest/binary>>, Header, Headers) ->
+ parse_headers(Rest, [], [lists:reverse([?LF | Header]) | Headers]);
+parse_headers(<<Octet, Rest/binary>>, Header, Headers) ->
+ parse_headers(Rest, [Octet | Header], Headers).
+
+handle_headers([], HTTPHeaders, Status) ->
+ {ok, HTTPHeaders, Status};
+
+handle_headers([CGIHeader | CGIHeaders], HTTPHeaders, Status) ->
+
+ {FieldName, FieldValue} = httpd_response:split_header(CGIHeader, []),
+
+ case FieldName of
+ "content-type" ->
+ handle_headers(CGIHeaders,
+ [{FieldName, FieldValue} | HTTPHeaders],
+ Status);
+ "location" ->
+ %% An absolute Location URI becomes a 302 redirect; a
+ %% relative one makes the server serve that path itself
+ %% (the {proceed, AbsPath} return).
+ case http_request:is_absolut_uri(FieldValue) of
+ true ->
+ handle_headers(CGIHeaders,
+ [{FieldName, FieldValue} |
+ HTTPHeaders], {302, "Redirect"});
+ false ->
+ {proceed, FieldValue}
+ end;
+ "status" ->
+ %% "Status: Code Phrase"; a malformed value falls back
+ %% to {200, "OK"} rather than failing the request.
+ CodePhrase =
+ case httpd_util:split(FieldValue," ",2) of
+ {ok,[Code, Phrase]} ->
+ {list_to_integer(Code), Phrase};
+ _ ->
+ {200, "OK"}
+ end,
+ handle_headers(CGIHeaders, HTTPHeaders, CodePhrase);
+ _ -> %% Extension headers
+ handle_headers(CGIHeaders,
+ [{FieldName, FieldValue} | HTTPHeaders], Status)
+ end.
+
diff --git a/src/couch_inets/httpd_conf.erl b/src/couch_inets/httpd_conf.erl
new file mode 100644
index 00000000..a9dac808
--- /dev/null
+++ b/src/couch_inets/httpd_conf.erl
@@ -0,0 +1,720 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(httpd_conf).
+
+%% EWSAPI
+-export([is_directory/1, is_file/1, make_integer/1, clean/1,
+ custom_clean/3, check_enum/2]).
+
+%% Application internal API
+-export([load/1, load/2, load_mime_types/1, store/1, store/2,
+ remove/1, remove_all/1, config/1]).
+
+-define(VMODULE,"CONF").
+-include("httpd.hrl").
+
+%%%=========================================================================
+%%% EWSAPI
+%%%=========================================================================
+%%-------------------------------------------------------------------------
+%% is_directory(FilePath) -> Result
+%% FilePath = string()
+%% Result = {ok,Directory} | {error,Reason}
+%% Directory = string()
+%% Reason = string() | enoent | eaccess | enotdir | FileInfo
+%% FileInfo = File info record
+%%
+%% Description: Checks if FilePath is a directory in which case it is
+%% returned.
+%%-------------------------------------------------------------------------
+is_directory(Directory) ->
+ case file:read_file_info(Directory) of
+ {ok,FileInfo} ->
+ #file_info{type = Type, access = Access} = FileInfo,
+ is_directory(Type,Access,FileInfo,Directory);
+ {error,Reason} ->
+ {error,Reason}
+ end.
+is_directory(directory,read,_FileInfo,Directory) ->
+ {ok,Directory};
+is_directory(directory,read_write,_FileInfo,Directory) ->
+ {ok,Directory};
+is_directory(_Type,_Access,FileInfo,_Directory) ->
+ {error,FileInfo}.
+%%-------------------------------------------------------------------------
+%% is_file(FilePath) -> Result
+%% FilePath = string()
+%% Result = {ok,File} | {error,Reason}
+%% File = string()
+%% Reason = string() | enoent | eaccess | enotdir | FileInfo
+%% FileInfo = File info record
+%%
+%% Description: Checks if FilePath is a regular file in which case it
+%% is returned.
+%%-------------------------------------------------------------------------
+is_file(File) ->
+ case file:read_file_info(File) of
+ {ok,FileInfo} ->
+ #file_info{type = Type, access = Access} = FileInfo,
+ is_file(Type,Access,FileInfo,File);
+ {error,Reason} ->
+ {error,Reason}
+ end.
+is_file(regular,read,_FileInfo,File) ->
+ {ok,File};
+is_file(regular,read_write,_FileInfo,File) ->
+ {ok,File};
+is_file(_Type,_Access,FileInfo,_File) ->
+ {error,FileInfo}.
+%%-------------------------------------------------------------------------
+%% make_integer(String) -> Result
+%% String = string()
+%% Result = {ok,integer()} | {error,nomatch}
+%%
+%% Description: make_integer/1 returns an integer representation of String.
+%%-------------------------------------------------------------------------
+make_integer(String) ->
+ case regexp:match(clean(String),"[0-9]+") of
+ {match, _, _} ->
+ {ok, list_to_integer(clean(String))};
+ nomatch ->
+ {error, nomatch}
+ end.
+%%-------------------------------------------------------------------------
+%% clean(String) -> Stripped
+%% String = Stripped = string()
+%%
+%% Description: clean/1 removes leading and/or trailing white spaces
+%% from String.
+%%-------------------------------------------------------------------------
+clean(String) ->
+ {ok,CleanedString,_} =
+ regexp:gsub(String, "^[ \t\n\r\f]*|[ \t\n\r\f]*\$",""),
+ CleanedString.
+%%-------------------------------------------------------------------------
+%% custom_clean(String,Before,After) -> Stripped
+%% Before = After = regexp()
+%% String = Stripped = string()
+%%
+%% Description: custom_clean/3 removes leading and/or trailing white
+%% spaces and custom characters from String.
+%%-------------------------------------------------------------------------
+custom_clean(String,MoreBefore,MoreAfter) ->
+ {ok,CleanedString,_} = regexp:gsub(String,"^[ \t\n\r\f"++MoreBefore++
+ "]*|[ \t\n\r\f"++MoreAfter++"]*\$",""),
+ CleanedString.
+%%-------------------------------------------------------------------------
+%% check_enum(EnumString,ValidEnumStrings) -> Result
+%% EnumString = string()
+%% ValidEnumStrings = [string()]
+%% Result = {ok,atom()} | {error,not_valid}
+%%
+%% Description: check_enum/2 checks if EnumString is a valid
+%% enumeration of ValidEnumStrings in which case it is returned as an
+%% atom.
+%%-------------------------------------------------------------------------
+check_enum(_Enum,[]) ->
+ {error, not_valid};
+check_enum(Enum,[Enum|_Rest]) ->
+ {ok, list_to_atom(Enum)};
+check_enum(Enum, [_NotValid|Rest]) ->
+ check_enum(Enum, Rest).
+
+%%%=========================================================================
+%%% Application internal API
+%%%=========================================================================
+%% The configuration data is handled in three (3) phases:
+%% 1. Parse the config file and put all directives into a key-value
+%% tuple list (load/1).
+%% 2. Traverse the key-value tuple list store it into an ETS table.
+%% Directives depending on other directives are taken care of here
+%% (store/1).
+%% 3. Traverse the ETS table and do a complete clean-up (remove/1).
+
+%% Phase 1: Load
+load(ConfigFile) ->
+ case read_config_file(ConfigFile) of
+ {ok, Config} ->
+ case bootstrap(Config) of
+ {error, Reason} ->
+ {error, Reason};
+ {ok, Modules} ->
+ load_config(Config, lists:append(Modules, [?MODULE]))
+ end;
+ {error, Reason} ->
+ {error, ?NICE("Error while reading config file: "++Reason)}
+ end.
+
+load(eof, []) ->
+ eof;
+load("MaxHeaderSize " ++ MaxHeaderSize, []) ->
+ case make_integer(MaxHeaderSize) of
+ {ok, Integer} ->
+ {ok, [], {max_header_size,Integer}};
+ {error, _} ->
+ {error, ?NICE(clean(MaxHeaderSize)++
+ " is an invalid number of MaxHeaderSize")}
+ end;
+load("MaxHeaderAction " ++ Action, []) ->
+ {ok, [], {max_header_action,list_to_atom(clean(Action))}};
+load("MaxBodySize " ++ MaxBodySize, []) ->
+ case make_integer(MaxBodySize) of
+ {ok, Integer} ->
+ {ok, [], {max_body_size,Integer}};
+ {error, _} ->
+ {error, ?NICE(clean(MaxBodySize)++
+ " is an invalid number of MaxBodySize")}
+ end;
+load("MaxBodyAction " ++ Action, []) ->
+ {ok, [], {max_body_action,list_to_atom(clean(Action))}};
+load("ServerName " ++ ServerName, []) ->
+ {ok,[],{server_name,clean(ServerName)}};
+load("SocketType " ++ SocketType, []) ->
+ case check_enum(clean(SocketType),["ssl","ip_comm"]) of
+ {ok, ValidSocketType} ->
+ {ok, [], {com_type,ValidSocketType}};
+ {error,_} ->
+ {error, ?NICE(clean(SocketType) ++ " is an invalid SocketType")}
+ end;
+load("Port " ++ Port, []) ->
+ case make_integer(Port) of
+ {ok, Integer} ->
+ {ok, [], {port,Integer}};
+ {error, _} ->
+ {error, ?NICE(clean(Port)++" is an invalid Port")}
+ end;
+load("BindAddress " ++ Address, []) ->
+ %% If an ipv6 address is provided in URL-syntax strip the
+ %% url specific part, i.e. "[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]"
+ %% -> "FEDC:BA98:7654:3210:FEDC:BA98:7654:3210"
+ NewAddress = string:strip(string:strip(clean(Address),
+ left, $[),
+ right, $]),
+ case NewAddress of
+ "*" ->
+ {ok, [], {bind_address,any}};
+ CAddress ->
+ case (catch inet:getaddr(CAddress,inet6)) of
+ {ok, {0, 0, 0, 0, 0, 16#ffff, _, _}} ->
+ case inet:getaddr(CAddress, inet) of
+ {ok, IPAddr} ->
+ {ok, [], {bind_address,IPAddr}};
+ {error, _} ->
+ {error, ?NICE(CAddress++" is an invalid address")}
+ end;
+ {ok, IPAddr} ->
+ {ok, [], {bind_address, IPAddr}};
+ _ ->
+ case inet:getaddr(CAddress, inet) of
+ {ok, IPAddr} ->
+ {ok, [], {bind_address,IPAddr}};
+ {error, _} ->
+ {error, ?NICE(CAddress++" is an invalid address")}
+ end
+ end
+ end;
+load("KeepAlive " ++ OnorOff, []) ->
+ case list_to_atom(clean(OnorOff)) of
+ off ->
+ {ok, [], {persistent_conn, false}};
+ _ ->
+ {ok, [], {persistent_conn, true}}
+ end;
+load("MaxKeepAliveRequests " ++ MaxRequests, []) ->
+ case make_integer(MaxRequests) of
+ {ok, Integer} ->
+ {ok, [], {max_keep_alive_request, Integer}};
+ {error, _} ->
+ {error, ?NICE(clean(MaxRequests) ++
+ " is an invalid MaxKeepAliveRequests")}
+ end;
+%% This clause is kept for backwards compatibility
+load("MaxKeepAliveRequest " ++ MaxRequests, []) ->
+ case make_integer(MaxRequests) of
+ {ok, Integer} ->
+ {ok, [], {max_keep_alive_request, Integer}};
+ {error, _} ->
+ {error, ?NICE(clean(MaxRequests) ++
+ " is an invalid MaxKeepAliveRequest")}
+ end;
+load("KeepAliveTimeout " ++ Timeout, []) ->
+ case make_integer(Timeout) of
+ {ok, Integer} ->
+ {ok, [], {keep_alive_timeout, Integer*1000}};
+ {error, _} ->
+ {error, ?NICE(clean(Timeout)++" is an invalid KeepAliveTimeout")}
+ end;
+load("Modules " ++ Modules, []) ->
+ {ok, ModuleList} = regexp:split(Modules," "),
+ {ok, [], {modules,[list_to_atom(X) || X <- ModuleList]}};
+load("ServerAdmin " ++ ServerAdmin, []) ->
+ {ok, [], {server_admin,clean(ServerAdmin)}};
+load("ServerRoot " ++ ServerRoot, []) ->
+ case is_directory(clean(ServerRoot)) of
+ {ok, Directory} ->
+ MimeTypesFile =
+ filename:join([clean(ServerRoot),"conf", "mime.types"]),
+ case load_mime_types(MimeTypesFile) of
+ {ok, MimeTypesList} ->
+ {ok, [], [{server_root,string:strip(Directory,right,$/)},
+ {mime_types,MimeTypesList}]};
+ {error, Reason} ->
+ {error, Reason}
+ end;
+ {error, _} ->
+ {error, ?NICE(clean(ServerRoot)++" is an invalid ServerRoot")}
+ end;
+load("MaxClients " ++ MaxClients, []) ->
+ case make_integer(MaxClients) of
+ {ok, Integer} ->
+ {ok, [], {max_clients,Integer}};
+ {error, _} ->
+ {error, ?NICE(clean(MaxClients) ++
+ " is an invalid number of MaxClients")}
+ end;
+load("DocumentRoot " ++ DocumentRoot,[]) ->
+ case is_directory(clean(DocumentRoot)) of
+ {ok, Directory} ->
+ {ok, [], {document_root,string:strip(Directory,right,$/)}};
+ {error, _} ->
+ {error, ?NICE(clean(DocumentRoot)++"is an invalid DocumentRoot")}
+ end;
+load("DefaultType " ++ DefaultType, []) ->
+ {ok, [], {default_type,clean(DefaultType)}};
+load("SSLCertificateFile " ++ SSLCertificateFile, []) ->
+ case is_file(clean(SSLCertificateFile)) of
+ {ok, File} ->
+ {ok, [], {ssl_certificate_file,File}};
+ {error, _} ->
+ {error, ?NICE(clean(SSLCertificateFile)++
+ " is an invalid SSLCertificateFile")}
+ end;
+load("SSLCertificateKeyFile " ++ SSLCertificateKeyFile, []) ->
+ case is_file(clean(SSLCertificateKeyFile)) of
+ {ok, File} ->
+ {ok, [], {ssl_certificate_key_file,File}};
+ {error, _} ->
+ {error, ?NICE(clean(SSLCertificateKeyFile)++
+ " is an invalid SSLCertificateKeyFile")}
+ end;
+load("SSLVerifyClient " ++ SSLVerifyClient, []) ->
+ case make_integer(clean(SSLVerifyClient)) of
+ {ok, Integer} when Integer >=0,Integer =< 2 ->
+ {ok, [], {ssl_verify_client,Integer}};
+ {ok, _Integer} ->
+ {error,?NICE(clean(SSLVerifyClient) ++
+ " is an invalid SSLVerifyClient")};
+ {error, nomatch} ->
+ {error,?NICE(clean(SSLVerifyClient) ++
+ " is an invalid SSLVerifyClient")}
+ end;
+load("SSLVerifyDepth " ++ SSLVerifyDepth, []) ->
+ case make_integer(clean(SSLVerifyDepth)) of
+ {ok, Integer} when Integer > 0 ->
+ {ok, [], {ssl_verify_client_depth,Integer}};
+ {ok, _Integer} ->
+ {error,?NICE(clean(SSLVerifyDepth) ++
+ " is an invalid SSLVerifyDepth")};
+ {error, nomatch} ->
+ {error,?NICE(clean(SSLVerifyDepth) ++
+ " is an invalid SSLVerifyDepth")}
+ end;
+load("SSLCiphers " ++ SSLCiphers, []) ->
+ {ok, [], {ssl_ciphers, clean(SSLCiphers)}};
+load("SSLCACertificateFile " ++ SSLCACertificateFile, []) ->
+ case is_file(clean(SSLCACertificateFile)) of
+ {ok, File} ->
+ {ok, [], {ssl_ca_certificate_file,File}};
+ {error, _} ->
+ {error, ?NICE(clean(SSLCACertificateFile)++
+ " is an invalid SSLCACertificateFile")}
+ end;
+load("SSLPasswordCallbackModule " ++ SSLPasswordCallbackModule, []) ->
+ {ok, [], {ssl_password_callback_module,
+ list_to_atom(clean(SSLPasswordCallbackModule))}};
+load("SSLPasswordCallbackFunction " ++ SSLPasswordCallbackFunction, []) ->
+ {ok, [], {ssl_password_callback_function,
+ list_to_atom(clean(SSLPasswordCallbackFunction))}};
+load("DisableChunkedTransferEncodingSend " ++ TrueOrFalse, []) ->
+ case list_to_atom(clean(TrueOrFalse)) of
+ true ->
+ {ok, [], {disable_chunked_transfer_encoding_send, true}};
+ _ ->
+ {ok, [], {disable_chunked_transfer_encoding_send, false}}
+ end.
+
+%%
+%% load_mime_types/1 -> {ok, MimeTypes} | {error, Reason}
+%%
+load_mime_types(MimeTypesFile) ->
+ case file:open(MimeTypesFile, read) of
+ {ok, Stream} ->
+ parse_mime_types(Stream, []);
+ {error, _} ->
+ {error, ?NICE("Can't open " ++ MimeTypesFile)}
+ end.
+
+%% Phase 2: Store
+store(ConfigList) ->
+ Modules = httpd_util:key1search(ConfigList, modules, []),
+ Port = httpd_util:key1search(ConfigList, port),
+ Addr = httpd_util:key1search(ConfigList,bind_address),
+ Name = httpd_util:make_name("httpd_conf",Addr,Port),
+ ConfigDB = ets:new(Name, [named_table, bag, protected]),
+ store(ConfigDB, ConfigList, lists:append(Modules,[?MODULE]),ConfigList).
+
+store({mime_types,MimeTypesList},ConfigList) ->
+ Port = httpd_util:key1search(ConfigList, port),
+ Addr = httpd_util:key1search(ConfigList, bind_address),
+ Name = httpd_util:make_name("httpd_mime",Addr,Port),
+ {ok, MimeTypesDB} = store_mime_types(Name,MimeTypesList),
+ {ok, {mime_types,MimeTypesDB}};
+store(ConfigListEntry, _ConfigList) ->
+ {ok, ConfigListEntry}.
+
+%% Phase 3: Remove
+remove_all(ConfigDB) ->
+ Modules = httpd_util:lookup(ConfigDB,modules,[]),
+ remove_traverse(ConfigDB, lists:append(Modules,[?MODULE])).
+
+remove(ConfigDB) ->
+ ets:delete(ConfigDB),
+ ok.
+
+config(ConfigDB) ->
+ case httpd_util:lookup(ConfigDB,com_type,ip_comm) of
+ ssl ->
+ case ssl_certificate_file(ConfigDB) of
+ undefined ->
+ {error,
+ "Directive SSLCertificateFile "
+ "not found in the config file"};
+ SSLCertificateFile ->
+ {ssl,
+ SSLCertificateFile++
+ ssl_certificate_key_file(ConfigDB)++
+ ssl_verify_client(ConfigDB)++
+ ssl_ciphers(ConfigDB)++
+ ssl_password(ConfigDB)++
+ ssl_verify_depth(ConfigDB)++
+ ssl_ca_certificate_file(ConfigDB)}
+ end;
+ ip_comm ->
+ ip_comm
+ end.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+%%% Phase 1 Load:
+bootstrap([]) ->
+ {error, ?NICE("Modules must be specified in the config file")};
+bootstrap([Line|Config]) ->
+ case Line of
+ "Modules " ++ Modules ->
+ {ok, ModuleList} = regexp:split(Modules," "),
+ TheMods = [list_to_atom(X) || X <- ModuleList],
+ case verify_modules(TheMods) of
+ ok ->
+ {ok, TheMods};
+ {error, Reason} ->
+ {error, Reason}
+ end;
+ _ ->
+ bootstrap(Config)
+ end.
+
+load_config(Config, Modules) ->
+ %% Create default contexts for all modules
+ Contexts = lists:duplicate(length(Modules), []),
+ load_config(Config, Modules, Contexts, []).
+load_config([], _Modules, _Contexts, ConfigList) ->
+ case a_must(ConfigList, [server_name,port,server_root,document_root]) of
+ ok ->
+ {ok, ConfigList};
+ {missing, Directive} ->
+ {error, ?NICE(atom_to_list(Directive)++
+ " must be specified in the config file")}
+ end;
+load_config([Line|Config], Modules, Contexts, ConfigList) ->
+ case load_traverse(Line, Contexts, Modules, [], ConfigList, no) of
+ {ok, NewContexts, NewConfigList} ->
+ load_config(Config, Modules, NewContexts, NewConfigList);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+
+%% This loads the config file into each module specified by Modules
+%% Each module has its own context that is passed to and (optionally)
+%% returned by the modules load function. The module can also return
+%% a ConfigEntry, which will be added to the global configuration
+%% list.
+%% All configuration directives are guaranteed to be passed to all
+%% modules. Each module only implements the function clauses of
+%% the load function for the configuration directives it supports,
+%% it's ok if an apply returns {'EXIT', {function_clause, ..}}.
+load_traverse(Line, [], [], _NewContexts, _ConfigList, no) ->
+ {error, ?NICE("Configuration directive not recognized: "++Line)};
+load_traverse(_Line, [], [], NewContexts, ConfigList, yes) ->
+ {ok, lists:reverse(NewContexts), ConfigList};
+load_traverse(Line, [Context|Contexts], [Module|Modules], NewContexts,
+ ConfigList, State) ->
+ case catch apply(Module, load, [Line, Context]) of
+ {'EXIT', {function_clause, _}} ->
+ load_traverse(Line, Contexts, Modules,
+ [Context|NewContexts], ConfigList, State);
+ {'EXIT',{undef, _}} ->
+ load_traverse(Line, Contexts, Modules,
+ [Context|NewContexts], ConfigList,yes);
+ {'EXIT', Reason} ->
+ error_logger:error_report({'EXIT', Reason}),
+ load_traverse(Line, Contexts, Modules,
+ [Context|NewContexts], ConfigList, State);
+ {ok, NewContext} ->
+ load_traverse(Line, Contexts, Modules,
+ [NewContext|NewContexts], ConfigList,yes);
+ {ok, NewContext, ConfigEntry} when tuple(ConfigEntry) ->
+ load_traverse(Line, Contexts, Modules, [NewContext|NewContexts],
+ [ConfigEntry|ConfigList], yes);
+ {ok, NewContext, ConfigEntry} when list(ConfigEntry) ->
+ load_traverse(Line, Contexts, Modules, [NewContext|NewContexts],
+ lists:append(ConfigEntry, ConfigList), yes);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+%% Verifies that all specified modules are available.
+verify_modules([]) ->
+ ok;
+verify_modules([Mod|Rest]) ->
+ case code:which(Mod) of
+ non_existing ->
+ {error, ?NICE(atom_to_list(Mod)++" does not exist")};
+ _Path ->
+ verify_modules(Rest)
+ end.
+
+%% Reads the entire configuration file and returns a list of strings or
+%% an error.
+read_config_file(FileName) ->
+ case file:open(FileName, read) of
+ {ok, Stream} ->
+ read_config_file(Stream, []);
+ {error, _Reason} ->
+ {error, ?NICE("Cannot open "++FileName)}
+ end.
+read_config_file(Stream, SoFar) ->
+ case io:get_line(Stream, []) of
+ eof ->
+ file:close(Stream),
+ {ok, lists:reverse(SoFar)};
+ {error, Reason} ->
+ file:close(Stream),
+ {error, Reason};
+ [$#|_Rest] ->
+ %% Ignore commented lines for efficiency later ..
+ read_config_file(Stream, SoFar);
+ Line ->
+ {ok, NewLine, _}=regexp:sub(clean(Line),"[\t\r\f ]"," "),
+ case NewLine of
+ [] ->
+ %% Also ignore empty lines ..
+ read_config_file(Stream, SoFar);
+ _Other ->
+ read_config_file(Stream, [NewLine|SoFar])
+ end
+ end.
+
+parse_mime_types(Stream,MimeTypesList) ->
+ Line=
+ case io:get_line(Stream,'') of
+ eof ->
+ eof;
+ String ->
+ clean(String)
+ end,
+ parse_mime_types(Stream, MimeTypesList, Line).
+parse_mime_types(Stream, MimeTypesList, eof) ->
+ file:close(Stream),
+ {ok, MimeTypesList};
+parse_mime_types(Stream, MimeTypesList, "") ->
+ parse_mime_types(Stream, MimeTypesList);
+parse_mime_types(Stream, MimeTypesList, [$#|_]) ->
+ parse_mime_types(Stream, MimeTypesList);
+parse_mime_types(Stream, MimeTypesList, Line) ->
+ case regexp:split(Line, " ") of
+ {ok, [NewMimeType|Suffixes]} ->
+ parse_mime_types(Stream,
+ lists:append(suffixes(NewMimeType,Suffixes),
+ MimeTypesList));
+ {ok, _} ->
+ {error, ?NICE(Line)}
+ end.
+
+suffixes(_MimeType,[]) ->
+ [];
+suffixes(MimeType,[Suffix|Rest]) ->
+ [{Suffix,MimeType}|suffixes(MimeType,Rest)].
+
+a_must(_ConfigList,[]) ->
+ ok;
+a_must(ConfigList,[Directive|Rest]) ->
+ case httpd_util:key1search(ConfigList,Directive) of
+ undefined ->
+ {missing,Directive};
+ _ ->
+ a_must(ConfigList,Rest)
+ end.
+
+%% Phase 2: store
+store(ConfigDB, _ConfigList, _Modules,[]) ->
+ {ok, ConfigDB};
+store(ConfigDB, ConfigList, Modules, [ConfigListEntry|Rest]) ->
+ case store_traverse(ConfigListEntry,ConfigList,Modules) of
+ {ok, ConfigDBEntry} when tuple(ConfigDBEntry) ->
+ ets:insert(ConfigDB,ConfigDBEntry),
+ store(ConfigDB,ConfigList,Modules,Rest);
+ {ok, ConfigDBEntry} when list(ConfigDBEntry) ->
+ lists:foreach(fun(Entry) ->
+ ets:insert(ConfigDB,Entry)
+ end,ConfigDBEntry),
+ store(ConfigDB,ConfigList,Modules,Rest);
+ {error, Reason} ->
+ {error,Reason}
+ end.
+
+store_traverse(_ConfigListEntry, _ConfigList,[]) ->
+ {error,?NICE("Unable to store configuration...")};
+store_traverse(ConfigListEntry, ConfigList, [Module|Rest]) ->
+ case catch apply(Module,store,[ConfigListEntry, ConfigList]) of
+ {'EXIT',{function_clause,_}} ->
+ store_traverse(ConfigListEntry,ConfigList,Rest);
+ {'EXIT',{undef, _}} ->
+ store_traverse(ConfigListEntry,ConfigList,Rest);
+ {'EXIT', Reason} ->
+ error_logger:error_report({'EXIT',Reason}),
+ store_traverse(ConfigListEntry,ConfigList,Rest);
+ Result ->
+ Result
+ end.
+
+store_mime_types(Name,MimeTypesList) ->
+ %% Make sure that the ets table is not duplicated
+ %% when reloading configuration
+ catch ets:delete(Name),
+ MimeTypesDB = ets:new(Name, [named_table, set, protected]),
+ store_mime_types1(MimeTypesDB, MimeTypesList).
+store_mime_types1(MimeTypesDB,[]) ->
+ {ok, MimeTypesDB};
+store_mime_types1(MimeTypesDB,[Type|Rest]) ->
+ ets:insert(MimeTypesDB, Type),
+ store_mime_types1(MimeTypesDB, Rest).
+
+
+%% Phase 3: remove
+remove_traverse(_ConfigDB,[]) ->
+ ok;
+remove_traverse(ConfigDB,[Module|Rest]) ->
+ case (catch apply(Module,remove,[ConfigDB])) of
+ {'EXIT',{undef,_}} ->
+ remove_traverse(ConfigDB,Rest);
+ {'EXIT',{function_clause,_}} ->
+ remove_traverse(ConfigDB,Rest);
+ {'EXIT',Reason} ->
+ error_logger:error_report({'EXIT',Reason}),
+ remove_traverse(ConfigDB,Rest);
+ {error,Reason} ->
+ error_logger:error_report(Reason),
+ remove_traverse(ConfigDB,Rest);
+ _ ->
+ remove_traverse(ConfigDB,Rest)
+ end.
+
+ssl_certificate_file(ConfigDB) ->
+ case httpd_util:lookup(ConfigDB,ssl_certificate_file) of
+ undefined ->
+ undefined;
+ SSLCertificateFile ->
+ [{certfile,SSLCertificateFile}]
+ end.
+
+ssl_certificate_key_file(ConfigDB) ->
+ case httpd_util:lookup(ConfigDB,ssl_certificate_key_file) of
+ undefined ->
+ [];
+ SSLCertificateKeyFile ->
+ [{keyfile,SSLCertificateKeyFile}]
+ end.
+
+ssl_verify_client(ConfigDB) ->
+ case httpd_util:lookup(ConfigDB,ssl_verify_client) of
+ undefined ->
+ [];
+ SSLVerifyClient ->
+ [{verify,SSLVerifyClient}]
+ end.
+
+ssl_ciphers(ConfigDB) ->
+ case httpd_util:lookup(ConfigDB,ssl_ciphers) of
+ undefined ->
+ [];
+ Ciphers ->
+ [{ciphers, Ciphers}]
+ end.
+
+ssl_password(ConfigDB) ->
+ case httpd_util:lookup(ConfigDB,ssl_password_callback_module) of
+ undefined ->
+ [];
+ Module ->
+ case httpd_util:lookup(ConfigDB,
+ ssl_password_callback_function) of
+ undefined ->
+ [];
+ Function ->
+ case catch apply(Module, Function, []) of
+ Password when list(Password) ->
+ [{password, Password}];
+ Error ->
+ error_report(ssl_password,Module,Function,Error),
+ []
+ end
+ end
+ end.
+
+ssl_verify_depth(ConfigDB) ->
+ case httpd_util:lookup(ConfigDB, ssl_verify_client_depth) of
+ undefined ->
+ [];
+ Depth ->
+ [{depth, Depth}]
+ end.
+
+ssl_ca_certificate_file(ConfigDB) ->
+ case httpd_util:lookup(ConfigDB, ssl_ca_certificate_file) of
+ undefined ->
+ [];
+ File ->
+ [{cacertfile, File}]
+ end.
+
+error_report(Where,M,F,Error) ->
+ error_logger:error_report([{?MODULE, Where},
+ {apply, {M, F, []}}, Error]).
+
diff --git a/src/couch_inets/httpd_esi.erl b/src/couch_inets/httpd_esi.erl
new file mode 100644
index 00000000..2f7c9d38
--- /dev/null
+++ b/src/couch_inets/httpd_esi.erl
@@ -0,0 +1,106 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(httpd_esi).
+
+-export([parse_headers/1, handle_headers/1]).
+
+-include("inets_internal.hrl").
+
+%%%=========================================================================
+%%% Internal application API
+%%%=========================================================================
+
+%%--------------------------------------------------------------------------
+%% parse_headers(Data) -> {Headers, Body}
+%%
+%% Data = string() | io_list()
+%% Headers = string()
+%% Body = io_list()
+%%
+%% Description: Parses <Data> and divides it to a header part and a
+%% body part. Note that it is presumed that <Data> starts with a
+%% string including "\r\n\r\n" if there is any header information
+%% present. The returned headers will not contain the HTTP header body
+%% delimiter \r\n. (All other header delimiters are kept.)
+%% Ex: ["Content-Type : text/html\r\n Connection : closing \r\n\r\n" |
+%% io_list()] --> {"Content-Type : text/html\r\n Connection : closing \r\n",
+%% io_list()}
+%%--------------------------------------------------------------------------
+parse_headers(Data) ->
+ parse_headers(Data, []).
+
+%%--------------------------------------------------------------------------
+%% handle_headers(Headers) -> {ok, HTTPHeaders, StatusCode} |
+%% {proceed, AbsPath}
+%% Headers = string()
+%% HTTPHeaders = [{HeaderField, HeaderValue}]
+%% HeaderField = string()
+%% HeaderValue = string()
+%% StatusCode = integer()
+%%
+%% Description: Transforms the plain HTTP header string data received
+%% from the ESI program into a list of header values and an
+%% appropriate HTTP status code. Note if a location header is present
+%% the return value will be {proceed, AbsPath}
+%%--------------------------------------------------------------------------
+handle_headers("") ->
+ {ok, [], 200};
+handle_headers(Headers) ->
+ NewHeaders = string:tokens(Headers, ?CRLF),
+ handle_headers(NewHeaders, [], 200).
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+parse_headers([], Acc) ->
+ {[], lists:reverse(Acc)};
+parse_headers([?CR, ?LF, ?CR, ?LF], Acc) ->
+ {lists:reverse(Acc) ++ [?CR, ?LF], []};
+parse_headers([?CR, ?LF, ?CR, ?LF | Rest], Acc) ->
+ {lists:reverse(Acc) ++ [?CR, ?LF], Rest};
+parse_headers([Char | Rest], Acc) ->
+ parse_headers(Rest, [Char | Acc]).
+
+handle_headers([], NewHeaders, StatusCode) ->
+ {ok, NewHeaders, StatusCode};
+
+handle_headers([Header | Headers], NewHeaders, StatusCode) ->
+ {FieldName, FieldValue} = httpd_response:split_header(Header, []),
+ case FieldName of
+ "location" ->
+ case http_request:is_absolut_uri(FieldValue) of
+ true ->
+ handle_headers(Headers,
+ [{FieldName, FieldValue} | NewHeaders],
+ 302);
+ false ->
+ {proceed, FieldValue}
+ end;
+ "status" ->
+ NewStatusCode =
+ case httpd_util:split(FieldValue," ",2) of
+ {ok,[Code,_]} ->
+ list_to_integer(Code);
+ _ ->
+ 200
+ end,
+ handle_headers(Headers, NewHeaders, NewStatusCode);
+ _ ->
+ handle_headers(Headers,
+ [{FieldName, FieldValue}| NewHeaders], StatusCode)
+ end.
diff --git a/src/couch_inets/httpd_example.erl b/src/couch_inets/httpd_example.erl
new file mode 100644
index 00000000..66c6ca39
--- /dev/null
+++ b/src/couch_inets/httpd_example.erl
@@ -0,0 +1,143 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(httpd_example).
+-export([print/1]).
+-export([get/2, post/2, yahoo/2, test1/2, get_bin/2]).
+
+-export([newformat/3]).
+%% These are used by the inets test-suite
+-export([delay/1]).
+
+
+print(String) ->
+ [header(),
+ top("Print"),
+ String++"\n",
+ footer()].
+
+test1(Env, []) ->
+ io:format("Env:~p~n",[Env]),
+ ["<html>",
+ "<head>",
+ "<title>Test1</title>",
+ "</head>",
+ "<body>",
+ "<h1>Erlang Body</h1>",
+ "<h2>Stuff</h2>",
+ "</body>",
+ "</html>"].
+
+
+get(_Env,[]) ->
+ [header(),
+ top("GET Example"),
+ "<FORM ACTION=\"/cgi-bin/erl/httpd_example:get\" METHOD=GET>
+<B>Input:</B> <INPUT TYPE=\"text\" NAME=\"input1\">
+<INPUT TYPE=\"text\" NAME=\"input2\">
+<INPUT TYPE=\"submit\"><BR>
+</FORM>" ++ "\n",
+ footer()];
+
+get(Env,Input) ->
+ default(Env,Input).
+
+get_bin(_Env,_Input) ->
+ [list_to_binary(header()),
+ list_to_binary(top("GET Example")),
+ list_to_binary("<FORM ACTION=\"/cgi-bin/erl/httpd_example:get\" METHOD=GET>
+<B>Input:</B> <INPUT TYPE=\"text\" NAME=\"input1\">
+<INPUT TYPE=\"text\" NAME=\"input2\">
+<INPUT TYPE=\"submit\"><BR>
+</FORM>" ++ "\n"),
+ footer()].
+
+post(_Env,[]) ->
+ [header(),
+ top("POST Example"),
+ "<FORM ACTION=\"/cgi-bin/erl/httpd_example:post\" METHOD=POST>
+<B>Input:</B> <INPUT TYPE=\"text\" NAME=\"input1\">
+<INPUT TYPE=\"text\" NAME=\"input2\">
+<INPUT TYPE=\"submit\"><BR>
+</FORM>" ++ "\n",
+ footer()];
+
+post(Env,Input) ->
+ default(Env,Input).
+
+yahoo(_Env,_Input) ->
+ "Location: http://www.yahoo.com\r\n\r\n".
+
+default(Env,Input) ->
+ [header(),
+ top("Default Example"),
+ "<B>Environment:</B> ",io_lib:format("~p",[Env]),"<BR>\n",
+ "<B>Input:</B> ",Input,"<BR>\n",
+ "<B>Parsed Input:</B> ",
+ io_lib:format("~p",[httpd:parse_query(Input)]),"\n",
+ footer()].
+
+header() ->
+ header("text/html").
+header(MimeType) ->
+ "Content-type: " ++ MimeType ++ "\r\n\r\n".
+
+top(Title) ->
+ "<HTML>
+<HEAD>
+<TITLE>" ++ Title ++ "</TITLE>
+</HEAD>
+<BODY>\n".
+
+footer() ->
+ "</BODY>
+</HTML>\n".
+
+
+newformat(SessionID, _Env, _Input)->
+ mod_esi:deliver(SessionID, "Content-Type:text/html\r\n\r\n"),
+ mod_esi:deliver(SessionID, top("new esi format test")),
+ mod_esi:deliver(SessionID, "This new format is nice<BR>"),
+ mod_esi:deliver(SessionID, "This new format is nice<BR>"),
+ mod_esi:deliver(SessionID, "This new format is nice<BR>"),
+ mod_esi:deliver(SessionID, footer()).
+
+%% ------------------------------------------------------
+
+delay(Time) when integer(Time) ->
+ i("httpd_example:delay(~p) -> do the delay",[Time]),
+ sleep(Time),
+ i("httpd_example:delay(~p) -> done, now reply",[Time]),
+ delay_reply("delay ok");
+delay(Time) when list(Time) ->
+ delay(httpd_conf:make_integer(Time));
+delay({ok,Time}) when integer(Time) ->
+ delay(Time);
+delay({error,_Reason}) ->
+ i("delay -> called with invalid time"),
+ delay_reply("delay failed: invalid delay time").
+
+delay_reply(Reply) ->
+ [header(),
+ top("delay"),
+ Reply,
+ footer()].
+
+i(F) -> i(F,[]).
+i(F,A) -> io:format(F ++ "~n",A).
+
+sleep(T) -> receive after T -> ok end.
diff --git a/src/couch_inets/httpd_instance_sup.erl b/src/couch_inets/httpd_instance_sup.erl
new file mode 100644
index 00000000..ddf81a49
--- /dev/null
+++ b/src/couch_inets/httpd_instance_sup.erl
@@ -0,0 +1,193 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%%----------------------------------------------------------------------
+%% Purpose: The top supervisor for an instance of the http server. (You may
+%% have several instances running on the same machine.) Hangs under
+%% httpd_sup.
+%%----------------------------------------------------------------------
+
+-module(httpd_instance_sup).
+
+-behaviour(supervisor).
+
+-export([init/1]).
+
+%% Internal API
+-export([start/1, start_link/1, start_link/3, start2/1, start_link2/1,
+ stop/1, stop/2, stop2/1]).
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+%% init/1 (supervisor callback): one_for_one strategy with 0 restarts
+%% allowed per second; children are the acceptor supervisor, the misc
+%% (auth/security) supervisor, and the httpd_manager worker.
+init([ConfigFile, ConfigList, AcceptTimeout, Debug, Addr, Port]) ->
+ httpd_util:enable_debug(Debug),
+ Flags = {one_for_one, 0, 1},
+ Children = [sup_spec(httpd_acceptor_sup, Addr, Port),
+ sup_spec(httpd_misc_sup, Addr, Port),
+ worker_spec(httpd_manager, Addr, Port,
+ ConfigFile, ConfigList,AcceptTimeout)],
+ {ok, {Flags, Children}}.
+
+
+%%%=========================================================================
+%%% ??? functions
+%%%=========================================================================
+
+%% start/1: start a non-linked instance supervisor from a config file;
+%% starts linked, then unlinks on success.
+start(ConfigFile) ->
+ case start_link(ConfigFile) of
+ {ok, Pid} ->
+ unlink(Pid),
+ {ok, Pid};
+
+ Else ->
+ Else
+ end.
+
+%% start_link/1: normalise the start options into
+%% {ConfigFile, AcceptTimeout, Debug} (httpd_options/1 may throw
+%% {error, Reason}) and continue in start_link/3.
+start_link(Config) ->
+ case catch httpd_options(Config) of
+ {error,Reason} ->
+ error_logger:error_report(Reason),
+ {stop, Reason};
+ {ConfigFile,AcceptTimeout,Debug} ->
+ start_link(ConfigFile, AcceptTimeout, Debug)
+ end.
+%% start_link/3: load the config file to learn Addr/Port, then start the
+%% supervisor registered under a per-instance name.
+start_link(ConfigFile, AcceptTimeout, Debug) ->
+ case get_addr_and_port(ConfigFile) of
+ {ok, ConfigList, Addr, Port} ->
+ Name = make_name(Addr, Port),
+ SupName = {local, Name},
+ supervisor:start_link(SupName, ?MODULE,
+ [ConfigFile, ConfigList ,AcceptTimeout ,
+ Debug, Addr, Port]);
+ {error, Reason} ->
+ error_logger:error_report(Reason),
+ {stop, Reason}
+ end.
+
+
+%% start2/1: like start/1 but from an already-parsed config list.
+start2(ConfigList) ->
+ case start_link2(ConfigList) of
+ {ok, Pid} ->
+ unlink(Pid),
+ {ok, Pid};
+
+ Else ->
+ Else
+ end.
+
+
+%% start_link2/1: start from a config list — no config file, no debug
+%% options, and a fixed 15 s accept timeout.
+start_link2(ConfigList) ->
+ {ok, Addr, Port} = get_addr_and_port2(ConfigList),
+ Name = make_name(Addr, Port),
+ SupName = {local, Name},
+ Debug = [],
+ AcceptTimeout = 15000,
+ supervisor:start_link(SupName, ?MODULE,
+ [undefined, ConfigList, AcceptTimeout,
+ Debug, Addr, Port]).
+
+
+%% stop/1: stop an instance given its pid or its config file name; any
+%% other argument is a no-op. Modernised: is_pid/1 / is_list/1 guard
+%% tests instead of the obsolete pid/1 / list/1 forms.
+stop(Pid) when is_pid(Pid) ->
+ do_stop(Pid);
+stop(ConfigFile) when is_list(ConfigFile) ->
+ case get_addr_and_port(ConfigFile) of
+ {ok, _, Addr, Port} ->
+ stop(Addr, Port);
+
+ Error ->
+ Error
+ end;
+stop(_StartArgs) ->
+ ok.
+
+
+%% stop/2: stop the instance registered for Addr/Port, if running.
+stop(Addr, Port) when is_integer(Port) ->
+ Name = make_name(Addr, Port),
+ case whereis(Name) of
+ Pid when is_pid(Pid) ->
+ do_stop(Pid),
+ ok;
+ _ ->
+ not_started
+ end.
+
+
+%% stop2/1: stop the instance described by a parsed config list.
+stop2(ConfigList) when is_list(ConfigList) ->
+ {ok, Addr, Port} = get_addr_and_port2(ConfigList),
+ stop(Addr, Port).
+
+%%%=========================================================================
+%%% Internal functions
+%%%=========================================================================
+%% do_stop/1: take the whole instance supervision tree down by sending
+%% it a shutdown exit.
+do_stop(Pid) ->
+ exit(Pid, shutdown).
+
+%% sup_spec/3: child specification for a permanent child supervisor,
+%% in the {Id, MFA, Restart, Shutdown, Type, Modules} tuple form.
+sup_spec(SupModule, Addr, Port) ->
+ {{SupModule, Addr, Port},
+  {SupModule, start_link, [Addr, Port]},
+  permanent,
+  infinity,
+  supervisor,
+  [SupModule]}.
+
+%% worker_spec/6: child specification for a permanent worker (the
+%% manager), with a 4 s shutdown grace period.
+worker_spec(WorkerModule, Addr, Port, ConfigFile, ConfigList, AcceptTimeout) ->
+ {{WorkerModule, Addr, Port},
+  {WorkerModule, start_link, [ConfigFile, ConfigList, AcceptTimeout]},
+  permanent,
+  4000,
+  worker,
+  [WorkerModule]}.
+
+%% httpd_options/1: normalise start options (config file name or a
+%% property list) into {ConfigFile, AcceptTimeout, Debug}. Defaults:
+%% no debug options, 15 s accept timeout. Throws {error,...} if the
+%% mandatory config file option is missing; httpd_util:valid_options/3
+%% is called only for its validation side effect.
+httpd_options(Config) ->
+ OptionList = mk_tuple_list(Config),
+ Debug = http_util:key1search(OptionList,debug,[]),
+ AcceptTimeout = http_util:key1search(OptionList,accept_timeout,15000),
+ ConfigFile =
+ case http_util:key1search(OptionList,file) of
+ undefined -> throw({error,{mandatory_conf_file_missed}});
+ File -> File
+ end,
+ httpd_util:valid_options(Debug,AcceptTimeout,ConfigFile),
+ {ConfigFile, AcceptTimeout, Debug}.
+
+%% mk_tuple_list/1: normalise the start argument into a property list.
+%% An already-tagged {Key,Value} list is kept as-is; a plain string
+%% (a config file name) becomes [{file,Name}]. Modernised: is_list/1
+%% guard instead of the obsolete list/1 form.
+mk_tuple_list([]) ->
+ [];
+mk_tuple_list([H={_,_}|T]) ->
+ [H|mk_tuple_list(T)];
+mk_tuple_list(F) when is_list(F) ->
+ [{file,F}].
+
+%% make_name/2: registered name for this instance's top supervisor.
+make_name(Addr,Port) ->
+ httpd_util:make_name("httpd_instance_sup",Addr,Port).
+
+%% get_addr_and_port/1: load the config file and extract bind address
+%% and port; returns the parsed list too so callers load only once.
+get_addr_and_port(ConfigFile) ->
+ case httpd_conf:load(ConfigFile) of
+ {ok, ConfigList} ->
+ {ok, Addr, Port} = get_addr_and_port2(ConfigList),
+ {ok, ConfigList, Addr, Port};
+ Error ->
+ Error
+ end.
+
+%% get_addr_and_port2/1: extract bind address (default undefined) and
+%% port (default 80) from a parsed config list.
+get_addr_and_port2(ConfigList) ->
+ Port = httpd_util:key1search(ConfigList, port, 80),
+ Addr = httpd_util:key1search(ConfigList, bind_address),
+ {ok, Addr, Port}.
diff --git a/src/couch_inets/httpd_manager.erl b/src/couch_inets/httpd_manager.erl
new file mode 100644
index 00000000..b7529f37
--- /dev/null
+++ b/src/couch_inets/httpd_manager.erl
@@ -0,0 +1,829 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+
+-module(httpd_manager).
+
+-include("httpd.hrl").
+
+-behaviour(gen_server).
+
+%% External API
+-export([start/2, start_link/2, start_link/3, stop/1, restart/1]).
+
+%% Internal API
+-export([new_connection/1, done_connection/1]).
+
+%% Module API
+-export([config_lookup/2, config_lookup/3,
+ config_multi_lookup/2, config_multi_lookup/3,
+ config_match/2, config_match/3]).
+
+%% gen_server exports
+-export([init/1,
+ handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+
+%% Management exports
+-export([block/2, block/3, unblock/1]).
+-export([get_admin_state/1, get_usage_state/1]).
+-export([is_busy/1,is_busy/2,is_busy_or_blocked/1,is_blocked/1]). %% ???????
+-export([get_status/1, get_status/2]).
+
+-export([c/1]).
+
+-record(state,{socket_type = ip_comm,
+ config_file,
+ config_db = null,
+ connections, %% Current request handlers
+ admin_state = unblocked,
+ blocker_ref = undefined,
+ blocking_tmr = undefined,
+ status = []}).
+
+
+%% c/1: debug helper — sends a fake_close request to the manager
+%% registered for Port (handled by the catch-all handle_call clause,
+%% which logs it and replies ok).
+c(Port) ->
+ Ref = httpd_util:make_name("httpd",undefined,Port),
+ gen_server:call(Ref, fake_close).
+
+
+%%
+%% External API
+%%
+
+%% start/2: start a non-linked manager; Addr/Port are read from the
+%% config list (defaults: any address, port 80), 15 s accept timeout.
+start(ConfigFile, ConfigList) ->
+ Port = httpd_util:key1search(ConfigList,port,80),
+ Addr = httpd_util:key1search(ConfigList,bind_address),
+ Name = make_name(Addr,Port),
+ gen_server:start({local,Name},?MODULE,
+ [ConfigFile, ConfigList, 15000, Addr, Port],[]).
+
+%% start_link/2,3: linked variant; start_link/2 defaults the accept
+%% timeout to 15 s.
+start_link(ConfigFile, ConfigList) ->
+ start_link(ConfigFile, ConfigList, 15000).
+start_link(ConfigFile, ConfigList, AcceptTimeout) ->
+ Port = httpd_util:key1search(ConfigList,port,80),
+ Addr = httpd_util:key1search(ConfigList,bind_address),
+ Name = make_name(Addr,Port),
+ gen_server:start_link({local, Name},?MODULE,
+ [ConfigFile, ConfigList, AcceptTimeout, Addr, Port],[]).
+
+%% stop/1: synchronous, normal shutdown of the manager.
+stop(ServerRef) ->
+ gen_server:call(ServerRef, stop).
+
+%% restart/1: reload the config file (only valid when blocked).
+restart(ServerRef) ->
+ gen_server:call(ServerRef, restart).
+
+
+%%%----------------------------------------------------------------
+
+%% block/2,3: move the server to blocked admin state.
+%% disturbing (block/2 synchronous call): active connections are killed;
+%% non_disturbing: wait — optionally bounded by Timeout — for active
+%% connections to finish before the block completes.
+block(ServerRef, disturbing) ->
+ call(ServerRef,block);
+
+block(ServerRef, non_disturbing) ->
+ do_block(ServerRef, non_disturbing, infinity).
+
+block(ServerRef, Method, Timeout) ->
+ do_block(ServerRef, Method, Timeout).
+
+
+%% The reason for not using call here, is that the manager cannot
+%% _wait_ for completion of the requests. It must be able to do
+%% other things at the same time as the blocking goes on.
+%% The manager later answers with a {block_reply, Reply, Ref} message.
+do_block(ServerRef, Method, infinity) ->
+ Ref = make_ref(),
+ cast(ServerRef, {block, Method, infinity, self(), Ref}),
+ receive
+ {block_reply, Reply, Ref} ->
+ Reply
+ end;
+do_block(ServerRef,Method,Timeout) when Timeout > 0 ->
+ Ref = make_ref(),
+ cast(ServerRef,{block,Method,Timeout,self(),Ref}),
+ receive
+ {block_reply,Reply,Ref} ->
+ Reply
+ end.
+
+
+%%%----------------------------------------------------------------
+
+%% unblock
+
+%% unblock/1: return the server to unblocked admin state.
+unblock(ServerRef) ->
+ call(ServerRef,unblock).
+
+%% get admin/usage state
+
+get_admin_state(ServerRef) ->
+ call(ServerRef,get_admin_state).
+
+get_usage_state(ServerRef) ->
+ call(ServerRef,get_usage_state).
+
+
+%% get_status/1,2: status proplist (connection counts, timestamps and
+%% manager process info); get_status/2 adds a call timeout.
+get_status(ServerRef) ->
+ gen_server:call(ServerRef,get_status).
+
+get_status(ServerRef,Timeout) ->
+ gen_server:call(ServerRef,get_status,Timeout).
+
+%%
+%% Internal API
+%%
+
+
+%% new_connection
+
+%% new_connection/1: a request handler registers itself; synchronous so
+%% the manager can accept or reject ({reject, busy | blocked}) it.
+new_connection(Manager) ->
+ gen_server:call(Manager, {new_connection, self()}, infinity).
+
+%% done_connection/1: a request handler reports completion (async).
+
+done_connection(Manager) ->
+ gen_server:cast(Manager, {done_connection, self()}).
+
+
+%% is_busy(ServerRef) -> true | false
+%%
+%% Tests if the server is (in usage state) busy,
+%% i.e. has reached the heavy load limit.
+%%
+
+is_busy(ServerRef) ->
+ gen_server:call(ServerRef,is_busy).
+
+is_busy(ServerRef,Timeout) ->
+ gen_server:call(ServerRef,is_busy,Timeout).
+
+
+%% is_busy_or_blocked(ServerRef) -> busy | blocked | false
+%%
+%% Tests if the server is busy (usage state), i.e. has reached
+%% the heavy load limit, or blocked (admin state).
+%%
+
+is_busy_or_blocked(ServerRef) ->
+ gen_server:call(ServerRef,is_busy_or_blocked).
+
+
+%% is_blocked(ServerRef) -> true | false
+%%
+%% Tests if the server is blocked (admin state).
+%%
+
+is_blocked(ServerRef) ->
+ gen_server:call(ServerRef,is_blocked).
+
+
+%%
+%% Module API. Theese functions are intended for use from modules only.
+%%
+
+%% config_lookup/config_multi_lookup/config_match: query the config
+%% table of the manager registered for Addr/Port (Addr defaults to
+%% undefined). Intended for use from httpd modules only.
+config_lookup(Port, Query) ->
+ config_lookup(undefined, Port, Query).
+config_lookup(Addr, Port, Query) ->
+ Name = httpd_util:make_name("httpd",Addr,Port),
+ gen_server:call(whereis(Name), {config_lookup, Query}).
+
+config_multi_lookup(Port, Query) ->
+ config_multi_lookup(undefined,Port,Query).
+config_multi_lookup(Addr,Port, Query) ->
+ Name = httpd_util:make_name("httpd",Addr,Port),
+ gen_server:call(whereis(Name), {config_multi_lookup, Query}).
+
+config_match(Port, Pattern) ->
+ config_match(undefined,Port,Pattern).
+config_match(Addr, Port, Pattern) ->
+ Name = httpd_util:make_name("httpd",Addr,Port),
+ gen_server:call(whereis(Name), {config_match, Pattern}).
+
+
+%%
+%% Server call-back functions
+%%
+
+%% init
+
+%% init/1 (gen_server callback): traps exits so crashing request
+%% handlers arrive as 'EXIT' messages; delegates to do_init/5 and turns
+%% thrown/returned errors into a logged {stop, Reason}.
+init([ConfigFile, ConfigList, AcceptTimeout, Addr, Port]) ->
+ process_flag(trap_exit, true),
+ case (catch do_init(ConfigFile, ConfigList, AcceptTimeout, Addr, Port)) of
+ {error, Reason} ->
+ String = lists:flatten(io_lib:format("Failed initiating web server: ~n~p~n~p~n",[ConfigFile,Reason])),
+ error_logger:error_report(String),
+ {stop, Reason};
+ {ok, State} ->
+ {ok, State}
+ end.
+
+
+%% do_init/5: store the config, start the acceptor for this Addr/Port,
+%% and build the initial #state{} with empty connections and a fresh
+%% status proplist.
+do_init(ConfigFile, ConfigList, AcceptTimeout, Addr, Port) ->
+ ConfigDB = do_initial_store(ConfigList),
+ SocketType = httpd_conf:config(ConfigDB),
+ case httpd_acceptor_sup:start_acceptor(SocketType, Addr,
+ Port, ConfigDB, AcceptTimeout) of
+ {ok, _Pid} ->
+ Status = [{max_conn,0}, {last_heavy_load,never},
+ {last_connection,never}],
+ State = #state{socket_type = SocketType,
+ config_file = ConfigFile,
+ config_db = ConfigDB,
+ connections = [],
+ status = Status},
+ {ok, State};
+ Else ->
+ Else
+ end.
+
+
+%% do_initial_store/1: store the config list; rethrows errors so init/1
+%% catches them uniformly.
+do_initial_store(ConfigList) ->
+ case httpd_conf:store(ConfigList) of
+ {ok, ConfigDB} ->
+ ConfigDB;
+ {error, Reason} ->
+ throw({error, Reason})
+ end.
+
+
+
+%% handle_call
+
+%% handle_call/3 (gen_server callback): config queries, status/state
+%% queries, restart/block/unblock administration and new-connection
+%% admission. Unknown requests are logged and answered with ok.
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State};
+
+handle_call({config_lookup, Query}, _From, State) ->
+ Res = httpd_util:lookup(State#state.config_db, Query),
+ {reply, Res, State};
+
+handle_call({config_multi_lookup, Query}, _From, State) ->
+ Res = httpd_util:multi_lookup(State#state.config_db, Query),
+ {reply, Res, State};
+
+handle_call({config_match, Query}, _From, State) ->
+ Res = ets:match_object(State#state.config_db, Query),
+ {reply, Res, State};
+
+%% Status proplist: current connection count + stored status + process
+%% info about the manager itself.
+handle_call(get_status, _From, State) ->
+ ManagerStatus = manager_status(self()),
+ S1 = [{current_conn,length(State#state.connections)}|State#state.status]++
+ [ManagerStatus],
+ {reply,S1,State};
+
+handle_call(is_busy, _From, State) ->
+ Reply = case get_ustate(State) of
+ busy ->
+ true;
+ _ ->
+ false
+ end,
+ {reply,Reply,State};
+
+handle_call(is_busy_or_blocked, _From, State) ->
+ Reply =
+ case get_astate(State) of
+ unblocked ->
+ case get_ustate(State) of
+ busy ->
+ busy;
+ _ ->
+ false
+ end;
+ _ ->
+ blocked
+ end,
+ {reply,Reply,State};
+
+handle_call(is_blocked, _From, State) ->
+ Reply =
+ case get_astate(State) of
+ unblocked ->
+ false;
+ _ ->
+ true
+ end,
+ {reply,Reply,State};
+
+handle_call(get_admin_state, _From, State) ->
+ Reply = get_astate(State),
+ {reply,Reply,State};
+
+handle_call(get_usage_state, _From, State) ->
+ Reply = get_ustate(State),
+ {reply,Reply,State};
+
+%% restart is only allowed while blocked (config reload).
+handle_call(restart, _From, State) when State#state.admin_state == blocked ->
+ case handle_restart(State) of
+ {stop, Reply,S1} ->
+ {stop, Reply, S1};
+ {_, Reply, S1} ->
+ {reply,Reply,S1}
+ end;
+
+handle_call(restart, _From, State) ->
+ {reply,{error,{invalid_admin_state,State#state.admin_state}},State};
+
+%% Synchronous (disturbing) block — see block/2.
+handle_call(block, _From, State) ->
+ {Reply,S1} = handle_block(State),
+ {reply,Reply,S1};
+
+handle_call(unblock, {From,_Tag}, State) ->
+ {Reply,S1} = handle_unblock(State,From),
+ {reply, Reply, S1};
+
+handle_call({new_connection, Pid}, _From, State) ->
+ {Status, NewState} = handle_new_connection(State, Pid),
+ {reply, Status, NewState};
+
+handle_call(Request, From, State) ->
+ String =
+ lists:flatten(
+ io_lib:format("Unknown request "
+ "~n ~p"
+ "~nto manager (~p)"
+ "~nfrom ~p",
+ [Request, self(), From])),
+ report_error(State,String),
+ {reply, ok, State}.
+
+
+%% handle_cast
+
+%% handle_cast/2: connection completion and asynchronous block requests
+%% (the block reply is sent later as a {block_reply,_,_} message).
+handle_cast({done_connection, Pid}, State) ->
+ S1 = handle_done_connection(State, Pid),
+ {noreply, S1};
+
+handle_cast({block, disturbing, Timeout, From, Ref}, State) ->
+ S1 = handle_block(State, Timeout, From, Ref),
+ {noreply,S1};
+
+handle_cast({block, non_disturbing, Timeout, From, Ref}, State) ->
+ S1 = handle_nd_block(State, Timeout, From, Ref),
+ {noreply,S1};
+
+handle_cast(Message, State) ->
+ String =
+ lists:flatten(
+ io_lib:format("Unknown message "
+ "~n ~p"
+ "~nto manager (~p)",
+ [Message, self()])),
+ report_error(State, String),
+ {noreply, State}.
+
+%% handle_info/2: block timeouts, blocker 'DOWN' monitoring, and 'EXIT'
+%% messages from linked request handlers (trap_exit is set in init/1).
+
+handle_info({block_timeout, Method}, State) ->
+ S1 = handle_block_timeout(State,Method),
+ {noreply, S1};
+
+handle_info({'DOWN', Ref, process, _Object, _Info}, State) ->
+ S1 =
+ case State#state.blocker_ref of
+ Ref ->
+ handle_blocker_exit(State);
+ _ ->
+ %% Not our blocker, so ignore
+ State
+ end,
+ {noreply, S1};
+
+handle_info({'EXIT', _, normal}, State) ->
+ {noreply, State};
+
+%% 'blocked' exits are the ones we sent ourselves when killing handlers.
+handle_info({'EXIT', _, blocked}, S) ->
+ {noreply, S};
+
+handle_info({'EXIT', Pid, Reason}, State) ->
+ S1 = check_connections(State, Pid, Reason),
+ {noreply, S1};
+
+handle_info(Info, State) ->
+ String =
+ lists:flatten(
+ io_lib:format("Unknown info "
+ "~n ~p"
+ "~nto manager (~p)",
+ [Info, self()])),
+ report_error(State, String),
+ {noreply, State}.
+
+
+%% terminate
+
+%% terminate/2: drop the stored configuration on shutdown.
+terminate(_, #state{config_db = Db}) ->
+ httpd_conf:remove_all(Db),
+ ok.
+
+
+%% code_change({down,ToVsn}, State, Extra)
+%%
+%% NOTE:
+%% Actually upgrade from 2.5.1 to 2.5.3 and downgrade from
+%% 2.5.3 to 2.5.1 is done with an application restart, so
+%% these functions are actually never used. The reason for keeping
+%% this stuff is only for future use.
+%%
+code_change({down,_ToVsn}, State, _Extra) ->
+ {ok,State};
+
+%% code_change(FromVsn, State, Extra)
+%%
+code_change(_FromVsn, State, _Extra) ->
+ {ok,State}.
+
+
+
+%% -------------------------------------------------------------------------
+%% check_connection
+%%
+%%
+%%
+%%
+
+%% check_connections/3: an 'EXIT' arrived — if Pid was a request
+%% handler, drop it from the connection list and log the crash. While
+%% shutting_down, the last handler disappearing completes the pending
+%% block: stop the timer and send {block_reply,ok,Ref} to the blocker.
+check_connections(#state{connections = []} = State, _Pid, _Reason) ->
+ State;
+check_connections(#state{admin_state = shutting_down,
+ connections = Connections} = State, Pid, Reason) ->
+ %% Could be a crashing request handler
+ case lists:delete(Pid, Connections) of
+ [] -> % Crashing request handler => block complete
+ String =
+ lists:flatten(
+ io_lib:format("request handler (~p) crashed:"
+ "~n ~p", [Pid, Reason])),
+ report_error(State, String),
+ demonitor_blocker(State#state.blocker_ref),
+ {Tmr,From,Ref} = State#state.blocking_tmr,
+ stop_block_tmr(Tmr),
+ From ! {block_reply,ok,Ref},
+ State#state{admin_state = blocked, connections = [],
+ blocker_ref = undefined};
+ Connections1 ->
+ State#state{connections = Connections1}
+ end;
+check_connections(#state{connections = Connections} = State, Pid, Reason) ->
+ case lists:delete(Pid, Connections) of
+ Connections -> % Not a request handler, so ignore
+ State;
+ NewConnections ->
+ String =
+ lists:flatten(
+ io_lib:format("request handler (~p) crashed:"
+ "~n ~p", [Pid, Reason])),
+ report_error(State, String),
+ State#state{connections = NewConnections}
+ end.
+
+
+%% -------------------------------------------------------------------------
+%% handle_[new | done]_connection
+%%
+%%
+%%
+%%
+
+%% handle_new_connection/2: admit or reject a registering request
+%% handler based on current usage state (busy?) and admin state
+%% (blocked/shutting_down?). Admitted handlers are linked so their
+%% exit is seen in handle_info/2.
+handle_new_connection(State, Handler) ->
+ UsageState = get_ustate(State),
+ AdminState = get_astate(State),
+ handle_new_connection(UsageState, AdminState, State, Handler).
+
+handle_new_connection(busy, unblocked, State, _Handler) ->
+ Status = update_heavy_load_status(State#state.status),
+ {{reject, busy},
+ State#state{status = Status}};
+
+handle_new_connection(_UsageState, unblocked, State, Handler) ->
+ Connections = State#state.connections,
+ Status = update_connection_status(State#state.status,
+ length(Connections)+1),
+ link(Handler),
+ {{ok, accept},
+ State#state{connections = [Handler|Connections], status = Status}};
+
+handle_new_connection(_UsageState, _AdminState, State, _Handler) ->
+ {{reject, blocked},
+ State}.
+
+%% handle_done_connection/2: remove a finished handler. While
+%% shutting_down, the last handler finishing completes the pending
+%% non-disturbing block (reply ok to the blocker, stop the timer).
+handle_done_connection(#state{admin_state = shutting_down,
+ connections = Connections} = State, Handler) ->
+ unlink(Handler),
+ case lists:delete(Handler, Connections) of
+ [] -> % Ok, block complete
+ demonitor_blocker(State#state.blocker_ref),
+ {Tmr,From,Ref} = State#state.blocking_tmr,
+ stop_block_tmr(Tmr),
+ From ! {block_reply,ok,Ref},
+ State#state{admin_state = blocked, connections = [],
+ blocker_ref = undefined};
+ Connections1 ->
+ State#state{connections = Connections1}
+ end;
+
+handle_done_connection(#state{connections = Connections} = State, Handler) ->
+ State#state{connections = lists:delete(Handler, Connections)}.
+
+
+%% -------------------------------------------------------------------------
+%% handle_block
+%%
+%%
+%%
+%%
+%% handle_block/1,2: synchronous disturbing block — kill all active
+%% handlers and go straight to blocked. Blocking while shutting_down
+%% is an error; blocking while already blocked is a no-op.
+handle_block(#state{admin_state = AdminState} = S) ->
+ handle_block(S, AdminState).
+
+handle_block(S,unblocked) ->
+ %% Kill all connections
+ [kill_handler(Pid) || Pid <- S#state.connections],
+ {ok,S#state{connections = [], admin_state = blocked}};
+handle_block(S,blocked) ->
+ {ok,S};
+handle_block(S,shutting_down) ->
+ {{error,shutting_down},S}.
+
+
+%% kill_handler/1: exit reason 'blocked' is filtered in handle_info/2.
+kill_handler(Pid) ->
+ exit(Pid, blocked).
+
+%% handle_block/4: asynchronous disturbing block with timeout. Note
+%% infinity also takes the first clause (atom >= integer is true in
+%% Erlang term order).
+handle_block(S,Timeout,From,Ref) when Timeout >= 0 ->
+ do_block(S,Timeout,From,Ref);
+
+handle_block(S,Timeout,From,Ref) ->
+ Reply = {error,{invalid_block_request,Timeout}},
+ From ! {block_reply,Reply,Ref},
+ S.
+
+do_block(S,Timeout,From,Ref) ->
+ case S#state.connections of
+ [] ->
+ %% Already in idle usage state => go directly to blocked
+ From ! {block_reply,ok,Ref},
+ S#state{admin_state = blocked};
+ _ ->
+ %% Active or Busy usage state => go to shutting_down
+ %% Make sure we get to know if blocker dies...
+ MonitorRef = monitor_blocker(From),
+ Tmr = {start_block_tmr(Timeout,disturbing),From,Ref},
+ S#state{admin_state = shutting_down,
+ blocker_ref = MonitorRef, blocking_tmr = Tmr}
+ end.
+
+%% handle_nd_block/4 / do_nd_block/4: non-disturbing variant — same
+%% bookkeeping, but on timeout the block fails instead of killing the
+%% remaining handlers (see handle_block_timeout1/3).
+handle_nd_block(S,infinity,From,Ref) ->
+ do_nd_block(S,infinity,From,Ref);
+
+handle_nd_block(S,Timeout,From,Ref) when Timeout >= 0 ->
+ do_nd_block(S,Timeout,From,Ref);
+
+handle_nd_block(S,Timeout,From,Ref) ->
+ Reply = {error,{invalid_block_request,Timeout}},
+ From ! {block_reply,Reply,Ref},
+ S.
+
+do_nd_block(S,Timeout,From,Ref) ->
+ case S#state.connections of
+ [] ->
+ %% Already in idle usage state => go directly to blocked
+ From ! {block_reply,ok,Ref},
+ S#state{admin_state = blocked};
+ _ ->
+ %% Active or Busy usage state => go to shutting_down
+ %% Make sure we get to know if blocker dies...
+ MonitorRef = monitor_blocker(From),
+ Tmr = {start_block_tmr(Timeout,non_disturbing),From,Ref},
+ S#state{admin_state = shutting_down,
+ blocker_ref = MonitorRef, blocking_tmr = Tmr}
+ end.
+
+%% handle_block_timeout/1,3: the block timer fired before all handlers
+%% finished. non_disturbing => fail the block; disturbing => kill the
+%% remaining handlers and complete it.
+handle_block_timeout(S,Method) ->
+ %% Time to take this to the road...
+ demonitor_blocker(S#state.blocker_ref),
+ handle_block_timeout1(S,Method,S#state.blocking_tmr).
+
+handle_block_timeout1(S,non_disturbing,{_,From,Ref}) ->
+ From ! {block_reply,{error,timeout},Ref},
+ S#state{admin_state = unblocked,
+ blocker_ref = undefined, blocking_tmr = undefined};
+
+handle_block_timeout1(S,disturbing,{_,From,Ref}) ->
+ [exit(Pid,blocked) || Pid <- S#state.connections],
+
+ From ! {block_reply,ok,Ref},
+ S#state{admin_state = blocked, connections = [],
+ blocker_ref = undefined, blocking_tmr = undefined};
+
+handle_block_timeout1(S,Method,{_,From,Ref}) ->
+ From ! {block_reply,{error,{unknown_block_method,Method}},Ref},
+ S#state{admin_state = blocked, connections = [],
+ blocker_ref = undefined, blocking_tmr = undefined};
+
+%% Catch-all: no (valid) timer info recorded — just unblock.
+handle_block_timeout1(S, _Method, _TmrInfo) ->
+ S#state{admin_state = unblocked,
+ blocker_ref = undefined, blocking_tmr = undefined}.
+
+%% handle_unblock/2,3: return to unblocked admin state. If a block is
+%% still in progress (blocking_tmr holds {Tmr,From,Ref}), stop its
+%% timer and tell the original blocker that someone else unblocked.
+%% BUGFIX: the original passed the whole {Tmr,From,Ref} tuple to
+%% stop_block_tmr/1, whose erlang:cancel_timer/1 call would badarg on a
+%% non-reference; only the timer ref itself must be cancelled (compare
+%% handle_blocker_exit/1 below).
+handle_unblock(S, FromA) ->
+ handle_unblock(S, FromA, S#state.admin_state).
+
+handle_unblock(S, _FromA, unblocked) ->
+ {ok,S};
+handle_unblock(S, FromA, _AdminState) ->
+ case S#state.blocking_tmr of
+ {Tmr,FromB,Ref} ->
+ %% Another process is trying to unblock
+ %% Inform the blocker
+ stop_block_tmr(Tmr),
+ FromB ! {block_reply, {error,{unblocked,FromA}},Ref};
+ _ ->
+ ok
+ end,
+ {ok,S#state{admin_state = unblocked, blocking_tmr = undefined}}.
+
+%% The blocker died so we give up on the block.
+handle_blocker_exit(S) ->
+ {Tmr,_From,_Ref} = S#state.blocking_tmr,
+ stop_block_tmr(Tmr),
+ S#state{admin_state = unblocked,
+ blocker_ref = undefined, blocking_tmr = undefined}.
+
+
+
+%% -------------------------------------------------------------------------
+%% handle_restart
+%%
+%%
+%%
+%%
+%% handle_restart/1: reload the config file (only reached while
+%% blocked — see handle_call(restart, ...)). Port, bind address and
+%% socket type must not change across a restart.
+handle_restart(#state{config_file = undefined} = State) ->
+ {continue, {error, undefined_config_file}, State};
+handle_restart(#state{config_db = Db, config_file = ConfigFile} = State) ->
+ {ok, Config} = httpd_conf:load(ConfigFile),
+ case (catch check_constant_values(Db, Config)) of
+ ok ->
+ %% If something goes wrong between the remove
+ %% and the store we're fu-ed
+ httpd_conf:remove_all(Db),
+ case httpd_conf:store(Config) of
+ {ok, NewConfigDB} ->
+ {continue, ok, State#state{config_db = NewConfigDB}};
+ Error ->
+ {stop, Error, State}
+ end;
+ Error ->
+ {continue, Error, State}
+ end.
+
+
+%% check_constant_values/2: throws {error,...} if port, bind address or
+%% socket type differ between the stored config and the reloaded one.
+check_constant_values(Db, Config) ->
+ %% Check port number
+ Port = httpd_util:lookup(Db,port),
+ case httpd_util:key1search(Config,port) of %% MUST be equal
+ Port ->
+ ok;
+ OtherPort ->
+ throw({error,{port_number_changed,Port,OtherPort}})
+ end,
+
+ %% Check bind address
+ Addr = httpd_util:lookup(Db,bind_address),
+ case httpd_util:key1search(Config,bind_address) of %% MUST be equal
+ Addr ->
+ ok;
+ OtherAddr ->
+ throw({error,{addr_changed,Addr,OtherAddr}})
+ end,
+
+ %% Check socket type
+ SockType = httpd_util:lookup(Db, com_type),
+ case httpd_util:key1search(Config, com_type) of %% MUST be equal
+ SockType ->
+ ok;
+ OtherSockType ->
+ throw({error,{sock_type_changed,SockType,OtherSockType}})
+ end,
+ ok.
+
+
+%% get_ustate(State) -> idle | active | busy
+%%
+%% Retrieve the usage state of the HTTP server:
+%% 0 active connection -> idle
+%% max_clients active connections -> busy
+%% Otherwise -> active
+%%
+%% get_ustate/1,2: usage state — idle (no connections), busy
+%% (max_clients, default 150, reached) or active.
+get_ustate(State) ->
+ get_ustate(length(State#state.connections),State).
+
+get_ustate(0,_State) ->
+ idle;
+get_ustate(ConnectionCnt,State) ->
+ ConfigDB = State#state.config_db,
+ case httpd_util:lookup(ConfigDB, max_clients, 150) of
+ ConnectionCnt ->
+ busy;
+ _ ->
+ active
+ end.
+
+
+%% get_astate/1: admin state (unblocked | shutting_down | blocked).
+get_astate(S) -> S#state.admin_state.
+
+
+%% Timer handling functions
+%% start_block_tmr/2: no timer for infinite blocks; otherwise a
+%% {block_timeout, Method} message is delivered to self() after T ms.
+start_block_tmr(infinity,_) ->
+ undefined;
+start_block_tmr(T,M) ->
+ erlang:send_after(T,self(),{block_timeout,M}).
+
+stop_block_tmr(undefined) ->
+ ok;
+stop_block_tmr(Ref) ->
+ erlang:cancel_timer(Ref).
+
+
+%% Monitor blocker functions
+%% monitor_blocker/1: monitor the process that requested a block so its
+%% death is noticed (handle_info 'DOWN'); returns the monitor reference
+%% or undefined for non-pids / failed monitors. Modernised: is_pid/1
+%% guard instead of the obsolete pid/1 form.
+monitor_blocker(Pid) when is_pid(Pid) ->
+ case (catch erlang:monitor(process,Pid)) of
+ {'EXIT', _Reason} ->
+ undefined;
+ MonitorRef ->
+ MonitorRef
+ end;
+monitor_blocker(_) ->
+ undefined.
+
+%% demonitor_blocker/1: drop a monitor set by monitor_blocker/1;
+%% tolerates undefined and stale references.
+demonitor_blocker(undefined) ->
+ ok;
+demonitor_blocker(Ref) ->
+ (catch erlang:demonitor(Ref)).
+
+
+%% Some status utility functions
+
+%% update_heavy_load_status/1: timestamp the last busy rejection.
+update_heavy_load_status(Status) ->
+ update_status_with_time(Status,last_heavy_load).
+
+%% update_connection_status/2: track the high-water mark of concurrent
+%% connections and timestamp the latest one.
+update_connection_status(Status,ConnCount) ->
+ S1 = case lists:keysearch(max_conn,1,Status) of
+ {value, {max_conn, C1}} when ConnCount > C1 ->
+ lists:keyreplace(max_conn,1,Status,{max_conn,ConnCount});
+ {value, {max_conn, _C2}} ->
+ Status;
+ false ->
+ [{max_conn, ConnCount} | Status]
+ end,
+ update_status_with_time(S1,last_connection).
+
+%% update_status_with_time/2: keyreplace — a no-op if Key is absent.
+update_status_with_time(Status,Key) ->
+ lists:keyreplace(Key,1,Status,{Key,universal_time()}).
+
+universal_time() -> calendar:universal_time().
+
+%% manager_status/1: process_info snapshot of the manager itself, for
+%% the get_status reply.
+manager_status(P) ->
+ Items = [status, message_queue_len, reductions,
+ heap_size, stack_size],
+ {manager_status, process_status(P,Items,[])}.
+
+
+%% process_status/3: collect process_info items, mapping failures
+%% (e.g. dead process) to undefined.
+process_status(P,[],L) ->
+ [{pid,P}|lists:reverse(L)];
+process_status(P,[H|T],L) ->
+ case (catch process_info(P,H)) of
+ {H, Value} ->
+ process_status(P,T,[{H,Value}|L]);
+ _ ->
+ process_status(P,T,[{H,undefined}|L])
+ end.
+
+%% make_name/2: registered name of the manager for Addr/Port.
+make_name(Addr,Port) ->
+ httpd_util:make_name("httpd",Addr,Port).
+
+
+%% report_error/2: log to the error logger and to the configured
+%% mod_log / mod_disk_log modules.
+report_error(State,String) ->
+ Cdb = State#state.config_db,
+ error_logger:error_report(String),
+ mod_log:report_error(Cdb,String),
+ mod_disk_log:report_error(Cdb,String).
+
+%% Thin wrappers around gen_server call/cast.
+call(ServerRef,Request) ->
+ gen_server:call(ServerRef,Request).
+
+cast(ServerRef,Message) ->
+ gen_server:cast(ServerRef,Message).
+
diff --git a/src/couch_inets/httpd_misc_sup.erl b/src/couch_inets/httpd_misc_sup.erl
new file mode 100644
index 00000000..35a65344
--- /dev/null
+++ b/src/couch_inets/httpd_misc_sup.erl
@@ -0,0 +1,89 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%%----------------------------------------------------------------------
+%% Purpose: The supervisor for auth and sec processes in the http server,
+%% hangs under the httpd_instance_sup_<Addr>_<Port> supervisor.
+%%----------------------------------------------------------------------
+
+-module(httpd_misc_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/2, start_auth_server/2, stop_auth_server/2,
+ start_sec_server/2, stop_sec_server/2]).
+
+%% Supervisor callback
+-export([init/1]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+%% start_link/2: start this supervisor, registered under a name derived
+%% from Addr/Port (one misc supervisor per server instance).
+start_link(Addr, Port) ->
+ SupName = make_name(Addr, Port),
+ supervisor:start_link({local, SupName}, ?MODULE, []).
+
+%%----------------------------------------------------------------------
+%% Function: [start|stop]_[auth|sec]_server/2
+%% Description: Starts/stops an [auth | security] worker (child)
+%% process dynamically under this supervisor.
+%%----------------------------------------------------------------------
+start_auth_server(Addr, Port) ->
+ start_permanent_worker(mod_auth_server, Addr, Port, [gen_server]).
+
+stop_auth_server(Addr, Port) ->
+ stop_permanent_worker(mod_auth_server, Addr, Port).
+
+
+start_sec_server(Addr, Port) ->
+ start_permanent_worker(mod_security_server, Addr, Port, [gen_server]).
+
+stop_sec_server(Addr, Port) ->
+ stop_permanent_worker(mod_security_server, Addr, Port).
+
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+%% init/1: starts with no static children; workers are added on demand
+%% via supervisor:start_child/2.
+init(_) ->
+ Flags = {one_for_one, 0, 1},
+ Workers = [],
+ {ok, {Flags, Workers}}.
+
+%%%=========================================================================
+%%% Internal functions
+%%%=========================================================================
+%% start_permanent_worker/4: add a permanent worker child (keyed by
+%% {Mod,Addr,Port}) to the instance's misc supervisor.
+start_permanent_worker(Mod, Addr, Port, Modules) ->
+ SupName = make_name(Addr, Port),
+ Spec = {{Mod, Addr, Port},
+ {Mod, start_link, [Addr, Port]},
+ permanent, timer:seconds(1), worker, [Mod] ++ Modules},
+ supervisor:start_child(SupName, Spec).
+
+%% stop_permanent_worker/3: terminate AND delete the child spec, so it
+%% can be started again later under the same key.
+stop_permanent_worker(Mod, Addr, Port) ->
+ SupName = make_name(Addr, Port),
+ Name = {Mod, Addr, Port},
+ case supervisor:terminate_child(SupName, Name) of
+ ok ->
+ supervisor:delete_child(SupName, Name);
+ Error ->
+ Error
+ end.
+
+%% make_name/2: registered name of this misc supervisor for Addr/Port.
+make_name(Addr,Port) ->
+ httpd_util:make_name("httpd_misc_sup",Addr,Port).
diff --git a/src/couch_inets/httpd_request.erl b/src/couch_inets/httpd_request.erl
new file mode 100644
index 00000000..bce7e725
--- /dev/null
+++ b/src/couch_inets/httpd_request.erl
@@ -0,0 +1,337 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+
+-module(httpd_request).
+
+-include("http_internal.hrl").
+-include("httpd.hrl").
+
+-export([parse/1, whole_body/2, validate/3, update_mod_data/5,
+ body_data/2]).
+
+%% Callback API - used for example if the header/body is received a
+%% little at a time on a socket.
+-export([parse_method/1, parse_uri/1, parse_version/1, parse_headers/1,
+ whole_body/1]).
+
+%%%=========================================================================
+%%% Internal application API
+%%%=========================================================================
+parse([Bin, MaxHeaderSize]) ->
+ parse_method(Bin, [], MaxHeaderSize, []).
+
+%% Functions that may be returned during the decoding process
+%% if the input data is incomplete.
+parse_method([Bin, Method, MaxHeaderSize, Result]) ->
+ parse_method(Bin, Method, MaxHeaderSize, Result).
+
+parse_uri([Bin, URI, MaxHeaderSize, Result]) ->
+ parse_uri(Bin, URI, MaxHeaderSize, Result).
+
+parse_version([Bin, Rest, Version, MaxHeaderSize, Result]) ->
+ parse_version(<<Rest/binary, Bin/binary>>, Version, MaxHeaderSize,
+ Result).
+
+parse_headers([Bin, Rest, Header, Headers, MaxHeaderSize, Result]) ->
+ parse_headers(<<Rest/binary, Bin/binary>>,
+ Header, Headers, MaxHeaderSize, Result).
+
+whole_body([Bin, Body, Length]) ->
+ whole_body(<<Body/binary, Bin/binary>>, Length).
+
+
+%% Separate the body for this request from a possible pipelined new
+%% request and convert the body data to "string" format.
+body_data(Headers, Body) ->
+ ContentLength = list_to_integer(Headers#http_request_h.'content-length'),
+ case size(Body) - ContentLength of
+ 0 ->
+ {binary_to_list(Body), <<>>};
+ _ ->
+ <<BodyThisReq:ContentLength/binary, Next/binary>> = Body,
+ {binary_to_list(BodyThisReq), Next}
+ end.
+
+%%-------------------------------------------------------------------------
+%% validate(Method, Uri, Version) -> ok | {error, {bad_request, Reason} |
+%% {error, {not_supported, {Method, Uri, Version}}
+%% Method = "HEAD" | "GET" | "POST" | "TRACE"
+%% Uri = uri()
+%% Version = "HTTP/N.M"
+%% Description: Checks that HTTP-request-line is valid.
+%%-------------------------------------------------------------------------
+validate("HEAD", Uri, "HTTP/1." ++ _N) ->
+ validate_uri(Uri);
+validate("GET", Uri, []) -> %% Simple HTTP/0.9
+ validate_uri(Uri);
+validate("GET", Uri, "HTTP/0.9") ->
+ validate_uri(Uri);
+validate("GET", Uri, "HTTP/1." ++ _N) ->
+ validate_uri(Uri);
+validate("POST", Uri, "HTTP/1." ++ _N) ->
+ validate_uri(Uri);
+validate("TRACE", Uri, "HTTP/1." ++ N) when hd(N) >= $1 ->
+ validate_uri(Uri);
+validate("PUT", Uri, "HTTP/1." ++ _N) ->
+ validate_uri(Uri);
+validate("DELETE", Uri, "HTTP/1." ++ _N) ->
+ validate_uri(Uri);
+validate(Method, Uri, Version) ->
+ {error, {not_supported, {Method, Uri, Version}}}.
+
+%%----------------------------------------------------------------------
+%% The request is passed through the server as a record of type mod;
+%% this function creates it.
+%% ----------------------------------------------------------------------
+update_mod_data(ModData, Method, RequestURI, HTTPVersion, Headers)->
+ ParsedHeaders = tagup_header(Headers),
+ PersistentConn = get_persistens(HTTPVersion, ParsedHeaders,
+ ModData#mod.config_db),
+ {ok, ModData#mod{data = [],
+ method = Method,
+ absolute_uri = format_absolute_uri(RequestURI,
+ ParsedHeaders),
+ request_uri = format_request_uri(RequestURI),
+ http_version = HTTPVersion,
+ request_line = Method ++ " " ++ RequestURI ++
+ " " ++ HTTPVersion,
+ parsed_header = ParsedHeaders,
+ connection = PersistentConn}}.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+parse_method(<<>>, Method, MaxHeaderSize, Result) ->
+ {?MODULE, parse_method, [Method, MaxHeaderSize, Result]};
+parse_method(<<?SP, Rest/binary>>, Method, MaxHeaderSize, Result) ->
+ parse_uri(Rest, [], MaxHeaderSize,
+ [string:strip(lists:reverse(Method)) | Result]);
+parse_method(<<Octet, Rest/binary>>, Method, MaxHeaderSize, Result) ->
+ parse_method(Rest, [Octet | Method], MaxHeaderSize, Result).
+
+parse_uri(<<>>, URI, MaxHeaderSize, Result) ->
+ {?MODULE, parse_uri, [URI, MaxHeaderSize, Result]};
+parse_uri(<<?SP, Rest/binary>>, URI, MaxHeaderSize, Result) ->
+ parse_version(Rest, [], MaxHeaderSize,
+ [string:strip(lists:reverse(URI)) | Result]);
+%% Can happen if it is a simple HTTP/0.9 request, i.e. "GET /\r\n\r\n"
+parse_uri(<<?CR, _Rest/binary>> = Data, URI, MaxHeaderSize, Result) ->
+ parse_version(Data, [], MaxHeaderSize,
+ [string:strip(lists:reverse(URI)) | Result]);
+parse_uri(<<Octet, Rest/binary>>, URI, MaxHeaderSize, Result) ->
+ parse_uri(Rest, [Octet | URI], MaxHeaderSize, Result).
+
+parse_version(<<>>, Version, MaxHeaderSize, Result) ->
+ {?MODULE, parse_version, [<<>>, Version, MaxHeaderSize, Result]};
+parse_version(<<?CR, ?LF, Rest/binary>>, Version, MaxHeaderSize, Result) ->
+ parse_headers(Rest, [], [], MaxHeaderSize,
+ [string:strip(lists:reverse(Version)) | Result]);
+parse_version(<<?CR>> = Data, Version, MaxHeaderSize, Result) ->
+ {?MODULE, parse_version, [Data, Version, MaxHeaderSize, Result]};
+parse_version(<<Octet, Rest/binary>>, Version, MaxHeaderSize, Result) ->
+ parse_version(Rest, [Octet | Version], MaxHeaderSize, Result).
+
+parse_headers(<<>>, Header, Headers, MaxHeaderSize, Result) ->
+ {?MODULE, parse_headers, [<<>>, Header, Headers, MaxHeaderSize, Result]};
+parse_headers(<<?CR,?LF,?CR,?LF,Body/binary>>, [], [], _, Result) ->
+ NewResult = list_to_tuple(lists:reverse([Body, {#http_request_h{}, []} |
+ Result])),
+ {ok, NewResult};
+parse_headers(<<?CR,?LF,?CR,?LF,Body/binary>>, Header, Headers,
+ MaxHeaderSize, Result) ->
+ HTTPHeaders = [lists:reverse(Header) | Headers],
+ Length = lists:foldl(fun(H, Acc) -> length(H) + Acc end,
+ 0, HTTPHeaders),
+ case ((Length > MaxHeaderSize) or (MaxHeaderSize == nolimit)) of
+ true ->
+ {error, {header_too_long, MaxHeaderSize},
+ lists:nth(3, lists:reverse(Result))}; % HTTP Version
+ false ->
+ RequestHeaderRcord =
+ http_request:headers(HTTPHeaders, #http_request_h{}),
+ NewResult =
+ list_to_tuple(lists:reverse([Body, {RequestHeaderRcord,
+ HTTPHeaders} | Result])),
+ {ok, NewResult}
+ end;
+parse_headers(<<?CR,?LF,?CR>> = Data, Header, Headers,
+ MaxHeaderSize, Result) ->
+ {?MODULE, parse_headers, [Data, Header, Headers, MaxHeaderSize, Result]};
+
+%% There were no headers, which is unlikely to happen.
+parse_headers(<<?CR,?LF>>, [], [], _, Result) ->
+ NewResult = list_to_tuple(lists:reverse([<<>>, {#http_request_h{}, []} |
+ Result])),
+ {ok, NewResult};
+parse_headers(<<?CR,?LF>> = Data, Header, Headers,
+ MaxHeaderSize, Result) ->
+ {?MODULE, parse_headers, [Data, Header, Headers, MaxHeaderSize, Result]};
+parse_headers(<<?CR,?LF, Octet, Rest/binary>>, Header, Headers,
+ MaxHeaderSize, Result) ->
+ parse_headers(Rest, [Octet], [lists:reverse(Header) | Headers],
+ MaxHeaderSize, Result);
+parse_headers(<<?CR>> = Data, Header, Headers,
+ MaxHeaderSize, Result) ->
+ {?MODULE, parse_headers, [Data, Header, Headers, MaxHeaderSize, Result]};
+parse_headers(<<Octet, Rest/binary>>, Header, Headers,
+ MaxHeaderSize, Result) ->
+ parse_headers(Rest, [Octet | Header], Headers, MaxHeaderSize, Result).
+
+whole_body(Body, Length) ->
+ case size(Body) of
+ N when N < Length, Length > 0 ->
+ {?MODULE, whole_body, [Body, Length]};
+ N when N >= Length, Length >= 0 ->
+ %% When a client uses pipelining trailing data
+ %% may be part of the next request!
+ %% Trailing data will be separated from
+ %% the actual body in body_data/2.
+ {ok, Body}
+ end.
+
+%% Prevent people from trying to access directories/files
+%% relative to the ServerRoot.
+validate_uri(RequestURI) ->
+ UriNoQueryNoHex =
+ case string:str(RequestURI, "?") of
+ 0 ->
+ (catch httpd_util:decode_hex(RequestURI));
+ Ndx ->
+ (catch httpd_util:decode_hex(string:left(RequestURI, Ndx)))
+ end,
+ case UriNoQueryNoHex of
+ {'EXIT',_Reason} ->
+ {error, {bad_request, {malformed_syntax, RequestURI}}};
+ _ ->
+ Path = format_request_uri(UriNoQueryNoHex),
+ Path2=[X||X<-string:tokens(Path, "/"),X=/="."], %% OTP-5938
+ validate_path( Path2,0, RequestURI)
+ end.
+
+validate_path([], _, _) ->
+ ok;
+validate_path([".." | _], 0, RequestURI) ->
+ {error, {bad_request, {forbidden, RequestURI}}};
+validate_path([".." | Rest], N, RequestURI) ->
+ validate_path(Rest, N - 1, RequestURI);
+validate_path([_ | Rest], N, RequestURI) ->
+ validate_path(Rest, N + 1, RequestURI).
+
+%%----------------------------------------------------------------------
+%% There are 3 possible forms of the request URI
+%%
+%% 1. * When the request is not for a special asset; it is instead
+%% directed to the server itself
+%%
+%% 2. absoluteURI the whole servername port and asset is in the request
+%%
+%% 3. The most common form that http/1.0 used abs path that is a path
+%% to the requested asset.
+%%----------------------------------------------------------------------
+format_request_uri("*")->
+ "*";
+format_request_uri("http://" ++ ServerAndPath) ->
+ remove_server(ServerAndPath);
+
+format_request_uri("HTTP://" ++ ServerAndPath) ->
+ remove_server(ServerAndPath);
+
+format_request_uri(ABSPath) ->
+ ABSPath.
+
+remove_server([]) ->
+ "/";
+remove_server([$\/|Url])->
+ case Url of
+ []->
+ "/";
+ _->
+ [$\/|Url]
+ end;
+remove_server([_|Url]) ->
+ remove_server(Url).
+
+format_absolute_uri("http://"++ Uri, _)->
+ "HTTP://" ++ Uri;
+
+format_absolute_uri(OrigUri = "HTTP://" ++ _, _)->
+ OrigUri;
+
+format_absolute_uri(Uri,ParsedHeader)->
+ case httpd_util:key1search(ParsedHeader,"host") of
+ undefined ->
+ nohost;
+ Host ->
+ Host++Uri
+ end.
+
+get_persistens(HTTPVersion,ParsedHeader,ConfigDB)->
+ case httpd_util:lookup(ConfigDB, persistent_conn, true) of
+ true->
+ case HTTPVersion of
+ %%If it is a version prior to 1.1, kill the connection
+ "HTTP/1." ++ NList ->
+ case httpd_util:key1search(ParsedHeader,
+ "connection", "keep-alive") of
+ %%if the connection isn't ordered to go down,
+ %%let it live. The keep-alive value is the
+ %%older http/1.1 form; it might be older clients
+ %%that use it.
+ "keep-alive" when hd(NList) >= 49 ->
+ ?DEBUG("CONNECTION MODE: ~p",[true]),
+ true;
+ "close" ->
+ ?DEBUG("CONNECTION MODE: ~p",[false]),
+ false;
+ _Connect ->
+ ?DEBUG("CONNECTION MODE: ~p VALUE: ~p",
+ [false, _Connect]),
+ false
+ end;
+ _ ->
+ ?DEBUG("CONNECTION MODE: ~p VERSION: ~p",
+ [false, HTTPVersion]),
+ false
+ end;
+ _ ->
+ false
+ end.
+
+
+%%----------------------------------------------------------------------
+%% tagup_header
+%%
+%% Parses the header of a HTTP request and returns a key,value tuple
+%% list containing Name and Value of each header directive as of:
+%%
+%% Content-Type: multipart/mixed -> {"Content-Type", "multipart/mixed"}
+%%
+%% But in http/1.1 the field-names are case insensitive so now it must be
+%% Content-Type: multipart/mixed -> {"content-type", "multipart/mixed"}
+%% The standard furthermore says that leading and trailing white space
+%% is not a part of the fieldvalue and shall therefore be removed.
+%%----------------------------------------------------------------------
+tagup_header([]) -> [];
+tagup_header([Line|Rest]) -> [tag(Line, [])|tagup_header(Rest)].
+
+tag([], Tag) ->
+ {http_util:to_lower(lists:reverse(Tag)), ""};
+tag([$:|Rest], Tag) ->
+ {http_util:to_lower(lists:reverse(Tag)), string:strip(Rest)};
+tag([Chr|Rest], Tag) ->
+ tag(Rest, [Chr|Tag]).
+
diff --git a/src/couch_inets/httpd_request_handler.erl b/src/couch_inets/httpd_request_handler.erl
new file mode 100644
index 00000000..e6764737
--- /dev/null
+++ b/src/couch_inets/httpd_request_handler.erl
@@ -0,0 +1,516 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%% Description: Implements a request handler process for the HTTP server.
+%%
+
+-module(httpd_request_handler).
+
+-behaviour(gen_server).
+
+%% Application internal API
+-export([start/2, start/3, socket_ownership_transfered/3]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-include("httpd.hrl").
+-include("http_internal.hrl").
+
+-record(state, {mod, %% #mod{}
+ manager, %% pid()
+ status, %% accept | busy | blocked
+ mfa, %% {Module, Function, Args}
+ max_keep_alive_request = infinity, %% integer() | infinity
+ response_sent = false, %% true | false
+ timeout, %% infinity | integer() > 0
+ timer, %% ref() - Request timer
+ headers, %% #http_request_h{}
+ body %% binary()
+ }).
+
+%%====================================================================
+%% Application internal API
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start() -> {ok, Pid} | ignore | {error,Error}
+%% Description: Starts a httpd-request handler process. Intended to be
+%% called by the httpd acceptor process.
+%%--------------------------------------------------------------------
+start(Manager, ConfigDB) ->
+ start(Manager, ConfigDB, 15000).
+start(Manager, ConfigDB, AcceptTimeout) ->
+ proc_lib:start(?MODULE, init, [[Manager, ConfigDB,AcceptTimeout]]).
+
+%%--------------------------------------------------------------------
+%% socket_ownership_transfered(Pid, SocketType, Socket) -> void()
+%%
+%% Pid = pid()
+%% SocketType = ip_comm | ssl
+%% Socket = socket()
+%%
+%% Description: Send a message to the request handler process
+%% confirming that the socket ownership has now successfully been
+%% transfered to it. Intended to be called by the httpd acceptor
+%% process.
+%%--------------------------------------------------------------------
+socket_ownership_transfered(Pid, SocketType, Socket) ->
+ Pid ! {socket_ownership_transfered, SocketType, Socket}.
+
+%%--------------------------------------------------------------------
+%% Function: init(Args) -> _
+%%
+%% Description: Initiates the server. Obs special init that uses
+%% gen_server:enter_loop/3. This is used instead of the normal
+%% gen_server callback init, as a more complex init than the
+%% gen_server provides is needed.
+%%--------------------------------------------------------------------
+init([Manager, ConfigDB,AcceptTimeout]) ->
+ %% Make sure this process terminates if the httpd manager process
+ %% should die!
+ link(Manager),
+ %% At this point the function httpd_request_handler:start/2 will return.
+ proc_lib:init_ack({ok, self()}),
+
+ {SocketType, Socket} = await_socket_ownership_transfer(AcceptTimeout),
+
+ Resolve = http_transport:resolve(),
+ Peername = httpd_socket:peername(SocketType, Socket),
+ InitData = #init_data{peername = Peername, resolve = Resolve},
+ Mod = #mod{config_db = ConfigDB,
+ socket_type = SocketType,
+ socket = Socket,
+ init_data = InitData},
+
+ MaxHeaderSize = httpd_util:lookup(ConfigDB, max_header_size,
+ ?HTTP_MAX_HEADER_SIZE),
+ TimeOut = httpd_util:lookup(ConfigDB, keep_alive_timeout, 150000),
+ NrOfRequest = httpd_util:lookup(ConfigDB,
+ max_keep_alive_request, infinity),
+
+ {_, Status} = httpd_manager:new_connection(Manager),
+
+
+ State = #state{mod = Mod, manager = Manager, status = Status,
+ timeout = TimeOut, max_keep_alive_request = NrOfRequest,
+ mfa = {httpd_request, parse, [MaxHeaderSize]}},
+
+ NewState = activate_request_timeout(State),
+
+ http_transport:setopts(SocketType, Socket, [binary,{packet, 0},
+ {active, once}]),
+ gen_server:enter_loop(?MODULE, [], NewState).
+
+%%====================================================================
+%% gen_server callbacks
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% handle_call(Request, From, State) -> {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} |
+%% {stop, Reason, State}
+%% Description: Handling call messages
+%%--------------------------------------------------------------------
+handle_call(Request, From, State) ->
+ {stop, {call_api_violation, Request, From}, State}.
+
+%%--------------------------------------------------------------------
+%% handle_cast(Msg, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State}
+%% Description: Handling cast messages
+%%--------------------------------------------------------------------
+handle_cast(Msg, State) ->
+ {reply, {cast_api_violation, Msg}, State}.
+
+%%--------------------------------------------------------------------
+%% handle_info(Info, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State}
+%% Description: Handling all non call/cast messages
+%%--------------------------------------------------------------------
+handle_info({Proto, Socket, Data}, State =
+ #state{mfa = {Module, Function, Args},
+ mod = #mod{socket_type = SockType,
+ socket = Socket} = ModData}
+ = State) when Proto == tcp; Proto == ssl; Proto == dummy ->
+
+ case Module:Function([Data | Args]) of
+ {ok, Result} ->
+ NewState = cancel_request_timeout(State),
+ handle_http_msg(Result, NewState);
+ {error, {header_too_long, MaxHeaderSize}, Version} ->
+ NewModData = ModData#mod{http_version = Version},
+ httpd_response:send_status(NewModData, 413, "Header too big"),
+ Reason = io_lib:format("Header too big, max size is ~p~n",
+ [MaxHeaderSize]),
+ error_log(Reason, NewModData),
+ {stop, normal, State#state{response_sent = true,
+ mod = NewModData}};
+ NewMFA ->
+ http_transport:setopts(SockType, Socket, [{active, once}]),
+ {noreply, State#state{mfa = NewMFA}}
+ end;
+
+%% Error cases
+handle_info({tcp_closed, _}, State) ->
+ {stop, normal, State};
+handle_info({ssl_closed, _}, State) ->
+ {stop, normal, State};
+handle_info({tcp_error, _, _} = Reason, State) ->
+ {stop, Reason, State};
+handle_info({ssl_error, _, _} = Reason, State) ->
+ {stop, Reason, State};
+
+%% Timeouts
+handle_info(timeout, #state{mod = ModData, mfa = {_, parse, _}} = State) ->
+ error_log("No request received on keep-alive connection"
+ "before server side timeout", ModData),
+ %% No response should be sent!
+ {stop, normal, State#state{response_sent = true}};
+handle_info(timeout, #state{mod = ModData} = State) ->
+ httpd_response:send_status(ModData, 408, "Request timeout"),
+ error_log("The client did not send the whole request before the"
+ "server side timeout", ModData),
+ {stop, normal, State#state{response_sent = true}};
+
+%% Default case
+handle_info(Info, #state{mod = ModData} = State) ->
+ Error = lists:flatten(
+ io_lib:format("Unexpected message received: ~n~p~n", [Info])),
+ error_log(Error, ModData),
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% terminate(Reason, State) -> void()
+%%
+%% Description: This function is called by a gen_server when it is about to
+%% terminate. It should be the opposite of Module:init/1 and do any necessary
+%% cleaning up. When it returns, the gen_server terminates with Reason.
+%% The return value is ignored.
+%%--------------------------------------------------------------------
+terminate(normal, State) ->
+ do_terminate(State);
+terminate(Reason, #state{response_sent = false, mod = ModData} = State) ->
+ httpd_response:send_status(ModData, 500, none),
+ error_log(httpd_util:reason_phrase(500), ModData),
+ terminate(Reason, State#state{response_sent = true, mod = ModData});
+terminate(_, State) ->
+ do_terminate(State).
+
+do_terminate(#state{mod = ModData, manager = Manager} = State) ->
+ catch httpd_manager:done_connection(Manager),
+ cancel_request_timeout(State),
+ httpd_socket:close(ModData#mod.socket_type, ModData#mod.socket).
+
+%%--------------------------------------------------------------------
+%% code_change(OldVsn, State, Extra) -> {ok, NewState}
+%%
+%% Description: Convert process state when code is changed
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+await_socket_ownership_transfer(AcceptTimeout) ->
+ receive
+ {socket_ownership_transfered, SocketType, Socket} ->
+ {SocketType, Socket}
+ after AcceptTimeout ->
+ exit(accept_socket_timeout)
+ end.
+
+handle_http_msg({_, _, Version, {_, _}, _}, #state{status = busy,
+ mod = ModData} = State) ->
+ handle_manager_busy(State#state{mod =
+ ModData#mod{http_version = Version}}),
+ {stop, normal, State};
+
+handle_http_msg({_, _, Version, {_, _}, _},
+ #state{status = blocked, mod = ModData} = State) ->
+ handle_manager_blocked(State#state{mod =
+ ModData#mod{http_version = Version}}),
+ {stop, normal, State};
+
+handle_http_msg({Method, Uri, Version, {RecordHeaders, Headers}, Body},
+ #state{status = accept, mod = ModData} = State) ->
+ case httpd_request:validate(Method, Uri, Version) of
+ ok ->
+ {ok, NewModData} =
+ httpd_request:update_mod_data(ModData, Method, Uri,
+ Version, Headers),
+
+ case is_host_specified_if_required(NewModData#mod.absolute_uri,
+ RecordHeaders, Version) of
+ true ->
+ handle_body(State#state{headers = RecordHeaders,
+ body = Body,
+ mod = NewModData});
+ false ->
+ httpd_response:send_status(ModData#mod{http_version =
+ Version},
+ 400, none),
+ {stop, normal, State#state{response_sent = true}}
+ end;
+ {error, {not_supported, What}} ->
+ httpd_response:send_status(ModData#mod{http_version = Version},
+ 501, {Method, Uri, Version}),
+ Reason = io_lib:format("Not supported: ~p~n", [What]),
+ error_log(Reason, ModData),
+ {stop, normal, State#state{response_sent = true}};
+ {error, {bad_request, {forbidden, URI}}} ->
+ httpd_response:send_status(ModData#mod{http_version = Version},
+ 403, URI),
+ Reason = io_lib:format("Forbidden URI: ~p~n", [URI]),
+ error_log(Reason, ModData),
+ {stop, normal, State#state{response_sent = true}};
+ {error,{bad_request, {malformed_syntax, URI}}} ->
+ httpd_response:send_status(ModData#mod{http_version = Version},
+ 400, URI),
+ Reason = io_lib:format("Malformed syntax in URI: ~p~n", [URI]),
+ error_log(Reason, ModData),
+ {stop, normal, State#state{response_sent = true}}
+ end;
+handle_http_msg({ChunkedHeaders, Body},
+ State = #state{headers = Headers}) ->
+ NewHeaders = http_chunk:handle_headers(Headers, ChunkedHeaders),
+ handle_response(State#state{headers = NewHeaders, body = Body});
+handle_http_msg(Body, State) ->
+ handle_response(State#state{body = Body}).
+
+handle_manager_busy(#state{mod = #mod{config_db = ConfigDB}} = State) ->
+ MaxClients = httpd_util:lookup(ConfigDB, max_clients, 150),
+ Reason = io_lib:format("heavy load (>~w processes)", [MaxClients]),
+ reject_connection(State, lists:flatten(Reason)).
+
+handle_manager_blocked(State) ->
+ Reason = "Server maintenance performed, try again later",
+ reject_connection(State, Reason).
+
+reject_connection(#state{mod = ModData} = State, Reason) ->
+ httpd_response:send_status(ModData, 503, Reason),
+ {stop, normal, State#state{response_sent = true}}.
+
+is_host_specified_if_required(nohost, #http_request_h{host = undefined},
+ "HTTP/1.1") ->
+ false;
+is_host_specified_if_required(_, _, _) ->
+ true.
+
+handle_body(#state{mod = #mod{config_db = ConfigDB}} = State) ->
+
+ MaxHeaderSize =
+ httpd_util:lookup(ConfigDB, max_header_size, ?HTTP_MAX_HEADER_SIZE),
+ MaxBodySize = httpd_util:lookup(ConfigDB, max_body_size, nolimit),
+
+ case handle_expect(State, MaxBodySize) of
+ ok ->
+ handle_body(State, MaxHeaderSize, MaxBodySize);
+ Other ->
+ Other
+
+ end.
+
+handle_body(#state{headers = Headers, body = Body, mod = ModData} = State,
+ MaxHeaderSize, MaxBodySize) ->
+ case Headers#http_request_h.'transfer-encoding' of
+ "chunked" ->
+ case http_chunk:decode(Body, MaxBodySize, MaxHeaderSize) of
+ {Module, Function, Args} ->
+ http_transport:setopts(ModData#mod.socket_type,
+ ModData#mod.socket,
+ [{active, once}]),
+ {noreply, State#state{mfa =
+ {Module, Function, Args}}};
+ {ok, {ChunkedHeaders, NewBody}} ->
+ NewHeaders =
+ http_chunk:handle_headers(Headers, ChunkedHeaders),
+ handle_response(State#state{headers = NewHeaders,
+ body = NewBody})
+ end;
+ Encoding when list(Encoding) ->
+ httpd_response:send_status(ModData, 501,
+ "Unknown Transfer-Encoding"),
+ Reason = io_lib:format("Unknown Transfer-Encoding: ~p~n",
+ [Encoding]),
+ error_log(Reason, ModData),
+ {stop, normal, State#state{response_sent = true}};
+ _ ->
+ Length =
+ list_to_integer(Headers#http_request_h.'content-length'),
+ case ((Length =< MaxBodySize) or (MaxBodySize == nolimit)) of
+ true ->
+ case httpd_request:whole_body(Body, Length) of
+ {Module, Function, Args} ->
+ http_transport:setopts(ModData#mod.socket_type,
+ ModData#mod.socket,
+ [{active, once}]),
+ {noreply, State#state{mfa =
+ {Module, Function, Args}}};
+
+ {ok, NewBody} ->
+ handle_response(
+ State#state{headers = Headers,
+ body = NewBody})
+ end;
+ false ->
+ httpd_response:send_status(ModData, 413, "Body too big"),
+ error_log("Body too big", ModData),
+ {stop, normal, State#state{response_sent = true}}
+ end
+ end.
+
+handle_expect(#state{headers = Headers, mod =
+ #mod{config_db = ConfigDB} = ModData} = State,
+ MaxBodySize) ->
+ Length = Headers#http_request_h.'content-length',
+ case expect(Headers, ModData#mod.http_version, ConfigDB) of
+ continue when MaxBodySize > Length; MaxBodySize == nolimit ->
+ httpd_response:send_status(ModData, 100, ""),
+ ok;
+ continue when MaxBodySize < Length ->
+ httpd_response:send_status(ModData, 413, "Body too big"),
+ error_log("Body too big", ModData),
+ {stop, normal, State#state{response_sent = true}};
+ {break, Value} ->
+ httpd_response:send_status(ModData, 417,
+ "Unexpected expect value"),
+ Reason = io_lib:format("Unexpected expect value: ~p~n", [Value]),
+ error_log(Reason, ModData),
+ {stop, normal, State#state{response_sent = true}};
+ no_expect_header ->
+ ok;
+ http_1_0_expect_header ->
+ httpd_response:send_status(ModData, 400,
+ "Only HTTP/1.1 Clients "
+ "may use the Expect Header"),
+ error_log("Client with lower version than 1.1 tried to send"
+ "an expect header", ModData),
+ {stop, normal, State#state{response_sent = true}}
+ end.
+
+expect(Headers, "HTTP/1.1", _) ->
+ case Headers#http_request_h.expect of
+ "100-continue" ->
+ continue;
+ undefined ->
+ no_expect_header;
+ Other ->
+ {break, Other}
+ end;
+expect(Headers, _, ConfigDB) ->
+ case Headers#http_request_h.expect of
+ undefined ->
+ no_expect_header;
+ _ ->
+ case httpd_util:lookup(ConfigDB, expect, continue) of
+ continue->
+ no_expect_header;
+ _ ->
+ http_1_0_expect_header
+ end
+ end.
+
+handle_response(#state{body = Body, mod = ModData, headers = Headers,
+ max_keep_alive_request = Max} = State) when Max > 0 ->
+ {NewBody, Data} = httpd_request:body_data(Headers, Body),
+ ok = httpd_response:generate_and_send_response(
+ ModData#mod{entity_body = NewBody}),
+ handle_next_request(State#state{response_sent = true}, Data);
+
+handle_response(#state{body = Body, headers = Headers,
+ mod = ModData} = State) ->
+ {NewBody, _} = httpd_request:body_data(Headers, Body),
+ ok = httpd_response:generate_and_send_response(
+ ModData#mod{entity_body = NewBody}),
+ {stop, normal, State#state{response_sent = true}}.
+
+handle_next_request(#state{mod = #mod{connection = true} = ModData,
+ max_keep_alive_request = Max} = State, Data) ->
+ NewModData = #mod{socket_type = ModData#mod.socket_type,
+ socket = ModData#mod.socket,
+ config_db = ModData#mod.config_db,
+ init_data = ModData#mod.init_data},
+ MaxHeaderSize =
+ httpd_util:lookup(ModData#mod.config_db,
+ max_header_size, ?HTTP_MAX_HEADER_SIZE),
+
+ TmpState = State#state{mod = NewModData,
+ mfa = {httpd_request, parse, [MaxHeaderSize]},
+ max_keep_alive_request = decrease(Max),
+ headers = undefined, body = undefined,
+ response_sent = false},
+
+ NewState = activate_request_timeout(TmpState),
+
+ case Data of
+ <<>> ->
+ http_transport:setopts(ModData#mod.socket_type,
+ ModData#mod.socket, [{active, once}]),
+ {noreply, NewState};
+ _ ->
+ handle_info({dummy, ModData#mod.socket, Data}, NewState)
+ end;
+
+handle_next_request(State, _) ->
+ {stop, normal, State}.
+
+activate_request_timeout(#state{timeout = Time} = State) ->
+ Ref = erlang:send_after(Time, self(), timeout),
+ State#state{timer = Ref}.
+
+cancel_request_timeout(#state{timer = undefined} = State) ->
+ State;
+cancel_request_timeout(#state{timer = Timer} = State) ->
+ erlang:cancel_timer(Timer),
+ receive
+ timeout ->
+ ok
+ after 0 ->
+ ok
+ end,
+ State#state{timer = undefined}.
+
+decrease(N) when integer(N)->
+ N-1;
+decrease(N) ->
+ N.
+
+error_log(ReasonString, #mod{socket = Socket, socket_type = SocketType,
+ config_db = ConfigDB,
+ init_data = #init_data{peername = Peername}}) ->
+ Error = lists:flatten(
+ io_lib:format("Error reading request: ~s",[ReasonString])),
+ error_log(mod_log, SocketType, Socket, ConfigDB, Peername, Error),
+ error_log(mod_disk_log, SocketType, Socket, ConfigDB, Peername, Error).
+
+error_log(Mod, SocketType, Socket, ConfigDB, Peername, String) ->
+ Modules = httpd_util:lookup(ConfigDB, modules,
+ [mod_get, mod_head, mod_log]),
+ case lists:member(Mod, Modules) of
+ true ->
+ Mod:error_log(SocketType, Socket, ConfigDB, Peername, String);
+ _ ->
+ ok
+ end.
+
diff --git a/src/couch_inets/httpd_response.erl b/src/couch_inets/httpd_response.erl
new file mode 100644
index 00000000..a39adc7d
--- /dev/null
+++ b/src/couch_inets/httpd_response.erl
@@ -0,0 +1,377 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(httpd_response).
+-export([generate_and_send_response/1, send_status/3, send_header/3,
+ send_body/3, send_chunk/3, send_final_chunk/2, split_header/2,
+ is_disable_chunked_send/1, cache_headers/1]).
+
+-include("httpd.hrl").
+-include("http_internal.hrl").
+
+-define(VMODULE,"RESPONSE").
+
+%% If peername does not exist the client already discarded the
+%% request so we do not need to send a reply.
+generate_and_send_response(#mod{init_data =
+ #init_data{peername = {_,"unknown"}}}) ->
+ ok;
+generate_and_send_response(#mod{config_db = ConfigDB} = ModData) ->
+ Modules = httpd_util:lookup(ConfigDB,modules,
+ [mod_get, mod_head, mod_log]),
+ case traverse_modules(ModData, Modules) of
+ done ->
+ ok;
+ {proceed, Data} ->
+ case httpd_util:key1search(Data, status) of
+ {StatusCode, PhraseArgs, _Reason} ->
+ send_status(ModData, StatusCode, PhraseArgs),
+ ok;
+ undefined ->
+ case httpd_util:key1search(Data, response) of
+ {already_sent, _StatusCode, _Size} ->
+ ok;
+ {response, Header, Body} -> %% New way
+ send_response(ModData, Header, Body),
+ ok;
+ {StatusCode, Response} -> %% Old way
+ send_response_old(ModData, StatusCode, Response),
+ ok;
+ undefined ->
+ send_status(ModData, 500, none),
+ ok
+ end
+ end
+ end.
+
+
+%% traverse_modules
+
+traverse_modules(ModData,[]) ->
+ {proceed,ModData#mod.data};
+traverse_modules(ModData,[Module|Rest]) ->
+ case (catch apply(Module,do,[ModData])) of
+ {'EXIT', Reason} ->
+ String =
+ lists:flatten(
+ io_lib:format("traverse exit from apply: ~p:do => ~n~p",
+ [Module, Reason])),
+ report_error(mod_log, ModData#mod.config_db, String),
+ report_error(mod_disk_log, ModData#mod.config_db, String),
+ done;
+ done ->
+ done;
+ {break,NewData} ->
+ {proceed,NewData};
+ {proceed,NewData} ->
+ traverse_modules(ModData#mod{data=NewData},Rest)
+ end.
+
+%% send_status %%
+
+
+send_status(ModData, 100, _PhraseArgs) ->
+ send_header(ModData, 100, [{content_length, "0"}]);
+
+send_status(#mod{socket_type = SocketType,
+ socket = Socket,
+ config_db = ConfigDB} = ModData, StatusCode, PhraseArgs) ->
+
+ ReasonPhrase = httpd_util:reason_phrase(StatusCode),
+ Message = httpd_util:message(StatusCode, PhraseArgs, ConfigDB),
+ Body = get_body(ReasonPhrase, Message),
+
+ send_header(ModData, StatusCode, [{content_type, "text/html"},
+ {content_length, integer_to_list(length(Body))}]),
+ httpd_socket:deliver(SocketType, Socket, Body).
+
+
+get_body(ReasonPhrase, Message)->
+ "<HTML>
+ <HEAD>
+ <TITLE>"++ReasonPhrase++"</TITLE>
+ </HEAD>
+ <BODY>
+ <H1>"++ReasonPhrase++"</H1>\n"++Message++"\n</BODY>
+ </HTML>\n".
+
+
+send_response(ModData, Header, Body) ->
+ case httpd_util:key1search(Header, code) of
+ undefined ->
+ %% No status code
+            %% Oops, this must be very bad:
+            %% generate a 404 content not available
+ send_status(ModData, 404, "The file is not availible");
+ StatusCode ->
+ case send_header(ModData, StatusCode, lists:keydelete(code, 1,
+ Header)) of
+ ok ->
+ send_body(ModData, StatusCode, Body);
+ _ ->
+ done
+ end
+ end.
+
+send_header(#mod{socket_type = Type, socket = Sock,
+ http_version = Ver, connection = Conn} = _ModData,
+ StatusCode, KeyValueTupleHeaders) ->
+ Headers = create_header(lists:map(fun transform/1, KeyValueTupleHeaders)),
+ NewVer = case {Ver, StatusCode} of
+ {[], _} ->
+ %% May be implicit!
+ "HTTP/0.9";
+ {unknown, 408} ->
+                     %% This will probably never happen! It means the
+                     %% server has timed out the request without
+                     %% receiving a version for the request! Send the
+                     %% lowest version so as to ensure that the client
+                     %% will be able to handle it, probably the
+                     %% sensible thing to do!
+ "HTTP/0.9";
+ {undefined,_} ->
+ "HTTP/1.0"; %% See rfc2145 2.3 last paragraph
+ _ ->
+ Ver
+ end,
+ StatusLine = [NewVer, " ", io_lib:write(StatusCode), " ",
+ httpd_util:reason_phrase(StatusCode), ?CRLF],
+ ConnectionHeader = get_connection(Conn, NewVer),
+ Head = list_to_binary([StatusLine, Headers, ConnectionHeader , ?CRLF]),
+ httpd_socket:deliver(Type, Sock, Head).
+
+send_body(#mod{socket_type = Type, socket = Socket}, _, nobody) ->
+ httpd_socket:close(Type, Socket),
+ ok;
+
+send_body(#mod{socket_type = Type, socket = Sock},
+ _StatusCode, Body) when list(Body) ->
+ ok = httpd_socket:deliver(Type, Sock, Body);
+
+send_body(#mod{socket_type = Type, socket = Sock} = ModData,
+ StatusCode, {Fun, Args}) ->
+ case (catch apply(Fun, Args)) of
+ close ->
+ httpd_socket:close(Type, Sock),
+ done;
+
+ sent ->
+ {proceed,[{response,{already_sent, StatusCode,
+ httpd_util:key1search(ModData#mod.data,
+ content_length)}}]};
+ {ok, Body} ->
+ case httpd_socket:deliver(Type, Sock, Body) of
+ ok ->
+ {proceed,[{response,
+ {already_sent, StatusCode,
+ httpd_util:key1search(ModData#mod.data,
+ content_length)}}]};
+ _ ->
+ done
+ end;
+
+ _ ->
+ done
+ end.
+
+split_header([$: | Value], AccName) ->
+ Name = http_util:to_lower(string:strip(AccName)),
+ {lists:reverse(Name),
+ string:strip(string:strip(string:strip(Value, right, ?LF), right, ?CR))};
+split_header([Char | Rest], AccName) ->
+ split_header(Rest, [Char | AccName]).
+
+send_chunk(_, <<>>, _) ->
+ ok;
+send_chunk(_, [], _) ->
+ ok;
+
+send_chunk(#mod{http_version = "HTTP/1.1",
+ socket_type = Type, socket = Sock}, Response0, false) ->
+ Response = http_chunk:encode(Response0),
+ httpd_socket:deliver(Type, Sock, Response);
+
+send_chunk(#mod{socket_type = Type, socket = Sock} = _ModData, Response, _) ->
+ httpd_socket:deliver(Type, Sock, Response).
+
+send_final_chunk(#mod{http_version = "HTTP/1.1",
+ socket_type = Type, socket = Sock}, false) ->
+ httpd_socket:deliver(Type, Sock, http_chunk:encode_last());
+send_final_chunk(#mod{socket_type = Type, socket = Sock}, _) ->
+ httpd_socket:close(Type, Sock).
+
+is_disable_chunked_send(Db) ->
+ httpd_util:lookup(Db, disable_chunked_transfer_encoding_send, false).
+
+%% Return an HTTP-header field that indicates that the
+%% connection will be non-persistent
+get_connection(true,"HTTP/1.0")->
+ "Connection:close\r\n";
+get_connection(false,"HTTP/1.1") ->
+ "Connection:close\r\n";
+get_connection(_,_) ->
+ "".
+
+cache_headers(#mod{config_db = Db}) ->
+ case httpd_util:lookup(Db, script_nocache, false) of
+ true ->
+ Date = httpd_util:rfc1123_date(),
+ [{"cache-control", "no-cache"},
+ {"pragma", "no-cache"},
+ {"expires", Date}];
+ false ->
+ []
+ end.
+
+create_header(KeyValueTupleHeaders) ->
+ NewHeaders = add_default_headers([{"date", httpd_util:rfc1123_date()},
+ {"content-type", "text/html"},
+ {"server", ?SERVER_SOFTWARE}],
+ KeyValueTupleHeaders),
+ lists:map(fun fix_header/1, NewHeaders).
+
+fix_header({Key0, Value}) ->
+ %% make sure first letter is capital
+ Words1 = string:tokens(Key0, "-"),
+ Words2 = upify(Words1, []),
+ Key = new_key(Words2),
+ Key ++ ": " ++ Value ++ ?CRLF .
+
+new_key([]) ->
+ "";
+new_key([W]) ->
+ W;
+new_key([W1,W2]) ->
+ W1 ++ "-" ++ W2;
+new_key([W|R]) ->
+ W ++ "-" ++ new_key(R).
+
+upify([], Acc) ->
+ lists:reverse(Acc);
+upify([Key|Rest], Acc) ->
+ upify(Rest, [upify2(Key)|Acc]).
+
+upify2([C|Rest]) when C >= $a, C =< $z ->
+ [C-($a-$A)|Rest];
+upify2(Str) ->
+ Str.
+
+add_default_headers([], Headers) ->
+ Headers;
+
+add_default_headers([Header = {Default, _} | Defaults], Headers) ->
+ case lists:keysearch(Default, 1, Headers) of
+ {value, _} ->
+ add_default_headers(Defaults, Headers);
+ _ ->
+ add_default_headers(Defaults, [Header | Headers])
+ end.
+
+transform({content_type, Value}) ->
+ {"content-type", Value};
+transform({accept_ranges, Value}) ->
+ {"accept-ranges", Value};
+transform({cache_control, Value}) ->
+ {"cache-control",Value};
+transform({transfer_encoding, Value}) ->
+ {"transfer-encoding", Value};
+transform({content_encoding, Value}) ->
+ {"content-encoding", Value};
+transform({content_language, Value}) ->
+ {"content-language", Value};
+transform({retry_after, Value}) ->
+ {"retry-after", Value};
+transform({content_location, Value}) ->
+ {"Content-Location:", Value};
+transform({content_length, Value}) ->
+ {"content-length", Value};
+transform({content_MD5, Value}) ->
+ {"content-md5", Value};
+transform({content_range, Value}) ->
+ {"content-range", Value};
+transform({last_modified, Value}) ->
+ {"last-modified", Value};
+transform({Field, Value}) when is_atom(Field) ->
+ {atom_to_list(Field), Value};
+transform({Field, Value}) when is_list(Field) ->
+ {Field, Value}.
+
+%%----------------------------------------------------------------------
+%% This is the old way of sending data; it is strongly encouraged to
+%% leave this method behind and move on to the newer form of response
+%% OTP-4408
+%%----------------------------------------------------------------------
+send_response_old(#mod{method = "HEAD"} = ModData,
+ StatusCode, Response) ->
+ NewResponse = lists:flatten(Response),
+
+ case httpd_util:split(NewResponse, [?CR, ?LF, ?CR, ?LF],2) of
+ {ok, [Head, Body]} ->
+ {ok, NewHead} = handle_headers(string:tokens(Head, [?CR,?LF]), []),
+ send_header(ModData, StatusCode, [{content_length,
+ content_length(Body)} | NewHead]);
+ {ok, [NewResponse]} ->
+ send_header(ModData, StatusCode, [{content_length,
+ content_length(NewResponse)}]);
+ _Error ->
+ send_status(ModData, 500, "Internal Server Error")
+ end;
+
+send_response_old(#mod{socket_type = Type,
+ socket = Sock} = ModData,
+ StatusCode, Response) ->
+
+ NewResponse = lists:flatten(Response),
+
+ case httpd_util:split(NewResponse, [?CR, ?LF, ?CR, ?LF], 2) of
+ {ok, [Head, Body]} ->
+ {ok, NewHead} = handle_headers(string:tokens(Head,
+ [?CR,?LF]), []),
+ send_header(ModData, StatusCode, [{content_length,
+ content_length(Body)} |
+ NewHead]),
+ httpd_socket:deliver(Type, Sock, Body);
+ {ok, [NewResponse]} ->
+ send_header(ModData, StatusCode, [{content_length,
+ content_length(NewResponse)}]),
+ httpd_socket:deliver(Type, Sock, NewResponse);
+
+ {error, _Reason} ->
+ send_status(ModData, 500, "Internal Server Error")
+ end.
+
+content_length(Body)->
+ integer_to_list(httpd_util:flatlength(Body)).
+
+report_error(Mod, ConfigDB, Error) ->
+ Modules = httpd_util:lookup(ConfigDB, modules,
+ [mod_get, mod_head, mod_log]),
+ case lists:member(Mod, Modules) of
+ true ->
+ Mod:report_error(ConfigDB, Error);
+ _ ->
+ ok
+ end.
+
+handle_headers([], NewHeaders) ->
+ {ok, NewHeaders};
+
+handle_headers([Header | Headers], NewHeaders) ->
+ {FieldName, FieldValue} = split_header(Header, []),
+ handle_headers(Headers,
+ [{FieldName, FieldValue}| NewHeaders]).
+
diff --git a/src/couch_inets/httpd_script_env.erl b/src/couch_inets/httpd_script_env.erl
new file mode 100644
index 00000000..d34e9716
--- /dev/null
+++ b/src/couch_inets/httpd_script_env.erl
@@ -0,0 +1,141 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+
+-module(httpd_script_env).
+
+-export([create_env/3]).
+
+-include("httpd.hrl").
+
+%%%=========================================================================
+%%% Internal application API
+%%%=========================================================================
+%%--------------------------------------------------------------------------
+%% create_env(ScriptType, ModData, ScriptElements) -> [{EnvVariable, Value}]
+%%
+%% ScriptType = cgi | esi
+%% ModData = #mod{}
+%% ScriptElements = [{Element, Value}]
+%% Element = path_info | query_string | entity_body
+%% Value = term()
+%% EnvVariable = string() - cgi | atom() - esi
+%%
+%% Description: Creates a list of cgi/esi environment variables and
+%% their values.
+%%--------------------------------------------------------------------------
+create_env(ScriptType, ModData, ScriptElements) ->
+ create_basic_elements(ScriptType, ModData)
+ ++ create_http_header_elements(ScriptType, ModData#mod.parsed_header)
+ ++ create_script_elements(ScriptType, ModData, ScriptElements)
+ ++ create_mod_interaction_elements(ScriptType, ModData).
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+create_basic_elements(esi, ModData) ->
+ {_, RemoteAddr} = (ModData#mod.init_data)#init_data.peername,
+ [{server_software, ?SERVER_SOFTWARE},
+ {server_name, (ModData#mod.init_data)#init_data.resolve},
+ {gateway_interface,?GATEWAY_INTERFACE},
+ {server_protocol, ?SERVER_PROTOCOL},
+ {server_port, httpd_util:lookup(ModData#mod.config_db,port,80)},
+ {request_method, ModData#mod.method},
+ {remote_addr, RemoteAddr},
+ {script_name, ModData#mod.request_uri}];
+
+create_basic_elements(cgi, ModData) ->
+ {_, RemoteAddr} = (ModData#mod.init_data)#init_data.peername,
+ [{"SERVER_SOFTWARE",?SERVER_SOFTWARE},
+ {"SERVER_NAME", (ModData#mod.init_data)#init_data.resolve},
+ {"GATEWAY_INTERFACE",?GATEWAY_INTERFACE},
+ {"SERVER_PROTOCOL",?SERVER_PROTOCOL},
+ {"SERVER_PORT",
+ integer_to_list(httpd_util:lookup(
+ ModData#mod.config_db, port, 80))},
+ {"REQUEST_METHOD", ModData#mod.method},
+ {"REMOTE_ADDR", RemoteAddr},
+ {"SCRIPT_NAME", ModData#mod.request_uri}].
+
+create_http_header_elements(ScriptType, Headers) ->
+ create_http_header_elements(ScriptType, Headers, []).
+
+create_http_header_elements(_, [], Acc) ->
+ Acc;
+create_http_header_elements(ScriptType, [{Name, [Value | _] = Values } |
+ Headers], Acc) when list(Value) ->
+ NewName = lists:map(fun(X) -> if X == $- -> $_; true -> X end end, Name),
+ Element = http_env_element(ScriptType, NewName, multi_value(Values)),
+ create_http_header_elements(ScriptType, Headers, [Element | Acc]);
+
+create_http_header_elements(ScriptType, [{Name, Value} | Headers], Acc)
+ when list(Value) ->
+ {ok, NewName, _} = regexp:gsub(Name,"-","_"),
+ Element = http_env_element(ScriptType, NewName, Value),
+ create_http_header_elements(ScriptType, Headers, [Element | Acc]).
+
+http_env_element(cgi, VarName, Value) ->
+ {"HTTP_"++ http_util:to_upper(VarName), Value};
+http_env_element(esi, VarName, Value) ->
+ {list_to_atom("http_"++ http_util:to_lower(VarName)), Value}.
+
+multi_value([]) ->
+ [];
+multi_value([Value]) ->
+ Value;
+multi_value([Value | Rest]) ->
+ Value ++ ", " ++ multi_value(Rest).
+
+create_script_elements(ScriptType, ModData, ScriptElements) ->
+ lists:flatmap(fun({Element, Data}) ->
+ create_script_elements(ScriptType,
+ Element,
+ Data, ModData)
+ end, ScriptElements).
+
+create_script_elements(esi, query_string, QueryString, _) ->
+ [{query_string, QueryString}];
+create_script_elements(cgi, query_string, QueryString, _) ->
+ [{"QUERY_STRING", QueryString}];
+create_script_elements(esi, path_info, PathInfo, ModData) ->
+ Aliases = httpd_util:multi_lookup(ModData#mod.config_db, alias),
+ {_,PathTranslated,_} =
+ mod_alias:real_name(ModData#mod.config_db, PathInfo,
+ Aliases),
+ [{path_info, PathInfo},
+ {path_translated, PathTranslated}];
+create_script_elements(cgi, path_info, PathInfo, ModData) ->
+ Aliases = httpd_util:multi_lookup(ModData#mod.config_db, alias),
+ {_,PathTranslated,_} =
+ mod_alias:real_name(ModData#mod.config_db, PathInfo,
+ Aliases),
+ [{"PATH_INFO", PathInfo},
+ {"PATH_TRANSLATED", PathTranslated}];
+create_script_elements(esi, entity_body, Body, _) ->
+ [{content_length, httpd_util:flatlength(Body)}];
+create_script_elements(cgi, entity_body, Body, _) ->
+ [{"CONTENT_LENGTH", httpd_util:flatlength(Body)}];
+create_script_elements(_, _, _, _) ->
+ [].
+
+create_mod_interaction_elements(_, ModData)->
+ case httpd_util:key1search(ModData#mod.data, remote_user) of
+ undefined ->
+ [];
+ RemoteUser ->
+ [{remote_user, RemoteUser}]
+ end.
diff --git a/src/couch_inets/httpd_socket.erl b/src/couch_inets/httpd_socket.erl
new file mode 100644
index 00000000..8e6a54f4
--- /dev/null
+++ b/src/couch_inets/httpd_socket.erl
@@ -0,0 +1,62 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(httpd_socket).
+
+%% API (document close ?)
+-export([deliver/3, peername/2, resolve/0, close/2]).
+
+-include("httpd.hrl").
+
+-define(VMODULE,"SOCKET").
+-include_lib("kernel/include/inet.hrl").
+
+deliver(SocketType, Socket, IOListOrBinary) ->
+ case http_transport:send(SocketType, Socket, IOListOrBinary) of
+ {error, _Reason} ->
+ (catch close(SocketType, Socket)),
+ socket_closed;
+ _ ->
+ ok
+ end.
+
+peername(SocketType, Socket) ->
+ http_transport:peername(SocketType, Socket).
+
+resolve() ->
+ http_transport:resolve().
+
+close(SocketType, Socket) ->
+ close_sleep(SocketType, 1000),
+ Res =
+ case (catch http_transport:close(SocketType, Socket)) of
+ ok -> ok;
+ {error,Reason} -> {error,Reason};
+ {'EXIT',{noproc,_}} -> {error,closed};
+ {'EXIT',Reason} -> {error,Reason};
+ Otherwise -> {error,Otherwise}
+ end,
+ Res.
+
+%% Workaround for ssl problem when ssl does not deliver the message
+%% sent prior to the close before the close signal.
+close_sleep({ssl, _}, Time) ->
+ sleep(Time);
+close_sleep(_, _) ->
+ ok.
+
+sleep(T) -> receive after T -> ok end.
diff --git a/src/couch_inets/httpd_sup.erl b/src/couch_inets/httpd_sup.erl
new file mode 100644
index 00000000..94573394
--- /dev/null
+++ b/src/couch_inets/httpd_sup.erl
@@ -0,0 +1,137 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%%----------------------------------------------------------------------
+%% Purpose: The top supervisor for the http server (httpd) hangs under
+%% inets_sup.
+%%----------------------------------------------------------------------
+
+-module(httpd_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/1]).
+-export([start_child/1, stop_child/2]).
+
+%% Supervisor callback
+-export([init/1]).
+
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+start_link(HttpdServices) ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, [HttpdServices]).
+
+start_child(ConfigFile) ->
+ {ok, Spec} = httpd_child_spec(ConfigFile, 15000, []),
+ supervisor:start_child(?MODULE, Spec).
+
+stop_child(Addr, Port) ->
+ Name = {httpd_instance_sup, Addr, Port},
+ case supervisor:terminate_child(?MODULE, Name) of
+ ok ->
+ supervisor:delete_child(?MODULE, Name);
+ Error ->
+ Error
+ end.
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+init([HttpdServices]) ->
+ RestartStrategy = one_for_one,
+ MaxR = 10,
+ MaxT = 3600,
+ Children = child_spec(HttpdServices, []),
+ {ok, {{RestartStrategy, MaxR, MaxT}, Children}}.
+
+%%%=========================================================================
+%%% Internal functions
+%%%=========================================================================
+%% The format of the httpd service is:
+%% httpd_service() -> {httpd,httpd()}
+%% httpd() -> [httpd_config()] | file()
+%% httpd_config() -> {file,file()} |
+%% {debug,debug()} |
+%% {accept_timeout,integer()}
+%% debug() -> disable | [debug_options()]
+%% debug_options() -> {all_functions,modules()} |
+%% {exported_functions,modules()} |
+%% {disable,modules()}
+%% modules() -> [atom()]
+child_spec([], Acc) ->
+ Acc;
+child_spec([{httpd, HttpdService} | Rest], Acc) ->
+ NewHttpdService = mk_tuple_list(HttpdService),
+ %% Acc2 = child_spec2(NewHttpdService,Acc),
+ NewAcc=
+ case catch child_spec2(NewHttpdService) of
+ {ok,Acc2} ->
+ [Acc2|Acc];
+ {error,Reason} ->
+ error_msg("failed to create child spec for ~n~p~ndue to: ~p",
+ [HttpdService,Reason]),
+% exit({error,Reason})
+ Acc
+ end,
+ child_spec(Rest,NewAcc).
+
+child_spec2(HttpdService) ->
+ Debug = http_util:key1search(HttpdService,debug,[]),
+ AcceptTimeout = http_util:key1search(HttpdService,accept_timeout,15000),
+ ConfigFile =
+ case http_util:key1search(HttpdService,file) of
+ undefined -> throw({error,{mandatory_conf_file_missed}});
+ File -> File
+ end,
+ httpd_util:valid_options(Debug,AcceptTimeout,ConfigFile),
+ httpd_child_spec(ConfigFile,AcceptTimeout,Debug).
+
+
+httpd_child_spec(ConfigFile,AcceptTimeout,Debug) ->
+ case httpd_conf:load(ConfigFile) of
+ {ok, ConfigList} ->
+ Port = httpd_util:key1search(ConfigList, port, 80),
+ Addr = httpd_util:key1search(ConfigList, bind_address),
+ {ok, httpd_child_spec(ConfigFile, AcceptTimeout,
+ Debug, Addr, Port)};
+ Error ->
+ Error
+ end.
+
+httpd_child_spec(ConfigFile, AcceptTimeout, Debug, Addr, Port) ->
+ Name = {httpd_instance_sup, Addr, Port},
+ StartFunc = {httpd_instance_sup, start_link,
+ [ConfigFile,AcceptTimeout,Debug]},
+ Restart = permanent,
+ Shutdown = infinity,
+ Modules = [httpd_instance_sup],
+ Type = supervisor,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+
+mk_tuple_list([]) ->
+ [];
+mk_tuple_list([H={_,_}|T]) ->
+ [H|mk_tuple_list(T)];
+mk_tuple_list(F) ->
+ [{file,F}].
+
+error_msg(F, A) ->
+ error_logger:error_msg(F ++ "~n", A).
diff --git a/src/couch_inets/httpd_util.erl b/src/couch_inets/httpd_util.erl
new file mode 100644
index 00000000..96fe4238
--- /dev/null
+++ b/src/couch_inets/httpd_util.erl
@@ -0,0 +1,718 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(httpd_util).
+-export([key1search/2, key1search/3, lookup/2, lookup/3, multi_lookup/2,
+ lookup_mime/2, lookup_mime/3, lookup_mime_default/2,
+ lookup_mime_default/3, reason_phrase/1, message/3, rfc1123_date/0,
+ rfc1123_date/1, day/1, month/1, decode_hex/1, decode_base64/1,
+ encode_base64/1,
+ flatlength/1, split_path/1, split_script_path/1, suffix/1, to_upper/1,
+ to_lower/1, split/3, uniq/1,
+ make_name/2,make_name/3,make_name/4,strip/1,
+ hexlist_to_integer/1,integer_to_hexlist/1,
+ convert_request_date/1,create_etag/1,create_etag/2,
+ convert_netscapecookie_date/1, enable_debug/1, valid_options/3]).
+
+-export([encode_hex/1]).
+-include_lib("kernel/include/file.hrl").
+
+%% We will not make the change to use base64 in stdlib in inets just yet.
+%% it will be included in the next major release of inets.
+-compile({nowarn_deprecated_function, {http_base_64, encode, 1}}).
+-compile({nowarn_deprecated_function, {http_base_64, decode, 1}}).
+
+-deprecated([{to_lower, 1, next_major_release},
+ {to_upper, 1, next_major_release},
+ {decode_base64, 1, next_major_release},
+ {encode_base64, 1, next_major_release}
+ ]).
+
+%% key1search
+
+key1search(TupleList,Key) ->
+ key1search(TupleList,Key,undefined).
+
+key1search(TupleList,Key,Undefined) ->
+ case lists:keysearch(Key,1,TupleList) of
+ {value,{Key,Value}} ->
+ Value;
+ false ->
+ Undefined
+ end.
+
+%% lookup
+
+lookup(Table,Key) ->
+ lookup(Table,Key,undefined).
+
+lookup(Table,Key,Undefined) ->
+ case catch ets:lookup(Table,Key) of
+ [{Key,Value}|_] ->
+ Value;
+ _->
+ Undefined
+ end.
+
+%% multi_lookup
+
+multi_lookup(Table,Key) ->
+ remove_key(ets:lookup(Table,Key)).
+
+remove_key([]) ->
+ [];
+remove_key([{_Key, Value}| Rest]) ->
+ [Value | remove_key(Rest)].
+
+%% lookup_mime
+
+lookup_mime(ConfigDB,Suffix) ->
+ lookup_mime(ConfigDB,Suffix,undefined).
+
+lookup_mime(ConfigDB,Suffix,Undefined) ->
+ [{mime_types,MimeTypesDB}|_]=ets:lookup(ConfigDB,mime_types),
+ case ets:lookup(MimeTypesDB,Suffix) of
+ [] ->
+ Undefined;
+ [{Suffix,MimeType}|_] ->
+ MimeType
+ end.
+
+%% lookup_mime_default
+
+lookup_mime_default(ConfigDB,Suffix) ->
+ lookup_mime_default(ConfigDB,Suffix,undefined).
+
+lookup_mime_default(ConfigDB,Suffix,Undefined) ->
+ [{mime_types,MimeTypesDB}|_]=ets:lookup(ConfigDB,mime_types),
+ case ets:lookup(MimeTypesDB,Suffix) of
+ [] ->
+ case ets:lookup(ConfigDB,default_type) of
+ [] ->
+ Undefined;
+ [{default_type,DefaultType}|_] ->
+ DefaultType
+ end;
+ [{Suffix,MimeType}|_] ->
+ MimeType
+ end.
+
+%%% RFC 2616, HTTP 1.1 Status codes
+reason_phrase(100) -> "Continue";
+reason_phrase(101) -> "Switching Protocols" ;
+reason_phrase(200) -> "OK" ;
+reason_phrase(201) -> "Created" ;
+reason_phrase(202) -> "Accepted" ;
+reason_phrase(203) -> "Non-Authoritative Information" ;
+reason_phrase(204) -> "No Content" ;
+reason_phrase(205) -> "Reset Content" ;
+reason_phrase(206) -> "Partial Content" ;
+reason_phrase(300) -> "Multiple Choices" ;
+reason_phrase(301) -> "Moved Permanently" ;
+reason_phrase(302) -> "Moved Temporarily" ;
+reason_phrase(303) -> "See Other" ;
+reason_phrase(304) -> "Not Modified" ;
+reason_phrase(305) -> "Use Proxy" ;
+reason_phrase(306) -> "(unused)" ;
+reason_phrase(307) -> "Temporary Redirect" ;
+reason_phrase(400) -> "Bad Request";
+reason_phrase(401) -> "Unauthorized";
+reason_phrase(402) -> "Payment Required";
+reason_phrase(403) -> "Forbidden" ;
+reason_phrase(404) -> "Object Not Found" ;
+reason_phrase(405) -> "Method Not Allowed" ;
+reason_phrase(406) -> "Not Acceptable" ;
+reason_phrase(407) -> "Proxy Authentication Required" ;
+reason_phrase(408) -> "Request Time-out" ;
+reason_phrase(409) -> "Conflict" ;
+reason_phrase(410) -> "Gone" ;
+reason_phrase(411) -> "Length Required" ;
+reason_phrase(412) -> "Precondition Failed" ;
+reason_phrase(413) -> "Request Entity Too Large" ;
+reason_phrase(414) -> "Request-URI Too Large" ;
+reason_phrase(415) -> "Unsupported Media Type" ;
+reason_phrase(416) -> "Requested Range Not Satisfiable" ;
+reason_phrase(417) -> "Expectation Failed" ;
+reason_phrase(500) -> "Internal Server Error" ;
+reason_phrase(501) -> "Not Implemented" ;
+reason_phrase(502) -> "Bad Gateway" ;
+reason_phrase(503) -> "Service Unavailable" ;
+reason_phrase(504) -> "Gateway Time-out" ;
+reason_phrase(505) -> "HTTP Version not supported";
+
+%%% RFC 2518, HTTP Extensions for Distributed Authoring -- WEBDAV
+reason_phrase(102) -> "Processing";
+reason_phrase(207) -> "Multi-Status";
+reason_phrase(422) -> "Unprocessable Entity";
+reason_phrase(423) -> "Locked";
+reason_phrase(424) -> "Failed Dependency";
+reason_phrase(507) -> "Insufficient Storage";
+
+%%% (Work in Progress) WebDAV Advanced Collections
+reason_phrase(425) -> "";
+
+%%% RFC 2817, HTTP Upgrade to TLS
+reason_phrase(426) -> "Upgrade Required";
+
+%%% RFC 3229, Delta encoding in HTTP
+reason_phrase(226) -> "IM Used";
+
+reason_phrase(_) -> "Internal Server Error".
+
+
+%% message
+
+message(301,URL,_) ->
+ "The document has moved <A HREF=\""++URL++"\">here</A>.";
+message(304, _URL,_) ->
+ "The document has not been changed.";
+message(400,none,_) ->
+ "Your browser sent a query that this server could not understand.";
+message(400,Msg,_) ->
+ "Your browser sent a query that this server could not understand. "++Msg;
+message(401,none,_) ->
+ "This server could not verify that you
+are authorized to access the document you
+ requested. Either you supplied the wrong
+credentials (e.g., bad password), or your
+browser doesn't understand how to supply
+the credentials required.";
+message(403,RequestURI,_) ->
+ "You don't have permission to access "++RequestURI++" on this server.";
+message(404,RequestURI,_) ->
+ "The requested URL "++RequestURI++" was not found on this server.";
+message(408, Timeout, _) ->
+ Timeout;
+message(412,none,_) ->
+ "The requested preconditions where false";
+message(413, Reason,_) ->
+ "Entity: " ++ Reason;
+message(414,ReasonPhrase,_) ->
+ "Message "++ReasonPhrase++".";
+message(416,ReasonPhrase,_) ->
+ ReasonPhrase;
+
+message(500,_,ConfigDB) ->
+ ServerAdmin=lookup(ConfigDB,server_admin,"unknown@unknown"),
+ "The server encountered an internal error or "
+ "misconfiguration and was unable to complete "
+ "your request.<P>Please contact the server administrator "
+ ++ ServerAdmin ++ ", and inform them of the time the error occurred "
+ "and anything you might have done that may have caused the error.";
+
+message(501,{Method, RequestURI, HTTPVersion}, _ConfigDB) ->
+ if
+ atom(Method) ->
+ atom_to_list(Method)++
+ " to "++RequestURI++" ("++HTTPVersion++") not supported.";
+ list(Method) ->
+ Method++
+ " to "++RequestURI++" ("++HTTPVersion++") not supported."
+ end;
+
+message(503, String, _ConfigDB) ->
+ "This service in unavailable due to: "++String.
+
+%%convert_rfc_date(Date)->{{YYYY,MM,DD},{HH,MIN,SEC}}
+
+convert_request_date([D,A,Y,DateType| Rest])->
+ Func=case DateType of
+ $\, ->
+ fun convert_rfc1123_date/1;
+ $\ ->
+ fun convert_ascii_date/1;
+ _ ->
+ fun convert_rfc850_date/1
+ end,
+ case catch Func([D,A,Y,DateType| Rest]) of
+ {ok,Date} ->
+ Date;
+ _Error->
+ bad_date
+ end.
+convert_rfc850_date(DateStr) ->
+ [_WeekDay,Date,Time,_TimeZone|_Rest] = string:tokens(DateStr," "),
+ convert_rfc850_date(Date,Time).
+
+convert_rfc850_date([D1,D2,_,
+ M,O,N,_,
+ Y1,Y2|_Rest],[H1,H2,_Col,M1,M2,_Col,S1,S2|_Rest2])->
+ Year=list_to_integer([50,48,Y1,Y2]),
+ Day=list_to_integer([D1,D2]),
+ Month = http_util:convert_month([M,O,N]),
+ Hour=list_to_integer([H1,H2]),
+ Min=list_to_integer([M1,M2]),
+ Sec=list_to_integer([S1,S2]),
+ {ok,{{Year,Month,Day},{Hour,Min,Sec}}}.
+
+convert_ascii_date([_D,_A,_Y,_SP,
+ M,O,N,_SP,
+ D1,D2,_SP,
+ H1,H2,_Col,
+ M1,M2,_Col,
+ S1,S2,_SP,
+ Y1,Y2,Y3,Y4| _Rest])->
+ Year=list_to_integer([Y1,Y2,Y3,Y4]),
+ Day=case D1 of
+ $\ ->
+ list_to_integer([D2]);
+ _->
+ list_to_integer([D1,D2])
+ end,
+ Month=http_util:convert_month([M,O,N]),
+ Hour=list_to_integer([H1,H2]),
+ Min=list_to_integer([M1,M2]),
+ Sec=list_to_integer([S1,S2]),
+ {ok,{{Year,Month,Day},{Hour,Min,Sec}}}.
+
+convert_rfc1123_date([_D,_A,_Y,_C,_SP,
+ D1,D2,_SP,
+ M,O,N,_SP,
+ Y1,Y2,Y3,Y4,_SP,
+ H1,H2,_Col,
+ M1,M2,_Col,
+ S1,S2|_Rest]) ->
+ Year=list_to_integer([Y1,Y2,Y3,Y4]),
+ Day=list_to_integer([D1,D2]),
+ Month=http_util:convert_month([M,O,N]),
+ Hour=list_to_integer([H1,H2]),
+ Min=list_to_integer([M1,M2]),
+ Sec=list_to_integer([S1,S2]),
+ {ok,{{Year,Month,Day},{Hour,Min,Sec}}}.
+
+convert_netscapecookie_date(Date)->
+ case (catch http_util:convert_netscapecookie_date(Date)) of
+ Ret = {ok, _} ->
+ Ret;
+ _ ->
+ {error,bad_date}
+ end.
+
+
+%% rfc1123_date
+
+rfc1123_date() ->
+ {{YYYY,MM,DD},{Hour,Min,Sec}} = calendar:universal_time(),
+ DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
+ lists:flatten(
+ io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
+ [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
+
+rfc1123_date(undefined) ->
+ undefined;
+rfc1123_date(LocalTime) ->
+ {{YYYY,MM,DD},{Hour,Min,Sec}} =
+ case calendar:local_time_to_universal_time_dst(LocalTime) of
+ [Gmt] -> Gmt;
+ [_,Gmt] -> Gmt
+ end,
+ DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
+ lists:flatten(
+ io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
+ [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
+
+%% uniq
+
+uniq([]) ->
+ [];
+uniq([First,First|Rest]) ->
+ uniq([First|Rest]);
+uniq([First|Rest]) ->
+ [First|uniq(Rest)].
+
+
+%% day
+
+day(1) -> "Mon";
+day(2) -> "Tue";
+day(3) -> "Wed";
+day(4) -> "Thu";
+day(5) -> "Fri";
+day(6) -> "Sat";
+day(7) -> "Sun".
+
+%% month
+
+month(1) -> "Jan";
+month(2) -> "Feb";
+month(3) -> "Mar";
+month(4) -> "Apr";
+month(5) -> "May";
+month(6) -> "Jun";
+month(7) -> "Jul";
+month(8) -> "Aug";
+month(9) -> "Sep";
+month(10) -> "Oct";
+month(11) -> "Nov";
+month(12) -> "Dec".
+
+%% decode_hex
+
+decode_hex([$%,Hex1,Hex2|Rest]) ->
+ [hex2dec(Hex1)*16+hex2dec(Hex2)|decode_hex(Rest)];
+decode_hex([First|Rest]) ->
+ [First|decode_hex(Rest)];
+decode_hex([]) ->
+ [].
+
+hex2dec(X) when X>=$0,X=<$9 -> X-$0;
+hex2dec(X) when X>=$A,X=<$F -> X-$A+10;
+hex2dec(X) when X>=$a,X=<$f -> X-$a+10.
+
+%% decode_base64 (DEBUG STRING: QWxhZGRpbjpvcGVuIHNlc2FtZQ==)
+
+%%% Base-64 decoding (RFC2045)
+%% Keep for backward compatibility
+decode_base64(Base64) ->
+ http_base_64:decode(Base64).
+encode_base64(ASCII) ->
+ http_base_64:encode(ASCII).
+
+%% flatlength
+flatlength(List) ->
+ flatlength(List, 0).
+
+flatlength([H|T],L) when list(H) ->
+ flatlength(H,flatlength(T,L));
+flatlength([H|T],L) when binary(H) ->
+ flatlength(T,L+size(H));
+flatlength([_H|T],L) ->
+ flatlength(T,L+1);
+flatlength([],L) ->
+ L.
+
+%% split_path
+
+split_path(Path) ->
+ case regexp:match(Path,"[\?].*\$") of
+ %% A QUERY_STRING exists!
+ {match,Start,Length} ->
+ {httpd_util:decode_hex(string:substr(Path,1,Start-1)),
+ string:substr(Path,Start,Length)};
+ %% A possible PATH_INFO exists!
+ nomatch ->
+ split_path(Path,[])
+ end.
+
+split_path([],SoFar) ->
+ {httpd_util:decode_hex(lists:reverse(SoFar)),[]};
+split_path([$/|Rest],SoFar) ->
+ Path=httpd_util:decode_hex(lists:reverse(SoFar)),
+ case file:read_file_info(Path) of
+ {ok,FileInfo} when FileInfo#file_info.type == regular ->
+ {Path,[$/|Rest]};
+ {ok, _FileInfo} ->
+ split_path(Rest,[$/|SoFar]);
+ {error, _Reason} ->
+ split_path(Rest,[$/|SoFar])
+ end;
+split_path([C|Rest],SoFar) ->
+ split_path(Rest,[C|SoFar]).
+
+%% split_script_path
+
+split_script_path(Path) ->
+ case split_script_path(Path, []) of
+ {Script, AfterPath} ->
+ {PathInfo, QueryString} = pathinfo_querystring(AfterPath),
+ {Script, {PathInfo, QueryString}};
+ not_a_script ->
+ not_a_script
+ end.
+
+pathinfo_querystring(Str) ->
+ pathinfo_querystring(Str, []).
+pathinfo_querystring([], SoFar) ->
+ {lists:reverse(SoFar), []};
+pathinfo_querystring([$?|Rest], SoFar) ->
+ {lists:reverse(SoFar), Rest};
+pathinfo_querystring([C|Rest], SoFar) ->
+ pathinfo_querystring(Rest, [C|SoFar]).
+
+split_script_path([$?|QueryString], SoFar) ->
+ Path = httpd_util:decode_hex(lists:reverse(SoFar)),
+ case file:read_file_info(Path) of
+ {ok,FileInfo} when FileInfo#file_info.type == regular ->
+ {Path, [$?|QueryString]};
+ {ok, _FileInfo} ->
+ not_a_script;
+ {error, _Reason} ->
+ not_a_script
+ end;
+split_script_path([], SoFar) ->
+ Path = httpd_util:decode_hex(lists:reverse(SoFar)),
+ case file:read_file_info(Path) of
+ {ok,FileInfo} when FileInfo#file_info.type == regular ->
+ {Path, []};
+ {ok, _FileInfo} ->
+ not_a_script;
+ {error, _Reason} ->
+ not_a_script
+ end;
+split_script_path([$/|Rest], SoFar) ->
+ Path = httpd_util:decode_hex(lists:reverse(SoFar)),
+ case file:read_file_info(Path) of
+ {ok, FileInfo} when FileInfo#file_info.type == regular ->
+ {Path, [$/|Rest]};
+ {ok, _FileInfo} ->
+ split_script_path(Rest, [$/|SoFar]);
+ {error, _Reason} ->
+ split_script_path(Rest, [$/|SoFar])
+ end;
+split_script_path([C|Rest], SoFar) ->
+ split_script_path(Rest,[C|SoFar]).
+
+%% suffix
+
+suffix(Path) ->
+ case filename:extension(Path) of
+ [] ->
+ [];
+ Extension ->
+ tl(Extension)
+ end.
+
+%% to_upper
+
+to_upper(Str) ->
+ http_util:to_upper(Str).
+
+%% to_lower
+
+to_lower(Str) ->
+ http_util:to_lower(Str).
+
+
+
+%% strip
+strip(Value)->
+ lists:reverse(remove_ws(lists:reverse(remove_ws(Value)))).
+
+remove_ws([$\s|Rest])->
+ remove_ws(Rest);
+remove_ws([$\t|Rest]) ->
+ remove_ws(Rest);
+remove_ws(Rest) ->
+ Rest.
+
+%% split
+
+split(String,RegExp,Limit) ->
+ case regexp:parse(RegExp) of
+ {error,Reason} ->
+ {error,Reason};
+ {ok,_} ->
+ {ok,do_split(String,RegExp,Limit)}
+ end.
+
+do_split(String, _RegExp, 1) ->
+ [String];
+
+do_split(String,RegExp,Limit) ->
+ case regexp:first_match(String,RegExp) of
+ {match,Start,Length} ->
+ [string:substr(String,1,Start-1)|
+ do_split(lists:nthtail(Start+Length-1,String),RegExp,Limit-1)];
+ nomatch ->
+ [String]
+ end.
+
+%% make_name/2, make_name/3
+%% Prefix -> string()
+%% First part of the name, e.g. "httpd"
+%% Addr -> {A,B,C,D} | string() | undefined
+%% The address part of the name.
+%% e.g. "123.234.55.66" or {123,234,55,66} or "otp.ericsson.se"
+%% for a host address or undefined if local host.
+%% Port -> integer()
+%% Last part of the name, such as the HTTPD server port
+%% number (80).
+%% Postfix -> Any string that will be added last to the name
+%%
+%% Example:
+%% make_name("httpd","otp.ericsson.se",80) => httpd__otp_ericsson_se__80
+%% make_name("httpd",undefined,8088) => httpd_8088
+
+make_name(Prefix,Port) ->
+ make_name(Prefix,undefined,Port,"").
+
+make_name(Prefix,Addr,Port) ->
+ make_name(Prefix,Addr,Port,"").
+
+make_name(Prefix,"*",Port,Postfix) ->
+ make_name(Prefix,undefined,Port,Postfix);
+
+make_name(Prefix,any,Port,Postfix) ->
+ make_name1(io_lib:format("~s_~w~s",[Prefix,Port,Postfix]));
+
+make_name(Prefix,undefined,Port,Postfix) ->
+ make_name1(io_lib:format("~s_~w~s",[Prefix,Port,Postfix]));
+
+make_name(Prefix,Addr,Port,Postfix) ->
+ NameString =
+ Prefix ++ "__" ++ make_name2(Addr) ++ "__" ++
+ integer_to_list(Port) ++ Postfix,
+ make_name1(NameString).
+
+make_name1(String) ->
+ list_to_atom(lists:flatten(String)).
+
+make_name2({A,B,C,D}) ->
+ io_lib:format("~w_~w_~w_~w",[A,B,C,D]);
+
+make_name2({A, B, C, D, E, F, G, H}) ->
+ io_lib:format("~w_~w_~w_~w_~w_~w_~w_~w",[integer_to_hexlist(A),
+ integer_to_hexlist(B),
+ integer_to_hexlist(C),
+ integer_to_hexlist(D),
+ integer_to_hexlist(E),
+ integer_to_hexlist(F),
+ integer_to_hexlist(G),
+ integer_to_hexlist(H)
+ ]);
+make_name2(Addr) ->
+ search_and_replace(Addr,$.,$_).
+
+search_and_replace(S,A,B) ->
+ Fun = fun(What) ->
+ case What of
+ A -> B;
+ O -> O
+ end
+ end,
+ lists:map(Fun,S).
+
+
+
+%%----------------------------------------------------------------------
+%% Converts a string that consists of 0-9,A-F,a-f to an
+%% integer
+%%----------------------------------------------------------------------
+
+hexlist_to_integer(List)->
+ http_util:hexlist_to_integer(List).
+
+%%----------------------------------------------------------------------
+%% Converts an integer to a hexlist
+%%----------------------------------------------------------------------
+encode_hex(Num)->
+ integer_to_hexlist(Num).
+
+integer_to_hexlist(Num) when is_integer(Num) ->
+ http_util:integer_to_hexlist(Num).
+
+create_etag(FileInfo)->
+ create_etag(FileInfo#file_info.mtime,FileInfo#file_info.size).
+
+create_etag({{Year,Month,Day},{Hour,Min,Sec}},Size)->
+ create_part([Year,Month,Day,Hour,Min,Sec])++io_lib:write(Size);
+
+create_etag(FileInfo,Size)->
+ create_etag(FileInfo#file_info.mtime,Size).
+
+create_part(Values)->
+ lists:map(fun(Val0)->
+ Val=Val0 rem 60,
+ if
+ Val=<25 ->
+ 65+Val; % A-Z
+ Val=<50 ->
+ 72+Val; % a-z
+ %%Since no date s
+ true ->
+ Val-3
+ end
+ end,Values).
+
+
+%%----------------------------------------------------------------------
+%% Enable debugging, validate httpd options
+%%----------------------------------------------------------------------
+
+enable_debug([]) ->
+ ok;
+enable_debug(Debug) ->
+ dbg:tracer(),
+ dbg:p(all, [call]),
+ do_enable_debug(Debug).
+
+do_enable_debug(disable) ->
+ dbg:stop();
+do_enable_debug([]) ->
+ ok;
+do_enable_debug([{Level,Modules}|Rest]) when atom(Level),list(Modules) ->
+ case Level of
+ all_functions ->
+ io:format("Tracing on all functions set on modules: ~p~n",
+ [Modules]),
+ lists:foreach(fun(X)->dbg:tpl(X, [{'_', [], [{return_trace}]}]) end,Modules);
+ exported_functions ->
+ io:format("Tracing on exported functions set on modules: ~p~n",[Modules]),
+ lists:foreach(fun(X)->dbg:tp(X, [{'_', [], [{return_trace}]}]) end,Modules);
+ disable ->
+ io:format("Tracing disabled on modules: ~p~n",[Modules]),
+ lists:foreach(fun(X)->dbg:ctp(X) end,Modules);
+ _ ->
+ ok
+ end,
+ do_enable_debug(Rest).
+
+
+
+valid_options(Debug,AcceptTimeout,ConfigFile) ->
+ valid_debug(Debug),
+ valid_accept_timeout(AcceptTimeout),
+ valid_config_file(ConfigFile).
+valid_debug([]) ->
+ ok;
+valid_debug(disable) ->
+ ok;
+valid_debug(L) when list(L) ->
+ valid_debug2(L);
+valid_debug(D) ->
+ throw({error,{bad_debug_option,D}}).
+valid_debug2([{all_functions,L}|Rest]) when list(L) ->
+ test_load_modules(L),
+ valid_debug2(Rest);
+valid_debug2([{exported_functions,L}|Rest]) when list(L) ->
+ test_load_modules(L),
+ valid_debug2(Rest);
+valid_debug2([{disable,L}|Rest]) when list(L) ->
+ test_load_modules(L),
+ valid_debug2(Rest);
+valid_debug2([H|_T]) ->
+ throw({error,{bad_debug_option,H}});
+valid_debug2([]) ->
+ ok.
+valid_accept_timeout(I) when is_integer(I) ->
+ ok;
+valid_accept_timeout(A) ->
+ throw({error,{bad_debug_option,A}}).
+valid_config_file(_) ->
+ ok.
+
+test_load_modules([H|T]) when atom(H) ->
+ case code:which(H) of
+ non_existing ->
+ throw({error,{module_does_not_exist,H}});
+ _ -> ok
+ end,
+ test_load_modules(T);
+test_load_modules([H|_T]) ->
+ throw({error,{module_name_not_atom,H}});
+test_load_modules([]) ->
+ ok.
diff --git a/src/couch_inets/inets.erl b/src/couch_inets/inets.erl
new file mode 100644
index 00000000..0203d8d5
--- /dev/null
+++ b/src/couch_inets/inets.erl
@@ -0,0 +1,34 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%%----------------------------------------------------------------------
+%% Purpose: The main interface module of the inets application
+%%----------------------------------------------------------------------
+
+-module(inets).
+
+-export([start/0, start/1, stop/0]).
+
+start() ->
+ application:start(inets).
+
+start(Type) ->
+ application:start(inets, Type).
+
+stop() ->
+ application:stop(inets).
+
diff --git a/src/couch_inets/inets_app.erl b/src/couch_inets/inets_app.erl
new file mode 100644
index 00000000..189c99f7
--- /dev/null
+++ b/src/couch_inets/inets_app.erl
@@ -0,0 +1,28 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(inets_app).
+
+-behaviour(application).
+
+-export([start/2, stop/1]).
+
+start(_Type, _State) ->
+ supervisor:start_link({local, inets_sup}, inets_sup, []).
+
+stop(_State) ->
+ ok.
diff --git a/src/couch_inets/inets_internal.hrl b/src/couch_inets/inets_internal.hrl
new file mode 100644
index 00000000..fd8d0caa
--- /dev/null
+++ b/src/couch_inets/inets_internal.hrl
@@ -0,0 +1,27 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+
+-define(CR, $\r).
+-define(LF, $\n).
+-define(CRLF, [$\r,$\n]).
+-define(SP, $\s).
+-define(TAB, $\t).
+-define(LEFT_PAREN, $().
+-define(RIGHT_PAREN, $)).
+-define(WHITE_SPACE, $ ).
+-define(DOUBLE_QUOTE, $").
diff --git a/src/couch_inets/inets_sup.erl b/src/couch_inets/inets_sup.erl
new file mode 100644
index 00000000..d0b533cb
--- /dev/null
+++ b/src/couch_inets/inets_sup.erl
@@ -0,0 +1,106 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%%----------------------------------------------------------------------
+%% Purpose: The top supervisor for the inets application
+%%----------------------------------------------------------------------
+
+-module(inets_sup).
+
+-behaviour(supervisor).
+
+-export([init/1]).
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+init([]) ->
+ SupFlags = {one_for_one, 10, 3600},
+ Children = children(),
+ {ok, {SupFlags, Children}}.
+
+%%%=========================================================================
+%%% Internal functions
+%%%=========================================================================
+get_services() ->
+ case (catch application:get_env(inets, services)) of
+ {ok, Services} ->
+ Services;
+ _ ->
+ []
+ end.
+
+children() ->
+ Services = get_services(),
+ HttpdServices = [Service || Service <- Services, is_httpd(Service)],
+ HttpcServices = [Service || Service <- Services, is_httpc(Service)],
+ TftpdServices = [Service || Service <- Services, is_tftpd(Service)],
+ [ftp_child_spec(), httpc_child_spec(HttpcServices),
+ httpd_child_spec(HttpdServices), tftpd_child_spec(TftpdServices)].
+
+ftp_child_spec() ->
+ Name = ftp_sup,
+ StartFunc = {ftp_sup, start_link, []},
+ Restart = permanent,
+ Shutdown = infinity,
+ Modules = [ftp_sup],
+ Type = supervisor,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+httpc_child_spec(HttpcServices) ->
+ Name = httpc_sup,
+ StartFunc = {httpc_sup, start_link, [HttpcServices]},
+ Restart = permanent,
+ Shutdown = infinity,
+ Modules = [httpc_sup],
+ Type = supervisor,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+httpd_child_spec(HttpdServices) ->
+ Name = httpd_sup,
+ StartFunc = {httpd_sup, start_link, [HttpdServices]},
+ Restart = permanent,
+ Shutdown = infinity,
+ Modules = [httpd_sup],
+ Type = supervisor,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+tftpd_child_spec(TftpServices) ->
+ Name = tftp_sup,
+ StartFunc = {tftp_sup, start_link, [TftpServices]},
+ Restart = permanent,
+ Shutdown = infinity,
+ Modules = [tftp_sup],
+ Type = supervisor,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+is_httpd({httpd, _}) ->
+ true;
+is_httpd({httpd, _, _}) ->
+ true;
+is_httpd(_) ->
+ false.
+
+is_httpc({httpc, _}) ->
+ true;
+is_httpc(_) ->
+ false.
+
+is_tftpd({tftpd, _}) ->
+ true;
+is_tftpd(_) ->
+ false.
diff --git a/src/couch_inets/mod_actions.erl b/src/couch_inets/mod_actions.erl
new file mode 100644
index 00000000..3447196c
--- /dev/null
+++ b/src/couch_inets/mod_actions.erl
@@ -0,0 +1,92 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_actions).
+-export([do/1,load/2]).
+
+-include("httpd.hrl").
+
+%% do
+
+do(Info) ->
+ case httpd_util:key1search(Info#mod.data,status) of
+ %% A status code has been generated!
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed,Info#mod.data};
+ %% No status code has been generated!
+ undefined ->
+ case httpd_util:key1search(Info#mod.data,response) of
+ %% No response has been generated!
+ undefined ->
+ Path = mod_alias:path(Info#mod.data,Info#mod.config_db,
+ Info#mod.request_uri),
+ Suffix = httpd_util:suffix(Path),
+ MimeType = httpd_util:lookup_mime(Info#mod.config_db,Suffix,
+ "text/plain"),
+ Actions = httpd_util:multi_lookup(Info#mod.config_db,action),
+ case action(Info#mod.request_uri,MimeType,Actions) of
+ {yes, RequestURI} ->
+ {proceed, [{new_request_uri, RequestURI} | Info#mod.data]};
+ no ->
+ Scripts = httpd_util:multi_lookup(Info#mod.config_db, script),
+ case script(Info#mod.request_uri, Info#mod.method, Scripts) of
+ {yes, RequestURI} ->
+ {proceed,[{new_request_uri, RequestURI} | Info#mod.data]};
+ no ->
+ {proceed, Info#mod.data}
+ end
+ end;
+ %% A response has been generated or sent!
+ _Response ->
+ {proceed, Info#mod.data}
+ end
+ end.
+
+action(_RequestURI, _MimeType, []) ->
+ no;
+action(RequestURI, MimeType, [{MimeType, CGIScript} | _Rest]) ->
+ {yes, CGIScript ++ RequestURI};
+action(RequestURI, MimeType, [_ | Rest]) ->
+ action(RequestURI, MimeType, Rest).
+
+script(_RequestURI, _Method, []) ->
+ no;
+script(RequestURI, Method, [{Method, CGIScript} | _Rest]) ->
+ {yes, CGIScript ++ RequestURI};
+script(RequestURI, Method, [_ | Rest]) ->
+ script(RequestURI, Method, Rest).
+
+%%
+%% Configuration
+%%
+
+%% load
+
+load("Action "++ Action, []) ->
+ case regexp:split(Action, " ") of
+ {ok,[MimeType, CGIScript]} ->
+ {ok,[],{action, {MimeType, CGIScript}}};
+ {ok,_} ->
+ {error,?NICE(httpd_conf:clean(Action)++" is an invalid Action")}
+ end;
+load("Script " ++ Script,[]) ->
+ case regexp:split(Script, " ") of
+ {ok,[Method, CGIScript]} ->
+ {ok,[],{script, {Method, CGIScript}}};
+ {ok,_} ->
+ {error,?NICE(httpd_conf:clean(Script)++" is an invalid Script")}
+ end.
diff --git a/src/couch_inets/mod_alias.erl b/src/couch_inets/mod_alias.erl
new file mode 100644
index 00000000..703e0d1e
--- /dev/null
+++ b/src/couch_inets/mod_alias.erl
@@ -0,0 +1,180 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_alias).
+
+-export([do/1,
+ real_name/3,
+ real_script_name/3,
+ default_index/2,
+ load/2,
+ path/3]).
+
+-include("httpd.hrl").
+
+-define(VMODULE,"ALIAS").
+
+%% do
+
+do(Info) ->
+ case httpd_util:key1search(Info#mod.data, status) of
+ %% A status code has been generated!
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed,Info#mod.data};
+ %% No status code has been generated!
+ undefined ->
+ case httpd_util:key1search(Info#mod.data, response) of
+ %% No response has been generated!
+ undefined ->
+ do_alias(Info);
+ %% A response has been generated or sent!
+ _Response ->
+ {proceed, Info#mod.data}
+ end
+ end.
+
+do_alias(Info) ->
+ {ShortPath, Path, AfterPath} =
+ real_name(Info#mod.config_db,
+ Info#mod.request_uri,
+ httpd_util:multi_lookup(Info#mod.config_db,alias)),
+ %% Relocate if a trailing slash is missing else proceed!
+ LastChar = lists:last(ShortPath),
+ case file:read_file_info(ShortPath) of
+ {ok, FileInfo} when FileInfo#file_info.type == directory,
+ LastChar /= $/ ->
+ ServerName = httpd_util:lookup(Info#mod.config_db, server_name),
+ Port = port_string(httpd_util:lookup(Info#mod.config_db,port, 80)),
+ URL = "http://" ++ ServerName ++ Port ++
+ Info#mod.request_uri ++ "/",
+ ReasonPhrase = httpd_util:reason_phrase(301),
+ Message = httpd_util:message(301, URL, Info#mod.config_db),
+ {proceed,
+ [{response,
+ {301, ["Location: ", URL, "\r\n"
+ "Content-Type: text/html\r\n",
+ "\r\n",
+ "<HTML>\n<HEAD>\n<TITLE>",ReasonPhrase,
+ "</TITLE>\n</HEAD>\n"
+ "<BODY>\n<H1>",ReasonPhrase,
+ "</H1>\n", Message,
+ "\n</BODY>\n</HTML>\n"]}}|
+ [{real_name, {Path, AfterPath}} | Info#mod.data]]};
+ _NoFile ->
+ {proceed,[{real_name, {Path, AfterPath}} | Info#mod.data]}
+ end.
+
+port_string(80) ->
+ "";
+port_string(Port) ->
+ ":"++integer_to_list(Port).
+
+%% real_name
+
+real_name(ConfigDB, RequestURI, []) ->
+ DocumentRoot = httpd_util:lookup(ConfigDB, document_root, ""),
+ RealName = DocumentRoot ++ RequestURI,
+ {ShortPath, _AfterPath} = httpd_util:split_path(RealName),
+ {Path, AfterPath} = httpd_util:split_path(default_index(ConfigDB,
+ RealName)),
+ {ShortPath, Path, AfterPath};
+real_name(ConfigDB, RequestURI, [{FakeName,RealName}|Rest]) ->
+ case regexp:match(RequestURI, "^" ++ FakeName) of
+ {match, _, _} ->
+ {ok, ActualName, _} = regexp:sub(RequestURI,
+ "^" ++ FakeName, RealName),
+ {ShortPath, _AfterPath} = httpd_util:split_path(ActualName),
+ {Path, AfterPath} =
+ httpd_util:split_path(default_index(ConfigDB, ActualName)),
+ {ShortPath, Path, AfterPath};
+ nomatch ->
+ real_name(ConfigDB,RequestURI,Rest)
+ end.
+
+%% real_script_name
+
+real_script_name(_ConfigDB, _RequestURI, []) ->
+ not_a_script;
+real_script_name(ConfigDB, RequestURI, [{FakeName,RealName} | Rest]) ->
+ case regexp:match(RequestURI,"^"++FakeName) of
+ {match,_,_} ->
+ {ok,ActualName,_}=regexp:sub(RequestURI,"^"++FakeName,RealName),
+ httpd_util:split_script_path(default_index(ConfigDB,ActualName));
+ nomatch ->
+ real_script_name(ConfigDB,RequestURI,Rest)
+ end.
+
+%% default_index
+
+default_index(ConfigDB, Path) ->
+ case file:read_file_info(Path) of
+ {ok, FileInfo} when FileInfo#file_info.type == directory ->
+ DirectoryIndex = httpd_util:lookup(ConfigDB, directory_index, []),
+ append_index(Path, DirectoryIndex);
+ _ ->
+ Path
+ end.
+
+append_index(RealName, []) ->
+ RealName;
+append_index(RealName, [Index | Rest]) ->
+ case file:read_file_info(filename:join(RealName, Index)) of
+ {error, _Reason} ->
+ append_index(RealName, Rest);
+ _ ->
+ filename:join(RealName, Index)
+ end.
+
+%% path
+
+path(Data, ConfigDB, RequestURI) ->
+ case httpd_util:key1search(Data, real_name) of
+ undefined ->
+ DocumentRoot = httpd_util:lookup(ConfigDB, document_root, ""),
+ {Path, _AfterPath} =
+ httpd_util:split_path(DocumentRoot++RequestURI),
+ Path;
+ {Path, _AfterPath} ->
+ Path
+ end.
+
+%%
+%% Configuration
+%%
+
+%% load
+
+load("DirectoryIndex " ++ DirectoryIndex, []) ->
+ {ok, DirectoryIndexes} = regexp:split(DirectoryIndex," "),
+ {ok,[], {directory_index, DirectoryIndexes}};
+load("Alias " ++ Alias,[]) ->
+ case regexp:split(Alias," ") of
+ {ok, [FakeName, RealName]} ->
+ {ok,[],{alias,{FakeName,RealName}}};
+ {ok, _} ->
+ {error,?NICE(httpd_conf:clean(Alias)++" is an invalid Alias")}
+ end;
+load("ScriptAlias " ++ ScriptAlias, []) ->
+ case regexp:split(ScriptAlias, " ") of
+ {ok, [FakeName, RealName]} ->
+ %% Make sure the path always has a trailing slash..
+ RealName1 = filename:join(filename:split(RealName)),
+ {ok, [], {script_alias, {FakeName, RealName1++"/"}}};
+ {ok, _} ->
+ {error, ?NICE(httpd_conf:clean(ScriptAlias)++
+ " is an invalid ScriptAlias")}
+ end.
diff --git a/src/couch_inets/mod_auth.erl b/src/couch_inets/mod_auth.erl
new file mode 100644
index 00000000..e94699fe
--- /dev/null
+++ b/src/couch_inets/mod_auth.erl
@@ -0,0 +1,784 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_auth).
+
+%% The functions that the webserver calls on startup/stop
+%% and when the server traverses the modules.
+-export([do/1, load/2, store/2, remove/1]).
+
+%% User entries to the gen-server.
+-export([add_user/2, add_user/5, add_user/6,
+ add_group_member/3, add_group_member/4, add_group_member/5,
+ list_users/1, list_users/2, list_users/3,
+ delete_user/2, delete_user/3, delete_user/4,
+ delete_group_member/3, delete_group_member/4, delete_group_member/5,
+ list_groups/1, list_groups/2, list_groups/3,
+ delete_group/2, delete_group/3, delete_group/4,
+ get_user/2, get_user/3, get_user/4,
+ list_group_members/2, list_group_members/3, list_group_members/4,
+ update_password/6, update_password/5]).
+
+-include("httpd.hrl").
+-include("mod_auth.hrl").
+
+%% We will not make the change to use base64 in stdlib in inets just yet.
+%% it will be included in the next major release of inets.
+-compile({nowarn_deprecated_function, {http_base_64, encode, 1}}).
+
+-define(VMODULE,"AUTH").
+
+-define(NOPASSWORD,"NoPassword").
+
+%% do
+do(Info) ->
+ case httpd_util:key1search(Info#mod.data,status) of
+ %% A status code has been generated!
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed, Info#mod.data};
+ %% No status code has been generated!
+ undefined ->
+ case httpd_util:key1search(Info#mod.data,response) of
+ %% No response has been generated!
+ undefined ->
+ Path = mod_alias:path(Info#mod.data,Info#mod.config_db,
+ Info#mod.request_uri),
+ %% Is it a secret area?
+ case secretp(Path,Info#mod.config_db) of
+ {yes, Directory, DirectoryData} ->
+ %% Authenticate (allow)
+ case allow((Info#mod.init_data)#init_data.peername,
+ Info#mod.socket_type,Info#mod.socket,
+ DirectoryData) of
+ allowed ->
+ case deny((Info#mod.init_data)#init_data.peername,
+ Info#mod.socket_type,
+ Info#mod.socket,
+ DirectoryData) of
+ not_denied ->
+ case httpd_util:key1search(
+ DirectoryData,
+ auth_type) of
+ undefined ->
+ {proceed, Info#mod.data};
+ none ->
+ {proceed, Info#mod.data};
+ AuthType ->
+ do_auth(Info,
+ Directory,
+ DirectoryData,
+ AuthType)
+ end;
+ {denied, Reason} ->
+ {proceed,
+ [{status, {403,
+ Info#mod.request_uri,
+ Reason}}|
+ Info#mod.data]}
+ end;
+ {not_allowed, Reason} ->
+ {proceed,[{status,{403,
+ Info#mod.request_uri,
+ Reason}} |
+ Info#mod.data]}
+ end;
+ no ->
+ {proceed, Info#mod.data}
+ end;
+ %% A response has been generated or sent!
+ _Response ->
+ {proceed, Info#mod.data}
+ end
+ end.
+
+
+do_auth(Info, Directory, DirectoryData, _AuthType) ->
+ %% Authenticate (require)
+ case require(Info, Directory, DirectoryData) of
+ authorized ->
+ {proceed,Info#mod.data};
+ {authorized, User} ->
+ {proceed, [{remote_user,User}|Info#mod.data]};
+ {authorization_required, Realm} ->
+ ReasonPhrase = httpd_util:reason_phrase(401),
+ Message = httpd_util:message(401,none,Info#mod.config_db),
+ {proceed,
+ [{response,
+ {401,
+ ["WWW-Authenticate: Basic realm=\"",Realm,
+ "\"\r\n\r\n","<HTML>\n<HEAD>\n<TITLE>",
+ ReasonPhrase,"</TITLE>\n",
+ "</HEAD>\n<BODY>\n<H1>",ReasonPhrase,
+ "</H1>\n",Message,"\n</BODY>\n</HTML>\n"]}}|
+ Info#mod.data]};
+ {status, {StatusCode,PhraseArgs,Reason}} ->
+ {proceed, [{status,{StatusCode,PhraseArgs,Reason}}|
+ Info#mod.data]}
+ end.
+
+%% require
+
+require(Info, Directory, DirectoryData) ->
+ ParsedHeader = Info#mod.parsed_header,
+ ValidUsers = httpd_util:key1search(DirectoryData, require_user),
+ ValidGroups = httpd_util:key1search(DirectoryData, require_group),
+
+ %% Any user or group restrictions?
+ case ValidGroups of
+ undefined when ValidUsers == undefined ->
+ authorized;
+ _ ->
+ case httpd_util:key1search(ParsedHeader, "authorization") of
+ undefined ->
+ authorization_required(DirectoryData);
+ %% Check credentials!
+ "Basic" ++ EncodedString = Credentials ->
+ case (catch http_base_64:decode(EncodedString)) of
+ {'EXIT',{function_clause, _}} ->
+ {status, {401, none, ?NICE("Bad credentials "++
+ Credentials)}};
+ DecodedString ->
+ validate_user(Info, Directory, DirectoryData,
+ ValidUsers, ValidGroups,
+ DecodedString)
+ end;
+ %% Bad credentials!
+ BadCredentials ->
+ {status, {401, none, ?NICE("Bad credentials "++
+ BadCredentials)}}
+ end
+ end.
+
+authorization_required(DirectoryData) ->
+ case httpd_util:key1search(DirectoryData, auth_name) of
+ undefined ->
+ {status,{500, none,?NICE("AuthName directive not specified")}};
+ Realm ->
+ {authorization_required, Realm}
+ end.
+
+
+validate_user(Info, Directory, DirectoryData, ValidUsers,
+ ValidGroups, DecodedString) ->
+ case a_valid_user(Info, DecodedString,
+ ValidUsers, ValidGroups,
+ Directory, DirectoryData) of
+ {yes, User} ->
+ {authorized, User};
+ {no, _Reason} ->
+ authorization_required(DirectoryData);
+ {status, {StatusCode,PhraseArgs,Reason}} ->
+ {status,{StatusCode,PhraseArgs,Reason}}
+ end.
+
+a_valid_user(Info,DecodedString,ValidUsers,ValidGroups,Dir,DirData) ->
+ case httpd_util:split(DecodedString,":",2) of
+ {ok,[SupposedUser, Password]} ->
+ case user_accepted(SupposedUser, ValidUsers) of
+ true ->
+ check_password(SupposedUser, Password, Dir, DirData);
+ false ->
+ case group_accepted(Info,SupposedUser,
+ ValidGroups,Dir,DirData) of
+ true ->
+ check_password(SupposedUser,Password,Dir,DirData);
+ false ->
+ {no,?NICE("No such user exists")}
+ end
+ end;
+ {ok,BadCredentials} ->
+ {status,{401,none,?NICE("Bad credentials "++BadCredentials)}}
+ end.
+
+user_accepted(_SupposedUser, undefined) ->
+ false;
+user_accepted(SupposedUser, ValidUsers) ->
+ lists:member(SupposedUser, ValidUsers).
+
+
+group_accepted(_Info, _User, undefined, _Dir, _DirData) ->
+ false;
+group_accepted(_Info, _User, [], _Dir, _DirData) ->
+ false;
+group_accepted(Info, User, [Group|Rest], Dir, DirData) ->
+ Ret = int_list_group_members(Group, Dir, DirData),
+ case Ret of
+ {ok, UserList} ->
+ case lists:member(User, UserList) of
+ true ->
+ true;
+ false ->
+ group_accepted(Info, User, Rest, Dir, DirData)
+ end;
+ _ ->
+ false
+ end.
+
+check_password(User, Password, _Dir, DirData) ->
+ case int_get_user(DirData, User) of
+ {ok, UStruct} ->
+ case UStruct#httpd_user.password of
+ Password ->
+ %% FIXME
+ {yes, UStruct#httpd_user.username};
+ _ ->
+ {no, "No such user"} % Don't say 'Bad Password' !!!
+ end;
+ _ ->
+ {no, "No such user"}
+ end.
+
+
+%% Middle API. These functions call the appropriate authentication module.
+int_get_user(DirData, User) ->
+ AuthMod = auth_mod_name(DirData),
+ apply(AuthMod, get_user, [DirData, User]).
+
+int_list_group_members(Group, _Dir, DirData) ->
+ AuthMod = auth_mod_name(DirData),
+ apply(AuthMod, list_group_members, [DirData, Group]).
+
+auth_mod_name(DirData) ->
+ case httpd_util:key1search(DirData, auth_type, plain) of
+ plain -> mod_auth_plain;
+ mnesia -> mod_auth_mnesia;
+ dets -> mod_auth_dets
+ end.
+
+
+%%
+%% Is it a secret area?
+%%
+
+%% secretp
+
+secretp(Path,ConfigDB) ->
+ Directories = ets:match(ConfigDB,{directory,'$1','_'}),
+ case secret_path(Path, Directories) of
+ {yes,Directory} ->
+ {yes,Directory,
+ lists:flatten(ets:match(ConfigDB,{directory,Directory,'$1'}))};
+ no ->
+ no
+ end.
+
+secret_path(Path, Directories) ->
+ secret_path(Path, httpd_util:uniq(lists:sort(Directories)),to_be_found).
+
+secret_path(_Path, [], to_be_found) ->
+ no;
+secret_path(_Path, [], Directory) ->
+ {yes, Directory};
+secret_path(Path, [[NewDirectory] | Rest], Directory) ->
+ case regexp:match(Path, NewDirectory) of
+ {match, _, _} when Directory == to_be_found ->
+ secret_path(Path, Rest, NewDirectory);
+ {match, _, Length} when Length > length(Directory)->
+ secret_path(Path, Rest,NewDirectory);
+ {match, _, _Length} ->
+ secret_path(Path, Rest, Directory);
+ nomatch ->
+ secret_path(Path, Rest, Directory)
+ end.
+
+%%
+%% Authenticate
+%%
+
+%% allow
+
+allow({_,RemoteAddr}, _SocketType, _Socket, DirectoryData) ->
+ Hosts = httpd_util:key1search(DirectoryData, allow_from, all),
+ case validate_addr(RemoteAddr, Hosts) of
+ true ->
+ allowed;
+ false ->
+ {not_allowed, ?NICE("Connection from your host is not allowed")}
+ end.
+
+validate_addr(_RemoteAddr, all) -> % When called from 'allow'
+ true;
+validate_addr(_RemoteAddr, none) -> % When called from 'deny'
+ false;
+validate_addr(_RemoteAddr, []) ->
+ false;
+validate_addr(RemoteAddr, [HostRegExp | Rest]) ->
+ case regexp:match(RemoteAddr, HostRegExp) of
+ {match,_,_} ->
+ true;
+ nomatch ->
+ validate_addr(RemoteAddr,Rest)
+ end.
+
+%% deny
+
+deny({_,RemoteAddr}, _SocketType, _Socket,DirectoryData) ->
+ Hosts = httpd_util:key1search(DirectoryData, deny_from, none),
+ case validate_addr(RemoteAddr,Hosts) of
+ true ->
+ {denied, ?NICE("Connection from your host is not allowed")};
+ false ->
+ not_denied
+ end.
+
+%%
+%% Configuration
+%%
+
+%% load/2
+%%
+
+%% mod_auth recognizes the following Configuration Directives:
+%% <Directory /path/to/directory>
+%% AuthDBType
+%% AuthName
+%% AuthUserFile
+%% AuthGroupFile
+%% AuthAccessPassword
+%% require
+%% allow
+%% </Directory>
+
+%% When a <Directory> directive is found, a new context is set to
+%% [{directory, Directory, DirData}|OtherContext]
+%% DirData in this case is a key-value list of data belonging to the
+%% directory in question.
+%%
+%% When the </Directory> statement is found, the Context created earlier
+%% will be returned as a ConfigList and the context will return to the
+%% state it was previously.
+
+load("<Directory " ++ Directory,[]) ->
+ Dir = httpd_conf:custom_clean(Directory,"",">"),
+ {ok,[{directory, Dir, [{path, Dir}]}]};
+load(eof,[{directory, Directory, _DirData}|_]) ->
+ {error, ?NICE("Premature end-of-file in "++ Directory)};
+
+load("AuthName " ++ AuthName, [{directory,Directory, DirData}|Rest]) ->
+ {ok, [{directory,Directory,
+ [ {auth_name, httpd_conf:clean(AuthName)}|DirData]} | Rest ]};
+
+load("AuthUserFile " ++ AuthUserFile0,
+ [{directory, Directory, DirData}|Rest]) ->
+ AuthUserFile = httpd_conf:clean(AuthUserFile0),
+ {ok,[{directory,Directory,
+ [ {auth_user_file, AuthUserFile}|DirData]} | Rest ]};
+
+load([$A,$u,$t,$h,$G,$r,$o,$u,$p,$F,$i,$l,$e,$ |AuthGroupFile0],
+ [{directory,Directory, DirData}|Rest]) ->
+ AuthGroupFile = httpd_conf:clean(AuthGroupFile0),
+ {ok,[{directory,Directory,
+ [ {auth_group_file, AuthGroupFile}|DirData]} | Rest]};
+
+%AuthAccessPassword
+load("AuthAccessPassword " ++ AuthAccessPassword0,
+ [{directory,Directory, DirData}|Rest]) ->
+ AuthAccessPassword = httpd_conf:clean(AuthAccessPassword0),
+ {ok,[{directory,Directory,
+ [{auth_access_password, AuthAccessPassword}|DirData]} | Rest]};
+
+
+
+
+load("AuthDBType " ++ Type,
+ [{directory, Dir, DirData}|Rest]) ->
+ case httpd_conf:clean(Type) of
+ "plain" ->
+ {ok, [{directory, Dir, [{auth_type, plain}|DirData]} | Rest ]};
+ "mnesia" ->
+ {ok, [{directory, Dir, [{auth_type, mnesia}|DirData]} | Rest ]};
+ "dets" ->
+ {ok, [{directory, Dir, [{auth_type, dets}|DirData]} | Rest ]};
+ _ ->
+ {error, ?NICE(httpd_conf:clean(Type)++" is an invalid AuthDBType")}
+ end;
+
+load("require " ++ Require,[{directory,Directory, DirData}|Rest]) ->
+ case regexp:split(Require," ") of
+ {ok,["user"|Users]} ->
+ {ok,[{directory,Directory,
+ [{require_user,Users}|DirData]} | Rest]};
+ {ok,["group"|Groups]} ->
+ {ok,[{directory,Directory,
+ [{require_group,Groups}|DirData]} | Rest]};
+ {ok,_} ->
+ {error,?NICE(httpd_conf:clean(Require) ++" is an invalid require")}
+ end;
+
+load("allow " ++ Allow,[{directory,Directory, DirData}|Rest]) ->
+ case regexp:split(Allow," ") of
+ {ok,["from","all"]} ->
+ {ok,[{directory,Directory,
+ [{allow_from,all}|DirData]} | Rest]};
+ {ok,["from"|Hosts]} ->
+ {ok,[{directory,Directory,
+ [{allow_from,Hosts}|DirData]} | Rest]};
+ {ok,_} ->
+ {error,?NICE(httpd_conf:clean(Allow) ++" is an invalid allow")}
+ end;
+
+load("deny " ++ Deny,[{directory,Directory, DirData}|Rest]) ->
+ case regexp:split(Deny," ") of
+ {ok, ["from", "all"]} ->
+ {ok,[{directory, Directory,
+ [{deny_from, all}|DirData]} | Rest]};
+ {ok, ["from"|Hosts]} ->
+ {ok,[{directory, Directory,
+ [{deny_from, Hosts}|DirData]} | Rest]};
+ {ok, _} ->
+ {error,?NICE(httpd_conf:clean(Deny) ++" is an invalid deny")}
+ end;
+
+load("</Directory>",[{directory,Directory, DirData}|Rest]) ->
+ directory_config_check(Directory, DirData),
+ {ok, Rest, {directory, Directory, DirData}};
+
+load("AuthMnesiaDB " ++ AuthMnesiaDB,
+ [{directory, Dir, DirData}|Rest]) ->
+ case httpd_conf:clean(AuthMnesiaDB) of
+ "On" ->
+ {ok,[{directory,Dir,[{auth_type,mnesia}|DirData]}|Rest]};
+ "Off" ->
+ {ok,[{directory,Dir,[{auth_type,plain}|DirData]}|Rest]};
+ _ ->
+ {error, ?NICE(httpd_conf:clean(AuthMnesiaDB) ++
+ " is an invalid AuthMnesiaDB")}
+ end.
+
+directory_config_check(Directory, DirData) ->
+ case httpd_util:key1search(DirData,auth_type) of
+ plain ->
+ check_filename_present(Directory,auth_user_file,DirData),
+ check_filename_present(Directory,auth_group_file,DirData);
+ undefined ->
+ throw({error,
+ ?NICE("Server configuration missed AuthDBType directive")});
+ _ ->
+ ok
+ end.
+check_filename_present(_Dir,AuthFile,DirData) ->
+ case httpd_util:key1search(DirData,AuthFile) of
+ Name when list(Name) ->
+ ok;
+ _ ->
+ throw({error,?NICE("Server configuration missed "++
+ directive(AuthFile)++" directive")})
+ end.
+
+directive(auth_user_file) ->
+ "AuthUserFile";
+directive(auth_group_file) ->
+ "AuthGroupFile".
+
+%% store
+
+store({directory,Directory0, DirData0}, ConfigList) ->
+ Port = httpd_util:key1search(ConfigList, port),
+ DirData = case httpd_util:key1search(ConfigList, bind_address) of
+ undefined ->
+ [{port, Port}|DirData0];
+ Addr ->
+ [{port, Port},{bind_address,Addr}|DirData0]
+ end,
+ Directory =
+ case filename:pathtype(Directory0) of
+ relative ->
+ SR = httpd_util:key1search(ConfigList, server_root),
+ filename:join(SR, Directory0);
+ _ ->
+ Directory0
+ end,
+ AuthMod =
+ case httpd_util:key1search(DirData0, auth_type) of
+ mnesia -> mod_auth_mnesia;
+ dets -> mod_auth_dets;
+ plain -> mod_auth_plain;
+ _ -> no_module_at_all
+ end,
+ case AuthMod of
+ no_module_at_all ->
+ {ok, {directory, Directory, DirData}};
+ _ ->
+         %% Ensure that there is a password, or add a standard password:
+ %% "NoPassword"
+ %% In this way a user must select to use a noPassword
+ Pwd = case httpd_util:key1search(DirData,auth_access_password)of
+ undefined->
+ ?NOPASSWORD;
+ PassW->
+ PassW
+ end,
+ DirDataLast = lists:keydelete(auth_access_password,1,DirData),
+ case catch AuthMod:store_directory_data(Directory, DirDataLast) of
+ ok ->
+ add_auth_password(Directory,Pwd,ConfigList),
+ {ok, {directory, Directory, DirDataLast}};
+ {ok, NewDirData} ->
+ add_auth_password(Directory,Pwd,ConfigList),
+ {ok, {directory, Directory, NewDirData}};
+ {error, Reason} ->
+ {error, Reason};
+ Other ->
+ {error, Other}
+ end
+ end.
+
+
+add_auth_password(Dir, Pwd0, ConfigList) ->
+ Addr = httpd_util:key1search(ConfigList, bind_address),
+ Port = httpd_util:key1search(ConfigList, port),
+ mod_auth_server:start(Addr, Port),
+ mod_auth_server:add_password(Addr, Port, Dir, Pwd0).
+
+%% remove
+
+
+remove(ConfigDB) ->
+ lists:foreach(fun({directory, _Dir, DirData}) ->
+ AuthMod = auth_mod_name(DirData),
+ (catch apply(AuthMod, remove, [DirData]))
+ end,
+ ets:match_object(ConfigDB,{directory,'_','_'})),
+ Addr = case lookup(ConfigDB, bind_address) of
+ [] ->
+ undefined;
+ [{bind_address, Address}] ->
+ Address
+ end,
+ [{port, Port}] = lookup(ConfigDB, port),
+ mod_auth_server:stop(Addr, Port),
+ ok.
+
+
+
+
+%% --------------------------------------------------------------------
+
+%% update_password
+
+update_password(Port, Dir, Old, New, New)->
+ update_password(undefined, Port, Dir, Old, New, New).
+
+update_password(Addr, Port, Dir, Old, New, New) when list(New) ->
+ mod_auth_server:update_password(Addr, Port, Dir, Old, New);
+
+update_password(_Addr, _Port, _Dir, _Old, _New, _New) ->
+ {error, badtype};
+update_password(_Addr, _Port, _Dir, _Old, _New, _New1) ->
+ {error, notqeual}.
+
+
+%% add_user
+
+add_user(UserName, Opt) ->
+ case get_options(Opt, mandatory) of
+ {Addr, Port, Dir, AuthPwd}->
+ case get_options(Opt, userData) of
+ {error, Reason}->
+ {error, Reason};
+ {UserData, Password}->
+ User = [#httpd_user{username = UserName,
+ password = Password,
+ user_data = UserData}],
+ mod_auth_server:add_user(Addr, Port, Dir, User, AuthPwd)
+ end
+ end.
+
+
+add_user(UserName, Password, UserData, Port, Dir) ->
+ add_user(UserName, Password, UserData, undefined, Port, Dir).
+add_user(UserName, Password, UserData, Addr, Port, Dir) ->
+ User = [#httpd_user{username = UserName,
+ password = Password,
+ user_data = UserData}],
+ mod_auth_server:add_user(Addr, Port, Dir, User, ?NOPASSWORD).
+
+
+%% get_user
+
+get_user(UserName, Opt) ->
+ case get_options(Opt, mandatory) of
+ {Addr, Port, Dir, AuthPwd} ->
+ mod_auth_server:get_user(Addr, Port, Dir, UserName, AuthPwd);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+get_user(UserName, Port, Dir) ->
+ get_user(UserName, undefined, Port, Dir).
+get_user(UserName, Addr, Port, Dir) ->
+ mod_auth_server:get_user(Addr, Port, Dir, UserName, ?NOPASSWORD).
+
+
+%% add_group_member
+
+add_group_member(GroupName, UserName, Opt)->
+ case get_options(Opt, mandatory) of
+ {Addr, Port, Dir, AuthPwd}->
+ mod_auth_server:add_group_member(Addr, Port, Dir,
+ GroupName, UserName, AuthPwd);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+add_group_member(GroupName, UserName, Port, Dir) ->
+ add_group_member(GroupName, UserName, undefined, Port, Dir).
+
+add_group_member(GroupName, UserName, Addr, Port, Dir) ->
+ mod_auth_server:add_group_member(Addr, Port, Dir,
+ GroupName, UserName, ?NOPASSWORD).
+
+
+%% delete_group_member
+
+delete_group_member(GroupName, UserName, Opt) ->
+ case get_options(Opt, mandatory) of
+ {Addr, Port, Dir, AuthPwd} ->
+ mod_auth_server:delete_group_member(Addr, Port, Dir,
+ GroupName, UserName, AuthPwd);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+delete_group_member(GroupName, UserName, Port, Dir) ->
+ delete_group_member(GroupName, UserName, undefined, Port, Dir).
+delete_group_member(GroupName, UserName, Addr, Port, Dir) ->
+ mod_auth_server:delete_group_member(Addr, Port, Dir,
+ GroupName, UserName, ?NOPASSWORD).
+
+
+%% list_users
+
+list_users(Opt) ->
+ case get_options(Opt, mandatory) of
+ {Addr, Port, Dir, AuthPwd} ->
+ mod_auth_server:list_users(Addr, Port, Dir, AuthPwd);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+list_users(Port, Dir) ->
+ list_users(undefined, Port, Dir).
+list_users(Addr, Port, Dir) ->
+ mod_auth_server:list_users(Addr, Port, Dir, ?NOPASSWORD).
+
+
+%% delete_user
+
+delete_user(UserName, Opt) ->
+ case get_options(Opt, mandatory) of
+ {Addr, Port, Dir, AuthPwd} ->
+ mod_auth_server:delete_user(Addr, Port, Dir, UserName, AuthPwd);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+delete_user(UserName, Port, Dir) ->
+ delete_user(UserName, undefined, Port, Dir).
+delete_user(UserName, Addr, Port, Dir) ->
+ mod_auth_server:delete_user(Addr, Port, Dir, UserName, ?NOPASSWORD).
+
+
+%% delete_group
+
+delete_group(GroupName, Opt) ->
+ case get_options(Opt, mandatory) of
+ {Addr, Port, Dir, AuthPwd}->
+ mod_auth_server:delete_group(Addr, Port, Dir, GroupName, AuthPwd);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+delete_group(GroupName, Port, Dir) ->
+ delete_group(GroupName, undefined, Port, Dir).
+delete_group(GroupName, Addr, Port, Dir) ->
+ mod_auth_server:delete_group(Addr, Port, Dir, GroupName, ?NOPASSWORD).
+
+
+%% list_groups
+
+list_groups(Opt) ->
+ case get_options(Opt, mandatory) of
+ {Addr, Port, Dir, AuthPwd}->
+ mod_auth_server:list_groups(Addr, Port, Dir, AuthPwd);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+list_groups(Port, Dir) ->
+ list_groups(undefined, Port, Dir).
+list_groups(Addr, Port, Dir) ->
+ mod_auth_server:list_groups(Addr, Port, Dir, ?NOPASSWORD).
+
+
+%% list_group_members
+
+list_group_members(GroupName,Opt) ->
+ case get_options(Opt, mandatory) of
+ {Addr, Port, Dir, AuthPwd} ->
+ mod_auth_server:list_group_members(Addr, Port, Dir, GroupName,
+ AuthPwd);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+list_group_members(GroupName, Port, Dir) ->
+ list_group_members(GroupName, undefined, Port, Dir).
+list_group_members(GroupName, Addr, Port, Dir) ->
+ mod_auth_server:list_group_members(Addr, Port, Dir,
+ GroupName, ?NOPASSWORD).
+
+%% Opt = [{port, Port},
+%% {addr, Addr},
+%% {dir, Dir},
+%% {authPassword, AuthPassword} | FunctionSpecificData]
+get_options(Opt, mandatory)->
+ case httpd_util:key1search(Opt, port, undefined) of
+ Port when integer(Port) ->
+ case httpd_util:key1search(Opt, dir, undefined) of
+ Dir when list(Dir) ->
+ Addr = httpd_util:key1search(Opt,
+ addr,
+ undefined),
+ AuthPwd = httpd_util:key1search(Opt,
+ authPassword,
+ ?NOPASSWORD),
+ {Addr, Port, Dir, AuthPwd};
+ _->
+ {error, bad_dir}
+ end;
+ _ ->
+ {error, bad_dir}
+ end;
+
+%% FunctionSpecificData = {userData, UserData} | {password, Password}
+get_options(Opt, userData)->
+ case httpd_util:key1search(Opt, userData, undefined) of
+ undefined ->
+ {error, no_userdata};
+ UserData ->
+ case httpd_util:key1search(Opt, password, undefined) of
+ undefined->
+ {error, no_password};
+ Pwd ->
+ {UserData, Pwd}
+ end
+ end.
+
+
+lookup(Db, Key) ->
+ ets:lookup(Db, Key).
diff --git a/src/couch_inets/mod_auth.hrl b/src/couch_inets/mod_auth.hrl
new file mode 100644
index 00000000..d49c97bc
--- /dev/null
+++ b/src/couch_inets/mod_auth.hrl
@@ -0,0 +1,27 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+
+-record(httpd_user,
+ {username,
+ password,
+ user_data}).
+
+-record(httpd_group,
+ {name,
+ userlist}).
+
diff --git a/src/couch_inets/mod_auth_dets.erl b/src/couch_inets/mod_auth_dets.erl
new file mode 100644
index 00000000..f63457dc
--- /dev/null
+++ b/src/couch_inets/mod_auth_dets.erl
@@ -0,0 +1,228 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_auth_dets).
+
+%% dets authentication storage
+
+-export([get_user/2,
+ list_group_members/2,
+ add_user/2,
+ add_group_member/3,
+ list_users/1,
+ delete_user/2,
+ list_groups/1,
+ delete_group_member/3,
+ delete_group/2,
+ remove/1]).
+
+-export([store_directory_data/2]).
+
+-include("httpd.hrl").
+-include("mod_auth.hrl").
+
+store_directory_data(_Directory, DirData) ->
+ ?CDEBUG("store_directory_data -> ~n"
+ " Directory: ~p~n"
+ " DirData: ~p",
+ [_Directory, DirData]),
+
+ PWFile = httpd_util:key1search(DirData, auth_user_file),
+ GroupFile = httpd_util:key1search(DirData, auth_group_file),
+ Addr = httpd_util:key1search(DirData, bind_address),
+ Port = httpd_util:key1search(DirData, port),
+
+ PWName = httpd_util:make_name("httpd_dets_pwdb",Addr,Port),
+ case dets:open_file(PWName,[{type,set},{file,PWFile},{repair,true}]) of
+ {ok, PWDB} ->
+ GDBName = httpd_util:make_name("httpd_dets_groupdb",Addr,Port),
+ case dets:open_file(GDBName,[{type,set},{file,GroupFile},{repair,true}]) of
+ {ok, GDB} ->
+ NDD1 = lists:keyreplace(auth_user_file, 1, DirData,
+ {auth_user_file, PWDB}),
+ NDD2 = lists:keyreplace(auth_group_file, 1, NDD1,
+ {auth_group_file, GDB}),
+ {ok, NDD2};
+ {error, Err}->
+ {error, {{file, GroupFile},Err}}
+ end;
+ {error, Err2} ->
+ {error, {{file, PWFile},Err2}}
+ end.
+
+%%
+%% Storage format of users in the dets table:
+%% {{UserName, Addr, Port, Dir}, Password, UserData}
+%%
+
+add_user(DirData, UStruct) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ PWDB = httpd_util:key1search(DirData, auth_user_file),
+ Record = {{UStruct#httpd_user.username, Addr, Port, Dir},
+ UStruct#httpd_user.password, UStruct#httpd_user.user_data},
+ case dets:lookup(PWDB, UStruct#httpd_user.username) of
+ [Record] ->
+ {error, user_already_in_db};
+ _ ->
+ dets:insert(PWDB, Record),
+ true
+ end.
+
+get_user(DirData, UserName) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ PWDB = httpd_util:key1search(DirData, auth_user_file),
+ User = {UserName, Addr, Port, Dir},
+ case dets:lookup(PWDB, User) of
+ [{User, Password, UserData}] ->
+ {ok, #httpd_user{username=UserName, password=Password, user_data=UserData}};
+ _ ->
+ {error, no_such_user}
+ end.
+
+list_users(DirData) ->
+ ?DEBUG("list_users -> ~n"
+ " DirData: ~p", [DirData]),
+ {Addr, Port, Dir} = lookup_common(DirData),
+ PWDB = httpd_util:key1search(DirData, auth_user_file),
+ case dets:traverse(PWDB, fun(X) -> {continue, X} end) of %% SOOOO Ugly !
+ Records when list(Records) ->
+ ?DEBUG("list_users -> ~n"
+ " Records: ~p", [Records]),
+ {ok, [UserName || {{UserName, AnyAddr, AnyPort, AnyDir},
+ _Password, _Data} <- Records,
+ AnyAddr == Addr, AnyPort == Port,
+ AnyDir == Dir]};
+ _O ->
+ ?DEBUG("list_users -> ~n"
+ " O: ~p", [_O]),
+ {ok, []}
+ end.
+
+delete_user(DirData, UserName) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ PWDB = httpd_util:key1search(DirData, auth_user_file),
+ User = {UserName, Addr, Port, Dir},
+ case dets:lookup(PWDB, User) of
+ [{User, _SomePassword, _UserData}] ->
+ dets:delete(PWDB, User),
+ {ok, Groups} = list_groups(DirData),
+ lists:foreach(fun(Group) ->
+ delete_group_member(DirData,
+ Group, UserName) end,
+ Groups),
+ true;
+ _ ->
+ {error, no_such_user}
+ end.
+
+%%
+%% Storage of groups in the dets table:
+%% {Group, UserList} where UserList is a list of strings.
+%%
+add_group_member(DirData, GroupName, UserName) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ Group = {GroupName, Addr, Port, Dir},
+ case dets:lookup(GDB, Group) of
+ [{Group, Users}] ->
+ case lists:member(UserName, Users) of
+ true ->
+ true;
+ false ->
+ dets:insert(GDB, {Group, [UserName|Users]}),
+ true
+ end;
+ [] ->
+ dets:insert(GDB, {Group, [UserName]}),
+ true;
+ Other ->
+ {error, Other}
+ end.
+
+list_group_members(DirData, GroupName) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ Group = {GroupName, Addr, Port, Dir},
+ case dets:lookup(GDB, Group) of
+ [{Group, Users}] ->
+ {ok, Users};
+ _ ->
+ {error, no_such_group}
+ end.
+
+list_groups(DirData) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ case dets:match(GDB, {'$1', '_'}) of
+ [] ->
+ {ok, []};
+ List when list(List) ->
+ Groups = lists:flatten(List),
+ {ok, [GroupName ||
+ {GroupName, AnyAddr, AnyPort, AnyDir} <- Groups,
+ AnyAddr == Addr, AnyPort == Port, AnyDir == Dir]};
+ _ ->
+ {ok, []}
+ end.
+
+delete_group_member(DirData, GroupName, UserName) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ Group = {GroupName, Addr, Port, Dir},
+ case dets:lookup(GDB, GroupName) of
+ [{Group, Users}] ->
+ case lists:member(UserName, Users) of
+ true ->
+ dets:delete(GDB, Group),
+ dets:insert(GDB, {Group,
+ lists:delete(UserName, Users)}),
+ true;
+ false ->
+ {error, no_such_group_member}
+ end;
+ _ ->
+ {error, no_such_group}
+ end.
+
+delete_group(DirData, GroupName) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ Group = {GroupName, Addr, Port, Dir},
+ case dets:lookup(GDB, Group) of
+ [{Group, _Users}] ->
+ dets:delete(GDB, Group),
+ true;
+ _ ->
+ {error, no_such_group}
+ end.
+
+lookup_common(DirData) ->
+ Dir = httpd_util:key1search(DirData, path),
+ Port = httpd_util:key1search(DirData, port),
+ Addr = httpd_util:key1search(DirData, bind_address),
+ {Addr, Port, Dir}.
+
+%% remove/1
+%%
+%% Closes dets tables used by this auth mod.
+%%
+remove(DirData) ->
+ PWDB = httpd_util:key1search(DirData, auth_user_file),
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ dets:close(GDB),
+ dets:close(PWDB),
+ ok.
diff --git a/src/couch_inets/mod_auth_mnesia.erl b/src/couch_inets/mod_auth_mnesia.erl
new file mode 100644
index 00000000..6c122565
--- /dev/null
+++ b/src/couch_inets/mod_auth_mnesia.erl
@@ -0,0 +1,282 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_auth_mnesia).
+-export([get_user/2,
+ list_group_members/2,
+ add_user/2,
+ add_group_member/3,
+ list_users/1,
+ delete_user/2,
+ list_groups/1,
+ delete_group_member/3,
+ delete_group/2]).
+
+-export([store_user/5, store_user/6,
+ store_group_member/5, store_group_member/6,
+ list_group_members/3, list_group_members/4,
+ list_groups/2, list_groups/3,
+ list_users/2, list_users/3,
+ remove_user/4, remove_user/5,
+ remove_group_member/5, remove_group_member/6,
+ remove_group/4, remove_group/5]).
+
+-export([store_directory_data/2]).
+
+-include("httpd.hrl").
+-include("mod_auth.hrl").
+
+
+
+store_directory_data(_Directory, _DirData) ->
+ %% We don't need to do anything here, we could of course check that
+    %% the appropriate mnesia tables have been created prior to
+ %% starting the http server.
+ ok.
+
+
+%%
+%% API
+%%
+
+%% Compatibility API
+
+store_user(UserName, Password, Port, Dir, _AccessPassword) ->
+ %% AccessPassword is ignored - was not used in previous version
+ DirData = [{path,Dir},{port,Port}],
+ UStruct = #httpd_user{username = UserName,
+ password = Password},
+ add_user(DirData, UStruct).
+
+store_user(UserName, Password, Addr, Port, Dir, _AccessPassword) ->
+ %% AccessPassword is ignored - was not used in previous version
+ DirData = [{path,Dir},{bind_address,Addr},{port,Port}],
+ UStruct = #httpd_user{username = UserName,
+ password = Password},
+ add_user(DirData, UStruct).
+
+store_group_member(GroupName, UserName, Port, Dir, _AccessPassword) ->
+ DirData = [{path,Dir},{port,Port}],
+ add_group_member(DirData, GroupName, UserName).
+
+store_group_member(GroupName, UserName, Addr, Port, Dir, _AccessPassword) ->
+ DirData = [{path,Dir},{bind_address,Addr},{port,Port}],
+ add_group_member(DirData, GroupName, UserName).
+
+list_group_members(GroupName, Port, Dir) ->
+ DirData = [{path,Dir},{port,Port}],
+ list_group_members(DirData, GroupName).
+
+list_group_members(GroupName, Addr, Port, Dir) ->
+ DirData = [{path,Dir},{bind_address,Addr},{port,Port}],
+ list_group_members(DirData, GroupName).
+
+list_groups(Port, Dir) ->
+ DirData = [{path,Dir},{port,Port}],
+ list_groups(DirData).
+
+list_groups(Addr, Port, Dir) ->
+ DirData = [{path,Dir},{bind_address,Addr},{port,Port}],
+ list_groups(DirData).
+
+list_users(Port, Dir) ->
+ DirData = [{path,Dir},{port,Port}],
+ list_users(DirData).
+
+list_users(Addr, Port, Dir) ->
+ DirData = [{path,Dir},{bind_address,Addr},{port,Port}],
+ list_users(DirData).
+
+remove_user(UserName, Port, Dir, _AccessPassword) ->
+ DirData = [{path,Dir},{port,Port}],
+ delete_user(DirData, UserName).
+
+remove_user(UserName, Addr, Port, Dir, _AccessPassword) ->
+ DirData = [{path,Dir},{bind_address,Addr},{port,Port}],
+ delete_user(DirData, UserName).
+
+remove_group_member(GroupName,UserName,Port,Dir,_AccessPassword) ->
+ DirData = [{path,Dir},{port,Port}],
+ delete_group_member(DirData, GroupName, UserName).
+
+remove_group_member(GroupName,UserName,Addr,Port,Dir,_AccessPassword) ->
+ DirData = [{path,Dir},{bind_address,Addr},{port,Port}],
+ delete_group_member(DirData, GroupName, UserName).
+
+remove_group(GroupName,Port,Dir,_AccessPassword) ->
+ DirData = [{path,Dir},{port,Port}],
+ delete_group(DirData, GroupName).
+
+remove_group(GroupName,Addr,Port,Dir,_AccessPassword) ->
+ DirData = [{path,Dir},{bind_address,Addr},{port,Port}],
+ delete_group(DirData, GroupName).
+
+%%
+%% Storage format of users in the mnesia table:
+%% httpd_user records
+%%
+
+add_user(DirData, UStruct) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ UserName = UStruct#httpd_user.username,
+ Password = UStruct#httpd_user.password,
+ Data = UStruct#httpd_user.user_data,
+ User=#httpd_user{username={UserName,Addr,Port,Dir},
+ password=Password,
+ user_data=Data},
+ case mnesia:transaction(fun() -> mnesia:write(User) end) of
+ {aborted,Reason} ->
+ {error,Reason};
+ _ ->
+ true
+ end.
+
+get_user(DirData, UserName) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ case mnesia:transaction(fun() ->
+ mnesia:read({httpd_user,
+ {UserName,Addr,Port,Dir}})
+ end) of
+ {aborted,Reason} ->
+ {error, Reason};
+ {atomic,[]} ->
+ {error, no_such_user};
+ {atomic, [Record]} when record(Record, httpd_user) ->
+ {ok, Record#httpd_user{username=UserName}};
+ _ ->
+ {error, no_such_user}
+ end.
+
+list_users(DirData) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ case mnesia:transaction(fun() ->
+ mnesia:match_object({httpd_user,
+ {'_',Addr,Port,Dir},'_','_'})
+ end) of
+ {aborted,Reason} ->
+ {error,Reason};
+ {atomic,Users} ->
+ {ok,
+ lists:foldr(fun({httpd_user,
+ {UserName, _AnyAddr, _AnyPort, _AnyDir},
+ _Password, _Data}, Acc) ->
+ [UserName|Acc]
+ end,
+ [], Users)}
+ end.
+
+delete_user(DirData, UserName) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ case mnesia:transaction(fun() ->
+ mnesia:delete({httpd_user,
+ {UserName,Addr,Port,Dir}})
+ end) of
+ {aborted,Reason} ->
+ {error,Reason};
+ _ ->
+ true
+ end.
+
+%%
+%% Storage of groups in the mnesia table:
+%% Multiple instances of {#httpd_group, User}
+%%
+
+add_group_member(DirData, GroupName, User) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ Group=#httpd_group{name={GroupName, Addr, Port, Dir}, userlist=User},
+ case mnesia:transaction(fun() -> mnesia:write(Group) end) of
+ {aborted,Reason} ->
+ {error,Reason};
+ _ ->
+ true
+ end.
+
+list_group_members(DirData, GroupName) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ case mnesia:transaction(fun() ->
+ mnesia:read({httpd_group,
+ {GroupName,Addr,Port,Dir}})
+ end) of
+ {aborted, Reason} ->
+ {error,Reason};
+ {atomic, Members} ->
+ {ok,[UserName || {httpd_group,{AnyGroupName,AnyAddr,
+ AnyPort,AnyDir},UserName}
+ <- Members,
+ AnyGroupName == GroupName, AnyAddr == Addr,
+ AnyPort == Port, AnyDir == Dir]}
+ end.
+
+list_groups(DirData) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ case mnesia:transaction(fun() ->
+ mnesia:match_object({httpd_group,
+ {'_',Addr,Port,Dir},
+ '_'})
+ end) of
+ {aborted, Reason} ->
+ {error, Reason};
+ {atomic, Groups} ->
+ GroupNames=
+ [GroupName || {httpd_group,{GroupName,AnyAddr,AnyPort,AnyDir},
+ _UserName} <- Groups,
+                           AnyAddr == Addr, AnyPort == Port,
+ AnyDir == Dir],
+ {ok, httpd_util:uniq(lists:sort(GroupNames))}
+ end.
+
+delete_group_member(DirData, GroupName, UserName) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ Group = #httpd_group{name={GroupName, Addr, Port, Dir}, userlist=UserName},
+ case mnesia:transaction(fun() -> mnesia:delete_object(Group) end) of
+ {aborted,Reason} ->
+ {error,Reason};
+ _ ->
+ true
+ end.
+
+%% THIS IS WRONG (?) !
+%% Should first match out all httpd_group records for this group and then
+%% do mnesia:delete on those. Or ?
+
+delete_group(DirData, GroupName) ->
+ {Addr, Port, Dir} = lookup_common(DirData),
+ case mnesia:transaction(fun() ->
+ mnesia:delete({httpd_group,
+ {GroupName,Addr,Port,Dir}})
+ end) of
+ {aborted,Reason} ->
+ {error,Reason};
+ _ ->
+ true
+ end.
+
+%% Utility functions.
+
+lookup_common(DirData) ->
+ Dir = httpd_util:key1search(DirData, path),
+ Port = httpd_util:key1search(DirData, port),
+ Addr = httpd_util:key1search(DirData, bind_address),
+ {Addr, Port, Dir}.
+
+
+
+
+
+
+
diff --git a/src/couch_inets/mod_auth_plain.erl b/src/couch_inets/mod_auth_plain.erl
new file mode 100644
index 00000000..ad6bb999
--- /dev/null
+++ b/src/couch_inets/mod_auth_plain.erl
@@ -0,0 +1,295 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_auth_plain).
+
+-include("httpd.hrl").
+-include("mod_auth.hrl").
+
+-define(VMODULE,"AUTH_PLAIN").
+
+%% Internal API
+-export([store_directory_data/2]).
+
+
+-export([get_user/2,
+ list_group_members/2,
+ add_user/2,
+ add_group_member/3,
+ list_users/1,
+ delete_user/2,
+ list_groups/1,
+ delete_group_member/3,
+ delete_group/2,
+ remove/1]).
+
+%%
+%% API
+%%
+
+%%
+%% Storage format of users in the ets table:
+%% {UserName, Password, UserData}
+%%
+
+add_user(DirData, #httpd_user{username = User} = UStruct) ->
+ PWDB = httpd_util:key1search(DirData, auth_user_file),
+ Record = {User,
+ UStruct#httpd_user.password,
+ UStruct#httpd_user.user_data},
+ case ets:lookup(PWDB, User) of
+ [{User, _SomePassword, _SomeData}] ->
+ {error, user_already_in_db};
+ _ ->
+ ets:insert(PWDB, Record),
+ true
+ end.
+
+get_user(DirData, User) ->
+ PWDB = httpd_util:key1search(DirData, auth_user_file),
+ case ets:lookup(PWDB, User) of
+ [{User, PassWd, Data}] ->
+ {ok, #httpd_user{username=User, password=PassWd, user_data=Data}};
+ _ ->
+ {error, no_such_user}
+ end.
+
+list_users(DirData) ->
+ PWDB = httpd_util:key1search(DirData, auth_user_file),
+ Records = ets:match(PWDB, '$1'),
+ {ok, lists:foldr(fun({User, _PassWd, _Data}, A) -> [User | A] end,
+ [], lists:flatten(Records))}.
+
+delete_user(DirData, UserName) ->
+ PWDB = httpd_util:key1search(DirData, auth_user_file),
+ case ets:lookup(PWDB, UserName) of
+ [{UserName, _SomePassword, _SomeData}] ->
+ ets:delete(PWDB, UserName),
+ {ok, Groups} = list_groups(DirData),
+ lists:foreach(fun(Group) ->
+ delete_group_member(DirData,
+ Group, UserName)
+ end, Groups);
+ _ ->
+ {error, no_such_user}
+ end.
+
+%%
+%% Storage of groups in the ets table:
+%% {Group, UserList} where UserList is a list of strings.
+%%
+
+add_group_member(DirData, Group, UserName) ->
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ case ets:lookup(GDB, Group) of
+ [{Group, Users}] ->
+ case lists:member(UserName, Users) of
+ true ->
+ true;
+ false ->
+ ets:insert(GDB, {Group, [UserName|Users]}),
+ true
+ end;
+ [] ->
+ ets:insert(GDB, {Group, [UserName]}),
+ true;
+ Other ->
+ {error, Other}
+ end.
+
+list_group_members(DirData, Group) ->
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ case ets:lookup(GDB, Group) of
+ [{Group, Users}] ->
+ {ok, Users};
+ _ ->
+ {error, no_such_group}
+ end.
+
+list_groups(DirData) ->
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ Groups = ets:match(GDB, '$1'),
+ {ok, httpd_util:uniq(lists:foldr(fun({G, _}, A) -> [G|A] end,
+ [], lists:flatten(Groups)))}.
+
+delete_group_member(DirData, Group, User) ->
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ case ets:lookup(GDB, Group) of
+ [{Group, Users}] when is_list(Users) ->
+ case lists:member(User, Users) of
+ true ->
+ ets:delete(GDB, Group),
+ ets:insert(GDB, {Group, lists:delete(User, Users)}),
+ true;
+ false ->
+ {error, no_such_group_member}
+ end;
+ _ ->
+ {error, no_such_group}
+ end.
+
+delete_group(DirData, Group) ->
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ case ets:lookup(GDB, Group) of
+ [{Group, _Users}] ->
+ ets:delete(GDB, Group),
+ true;
+ _ ->
+ {error, no_such_group}
+ end.
+
+store_directory_data(_Directory, DirData) ->
+ PWFile = httpd_util:key1search(DirData, auth_user_file),
+ GroupFile = httpd_util:key1search(DirData, auth_group_file),
+ case load_passwd(PWFile) of
+ {ok, PWDB} ->
+ case load_group(GroupFile) of
+ {ok, GRDB} ->
+ %% Address and port is included in the file names...
+ Addr = httpd_util:key1search(DirData, bind_address),
+ Port = httpd_util:key1search(DirData, port),
+ {ok, PasswdDB} = store_passwd(Addr,Port,PWDB),
+ {ok, GroupDB} = store_group(Addr,Port,GRDB),
+ NDD1 = lists:keyreplace(auth_user_file, 1, DirData,
+ {auth_user_file, PasswdDB}),
+ NDD2 = lists:keyreplace(auth_group_file, 1, NDD1,
+ {auth_group_file, GroupDB}),
+ {ok, NDD2};
+ Err ->
+ {error, Err}
+ end;
+ Err2 ->
+ {error, Err2}
+ end.
+
+
+
+%% load_passwd
+
+load_passwd(AuthUserFile) ->
+ case file:open(AuthUserFile, read) of
+ {ok,Stream} ->
+ parse_passwd(Stream, []);
+ {error, _} ->
+ {error, ?NICE("Can't open "++AuthUserFile)}
+ end.
+
+parse_passwd(Stream,PasswdList) ->
+ Line =
+ case io:get_line(Stream, '') of
+ eof ->
+ eof;
+ String ->
+ httpd_conf:clean(String)
+ end,
+ parse_passwd(Stream, PasswdList, Line).
+
+parse_passwd(Stream, PasswdList, eof) ->
+ file:close(Stream),
+ {ok, PasswdList};
+parse_passwd(Stream, PasswdList, "") ->
+ parse_passwd(Stream, PasswdList);
+parse_passwd(Stream, PasswdList, [$#|_]) ->
+ parse_passwd(Stream, PasswdList);
+parse_passwd(Stream, PasswdList, Line) ->
+ case regexp:split(Line,":") of
+ {ok, [User,Password]} ->
+ parse_passwd(Stream, [{User,Password, []}|PasswdList]);
+ {ok,_} ->
+ {error, ?NICE(Line)}
+ end.
+
+%% load_group
+
+load_group(AuthGroupFile) ->
+ case file:open(AuthGroupFile, read) of
+ {ok, Stream} ->
+ parse_group(Stream,[]);
+ {error, _} ->
+ {error, ?NICE("Can't open "++AuthGroupFile)}
+ end.
+
+parse_group(Stream, GroupList) ->
+ Line=
+ case io:get_line(Stream,'') of
+ eof ->
+ eof;
+ String ->
+ httpd_conf:clean(String)
+ end,
+ parse_group(Stream, GroupList, Line).
+
+parse_group(Stream, GroupList, eof) ->
+ file:close(Stream),
+ {ok, GroupList};
+parse_group(Stream, GroupList, "") ->
+ parse_group(Stream, GroupList);
+parse_group(Stream, GroupList, [$#|_]) ->
+ parse_group(Stream, GroupList);
+parse_group(Stream, GroupList, Line) ->
+ case regexp:split(Line, ":") of
+ {ok, [Group,Users]} ->
+ {ok, UserList} = regexp:split(Users," "),
+ parse_group(Stream, [{Group,UserList}|GroupList]);
+ {ok, _} ->
+ {error, ?NICE(Line)}
+ end.
+
+
+%% store_passwd
+
+store_passwd(Addr,Port,PasswdList) ->
+ Name = httpd_util:make_name("httpd_passwd",Addr,Port),
+ PasswdDB = ets:new(Name, [set, public]),
+ store_passwd(PasswdDB, PasswdList).
+
+store_passwd(PasswdDB, []) ->
+ {ok, PasswdDB};
+store_passwd(PasswdDB, [User|Rest]) ->
+ ets:insert(PasswdDB, User),
+ store_passwd(PasswdDB, Rest).
+
+%% store_group
+
+store_group(Addr,Port,GroupList) ->
+ Name = httpd_util:make_name("httpd_group",Addr,Port),
+ GroupDB = ets:new(Name, [set, public]),
+ store_group(GroupDB, GroupList).
+
+
+store_group(GroupDB,[]) ->
+ {ok, GroupDB};
+store_group(GroupDB,[User|Rest]) ->
+ ets:insert(GroupDB, User),
+ store_group(GroupDB, Rest).
+
+
+%% remove/1
+%%
+%% Deletes ets tables used by this auth mod.
+%%
+remove(DirData) ->
+ PWDB = httpd_util:key1search(DirData, auth_user_file),
+ GDB = httpd_util:key1search(DirData, auth_group_file),
+ ets:delete(PWDB),
+ ets:delete(GDB).
+
+
+
+
+
+
diff --git a/src/couch_inets/mod_auth_server.erl b/src/couch_inets/mod_auth_server.erl
new file mode 100644
index 00000000..8e9cf9e8
--- /dev/null
+++ b/src/couch_inets/mod_auth_server.erl
@@ -0,0 +1,374 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+
+-module(mod_auth_server).
+
+-include("httpd.hrl").
+
+-behaviour(gen_server).
+
+
+%% mod_auth exports
+-export([start/2, stop/2,
+ add_password/4, update_password/5,
+ add_user/5, delete_user/5, get_user/5, list_users/4,
+ add_group_member/6, delete_group_member/6, list_group_members/5,
+ delete_group/5, list_groups/4]).
+
+%% gen_server exports
+-export([start_link/2, init/1,
+ handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% We will not make the change to use base64 in stdlib in inets just yet.
+%% it will be included in the next major release of inets.
+-compile({nowarn_deprecated_function, {http_base_64, encode, 1}}).
+
+-record(state,{tab}).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%% External API %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% start_link/3
+%%
+%% NOTE: This is called by httpd_misc_sup when the process is started
+%%
+start_link(Addr, Port)->
+ Name = make_name(Addr, Port),
+ gen_server:start_link({local, Name}, ?MODULE, [], [{timeout, infinity}]).
+
+
+%% start/2
+
+start(Addr, Port)->
+ Name = make_name(Addr, Port),
+ case whereis(Name) of
+ undefined ->
+ httpd_misc_sup:start_auth_server(Addr, Port);
+ _ -> %% Already started...
+ ok
+ end.
+
+
+%% stop/2
+
+stop(Addr, Port)->
+ Name = make_name(Addr, Port),
+ case whereis(Name) of
+ undefined -> %% Already stopped
+ ok;
+ _ ->
+ (catch httpd_misc_sup:stop_auth_server(Addr, Port))
+ end.
+
+%% add_password/4
+
+add_password(Addr, Port, Dir, Password)->
+ Name = make_name(Addr, Port),
+ Req = {add_password, Dir, Password},
+ call(Name, Req).
+
+
+%% update_password/6
+
+update_password(Addr, Port, Dir, Old, New) when list(New) ->
+ Name = make_name(Addr, Port),
+ Req = {update_password, Dir, Old, New},
+ call(Name, Req).
+
+
+%% add_user/5
+
+add_user(Addr, Port, Dir, User, Password) ->
+ Name = make_name(Addr, Port),
+ Req = {add_user, Addr, Port, Dir, User, Password},
+ call(Name, Req).
+
+
+%% delete_user/5
+
+delete_user(Addr, Port, Dir, UserName, Password) ->
+ Name = make_name(Addr, Port),
+ Req = {delete_user, Addr, Port, Dir, UserName, Password},
+ call(Name, Req).
+
+
+%% get_user/5
+
+get_user(Addr, Port, Dir, UserName, Password) ->
+ Name = make_name(Addr, Port),
+ Req = {get_user, Addr, Port, Dir, UserName, Password},
+ call(Name, Req).
+
+
+%% list_users/4
+
+list_users(Addr, Port, Dir, Password) ->
+ Name = make_name(Addr,Port),
+ Req = {list_users, Addr, Port, Dir, Password},
+ call(Name, Req).
+
+
+%% add_group_member/6
+
+add_group_member(Addr, Port, Dir, GroupName, UserName, Password) ->
+ Name = make_name(Addr,Port),
+ Req = {add_group_member, Addr, Port, Dir, GroupName, UserName, Password},
+ call(Name, Req).
+
+
+%% delete_group_member/6
+
+delete_group_member(Addr, Port, Dir, GroupName, UserName, Password) ->
+ Name = make_name(Addr,Port),
+ Req = {delete_group_member, Addr, Port, Dir, GroupName, UserName, Password},
+ call(Name, Req).
+
+
+%% list_group_members/4
+
+list_group_members(Addr, Port, Dir, Group, Password) ->
+ Name = make_name(Addr, Port),
+ Req = {list_group_members, Addr, Port, Dir, Group, Password},
+ call(Name, Req).
+
+
+%% delete_group/5
+
+delete_group(Addr, Port, Dir, GroupName, Password) ->
+ Name = make_name(Addr, Port),
+ Req = {delete_group, Addr, Port, Dir, GroupName, Password},
+ call(Name, Req).
+
+
+%% list_groups/4
+
+list_groups(Addr, Port, Dir, Password) ->
+ Name = make_name(Addr, Port),
+ Req = {list_groups, Addr, Port, Dir, Password},
+ call(Name, Req).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%% Server call-back functions %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% init
+
+init(_) ->
+ {ok,#state{tab = ets:new(auth_pwd,[set,protected])}}.
+
+%% handle_call
+
+%% Add a user
+handle_call({add_user, Addr, Port, Dir, User, AuthPwd}, _From, State) ->
+ Reply = api_call(Addr, Port, Dir, add_user, User, AuthPwd, State),
+ {reply, Reply, State};
+
+%% Get data about a user
+handle_call({get_user, Addr, Port, Dir, User, AuthPwd}, _From, State) ->
+ Reply = api_call(Addr, Port, Dir, get_user, [User], AuthPwd, State),
+ {reply, Reply, State};
+
+%% Add a group member
+handle_call({add_group_member, Addr, Port, Dir, Group, User, AuthPwd},
+ _From, State) ->
+ Reply = api_call(Addr, Port, Dir, add_group_member, [Group, User],
+ AuthPwd, State),
+ {reply, Reply, State};
+
+%% delete a group
+handle_call({delete_group_member, Addr, Port, Dir, Group, User, AuthPwd},
+ _From, State)->
+ Reply = api_call(Addr, Port, Dir, delete_group_member, [Group, User],
+ AuthPwd, State),
+ {reply, Reply, State};
+
+%% List all users that are standalone users
+handle_call({list_users, Addr, Port, Dir, AuthPwd}, _From, State)->
+ Reply = api_call(Addr, Port, Dir, list_users, [], AuthPwd, State),
+ {reply, Reply, State};
+
+%% Delete a user
+handle_call({delete_user, Addr, Port, Dir, User, AuthPwd}, _From, State)->
+ Reply = api_call(Addr, Port, Dir, delete_user, [User], AuthPwd, State),
+ {reply, Reply, State};
+
+%% Delete a group
+handle_call({delete_group, Addr, Port, Dir, Group, AuthPwd}, _From, State)->
+ Reply = api_call(Addr, Port, Dir, delete_group, [Group], AuthPwd, State),
+ {reply, Reply, State};
+
+%% List the current groups
+handle_call({list_groups, Addr, Port, Dir, AuthPwd}, _From, State)->
+ Reply = api_call(Addr, Port, Dir, list_groups, [], AuthPwd, State),
+ {reply, Reply, State};
+
+%% List the members of the given group
+handle_call({list_group_members, Addr, Port, Dir, Group, AuthPwd},
+ _From, State)->
+ Reply = api_call(Addr, Port, Dir, list_group_members, [Group],
+ AuthPwd, State),
+ {reply, Reply, State};
+
+
+%% Add password for a directory
+handle_call({add_password, Dir, Password}, _From, State)->
+ Reply = do_add_password(Dir, Password, State),
+ {reply, Reply, State};
+
+
+%% Update the password for a directory
+
+handle_call({update_password, Dir, Old, New},_From,State)->
+ Reply =
+ case getPassword(State, Dir) of
+ OldPwd when binary(OldPwd)->
+ case erlang:md5(Old) of
+ OldPwd ->
+ %% The old password is right =>
+ %% update the password to the new
+ do_update_password(Dir,New,State),
+ ok;
+ _->
+ {error, error_new}
+ end;
+ _->
+ {error, error_old}
+ end,
+ {reply, Reply, State};
+
+handle_call(stop, _From, State)->
+ {stop, normal, State}.
+
+handle_info(_Info, State)->
+ {noreply, State}.
+
+handle_cast(_Request, State)->
+ {noreply, State}.
+
+
+terminate(_Reason,State) ->
+ ets:delete(State#state.tab),
+ ok.
+
+
+%% code_change({down, ToVsn}, State, Extra)
+%%
+code_change({down, _}, #state{tab = Tab}, downgrade_to_2_6_0) ->
+ {ok, {state, Tab, undefined}};
+
+
+%% code_change(FromVsn, State, Extra)
+%%
+code_change(_, {state, Tab, _}, upgrade_from_2_6_0) ->
+ {ok, #state{tab = Tab}}.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%% The functions that really changes the data in the database %%
+%% of users to different directories %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% API gateway
+
+api_call(Addr, Port, Dir, Func, Args,Password,State) ->
+ case controlPassword(Password,State,Dir) of
+ ok->
+ ConfigName = httpd_util:make_name("httpd_conf",Addr,Port),
+ case ets:match_object(ConfigName, {directory, Dir, '$1'}) of
+ [{directory, Dir, DirData}] ->
+ AuthMod = auth_mod_name(DirData),
+ (catch apply(AuthMod, Func, [DirData|Args]));
+ _ ->
+ {error, no_such_directory}
+ end;
+ bad_password ->
+ {error,bad_password}
+ end.
+
+controlPassword(Password, _State, _Dir) when Password=:="DummyPassword"->
+ bad_password;
+
+controlPassword(Password,State,Dir)->
+ case getPassword(State,Dir) of
+ Pwd when binary(Pwd)->
+ case erlang:md5(Password) of
+ Pwd ->
+ ok;
+ _->
+ bad_password
+ end;
+ _ ->
+ bad_password
+ end.
+
+
+getPassword(State,Dir)->
+ case lookup(State#state.tab, Dir) of
+ [{_,Pwd}]->
+ Pwd;
+ _ ->
+ {error,bad_password}
+ end.
+
+do_update_password(Dir, New, State) ->
+ ets:insert(State#state.tab, {Dir, erlang:md5(New)}).
+
+do_add_password(Dir, Password, State) ->
+ case getPassword(State,Dir) of
+ PwdExists when binary(PwdExists) ->
+ {error, dir_protected};
+ {error, _} ->
+ do_update_password(Dir, Password, State)
+ end.
+
+
+auth_mod_name(DirData) ->
+ case httpd_util:key1search(DirData, auth_type, plain) of
+ plain -> mod_auth_plain;
+ mnesia -> mod_auth_mnesia;
+ dets -> mod_auth_dets
+ end.
+
+
+lookup(Db, Key) ->
+ ets:lookup(Db, Key).
+
+
+make_name(Addr,Port) ->
+ httpd_util:make_name("httpd_auth",Addr,Port).
+
+
+call(Name, Req) ->
+ case (catch gen_server:call(Name, Req)) of
+ {'EXIT', Reason} ->
+ {error, Reason};
+ Reply ->
+ Reply
+ end.
+
+
diff --git a/src/couch_inets/mod_browser.erl b/src/couch_inets/mod_browser.erl
new file mode 100644
index 00000000..03eb66bb
--- /dev/null
+++ b/src/couch_inets/mod_browser.erl
@@ -0,0 +1,247 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%% ----------------------------------------------------------------------
+%%
+%% Browsers send a string to the web server
+%% to identify themselves. They are a bit nasty
+%% since the only thing that the specification really
+%% is strict about is that they shall be short.
+%% Some examples:
+%%
+%% Netscape Mozilla/4.75 [en] (X11; U; SunOS 5.8 sun4u)
+%% Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.0.1) Gecko/20020823 Netscape/7.0
+%% Mozilla Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.1) Gecko/20020827
+%% Safari Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us) AppleWebKit/85 (KHTML, like Gecko) Safari/85
+%% IE5 Mozilla/4.0 (compatible; MSIE 5.0; SP1B; SunOS 5.8 sun4u; X11)
+%% Lynx Lynx/2.8.3rel.1 libwww-FM/2.142
+%%
+%% ----------------------------------------------------------------------
+
+-module(mod_browser).
+
+-export([do/1, test/0, getBrowser/1]).
+
+%% Remember that the order of the Mozilla browsers is
+%% important since some browsers include the names of others
+%% to behave as if they were something else.
+-define(MOZILLA_BROWSERS,[{netscape, "netscape"},
+ {opera, "opera"},
+ {msie, "msie"},
+ {safari, "safari"},
+ {mozilla, "rv:"}]). % fallback, must be last
+
+
+%% If your operating system is not recognized add it to this list.
+-define(OPERATIVE_SYSTEMS,[{win3x, ["win16", "windows 3", "windows 16-bit"]},
+ {win95, ["win95", "windows 95"]},
+ {win98, ["win98", "windows 98"]},
+ {winnt, ["winnt", "windows nt"]},
+ {win2k, ["nt 5"]},
+ {sunos4, ["sunos 4"]},
+ {sunos5, ["sunos 5"]},
+ {sun, ["sunos"]},
+ {aix, ["aix"]},
+ {linux, ["linux"]},
+ {sco, ["sco", "unix_sv"]},
+ {freebsd,["freebsd"]},
+ {bsd, ["bsd"]},
+ {macosx, ["mac os x"]}]).
+
+-define(LYNX, lynx).
+-define(MOZILLA, mozilla).
+-define(EMACS, emacs).
+-define(STAROFFICE, soffice).
+-define(MOSAIC, mosaic).
+-define(NETSCAPE, netscape).
+-define(SAFARI, safari).
+-define(UNKNOWN, unknown).
+
+-include("httpd.hrl").
+
+-define(VMODULE,"BROWSER").
+
+do(Info) ->
+ case httpd_util:key1search(Info#mod.data,status) of
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed,Info#mod.data};
+ undefined ->
+ Browser = getBrowser1(Info),
+ {proceed,[{'user-agent', Browser}|Info#mod.data]}
+ end.
+
+getBrowser1(Info) ->
+ PHead = Info#mod.parsed_header,
+ case httpd_util:key1search(PHead,"user-agent") of
+ undefined ->
+ undefined;
+ AgentString ->
+ getBrowser(AgentString)
+ end.
+
+getBrowser(AgentString) ->
+ LAgentString = http_util:to_lower(AgentString),
+ case regexp:first_match(LAgentString,"^[^ ]*") of
+ {match,Start,Length} ->
+ Browser = lists:sublist(LAgentString,Start,Length),
+ case browserType(Browser) of
+ {mozilla,Vsn} ->
+ {getMozilla(LAgentString,
+ ?MOZILLA_BROWSERS,{?NETSCAPE,Vsn}),
+ operativeSystem(LAgentString)};
+ AnyBrowser ->
+ {AnyBrowser,operativeSystem(LAgentString)}
+ end;
+ nomatch ->
+ browserType(LAgentString)
+ end.
+
+browserType([$l,$y,$n,$x|Version]) ->
+ {?LYNX,browserVersion(Version)};
+browserType([$m,$o,$z,$i,$l,$l,$a|Version]) ->
+ {?MOZILLA,browserVersion(Version)};
+browserType([$e,$m,$a,$c,$s|Version]) ->
+ {?EMACS,browserVersion(Version)};
+browserType([$s,$t,$a,$r,$o,$f,$f,$i,$c,$e|Version]) ->
+ {?STAROFFICE,browserVersion(Version)};
+browserType([$m,$o,$s,$a,$i,$c|Version]) ->
+ {?MOSAIC,browserVersion(Version)};
+browserType(_Unknown) ->
+ unknown.
+
+
+browserVersion([$/|VsnString]) ->
+ case catch list_to_float(VsnString) of
+ Number when float(Number) ->
+ Number;
+ _Whatever ->
+ case string:span(VsnString,"1234567890.") of
+ 0 ->
+ unknown;
+ VLength ->
+ Vsn = string:substr(VsnString,1,VLength),
+ case string:tokens(Vsn,".") of
+ [Number] ->
+ list_to_float(Number++".0");
+ [Major,Minor|_MinorMinor] ->
+ list_to_float(Major++"."++Minor)
+ end
+ end
+ end;
+browserVersion(VsnString) ->
+ browserVersion([$/|VsnString]).
+
+operativeSystem(OpString) ->
+ operativeSystem(OpString, ?OPERATIVE_SYSTEMS).
+
+operativeSystem(_OpString,[]) ->
+ unknown;
+operativeSystem(OpString,[{RetVal,RegExps}|Rest]) ->
+ case controlOperativeSystem(OpString,RegExps) of
+ true ->
+ RetVal;
+ _ ->
+ operativeSystem(OpString,Rest)
+ end.
+
+controlOperativeSystem(_OpString,[]) ->
+ false;
+controlOperativeSystem(OpString,[Regexp|Regexps]) ->
+ case regexp:match(OpString,Regexp) of
+ {match,_,_} ->
+ true;
+ nomatch ->
+ controlOperativeSystem(OpString,Regexps)
+ end.
+
+
+%% OK, this is ugly, but that's the only way since
+%% not all browsers conform to the name/vsn standard.
+%% First we check if it is one of the browsers that
+%% are not the default Mozilla browser against the regexp
+%% for the different browsers. If no match, it is a Mozilla
+%% browser, i.e. Opera, Netscape, IE or Safari.
+
+getMozilla(_AgentString,[],Default) ->
+ Default;
+getMozilla(AgentString,[{Agent,AgentRegExp}|Rest],Default) ->
+ case regexp:match(AgentString,AgentRegExp) of
+ {match,_,_} ->
+ {Agent,getMozVersion(AgentString,AgentRegExp)};
+ nomatch ->
+ getMozilla(AgentString,Rest,Default)
+ end.
+
+getMozVersion(AgentString, AgentRegExp) ->
+ case regexp:match(AgentString,AgentRegExp++"[0-9\.\ \/]*") of
+ {match,Start,Length} when length(AgentRegExp) < Length ->
+ %% Ok we got the number split it out
+ RealStart = Start+length(AgentRegExp),
+ RealLength = Length-length(AgentRegExp),
+ VsnString = string:substr(AgentString,RealStart,RealLength),
+ %% case string:strip(VsnString,both,$\ ) of
+ case strip(VsnString) of
+ [] ->
+ unknown;
+ [Y1,Y2,Y3,Y4,M1,M2,D1,D2] = DateVsn when
+ Y1 =< $9, Y1 >= $0,
+ Y2 =< $9, Y2 >= $0,
+ Y3 =< $9, Y3 >= $0,
+ Y4 =< $9, Y4 >= $0,
+ M1 =< $9, M1 >= $0,
+ M2 =< $9, M2 >= $0,
+ D1 =< $9, D1 >= $0,
+ D2 =< $9, D2 >= $0 ->
+ list_to_integer(DateVsn);
+ Vsn ->
+ case string:tokens(Vsn,".") of
+ [Number]->
+ list_to_float(Number++".0");
+ [Major,Minor|Rev] ->
+ V = lists:flatten([Major,".",Minor,Rev]),
+ list_to_float(V)
+ end
+ end;
+ nomatch ->
+ unknown
+ end.
+
+strip(VsnString) ->
+ strip2(strip1(VsnString)).
+
+strip1(VsnString) ->
+ string:strip(VsnString,both,$\ ).
+
+strip2(VsnString) ->
+ string:strip(VsnString,both,$/ ).
+
+test()->
+ test("Mozilla/4.75 [en] (X11; U; SunOS 5.8 sun4u)"),
+ test("Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.0.1) Gecko/20020823 Netscape/7.0"),
+ test("Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.1) Gecko/20020827"),
+ test("Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.4) Gecko/20020827"),
+ test("Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us) AppleWebKit/85 (KHTML, like Gecko) Safari/85"),
+ test("Mozilla/4.0 (compatible; MSIE 5.0; SP1B; SunOS 5.8 sun4u; X11)"),
+ test("Lynx/2.8.3rel.1 libwww-FM/2.142"),
+ ok.
+
+test(Str) ->
+ Browser = getBrowser(Str),
+ io:format("~n--------------------------------------------------------~n"),
+ io:format("~p",[Browser]),
+ io:format("~n--------------------------------------------------------~n").
+
diff --git a/src/couch_inets/mod_cgi.erl b/src/couch_inets/mod_cgi.erl
new file mode 100644
index 00000000..0e682d84
--- /dev/null
+++ b/src/couch_inets/mod_cgi.erl
@@ -0,0 +1,331 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%% Implements The WWW Common Gateway Interface Version 1.1
+
+-module(mod_cgi).
+
+-export([env/3]).
+
+%%% Callback API
+-export([do/1, load/2]).
+
+-include("http_internal.hrl").
+-include("httpd.hrl").
+
+%% We will not make the change to use base64 in stdlib in inets just yet.
+%% it will be included in the next major release of inets.
+-compile({nowarn_deprecated_function, {http_base_64, encode, 1}}).
+
+-define(VMODULE,"CGI").
+
+-define(DEFAULT_CGI_TIMEOUT, 15000).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+%%--------------------------------------------------------------------------
+%% do(ModData, _, AfterScript) -> [{EnvVariable, Value}]
+%%
+%% AfterScript = string()
+%% ModData = #mod{}
+%% EnvVariable = string()
+%% Value = term()
+%% Description: Keep for now as it is documented in the man page
+%%-------------------------------------------------------------------------
+%% Build the CGI environment list for a script invocation.  The actual
+%% variable construction is delegated to httpd_script_env:create_env/3;
+%% the _Script argument is unused here (kept for the documented API).
+env(ModData, _Script, AfterScript) ->
+ ScriptElements = script_elements(ModData, AfterScript),
+ httpd_script_env:create_env(cgi, ModData, ScriptElements).
+
+%%%=========================================================================
+%%% Callback API
+%%%=========================================================================
+
+%%--------------------------------------------------------------------------
+%% do(ModData) -> {proceed, OldData} | {proceed, NewData} | {break, NewData}
+%% | done
+%% ModData = #mod{}
+%%
+%% Description: See httpd(3) ESWAPI CALLBACK FUNCTIONS
+%%-------------------------------------------------------------------------
+%% httpd module-chain callback: only run the CGI machinery if no earlier
+%% module has already produced a status code or a response for this
+%% request; otherwise pass the data through untouched.
+do(ModData) ->
+ case httpd_util:key1search(ModData#mod.data, status) of
+ %% A status code has been generated!
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed, ModData#mod.data};
+ %% No status code has been generated!
+ undefined ->
+ case httpd_util:key1search(ModData#mod.data, response) of
+ undefined ->
+ generate_response(ModData);
+ _Response ->
+ {proceed, ModData#mod.data}
+ end
+ end.
+
+%%--------------------------------------------------------------------------
+%% load(Line, Context) -> eof | ok | {ok, NewContext} |
+%% {ok, NewContext, Directive} |
+%% {ok, NewContext, DirectiveList} | {error, Reason}
+%% Line = string()
+%% Context = NewContext = DirectiveList = [Directive]
+%% Directive = {DirectiveKey , DirectiveValue}
+%% DirectiveKey = DirectiveValue = term()
+%% Reason = term()
+%%
+%% Description: See httpd(3) ESWAPI CALLBACK FUNCTIONS
+%%-------------------------------------------------------------------------
+
+%% ScriptNoCache true|false, defines whether the server shall add
+%% header fields to stop proxies and
+%% clients from saving the page in history
+%% or cache
+%%
+%% load/2: parse mod_cgi configuration directives (httpd(3) callback).
+%%
+%% ScriptNoCache true|false — whether to add anti-caching headers so
+%% proxies/clients do not store script output.
+load("ScriptNoCache " ++ CacheArg, [])->
+    case catch list_to_atom(httpd_conf:clean(CacheArg)) of
+        true ->
+            {ok, [], {script_nocache, true}};
+        false ->
+            {ok, [], {script_nocache, false}};
+        _ ->
+            {error, ?NICE(httpd_conf:clean(CacheArg)++
+                          " is an invalid ScriptNoCache directive")}
+    end;
+%% ScriptTimeout Seconds — maximum wait for the script to produce a part
+%% of the document; stored internally in milliseconds.
+load("ScriptTimeout " ++ Timeout, [])->
+    case catch list_to_integer(httpd_conf:clean(Timeout)) of
+        %% is_integer/1 replaces the deprecated integer/1 guard BIF
+        %% (is_port/is_pid are already used elsewhere in this module).
+        TimeoutSec when is_integer(TimeoutSec) ->
+            {ok, [], {script_timeout,TimeoutSec*1000}};
+        _ ->
+            {error, ?NICE(httpd_conf:clean(Timeout)++
+                          " is an invalid ScriptTimeout")}
+    end.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+%% Resolve the request URI against the configured ScriptAlias entries;
+%% if it maps to a script, execute it, otherwise proceed unchanged.
+generate_response(ModData) ->
+ RequestURI =
+ %% A previous module (e.g. mod_alias) may have rewritten the URI.
+ case httpd_util:key1search(ModData#mod.data, new_request_uri) of
+ undefined ->
+ ModData#mod.request_uri;
+ Value ->
+ Value
+ end,
+ ScriptAliases =
+ httpd_util:multi_lookup(ModData#mod.config_db, script_alias),
+ case mod_alias:real_script_name(ModData#mod.config_db, RequestURI,
+ ScriptAliases) of
+ {Script, AfterScript} ->
+ exec_script(ModData, Script, AfterScript,
+ RequestURI);
+ not_a_script ->
+ {proceed, ModData#mod.data}
+ end.
+
+%% True if File can be executed.  On win32 the extension is stripped
+%% first (workaround for OTP-3627); elsewhere os:find_executable decides.
+is_executable(File) ->
+ Dir = filename:dirname(File),
+ FileName = filename:basename(File),
+ case os:type() of
+ {win32,_} ->
+ %% temporary (hopefully) fix for win32 OTP-3627
+ is_win32_executable(Dir,FileName);
+ _ ->
+ is_executable(Dir, FileName)
+ end.
+
+%% True when FilName resolves to an executable inside Dir.
+%% os:find_executable/2 returns the path string on success and the atom
+%% 'false' otherwise, so a direct comparison gives the boolean we want.
+is_executable(Dir, FilName) ->
+    os:find_executable(FilName, Dir) =/= false.
+
+%% Start temporary (hopefully) fix for win32 OTP-3627
+%% ---------------------------------
+%% win32 variant: drop a known executable extension (.bat/.exe/.com/.cmd)
+%% before probing, since os:find_executable appends extensions itself.
+is_win32_executable(Dir, FileName) ->
+ NewFileName = strip_extention(FileName, [".bat",".exe",".com", ".cmd"]),
+ is_executable(Dir, NewFileName).
+
+%% Remove the first matching extension from FileName; return it unchanged
+%% when none of the candidates match.  (Sic: "extention" is the original
+%% spelling; renaming would require touching the caller above.)
+strip_extention(FileName, []) ->
+ FileName;
+strip_extention(FileName, [Extention | Extentions]) ->
+ case filename:basename(FileName, Extention) of
+ FileName ->
+ strip_extention(FileName, Extentions);
+ NewFileName ->
+ NewFileName
+ end.
+
+%% End fix
+%% ---------------------------------
+
+%% Dispatch on executability: run the script or answer 404.
+exec_script(ModData, Script, AfterScript, RequestURI) ->
+ exec_script(is_executable(Script), ModData, Script,
+ AfterScript, RequestURI).
+
+%% Executable: spawn the script through a port, feed it the request body
+%% and stream its output back.  trap_exit is enabled so port/linked-
+%% process deaths arrive as messages in deliver_webpage/receive loops.
+exec_script(true, ModData, Script, AfterScript, _RequestURI) ->
+ process_flag(trap_exit,true),
+ Dir = filename:dirname(Script),
+ ScriptElements = script_elements(ModData, AfterScript),
+ Env = (catch httpd_script_env:create_env(cgi, ModData, ScriptElements)),
+
+ %% Run script
+ Port = (catch open_port({spawn, Script},[binary, stream,
+ {cd, Dir}, {env, Env}])),
+ case Port of
+ Port when is_port(Port) ->
+ send_request_body_to_script(ModData, Port),
+ deliver_webpage(ModData, Port); % Take care of script output
+ Error ->
+ exit({open_port_failed, Error,
+ [{mod,?MODULE},
+ {uri,ModData#mod.request_uri}, {script,Script},
+ {env,Env},{dir,Dir}]})
+ end;
+
+%% Not executable: report 404 (message says "permission", original text).
+exec_script(false, ModData, _Script, _AfterScript, _RequestURI) ->
+ {proceed,
+ [{status,
+ {404,ModData#mod.request_uri,
+ ?NICE("You don't have permission to execute " ++
+ ModData#mod.request_uri ++ " on this server")}}|
+ ModData#mod.data]}.
+
+%% Pipe the request entity body (if any) to the script's stdin.
+send_request_body_to_script(ModData, Port) ->
+ case ModData#mod.entity_body of
+ [] ->
+ ok;
+ EntityBody ->
+ port_command(Port, EntityBody)
+ end.
+
+%% Collect the script's CGI headers, then stream its body to the client.
+%% Chunked transfer is used for HTTP/1.1 unless disabled in the config;
+%% otherwise the connection is marked "close".  A timeout kills the port
+%% and answers 504.
+deliver_webpage(#mod{config_db = Db} = ModData, Port) ->
+ Timeout = cgi_timeout(Db),
+ case receive_headers(Port, httpd_cgi, parse_headers,
+ [<<>>, [], []], Timeout) of
+ {Headers, Body} ->
+ case httpd_cgi:handle_headers(Headers) of
+ %% Script issued a Location: redirect to a local path.
+ {proceed, AbsPath} ->
+ {proceed, [{real_name,
+ httpd_util:split_path(AbsPath)} |
+ ModData#mod.data]};
+ {ok, HTTPHeaders, Status} ->
+ IsDisableChunkedSend =
+ httpd_response:is_disable_chunked_send(Db),
+ case (ModData#mod.http_version =/= "HTTP/1.1") or
+ (IsDisableChunkedSend) of
+ true ->
+ send_headers(ModData, Status,
+ [{"connection", "close"}
+ | HTTPHeaders]);
+ false ->
+ send_headers(ModData, Status,
+ [{"transfer-encoding",
+ "chunked"} | HTTPHeaders])
+ end,
+ handle_body(Port, ModData, Body, Timeout, size(Body),
+ IsDisableChunkedSend)
+ end;
+ {'EXIT', Port, Reason} ->
+ process_flag(trap_exit, false),
+ {proceed, [{status, {400, none, reason(Reason)}} |
+ ModData#mod.data]};
+ timeout ->
+ (catch port_close(Port)), % KILL the port !!!!
+ send_headers(ModData, {504, "Timeout"}, []),
+ httpd_socket:close(ModData#mod.socket_type, ModData#mod.socket),
+ process_flag(trap_exit,false),
+ {proceed,[{response, {already_sent, 200, 0}} | ModData#mod.data]}
+ end.
+
+%% Loop reading port data through the httpd_cgi header parser until it
+%% yields {Headers, Body}, the port dies, a linked process dies, or
+%% Timeout elapses.  The parser continues itself by returning {M, F, A}.
+receive_headers(Port, Module, Function, Args, Timeout) ->
+ receive
+ {Port, {data, Response}} when is_port(Port) ->
+ case Module:Function([Response | Args]) of
+ {NewModule, NewFunction, NewArgs} ->
+ receive_headers(Port, NewModule,
+ NewFunction, NewArgs, Timeout);
+ {ok, {Headers, Body}} ->
+ {Headers, Body}
+ end;
+ {'EXIT', Port, Reason} when is_port(Port) ->
+ {'EXIT', Port, Reason};
+ {'EXIT', Pid, Reason} when is_pid(Pid) ->
+ exit({linked_process_died, Pid, Reason})
+ after Timeout ->
+ timeout
+ end.
+
+%% Send the status line plus cache-control headers and the script's own
+%% headers to the client.  The status phrase is ignored here.
+send_headers(ModData, {StatusCode, _}, HTTPHeaders) ->
+ ExtraHeaders = httpd_response:cache_headers(ModData),
+ httpd_response:send_header(ModData, StatusCode,
+ ExtraHeaders ++ HTTPHeaders).
+
+%% handle_body(Port, ModData, Body, Timeout, Size, IsDisableChunkedSend)
+%%
+%% Stream script output to the client until the port exits normally (send
+%% the final chunk), exits abnormally (answer 400), a linked process dies,
+%% or Timeout elapses (kill the port).  HEAD requests get no body at all.
+handle_body(Port, #mod{method = "HEAD"} = ModData, _, _, Size, _) ->
+    (catch port_close(Port)), % KILL the port !!!!
+    process_flag(trap_exit,false),
+    {proceed, [{response, {already_sent, 200, Size}} | ModData#mod.data]};
+
+handle_body(Port, ModData, Body, Timeout, Size, IsDisableChunkedSend) ->
+    httpd_response:send_chunk(ModData, Body, IsDisableChunkedSend),
+    receive
+        %% is_port/1 replaces the deprecated port/1 guard BIF, matching
+        %% the other clauses of this very receive.
+        {Port, {data, Data}} when is_port(Port) ->
+            handle_body(Port, ModData, Data, Timeout, Size + size(Data),
+                        IsDisableChunkedSend);
+        {'EXIT', Port, normal} when is_port(Port) ->
+            httpd_response:send_final_chunk(ModData, IsDisableChunkedSend),
+            process_flag(trap_exit,false),
+            {proceed, [{response, {already_sent, 200, Size}} |
+                       ModData#mod.data]};
+        {'EXIT', Port, Reason} when is_port(Port) ->
+            process_flag(trap_exit, false),
+            {proceed, [{status, {400, none, reason(Reason)}} |
+                       ModData#mod.data]};
+        {'EXIT', Pid, Reason} when is_pid(Pid) ->
+            exit({mod_cgi_linked_process_died, Pid, Reason})
+    after Timeout ->
+            (catch port_close(Port)), % KILL the port !!!!
+            process_flag(trap_exit,false),
+            {proceed,[{response, {already_sent, 200, Size}} |
+                      ModData#mod.data]}
+    end.
+
+%% Translate the parsed {PathInfo, QueryString} pair into the element
+%% list consumed by httpd_script_env:create_env/3.  GET passes the URI
+%% pieces; POST passes the entity body; anything else passes nothing.
+script_elements(#mod{method = "GET"}, {[], QueryString}) ->
+ [{query_string, QueryString}];
+script_elements(#mod{method = "GET"}, {PathInfo, []}) ->
+ [{path_info, PathInfo}];
+script_elements(#mod{method = "GET"}, {PathInfo, QueryString}) ->
+ [{query_string, QueryString}, {path_info, PathInfo}];
+script_elements(#mod{method = "POST", entity_body = Body}, _) ->
+ [{entity_body, Body}];
+script_elements(_, _) ->
+ [].
+
+%% Configured script timeout in ms, defaulting to ?DEFAULT_CGI_TIMEOUT.
+cgi_timeout(Db) ->
+ httpd_util:lookup(Db, cgi_timeout, ?DEFAULT_CGI_TIMEOUT).
+
+%% Convert a port/OS error term to a printable string for a 400 status.
+%% ("Too many" fixes the original "To many" typo in the user-visible text.)
+reason({error,emfile})     -> ": Too many open files";
+reason({error,{enfile,_}}) -> ": File/port table overflow";
+reason({error,enomem})     -> ": Not enough memory";
+reason({error,eagain})     -> ": No more available OS processes";
+reason(Reason) -> lists:flatten(io_lib:format("Reason: ~p~n", [Reason])).
diff --git a/src/couch_inets/mod_dir.erl b/src/couch_inets/mod_dir.erl
new file mode 100644
index 00000000..7ec3306d
--- /dev/null
+++ b/src/couch_inets/mod_dir.erl
@@ -0,0 +1,281 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_dir).
+-export([do/1]).
+
+-include("httpd.hrl").
+
+%% do
+
+%% httpd module-chain callback: only produce a directory listing for GET
+%% requests for which no earlier module generated a status or response.
+do(Info) ->
+ ?DEBUG("do -> entry",[]),
+ case Info#mod.method of
+ "GET" ->
+ case httpd_util:key1search(Info#mod.data,status) of
+ %% A status code has been generated!
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed,Info#mod.data};
+ %% No status code has been generated!
+ undefined ->
+ case httpd_util:key1search(Info#mod.data,response) of
+ %% No response has been generated!
+ undefined ->
+ do_dir(Info);
+ %% A response has been generated or sent!
+ _Response ->
+ {proceed,Info#mod.data}
+ end
+ end;
+ %% Not a GET method!
+ _ ->
+ {proceed,Info#mod.data}
+ end.
+
+%% If the resolved path is a directory, render an HTML index response;
+%% plain files are passed through and read errors become an error status.
+do_dir(Info) ->
+ ?DEBUG("do_dir -> Request URI: ~p",[Info#mod.request_uri]),
+ Path = mod_alias:path(Info#mod.data,Info#mod.config_db,
+ Info#mod.request_uri),
+ DefaultPath = mod_alias:default_index(Info#mod.config_db,Path),
+ %% Is it a directory?
+ case file:read_file_info(DefaultPath) of
+ {ok,FileInfo} when FileInfo#file_info.type == directory ->
+ DecodedRequestURI =
+ httpd_util:decode_hex(Info#mod.request_uri),
+ ?DEBUG("do_dir -> ~n"
+ " Path: ~p~n"
+ " DefaultPath: ~p~n"
+ " DecodedRequestURI: ~p",
+ [Path,DefaultPath,DecodedRequestURI]),
+ case dir(DefaultPath,string:strip(DecodedRequestURI,right,$/),
+ Info#mod.config_db) of
+ {ok, Dir} ->
+ LastModified =
+ case (catch httpd_util:rfc1123_date(
+ FileInfo#file_info.mtime)) of
+ Date when is_list(Date) ->
+ [{"date", Date}];
+ _ -> %% This will rarly happen, but could happen
+ %% if a computer is wrongly configured.
+ []
+ end,
+ Head=[{content_type,"text/html"},
+ {content_length,
+ integer_to_list(httpd_util:flatlength(Dir))},
+ {code,200} | LastModified],
+ {proceed,[{response,{response, Head, Dir}},
+ {mime_type,"text/html"} | Info#mod.data]};
+ {error, Reason} ->
+ ?ERROR("do_dir -> dir operation failed: ~p",[Reason]),
+ {proceed,
+ [{status,{404,Info#mod.request_uri,Reason}}|
+ Info#mod.data]}
+ end;
+ %% NOTE(review): this clause binds _FileInfo but the ?DEBUG below
+ %% references FileInfo — compiles only when ?DEBUG expands to
+ %% nothing; verify before enabling debug builds.
+ {ok, _FileInfo} ->
+ ?DEBUG("do_dir -> ~n"
+ " Path: ~p~n"
+ " DefaultPath: ~p~n"
+ " FileInfo: ~p",
+ [Path,DefaultPath,FileInfo]),
+ {proceed,Info#mod.data};
+ {error,Reason} ->
+ ?LOG("do_dir -> failed reading file info (~p) for: ~p",
+ [Reason,DefaultPath]),
+ Status = httpd_file:handle_error(Reason, "access", Info,
+ DefaultPath),
+ {proceed, [{status, Status}| Info#mod.data]}
+ end.
+
+%% dir(Path, RequestURI, ConfigDB) -> {ok, HtmlIoList} | {error, string()}
+%%
+%% Render the full directory listing page; entries are sorted.
+dir(Path,RequestURI,ConfigDB) ->
+    case file:list_dir(Path) of
+        {ok,FileList} ->
+            SortedFileList=lists:sort(FileList),
+            {ok,[header(Path,RequestURI),
+                 body(Path,RequestURI,ConfigDB,SortedFileList),
+                 footer(Path,SortedFileList)]};
+        {error,Reason} ->
+            %% Reason is a posix atom (e.g. enoent); appending it with
+            %% "++" would badarg, so render it as text first.
+            {error,?NICE("Can't open directory "++Path++": "++
+                         file:format_error(Reason))}
+    end.
+
+%% header
+
+%% Apache-style listing header: title, column captions and — for any
+%% directory other than "/" — a "Parent directory" row.
+header(Path,RequestURI) ->
+ Header = "<HTML>\n<HEAD>\n<TITLE>Index of "++ RequestURI ++
+ "</TITLE>\n</HEAD>\n<BODY>\n<H1>Index of "++
+ RequestURI ++ "</H1>\n<PRE><IMG SRC=\"" ++ icon(blank) ++
+ "\" ALT=" "> Name Last modified "
+ "Size Description <HR>\n",
+ %% Strip the last path segment to find the parent URI/path.
+ case regexp:sub(RequestURI,"[^/]*\$","") of
+ {ok,"/",_} ->
+ Header;
+ {ok,ParentRequestURI,_} ->
+ {ok,ParentPath,_} =
+ regexp:sub(string:strip(Path,right,$/),"[^/]*\$",""),
+ Header++format(ParentPath,ParentRequestURI)
+ end.
+
+%% One "Parent directory" listing row with the parent's mtime.
+%% Crashes (badmatch) if the parent's file info cannot be read.
+format(Path,RequestURI) ->
+ {ok,FileInfo}=file:read_file_info(Path),
+ {{Year, Month, Day},{Hour, Minute, _Second}} = FileInfo#file_info.mtime,
+ io_lib:format("<IMG SRC=\"~s\" ALT=\"[~s]\">"
+ " <A HREF=\"~s\">Parent directory</A> "
+ " ~2.2.0w-~s-~w ~2.2.0w:~2.2.0w -\n",
+ [icon(back),"DIR",RequestURI,Day,
+ httpd_util:month(Month),Year,Hour,Minute]).
+
+%% body
+
+%% body(Path, RequestURI, ConfigDB, Entries) -> io_list()
+%%
+%% One formatted listing row per directory entry, preserving order.
+body(Path, RequestURI, ConfigDB, Entries) ->
+    [format(Path, RequestURI, ConfigDB, Entry) || Entry <- Entries].
+
+%% One listing row for Entry.  Directories and files use slightly
+%% different templates; names longer than 21 chars are truncated with
+%% "..", shorter ones are padded to align the date column.  Unreadable
+%% entries are silently omitted (empty string).
+format(Path,RequestURI,ConfigDB,Entry) ->
+ case file:read_file_info(Path++"/"++Entry) of
+ {ok,FileInfo} when FileInfo#file_info.type == directory ->
+ {{Year, Month, Day},{Hour, Minute, _Second}} =
+ FileInfo#file_info.mtime,
+ EntryLength=length(Entry),
+ if
+ EntryLength > 21 ->
+ io_lib:format("<IMG SRC=\"~s\" ALT=\"[~s]\"> "
+ "<A HREF=\"~s\">~-21.s..</A>"
+ "~2.2.0w-~s-~w ~2.2.0w:~2.2.0w"
+ " -\n", [icon(folder),"DIR",
+ RequestURI++"/"++Entry++"/",
+ Entry,
+ Day, httpd_util:month(Month),
+ Year,Hour,Minute]);
+ true ->
+ io_lib:format("<IMG SRC=\"~s\" ALT=\"[~s]\">"
+ " <A HREF=\"~s\">~s</A>~*.*c~2.2.0"
+ "w-~s-~w ~2.2.0w:~2.2.0w -\n",
+ [icon(folder),"DIR",RequestURI ++ "/" ++
+ Entry ++ "/",Entry,
+ 23-EntryLength,23-EntryLength,$ ,Day,
+ httpd_util:month(Month),Year,Hour,Minute])
+ end;
+ {ok,FileInfo} ->
+ {{Year, Month, Day},{Hour, Minute,_Second}} =
+ FileInfo#file_info.mtime,
+ Suffix=httpd_util:suffix(Entry),
+ MimeType=httpd_util:lookup_mime(ConfigDB,Suffix,""),
+ EntryLength=length(Entry),
+ if
+ EntryLength > 21 ->
+ io_lib:format("<IMG SRC=\"~s\" ALT=\"[~s]\">"
+ " <A HREF=\"~s\">~-21.s..</A>~2.2.0"
+ "w-~s-~w ~2.2.0w:~2.2.0w~8wk ~s\n",
+ [icon(Suffix, MimeType), Suffix, RequestURI
+ ++"/"++Entry, Entry,Day,
+ httpd_util:month(Month),Year,Hour,Minute,
+ trunc(FileInfo#file_info.size/1024+1),
+ MimeType]);
+ true ->
+ io_lib:format("<IMG SRC=\"~s\" ALT=\"[~s]\"> "
+ "<A HREF=\"~s\">~s</A>~*.*c~2.2.0w-~s-~w"
+ " ~2.2.0w:~2.2.0w~8wk ~s\n",
+ [icon(Suffix, MimeType), Suffix, RequestURI
+ ++ "/" ++ Entry, Entry, 23-EntryLength,
+ 23-EntryLength, $ ,Day,
+ httpd_util:month(Month),Year,Hour,Minute,
+ trunc(FileInfo#file_info.size/1024+1),
+ MimeType])
+ end;
+ {error, _Reason} ->
+ ""
+ end.
+
+%% footer
+
+%% Closing HTML; if the directory contains a README, its contents are
+%% inlined above the closing tags (crashes on an unreadable README).
+footer(Path,FileList) ->
+ case lists:member("README",FileList) of
+ true ->
+ {ok,Body}=file:read_file(Path++"/README"),
+ "</PRE>\n<HR>\n<PRE>\n"++binary_to_list(Body)++
+ "\n</PRE>\n</BODY>\n</HTML>\n";
+ false ->
+ "</PRE>\n</BODY>\n</HTML>\n"
+ end.
+
+%%
+%% Icon mappings are hard-wired ala default Apache (Ugly!)
+%%
+
+%% Icon for an entry: exact suffix match first (icon/1), otherwise fall
+%% back on the MIME type's major class, finally a generic "unknown" icon.
+icon(Suffix,MimeType) ->
+ case icon(Suffix) of
+ undefined ->
+ case MimeType of
+ [$t,$e,$x,$t,$/|_] ->
+ "/icons/text.gif";
+ [$i,$m,$a,$g,$e,$/|_] ->
+ "/icons/image2.gif";
+ [$a,$u,$d,$i,$o,$/|_] ->
+ "/icons/sound2.gif";
+ [$v,$i,$d,$e,$o,$/|_] ->
+ "/icons/movie.gif";
+ _ ->
+ "/icons/unknown.gif"
+ end;
+ Icon ->
+ Icon
+ end.
+
+%% Hard-wired suffix/atom -> icon path table (Apache defaults); returns
+%% 'undefined' for an unknown suffix so icon/2 can fall back on MIME.
+icon(blank) -> "/icons/blank.gif";
+icon(back) -> "/icons/back.gif";
+icon(folder) -> "/icons/folder.gif";
+icon("bin") -> "/icons/binary.gif";
+icon("exe") -> "/icons/binary.gif";
+icon("hqx") -> "/icons/binhex.gif";
+icon("tar") -> "/icons/tar.gif";
+icon("wrl") -> "/icons/world2.gif";
+icon("wrl.gz") -> "/icons/world2.gif";
+icon("vrml") -> "/icons/world2.gif";
+icon("vrm") -> "/icons/world2.gif";
+icon("iv") -> "/icons/world2.gif";
+icon("Z") -> "/icons/compressed.gif";
+icon("z") -> "/icons/compressed.gif";
+icon("tgz") -> "/icons/compressed.gif";
+icon("gz") -> "/icons/compressed.gif";
+icon("zip") -> "/icons/compressed.gif";
+icon("ps") -> "/icons/a.gif";
+icon("ai") -> "/icons/a.gif";
+icon("eps") -> "/icons/a.gif";
+icon("html") -> "/icons/layout.gif";
+icon("shtml") -> "/icons/layout.gif";
+icon("htm") -> "/icons/layout.gif";
+icon("pdf") -> "/icons/layout.gif";
+icon("txt") -> "/icons/text.gif";
+icon("erl") -> "/icons/burst.gif";
+icon("c") -> "/icons/c.gif";
+icon("pl") -> "/icons/p.gif";
+icon("py") -> "/icons/p.gif";
+icon("for") -> "/icons/f.gif";
+icon("dvi") -> "/icons/dvi.gif";
+icon("uu") -> "/icons/uuencoded.gif";
+icon("conf") -> "/icons/script.gif";
+icon("sh") -> "/icons/script.gif";
+icon("shar") -> "/icons/script.gif";
+icon("csh") -> "/icons/script.gif";
+icon("ksh") -> "/icons/script.gif";
+icon("tcl") -> "/icons/script.gif";
+icon("tex") -> "/icons/tex.gif";
+icon("core") -> "/icons/tex.gif";
+icon(_) -> undefined.
+
+
diff --git a/src/couch_inets/mod_disk_log.erl b/src/couch_inets/mod_disk_log.erl
new file mode 100644
index 00000000..3c29b9e1
--- /dev/null
+++ b/src/couch_inets/mod_disk_log.erl
@@ -0,0 +1,396 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_disk_log).
+-export([do/1,error_log/5,security_log/2,load/2,store/2,remove/1]).
+
+-export([report_error/2]).
+
+-define(VMODULE,"DISK_LOG").
+
+-include("httpd.hrl").
+
+%% do
+
+%% httpd module-chain callback: write a transfer-log entry for every
+%% request, deriving status code and byte count from whatever response
+%% data earlier modules left behind; 4xx/5xx also hit the error log.
+do(Info) ->
+ AuthUser = auth_user(Info#mod.data),
+ Date = custom_date(),
+ log_internal_info(Info,Date,Info#mod.data),
+ LogFormat = get_log_format(Info#mod.config_db),
+ case httpd_util:key1search(Info#mod.data,status) of
+ %% A status code has been generated!
+ {StatusCode, _PhraseArgs, Reason} ->
+ transfer_log(Info, "-", AuthUser, Date, StatusCode, 0, LogFormat),
+ if
+ StatusCode >= 400 ->
+ error_log(Info, Date, Reason, LogFormat);
+ true ->
+ not_an_error
+ end,
+ {proceed,Info#mod.data};
+ %% No status code has been generated!
+ undefined ->
+ case httpd_util:key1search(Info#mod.data,response) of
+ {already_sent,StatusCode,Size} ->
+ transfer_log(Info, "-", AuthUser, Date, StatusCode,
+ Size, LogFormat),
+ {proceed,Info#mod.data};
+
+ {response, Head, _Body} ->
+ Size = httpd_util:key1search(Head, content_length, 0),
+ Code = httpd_util:key1search(Head, code, 200),
+ transfer_log(Info, "-", AuthUser, Date, Code,
+ Size, LogFormat),
+ {proceed,Info#mod.data};
+
+ {_StatusCode, Response} ->
+ transfer_log(Info, "-", AuthUser, Date, 200,
+ httpd_util:flatlength(Response), LogFormat),
+ {proceed,Info#mod.data};
+ undefined ->
+ transfer_log(Info, "-", AuthUser, Date, 200,
+ 0, LogFormat),
+ {proceed,Info#mod.data}
+ end
+ end.
+
+%% Common-log-format timestamp, e.g. "28/Mar/2008:23:32:19 +0100",
+%% with the UTC offset computed from local vs universal time.
+custom_date() ->
+ LocalTime = calendar:local_time(),
+ UniversalTime = calendar:universal_time(),
+ Minutes = round(diff_in_minutes(LocalTime,UniversalTime)),
+ {{YYYY,MM,DD},{Hour,Min,Sec}} = LocalTime,
+ Date =
+ io_lib:format("~.2.0w/~.3s/~.4w:~.2.0w:~.2.0w:~.2.0w ~c~.2.0w~.2.0w",
+ [DD,httpd_util:month(MM),YYYY,Hour,Min,Sec,sign(Minutes),
+ abs(Minutes) div 60,abs(Minutes) rem 60]),
+ lists:flatten(Date).
+
+%% Minutes between two calendar datetimes (L - U), as a float.
+diff_in_minutes(L,U) ->
+ (calendar:datetime_to_gregorian_seconds(L) -
+ calendar:datetime_to_gregorian_seconds(U))/60.
+
+%% '+' for a positive offset, '-' otherwise (zero renders as '-').
+sign(Minutes) when Minutes > 0 ->
+ $+;
+sign(_Minutes) ->
+ $-.
+
+%% The authenticated remote user for a log entry, or "-" when the
+%% request carried none.  key1search/3 supplies the default directly
+%% (same idiom as log_size/2 below).
+auth_user(Data) ->
+    httpd_util:key1search(Data, remote_user, "-").
+
+%% log_internal_info
+
+%% Walk the interaction data and error-log every {internal_info, Reason}
+%% entry; all other entries are skipped.
+log_internal_info(_, _,[]) ->
+ ok;
+log_internal_info(Info,Date,[{internal_info,Reason}|Rest]) ->
+ Format = get_log_format(Info#mod.config_db),
+ error_log(Info,Date,Reason,Format),
+ log_internal_info(Info,Date,Rest);
+log_internal_info(Info,Date,[_|Rest]) ->
+ log_internal_info(Info,Date,Rest).
+
+
+%% transfer_log
+
+%% Append one Common-Log-Format line to the transfer disk log, if one is
+%% configured.  RFC931 is the (unused) ident field, always "-" here.
+transfer_log(Info,RFC931,AuthUser,Date,StatusCode,Bytes,Format) ->
+ case httpd_util:lookup(Info#mod.config_db,transfer_disk_log) of
+ undefined ->
+ no_transfer_log;
+ TransferDiskLog ->
+ {_PortNumber, RemoteHost}=(Info#mod.init_data)#init_data.peername,
+ Entry = io_lib:format("~s ~s ~s [~s] \"~s\" ~w ~w~n",
+ [RemoteHost, RFC931, AuthUser, Date,
+ Info#mod.request_line, StatusCode, Bytes]),
+ write(TransferDiskLog, Entry, Format)
+ end.
+
+
+%% error_log
+
+%% Append an access-failure entry to the error disk log, if configured.
+%% Format (internal|external) selects disk_log:log vs blog in write/3.
+%% The previous body re-derived Format from the config db and pattern-
+%% matched it against the parameter — a redundant lookup whose only
+%% possible effect was a badmatch; the caller-supplied Format is used.
+error_log(Info, Date, Reason, Format) ->
+    case httpd_util:lookup(Info#mod.config_db,error_disk_log) of
+        undefined ->
+            no_error_log;
+        ErrorDiskLog ->
+            {_PortNumber, RemoteHost}=(Info#mod.init_data)#init_data.peername,
+            Entry =
+                io_lib:format("[~s] access to ~s failed for ~s, reason: ~p~n",
+                              [Date, Info#mod.request_uri,
+                               RemoteHost, Reason]),
+            write(ErrorDiskLog, Entry, Format)
+    end.
+
+%% Server-crash variant used by the acceptor: logs a crash entry keyed
+%% on the remote host; socket arguments are unused.
+error_log(_SocketType, _Socket, ConfigDB, {_PortNumber, RemoteHost}, Reason) ->
+ Format = get_log_format(ConfigDB),
+ case httpd_util:lookup(ConfigDB,error_disk_log) of
+ undefined ->
+ no_error_log;
+ ErrorDiskLog ->
+ Date = custom_date(),
+ Entry =
+ io_lib:format("[~s] server crash for ~s, reason: ~p~n",
+ [Date,RemoteHost,Reason]),
+ write(ErrorDiskLog, Entry, Format),
+ ok
+ end.
+
+
+%% security_log
+
+%% Append a security event line to the security disk log, if configured.
+%% (Returns the atom no_error_log when unconfigured — original misnomer.)
+security_log(ConfigDB, Event) ->
+ Format = get_log_format(ConfigDB),
+ case httpd_util:lookup(ConfigDB,security_disk_log) of
+ undefined ->
+ no_error_log;
+ DiskLog ->
+ Date = custom_date(),
+ Entry = io_lib:format("[~s] ~s ~n", [Date, Event]),
+ write(DiskLog, Entry, Format),
+ ok
+ end.
+
+%% Write a free-form error report string to the error disk log.
+report_error(ConfigDB, Error) ->
+ Format = get_log_format(ConfigDB),
+ case httpd_util:lookup(ConfigDB, error_disk_log) of
+ undefined ->
+ no_error_log;
+ ErrorDiskLog ->
+ Date = custom_date(),
+ Entry = io_lib:format("[~s] reporting error: ~s",[Date,Error]),
+ write(ErrorDiskLog, Entry, Format),
+ ok
+ end.
+
+%%----------------------------------------------------------------------
+%% Get the current format of the disklog
+%%----------------------------------------------------------------------
+%% Configured disk_log format (internal | external), default external.
+get_log_format(ConfigDB)->
+ httpd_util:lookup(ConfigDB,disk_log_format,external).
+
+
+%%
+%% Configuration
+%%
+
+%% load
+
+%% load/2: parse mod_disk_log configuration directives.  The *Size
+%% directives expect "MaxBytes MaxFiles"; note there is no clause for
+%% other regexp:split results, so a malformed value with the wrong
+%% number of tokens raises case_clause rather than returning {error,_}
+%% — presumably acceptable at config-load time; verify if hardening.
+load([$T,$r,$a,$n,$s,$f,$e,$r,$D,$i,$s,$k,$L,$o,$g,$S,$i,$z,$e,$ |
+ TransferDiskLogSize],[]) ->
+ case regexp:split(TransferDiskLogSize," ") of
+ {ok,[MaxBytes,MaxFiles]} ->
+ case httpd_conf:make_integer(MaxBytes) of
+ {ok,MaxBytesInteger} ->
+ case httpd_conf:make_integer(MaxFiles) of
+ {ok,MaxFilesInteger} ->
+ {ok,[],{transfer_disk_log_size,
+ {MaxBytesInteger,MaxFilesInteger}}};
+ {error,_} ->
+ {error,
+ ?NICE(httpd_conf:clean(TransferDiskLogSize)++
+ " is an invalid TransferDiskLogSize")}
+ end;
+ {error,_} ->
+ {error,?NICE(httpd_conf:clean(TransferDiskLogSize)++
+ " is an invalid TransferDiskLogSize")}
+ end
+ end;
+load([$T,$r,$a,$n,$s,$f,$e,$r,$D,$i,$s,$k,$L,$o,$g,$ |TransferDiskLog],[]) ->
+ {ok,[],{transfer_disk_log,httpd_conf:clean(TransferDiskLog)}};
+
+load([$E,$r,$r,$o,$r,$D,$i,$s,$k,$L,$o,$g,$S,$i,$z,$e,$ | ErrorDiskLogSize],[]) ->
+ case regexp:split(ErrorDiskLogSize," ") of
+ {ok,[MaxBytes,MaxFiles]} ->
+ case httpd_conf:make_integer(MaxBytes) of
+ {ok,MaxBytesInteger} ->
+ case httpd_conf:make_integer(MaxFiles) of
+ {ok,MaxFilesInteger} ->
+ {ok,[],{error_disk_log_size,
+ {MaxBytesInteger,MaxFilesInteger}}};
+ {error,_} ->
+ {error,?NICE(httpd_conf:clean(ErrorDiskLogSize)++
+ " is an invalid ErrorDiskLogSize")}
+ end;
+ {error,_} ->
+ {error,?NICE(httpd_conf:clean(ErrorDiskLogSize)++
+ " is an invalid ErrorDiskLogSize")}
+ end
+ end;
+load([$E,$r,$r,$o,$r,$D,$i,$s,$k,$L,$o,$g,$ |ErrorDiskLog],[]) ->
+ {ok, [], {error_disk_log, httpd_conf:clean(ErrorDiskLog)}};
+
+load([$S,$e,$c,$u,$r,$i,$t,$y,$D,$i,$s,$k,$L,$o,$g,$S,$i,$z,$e,$ |SecurityDiskLogSize],[]) ->
+ case regexp:split(SecurityDiskLogSize, " ") of
+ {ok, [MaxBytes, MaxFiles]} ->
+ case httpd_conf:make_integer(MaxBytes) of
+ {ok, MaxBytesInteger} ->
+ case httpd_conf:make_integer(MaxFiles) of
+ {ok, MaxFilesInteger} ->
+ {ok, [], {security_disk_log_size,
+ {MaxBytesInteger, MaxFilesInteger}}};
+ {error,_} ->
+ {error, ?NICE(httpd_conf:clean(SecurityDiskLogSize)++
+ " is an invalid SecurityDiskLogSize")}
+ end;
+ {error, _} ->
+ {error, ?NICE(httpd_conf:clean(SecurityDiskLogSize)++
+ " is an invalid SecurityDiskLogSize")}
+ end
+ end;
+load([$S,$e,$c,$u,$r,$i,$t,$y,$D,$i,$s,$k,$L,$o,$g,$ |SecurityDiskLog],[]) ->
+ {ok, [], {security_disk_log, httpd_conf:clean(SecurityDiskLog)}};
+
+%% DiskLogFormat internal|external — anything else defaults to external.
+load([$D,$i,$s,$k,$L,$o,$g,$F,$o,$r,$m,$a,$t,$ |Format],[]) ->
+ case httpd_conf:clean(Format) of
+ "internal" ->
+ {ok, [], {disk_log_format,internal}};
+ "external" ->
+ {ok, [], {disk_log_format,external}};
+ _Default ->
+ {ok, [], {disk_log_format,external}}
+ end.
+
+%% store
+
+%% store/2: open each configured disk log (transfer/security/error),
+%% replacing the file name in the config with the opened log handle.
+store({transfer_disk_log,TransferDiskLog},ConfigList) ->
+ case create_disk_log(TransferDiskLog, transfer_disk_log_size, ConfigList) of
+ {ok,TransferDB} ->
+ {ok,{transfer_disk_log,TransferDB}};
+ {error,Reason} ->
+ {error,Reason}
+ end;
+store({security_disk_log,SecurityDiskLog},ConfigList) ->
+ case create_disk_log(SecurityDiskLog, security_disk_log_size, ConfigList) of
+ {ok,SecurityDB} ->
+ {ok,{security_disk_log,SecurityDB}};
+ {error,Reason} ->
+ {error,Reason}
+ end;
+store({error_disk_log,ErrorDiskLog},ConfigList) ->
+ case create_disk_log(ErrorDiskLog, error_disk_log_size, ConfigList) of
+ {ok,ErrorDB} ->
+ {ok,{error_disk_log,ErrorDB}};
+ {error,Reason} ->
+ {error,Reason}
+ end.
+
+
+%%----------------------------------------------------------------------
+%% Open or creates the disklogs
+%%----------------------------------------------------------------------
+%% {MaxBytes, MaxFiles} for the given size tag, default 500 KiB x 8 files.
+log_size(ConfigList, Tag) ->
+ httpd_util:key1search(ConfigList, Tag, {500*1024,8}).
+
+%% Resolve LogFile to an absolute path (relative paths are joined with
+%% ServerRoot) and open the wrap disk log with the configured size.
+create_disk_log(LogFile, SizeTag, ConfigList) ->
+ Filename = httpd_conf:clean(LogFile),
+ {MaxBytes, MaxFiles} = log_size(ConfigList, SizeTag),
+ case filename:pathtype(Filename) of
+ absolute ->
+ create_disk_log(Filename, MaxBytes, MaxFiles, ConfigList);
+ volumerelative ->
+ create_disk_log(Filename, MaxBytes, MaxFiles, ConfigList);
+ relative ->
+ case httpd_util:key1search(ConfigList,server_root) of
+ undefined ->
+ {error,
+ ?NICE(Filename++
+ " is an invalid ErrorLog beacuse ServerRoot is not defined")};
+ ServerRoot ->
+ AbsoluteFilename = filename:join(ServerRoot,Filename),
+ create_disk_log(AbsoluteFilename, MaxBytes, MaxFiles,
+ ConfigList)
+ end
+ end.
+
+%% Open the log in the configured format (internal|external, default
+%% external — kept for compatibility with older inets).
+create_disk_log(Filename, MaxBytes, MaxFiles, ConfigList) ->
+ Format = httpd_util:key1search(ConfigList, disk_log_format, external),
+ open(Filename, MaxBytes, MaxFiles, Format).
+
+
+
+%% remove
+%% remove(ConfigDB) -> ok
+%%
+%% Close every disk log opened by store/2.  The security log is opened
+%% there as well, so it must be closed here too (it was previously
+%% leaked: only the transfer and error logs were closed).
+remove(ConfigDB) ->
+    lists:foreach(fun([DiskLog]) -> close(DiskLog) end,
+                  ets:match(ConfigDB,{transfer_disk_log,'$1'})),
+    lists:foreach(fun([DiskLog]) -> close(DiskLog) end,
+                  ets:match(ConfigDB,{error_disk_log,'$1'})),
+    lists:foreach(fun([DiskLog]) -> close(DiskLog) end,
+                  ets:match(ConfigDB,{security_disk_log,'$1'})),
+    ok.
+
+
+%%
+%% Some disk_log wrapper functions:
+%%
+
+%%----------------------------------------------------------------------
+%% Function: open/4
+%% Description: Open a disk log file.
+%% Control which format the disk log will be in. The external file
+%% format is used as default since that format was used by older
+%% implementations of inets.
+%%
+%% When the internal disk log format is used, we will do some extra
+%% controls. If the files are valid, try to repair them and if
+%% thats not possible, truncate.
+%%----------------------------------------------------------------------
+
+%% Open a wrap disk log.  Internal format adds {repair, truncate} so an
+%% unrepairable log is truncated rather than failing the open.
+open(Filename, MaxBytes, MaxFiles, internal) ->
+ Opts = [{format, internal}, {repair, truncate}],
+ open1(Filename, MaxBytes, MaxFiles, Opts);
+open(Filename, MaxBytes, MaxFiles, _) ->
+ Opts = [{format, external}],
+ open1(Filename, MaxBytes, MaxFiles, Opts).
+
+%% Build the common options and translate failures to a ?NICE error.
+open1(Filename, MaxBytes, MaxFiles, Opts0) ->
+ Opts1 = [{name, Filename}, {file, Filename}, {type, wrap}] ++ Opts0,
+ case open2(Opts1, {MaxBytes, MaxFiles}) of
+ {ok, LogDB} ->
+ {ok, LogDB};
+ {error, Reason} ->
+ {error,
+ ?NICE("Can't create " ++ Filename ++
+ lists:flatten(io_lib:format(", ~p",[Reason])))};
+ _ ->
+ {error, ?NICE("Can't create "++Filename)}
+ end.
+
+%% disk_log:open/1 refuses a size option for an existing log; only add
+%% {size, _} on the retry that creates a brand-new file.
+open2(Opts, Size) ->
+ case disk_log:open(Opts) of
+ {error, {badarg, size}} ->
+ %% File did not exist, add the size option and try again
+ disk_log:open([{size, Size} | Opts]);
+ Else ->
+ Else
+ end.
+
+
+%%----------------------------------------------------------------------
+%% Actually writes the entry to the disk_log. If the log is an
+%% internal disk_log write it with log otherwise with blog.
+%%----------------------------------------------------------------------
+%% Append Entry: internal-format logs use disk_log:log/2 (term logging),
+%% external ones use blog/2 (byte logging).
+write(Log, Entry, internal) ->
+ disk_log:log(Log, Entry);
+
+write(Log, Entry, _) ->
+ disk_log:blog(Log, Entry).
+
+%% Close the log file
+close(Log) ->
+ disk_log:close(Log).
diff --git a/src/couch_inets/mod_esi.erl b/src/couch_inets/mod_esi.erl
new file mode 100644
index 00000000..f0e8ae80
--- /dev/null
+++ b/src/couch_inets/mod_esi.erl
@@ -0,0 +1,432 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_esi).
+
+%% API
+%% Functions provided to help erl scheme alias programmer to
+%% Create dynamic webpages that are sent back to the user during
+%% Generation
+-export([deliver/2]).
+
+%% Callback API
+-export([do/1, load/2]).
+
+-include("httpd.hrl").
+
+-define(VMODULE,"ESI").
+-define(DEFAULT_ERL_TIMEOUT,15000).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+%%--------------------------------------------------------------------------
+%% deliver(SessionID, Data) -> ok | {error, bad_sessionID}
+%% SessionID = pid()
+%% Data = string() | io_list() (first call must send a string that
+%% contains all header information including "\r\n\r\n", unless there
+%% is no header information at all.)
+%%
+%% Description: Send <Data> (Html page generated sofar) to the server
+%% request handling process so it can forward it to the client.
+%%-------------------------------------------------------------------------
+deliver(SessionID, Data) when pid(SessionID) ->
+ SessionID ! {ok, Data},
+ ok;
+deliver(_SessionID, _Data) ->
+ {error, bad_sessionID}.
+
+%%%=========================================================================
+%%% CALLBACK API
+%%%=========================================================================
+%%--------------------------------------------------------------------------
+%% do(ModData) -> {proceed, OldData} | {proceed, NewData} | {break, NewData}
+%% | done
+%% ModData = #mod{}
+%%
+%% Description: See httpd(3) ESWAPI CALLBACK FUNCTIONS
+%%-------------------------------------------------------------------------
+do(ModData) ->
+ case httpd_util:key1search(ModData#mod.data, status) of
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed, ModData#mod.data};
+ undefined ->
+ case httpd_util:key1search(ModData#mod.data, response) of
+ undefined ->
+ generate_response(ModData);
+ _Response ->
+ {proceed, ModData#mod.data}
+ end
+ end.
+%%--------------------------------------------------------------------------
+%% load(Line, Context) -> eof | ok | {ok, NewContext} |
+%% {ok, NewContext, Directive} |
+%% {ok, NewContext, DirectiveList} | {error, Reason}
+%% Line = string()
+%% Context = NewContext = DirectiveList = [Directive]
+%% Directive = {DirectiveKey , DirectiveValue}
+%% DirectiveKey = DirectiveValue = term()
+%% Reason = term()
+%%
+%% Description: See httpd(3) ESWAPI CALLBACK FUNCTIONS
+%%-------------------------------------------------------------------------
+load("ErlScriptAlias " ++ ErlScriptAlias, []) ->
+ case regexp:split(ErlScriptAlias," ") of
+ {ok, [ErlName | Modules]} ->
+ {ok, [], {erl_script_alias, {ErlName,Modules}}};
+ {ok, _} ->
+ {error, ?NICE(httpd_conf:clean(ErlScriptAlias) ++
+ " is an invalid ErlScriptAlias")}
+ end;
+load("EvalScriptAlias " ++ EvalScriptAlias, []) ->
+ case regexp:split(EvalScriptAlias, " ") of
+ {ok, [EvalName|Modules]} ->
+ {ok, [], {eval_script_alias, {EvalName, Modules}}};
+ {ok, _} ->
+ {error, ?NICE(httpd_conf:clean(EvalScriptAlias) ++
+ " is an invalid EvalScriptAlias")}
+ end;
+load("ErlScriptTimeout " ++ Timeout, [])->
+ case catch list_to_integer(httpd_conf:clean(Timeout)) of
+ TimeoutSec when integer(TimeoutSec) ->
+ {ok, [], {erl_script_timeout, TimeoutSec * 1000}};
+ _ ->
+ {error, ?NICE(httpd_conf:clean(Timeout) ++
+ " is an invalid ErlScriptTimeout")}
+ end;
+load("ErlScriptNoCache " ++ CacheArg, [])->
+ case catch list_to_atom(httpd_conf:clean(CacheArg)) of
+ true ->
+ {ok, [], {erl_script_nocache, true}};
+ false ->
+ {ok, [], {erl_script_nocache, false}};
+ _ ->
+ {error, ?NICE(httpd_conf:clean(CacheArg)++
+ " is an invalid ErlScriptNoCache directive")}
+ end.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+generate_response(ModData) ->
+ case scheme(ModData#mod.request_uri, ModData#mod.config_db) of
+ {eval, ESIBody, Modules} ->
+ eval(ModData, ESIBody, Modules);
+ {erl, ESIBody, Modules} ->
+ erl(ModData, ESIBody, Modules);
+ no_scheme ->
+ {proceed, ModData#mod.data}
+ end.
+
+scheme(RequestURI, ConfigDB) ->
+ case match_script(RequestURI, ConfigDB, erl_script_alias) of
+ no_match ->
+ case match_script(RequestURI, ConfigDB, eval_script_alias) of
+ no_match ->
+ no_scheme;
+ {EsiBody, ScriptModules} ->
+ {eval, EsiBody, ScriptModules}
+ end;
+ {EsiBody, ScriptModules} ->
+ {erl, EsiBody, ScriptModules}
+ end.
+
+match_script(RequestURI, ConfigDB, AliasType) ->
+ case httpd_util:multi_lookup(ConfigDB, AliasType) of
+ [] ->
+ no_match;
+ AliasAndMods ->
+ match_esi_script(RequestURI, AliasAndMods, AliasType)
+ end.
+
+match_esi_script(_, [], _) ->
+ no_match;
+match_esi_script(RequestURI, [{Alias,Modules} | Rest], AliasType) ->
+ AliasMatchStr = alias_match_str(Alias, AliasType),
+ case regexp:first_match(RequestURI, AliasMatchStr) of
+ {match, 1, Length} ->
+ {string:substr(RequestURI, Length + 1), Modules};
+ nomatch ->
+ match_esi_script(RequestURI, Rest, AliasType)
+ end.
+
+alias_match_str(Alias, erl_script_alias) ->
+ "^" ++ Alias ++ "/";
+alias_match_str(Alias, eval_script_alias) ->
+ "^" ++ Alias ++ "\\?".
+
+
+%%------------------------ Erl mechanism --------------------------------
+
+erl(#mod{method = Method} = ModData, ESIBody, Modules)
+ when Method == "GET"; Method == "HEAD"->
+ case httpd_util:split(ESIBody,":|%3A|/",2) of
+ {ok, [Module, FuncAndInput]} ->
+ case httpd_util:split(FuncAndInput,"[\?/]",2) of
+ {ok, [FunctionName, Input]} ->
+ generate_webpage(ModData, ESIBody, Modules,
+ Module, FunctionName, Input,
+ script_elements(FunctionName, Input));
+ {ok, [FunctionName]} ->
+ generate_webpage(ModData, ESIBody, Modules,
+ Module, FunctionName, "",
+ script_elements(FunctionName, ""));
+ {ok, BadRequest} ->
+ {proceed,[{status,{400,none, BadRequest}} |
+ ModData#mod.data]}
+ end;
+ {ok, BadRequest} ->
+ {proceed, [{status,{400, none, BadRequest}} | ModData#mod.data]}
+ end;
+
+erl(#mod{method = "POST", entity_body = Body} = ModData, ESIBody, Modules) ->
+ case httpd_util:split(ESIBody,":|%3A|/",2) of
+ {ok,[Module, Function]} ->
+ generate_webpage(ModData, ESIBody, Modules, Module,
+ Function, Body, [{entity_body, Body}]);
+ {ok, BadRequest} ->
+ {proceed,[{status, {400, none, BadRequest}} | ModData#mod.data]}
+ end.
+
+generate_webpage(ModData, ESIBody, ["all"], ModuleName, FunctionName,
+ Input, ScriptElements) ->
+ generate_webpage(ModData, ESIBody, [ModuleName], ModuleName,
+ FunctionName, Input, ScriptElements);
+generate_webpage(ModData, ESIBody, Modules, ModuleName, FunctionName,
+ Input, ScriptElements) ->
+ case lists:member(ModuleName, Modules) of
+ true ->
+ Env = httpd_script_env:create_env(esi, ModData, ScriptElements),
+ Module = list_to_atom(ModuleName),
+ Function = list_to_atom(FunctionName),
+ case erl_scheme_webpage_chunk(Module, Function,
+ Env, Input, ModData) of
+ {error, erl_scheme_webpage_chunk_undefined} ->
+ erl_scheme_webpage_whole(Module, Function, Env, Input,
+ ModData);
+ ResponseResult ->
+ ResponseResult
+ end;
+ false ->
+ {proceed, [{status, {403, ModData#mod.request_uri,
+ ?NICE("Client not authorized to evaluate: "
+ ++ ESIBody)}} | ModData#mod.data]}
+ end.
+
+%% Old API that waits for the dymnamic webpage to be totally generated
+%% before anythig is sent back to the client.
+erl_scheme_webpage_whole(Module, Function, Env, Input, ModData) ->
+ case (catch Module:Function(Env, Input)) of
+ {'EXIT',Reason} ->
+ {proceed, [{status, {500, none, Reason}} |
+ ModData#mod.data]};
+ Response ->
+ {Headers, Body} =
+ httpd_esi:parse_headers(lists:flatten(Response)),
+ Length = httpd_util:flatlength(Body),
+ case httpd_esi:handle_headers(Headers) of
+ {proceed, AbsPath} ->
+ {proceed, [{real_name, httpd_util:split_path(AbsPath)}
+ | ModData#mod.data]};
+ {ok, NewHeaders, StatusCode} ->
+ send_headers(ModData, StatusCode,
+ [{"content-length",
+ integer_to_list(Length)}| NewHeaders]),
+ case ModData#mod.method of
+ "HEAD" ->
+ {proceed, [{response, {already_sent, 200, 0}} |
+ ModData#mod.data]};
+ _ ->
+ httpd_response:send_body(ModData,
+ StatusCode, Body),
+ {proceed, [{response, {already_sent, 200,
+ Length}} |
+ ModData#mod.data]}
+ end
+ end
+ end.
+
+%% New API that allows the dynamic wepage to be sent back to the client
+%% in small chunks at the time during generation.
+erl_scheme_webpage_chunk(Mod, Func, Env, Input, ModData) ->
+ process_flag(trap_exit, true),
+ Self = self(),
+ %% Spawn worker that generates the webpage.
+ %% It would be nicer to use erlang:function_exported/3 but if the
+ %% Module isn't loaded the function says that it is not loaded
+ Pid = spawn_link(
+ fun() ->
+ case catch Mod:Func(Self, Env, Input) of
+ {'EXIT',{undef,_}} ->
+ %% Will force fallback on the old API
+ exit(erl_scheme_webpage_chunk_undefined);
+ _ ->
+ ok
+ end
+ end),
+
+ Response = deliver_webpage_chunk(ModData, Pid),
+
+ process_flag(trap_exit,false),
+ Response.
+
+deliver_webpage_chunk(#mod{config_db = Db} = ModData, Pid) ->
+ Timeout = erl_script_timeout(Db),
+ deliver_webpage_chunk(ModData, Pid, Timeout).
+
+deliver_webpage_chunk(#mod{config_db = Db} = ModData, Pid, Timeout) ->
+ case receive_headers(Timeout) of
+ {error, Reason} ->
+ %% Happens when webpage generator callback/3 is undefined
+ {error, Reason};
+ {Headers, Body} ->
+ case httpd_esi:handle_headers(Headers) of
+ {proceed, AbsPath} ->
+ {proceed, [{real_name, httpd_util:split_path(AbsPath)}
+ | ModData#mod.data]};
+ {ok, NewHeaders, StatusCode} ->
+ IsDisableChunkedSend =
+ httpd_response:is_disable_chunked_send(Db),
+ case (ModData#mod.http_version =/= "HTTP/1.1") or
+ (IsDisableChunkedSend) of
+ true ->
+ send_headers(ModData, StatusCode,
+ [{"connection", "close"} |
+ NewHeaders]);
+ false ->
+ send_headers(ModData, StatusCode,
+ [{"transfer-encoding",
+ "chunked"} | NewHeaders])
+ end,
+ handle_body(Pid, ModData, Body, Timeout, length(Body),
+ IsDisableChunkedSend)
+ end;
+ timeout ->
+ send_headers(ModData, {504, "Timeout"},[{"connection", "close"}]),
+ httpd_socket:close(ModData#mod.socket_type, ModData#mod.socket),
+ process_flag(trap_exit,false),
+ {proceed,[{response, {already_sent, 200, 0}} | ModData#mod.data]}
+ end.
+
+receive_headers(Timeout) ->
+ receive
+ {ok, Chunk} ->
+ httpd_esi:parse_headers(lists:flatten(Chunk));
+ {'EXIT', Pid, erl_scheme_webpage_chunk_undefined} when is_pid(Pid) ->
+ {error, erl_scheme_webpage_chunk_undefined};
+ {'EXIT', Pid, Reason} when is_pid(Pid) ->
+ exit({mod_esi_linked_process_died, Pid, Reason})
+ after Timeout ->
+ timeout
+ end.
+
+send_headers(ModData, StatusCode, HTTPHeaders) ->
+ ExtraHeaders = httpd_response:cache_headers(ModData),
+ httpd_response:send_header(ModData, StatusCode,
+ ExtraHeaders ++ HTTPHeaders).
+
+handle_body(_, #mod{method = "HEAD"} = ModData, _, _, Size, _) ->
+ process_flag(trap_exit,false),
+ {proceed, [{response, {already_sent, 200, Size}} | ModData#mod.data]};
+
+handle_body(Pid, ModData, Body, Timeout, Size, IsDisableChunkedSend) ->
+ httpd_response:send_chunk(ModData, Body, IsDisableChunkedSend),
+ receive
+ {ok, Data} ->
+ handle_body(Pid, ModData, Data, Timeout, Size + length(Data),
+ IsDisableChunkedSend);
+ {'EXIT', Pid, normal} when is_pid(Pid) ->
+ httpd_response:send_final_chunk(ModData, IsDisableChunkedSend),
+ {proceed, [{response, {already_sent, 200, Size}} |
+ ModData#mod.data]};
+ {'EXIT', Pid, Reason} when is_pid(Pid) ->
+ exit({mod_esi_linked_process_died, Pid, Reason})
+ after Timeout ->
+ process_flag(trap_exit,false),
+ {proceed,[{response, {already_sent, 200, Size}} |
+ ModData#mod.data]}
+ end.
+
+erl_script_timeout(Db) ->
+ httpd_util:lookup(Db, erl_script_timeout, ?DEFAULT_ERL_TIMEOUT).
+
+script_elements(FuncAndInput, Input) ->
+ case input_type(FuncAndInput) of
+ path_info ->
+ [{path_info, Input}];
+ query_string ->
+ [{query_string, Input}];
+ _ ->
+ []
+ end.
+
+input_type([]) ->
+ no_input;
+input_type([$/|_Rest]) ->
+ path_info;
+input_type([$?|_Rest]) ->
+ query_string;
+input_type([_First|Rest]) ->
+ input_type(Rest).
+
+%%------------------------ Eval mechanism --------------------------------
+
+eval(#mod{request_uri = ReqUri, method = "POST",
+ http_version = Version, data = Data}, _ESIBody, _Modules) ->
+ {proceed,[{status,{501,{"POST", ReqUri, Version},
+ ?NICE("Eval mechanism doesn't support method POST")}}|
+ Data]};
+
+eval(#mod{method = Method} = ModData, ESIBody, Modules)
+ when Method == "GET"; Method == "HEAD" ->
+ case is_authorized(ESIBody, Modules) of
+ true ->
+ case generate_webpage(ESIBody) of
+ {error, Reason} ->
+ {proceed, [{status, {500, none, Reason}} |
+ ModData#mod.data]};
+ {ok, Response} ->
+ {Headers, _} =
+ httpd_esi:parse_headers(lists:flatten(Response)),
+ case httpd_esi:handle_headers(Headers) of
+ {ok, _, StatusCode} ->
+ {proceed,[{response, {StatusCode, Response}} |
+ ModData#mod.data]};
+ {proceed, AbsPath} ->
+ {proceed, [{real_name, AbsPath} |
+ ModData#mod.data]}
+ end
+ end;
+ false ->
+ {proceed,[{status,
+ {403, ModData#mod.request_uri,
+ ?NICE("Client not authorized to evaluate: "
+ ++ ESIBody)}} | ModData#mod.data]}
+ end.
+
+generate_webpage(ESIBody) ->
+ (catch lib:eval_str(string:concat(ESIBody,". "))).
+
+is_authorized(_ESIBody, ["all"]) ->
+ true;
+is_authorized(ESIBody, Modules) ->
+ case regexp:match(ESIBody, "^[^\:(%3A)]*") of
+ {match, Start, Length} ->
+ lists:member(string:substr(ESIBody, Start, Length), Modules);
+ nomatch ->
+ false
+ end.
diff --git a/src/couch_inets/mod_get.erl b/src/couch_inets/mod_get.erl
new file mode 100644
index 00000000..b3c59875
--- /dev/null
+++ b/src/couch_inets/mod_get.erl
@@ -0,0 +1,125 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_get).
+-export([do/1]).
+-include("httpd.hrl").
+
+%% do
+
+do(Info) ->
+ ?DEBUG("do -> entry",[]),
+ case Info#mod.method of
+ "GET" ->
+ case httpd_util:key1search(Info#mod.data,status) of
+ %% A status code has been generated!
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed,Info#mod.data};
+ %% No status code has been generated!
+ undefined ->
+ case httpd_util:key1search(Info#mod.data,response) of
+ %% No response has been generated!
+ undefined ->
+ do_get(Info);
+ %% A response has been generated or sent!
+ _Response ->
+ {proceed,Info#mod.data}
+ end
+ end;
+ %% Not a GET method!
+ _ ->
+ {proceed,Info#mod.data}
+ end.
+
+
+do_get(Info) ->
+ ?DEBUG("do_get -> Request URI: ~p",[Info#mod.request_uri]),
+ Path = mod_alias:path(Info#mod.data, Info#mod.config_db,
+ Info#mod.request_uri),
+ {FileInfo, LastModified} = get_modification_date(Path),
+
+ send_response(Info#mod.socket,Info#mod.socket_type, Path, Info,
+ FileInfo, LastModified).
+
+
+%% The common case when no range is specified
+send_response(_Socket, _SocketType, Path, Info, FileInfo, LastModified)->
+ %% Send the file!
+ %% Find the modification date of the file
+ case file:open(Path,[raw,binary]) of
+ {ok, FileDescriptor} ->
+ ?DEBUG("do_get -> FileDescriptor: ~p",[FileDescriptor]),
+ Suffix = httpd_util:suffix(Path),
+ MimeType = httpd_util:lookup_mime_default(Info#mod.config_db,
+ Suffix,"text/plain"),
+ %% FileInfo = file:read_file_info(Path),
+ Size = integer_to_list(FileInfo#file_info.size),
+ Headers = case Info#mod.http_version of
+ "HTTP/1.1" ->
+ [{content_type, MimeType},
+ {etag, httpd_util:create_etag(FileInfo)},
+ {content_length, Size}|LastModified];
+ %% OTP-4935
+ _ ->
+ %% i.e http/1.0 and http/0.9
+ [{content_type, MimeType},
+ {content_length, Size}|LastModified]
+ end,
+ send(Info, 200, Headers, FileDescriptor),
+ file:close(FileDescriptor),
+ {proceed,[{response,{already_sent,200,
+ FileInfo#file_info.size}},
+ {mime_type,MimeType}|Info#mod.data]};
+ {error, Reason} ->
+ Status = httpd_file:handle_error(Reason, "open", Info, Path),
+ {proceed,
+ [{status, Status}| Info#mod.data]}
+ end.
+
+%% send
+
+send(#mod{socket = Socket, socket_type = SocketType} = Info,
+ StatusCode, Headers, FileDescriptor) ->
+ ?DEBUG("send -> send header",[]),
+ httpd_response:send_header(Info, StatusCode, Headers),
+ send_body(SocketType,Socket,FileDescriptor).
+
+
+send_body(SocketType,Socket,FileDescriptor) ->
+ case file:read(FileDescriptor,?FILE_CHUNK_SIZE) of
+ {ok,Binary} ->
+ ?DEBUG("send_body -> send another chunk: ~p",[size(Binary)]),
+ case httpd_socket:deliver(SocketType,Socket,Binary) of
+ socket_closed ->
+ ?LOG("send_body -> socket closed while sending",[]),
+ socket_close;
+ _ ->
+ send_body(SocketType,Socket,FileDescriptor)
+ end;
+ eof ->
+ ?DEBUG("send_body -> done with this file",[]),
+ eof
+ end.
+
+get_modification_date(Path)->
+ {ok, FileInfo0} = file:read_file_info(Path),
+ LastModified =
+ case catch httpd_util:rfc1123_date(FileInfo0#file_info.mtime) of
+ Date when is_list(Date) -> [{last_modified, Date}];
+ _ -> []
+ end,
+ {FileInfo0, LastModified}.
diff --git a/src/couch_inets/mod_head.erl b/src/couch_inets/mod_head.erl
new file mode 100644
index 00000000..3b78ff42
--- /dev/null
+++ b/src/couch_inets/mod_head.erl
@@ -0,0 +1,73 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_head).
+-export([do/1]).
+
+-include("httpd.hrl").
+
+-define(VMODULE,"HEAD").
+
+%% do
+
+do(Info) ->
+ case Info#mod.method of
+ "HEAD" ->
+ case httpd_util:key1search(Info#mod.data,status) of
+ %% A status code has been generated!
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed,Info#mod.data};
+ %% No status code has been generated!
+ _undefined ->
+ case httpd_util:key1search(Info#mod.data,response) of
+ %% No response has been generated!
+ undefined ->
+ do_head(Info);
+ %% A response has been sent! Nothing to do about it!
+ {already_sent, _StatusCode, _Size} ->
+ {proceed,Info#mod.data};
+ %% A response has been generated!
+ {_StatusCode, _Response} ->
+ {proceed,Info#mod.data}
+ end
+ end;
+ %% Not a HEAD method!
+ _ ->
+ {proceed,Info#mod.data}
+ end.
+
+do_head(Info) ->
+ Path = mod_alias:path(Info#mod.data,
+ Info#mod.config_db,
+ Info#mod.request_uri),
+ Suffix = httpd_util:suffix(Path),
+ %% Does the file exists?
+ case file:read_file_info(Path) of
+ {ok, FileInfo} ->
+ MimeType =
+ httpd_util:lookup_mime_default(Info#mod.config_db,
+ Suffix,"text/plain"),
+ Length = io_lib:write(FileInfo#file_info.size),
+ Head =
+ [{content_type, MimeType},
+ {content_length, Length}, {code,200}],
+ {proceed,[{response, {response, Head, nobody}} | Info#mod.data]};
+ {error, Reason} ->
+ Status = httpd_file:handle_error(Reason, "access", Info, Path),
+ {proceed,
+ [{status, Status} | Info#mod.data]}
+ end.
diff --git a/src/couch_inets/mod_htaccess.erl b/src/couch_inets/mod_htaccess.erl
new file mode 100644
index 00000000..7f0d454f
--- /dev/null
+++ b/src/couch_inets/mod_htaccess.erl
@@ -0,0 +1,1075 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+
+-module(mod_htaccess).
+
+-export([do/1, load/2]).
+
+-include("httpd.hrl").
+
+%% We will not make the change to use base64 in stdlib in inets just yet.
+%% it will be included in the next major release of inets.
+-compile({nowarn_deprecated_function, {http_base_64, encode, 1}}).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Public methods that interface the eswapi %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%----------------------------------------------------------------------
+% Public method called by the webbserver to insert the data about
+% Names on accessfiles
+%----------------------------------------------------------------------
+load("AccessFileName" ++ FileNames, _Context)->
+ CleanFileNames=httpd_conf:clean(FileNames),
+ {ok,[],{access_files,string:tokens(CleanFileNames," ")}}.
+
+
+%----------------------------------------------------------------------
+% Public method that the webbserver calls to control the page
+%----------------------------------------------------------------------
+do(Info)->
+ case httpd_util:key1search(Info#mod.data,status) of
+ {_Status_code, _PhraseArgs, _Reason}->
+ {proceed,Info#mod.data};
+ undefined ->
+ control_path(Info)
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%% The functions that start the control if there is a accessfile %%
+%% and if so controls if the dir is allowed or not %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%----------------------------------------------------------------------
+%Info = record mod as specified in httpd.hrl
+%returns either {proceed,Info#mod.data}
+%{proceed,[{status,403....}|Info#mod.data]}
+%{proceed,[{status,401....}|Info#mod.data]}
+%{proceed,[{status,500....}|Info#mod.data]}
+%----------------------------------------------------------------------
+control_path(Info) ->
+ Path = mod_alias:path(Info#mod.data,
+ Info#mod.config_db,
+ Info#mod.request_uri),
+ case isErlScriptOrNotAccessibleFile(Path,Info) of
+ true->
+ {proceed,Info#mod.data};
+ false->
+ case getHtAccessData(Path,Info)of
+ {ok,public}->
+ %%There was no restrictions on the page continue
+ {proceed,Info#mod.data};
+ {error, _Reason} ->
+ %%Something got wrong continue or quit??????????????????/
+ {proceed,Info#mod.data};
+ {accessData,AccessData}->
+ controlAllowedMethod(Info,AccessData)
+ end
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%% These methods controls that the method the client used in the %%
+%% request is one of the limited %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%----------------------------------------------------------------------
+%Control that if the accessmethod used is in the list of modes to challenge
+%
+%Info is the mod record as specified in httpd.hrl
+%AccessData is an ets table whit the data in the .htaccessfiles
+%----------------------------------------------------------------------
+controlAllowedMethod(Info,AccessData)->
+ case allowedRequestMethod(Info,AccessData) of
+ allow->
+ %%The request didnt use one of the limited methods
+ ets:delete(AccessData),
+ {proceed,Info#mod.data};
+ challenge->
+ authenticateUser(Info,AccessData)
+ end.
+
+%----------------------------------------------------------------------
+%Check the specified access method in the .htaccessfile
+%----------------------------------------------------------------------
+allowedRequestMethod(Info,AccessData)->
+ case ets:lookup(AccessData,limit) of
+ [{limit,all}]->
+ challenge;
+ [{limit,Methods}]->
+ isLimitedRequestMethod(Info,Methods)
+ end.
+
+
+%----------------------------------------------------------------------
+%Check the specified accessmethods in the .htaccesfile against the users
+%accessmethod
+%
+%Info is the record from the do call
+%Methods is a list of the methods specified in the .htaccessfile
+%----------------------------------------------------------------------
+isLimitedRequestMethod(Info,Methods)->
+ case lists:member(Info#mod.method,Methods) of
+ true->
+ challenge;
+ false ->
+ allow
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%% These methods controls that the user comes from an allowwed net %%
+%% and if so wheather its a valid user or a challenge shall be %%
+%% generated %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%----------------------------------------------------------------------
+%The first thing to control is that the user is from a network
+%that has access to the page
+%----------------------------------------------------------------------
+authenticateUser(Info,AccessData)->
+ case controlNet(Info,AccessData) of
+ allow->
+ %the network is ok control that it is an allowed user
+ authenticateUser2(Info,AccessData);
+ deny->
+ %The user isnt allowed to access the pages from that network
+ ets:delete(AccessData),
+ {proceed,[{status,{403,Info#mod.request_uri,
+ "Restricted area not allowed from your network"}}|Info#mod.data]}
+ end.
+
+
+%----------------------------------------------------------------------
+%The network the user comes from is allowed to view the resources
+%control whether the user needsto supply a password or not
+%----------------------------------------------------------------------
+authenticateUser2(Info,AccessData)->
+ case ets:lookup(AccessData,require) of
+ [{require,AllowedUsers}]->
+ case ets:lookup(AccessData,auth_name) of
+ [{auth_name,Realm}]->
+ authenticateUser2(Info,AccessData,Realm,AllowedUsers);
+ _NoAuthName->
+ ets:delete(AccessData),
+ {break,[{status,{500,none,
+ ?NICE("mod_htaccess:AuthName directive "
+ "not specified")}}]}
+ end;
+ [] ->
+ %%No special user is required the network is ok so let
+ %%the user in
+ ets:delete(AccessData),
+ {proceed,Info#mod.data}
+ end.
+
+
+%----------------------------------------------------------------------
+%The user must send a userId and a password to get the resource
+%Control if its already in the http-request
+%if the file with users is bad send an 500 response
+%----------------------------------------------------------------------
+authenticateUser2(Info,AccessData,Realm,AllowedUsers)->
+ case authenticateUser(Info,AccessData,AllowedUsers) of
+ allow ->
+ ets:delete(AccessData),
+ {user,Name, _Pwd} = getAuthenticatingDataFromHeader(Info),
+ {proceed, [{remote_user_name,Name}|Info#mod.data]};
+ challenge->
+ ets:delete(AccessData),
+ ReasonPhrase = httpd_util:reason_phrase(401),
+ Message = httpd_util:message(401,none,Info#mod.config_db),
+ {proceed,
+ [{response,
+ {401,
+ ["WWW-Authenticate: Basic realm=\"",Realm,
+ "\"\r\n\r\n","<HTML>\n<HEAD>\n<TITLE>",
+ ReasonPhrase,"</TITLE>\n",
+ "</HEAD>\n<BODY>\n<H1>",ReasonPhrase,
+ "</H1>\n",Message,"\n</BODY>\n</HTML>\n"]}}|
+ Info#mod.data]};
+ deny->
+ ets:delete(AccessData),
+ {break,[{status,{500,none,
+ ?NICE("mod_htaccess:Bad path to user "
+ "or group file")}}]}
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%% Methods that validate the netwqork the user comes from %%
+%% according to the allowed networks %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%---------------------------------------------------------------------
+%Controls the users networkaddress agains the specifed networks to
+%allow or deny
+%
+%returns either allow or deny
+%----------------------------------------------------------------------
+controlNet(Info,AccessData)->
+ UserNetwork=getUserNetworkAddress(Info),
+ case getAllowDenyOrder(AccessData) of
+ {_deny,[],_allow,[]}->
+ allow;
+ {deny,[],allow,AllowedNetworks}->
+ controlIfAllowed(AllowedNetworks,UserNetwork,allow,deny);
+ {allow,AllowedNetworks,deny,[]}->
+ controlIfAllowed(AllowedNetworks,UserNetwork,allow,deny);
+
+ {deny,DeniedNetworks,allow,[]}->
+ controlIfAllowed(DeniedNetworks,UserNetwork,allow,deny);
+ {allow,[],deny,DeniedNetworks}->
+ controlIfAllowed(DeniedNetworks,UserNetwork,allow,deny);
+
+ {deny,DeniedNetworks,allow,AllowedNetworks}->
+ controlDenyAllow(DeniedNetworks,AllowedNetworks,UserNetwork);
+ {allow,AllowedNetworks,deny,DeniedNetworks}->
+ controlAllowDeny(AllowedNetworks,DeniedNetworks,UserNetwork)
+ end.
+
+
+%----------------------------------------------------------------------
+%Returns the users IP-Number
+%----------------------------------------------------------------------
+getUserNetworkAddress(Info)->
+ {_Socket,Address}=(Info#mod.init_data)#init_data.peername,
+ Address.
+
+
+%----------------------------------------------------------------------
+%Control the users Ip-number against the ip-numbers in the .htaccessfile
+%----------------------------------------------------------------------
+controlIfAllowed(AllowedNetworks,UserNetwork,IfAllowed,IfDenied)->
+ case AllowedNetworks of
+ [{allow,all}]->
+ IfAllowed;
+ [{deny,all}]->
+ IfDenied;
+ [{deny,Networks}]->
+ memberNetwork(Networks,UserNetwork,IfDenied,IfAllowed);
+ [{allow,Networks}]->
+ memberNetwork(Networks,UserNetwork,IfAllowed,IfDenied);
+ _Error->
+ IfDenied
+ end.
+
+
+%---------------------------------------------------------------------%
+%The Denycontrol isn't neccessary to preform since the allow control %
+%override the deny control %
+%---------------------------------------------------------------------%
+controlDenyAllow(_DeniedNetworks, AllowedNetworks, UserNetwork)->
+ case AllowedNetworks of
+ [{allow, all}]->
+ allow;
+ [{allow, Networks}]->
+ case memberNetwork(Networks, UserNetwork) of
+ true->
+ allow;
+ false->
+ deny
+ end
+ end.
+
+
+%----------------------------------------------------------------------%
+%Control that the user is in the allowed list if so control that the %
+%network is in the denied list
+%----------------------------------------------------------------------%
+controlAllowDeny(AllowedNetworks,DeniedNetworks,UserNetwork)->
+ case controlIfAllowed(AllowedNetworks,UserNetwork,allow,deny) of
+ allow->
+ controlIfAllowed(DeniedNetworks,UserNetwork,deny,allow);
+ deny ->
+ deny
+ end.
+
+%----------------------------------------------------------------------
+%Check whether the user's IP number is in the list of denied or
+%allowed networks, mapping the boolean result onto the caller-supplied
+%IfTrue/IfFalse terms.
+%----------------------------------------------------------------------
+memberNetwork(Networks,UserNetwork,IfTrue,IfFalse)->
+    case memberNetwork(Networks,UserNetwork) of
+        true->
+            IfTrue;
+        false->
+            IfFalse
+    end.
+
+
+%----------------------------------------------------------------------
+%Regexp-match the user's IP address against every entry (full IP or
+%subnet prefix) in Networks; true if at least one entry matches at
+%position 1 (i.e. anchored at the start of the address).
+%NOTE(review): uses the old 'regexp' module, long since replaced by
+%'re' in modern OTP - consistent with the rest of this file.
+%----------------------------------------------------------------------
+memberNetwork(Networks,UserNetwork)->
+    case lists:filter(fun(Net)->
+                              case regexp:match(UserNetwork,
+                                                formatRegexp(Net)) of
+                                  {match,1,_}->
+                                      true;
+                                  _NotSubNet ->
+                                      false
+                              end
+                      end,Networks) of
+        []->
+            false;
+        _MemberNetWork ->
+            true
+    end.
+
+
+%----------------------------------------------------------------------
+%Create an anchored regexp from an IP number or prefix, e.g.
+%"127.0.0"  -> "^127[.]0[.]0.*"
+%"127.0.0." -> "^127[.]0[.]0[.].*"
+%A trailing dot in the input (detected below as len-rchr == 0) is
+%re-added as "[.]" so the prefix only matches on an octet boundary.
+%----------------------------------------------------------------------
+formatRegexp(Net)->
+    [SubNet1|SubNets]=string:tokens(Net,"."),
+    NetRegexp=lists:foldl(fun(SubNet,Newnet)->
+                                  Newnet ++ "[.]" ++SubNet
+                          end,"^"++SubNet1,SubNets),
+    case string:len(Net)-string:rchr(Net,$.) of
+        0->
+            NetRegexp++"[.].*";
+        _->
+            NetRegexp++".*"
+    end.
+
+%----------------------------------------------------------------------
+%Return the order in which the allow and deny checks shall be
+%performed, together with the corresponding network lists from the
+%AccessData table. If no order directive was specified, default to
+%allow,deny since it is stricter than deny,allow.
+%----------------------------------------------------------------------
+getAllowDenyOrder(AccessData)->
+    case ets:lookup(AccessData,order) of
+        [{order,{deny,allow}}]->
+            {deny,ets:lookup(AccessData,deny),
+             allow,ets:lookup(AccessData,allow)};
+        _DefaultOrder->
+            {allow,ets:lookup(AccessData,allow),
+             deny,ets:lookup(AccessData,deny)}
+    end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%                                                                  %%
+%% The methods that validate the user                               %%
+%%                                                                  %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%----------------------------------------------------------------------
+%Check whether there is any authentication data in the request header;
+%if so, validate it against the users in AllowedUsers. Missing or
+%malformed credentials result in a challenge rather than a denial.
+%----------------------------------------------------------------------
+authenticateUser(Info,AccessData,AllowedUsers)->
+    case getAuthenticatingDataFromHeader(Info) of
+        {user,User,PassWord}->
+            authenticateUser(Info,AccessData,AllowedUsers,
+                             {user,User,PassWord});
+        {error,nouser}->
+            challenge;
+        {error, _BadData}->
+            challenge
+    end.
+
+
+%----------------------------------------------------------------------
+%Extract the authentication data from the http-request: decode the
+%"Authorization: Basic <base64>" header and split the result on the
+%first ":" into {user,User,PassWord}.
+%----------------------------------------------------------------------
+getAuthenticatingDataFromHeader(Info)->
+    PrsedHeader=Info#mod.parsed_header,
+    case httpd_util:key1search(PrsedHeader,"authorization" ) of
+        undefined->
+            {error,nouser};
+        %the "Basic " prefix, matched character by character
+        [$B,$a,$s,$i,$c,$\ |EncodedString] = Credentials ->
+            case (catch http_base_64:decode(EncodedString)) of
+                {'EXIT',{function_clause, _}} ->
+                    {error, Credentials};
+                UnCodedString ->
+                    case httpd_util:split(UnCodedString,":",2) of
+                        {ok,[User,PassWord]}->
+                            {user,User,PassWord};
+                        {error,Error}->
+                            {error,Error}
+                    end
+            end;
+        BadCredentials ->
+            {error,BadCredentials}
+    end.
+
+%----------------------------------------------------------------------
+%Returns {ok,Members}: the concatenated members of every group in
+%Groups whose name appears in AllowedGroups.
+%----------------------------------------------------------------------
+getGroupMembers(Groups,AllowedGroups)->
+    Allowed=lists:foldl(fun({group,Name,Members},AllowedMembers)->
+                                case lists:member(Name,AllowedGroups) of
+                                    true->
+                                        AllowedMembers++Members;
+                                    false ->
+                                        AllowedMembers
+                                end
+                        end,[],Groups),
+    {ok,Allowed}.
+
+%authenticateUser/4: validate the user against the required users
+%and/or groups. The first two clauses strip an empty users or groups
+%requirement down to the non-empty one.
+authenticateUser(Info,AccessData,{{users,[]},{groups,Groups}},User)->
+    authenticateUser(Info,AccessData,{groups,Groups},User);
+authenticateUser(Info,AccessData,{{users,Users},{groups,[]}},User)->
+    authenticateUser(Info,AccessData,{users,Users},User);
+
+%Both users and groups are required: allow if either check allows,
+%then challenge if either challenges, otherwise deny.
+authenticateUser(Info,AccessData,{{users,Users},{groups,Groups}},User)->
+    AllowUser=authenticateUser(Info,AccessData,{users,Users},User),
+    AllowGroup=authenticateUser(Info,AccessData,{groups,Groups},User),
+    case {AllowGroup,AllowUser} of
+        {_,allow}->
+            allow;
+        {allow,_}->
+            allow;
+        {challenge,_}->
+            challenge;
+        {_,challenge}->
+            challenge;
+        {_deny,_deny}->
+            deny
+    end;
+
+
+%----------------------------------------------------------------------
+%Check that the user is a member of one of the allowed groups: expand
+%the groups to their members and re-run as a plain user check.
+%----------------------------------------------------------------------
+authenticateUser(Info,AccessData,{groups,AllowedGroups},{user,User,PassWord})->
+    case getUsers(AccessData,group_file) of
+        {group_data,Groups}->
+            {ok, Members } = getGroupMembers(Groups,AllowedGroups),
+            authenticateUser(Info,AccessData,{users,Members},
+                             {user,User,PassWord});
+        {error, _BadData}->
+            deny
+    end;
+
+
+%----------------------------------------------------------------------
+%Check that the user is one of the allowed users and that the password
+%matches the one stored in the user file.
+%----------------------------------------------------------------------
+authenticateUser(_Info,AccessData,{users,AllowedUsers},{user,User,PassWord})->
+    case lists:member(User,AllowedUsers) of
+        true->
+            %Get the usernames and passwords from the file
+            case getUsers(AccessData,user_file) of
+                {error, _BadData}->
+                    deny;
+                {user_data,Users}->
+                    %Users is a list of the users in
+                    %the userfile [{user,User,Passwd}]
+                    checkPassWord(Users,{user,User,PassWord})
+            end;
+        false ->
+            challenge
+    end.
+
+
+%----------------------------------------------------------------------
+%Check that User={user,"UserName","PassWd"} is a member of the list of
+%Users read from the user file; allow on match, otherwise challenge.
+%----------------------------------------------------------------------
+checkPassWord(Users,User)->
+    case lists:member(User,Users) of
+        true->
+            allow;
+        false->
+            challenge
+    end.
+
+
+%----------------------------------------------------------------------
+%Get the users in the specified file.
+%UserOrGroup is an atom (user_file | group_file) that specifies
+%whether it is a group file or a user file.
+%----------------------------------------------------------------------
+getUsers({file,FileName},UserOrGroup)->
+    case file:open(FileName,[read]) of
+        {ok,AccessFileHandle} ->
+            getUsers({stream,AccessFileHandle},[],UserOrGroup);
+        {error,Reason} ->
+            {error,{Reason,FileName}}
+    end;
+
+
+%----------------------------------------------------------------------
+%Entry clause: look up the file name registered under UserOrGroup in
+%the AccessData table and start reading that file.
+%----------------------------------------------------------------------
+
+getUsers(AccessData,UserOrGroup)->
+    case ets:lookup(AccessData,UserOrGroup) of
+        [{UserOrGroup,File}]->
+            getUsers({file,File},UserOrGroup);
+        _ ->
+            {error,noUsers}
+    end.
+
+
+%----------------------------------------------------------------------
+%Read lines from the file handle File, accumulating parsed entries in
+%FileData; when end of file is reached, return the list in a tuple
+%{user_data|group_data,FileData}.
+%----------------------------------------------------------------------
+getUsers({stream,File},FileData,UserOrGroup)->
+    case io:get_line(File,[]) of
+        eof when UserOrGroup==user_file->
+            {user_data,FileData};
+        eof when UserOrGroup ==group_file->
+            {group_data,FileData};
+        Line ->
+            getUsers({stream,File},
+                     formatUser(Line,FileData,UserOrGroup),UserOrGroup)
+    end.
+
+
+%----------------------------------------------------------------------
+%If the line is a comment (starts with #), skip it.
+%----------------------------------------------------------------------
+formatUser([$#|_UserDataComment],FileData,_UserOrgroup)->
+    FileData;
+
+
+%----------------------------------------------------------------------
+%A user-file line is "Username:Passwd\n": strip the newline and split
+%the entry into {user,Name,PassWord}. A group-file line is handed to
+%parseGroupData/2. Malformed lines are silently skipped.
+%----------------------------------------------------------------------
+formatUser(UserData,FileData,UserOrGroup)->
+    case string:tokens(UserData," \r\n")of
+        [User| _Whitespace] when UserOrGroup==user_file->
+            case string:tokens(User,":") of
+                [Name,PassWord]->
+                    [{user,Name,PassWord}|FileData];
+                _Error->
+                    FileData
+            end;
+        GroupData when UserOrGroup==group_file ->
+            parseGroupData(GroupData,FileData);
+        _Error ->
+            FileData
+    end.
+
+
+%----------------------------------------------------------------------
+%If everything is right, GroupData is on the form
+%["GroupName:", "Member1", "Member2", ..., "MemberN"]
+%----------------------------------------------------------------------
+parseGroupData([GroupName|GroupData],FileData)->
+    [{group,formatGroupName(GroupName),GroupData}|FileData].
+
+
+%----------------------------------------------------------------------
+%A group-file line is "GroupName: Member1 Member2 ... MemberN";
+%remove the trailing ':' from the group name.
+%----------------------------------------------------------------------
+formatGroupName(GroupName)->
+    string:strip(GroupName,right,$:).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%                                                                  %%
+%% Functions that parse the access files                            %%
+%%                                                                  %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%----------------------------------------------------------------------
+%Check whether the asset is a real file on disk (returns false) or a
+%request for a virtual asset that has no file info (returns true).
+%----------------------------------------------------------------------
+isErlScriptOrNotAccessibleFile(Path, _Info)->
+    case file:read_file_info(Path) of
+        {ok,_fileInfo}->
+            false;
+        {error,_Reason} ->
+            true
+    end.
+
+
+%----------------------------------------------------------------------
+%Path = path to the requested file (string); Info = #mod record.
+%Returns {ok,public} when no access file applies, {accessData,Ets}
+%with the collected directives, or {error,Reason}.
+%----------------------------------------------------------------------
+getHtAccessData(Path,Info)->
+    HtAccessFileNames=getHtAccessFileNames(Info),
+    case getData(Path,Info,HtAccessFileNames) of
+        {ok,public}->
+            {ok,public};
+        {accessData,AccessData}->
+            {accessData,AccessData};
+        {error,Reason} ->
+            {error,Reason}
+    end.
+
+
+%----------------------------------------------------------------------
+%Returns the configured names of the access files, defaulting to
+%[".htaccess"] when no access_files directive is set.
+%----------------------------------------------------------------------
+getHtAccessFileNames(Info)->
+    case httpd_util:lookup(Info#mod.config_db,access_files) of
+        undefined->
+            [".htaccess"];
+        Files->
+            Files
+    end.
+%----------------------------------------------------------------------
+%HtAccessFileNames=["AccessFileName1",...,"AccessFileNameN"]
+%Split the request path on "/" and continue with the parts.
+%----------------------------------------------------------------------
+getData(Path,Info,HtAccessFileNames)->
+    case regexp:split(Path,"/") of
+        {error,Error}->
+            {error,Error};
+        {ok,SplittedPath}->
+            getData2(HtAccessFileNames,SplittedPath,Info)
+    end.
+
+
+%----------------------------------------------------------------------
+%Join the split path back together up to the path that is the alias or
+%the document root, since no access files need to be searched for
+%above that point.
+%----------------------------------------------------------------------
+getData2(HtAccessFileNames,SplittedPath,Info)->
+    case getRootPath(SplittedPath,Info) of
+        {error,Path}->
+            {error,Path};
+        {ok,StartPath,RestOfSplittedPath} ->
+            getData2(HtAccessFileNames,StartPath,RestOfSplittedPath,Info)
+    end.
+
+
+%----------------------------------------------------------------------
+%HtAccessFileNames: the names the access files may have.
+%StartPath: the shortest match against all aliases and the document
+%root. RestOfSplittedPath: the remaining parts of the path.
+%No access file found means the directory is public.
+%----------------------------------------------------------------------
+getData2(HtAccessFileNames, StartPath, RestOfSplittedPath, _Info)->
+    case getHtAccessFiles(HtAccessFileNames,StartPath,RestOfSplittedPath) of
+        []->
+            %No access file - quit, it is a public directory
+            {ok,public};
+        Files ->
+            loadAccessFilesData(Files)
+    end.
+
+
+%----------------------------------------------------------------------
+%Load the data in the access files specified by e.g.
+% AccessFiles=["/home/public/html/accessfile",
+%              "/home/public/html/priv/accessfile"]
+%into a fresh ets table.
+%----------------------------------------------------------------------
+loadAccessFilesData(AccessFiles)->
+    loadAccessFilesData(AccessFiles,ets:new(accessData,[])).
+
+
+%----------------------------------------------------------------------
+%Flatten the stored {context,Values} entry: delete it from the table
+%and insert each of its directives as a top-level entry.
+%----------------------------------------------------------------------
+contextToValues(AccessData)->
+    case ets:lookup(AccessData,context) of
+        [{context,Values}]->
+            ets:delete(AccessData,context),
+            insertContext(AccessData,Values),
+            {accessData,AccessData};
+        _Error->
+            {error,errorInAccessFile}
+    end.
+
+
+%Insert the directives collected inside a <Limit> context into the
+%AccessData table, merging allow/deny/require entries with any that
+%are already present.
+insertContext(_AccessData, [])->
+    ok;
+
+insertContext(AccessData,[{allow,From}|Values])->
+    insertDenyAllowContext(AccessData,{allow,From}),
+    insertContext(AccessData,Values);
+
+insertContext(AccessData,[{deny,From}|Values])->
+    insertDenyAllowContext(AccessData,{deny,From}),
+    insertContext(AccessData,Values);
+
+%Merge a require directive with any stored users/groups requirement.
+insertContext(AccessData,[{require,{GrpOrUsr,Members}}|Values])->
+    case ets:lookup(AccessData,require) of
+        []when GrpOrUsr==users->
+            ets:insert(AccessData,{require,{{users,Members},{groups,[]}}});
+
+        [{require,{{users,Users},{groups,Groups}}}]when GrpOrUsr==users ->
+            ets:insert(AccessData,{require,{{users,Users++Members},
+                                            {groups,Groups}}});
+        []when GrpOrUsr==groups->
+            ets:insert(AccessData,{require,{{users,[]},{groups,Members}}});
+
+        [{require,{{users,Users},{groups,Groups}}}]when GrpOrUsr==groups ->
+            ets:insert(AccessData,{require,{{users,Users},
+                                            {groups,Groups++Members}}})
+    end,
+    insertContext(AccessData,Values);
+
+
+
+%%limit and order directives need no transforming; they are just inserted
+insertContext(AccessData,[Elem|Values])->
+    ets:insert(AccessData,Elem),
+    insertContext(AccessData,Values).
+
+
+%----------------------------------------------------------------------
+%Insert an allow/deny network directive into the AccessData ets table,
+%merging with any networks already stored under the same key. An
+%'all' value always wins over a stored network list.
+%----------------------------------------------------------------------
+insertDenyAllowContext(AccessData,{AllowDeny,From})->
+    case From of
+        all ->
+            ets:insert(AccessData,{AllowDeny,all});
+        _AllowedSubnets ->
+            case ets:lookup(AccessData,AllowDeny) of
+                []->
+                    ets:insert(AccessData,{AllowDeny,From});
+                [{AllowDeny,all}]->
+                    ok;
+                [{AllowDeny,Networks}]->
+                    %BUGFIX: merge under the directive's own key; this
+                    %previously inserted under the literal key 'allow',
+                    %so deny networks were appended to the allow list.
+                    ets:insert(AccessData,{AllowDeny,Networks++From})
+            end
+    end.
+
+%All files loaded: flatten the collected context data into plain
+%entries and return the table.
+loadAccessFilesData([],AccessData)->
+    %transform context entries to plain limit entries
+    contextToValues(AccessData),
+    {accessData,AccessData};
+
+%----------------------------------------------------------------------
+%Take each file in the list and load its data into the ets table
+%AccessData. Stop early when a file forbids overriding; abort and
+%delete the table on a parse error.
+%----------------------------------------------------------------------
+loadAccessFilesData([FileName|FileNames],AccessData)->
+    case loadAccessFileData({file,FileName},AccessData) of
+        overRide->
+            loadAccessFilesData(FileNames,AccessData);
+        noOverRide ->
+            {accessData,AccessData};
+        error->
+            ets:delete(AccessData),
+            {error,errorInAccessFile}
+    end.
+
+%----------------------------------------------------------------------
+%Open a file handle to the specified file; an unreadable file is
+%treated as absent (overRide, i.e. continue with the next file).
+%----------------------------------------------------------------------
+loadAccessFileData({file,FileName},AccessData)->
+    case file:open(FileName,[read]) of
+        {ok,AccessFileHandle}->
+            loadAccessFileData({stream,AccessFileHandle},AccessData,[]);
+        {error, _Reason} ->
+            overRide
+    end.
+
+%----------------------------------------------------------------------
+%Look at each line in the file and collect the directives; when end of
+%file is reached, insert them into the table and check whether
+%override is still allowed, so the caller knows whether to continue
+%with deeper access files.
+%----------------------------------------------------------------------
+loadAccessFileData({stream,File},AccessData,FileData)->
+    case io:get_line(File,[]) of
+        eof->
+            insertData(AccessData,FileData),
+            case ets:match_object(AccessData,{'_',error}) of
+                []->
+                    %No parse error: check that at least some of the
+                    %values may still be overridden
+                    case ets:match_object(AccessData,
+                                          {allow_over_ride,none}) of
+                        []->
+                            overRide;
+                        _NoOverride->
+                            noOverRide
+                    end;
+                _ ->
+                    error
+            end;
+        Line ->
+            loadAccessFileData({stream,File},AccessData,
+                               insertLine(string:strip(Line,left),FileData))
+    end.
+
+%----------------------------------------------------------------------
+%AccessData is an ets table holding the data found so far; FileData is
+%the list of directives from the last parsed file. Before insertion,
+%check which directives are allowed to be overridden.
+%----------------------------------------------------------------------
+insertData(AccessData,{{context,Values},FileData})->
+    %FileData still carries an open context tuple (no </Limit> seen);
+    %flatten it into the directive list first
+    insertData(AccessData,[{context,Values}|FileData]);
+
+insertData(AccessData,FileData)->
+    case ets:lookup(AccessData,allow_over_ride) of
+        [{allow_over_ride,all}]->
+            lists:foreach(fun(Elem)->
+                                  ets:insert(AccessData,Elem)
+                          end,FileData);
+        []->
+            lists:foreach(fun(Elem)->
+                                  ets:insert(AccessData,Elem)
+                          end,FileData);
+        [{allow_over_ride,Directives}]when list(Directives)->
+            %only directives not named in the stored AllowOverRide list
+            %may be replaced by a deeper access file
+            lists:foreach(fun({Key,Value})->
+                                  case lists:member(Key,Directives) of
+                                      true->
+                                          ok;
+                                      false ->
+                                          ets:insert(AccessData,{Key,Value})
+                                  end
+                          end,FileData);
+        [{allow_over_ride,_}]->
+            %Will only appear if the user writes very strange
+            %config files
+            ok
+    end.
+%----------------------------------------------------------------------
+%Take a line from the access file and transform it into a tuple that
+%can later be inserted into the ets table. While inside a <Limit>
+%section, FileData is {{context,Values},PreviousFileData}.
+%----------------------------------------------------------------------
+%%%Alternatives that reside inside the limit context:
+
+insertLine("order"++ Order, {{context, Values}, FileData})->
+    {{context,[{order,getOrder(Order)}|Values]},FileData};
+%%Let the user place a tab in the beginning
+insertLine([$\t,$o,$r,$d,$e,$r|Order],{{context,Values},FileData})->
+    {{context,[{order,getOrder(Order)}|Values]},FileData};
+
+insertLine("allow" ++ Allow, {{context, Values}, FileData})->
+    {{context,[{allow,getAllowDenyData(Allow)}|Values]},FileData};
+insertLine([$\t,$a,$l,$l,$o,$w|Allow],{{context,Values},FileData})->
+    {{context,[{allow,getAllowDenyData(Allow)}|Values]},FileData};
+
+insertLine("deny" ++ Deny, {{context,Values}, FileData})->
+    {{context,[{deny,getAllowDenyData(Deny)}|Values]},FileData};
+insertLine([$\t, $d,$e,$n,$y|Deny],{{context,Values},FileData})->
+    {{context,[{deny,getAllowDenyData(Deny)}|Values]},FileData};
+
+insertLine("require" ++ Require, {{context, Values}, FileData})->
+    {{context,[{require,getRequireData(Require)}|Values]},FileData};
+insertLine([$\t,$r,$e,$q,$u,$i,$r,$e|Require],{{context,Values},FileData})->
+    {{context,[{require,getRequireData(Require)}|Values]},FileData};
+
+%close the open context and push it onto the directive list
+insertLine("</Limit" ++ _EndLimit, {Context,FileData})->
+    [Context | FileData];
+%open a new limit context
+insertLine("<Limit" ++ Limit, FileData)->
+    {{context,[{limit,getLimits(Limit)}]}, FileData};
+
+insertLine([$A,$u,$t,$h,$U,$s,$e,$r,$F,$i,$l,$e,$\ |AuthUserFile],FileData)->
+    [{user_file,string:strip(AuthUserFile,right,$\n)}|FileData];
+
+insertLine([$A,$u,$t,$h,$G,$r,$o,$u,$p,$F,$i,$l,$e,$\ |AuthGroupFile],
+           FileData)->
+    [{group_file,string:strip(AuthGroupFile,right,$\n)}|FileData];
+
+insertLine("AllowOverRide" ++ AllowOverRide, FileData)->
+    [{allow_over_ride,getAllowOverRideData(AllowOverRide)}
+     | FileData];
+
+insertLine([$A,$u,$t,$h,$N,$a,$m,$e,$\ |AuthName],FileData)->
+    [{auth_name,string:strip(AuthName,right,$\n)}|FileData];
+
+insertLine("AuthType" ++ AuthType,FileData)->
+    [{auth_type,getAuthorizationType(AuthType)}|FileData];
+
+%unknown directives and comments are ignored
+insertLine(_BadDirectiveOrComment,FileData)->
+    FileData.
+
+%----------------------------------------------------------------------
+%Transform the AllowOverRide data into a form that is easier to handle
+%later. OverRideData = "all" | "none" | "Directive1 ... DirectiveN"
+%----------------------------------------------------------------------
+
+getAllowOverRideData(OverRideData)->
+    case string:tokens(OverRideData," \r\n") of
+        ["all" ++ _] ->
+            all;
+        ["none" ++ _]->
+            none;
+        Directives ->
+            getOverRideDirectives(Directives)
+    end.
+
+%Map each directive name in the AllowOverRide list to its internal atom.
+getOverRideDirectives(Directives)->
+    lists:map(fun(Directive)->
+                      transformDirective(Directive)
+              end,Directives).
+%Translate a directive name to the atom used as its ets key;
+%unknown directives map to 'unallowed'.
+transformDirective("AuthUserFile" ++ _)->
+    user_file;
+transformDirective("AuthGroupFile" ++ _) ->
+    group_file;
+transformDirective("AuthName" ++ _)->
+    auth_name;
+transformDirective("AuthType" ++ _)->
+    auth_type;
+transformDirective(_UnAllowedOverRideDirective) ->
+    unallowed.
+%----------------------------------------------------------------------
+%Replace the string that specifies which authentication method to use
+%with an atom, for easier matching ("Basic"->basic, "MD5"->md5).
+%----------------------------------------------------------------------
+getAuthorizationType(AuthType)->
+    [Arg | _Crap] = string:tokens(AuthType,"\n\r\ "),
+    case Arg of
+        "Basic"->
+            basic;
+        "MD5" ->
+            md5;
+        _What ->
+            error
+    end.
+%----------------------------------------------------------------------
+%Returns a list of the methods named in a "<Limit ...>" tag, or the
+%atom all when no methods are given; error on a malformed tag
+%(missing ">" or a split failure).
+%----------------------------------------------------------------------
+getLimits(Limits)->
+    case regexp:split(Limits,">")of
+        {ok,[_NoEndOnLimit]}->
+            error;
+        {ok, [Methods | _Crap]}->
+            case regexp:split(Methods," ")of
+                {ok,[]}->
+                    all;
+                {ok,SplittedMethods}->
+                    SplittedMethods;
+                {error, _Error}->
+                    error
+            end;
+        {error,_Error}->
+            error
+    end.
+
+
+%----------------------------------------------------------------------
+%Transform the order directive into a tuple of atoms giving the order
+%in which the deny/allow control shall be performed; only the first
+%word of the directive decides.
+%----------------------------------------------------------------------
+getOrder(Order)->
+    [First | _Rest]=lists:map(fun(Part)->
+                                      list_to_atom(Part)
+                              end,string:tokens(Order," \n\r")),
+    case First of
+        deny->
+            {deny,allow};
+        allow->
+            {allow,deny};
+        _Error->
+            error
+    end.
+
+%----------------------------------------------------------------------
+%The string AllowDeny is "from all" or "from Subnet1 Subnet2...SubnetN".
+%Returns the atom all, the list of subnets, or error.
+%----------------------------------------------------------------------
+getAllowDenyData(AllowDeny)->
+    case string:tokens(AllowDeny," \n\r") of
+        [_From|AllowDenyData] when length(AllowDenyData)>=1->
+            case lists:nth(1,AllowDenyData) of
+                "all" ->
+                    all;
+                _Hosts->
+                    AllowDenyData
+            end;
+        _ ->
+            error
+    end.
+%----------------------------------------------------------------------
+%Parse a require directive: "user U1 U2..." -> {users,[...]} and
+%"group G1 G2..." -> {groups,[...]}; anything else is an error.
+%----------------------------------------------------------------------
+getRequireData(Require)->
+    [UserOrGroup|UserData]=string:tokens(Require," \n\r"),
+    case UserOrGroup of
+        "user"->
+            {users,UserData};
+        "group" ->
+            {groups,UserData};
+        _Whatever ->
+            error
+    end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%                                                                  %%
+%% Methods that collect the paths to the access files               %%
+%%                                                                  %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%----------------------------------------------------------------------
+%Get the whole path to each of the different access files.
+%----------------------------------------------------------------------
+getHtAccessFiles(HtAccessFileNames,Path,RestOfSplittedPath)->
+    getHtAccessFiles(HtAccessFileNames,Path,RestOfSplittedPath,[]).
+
+%Walk down the split path, collecting every existing access file in
+%each directory along the way. The [[]] clause matches a trailing
+%empty component - presumably a request path ending in "/"; verify
+%against regexp:split/2 output.
+getHtAccessFiles(HtAccessFileNames,Path,[[]],HtAccessFiles)->
+    HtAccessFiles ++ accessFilesOfPath(HtAccessFileNames,Path++"/");
+
+getHtAccessFiles(_HtAccessFileNames, _Path, [], HtAccessFiles)->
+    HtAccessFiles;
+getHtAccessFiles(HtAccessFileNames,Path,[NextDir|RestOfSplittedPath],
+                 AccessFiles)->
+    getHtAccessFiles(HtAccessFileNames,Path++"/"++NextDir,RestOfSplittedPath,
+                     AccessFiles ++
+                     accessFilesOfPath(HtAccessFileNames,Path++"/")).
+
+
+%----------------------------------------------------------------------
+%Collect the access files that actually exist in the directory Path.
+%----------------------------------------------------------------------
+accessFilesOfPath(HtAccessFileNames,Path)->
+    lists:foldl(fun(HtAccessFileName,Files)->
+                        case file:read_file_info(Path++HtAccessFileName) of
+                            {ok, _}->
+                                [Path++HtAccessFileName|Files];
+                            {error,_Error} ->
+                                Files
+                        end
+                end,[],HtAccessFileNames).
+
+
+%----------------------------------------------------------------------
+%Take the split path and join it up to the document root or the alias
+%that matches first.
+%----------------------------------------------------------------------
+
+getRootPath(SplittedPath, Info)->
+    DocRoot=httpd_util:lookup(Info#mod.config_db,document_root,"/"),
+    %candidate roots: the document root plus every alias target
+    PresumtiveRootPath=
+        [DocRoot|lists:map(fun({_Alias,RealPath})->
+                                   RealPath
+                           end,
+                           httpd_util:multi_lookup(Info#mod.config_db,alias))],
+    getRootPath(PresumtiveRootPath,SplittedPath,Info).
+
+
+%Accumulate path components until the accumulated prefix matches one
+%of the presumptive root paths; return {ok,Root,Rest} or {error,Part}
+%when nothing matches. The first clause rewrites a two-element path
+%starting with the empty string (an absolute path) to start at "/".
+getRootPath(PresumtiveRootPath,[[],Splittedpath],Info)->
+    getRootPath(PresumtiveRootPath,["/",Splittedpath],Info);
+
+
+getRootPath(PresumtiveRootPath,[Part,NextPart|SplittedPath],Info)->
+    case lists:member(Part,PresumtiveRootPath)of
+        true->
+            {ok,Part,[NextPart|SplittedPath]};
+        false ->
+            getRootPath(PresumtiveRootPath,
+                        [Part++"/"++NextPart|SplittedPath],Info)
+    end;
+
+getRootPath(PresumtiveRootPath, [Part], _Info)->
+    case lists:member(Part,PresumtiveRootPath)of
+        true->
+            {ok,Part,[]};
+        false ->
+            {error,Part}
+    end.
diff --git a/src/couch_inets/mod_include.erl b/src/couch_inets/mod_include.erl
new file mode 100644
index 00000000..c488c778
--- /dev/null
+++ b/src/couch_inets/mod_include.erl
@@ -0,0 +1,594 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_include).
+-export([do/1,parse/2,config/6,include/6,echo/6,fsize/6,flastmod/6,exec/6]).
+
+-include("httpd.hrl").
+
+-define(VMODULE,"INCLUDE").
+
+%% do
+
+do(Info) ->
+ case Info#mod.method of
+ "GET" ->
+ case httpd_util:key1search(Info#mod.data,status) of
+ %% A status code has been generated!
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed,Info#mod.data};
+ %% No status code has been generated!
+ undefined ->
+ case httpd_util:key1search(Info#mod.data, response) of
+ %% No response has been generated!
+ undefined ->
+ do_include(Info);
+ %% A response has been generated or sent!
+ _Response ->
+ {proceed,Info#mod.data}
+ end
+ end;
+ %% Not a GET method!
+ _ ->
+ {proceed,Info#mod.data}
+ end.
+
+do_include(Info) ->
+ Path = mod_alias:path(Info#mod.data,Info#mod.config_db,
+ Info#mod.request_uri),
+ Suffix = httpd_util:suffix(Path),
+ case httpd_util:lookup_mime_default(Info#mod.config_db,Suffix) of
+ "text/x-server-parsed-html" ->
+ HeaderStart = [{content_type, "text/html"}],
+ case send_in(Info, Path, HeaderStart, file:read_file_info(Path)) of
+ {ok, ErrorLog, Size} ->
+ {proceed, [{response, {already_sent, 200, Size}},
+ {mime_type, "text/html"} |
+ lists:append(ErrorLog, Info#mod.data)]};
+ {error, Reason} ->
+ {proceed,
+ [{status,send_error(Reason,Info,Path)}|Info#mod.data]}
+ end;
+ _ -> %% Unknown mime type, ignore
+ {proceed,Info#mod.data}
+ end.
+
+
+%%
+%% config directive
+%%
+
+config(_Info, Context, ErrorLog, TagList, ValueList, R) ->
+ case verify_tags("config",[errmsg,timefmt,sizefmt],
+ TagList,ValueList) of
+ ok ->
+ {ok,update_context(TagList,ValueList,Context),ErrorLog,"",R};
+ {error,Reason} ->
+ {ok,Context,[{internal_info,Reason}|ErrorLog],
+ httpd_util:key1search(Context,errmsg,""),R}
+ end.
+
+update_context([],[],Context) ->
+ Context;
+update_context([Tag|R1],[Value|R2],Context) ->
+ update_context(R1,R2,[{Tag,Value}|Context]).
+
+verify_tags(Command,ValidTags,TagList,ValueList) when length(TagList)==length(ValueList) ->
+ verify_tags(Command, ValidTags, TagList);
+verify_tags(Command, _ValidTags, _TagList, _ValueList) ->
+ {error, ?NICE(Command ++ " directive has spurious tags")}.
+
+verify_tags(_Command, _ValidTags, []) ->
+ ok;
+verify_tags(Command, ValidTags, [Tag|Rest]) ->
+ case lists:member(Tag, ValidTags) of
+ true ->
+ verify_tags(Command, ValidTags, Rest);
+ false ->
+ {error, ?NICE(Command++" directive has a spurious tag ("++
+ atom_to_list(Tag)++")")}
+ end.
+
+%%
+%% include directive
+%%
+
+include(Info,Context,ErrorLog,[virtual],[VirtualPath],R) ->
+ Aliases = httpd_util:multi_lookup(Info#mod.config_db,alias),
+ {_, Path, _AfterPath} =
+ mod_alias:real_name(Info#mod.config_db, VirtualPath, Aliases),
+ include(Info,Context,ErrorLog,R,Path);
+include(Info, Context, ErrorLog, [file], [FileName], R) ->
+ Path = file(Info#mod.config_db, Info#mod.request_uri, FileName),
+ include(Info, Context, ErrorLog, R, Path);
+include(_Info, Context, ErrorLog, _TagList, _ValueList, R) ->
+ {ok, Context,
+ [{internal_info,?NICE("include directive has a spurious tag")}|
+ ErrorLog], httpd_util:key1search(Context, errmsg, ""), R}.
+
+include(Info, Context, ErrorLog, R, Path) ->
+ case file:read_file(Path) of
+ {ok, Body} ->
+ {ok, NewContext, NewErrorLog, Result} =
+ parse(Info, binary_to_list(Body), Context, ErrorLog, []),
+ {ok, NewContext, NewErrorLog, Result, R};
+ {error, _Reason} ->
+ {ok, Context,
+ [{internal_info, ?NICE("Can't open "++Path)}|ErrorLog],
+ httpd_util:key1search(Context, errmsg, ""), R}
+ end.
+
+file(ConfigDB, RequestURI, FileName) ->
+ Aliases = httpd_util:multi_lookup(ConfigDB, alias),
+ {_, Path, _AfterPath}
+ = mod_alias:real_name(ConfigDB, RequestURI, Aliases),
+ Pwd = filename:dirname(Path),
+ filename:join(Pwd, FileName).
+
+%%
+%% echo directive
+%%
+
+echo(Info,Context,ErrorLog,[var],["DOCUMENT_NAME"],R) ->
+ {ok,Context,ErrorLog,document_name(Info#mod.data,Info#mod.config_db,
+ Info#mod.request_uri),R};
+echo(Info,Context,ErrorLog,[var],["DOCUMENT_URI"],R) ->
+ {ok,Context,ErrorLog,document_uri(Info#mod.config_db,
+ Info#mod.request_uri),R};
+echo(Info,Context,ErrorLog,[var],["QUERY_STRING_UNESCAPED"],R) ->
+ {ok,Context,ErrorLog,query_string_unescaped(Info#mod.request_uri),R};
+echo(_Info,Context,ErrorLog,[var],["DATE_LOCAL"],R) ->
+ {ok,Context,ErrorLog,date_local(),R};
+echo(_Info,Context,ErrorLog,[var],["DATE_GMT"],R) ->
+ {ok,Context,ErrorLog,date_gmt(),R};
+echo(Info,Context,ErrorLog,[var],["LAST_MODIFIED"],R) ->
+ {ok,Context,ErrorLog,last_modified(Info#mod.data,Info#mod.config_db,
+ Info#mod.request_uri),R};
+echo(_Info, Context, ErrorLog, _TagList, _ValueList, R) ->
+ {ok,Context,
+ [{internal_info,?NICE("echo directive has a spurious tag")}|
+ ErrorLog],"(none)",R}.
+
+document_name(Data,ConfigDB,RequestURI) ->
+ Path = mod_alias:path(Data,ConfigDB,RequestURI),
+ case regexp:match(Path,"[^/]*\$") of
+ {match,Start,Length} ->
+ string:substr(Path,Start,Length);
+ nomatch ->
+ "(none)"
+ end.
+
+document_uri(ConfigDB, RequestURI) ->
+ Aliases = httpd_util:multi_lookup(ConfigDB, alias),
+
+ {_, Path, AfterPath} = mod_alias:real_name(ConfigDB, RequestURI, Aliases),
+
+ VirtualPath = string:substr(RequestURI, 1,
+ length(RequestURI)-length(AfterPath)),
+ {match, Start, Length} = regexp:match(Path,"[^/]*\$"),
+ FileName = string:substr(Path,Start,Length),
+ case regexp:match(VirtualPath, FileName++"\$") of
+ {match, _, _} ->
+ httpd_util:decode_hex(VirtualPath)++AfterPath;
+ nomatch ->
+ string:strip(httpd_util:decode_hex(VirtualPath),right,$/)++
+ "/"++FileName++AfterPath
+ end.
+
+query_string_unescaped(RequestURI) ->
+ case regexp:match(RequestURI,"[\?].*\$") of
+ {match,Start,Length} ->
+ %% Escape all shell-special variables with \
+ escape(string:substr(RequestURI,Start+1,Length-1));
+ nomatch ->
+ "(none)"
+ end.
+
+escape([]) -> [];
+escape([$;|R]) -> [$\\,$;|escape(R)];
+escape([$&|R]) -> [$\\,$&|escape(R)];
+escape([$(|R]) -> [$\\,$(|escape(R)];
+escape([$)|R]) -> [$\\,$)|escape(R)];
+escape([$||R]) -> [$\\,$||escape(R)];
+escape([$^|R]) -> [$\\,$^|escape(R)];
+escape([$<|R]) -> [$\\,$<|escape(R)];
+escape([$>|R]) -> [$\\,$>|escape(R)];
+escape([$\n|R]) -> [$\\,$\n|escape(R)];
+escape([$ |R]) -> [$\\,$ |escape(R)];
+escape([$\t|R]) -> [$\\,$\t|escape(R)];
+escape([C|R]) -> [C|escape(R)].
+
+date_local() ->
+ {{Year,Month,Day},{Hour,Minute,Second}}=calendar:local_time(),
+ %% Time format hard-wired to: "%a %b %e %T %Y" according to strftime(3)
+ io_lib:format("~s ~s ~2w ~2.2.0w:~2.2.0w:~2.2.0w ~w",
+ [httpd_util:day(calendar:day_of_the_week(Year,Month,Day)),
+ httpd_util:month(Month),Day,Hour,Minute,Second,Year]).
+
+date_gmt() ->
+ {{Year,Month,Day},{Hour,Minute,Second}}=calendar:universal_time(),
+ %% Time format hard-wired to: "%a %b %e %T %Z %Y" according to strftime(3)
+ io_lib:format("~s ~s ~2w ~2.2.0w:~2.2.0w:~2.2.0w GMT ~w",
+ [httpd_util:day(calendar:day_of_the_week(Year,Month,Day)),
+ httpd_util:month(Month),Day,Hour,Minute,Second,Year]).
+
+last_modified(Data,ConfigDB,RequestURI) ->
+ {ok,FileInfo}=file:read_file_info(mod_alias:path(Data,ConfigDB,RequestURI)),
+ {{Year,Month,Day},{Hour,Minute,Second}}=FileInfo#file_info.mtime,
+ io_lib:format("~s ~s ~2w ~2.2.0w:~2.2.0w:~2.2.0w ~w",
+ [httpd_util:day(calendar:day_of_the_week(Year,Month,Day)),
+ httpd_util:month(Month),Day,Hour,Minute,Second,Year]).
+
+%%
+%% fsize directive
+%%
+
+fsize(Info,Context,ErrorLog,[virtual],[VirtualPath],R) ->
+ Aliases = httpd_util:multi_lookup(Info#mod.config_db,alias),
+ {_,Path, _AfterPath}=
+ mod_alias:real_name(Info#mod.config_db,VirtualPath,Aliases),
+ fsize(Info, Context, ErrorLog, R, Path);
+fsize(Info,Context,ErrorLog,[file],[FileName],R) ->
+ Path = file(Info#mod.config_db,Info#mod.request_uri,FileName),
+ fsize(Info,Context,ErrorLog,R,Path);
+fsize(_Info, Context, ErrorLog, _TagList, _ValueList, R) ->
+ {ok,Context,[{internal_info,?NICE("fsize directive has a spurious tag")}|
+ ErrorLog],httpd_util:key1search(Context,errmsg,""),R}.
+
+fsize(_Info, Context, ErrorLog, R, Path) ->
+ case file:read_file_info(Path) of
+ {ok,FileInfo} ->
+ case httpd_util:key1search(Context,sizefmt) of
+ "bytes" ->
+ {ok,Context,ErrorLog,
+ integer_to_list(FileInfo#file_info.size),R};
+ "abbrev" ->
+ Size = integer_to_list(trunc(FileInfo#file_info.size/1024+1))++"k",
+ {ok,Context,ErrorLog,Size,R};
+ Value->
+ {ok,Context,
+ [{internal_info,
+ ?NICE("fsize directive has a spurious tag value ("++
+ Value++")")}|
+ ErrorLog],
+ httpd_util:key1search(Context, errmsg, ""), R}
+ end;
+ {error, _Reason} ->
+ {ok,Context,[{internal_info,?NICE("Can't open "++Path)}|ErrorLog],
+ httpd_util:key1search(Context,errmsg,""),R}
+ end.
+
+%%
+%% flastmod directive
+%%
+
+flastmod(#mod{config_db = Db} = Info,
+ Context, ErrorLog, [virtual], [VirtualPath],R) ->
+ Aliases = httpd_util:multi_lookup(Db,alias),
+ {_,Path, _AfterPath} = mod_alias:real_name(Db, VirtualPath, Aliases),
+ flastmod(Info,Context,ErrorLog,R,Path);
+flastmod(#mod{config_db = Db, request_uri = RequestUri} = Info,
+ Context, ErrorLog, [file], [FileName], R) ->
+ Path = file(Db, RequestUri, FileName),
+ flastmod(Info, Context, ErrorLog, R, Path);
+flastmod(_Info, Context, ErrorLog, _TagList, _ValueList, R) ->
+ {ok,Context,
+ [{internal_info,?NICE("flastmod directive has a spurious tag")}|
+ ErrorLog],httpd_util:key1search(Context,errmsg,""),R}.
+
+flastmod(_Info, Context, ErrorLog, R, File) ->
+ case file:read_file_info(File) of
+ {ok, FileInfo} ->
+ {{Yr,Mon,Day},{Hour,Minute,Second}}=FileInfo#file_info.mtime,
+ Result =
+ io_lib:format("~s ~s ~2w ~w:~w:~w ~w",
+ [httpd_util:day(
+ calendar:day_of_the_week(Yr,Mon, Day)),
+ httpd_util:month(Mon),Day,Hour,Minute,Second, Yr]),
+ {ok, Context, ErrorLog, Result, R};
+ {error, _Reason} ->
+ {ok,Context,[{internal_info,?NICE("Can't open "++File)}|ErrorLog],
+ httpd_util:key1search(Context,errmsg,""),R}
+ end.
+
+%%
+%% exec directive
+%%
+
+exec(Info,Context,ErrorLog,[cmd],[Command],R) ->
+ cmd(Info,Context,ErrorLog,R,Command);
+exec(Info,Context,ErrorLog,[cgi],[RequestURI],R) ->
+ cgi(Info,Context,ErrorLog,R,RequestURI);
+exec(_Info, Context, ErrorLog, _TagList, _ValueList, R) ->
+ {ok, Context,
+ [{internal_info,?NICE("exec directive has a spurious tag")}|
+ ErrorLog], httpd_util:key1search(Context,errmsg,""),R}.
+
+%% cmd
+
+cmd(Info, Context, ErrorLog, R, Command) ->
+ process_flag(trap_exit,true),
+ Env = env(Info),
+ Dir = filename:dirname(Command),
+ Port = (catch open_port({spawn,Command},[stream,{cd,Dir},{env,Env}])),
+ case Port of
+ P when port(P) ->
+ {NewErrorLog, Result} = proxy(Port, ErrorLog),
+ {ok, Context, NewErrorLog, Result, R};
+ {'EXIT', Reason} ->
+ exit({open_port_failed,Reason,
+ [{uri,Info#mod.request_uri},{script,Command},
+ {env,Env},{dir,Dir}]});
+ O ->
+ exit({open_port_failed,O,
+ [{uri,Info#mod.request_uri},{script,Command},
+ {env,Env},{dir,Dir}]})
+ end.
+
+env(Info) ->
+ [{"DOCUMENT_NAME",document_name(Info#mod.data,Info#mod.config_db,
+ Info#mod.request_uri)},
+ {"DOCUMENT_URI", document_uri(Info#mod.config_db, Info#mod.request_uri)},
+ {"QUERY_STRING_UNESCAPED", query_string_unescaped(Info#mod.request_uri)},
+ {"DATE_LOCAL", date_local()},
+ {"DATE_GMT", date_gmt()},
+ {"LAST_MODIFIED", last_modified(Info#mod.data, Info#mod.config_db,
+ Info#mod.request_uri)}
+ ].
+
+%% cgi
+
+cgi(Info, Context, ErrorLog, R, RequestURI) ->
+ ScriptAliases = httpd_util:multi_lookup(Info#mod.config_db, script_alias),
+ case mod_alias:real_script_name(Info#mod.config_db, RequestURI,
+ ScriptAliases) of
+ {Script, AfterScript} ->
+ exec_script(Info,Script,AfterScript,ErrorLog,Context,R);
+ not_a_script ->
+ {ok, Context,
+ [{internal_info, ?NICE(RequestURI++" is not a script")}|
+ ErrorLog], httpd_util:key1search(Context, errmsg, ""),R}
+ end.
+
+remove_header([]) ->
+ [];
+remove_header([$\n,$\n|Rest]) ->
+ Rest;
+remove_header([_C|Rest]) ->
+ remove_header(Rest).
+
+
+exec_script(#mod{config_db = Db, request_uri = RequestUri} = Info,
+ Script, _AfterScript, ErrorLog, Context, R) ->
+ process_flag(trap_exit,true),
+ Aliases = httpd_util:multi_lookup(Db, alias),
+ {_, Path, AfterPath} = mod_alias:real_name(Db, RequestUri, Aliases),
+ Env = env(Info) ++ mod_cgi:env(Info, Path, AfterPath),
+ Dir = filename:dirname(Path),
+ Port = (catch open_port({spawn,Script},[stream,{env, Env},{cd, Dir}])),
+ case Port of
+ P when port(P) ->
+ %% Send entity body to port.
+ Res = case Info#mod.entity_body of
+ [] ->
+ true;
+ EntityBody ->
+ (catch port_command(Port, EntityBody))
+ end,
+ case Res of
+ {'EXIT', Reason} ->
+ exit({open_cmd_failed,Reason,
+ [{mod,?MODULE},{port,Port},
+ {uri,RequestUri},
+ {script,Script},{env,Env},{dir,Dir},
+ {ebody_size,sz(Info#mod.entity_body)}]});
+ true ->
+ {NewErrorLog, Result} = proxy(Port, ErrorLog),
+ {ok, Context, NewErrorLog, remove_header(Result), R}
+ end;
+ {'EXIT', Reason} ->
+ exit({open_port_failed,Reason,
+ [{mod,?MODULE},{uri,RequestUri},{script,Script},
+ {env,Env},{dir,Dir}]});
+ O ->
+ exit({open_port_failed,O,
+ [{mod,?MODULE},{uri,RequestUri},{script,Script},
+ {env,Env},{dir,Dir}]})
+ end.
+
+
+%%
+%% Port communication
+%%
+
+proxy(Port, ErrorLog) ->
+ process_flag(trap_exit, true),
+ proxy(Port, ErrorLog, []).
+
+proxy(Port, ErrorLog, Result) ->
+ receive
+ {Port, {data, Response}} ->
+ proxy(Port, ErrorLog, lists:append(Result,Response));
+ {'EXIT', Port, normal} when port(Port) ->
+ process_flag(trap_exit, false),
+ {ErrorLog, Result};
+ {'EXIT', Port, _Reason} when port(Port) ->
+ process_flag(trap_exit, false),
+ {[{internal_info,
+ ?NICE("Scrambled output from CGI-script")}|ErrorLog],
+ Result};
+ {'EXIT', Pid, Reason} when pid(Pid) ->
+ process_flag(trap_exit, false),
+ {'EXIT', Pid, Reason};
+ %% This should not happen!
+ _WhatEver ->
+ process_flag(trap_exit, false),
+ {ErrorLog, Result}
+ end.
+
+
+%% ------
+%% Temporary until I figure out a way to fix send_in_chunks
+%% (comments and directives that start in one chunk but end
+%% in another are not handled).
+%%
+
+send_in(Info, Path, Head, {ok,FileInfo}) ->
+ case file:read_file(Path) of
+ {ok, Bin} ->
+ send_in1(Info, binary_to_list(Bin), Head, FileInfo);
+ {error, Reason} ->
+ {error, {read,Reason}}
+ end;
+send_in(_Info , _Path, _Head,{error,Reason}) ->
+ {error, {open,Reason}}.
+
+send_in1(Info, Data, Head, FileInfo) ->
+ {ok, _Context, Err, ParsedBody} = parse(Info,Data,?DEFAULT_CONTEXT,[],[]),
+ Size = length(ParsedBody),
+ LastModified = case catch httpd_util:rfc1123_date(FileInfo#file_info.mtime) of
+ Date when list(Date) -> [{last_modified,Date}];
+ _ -> []
+ end,
+ Head1 = case Info#mod.http_version of
+ "HTTP/1.1"->
+ Head ++ [{content_length, integer_to_list(Size)},
+ {etag, httpd_util:create_etag(FileInfo,Size)}|
+ LastModified];
+ _->
+ %% i.e http/1.0 and http/0.9
+ Head ++ [{content_length, integer_to_list(Size)}|
+ LastModified]
+ end,
+ httpd_response:send_header(Info, 200, Head1),
+ httpd_socket:deliver(Info#mod.socket_type,Info#mod.socket, ParsedBody),
+ {ok, Err, Size}.
+
+
+parse(Info,Body) ->
+ parse(Info, Body, ?DEFAULT_CONTEXT, [], []).
+
+parse(_Info, [], Context, ErrorLog, Result) ->
+ {ok, Context, lists:reverse(ErrorLog), lists:reverse(Result)};
+parse(Info,[$<,$!,$-,$-,$#|R1],Context,ErrorLog,Result) ->
+ case catch parse0(R1,Context) of
+ {parse_error,Reason} ->
+ parse(Info,R1,Context,[{internal_info,?NICE(Reason)}|ErrorLog],
+ [$#,$-,$-,$!,$<|Result]);
+ {ok,Context,Command,TagList,ValueList,R2} ->
+ {ok,NewContext,NewErrorLog,MoreResult,R3}=
+ handle(Info,Context,ErrorLog,Command,TagList,ValueList,R2),
+ parse(Info,R3,NewContext,NewErrorLog,
+ lists:reverse(MoreResult)++Result)
+ end;
+parse(Info,[$<,$!,$-,$-|R1],Context,ErrorLog,Result) ->
+ case catch parse5(R1,[],0) of
+ {parse_error,Reason} ->
+ parse(Info,R1,Context,
+ [{internal_info,?NICE(Reason)}|ErrorLog],Result);
+ {Comment,R2} ->
+ parse(Info,R2,Context,ErrorLog,Comment++Result)
+ end;
+parse(Info,[C|R],Context,ErrorLog,Result) ->
+ parse(Info,R,Context,ErrorLog,[C|Result]).
+
+handle(Info,Context,ErrorLog,Command,TagList,ValueList,R) ->
+ case catch apply(?MODULE,Command,[Info,Context,ErrorLog,TagList,ValueList,
+ R]) of
+ {'EXIT',{undef,_}} ->
+ throw({parse_error,"Unknown command "++atom_to_list(Command)++
+ " in parsed doc"});
+ Result ->
+ Result
+ end.
+
+parse0([], _Context) ->
+ throw({parse_error,"Premature EOF in parsed file"});
+parse0([$-,$-,$>|_R], _Context) ->
+ throw({parse_error,"Premature EOF in parsed file"});
+parse0([$ |R], Context) ->
+ parse0(R,Context);
+parse0(String, Context) ->
+ parse1(String, Context,"").
+
+parse1([], _Context, _Command) ->
+ throw({parse_error,"Premature EOF in parsed file"});
+parse1([$-,$-,$>|_R], _Context, _Command) ->
+ throw({parse_error,"Premature EOF in parsed file"});
+parse1([$ |R], Context, Command) ->
+ parse2(R,Context,list_to_atom(lists:reverse(Command)),[],[],"");
+parse1([C|R], Context, Command) ->
+ parse1(R,Context,[C|Command]).
+
+parse2([], _Context, _Command, _TagList, _ValueList, _Tag) ->
+ throw({parse_error,"Premature EOF in parsed file"});
+parse2([$-,$-,$>|R], Context, Command, TagList, ValueList, _Tag) ->
+ {ok,Context,Command,TagList,ValueList,R};
+parse2([$ |R],Context,Command,TagList,ValueList,Tag) ->
+ parse2(R,Context,Command,TagList,ValueList,Tag);
+parse2([$=|R],Context,Command,TagList,ValueList,Tag) ->
+ parse3(R,Context,Command,[list_to_atom(lists:reverse(Tag))|TagList],
+ ValueList);
+parse2([C|R],Context,Command,TagList,ValueList,Tag) ->
+ parse2(R,Context,Command,TagList,ValueList,[C|Tag]).
+
+parse3([], _Context, _Command, _TagList, _ValueList) ->
+ throw({parse_error,"Premature EOF in parsed file"});
+parse3([$-,$-,$>|_R], _Context, _Command, _TagList, _ValueList) ->
+ throw({parse_error,"Premature EOF in parsed file"});
+parse3([$ |R], Context, Command, TagList, ValueList) ->
+ parse3(R, Context, Command, TagList, ValueList);
+parse3([$"|R], Context, Command, TagList, ValueList) ->
+ parse4(R,Context,Command,TagList,ValueList,"");
+parse3(_String, _Context, _Command, _TagList, _ValueList) ->
+ throw({parse_error,"Premature EOF in parsed file"}).
+
+parse4([], _Context, _Command, _TagList, _ValueList, _Value) ->
+ throw({parse_error,"Premature EOF in parsed file"});
+parse4([$-,$-,$>|_R], _Context, _Command, _TagList, _ValueList, _Value) ->
+ throw({parse_error,"Premature EOF in parsed file"});
+parse4([$"|R],Context,Command,TagList,ValueList,Value) ->
+ parse2(R,Context,Command,TagList,[lists:reverse(Value)|ValueList],"");
+parse4([C|R],Context,Command,TagList,ValueList,Value) ->
+ parse4(R,Context,Command,TagList,ValueList,[C|Value]).
+
+parse5([], _Comment, _Depth) ->
+ throw({parse_error,"Premature EOF in parsed file"});
+parse5([$<,$!,$-,$-|R],Comment,Depth) ->
+ parse5(R,[$-,$-,$!,$<|Comment],Depth+1);
+parse5([$-,$-,$>|R],Comment,0) ->
+ {">--"++Comment++"--!<",R};
+parse5([$-,$-,$>|R],Comment,Depth) ->
+ parse5(R,[$>,$-,$-|Comment],Depth-1);
+parse5([C|R],Comment,Depth) ->
+ parse5(R,[C|Comment],Depth).
+
+
+sz(B) when binary(B) -> {binary,size(B)};
+sz(L) when list(L) -> {list,length(L)};
+sz(_) -> undefined.
+
+%% send_error - Handle failure to send the file
+%%
+send_error({open,Reason},Info,Path) ->
+ httpd_file:handle_error(Reason, "open", Info, Path);
+send_error({read,Reason},Info,Path) ->
+ httpd_file:handle_error(Reason, "read", Info, Path).
+
+
+
+
diff --git a/src/couch_inets/mod_log.erl b/src/couch_inets/mod_log.erl
new file mode 100644
index 00000000..9903e69f
--- /dev/null
+++ b/src/couch_inets/mod_log.erl
@@ -0,0 +1,253 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_log).
+-export([do/1,error_log/5,security_log/2,load/2,store/2,remove/1]).
+
+-export([report_error/2]).
+
+-include("httpd.hrl").
+-define(VMODULE,"LOG").
+
+%% do
+
+do(Info) ->
+ AuthUser = auth_user(Info#mod.data),
+ Date = custom_date(),
+ log_internal_info(Info,Date,Info#mod.data),
+ case httpd_util:key1search(Info#mod.data,status) of
+ %% A status code has been generated!
+ {StatusCode, _PhraseArgs, Reason} ->
+ transfer_log(Info,"-",AuthUser,Date,StatusCode,0),
+ if
+ StatusCode >= 400 ->
+ error_log(Info,Date,Reason);
+ true ->
+ not_an_error
+ end,
+ {proceed,Info#mod.data};
+ %% No status code has been generated!
+ undefined ->
+ case httpd_util:key1search(Info#mod.data,response) of
+ {already_sent,StatusCode,Size} ->
+ transfer_log(Info,"-",AuthUser,Date,StatusCode,Size),
+ {proceed,Info#mod.data};
+ {response, Head, _Body} ->
+ Size = httpd_util:key1search(Head,content_length,unknown),
+ Code = httpd_util:key1search(Head,code,unknown),
+ transfer_log(Info, "-", AuthUser, Date, Code, Size),
+ {proceed, Info#mod.data};
+ {_StatusCode, Response} ->
+ transfer_log(Info,"-",AuthUser,Date,200,
+ httpd_util:flatlength(Response)),
+ {proceed,Info#mod.data};
+ undefined ->
+ transfer_log(Info,"-",AuthUser,Date,200,0),
+ {proceed,Info#mod.data}
+ end
+ end.
+
+custom_date() ->
+ LocalTime = calendar:local_time(),
+ UniversalTime = calendar:universal_time(),
+ Minutes = round(diff_in_minutes(LocalTime, UniversalTime)),
+ {{YYYY,MM,DD},{Hour,Min,Sec}} = LocalTime,
+ Date =
+ io_lib:format("~.2.0w/~.3s/~.4w:~.2.0w:~.2.0w:~.2.0w ~c~.2.0w~.2.0w",
+ [DD, httpd_util:month(MM), YYYY, Hour, Min, Sec,
+ sign(Minutes),
+ abs(Minutes) div 60, abs(Minutes) rem 60]),
+ lists:flatten(Date).
+
+diff_in_minutes(L,U) ->
+ (calendar:datetime_to_gregorian_seconds(L) -
+ calendar:datetime_to_gregorian_seconds(U))/60.
+
+sign(Minutes) when Minutes > 0 ->
+ $+;
+sign(_Minutes) ->
+ $-.
+
+auth_user(Data) ->
+ case httpd_util:key1search(Data,remote_user) of
+ undefined ->
+ "-";
+ RemoteUser ->
+ RemoteUser
+ end.
+
+%% log_internal_info
+
+log_internal_info(_Info, _Date, []) ->
+ ok;
+log_internal_info(Info,Date,[{internal_info,Reason}|Rest]) ->
+ error_log(Info,Date,Reason),
+ log_internal_info(Info,Date,Rest);
+log_internal_info(Info,Date,[_|Rest]) ->
+ log_internal_info(Info,Date,Rest).
+
+%% transfer_log
+
+transfer_log(Info,RFC931,AuthUser,Date,StatusCode,Bytes) ->
+ case httpd_util:lookup(Info#mod.config_db,transfer_log) of
+ undefined ->
+ no_transfer_log;
+ TransferLog ->
+ {_PortNumber, RemoteHost} =
+ (Info#mod.init_data)#init_data.peername,
+ case (catch io:format(TransferLog,
+ "~s ~s ~s [~s] \"~s\" ~w ~w~n",
+ [RemoteHost, RFC931, AuthUser,
+ Date, Info#mod.request_line,
+ StatusCode, Bytes])) of
+ ok ->
+ ok;
+ Error ->
+ error_logger:error_report(Error)
+ end
+ end.
+
+%% security log
+
+security_log(Info, Reason) ->
+ case httpd_util:lookup(Info#mod.config_db, security_log) of
+ undefined ->
+ no_security_log;
+ SecurityLog ->
+ io:format(SecurityLog,"[~s] ~s~n", [custom_date(), Reason])
+ end.
+
+%% error_log
+
+error_log(Info, Date, Reason) ->
+ case httpd_util:lookup(Info#mod.config_db, error_log) of
+ undefined ->
+ no_error_log;
+ ErrorLog ->
+ {_PortNumber, RemoteHost} =
+ (Info#mod.init_data)#init_data.peername,
+ io:format(ErrorLog,
+ "[~s] access to ~s failed for ~s, reason: ~p~n",
+ [Date,Info#mod.request_uri,RemoteHost,Reason])
+ end.
+
+error_log(_SocketType, _Socket, ConfigDB, {_PortNumber, RemoteHost}, Reason) ->
+ case httpd_util:lookup(ConfigDB, error_log) of
+ undefined ->
+ no_error_log;
+ ErrorLog ->
+ Date = custom_date(),
+ io:format(ErrorLog,"[~s] server crash for ~s, reason: ~p~n",
+ [Date,RemoteHost,Reason]),
+ ok
+ end.
+
+
+report_error(ConfigDB, Error) ->
+ case httpd_util:lookup(ConfigDB, error_log) of
+ undefined ->
+ no_error_log;
+ ErrorLog ->
+ Date = custom_date(),
+ io:format(ErrorLog,"[~s] reporting error: ~s~n",[Date,Error]),
+ ok
+ end.
+
+%%
+%% Configuration
+%%
+
+%% load
+
+load([$T,$r,$a,$n,$s,$f,$e,$r,$L,$o,$g,$ |TransferLog],[]) ->
+ {ok,[],{transfer_log,httpd_conf:clean(TransferLog)}};
+load([$E,$r,$r,$o,$r,$L,$o,$g,$ |ErrorLog],[]) ->
+ {ok,[],{error_log,httpd_conf:clean(ErrorLog)}};
+load([$S,$e,$c,$u,$r,$i,$t,$y,$L,$o,$g,$ |SecurityLog], []) ->
+ {ok, [], {security_log, httpd_conf:clean(SecurityLog)}}.
+
+%% store
+
+store({transfer_log,TransferLog},ConfigList) ->
+ case create_log(TransferLog,ConfigList) of
+ {ok,TransferLogStream} ->
+ {ok,{transfer_log,TransferLogStream}};
+ {error,Reason} ->
+ {error,Reason}
+ end;
+store({error_log,ErrorLog},ConfigList) ->
+ case create_log(ErrorLog,ConfigList) of
+ {ok,ErrorLogStream} ->
+ {ok,{error_log,ErrorLogStream}};
+ {error,Reason} ->
+ {error,Reason}
+ end;
+store({security_log, SecurityLog},ConfigList) ->
+ case create_log(SecurityLog, ConfigList) of
+ {ok, SecurityLogStream} ->
+ {ok, {security_log, SecurityLogStream}};
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+create_log(LogFile,ConfigList) ->
+ Filename = httpd_conf:clean(LogFile),
+ case filename:pathtype(Filename) of
+ absolute ->
+ case file:open(Filename,read_write) of
+ {ok,LogStream} ->
+ file:position(LogStream,{eof,0}),
+ {ok,LogStream};
+ {error,_} ->
+ {error,?NICE("Can't create "++Filename)}
+ end;
+ volumerelative ->
+ case file:open(Filename,read_write) of
+ {ok,LogStream} ->
+ file:position(LogStream,{eof,0}),
+ {ok,LogStream};
+ {error,_} ->
+ {error,?NICE("Can't create "++Filename)}
+ end;
+ relative ->
+ case httpd_util:key1search(ConfigList,server_root) of
+ undefined ->
+ {error,
+ ?NICE(Filename++
+ " is an invalid logfile name beacuse ServerRoot is not defined")};
+ ServerRoot ->
+ AbsoluteFilename=filename:join(ServerRoot,Filename),
+ case file:open(AbsoluteFilename,read_write) of
+ {ok,LogStream} ->
+ file:position(LogStream,{eof,0}),
+ {ok,LogStream};
+ {error, _Reason} ->
+ {error,?NICE("Can't create "++AbsoluteFilename)}
+ end
+ end
+ end.
+
+%% remove
+
+remove(ConfigDB) ->
+ lists:foreach(fun([Stream]) -> file:close(Stream) end,
+ ets:match(ConfigDB,{transfer_log,'$1'})),
+ lists:foreach(fun([Stream]) -> file:close(Stream) end,
+ ets:match(ConfigDB,{error_log,'$1'})),
+ lists:foreach(fun([Stream]) -> file:close(Stream) end,
+ ets:match(ConfigDB,{security_log,'$1'})),
+ ok.
diff --git a/src/couch_inets/mod_range.erl b/src/couch_inets/mod_range.erl
new file mode 100644
index 00000000..fca7693f
--- /dev/null
+++ b/src/couch_inets/mod_range.erl
@@ -0,0 +1,416 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_range).
+-export([do/1]).
+-include("httpd.hrl").
+
+%% do
+
+do(Info) ->
+ ?DEBUG("do -> entry",[]),
+ case Info#mod.method of
+ "GET" ->
+ case httpd_util:key1search(Info#mod.data,status) of
+ %% A status code has been generated!
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed,Info#mod.data};
+ %% No status code has been generated!
+ undefined ->
+ case httpd_util:key1search(Info#mod.data,response) of
+ %% No response has been generated!
+ undefined ->
+ case httpd_util:key1search(Info#mod.parsed_header,
+ "range") of
+ undefined ->
+ %Not a range response
+ {proceed,Info#mod.data};
+ Range ->
+                            %%Check that there was no
+                            %%if-range field that cancelled the
+                            %%range request in favour of the
+                            %%whole file
+ case httpd_util:key1search(Info#mod.data,
+ if_range) of
+ send_file ->
+ {proceed,Info#mod.data};
+ _undefined ->
+ do_get_range(Info,Range)
+ end
+ end;
+ %% A response has been generated or sent!
+ _Response ->
+ {proceed, Info#mod.data}
+ end
+ end;
+ %% Not a GET method!
+ _ ->
+ {proceed,Info#mod.data}
+ end.
+
+do_get_range(Info,Ranges) ->
+ ?DEBUG("do_get_range -> Request URI: ~p",[Info#mod.request_uri]),
+ Path = mod_alias:path(Info#mod.data, Info#mod.config_db,
+ Info#mod.request_uri),
+ {FileInfo, LastModified} = get_modification_date(Path),
+ send_range_response(Path, Info, Ranges, FileInfo, LastModified).
+
+
+send_range_response(Path, Info, Ranges, FileInfo, LastModified)->
+ case parse_ranges(Ranges) of
+ error->
+ ?ERROR("send_range_response-> Unparsable range request",[]),
+ {proceed,Info#mod.data};
+ {multipart,RangeList}->
+ send_multi_range_response(Path, Info, RangeList);
+ {Start,Stop}->
+ send_range_response(Path, Info, Start, Stop, FileInfo,
+ LastModified)
+ end.
+%%More than one range specified
+%%Send a multipart response to the user
+%
+%%An example of an multipart range response
+
+% HTTP/1.1 206 Partial Content
+% Date:Wed 15 Nov 1995 04:08:23 GMT
+% Last-modified:Wed 14 Nov 1995 04:08:23 GMT
+% Content-type: multipart/byteranges; boundary="SeparatorString"
+%
+% --"SeparatorString"
+% Content-Type: application/pdf
+% Content-Range: bytes 500-600/1010
+% .... The data..... 101 bytes
+%
+% --"SeparatorString"
+% Content-Type: application/pdf
+% Content-Range: bytes 700-1009/1010
+% .... The data.....
+
+
+
+send_multi_range_response(Path,Info,RangeList)->
+ case file:open(Path, [raw,binary]) of
+ {ok, FileDescriptor} ->
+ file:close(FileDescriptor),
+ ?DEBUG("send_multi_range_response -> FileDescriptor: ~p",
+ [FileDescriptor]),
+ Suffix = httpd_util:suffix(Path),
+ PartMimeType = httpd_util:lookup_mime_default(Info#mod.config_db,
+ Suffix,"text/plain"),
+ {FileInfo, LastModified} = get_modification_date(Path),
+ case valid_ranges(RangeList,Path,FileInfo) of
+ {ValidRanges,true}->
+ ?DEBUG("send_multi_range_response ->Ranges are valid:",[]),
+ %Apache breaks the standard by sending the size
+ %field in the Header.
+ Header =
+ [{code,206},
+ {content_type, "multipart/byteranges;boundary"
+ "=RangeBoundarySeparator"},
+ {etag, httpd_util:create_etag(FileInfo)} |
+ LastModified],
+ ?DEBUG("send_multi_range_response -> Valid Ranges: ~p",
+ [RagneList]),
+ Body = {fun send_multiranges/4,
+ [ValidRanges, Info, PartMimeType, Path]},
+ {proceed,[{response,
+ {response, Header, Body}} | Info#mod.data]};
+ _ ->
+ {proceed, [{status, {416, "Range not valid",
+ bad_range_boundaries }}]}
+ end;
+ {error, _Reason} ->
+ ?ERROR("do_get -> failed open file: ~p",[_Reason]),
+ {proceed,Info#mod.data}
+ end.
+
+send_multiranges(ValidRanges,Info,PartMimeType,Path)->
+ ?DEBUG("send_multiranges -> Start sending the ranges",[]),
+ case file:open(Path, [raw,binary]) of
+ {ok,FileDescriptor} ->
+ lists:foreach(fun(Range)->
+ send_multipart_start(Range,
+ Info,
+ PartMimeType,
+ FileDescriptor)
+ end,ValidRanges),
+ file:close(FileDescriptor),
+ %%Sends an end of the multipart
+ httpd_socket:deliver(Info#mod.socket_type,Info#mod.socket,
+ "\r\n--RangeBoundarySeparator--"),
+ sent;
+ _ ->
+ close
+ end.
+
+send_multipart_start({{Start,End},{StartByte,EndByte,Size}},Info,
+ PartMimeType,FileDescriptor)when StartByte<Size->
+ PartHeader=["\r\n--RangeBoundarySeparator\r\n","Content-type: ",
+ PartMimeType,"\r\n",
+ "Content-Range:bytes=",integer_to_list(StartByte),"-",
+ integer_to_list(EndByte),"/",
+ integer_to_list(Size),"\r\n\r\n"],
+ send_part_start(Info#mod.socket_type, Info#mod.socket, PartHeader,
+ FileDescriptor, Start, End);
+
+
+send_multipart_start({{Start,End},{StartByte,EndByte,Size}}, Info,
+ PartMimeType, FileDescriptor)->
+ PartHeader=["\r\n--RangeBoundarySeparator\r\n","Content-type: ",
+ PartMimeType,"\r\n",
+ "Content-Range:bytes=",integer_to_list(Size-(StartByte-Size)),
+ "-",integer_to_list(EndByte),"/",
+ integer_to_list(Size),"\r\n\r\n"],
+ send_part_start(Info#mod.socket_type, Info#mod.socket, PartHeader,
+ FileDescriptor, Start, End).
+
+send_part_start(SocketType, Socket, PartHeader, FileDescriptor, Start, End)->
+ case httpd_socket:deliver(SocketType, Socket, PartHeader) of
+ ok ->
+ send_part_start(SocketType,Socket,FileDescriptor,Start,End);
+ _ ->
+ close
+ end.
+
+send_range_response(Path, Info, Start, Stop, FileInfo, LastModified)->
+ case file:open(Path, [raw,binary]) of
+ {ok, FileDescriptor} ->
+ file:close(FileDescriptor),
+ ?DEBUG("send_range_response -> FileDescriptor: ~p",
+ [FileDescriptor]),
+ Suffix = httpd_util:suffix(Path),
+ MimeType = httpd_util:lookup_mime_default(Info#mod.config_db,
+ Suffix,"text/plain"),
+ Size = get_range_size(Start,Stop,FileInfo),
+ case valid_range(Start,Stop,FileInfo) of
+ {true,StartByte,EndByte,TotByte}->
+ Head =[{code,206},{content_type, MimeType},
+ {etag, httpd_util:create_etag(FileInfo)},
+ {content_range,["bytes=",
+ integer_to_list(StartByte),"-",
+ integer_to_list(EndByte),"/",
+ integer_to_list(TotByte)]},
+ {content_length, Size} | LastModified],
+ BodyFunc = fun send_range_body/5,
+ Arg = [Info#mod.socket_type,
+ Info#mod.socket, Path, Start, Stop],
+ {proceed,[{response,{response ,Head, {BodyFunc,Arg}}}|
+ Info#mod.data]};
+ {false,Reason} ->
+ {proceed, [{status, {416, Reason, bad_range_boundaries }}]}
+ end;
+ {error, _Reason} ->
+ ?ERROR("send_range_response -> failed open file: ~p",[_Reason]),
+ {proceed,Info#mod.data}
+ end.
+
+
+send_range_body(SocketType,Socket,Path,Start,End) ->
+ ?DEBUG("mod_range -> send_range_body",[]),
+ case file:open(Path, [raw,binary]) of
+ {ok,FileDescriptor} ->
+ send_part_start(SocketType,Socket,FileDescriptor,Start,End),
+ file:close(FileDescriptor);
+ _ ->
+ close
+ end.
+
+send_part_start(SocketType,Socket,FileDescriptor,Start,End) ->
+ case Start of
+ from_end ->
+ file:position(FileDescriptor,{eof,End}),
+ send_body(SocketType,Socket,FileDescriptor);
+ from_start ->
+ file:position(FileDescriptor,{bof,End}),
+ send_body(SocketType,Socket,FileDescriptor);
+ Byte when integer(Byte) ->
+ file:position(FileDescriptor,{bof,Start}),
+ send_part(SocketType,Socket,FileDescriptor,End)
+ end,
+ sent.
+
+
+%%This function could replace send_body by calling it with Start=0 and
+%%End=FileSize, but I guess that would be bad for performance
+send_part(SocketType,Socket,FileDescriptor,End)->
+ case file:position(FileDescriptor,{cur,0}) of
+ {ok,NewPos} ->
+ if
+ NewPos > End ->
+ ok;
+ true ->
+ Size = get_file_chunk_size(NewPos,End,?FILE_CHUNK_SIZE),
+ case file:read(FileDescriptor,Size) of
+ eof ->
+ ok;
+ {error, _Reason} ->
+ ok;
+ {ok,Binary} ->
+ case httpd_socket:deliver(SocketType,Socket,
+ Binary) of
+ socket_closed ->
+ ?LOG("send_range of body -> socket "
+ "closed while sending",[]),
+ socket_close;
+ _ ->
+ send_part(SocketType,Socket,
+ FileDescriptor,End)
+ end
+ end
+ end;
+ _->
+ ok
+ end.
+
+%% validate that the range is in the limits of the file
+valid_ranges(RangeList, _Path, FileInfo)->
+ lists:mapfoldl(fun({Start,End},Acc)->
+ case Acc of
+ true ->
+ case valid_range(Start,End,FileInfo) of
+ {true,StartB,EndB,Size}->
+ {{{Start,End},
+ {StartB,EndB,Size}},true};
+ _ ->
+ false
+ end;
+ _ ->
+ {false,false}
+ end
+ end,true,RangeList).
+
+
+
+valid_range(from_end,End,FileInfo)->
+ Size=FileInfo#file_info.size,
+ if
+ End < Size ->
+ {true,(Size+End),Size-1,Size};
+ true ->
+ false
+ end;
+valid_range(from_start,End,FileInfo)->
+ Size=FileInfo#file_info.size,
+ if
+ End < Size ->
+ {true,End,Size-1,Size};
+ true ->
+ false
+ end;
+
+valid_range(Start,End,FileInfo)when Start=<End->
+ case FileInfo#file_info.size of
+ FileSize when Start< FileSize ->
+ case FileInfo#file_info.size of
+ Size when End<Size ->
+ {true,Start,End,FileInfo#file_info.size};
+ Size ->
+ {true,Start,Size-1,Size}
+ end;
+ _->
+ {false,"The size of the range is negative"}
+ end;
+
+valid_range(_Start,_End,_FileInfo)->
+ {false,"Range starts out of file boundaries"}.
+%% Find the modification date of the file
+get_modification_date(Path)->
+ case file:read_file_info(Path) of
+ {ok, FileInfo0} ->
+ case (catch httpd_util:rfc1123_date(FileInfo0#file_info.mtime)) of
+ Date when is_list(Date) ->
+ {FileInfo0, [{last_modified, Date}]};
+ _ ->
+ {FileInfo0, []}
+ end;
+ _ ->
+ {#file_info{}, []}
+ end.
+
+%Calculate the size of the chunk to read
+
+get_file_chunk_size(Position, End, DefaultChunkSize)
+ when (Position+DefaultChunkSize) =< End->
+ DefaultChunkSize;
+get_file_chunk_size(Position, End, _DefaultChunkSize)->
+ (End-Position) +1.
+
+
+
+%Get the size of the range to send. Remember that
+%a range runs from StartByte up to EndByte, which means that
+%the number of bytes in a range is (EndByte-StartByte)+1
+
+get_range_size(from_end, Stop, _FileInfo)->
+ integer_to_list(-1*Stop);
+
+get_range_size(from_start, StartByte, FileInfo) ->
+ integer_to_list((((FileInfo#file_info.size)-StartByte)));
+
+get_range_size(StartByte, EndByte, _FileInfo) ->
+ integer_to_list((EndByte-StartByte)+1).
+
+parse_ranges("\bytes\=" ++ Ranges)->
+ parse_ranges("bytes\=" ++ Ranges);
+parse_ranges("bytes\=" ++ Ranges)->
+ case string:tokens(Ranges,", ") of
+ [Range] ->
+ parse_range(Range);
+ [Range1|SplittedRanges]->
+ {multipart,lists:map(fun parse_range/1,[Range1|SplittedRanges])}
+ end;
+%Bad unit
+parse_ranges(Ranges)->
+ io:format("Bad Ranges : ~p",[Ranges]),
+ error.
+%Parse the range specification from the request to {Start,End}
+%Start/End : Numeric string | []
+
+parse_range(Range)->
+ format_range(split_range(Range,[],[])).
+format_range({[],BytesFromEnd})->
+ {from_end,-1*(list_to_integer(BytesFromEnd))};
+format_range({StartByte,[]})->
+ {from_start,list_to_integer(StartByte)};
+format_range({StartByte,EndByte})->
+ {list_to_integer(StartByte),list_to_integer(EndByte)}.
+%The last case returns the split range
+split_range([],Current,Other)->
+ {lists:reverse(Other),lists:reverse(Current)};
+
+split_range([$-|Rest],Current,Other)->
+ split_range(Rest,Other,Current);
+
+split_range([N|Rest],Current,End) ->
+ split_range(Rest,[N|Current],End).
+
+send_body(SocketType,Socket,FileDescriptor) ->
+ case file:read(FileDescriptor,?FILE_CHUNK_SIZE) of
+ {ok,Binary} ->
+ ?DEBUG("send_body -> send another chunk: ~p",[size(Binary)]),
+ case httpd_socket:deliver(SocketType,Socket,Binary) of
+ socket_closed ->
+ ?LOG("send_body -> socket closed while sending",[]),
+ socket_close;
+ _ ->
+ send_body(SocketType,Socket,FileDescriptor)
+ end;
+ eof ->
+ ?DEBUG("send_body -> done with this file",[]),
+ eof
+ end.
diff --git a/src/couch_inets/mod_responsecontrol.erl b/src/couch_inets/mod_responsecontrol.erl
new file mode 100644
index 00000000..b1a330b3
--- /dev/null
+++ b/src/couch_inets/mod_responsecontrol.erl
@@ -0,0 +1,301 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+
+-module(mod_responsecontrol).
+-export([do/1]).
+
+-include("httpd.hrl").
+
+do(Info) ->
+ ?DEBUG("do -> response_control",[]),
+ case httpd_util:key1search(Info#mod.data,status) of
+ %% A status code has been generated!
+ {_StatusCode, _PhraseArgs, _Reason} ->
+ {proceed, Info#mod.data};
+ %% No status code has been generated!
+ undefined ->
+ case httpd_util:key1search(Info#mod.data, response) of
+ %% No response has been generated!
+ undefined ->
+ case do_responsecontrol(Info) of
+ continue ->
+ {proceed, Info#mod.data};
+ Response ->
+ {proceed,[Response | Info#mod.data]}
+ end;
+ %% A response has been generated or sent!
+ _Response ->
+ {proceed, Info#mod.data}
+ end
+ end.
+
+%%----------------------------------------------------------------------
+%%Check that the request headers do not contain any restrictions on
+%%whether a response shall be created or not
+%%----------------------------------------------------------------------
+do_responsecontrol(Info) ->
+ ?DEBUG("do_response_control -> Request URI: ~p",[Info#mod.request_uri]),
+ Path = mod_alias:path(Info#mod.data, Info#mod.config_db,
+ Info#mod.request_uri),
+ case file:read_file_info(Path) of
+ {ok, FileInfo} ->
+ control(Path, Info, FileInfo);
+ _ ->
+ %% The requested asset is not a plain file and then it must
+ %% be generated everytime its requested
+ continue
+ end.
+
+%%----------------------------------------------------------------------
+%%Control the If-Match, If-None-Match, and If-Modified-Since
+%%----------------------------------------------------------------------
+
+
+%% If a client sends more then one of the if-XXXX fields in a request
+%% The standard says it does not specify the behaviour so I specified it :-)
+%% The priority between the fields is
+%% 1.If-modified
+%% 2.If-Unmodified
+%% 3.If-Match
+%% 4.If-Nomatch
+
+%% This means if more than one of the fields are in the request the
+%% field with highest priority will be used
+
+%%If the request is a range request the If-Range field will be the winner.
+
+control(Path, Info, FileInfo) ->
+ case control_range(Path, Info, FileInfo) of
+ undefined ->
+ case control_Etag(Path, Info, FileInfo) of
+ undefined ->
+ case control_modification(Path, Info, FileInfo) of
+ continue ->
+ continue;
+ ReturnValue ->
+ send_return_value(ReturnValue, FileInfo)
+ end;
+ continue ->
+ continue;
+ ReturnValue ->
+ send_return_value(ReturnValue, FileInfo)
+ end;
+ Response->
+ Response
+ end.
+
+%%----------------------------------------------------------------------
+%%If there are both a range and an if-range field control if
+%%----------------------------------------------------------------------
+control_range(Path,Info,FileInfo) ->
+ case httpd_util:key1search(Info#mod.parsed_header, "range") of
+ undefined->
+ undefined;
+ _Range ->
+ case httpd_util:key1search(Info#mod.parsed_header,"if-range") of
+ undefined ->
+ undefined;
+ EtagOrDate ->
+ control_if_range(Path,Info,FileInfo,EtagOrDate)
+ end
+ end.
+
+control_if_range(_Path, Info, FileInfo, EtagOrDate) ->
+ case httpd_util:convert_request_date(strip_date(EtagOrDate)) of
+ bad_date ->
+ FileEtag=httpd_util:create_etag(FileInfo),
+ case FileEtag of
+ EtagOrDate ->
+ continue;
+ _ ->
+ {if_range,send_file}
+ end;
+ _ErlDate ->
+ %%We got the date in the request if it is
+ case control_modification_data(Info, FileInfo#file_info.mtime,
+ "if-range") of
+ modified ->
+ {if_range,send_file};
+ _UnmodifiedOrUndefined->
+ continue
+ end
+ end.
+
+%%----------------------------------------------------------------------
+%%Controls the values of the If-Match and If-None-Match
+%%----------------------------------------------------------------------
+control_Etag(Path, Info, FileInfo)->
+ FileEtag = httpd_util:create_etag(FileInfo),
+ %%Control if the E-Tag for the resource matches one of the Etags in
+ %%the -if-match header field
+ case control_match(Info, FileInfo, "if-match", FileEtag) of
+ nomatch ->
+ %%None of the Etags in the if-match field matched the current
+ %%Etag for the resource return a 304
+ {412, Info, Path};
+ match ->
+ continue;
+ undefined ->
+ case control_match(Info, FileInfo, "if-none-match", FileEtag) of
+ nomatch ->
+ continue;
+ match ->
+ case Info#mod.method of
+ "GET" ->
+ {304, Info, Path};
+ "HEAD" ->
+ {304, Info, Path};
+ _OtherrequestMethod ->
+ {412, Info, Path}
+ end;
+ undefined ->
+ undefined
+ end
+ end.
+
+%%----------------------------------------------------------------------
+%%Check whether there are any Etags for HeaderField in the request; if so,
+%%check whether they match the Etag for the requested file
+%%----------------------------------------------------------------------
+control_match(Info, _FileInfo, HeaderField, FileEtag)->
+ case split_etags(httpd_util:key1search(Info#mod.parsed_header,
+ HeaderField)) of
+ undefined->
+ undefined;
+ Etags->
+ %%Control that the match any star not is availible
+ case lists:member("*",Etags) of
+ true->
+ match;
+ false->
+ compare_etags(FileEtag, Etags)
+ end
+ end.
+
+%%----------------------------------------------------------------------
+%%Split the etags from the request
+%%----------------------------------------------------------------------
+split_etags(undefined)->
+ undefined;
+split_etags(Tags) ->
+ string:tokens(Tags,", ").
+
+%%----------------------------------------------------------------------
+%%Control if the etag for the file is in the list
+%%----------------------------------------------------------------------
+compare_etags(Tag,Etags) ->
+ case lists:member(Tag,Etags) of
+ true ->
+ match;
+ _ ->
+ nomatch
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%%Control if the file is modificated %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%----------------------------------------------------------------------
+%%Check the If-Modified-Since and If-Unmodified-Since header fields
+%%----------------------------------------------------------------------
+control_modification(Path,Info,FileInfo)->
+ ?DEBUG("control_modification() -> entry",[]),
+ case control_modification_data(Info,
+ FileInfo#file_info.mtime,
+ "if-modified-since") of
+ modified->
+ continue;
+ unmodified->
+ {304, Info, Path};
+ undefined ->
+ case control_modification_data(Info,
+ FileInfo#file_info.mtime,
+ "if-unmodified-since") of
+ modified ->
+ {412, Info, Path};
+ _ContinueUndefined ->
+ continue
+ end
+ end.
+
+%%----------------------------------------------------------------------
+%%Checks the date from the http-request if-modified-since and
+%%if-unmodified-since against the modification date of the
+%%file
+%%----------------------------------------------------------------------
+%%Info is the record about the request
+%%ModificationTime is the time the file was edited last
+%%Header Field is the name of the field to control
+
+control_modification_data(Info, ModificationTime, HeaderField)->
+ case strip_date(httpd_util:key1search(Info#mod.parsed_header,
+ HeaderField)) of
+ undefined->
+ undefined;
+ LastModified0 ->
+ LastModified = calendar:universal_time_to_local_time(
+ httpd_util:convert_request_date(LastModified0)),
+ ?DEBUG("control_modification_data() -> "
+ "~n Request-Field: ~s"
+ "~n FileLastModified: ~p"
+ "~n FieldValue: ~p",
+ [HeaderField, ModificationTime, LastModified]),
+ FileTime =
+ calendar:datetime_to_gregorian_seconds(ModificationTime),
+ FieldTime = calendar:datetime_to_gregorian_seconds(LastModified),
+ if
+ FileTime =< FieldTime ->
+ ?DEBUG("File unmodified~n", []), unmodified;
+ FileTime >= FieldTime ->
+ ?DEBUG("File modified~n", []), modified
+ end
+ end.
+
+%% IE4 & NS4 sends an extra '; length=xxxx' string at the end of the If-Modified-Since
+%% header, we detect this and ignore it (the RFCs does not mention this).
+strip_date(undefined) ->
+ undefined;
+strip_date([]) ->
+ [];
+strip_date([$;,$ | _]) ->
+ [];
+strip_date([C | Rest]) ->
+ [C | strip_date(Rest)].
+
+send_return_value({412,_,_}, _FileInfo)->
+ {status,{412,none,"Precondition Failed"}};
+
+send_return_value({304,Info,Path}, FileInfo)->
+ Suffix = httpd_util:suffix(Path),
+ MimeType = httpd_util:lookup_mime_default(Info#mod.config_db,Suffix,
+ "text/plain"),
+ LastModified =
+ case (catch httpd_util:rfc1123_date(FileInfo#file_info.mtime)) of
+ Date when is_list(Date) ->
+ [{last_modified, Date}];
+        _ -> %% This will rarely happen, but could happen
+ %% if a computer is wrongly configured.
+ []
+ end,
+
+ Header = [{code,304},
+ {etag, httpd_util:create_etag(FileInfo)},
+ {content_length,"0"}, {mime_type, MimeType} | LastModified],
+ {response, {response, Header, nobody}}.
diff --git a/src/couch_inets/mod_security.erl b/src/couch_inets/mod_security.erl
new file mode 100644
index 00000000..dac6d70e
--- /dev/null
+++ b/src/couch_inets/mod_security.erl
@@ -0,0 +1,294 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_security).
+
+%% Security Audit Functionality
+
+%% User API exports
+-export([list_blocked_users/1, list_blocked_users/2, list_blocked_users/3,
+ block_user/4, block_user/5,
+ unblock_user/2, unblock_user/3, unblock_user/4,
+ list_auth_users/1, list_auth_users/2, list_auth_users/3]).
+
+%% module API exports
+-export([do/1, load/2, store/2, remove/1]).
+
+-include("httpd.hrl").
+
+%% We will not make the change to use base64 in stdlib in inets just yet.
+%% it will be included in the next major release of inets.
+-compile({nowarn_deprecated_function, {http_base_64, encode, 1}}).
+
+-define(VMODULE,"SEC").
+
+
+%% do/1
+do(Info) ->
+ %% Check and see if any user has been authorized.
+ case httpd_util:key1search(Info#mod.data,remote_user,not_defined_user) of
+ not_defined_user ->
+ %% No user has been authorized.
+ case httpd_util:key1search(Info#mod.data, response) of
+ %% A status code has been generated!
+ {401, _Response} ->
+ case httpd_util:key1search(Info#mod.parsed_header,
+ "authorization") of
+ undefined ->
+ %% Not an authorization attempt (server just replied to
+ %% challenge for authentication)
+ {proceed, Info#mod.data};
+ [$B,$a,$s,$i,$c,$ |EncodedString] ->
+ %% Someone tried to authenticate, and obviously failed!
+ DecodedString =
+ case (catch
+ http_base_64:decode(
+ EncodedString)) of
+ %% Decode failed
+ {'EXIT',{function_clause, _}} ->
+ EncodedString;
+ String ->
+ String
+ end,
+
+ report_failed(Info, DecodedString,"Failed authentication"),
+ take_failed_action(Info, DecodedString),
+ {proceed, Info#mod.data}
+ end;
+ _ ->
+ {proceed, Info#mod.data}
+ end;
+ User ->
+ %% A user has been authenticated, now is he blocked ?
+ Path = mod_alias:path(Info#mod.data,
+ Info#mod.config_db,
+ Info#mod.request_uri),
+ {_Dir, SDirData} = secretp(Path, Info#mod.config_db),
+ Addr = httpd_util:lookup(Info#mod.config_db, bind_address),
+ Port = httpd_util:lookup(Info#mod.config_db, port),
+ case mod_security_server:check_blocked_user(Info, User,
+ SDirData,
+ Addr, Port) of
+ true ->
+ report_failed(Info, User ,"User Blocked"),
+ {proceed, [{status, {403, Info#mod.request_uri, ""}}|Info#mod.data]};
+ false ->
+ report_failed(Info, User,"Authentication Succedded"),
+ mod_security_server:store_successful_auth(Addr, Port,
+ User, SDirData),
+ {proceed, Info#mod.data}
+ end
+ end.
+
+report_failed(Info, Auth, Event) ->
+ Request = Info#mod.request_line,
+ {_PortNumber,RemoteHost}=(Info#mod.init_data)#init_data.peername,
+ String = RemoteHost ++ " : " ++ Event ++ " : " ++ Request ++ " : " ++ Auth,
+ mod_disk_log:security_log(Info,String),
+ mod_log:security_log(Info, String).
+
+take_failed_action(Info, Auth) ->
+ Path = mod_alias:path(Info#mod.data, Info#mod.config_db,
+ Info#mod.request_uri),
+ {_Dir, SDirData} = secretp(Path, Info#mod.config_db),
+ Addr = httpd_util:lookup(Info#mod.config_db, bind_address),
+ Port = httpd_util:lookup(Info#mod.config_db, port),
+ mod_security_server:store_failed_auth(Info, Addr, Port,
+ Auth, SDirData).
+
+secretp(Path, ConfigDB) ->
+ Directories = ets:match(ConfigDB,{directory,'$1','_'}),
+ case secret_path(Path, Directories) of
+ {yes, Directory} ->
+ SDirs0 = httpd_util:multi_lookup(ConfigDB, security_directory),
+ SDir = lists:filter(fun(X) ->
+ lists:member({path, Directory}, X)
+ end, SDirs0),
+ {Directory, lists:flatten(SDir)};
+ no ->
+ {[], []}
+ end.
+
+secret_path(Path,Directories) ->
+ secret_path(Path, httpd_util:uniq(lists:sort(Directories)), to_be_found).
+
+secret_path(_Path, [], to_be_found) ->
+ no;
+secret_path(_Path, [], Directory) ->
+ {yes, Directory};
+secret_path(Path, [[NewDirectory]|Rest], Directory) ->
+ case regexp:match(Path, NewDirectory) of
+ {match, _, _} when Directory == to_be_found ->
+ secret_path(Path, Rest, NewDirectory);
+ {match, _, Length} when Length > length(Directory)->
+ secret_path(Path, Rest, NewDirectory);
+ {match, _, _} ->
+ secret_path(Path, Rest, Directory);
+ nomatch ->
+ secret_path(Path, Rest, Directory)
+ end.
+
+
+load("<Directory " ++ Directory,[]) ->
+ Dir = httpd_conf:custom_clean(Directory,"",">"),
+ {ok, [{security_directory, Dir, [{path, Dir}]}]};
+load(eof,[{security_directory,Directory, _DirData}|_]) ->
+ {error, ?NICE("Premature end-of-file in "++Directory)};
+load("SecurityDataFile " ++ FileName,
+ [{security_directory, Dir, DirData}]) ->
+ File = httpd_conf:clean(FileName),
+ {ok, [{security_directory, Dir, [{data_file, File}|DirData]}]};
+load("SecurityCallbackModule " ++ ModuleName,
+ [{security_directory, Dir, DirData}]) ->
+ Mod = list_to_atom(httpd_conf:clean(ModuleName)),
+ {ok, [{security_directory, Dir, [{callback_module, Mod}|DirData]}]};
+load("SecurityMaxRetries " ++ Retries,
+ [{security_directory, Dir, DirData}]) ->
+ load_return_int_tag("SecurityMaxRetries", max_retries,
+ httpd_conf:clean(Retries), Dir, DirData);
+load("SecurityBlockTime " ++ Time,
+ [{security_directory, Dir, DirData}]) ->
+ load_return_int_tag("SecurityBlockTime", block_time,
+ httpd_conf:clean(Time), Dir, DirData);
+load("SecurityFailExpireTime " ++ Time,
+ [{security_directory, Dir, DirData}]) ->
+ load_return_int_tag("SecurityFailExpireTime", fail_expire_time,
+ httpd_conf:clean(Time), Dir, DirData);
+load("SecurityAuthTimeout " ++ Time0,
+ [{security_directory, Dir, DirData}]) ->
+ Time = httpd_conf:clean(Time0),
+ load_return_int_tag("SecurityAuthTimeout", auth_timeout,
+ httpd_conf:clean(Time), Dir, DirData);
+load("AuthName " ++ Name0,
+ [{security_directory, Dir, DirData}]) ->
+ Name = httpd_conf:clean(Name0),
+ {ok, [{security_directory, Dir, [{auth_name, Name}|DirData]}]};
+load("</Directory>",[{security_directory,Directory, DirData}]) ->
+ {ok, [], {security_directory, Directory, DirData}}.
+
+load_return_int_tag(Name, Atom, Time, Dir, DirData) ->
+ case Time of
+ "infinity" ->
+ {ok, [{security_directory, Dir, [{Atom, 99999999999999999999999999999}|DirData]}]};
+ _Int ->
+ case catch list_to_integer(Time) of
+ {'EXIT', _} ->
+ {error, Time++" is an invalid "++Name};
+ Val ->
+ {ok, [{security_directory, Dir, [{Atom, Val}|DirData]}]}
+ end
+ end.
+
+store({security_directory, _Dir0, DirData}, ConfigList) ->
+ Addr = httpd_util:key1search(ConfigList, bind_address),
+ Port = httpd_util:key1search(ConfigList, port),
+ mod_security_server:start(Addr, Port),
+ SR = httpd_util:key1search(ConfigList, server_root),
+
+ case httpd_util:key1search(DirData, data_file, no_data_file) of
+ no_data_file ->
+ {error, no_security_data_file};
+ DataFile0 ->
+ DataFile =
+ case filename:pathtype(DataFile0) of
+ relative ->
+ filename:join(SR, DataFile0);
+ _ ->
+ DataFile0
+ end,
+ case mod_security_server:new_table(Addr, Port, DataFile) of
+ {ok, TwoTables} ->
+ NewDirData0 = lists:keyreplace(data_file, 1, DirData,
+ {data_file, TwoTables}),
+ NewDirData1 = case Addr of
+ undefined ->
+ [{port,Port}|NewDirData0];
+ _ ->
+ [{port,Port},{bind_address,Addr}|
+ NewDirData0]
+ end,
+ {ok, {security_directory,NewDirData1}};
+ {error, Err} ->
+ {error, {{open_data_file, DataFile}, Err}}
+ end
+ end.
+
+
+remove(ConfigDB) ->
+ Addr = case ets:lookup(ConfigDB, bind_address) of
+ [] ->
+ undefined;
+ [{bind_address, Address}] ->
+ Address
+ end,
+ [{port, Port}] = ets:lookup(ConfigDB, port),
+ mod_security_server:delete_tables(Addr, Port),
+ mod_security_server:stop(Addr, Port).
+
+
+%%
+%% User API
+%%
+
+%% list_blocked_users
+
+list_blocked_users(Port) ->
+ list_blocked_users(undefined, Port).
+
+list_blocked_users(Port, Dir) when integer(Port) ->
+ list_blocked_users(undefined,Port,Dir);
+list_blocked_users(Addr, Port) when integer(Port) ->
+ mod_security_server:list_blocked_users(Addr, Port).
+
+list_blocked_users(Addr, Port, Dir) ->
+ mod_security_server:list_blocked_users(Addr, Port, Dir).
+
+
+%% block_user
+
+block_user(User, Port, Dir, Time) ->
+ block_user(User, undefined, Port, Dir, Time).
+block_user(User, Addr, Port, Dir, Time) ->
+ mod_security_server:block_user(User, Addr, Port, Dir, Time).
+
+
+%% unblock_user
+
+unblock_user(User, Port) ->
+ unblock_user(User, undefined, Port).
+
+unblock_user(User, Port, Dir) when integer(Port) ->
+ unblock_user(User, undefined, Port, Dir);
+unblock_user(User, Addr, Port) when integer(Port) ->
+ mod_security_server:unblock_user(User, Addr, Port).
+
+unblock_user(User, Addr, Port, Dir) ->
+ mod_security_server:unblock_user(User, Addr, Port, Dir).
+
+
+%% list_auth_users
+
+list_auth_users(Port) ->
+ list_auth_users(undefined,Port).
+
+list_auth_users(Port, Dir) when integer(Port) ->
+ list_auth_users(undefined, Port, Dir);
+list_auth_users(Addr, Port) when integer(Port) ->
+ mod_security_server:list_auth_users(Addr, Port).
+
+list_auth_users(Addr, Port, Dir) ->
+ mod_security_server:list_auth_users(Addr, Port, Dir).
diff --git a/src/couch_inets/mod_security_server.erl b/src/couch_inets/mod_security_server.erl
new file mode 100644
index 00000000..26463faf
--- /dev/null
+++ b/src/couch_inets/mod_security_server.erl
@@ -0,0 +1,628 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%% Security Audit Functionality
+
+%%
+%% The gen_server code.
+%%
+%% A gen_server is needed in this module to take care of shared access to the
+%% data file used to store failed and successful authentications as well as
+%% user blocks.
+%%
+%% The storage model is a write-through model with both an ets and a dets
+%% table. Writes are done to both the ets and then the dets table, but reads
+%% are only done from the ets table.
+%%
+%% This approach also enables parallelism when using dets by returning the
+%% same dets table identifier when opening several files with the same
+%% physical location.
+%%
+%% NOTE: This could be implemented using a single dets table, as it is
+%% possible to open a dets file with the ram_file flag, but this
+%% would require periodical sync's to disk, and it would be hard
+%% to decide when such an operation should occur.
+%%
+
+
+-module(mod_security_server).
+
+-include("httpd.hrl").
+
+-behaviour(gen_server).
+
+
+%% User API exports (called via mod_security)
+-export([list_blocked_users/2, list_blocked_users/3,
+ block_user/5,
+ unblock_user/3, unblock_user/4,
+ list_auth_users/2, list_auth_users/3]).
+
+%% Internal exports (for mod_security only)
+-export([start/2, stop/1, stop/2,
+ new_table/3, delete_tables/2,
+ store_failed_auth/5, store_successful_auth/4,
+ check_blocked_user/5]).
+
+%% gen_server exports
+-export([start_link/2, init/1,
+ handle_info/2, handle_call/3, handle_cast/2,
+ terminate/2,
+ code_change/3]).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%% External API %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% start_link/3
+%%
+%% NOTE: This is called by httpd_misc_sup when the process is started
+%%
+
+start_link(Addr, Port) ->
+ Name = make_name(Addr, Port),
+ gen_server:start_link({local, Name}, ?MODULE, [], [{timeout, infinity}]).
+
+
+%% start/2
+%% Called by the mod_security module.
+
+start(Addr, Port) ->
+ Name = make_name(Addr, Port),
+ case whereis(Name) of
+ undefined ->
+ httpd_misc_sup:start_sec_server(Addr, Port);
+ _ -> %% Already started...
+ ok
+ end.
+
+
+%% stop
+
+stop(Port) ->
+ stop(undefined, Port).
+stop(Addr, Port) ->
+ Name = make_name(Addr, Port),
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ httpd_misc_sup:stop_sec_server(Addr, Port)
+ end.
+
+
+%% list_blocked_users
+
+list_blocked_users(Addr, Port) ->
+ Name = make_name(Addr,Port),
+ Req = {list_blocked_users, Addr, Port, '_'},
+ call(Name, Req).
+
+list_blocked_users(Addr, Port, Dir) ->
+ Name = make_name(Addr, Port),
+ Req = {list_blocked_users, Addr, Port, Dir},
+ call(Name, Req).
+
+
+%% block_user
+
+block_user(User, Addr, Port, Dir, Time) ->
+ Name = make_name(Addr, Port),
+ Req = {block_user, User, Addr, Port, Dir, Time},
+ call(Name, Req).
+
+
+%% unblock_user
+
+unblock_user(User, Addr, Port) ->
+ Name = make_name(Addr, Port),
+ Req = {unblock_user, User, Addr, Port, '_'},
+ call(Name, Req).
+
+unblock_user(User, Addr, Port, Dir) ->
+ Name = make_name(Addr, Port),
+ Req = {unblock_user, User, Addr, Port, Dir},
+ call(Name, Req).
+
+
+%% list_auth_users
+
+list_auth_users(Addr, Port) ->
+ Name = make_name(Addr, Port),
+ Req = {list_auth_users, Addr, Port, '_'},
+ call(Name, Req).
+
+list_auth_users(Addr, Port, Dir) ->
+ Name = make_name(Addr,Port),
+ Req = {list_auth_users, Addr, Port, Dir},
+ call(Name, Req).
+
+
+%% new_table
+
+new_table(Addr, Port, TabName) ->
+ Name = make_name(Addr,Port),
+ Req = {new_table, Addr, Port, TabName},
+ call(Name, Req).
+
+
+%% delete_tables
+
+delete_tables(Addr, Port) ->
+ Name = make_name(Addr, Port),
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ call(Name, delete_tables)
+ end.
+
+
+%% store_failed_auth
+
+store_failed_auth(Info, Addr, Port, DecodedString, SDirData) ->
+ Name = make_name(Addr,Port),
+ Msg = {store_failed_auth,[Info,DecodedString,SDirData]},
+ cast(Name, Msg).
+
+
+%% store_successful_auth
+
+store_successful_auth(Addr, Port, User, SDirData) ->
+ Name = make_name(Addr,Port),
+ Msg = {store_successful_auth, [User,Addr,Port,SDirData]},
+ cast(Name, Msg).
+
+
+%% check_blocked_user
+
+check_blocked_user(Info, User, SDirData, Addr, Port) ->
+ Name = make_name(Addr, Port),
+ Req = {check_blocked_user, [Info, User, SDirData]},
+ call(Name, Req).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%% Server call-back functions %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+init(_) ->
+ process_flag(trap_exit, true),
+ {ok, []}.
+
+handle_call(stop, _From, _Tables) ->
+ {stop, normal, ok, []};
+
+handle_call({block_user, User, Addr, Port, Dir, Time}, _From, Tables) ->
+ Ret = block_user_int({User, Addr, Port, Dir, Time}),
+ {reply, Ret, Tables};
+
+handle_call({list_blocked_users, Addr, Port, Dir}, _From, Tables) ->
+ Blocked = list_blocked(Tables, Addr, Port, Dir, []),
+ {reply, Blocked, Tables};
+
+handle_call({unblock_user, User, Addr, Port, Dir}, _From, Tables) ->
+ Ret = unblock_user_int({User, Addr, Port, Dir}),
+ {reply, Ret, Tables};
+
+handle_call({list_auth_users, Addr, Port, Dir}, _From, Tables) ->
+ Auth = list_auth(Tables, Addr, Port, Dir, []),
+ {reply, Auth, Tables};
+
+handle_call({new_table, Addr, Port, Name}, _From, Tables) ->
+ case lists:keysearch(Name, 1, Tables) of
+ {value, {Name, {Ets, Dets}}} ->
+ {reply, {ok, {Ets, Dets}}, Tables};
+ false ->
+ TName = make_name(Addr,Port,length(Tables)),
+ case dets:open_file(TName, [{type, bag}, {file, Name},
+ {repair, true},
+ {access, read_write}]) of
+ {ok, DFile} ->
+ ETS = ets:new(TName, [bag, private]),
+ sync_dets_to_ets(DFile, ETS),
+ NewTables = [{Name, {ETS, DFile}}|Tables],
+ {reply, {ok, {ETS, DFile}}, NewTables};
+ {error, Err} ->
+ {reply, {error, {create_dets, Err}}, Tables}
+ end
+ end;
+
+handle_call(delete_tables, _From, Tables) ->
+ lists:foreach(fun({_Name, {ETS, DETS}}) ->
+ dets:close(DETS),
+ ets:delete(ETS)
+ end, Tables),
+ {reply, ok, []};
+
+handle_call({check_blocked_user, [Info, User, SDirData]}, _From, Tables) ->
+ {ETS, DETS} = httpd_util:key1search(SDirData, data_file),
+ Dir = httpd_util:key1search(SDirData, path),
+ Addr = httpd_util:key1search(SDirData, bind_address),
+ Port = httpd_util:key1search(SDirData, port),
+ CBModule =
+ httpd_util:key1search(SDirData, callback_module, no_module_at_all),
+ Ret =
+ check_blocked_user(Info, User, Dir, Addr, Port, ETS, DETS, CBModule),
+ {reply, Ret, Tables};
+
+handle_call(_Request,_From,Tables) ->
+ {reply,ok,Tables}.
+
+
+%% handle_cast
+
+handle_cast({store_failed_auth, [_, _, []]}, Tables) ->
+    %% Some other authentication scheme than mod_auth (e.g. mod_htaccess)
+    %% was the source of the authentication failure, so we should ignore it!
+ {noreply, Tables};
+handle_cast({store_failed_auth, [Info, DecodedString, SDirData]}, Tables) ->
+ {ETS, DETS} = httpd_util:key1search(SDirData, data_file),
+ Dir = httpd_util:key1search(SDirData, path),
+ Addr = httpd_util:key1search(SDirData, bind_address),
+ Port = httpd_util:key1search(SDirData, port),
+ {ok, [User,Password]} = httpd_util:split(DecodedString,":",2),
+ Seconds = universal_time(),
+ Key = {User, Dir, Addr, Port},
+ %% Event
+ CBModule = httpd_util:key1search(SDirData,
+ callback_module, no_module_at_all),
+ auth_fail_event(CBModule,Addr,Port,Dir,User,Password),
+
+ %% Find out if any of this user's other failed logins are too old to keep..
+ case ets:match_object(ETS, {failed, {Key, '_', '_'}}) of
+ [] ->
+ no;
+ List ->
+ ExpireTime = httpd_util:key1search(SDirData,
+ fail_expire_time, 30)*60,
+ lists:map(fun({failed, {TheKey, LS, Gen}}) ->
+ Diff = Seconds-LS,
+ if
+ Diff > ExpireTime ->
+ ets:match_delete(ETS,
+ {failed,
+ {TheKey, LS, Gen}}),
+ dets:match_delete(DETS,
+ {failed,
+ {TheKey, LS, Gen}});
+ true ->
+ ok
+ end
+ end,
+ List)
+ end,
+
+ %% Insert the new failure..
+ Generation = length(ets:match_object(ETS, {failed, {Key, '_', '_'}})),
+ ets:insert(ETS, {failed, {Key, Seconds, Generation}}),
+ dets:insert(DETS, {failed, {Key, Seconds, Generation}}),
+
+ %% See if we should block this user..
+ MaxRetries = httpd_util:key1search(SDirData, max_retries, 3),
+ BlockTime = httpd_util:key1search(SDirData, block_time, 60),
+ case ets:match_object(ETS, {failed, {Key, '_', '_'}}) of
+ List1 when length(List1) >= MaxRetries ->
+ %% Block this user until Future
+ Future = Seconds+BlockTime*60,
+ Reason = io_lib:format("Blocking user ~s from dir ~s "
+ "for ~p minutes",
+ [User, Dir, BlockTime]),
+ mod_log:security_log(Info, lists:flatten(Reason)),
+
+ %% Event
+ user_block_event(CBModule,Addr,Port,Dir,User),
+
+ ets:match_delete(ETS,{blocked_user,
+ {User, Addr, Port, Dir, '$1'}}),
+ dets:match_delete(DETS, {blocked_user,
+ {User, Addr, Port, Dir, '$1'}}),
+ BlockRecord = {blocked_user,
+ {User, Addr, Port, Dir, Future}},
+ ets:insert(ETS, BlockRecord),
+ dets:insert(DETS, BlockRecord),
+ %% Remove previous failed requests.
+ ets:match_delete(ETS, {failed, {Key, '_', '_'}}),
+ dets:match_delete(DETS, {failed, {Key, '_', '_'}});
+ _ ->
+ no
+ end,
+ {noreply, Tables};
+
+handle_cast({store_successful_auth, [User, Addr, Port, SDirData]}, Tables) ->
+ {ETS, DETS} = httpd_util:key1search(SDirData, data_file),
+ AuthTimeOut = httpd_util:key1search(SDirData, auth_timeout, 30),
+ Dir = httpd_util:key1search(SDirData, path),
+ Key = {User, Dir, Addr, Port},
+
+ %% Remove failed entries for this Key
+ dets:match_delete(DETS, {failed, {Key, '_', '_'}}),
+ ets:match_delete(ETS, {failed, {Key, '_', '_'}}),
+
+ %% Keep track of when the last successful login took place.
+ Seconds = universal_time()+AuthTimeOut,
+ ets:match_delete(ETS, {success, {Key, '_'}}),
+ dets:match_delete(DETS, {success, {Key, '_'}}),
+ ets:insert(ETS, {success, {Key, Seconds}}),
+ dets:insert(DETS, {success, {Key, Seconds}}),
+ {noreply, Tables};
+
+handle_cast(Req, Tables) ->
+ error_msg("security server got unknown cast: ~p",[Req]),
+ {noreply, Tables}.
+
+
+%% handle_info
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+
+%% terminate
+
+terminate(_Reason, _Tables) ->
+ ok.
+
+
+%% code_change({down, ToVsn}, State, Extra)
+%%
+code_change({down, _}, State, _Extra) ->
+ {ok, State};
+
+
+%% code_change(FromVsn, State, Extra)
+%%
+code_change(_, State, _Extra) ->
+ {ok, State}.
+
+%% block_user_int/2
+block_user_int({User, Addr, Port, Dir, Time}) ->
+ Dirs = httpd_manager:config_match(Addr, Port, {security_directory, '_'}),
+ case find_dirdata(Dirs, Dir) of
+ {ok, DirData, {ETS, DETS}} ->
+ Time1 =
+ case Time of
+ infinity ->
+ 99999999999999999999999999999;
+ _ ->
+ Time
+ end,
+ Future = universal_time()+Time1,
+ ets:match_delete(ETS, {blocked_user, {User,Addr,Port,Dir,'_'}}),
+ dets:match_delete(DETS, {blocked_user,
+ {User,Addr,Port,Dir,'_'}}),
+ ets:insert(ETS, {blocked_user, {User,Addr,Port,Dir,Future}}),
+ dets:insert(DETS, {blocked_user, {User,Addr,Port,Dir,Future}}),
+ CBModule = httpd_util:key1search(DirData, callback_module,
+ no_module_at_all),
+ user_block_event(CBModule,Addr,Port,Dir,User),
+ true;
+ _ ->
+ {error, no_such_directory}
+ end.
+
+
+find_dirdata([], _Dir) ->
+ false;
+find_dirdata([{security_directory, DirData}|SDirs], Dir) ->
+ case lists:keysearch(path, 1, DirData) of
+ {value, {path, Dir}} ->
+ {value, {data_file, {ETS, DETS}}} =
+ lists:keysearch(data_file, 1, DirData),
+ {ok, DirData, {ETS, DETS}};
+ _ ->
+ find_dirdata(SDirs, Dir)
+ end.
+
+%% unblock_user_int/2
+
+unblock_user_int({User, Addr, Port, Dir}) ->
+ Dirs = httpd_manager:config_match(Addr, Port, {security_directory, '_'}),
+ case find_dirdata(Dirs, Dir) of
+ {ok, DirData, {ETS, DETS}} ->
+ case ets:match_object(ETS,
+ {blocked_user,{User,Addr,Port,Dir,'_'}}) of
+ [] ->
+ {error, not_blocked};
+ _Objects ->
+ ets:match_delete(ETS, {blocked_user,
+ {User, Addr, Port, Dir, '_'}}),
+ dets:match_delete(DETS, {blocked_user,
+ {User, Addr, Port, Dir, '_'}}),
+ CBModule = httpd_util:key1search(DirData,
+ callback_module,
+ no_module_at_all),
+ user_unblock_event(CBModule,Addr,Port,Dir,User),
+ true
+ end;
+ _ ->
+ {error, no_such_directory}
+ end.
+
+
+
+%% list_auth/2
+
+list_auth([], _Addr, _Port, _Dir, Acc) ->
+ Acc;
+list_auth([{_Name, {ETS, DETS}}|Tables], Addr, Port, Dir, Acc) ->
+ case ets:match_object(ETS, {success, {{'_', Dir, Addr, Port}, '_'}}) of
+ [] ->
+ list_auth(Tables, Addr, Port, Dir, Acc);
+ List ->
+ TN = universal_time(),
+ NewAcc = lists:foldr(fun({success,{{U,Ad,P,D},T}},Ac) ->
+ if
+ T-TN > 0 ->
+ [U|Ac];
+ true ->
+ Rec = {success,
+ {{U,Ad,P,D},T}},
+ ets:match_delete(ETS,Rec),
+ dets:match_delete(DETS,Rec),
+ Ac
+ end
+ end,
+ Acc, List),
+ list_auth(Tables, Addr, Port, Dir, NewAcc)
+ end.
+
+
+%% list_blocked/2
+
+list_blocked([], _Addr, _Port, _Dir, Acc) ->
+ TN = universal_time(),
+ lists:foldl(fun({U,Ad,P,D,T}, Ac) ->
+ if
+ T-TN > 0 ->
+ [{U,Ad,P,D,local_time(T)}|Ac];
+ true ->
+ Ac
+ end
+ end,
+ [], Acc);
+list_blocked([{_Name, {ETS, _DETS}}|Tables], Addr, Port, Dir, Acc) ->
+ List = ets:match_object(ETS, {blocked_user,
+ {'_',Addr,Port,Dir,'_'}}),
+
+ NewBlocked = lists:foldl(fun({blocked_user, X}, A) ->
+ [X|A] end, Acc, List),
+
+ list_blocked(Tables, Addr, Port, Dir, NewBlocked).
+
+
+%%
+%% sync_dets_to_ets/2
+%%
+%% Reads dets-table DETS and syncronizes it with the ets-table ETS.
+%%
+sync_dets_to_ets(DETS, ETS) ->
+ dets:traverse(DETS, fun(X) ->
+ ets:insert(ETS, X),
+ continue
+ end).
+
+%%
+%% check_blocked_user/7 -> true | false
+%%
+%% Check if a specific user is blocked from access.
+%%
+%% A side effect of this routine is that it also unblocks other users
+%% whose blocking time has expired. This keeps the tables as small
+%% as possible.
+%%
+check_blocked_user(Info, User, Dir, Addr, Port, ETS, DETS, CBModule) ->
+ TN = universal_time(),
+ BlockList = ets:match_object(ETS, {blocked_user, {User, '_', '_', '_', '_'}}),
+ Blocked = lists:foldl(fun({blocked_user, X}, A) ->
+ [X|A] end, [], BlockList),
+ check_blocked_user(Info,User,Dir,
+ Addr,Port,ETS,DETS,TN,Blocked,CBModule).
+
+check_blocked_user(_Info, _User, _Dir, _Addr, _Port, _ETS, _DETS, _TN,
+ [], _CBModule) ->
+ false;
+check_blocked_user(Info, User, Dir, Addr, Port, ETS, DETS, TN,
+ [{User,Addr,Port,Dir,T}| _], CBModule) ->
+ TD = T-TN,
+ if
+ TD =< 0 ->
+ %% Blocking has expired, remove and grant access.
+ unblock_user(Info, User, Dir, Addr, Port, ETS, DETS, CBModule),
+ false;
+ true ->
+ true
+ end;
+check_blocked_user(Info, User, Dir, Addr, Port, ETS, DETS, TN,
+ [{OUser,ODir,OAddr,OPort,T}|Ls], CBModule) ->
+ TD = T-TN,
+ if
+ TD =< 0 ->
+ %% Blocking has expired, remove.
+ unblock_user(Info, OUser, ODir, OAddr, OPort,
+ ETS, DETS, CBModule);
+ true ->
+ true
+ end,
+ check_blocked_user(Info, User, Dir, Addr, Port, ETS, DETS,
+ TN, Ls, CBModule).
+
+unblock_user(Info, User, Dir, Addr, Port, ETS, DETS, CBModule) ->
+ Reason =
+ io_lib:format("User ~s was removed from the block list for dir ~s",
+ [User, Dir]),
+ mod_log:security_log(Info, lists:flatten(Reason)),
+ user_unblock_event(CBModule,Addr,Port,Dir,User),
+ dets:match_delete(DETS, {blocked_user, {User, Addr, Port, Dir, '_'}}),
+ ets:match_delete(ETS, {blocked_user, {User, Addr, Port, Dir, '_'}}).
+
+
+make_name(Addr,Port) ->
+ httpd_util:make_name("httpd_security",Addr,Port).
+
+make_name(Addr,Port,Num) ->
+ httpd_util:make_name("httpd_security",Addr,Port,
+ "__" ++ integer_to_list(Num)).
+
+
+auth_fail_event(Mod,Addr,Port,Dir,User,Passwd) ->
+ event(auth_fail,Mod,Addr,Port,Dir,[{user,User},{password,Passwd}]).
+
+user_block_event(Mod,Addr,Port,Dir,User) ->
+ event(user_block,Mod,Addr,Port,Dir,[{user,User}]).
+
+user_unblock_event(Mod,Addr,Port,Dir,User) ->
+ event(user_unblock,Mod,Addr,Port,Dir,[{user,User}]).
+
+event(Event,Mod,undefined,Port,Dir,Info) ->
+ (catch Mod:event(Event,Port,Dir,Info));
+event(Event,Mod,Addr,Port,Dir,Info) ->
+ (catch Mod:event(Event,Addr,Port,Dir,Info)).
+
+universal_time() ->
+ calendar:datetime_to_gregorian_seconds(calendar:universal_time()).
+
+local_time(T) ->
+ calendar:universal_time_to_local_time(
+ calendar:gregorian_seconds_to_datetime(T)).
+
+
+error_msg(F, A) ->
+ error_logger:error_msg(F, A).
+
+
+call(Name, Req) ->
+ case (catch gen_server:call(Name, Req)) of
+ {'EXIT', Reason} ->
+ {error, Reason};
+ Reply ->
+ Reply
+ end.
+
+
+cast(Name, Msg) ->
+ case (catch gen_server:cast(Name, Msg)) of
+ {'EXIT', Reason} ->
+ {error, Reason};
+ Result ->
+ Result
+ end.
diff --git a/src/couch_inets/mod_trace.erl b/src/couch_inets/mod_trace.erl
new file mode 100644
index 00000000..2ab73e38
--- /dev/null
+++ b/src/couch_inets/mod_trace.erl
@@ -0,0 +1,87 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(mod_trace).
+
+-export([do/1]).
+
+-include("httpd.hrl").
+
+
+do(Info) ->
+ %%?vtrace("do",[]),
+ case Info#mod.method of
+ "TRACE" ->
+ case response_generated(Info) of
+ false->
+ generate_trace_response(Info);
+ true->
+ {proceed,Info#mod.data}
+ end;
+ _ ->
+ {proceed,Info#mod.data}
+ end.
+
+
+%%---------------------------------------------------------------------
+%% Generate the trace response. The trace response consists of an
+%% HTTP header, and the body will be the request.
+%%---------------------------------------------------------------------
+
+generate_trace_response(Info)->
+ RequestHead=Info#mod.parsed_header,
+ Body=generate_trace_response_body(RequestHead),
+ Len = length(Info#mod.request_line ++ Body),
+ Response=["HTTP/1.1 200 OK\r\n",
+ "Content-Type:message/http\r\n",
+ "Content-Length:",integer_to_list(Len),"\r\n\r\n",
+ Info#mod.request_line,Body],
+ httpd_socket:deliver(Info#mod.socket_type,Info#mod.socket,Response),
+ {proceed,[{response,{already_sent,200,Len}}|Info#mod.data]}.
+
+generate_trace_response_body(Parsed_header)->
+ generate_trace_response_body(Parsed_header,[]).
+
+generate_trace_response_body([],Head)->
+ lists:flatten(Head);
+generate_trace_response_body([{[],[]}|Rest],Head) ->
+ generate_trace_response_body(Rest,Head);
+generate_trace_response_body([{Field,Value}|Rest],Head) ->
+ generate_trace_response_body(Rest,[Field ++ ":" ++ Value ++ "\r\n"|Head]).
+
+
+
+%%----------------------------------------------------------------------
+%%Function that controls whether a response is generated or not
+%%----------------------------------------------------------------------
+response_generated(Info)->
+ case httpd_util:key1search(Info#mod.data,status) of
+ %% A status code has been generated!
+ {_StatusCode,_PhraseArgs,_Reason}->
+ true;
+            %% No status code has been generated; check for a response
+ undefined ->
+ case httpd_util:key1search(Info#mod.data, response) of
+ %% No response has been generated!
+ undefined ->
+ false;
+ %% A response has been generated or sent!
+ _Response ->
+ true
+ end
+ end.
+
diff --git a/src/couch_inets/tftp.erl b/src/couch_inets/tftp.erl
new file mode 100644
index 00000000..6af1705e
--- /dev/null
+++ b/src/couch_inets/tftp.erl
@@ -0,0 +1,310 @@
+%%%-------------------------------------------------------------------
+%%% File : tftp.erl
+%%% Author : Hakan Mattsson <hakan@erix.ericsson.se>
+%%% Description : Trivial FTP
+%%% Created : 18 May 2004 by Hakan Mattsson <hakan@erix.ericsson.se>
+%%%-------------------------------------------------------------------
+%%%
+%%% This is a complete implementation of the following IETF standards:
+%%%
+%%% RFC 1350, The TFTP Protocol (revision 2).
+%%% RFC 2347, TFTP Option Extension.
+%%% RFC 2348, TFTP Blocksize Option.
+%%% RFC 2349, TFTP Timeout Interval and Transfer Size Options.
+%%%
+%%% The only feature that is not implemented in this release is
+%%% the "netascii" transfer mode.
+%%%
+%%% The start/1 function starts a daemon process which listens for
+%%% UDP packets on a port. When it receives a request for read or
+%%% write it spawns a temporary server process which handles the
+%%% actual transfer of the file. On the client side the read_file/3
+%%% and write_file/3 functions spawn a temporary client process which
+%%% establishes contact with a TFTP daemon and performs the actual
+%%% transfer of the file.
+%%%
+%%% Most of the options are common for both the client and the server
+%%% side, but some of them differs a little. Here are the available
+%%% options:
+%%%
+%%% {debug, Level}
+%%%
+%%% Level = none | brief | normal | verbose | all
+%%%
+%%% Controls the level of debug printouts. The default is none.
+%%%
+%%% {host, Host}
+%%%
+%%% The name or IP address of the host where the TFTP daemon
+%%% resides. This option is only used by the client. See
+%%% 'inet' about valid host names.
+%%%
+%%% {port, Port}
+%%%
+%%% Port = integer()
+%%%
+%%% The TFTP port where the daemon listens. It defaults to the
+%%% standardized number 69. On the server side it may sometimes
+%%% make sense to set it to 0, which means that the daemon just
+%%% will pick a free port (which is returned by the start/1
+%%% function).
+%%%
+%%%      If a socket has somehow already been connected, the
+%%% {udp, [{fd, integer()}]} option can be used to pass the
+%%% open file descriptor to gen_udp. This can be automated
+%%% a bit by using a command line argument stating the
+%%% prebound file descriptor number. For example, if the
+%%% Port is 69 and the file descriptor 22 has been opened by
+%%% setuid_socket_wrap. Then the command line argument
+%%% "-tftpd_69 22" will trigger the prebound file
+%%% descriptor 22 to be used instead of opening port 69.
+%%%      The UDP option {udp, [{fd, 22}]} will automatically be added.
+%%%      See init:get_argument/1 about command line arguments and
+%%% gen_udp:open/2 about UDP options.
+%%%
+%%% {port_policy, Policy}
+%%%
+%%% Policy = random | Port | {range, MinPort, MaxPort}
+%%% Port = MinPort = MaxPort = integer()
+%%%
+%%% Policy for the selection of the temporary port which is used
+%%% by the server/client during the file transfer. It defaults to
+%%% 'random' which is the standardized policy. With this policy a
+%%%      randomized free port is used. A single port or a range of ports
+%%% can be useful if the protocol should pass thru a firewall.
+%%%
+%%% {prebound_fd, InitArgFlag}
+%%%
+%%% InitArgFlag = atom()
+%%%
+%%%      If a socket has somehow already been connected, the
+%%% {udp, [{fd, integer()}]} option can be used to pass the
+%%% open file descriptor to gen_udp.
+%%%
+%%%      The prebound_fd option makes it possible to give the
+%%% file descriptor as a command line argument. The typical
+%%% usage is when used in conjunction with setuid_socket_wrap
+%%% to be able to open privileged sockets. For example if the
+%%% file descriptor 22 has been opened by setuid_socket_wrap
+%%%      and you have chosen my_tftp_fd as the init argument, the
+%%%      command line should look like this: "erl -my_tftp_fd 22" and
+%%% FileDesc should be set to my_tftpd_fd. This would
+%%% automatically imply {fd, 22} to be set as UDP option.
+%%%
+%%% {udp, UdpOptions}
+%%%
+%%% Options to gen_udp:open/2.
+%%%
+%%% {use_tsize, Bool}
+%%%
+%%% Bool = boolean()
+%%%
+%%% Flag for automated usage of the "tsize" option. With this set
+%%% to true, the write_file/3 client will determine the filesize
+%%% and send it to the server as the standardized "tsize" option.
+%%% A read_file/3 client will just acquire filesize from the
+%%% server by sending a zero "tsize".
+%%%
+%%% {max_tsize, MaxTsize}
+%%%
+%%% MaxTsize = integer() | infinity
+%%%
+%%% Threshold for the maximal filesize in bytes. The transfer will
+%%% be aborted if the limit is exceeded. It defaults to
+%%% 'infinity'.
+%%%
+%%% {max_conn, MaxConn}
+%%%
+%%% MaxConn = integer() | infinity
+%%%
+%%% Threshold for the maximal number of active connections. The
+%%% daemon will reject the setup of new connections if the limit
+%%% is exceeded. It defaults to 'infinity'.
+%%%
+%%% {TftpKey, TftpVal}
+%%%
+%%% TftpKey = string()
+%%% TftpVal = string()
+%%%
+%%% The name and value of a TFTP option.
+%%%
+%%% {reject, Feature}
+%%%
+%%% Feature = Mode | TftpKey
+%%% Mode = read | write
+%%% TftpKey = string()
+%%%
+%%% Control which features that should be rejected.
+%%% This is mostly useful for the server as it may restrict
+%%% usage of certain TFTP options or read/write access.
+%%%
+%%% {callback, {RegExp, Module, State}}
+%%%
+%%% RegExp = string()
+%%% Module = atom()
+%%% State = term()
+%%%
+%%% Registration of a callback module. When a file is to be
+%%% transferred, its local filename will be matched to the
+%%% regular expressions of the registered callbacks. The first
+%%%      matching callback will be used during the transfer. The
+%%% callback module must implement the 'tftp' behaviour.
+%%%
+%%% On the server side the callback interaction starts with a
+%%% call to open/5 with the registered initial callback
+%%% state. open/5 is expected to open the (virtual) file. Then
+%%% either the read/1 or write/2 functions are invoked
+%%%      repeatedly, once per transferred block. At each function
+%%% call the state returned from the previous call is
+%%% obtained. When the last block has been encountered the read/1
+%%% or write/2 functions is expected to close the (virtual)
+%%%      file and return its last state. The abort/3 function is only
+%%% used in error situations. prepare/5 is not used on the server
+%%% side.
+%%%
+%%% On the client side the callback interaction is the same, but
+%%% it starts and ends a bit differently. It starts with a call
+%%% to prepare/5 with the same arguments as open/5
+%%% takes. prepare/5 is expected to validate the TFTP options,
+%%% suggested by the user and return the subset of them that it
+%%% accepts. Then the options is sent to the server which will
+%%% perform the same TFTP option negotiation procedure. The
+%%% options that are accepted by the server is forwarded to the
+%%% open/5 function on the client side. On the client side the
+%%%      open/5 function must accept all options as is or reject the
+%%% transfer. Then the callback interaction follows the same
+%%% pattern as described above for the server side. When the last
+%%%      block is encountered in read/1 or write/2, the returned state
+%%% is forwarded to the user and returned from read_file/3 or
+%%% write_file/3.
+%%%-------------------------------------------------------------------
+
+-module(tftp).
+
+%%-------------------------------------------------------------------
+%% Interface
+%%-------------------------------------------------------------------
+
+%% public functions
+-export([
+ read_file/3,
+ write_file/3,
+ start/1,
+ info/1,
+ change_config/2,
+ start/0
+ ]).
+
+-export([behaviour_info/1]).
+
+behaviour_info(callbacks) ->
+ [{prepare, 6}, {open, 6}, {read, 1}, {write, 2}, {abort, 3}];
+behaviour_info(_) ->
+ undefined.
+
+-include("tftp.hrl").
+
+%%-------------------------------------------------------------------
+%% read_file(RemoteFilename, LocalFilename, Options) ->
+%% {ok, LastCallbackState} | {error, Reason}
+%%
+%% RemoteFilename = string()
+%% LocalFilename = binary | string()
+%% Options = [option()]
+%% LastCallbackState = term()
+%% Reason = term()
+%%
+%% Reads a (virtual) file from a TFTP server
+%%
+%% If LocalFilename is the atom 'binary', tftp_binary will be used as
+%% callback module. It will concatenate all transferred blocks and
+%% return them as one single binary in the CallbackState.
+%%
+%% When LocalFilename is a string, it will be matched to the
+%% registered callback modules and hopefully one of them will be
+%% selected. By default, tftp_file will be used as callback module. It
+%% will write each transferred block to the file named
+%% LocalFilename. The number of transferred bytes will be returned as
+%% LastCallbackState.
+%%-------------------------------------------------------------------
+
+read_file(RemoteFilename, LocalFilename, Options) ->
+ tftp_engine:client_start(read, RemoteFilename, LocalFilename, Options).
+
+%%-------------------------------------------------------------------
+%% write(RemoteFilename, LocalFilename, Options) ->
+%% {ok, LastCallbackState} | {error, Reason}
+%%
+%% RemoteFilename = string()
+%% LocalFilename = binary() | string()
+%% Options = [option()]
+%% LastCallbackState = term()
+%% Reason = term()
+%%
+%% Writes a (virtual) file to a TFTP server
+%%
+%% If LocalFilename is a binary, tftp_binary will be used as callback
+%% module. The binary will be transferred block by block and the number
+%% of transferred bytes will be returned as LastCallbackState.
+%%
+%% When LocalFilename is a string, it will be matched to the
+%% registered callback modules and hopefully one of them will be
+%% selected. By default, tftp_file will be used as callback module. It
+%% will read the file named LocalFilename block by block. The number
+%% of transferred bytes will be returned as LastCallbackState.
+%%-------------------------------------------------------------------
+
+write_file(RemoteFilename, LocalFilename, Options) ->
+ tftp_engine:client_start(write, RemoteFilename, LocalFilename, Options).
+
+%%-------------------------------------------------------------------
+%% start(Options) -> {ok, Pid} | {error, Reason}
+%%
+%% Options = [option()]
+%% Pid = pid()
+%% Reason = term()
+%%
+%% Starts a daemon process which listens for udp packets on a
+%% port. When it receives a request for read or write it spawns
+%% a temporary server process which handles the actual transfer
+%% of the (virtual) file.
+%%-------------------------------------------------------------------
+
+start(Options) ->
+ tftp_engine:daemon_start(Options).
+
+%%-------------------------------------------------------------------
+%% info(Pid) -> {ok, Options} | {error, Reason}
+%%
+%% Options = [option()]
+%% Reason = term()
+%%
+%% Returns info about a tftp daemon, server or client process
+%%-------------------------------------------------------------------
+
+info(Pid) ->
+ tftp_engine:info(Pid).
+
+%%-------------------------------------------------------------------
+%% change_config(Pid, Options) -> ok | {error, Reason}
+%%
+%% Options = [option()]
+%% Reason = term()
+%%
+%% Changes config for a tftp daemon, server or client process
+%% Must be used with care.
+%%-------------------------------------------------------------------
+
+change_config(Pid, Options) ->
+ tftp_engine:change_config(Pid, Options).
+
+%%-------------------------------------------------------------------
+%% start() -> ok | {error, Reason}
+%%
+%% Reason = term()
+%%
+%% Start the application
+%%-------------------------------------------------------------------
+
+start() ->
+ application:start(inets).
diff --git a/src/couch_inets/tftp.hrl b/src/couch_inets/tftp.hrl
new file mode 100644
index 00000000..40719fa7
--- /dev/null
+++ b/src/couch_inets/tftp.hrl
@@ -0,0 +1,47 @@
+%%%-------------------------------------------------------------------
+%%% Defines
+%%%-------------------------------------------------------------------
+
+-define(TFTP_DEFAULT_PORT, 69).% Default server port
+
+-define(TFTP_OPCODE_RRQ, 1). % Read request
+-define(TFTP_OPCODE_WRQ, 2). % Write request
+-define(TFTP_OPCODE_DATA, 3). % Data
+-define(TFTP_OPCODE_ACK, 4). % Acknowledgement
+-define(TFTP_OPCODE_ERROR, 5). % Error
+-define(TFTP_OPCODE_OACK, 6). % Option acknowledgment
+
+-define(TFTP_ERROR_UNDEF, 0). % Not defined, see error message (if any)
+-define(TFTP_ERROR_ENOENT, 1). % File not found.
+-define(TFTP_ERROR_EACCES, 2). % Access violation.
+-define(TFTP_ERROR_ENOSPC, 3). % Disk full or allocation exceeded.
+-define(TFTP_ERROR_BADOP, 4). % Illegal TFTP operation.
+-define(TFTP_ERROR_BADBLK, 5). % Unknown transfer ID.
+-define(TFTP_ERROR_EEXIST, 6). % File already exists.
+-define(TFTP_ERROR_BADUSER, 7). % No such user.
+-define(TFTP_ERROR_BADOPT, 8). % Unrequested or illegal option.
+
+-record(tftp_msg_req, {access, filename, mode, options, local_filename}).
+-record(tftp_msg_data, {block_no, data}).
+-record(tftp_msg_ack, {block_no}).
+-record(tftp_msg_error, {code, text, details}).
+-record(tftp_msg_oack, {options}).
+
+-record(config, {parent_pid = self(),
+ udp_socket,
+ udp_options = [binary, {reuseaddr, true}, {active, once}],
+ udp_host = "localhost",
+ udp_port = ?TFTP_DEFAULT_PORT,
+ port_policy = random,
+ use_tsize = false,
+ max_tsize = infinity, % Filesize
+ max_conn = infinity,
+ rejected = [],
+ polite_ack = false,
+ debug_level = none,
+ timeout,
+ user_options = [],
+ callbacks = []}).
+
+-record(callback, {regexp, internal, module, state, block_no, count}).
+
diff --git a/src/couch_inets/tftp_binary.erl b/src/couch_inets/tftp_binary.erl
new file mode 100644
index 00000000..0850d0cb
--- /dev/null
+++ b/src/couch_inets/tftp_binary.erl
@@ -0,0 +1,181 @@
+%%%-------------------------------------------------------------------
+%%% File : tft_binary.erl
+%%% Author : Hakan Mattsson <hakan@erix.ericsson.se>
+%%% Description :
+%%%
+%%% Created : 24 May 2004 by Hakan Mattsson <hakan@erix.ericsson.se>
+%%%-------------------------------------------------------------------
+
+-module(tftp_binary).
+
+%%%-------------------------------------------------------------------
+%%% Interface
+%%%-------------------------------------------------------------------
+
+-behaviour(tftp).
+
+-export([prepare/6, open/6, read/1, write/2, abort/3]).
+-export([prepare/5, open/5]).
+
+-record(read_state, {options, blksize, bin, is_network_ascii, count}).
+-record(write_state, {options, blksize, list, is_network_ascii}).
+
+%%-------------------------------------------------------------------
+%% Prepare
+%%-------------------------------------------------------------------
+
+prepare(_Peer, Access, Filename, Mode, SuggestedOptions, Initial) ->
+ %% Kept for backwards compatibility
+ prepare(Access, Filename, Mode, SuggestedOptions, Initial).
+
+prepare(Access, Bin, Mode, SuggestedOptions, []) ->
+ %% Client side
+ case catch handle_options(Access, Bin, Mode, SuggestedOptions) of
+ {ok, IsNetworkAscii, AcceptedOptions} when Access =:= read, binary(Bin) ->
+ State = #read_state{options = AcceptedOptions,
+ blksize = lookup_blksize(AcceptedOptions),
+ bin = Bin,
+ is_network_ascii = IsNetworkAscii,
+ count = size(Bin)},
+ {ok, AcceptedOptions, State};
+ {ok, IsNetworkAscii, AcceptedOptions} when Access =:= write, Bin =:= binary ->
+ State = #write_state{options = AcceptedOptions,
+ blksize = lookup_blksize(AcceptedOptions),
+ list = [],
+ is_network_ascii = IsNetworkAscii},
+ {ok, AcceptedOptions, State};
+ {error, {Code, Text}} ->
+ {error, {Code, Text}}
+ end;
+prepare(_Access, _Bin, _Mode, _SuggestedOptions, _Initial) ->
+ {error, {undef, "Illegal callback options."}}.
+
+%%-------------------------------------------------------------------
+%% Open
+%%-------------------------------------------------------------------
+
+open(_Peer, Access, Filename, Mode, SuggestedOptions, Initial) ->
+ %% Kept for backwards compatibility
+ open(Access, Filename, Mode, SuggestedOptions, Initial).
+
+open(Access, Bin, Mode, SuggestedOptions, []) ->
+ %% Server side
+ case prepare(Access, Bin, Mode, SuggestedOptions, []) of
+ {ok, AcceptedOptions, State} ->
+ open(Access, Bin, Mode, AcceptedOptions, State);
+ {error, {Code, Text}} ->
+ {error, {Code, Text}}
+ end;
+open(Access, Bin, Mode, NegotiatedOptions, State) ->
+ %% Both sides
+ IsNetworkAscii =
+ if
+ is_record(State, write_state) -> State#write_state.is_network_ascii;
+ is_record(State, read_state) -> State#read_state.is_network_ascii
+ end,
+ case catch handle_options(Access, Bin, Mode, NegotiatedOptions) of
+ {ok, IsNetworkAscii2, Options}
+ when Options =:= NegotiatedOptions,
+ IsNetworkAscii =:= IsNetworkAscii2 ->
+ {ok, NegotiatedOptions, State};
+ {error, {Code, Text}} ->
+ {error, {Code, Text}}
+ end.
+
+%%-------------------------------------------------------------------
+%% Read
+%%-------------------------------------------------------------------
+
+read(#read_state{bin = Bin} = State) when is_binary(Bin) ->
+ BlkSize = State#read_state.blksize,
+ if
+ size(Bin) >= BlkSize ->
+ <<Block:BlkSize/binary, Bin2/binary>> = Bin,
+ State2 = State#read_state{bin = Bin2},
+ {more, Block, State2};
+ size(Bin) < BlkSize ->
+ {last, Bin, State#read_state.count}
+ end.
+
+%%-------------------------------------------------------------------
+%% Write
+%%-------------------------------------------------------------------
+
+write(Bin, #write_state{list = List} = State) when is_binary(Bin), is_list(List) ->
+ Size = size(Bin),
+ BlkSize = State#write_state.blksize,
+ if
+ Size =:= BlkSize ->
+ {more, State#write_state{list = [Bin | List]}};
+ Size < BlkSize ->
+ Bin2 = list_to_binary(lists:reverse([Bin | List])),
+ {last, Bin2}
+ end.
+
+%%-------------------------------------------------------------------
+%% Abort
+%%-------------------------------------------------------------------
+
+abort(_Code, _Text, #read_state{bin = Bin} = State)
+ when record(State, read_state), binary(Bin) ->
+ ok;
+abort(_Code, _Text, #write_state{list = List} = State)
+ when record(State, write_state), list(List) ->
+ ok.
+
+%%-------------------------------------------------------------------
+%% Process options
+%%-------------------------------------------------------------------
+
+handle_options(Access, Bin, Mode, Options) ->
+ IsNetworkAscii = handle_mode(Mode),
+ Options2 = do_handle_options(Access, Bin, Options),
+ {ok, IsNetworkAscii, Options2}.
+
+handle_mode(Mode) ->
+ case Mode of
+ %% "netascii" -> true;
+ "octet" -> false;
+ _ -> throw({error, {badop, "Illegal mode " ++ Mode}})
+ end.
+
+do_handle_options(Access, Bin, [{Key, Val} | T]) ->
+ case Key of
+ "tsize" ->
+ case Access of
+ read when Val =:= "0", binary(Bin) ->
+ Tsize = integer_to_list(size(Bin)),
+ [{Key, Tsize} | do_handle_options(Access, Bin, T)];
+ _ ->
+ handle_integer(Access, Bin, Key, Val, T, 0, infinity)
+ end;
+ "blksize" ->
+ handle_integer(Access, Bin, Key, Val, T, 8, 65464);
+ "timeout" ->
+ handle_integer(Access, Bin, Key, Val, T, 1, 255);
+ _ ->
+ do_handle_options(Access, Bin, T)
+ end;
+do_handle_options(_Access, _Bin, []) ->
+ [].
+
+
+handle_integer(Access, Bin, Key, Val, Options, Min, Max) ->
+ case catch list_to_integer(Val) of
+ {'EXIT', _} ->
+ do_handle_options(Access, Bin, Options);
+ Int when Int >= Min, Int =< Max ->
+ [{Key, Val} | do_handle_options(Access, Bin, Options)];
+ Int when Int >= Min, Max =:= infinity ->
+ [{Key, Val} | do_handle_options(Access, Bin, Options)];
+ _Int ->
+ throw({error, {badopt, "Illegal " ++ Key ++ " value " ++ Val}})
+ end.
+
+lookup_blksize(Options) ->
+ case lists:keysearch("blksize", 1, Options) of
+ {value, {_, Val}} ->
+ list_to_integer(Val);
+ false ->
+ 512
+ end.
diff --git a/src/couch_inets/tftp_engine.erl b/src/couch_inets/tftp_engine.erl
new file mode 100644
index 00000000..fc494cf6
--- /dev/null
+++ b/src/couch_inets/tftp_engine.erl
@@ -0,0 +1,1121 @@
+%%%-------------------------------------------------------------------
+%%% File : tftp_engine.erl
+%%% Author : Hakan Mattsson <hakan@erix.ericsson.se>
+%%% Description : Protocol engine for trivial FTP
+%%%
+%%% Created : 18 May 2004 by Hakan Mattsson <hakan@erix.ericsson.se>
+%%%-------------------------------------------------------------------
+
+-module(tftp_engine).
+
+%%%-------------------------------------------------------------------
+%%% Interface
+%%%-------------------------------------------------------------------
+
+%% application internal functions
+-export([
+ daemon_start/1,
+ client_start/4,
+ info/1,
+ change_config/2
+ ]).
+
+%% module internal
+-export([
+ daemon_init/1,
+ server_init/2,
+ client_init/2,
+ wait_for_msg/3
+ ]).
+
+%% sys callback functions
+-export([
+ system_continue/3,
+ system_terminate/4,
+ system_code_change/4
+ ]).
+
+-include("tftp.hrl").
+
+-record(error, {what, code, text}).
+-define(ERROR(What, Code, Text), #error{what = What, code = Code, text = Text}).
+
+%%%-------------------------------------------------------------------
+%%% Info
+%%%-------------------------------------------------------------------
+
+info(ToPid) when is_pid(ToPid) ->
+ call(info, ToPid, timer:seconds(10)).
+
+change_config(ToPid, Options) when is_pid(ToPid) ->
+ BadKeys = [host, port, udp],
+ BadOptions = [{Key, Val} || {Key, Val} <- Options,
+ BadKey <- BadKeys,
+ Key =:= BadKey],
+ case BadOptions of
+ [] ->
+ call({change_config, Options}, ToPid, timer:seconds(10));
+ [{Key, Val} | _] ->
+ {error, {badarg, {Key, Val}}}
+ end.
+
+call(Req, ToPid, Timeout) when is_pid(ToPid) ->
+ Type = process,
+ Ref = erlang:monitor(Type, ToPid),
+ ToPid ! {Req, Ref, self()},
+ receive
+ {Reply, Ref, FromPid} when FromPid =:= ToPid ->
+ erlang:demonitor(Ref),
+ Reply;
+ {'DOWN', Ref, Type, FromPid, _Reason} when FromPid =:= ToPid ->
+ {error, timeout}
+ after Timeout ->
+ {error, timeout}
+ end.
+
+reply(Reply, Ref, ToPid) ->
+ ToPid ! {Reply, Ref, self()}.
+
+%%%-------------------------------------------------------------------
+%%% Daemon
+%%%-------------------------------------------------------------------
+
+%% Returns {ok, Port}
+daemon_start(Options) when is_list(Options) ->
+ Config = tftp_lib:parse_config(Options),
+ proc_lib:start_link(?MODULE, daemon_init, [Config], infinity).
+
+daemon_init(Config) when is_record(Config, config),
+ is_pid(Config#config.parent_pid) ->
+ process_flag(trap_exit, true),
+ UdpOptions = prepare_daemon_udp(Config),
+ case catch gen_udp:open(Config#config.udp_port, UdpOptions) of
+ {ok, Socket} ->
+ {ok, ActualPort} = inet:port(Socket),
+ proc_lib:init_ack({ok, self()}),
+ Config2 = Config#config{udp_socket = Socket,
+ udp_port = ActualPort},
+ print_debug_info(Config2, daemon, open, #tftp_msg_req{filename = ""}),
+ daemon_loop(Config2, 0, []);
+ {error, Reason} ->
+ Text = lists:flatten(io_lib:format("UDP open ~p -> ~p", [UdpOptions, Reason])),
+ print_debug_info(Config, daemon, open, ?ERROR(open, undef, Text)),
+ exit({gen_udp_open, UdpOptions, Reason});
+ Reason ->
+ Text = lists:flatten(io_lib:format("UDP open ~p -> ~p", [UdpOptions, Reason])),
+ print_debug_info(Config, daemon, open, ?ERROR(open, undef, Text)),
+ exit({gen_udp_open, UdpOptions, Reason})
+ end.
+
+prepare_daemon_udp(#config{udp_port = Port, udp_options = UdpOptions}) ->
+ case lists:keymember(fd, 1, UdpOptions) of
+ true ->
+ %% Use explicit fd
+ UdpOptions;
+ false ->
+ %% Use fd from setuid_socket_wrap, such as -tftpd_69
+ InitArg = list_to_atom("tftpd_" ++ integer_to_list(Port)),
+ case init:get_argument(InitArg) of
+ {ok, [[FdStr]] = Badarg} when is_list(FdStr) ->
+ case catch list_to_integer(FdStr) of
+ Fd when is_integer(Fd) ->
+ [{fd, Fd} | UdpOptions];
+ {'EXIT', _} ->
+ exit({badarg, {prebound_fd, InitArg, Badarg}})
+ end;
+ {ok, Badarg} ->
+ exit({badarg, {prebound_fd, InitArg, Badarg}});
+ error ->
+ UdpOptions
+ end
+ end.
+
+daemon_loop(DaemonConfig, N, Servers) ->
+ receive
+ {info, Ref, FromPid} when is_pid(FromPid) ->
+ ServerInfo = [{n_conn, N} | [{server, P} || P <- Servers]],
+ Info = internal_info(DaemonConfig, daemon) ++ ServerInfo,
+ reply({ok, Info}, Ref, FromPid),
+ daemon_loop(DaemonConfig, N, Servers);
+ {{change_config, Options}, Ref, FromPid} when is_pid(FromPid) ->
+ case catch tftp_lib:parse_config(Options, DaemonConfig) of
+ {'EXIT', Reason} ->
+ reply({error, Reason}, Ref, FromPid),
+ daemon_loop(DaemonConfig, N, Servers);
+ DaemonConfig2 when is_record(DaemonConfig2, config) ->
+ reply(ok, Ref, FromPid),
+ daemon_loop(DaemonConfig2, N, Servers)
+ end;
+ {udp, Socket, RemoteHost, RemotePort, Bin} when is_binary(Bin) ->
+ inet:setopts(Socket, [{active, once}]),
+ ServerConfig = DaemonConfig#config{parent_pid = self(),
+ udp_host = RemoteHost,
+ udp_port = RemotePort},
+ Msg = (catch tftp_lib:decode_msg(Bin)),
+ print_debug_info(ServerConfig, daemon, recv, Msg),
+ case Msg of
+ Req when is_record(Req, tftp_msg_req),
+ N < DaemonConfig#config.max_conn ->
+ Args = [ServerConfig, Req],
+ Pid = proc_lib:spawn_link(?MODULE, server_init, Args),
+ daemon_loop(DaemonConfig, N + 1, [Pid | Servers]);
+ Req when is_record(Req, tftp_msg_req) ->
+ Reply = #tftp_msg_error{code = enospc,
+ text = "Too many connections"},
+ send_msg(ServerConfig, daemon, Reply),
+ daemon_loop(DaemonConfig, N, Servers);
+ {'EXIT', Reply} when is_record(Reply, tftp_msg_error) ->
+ send_msg(ServerConfig, daemon, Reply),
+ daemon_loop(DaemonConfig, N, Servers);
+ Req ->
+ Reply = #tftp_msg_error{code = badop,
+ text = "Illegal TFTP operation"},
+ error("Daemon received: ~p from ~p:~p", [Req, RemoteHost, RemotePort]),
+ send_msg(ServerConfig, daemon, Reply),
+ daemon_loop(DaemonConfig, N, Servers)
+ end;
+ {system, From, Msg} ->
+ Misc = {daemon_loop, [DaemonConfig, N, Servers]},
+ sys:handle_system_msg(Msg, From, DaemonConfig#config.parent_pid, ?MODULE, [], Misc);
+ {'EXIT', Pid, Reason} when DaemonConfig#config.parent_pid =:= Pid ->
+ close_port(DaemonConfig, daemon, #tftp_msg_req{filename = ""}),
+ exit(Reason);
+ {'EXIT', Pid, _Reason} = Info ->
+ case lists:member(Pid, Servers) of
+ true ->
+ daemon_loop(DaemonConfig, N - 1, Servers -- [Pid]);
+ false ->
+ error("Daemon received: ~p", [Info]),
+ daemon_loop(DaemonConfig, N, Servers)
+ end;
+ Info ->
+ error("Daemon received: ~p", [Info]),
+ daemon_loop(DaemonConfig, N, Servers)
+ end.
+
+%%%-------------------------------------------------------------------
+%%% Server
+%%%-------------------------------------------------------------------
+
+server_init(Config, Req) when is_record(Config, config),
+ is_pid(Config#config.parent_pid),
+ is_record(Req, tftp_msg_req) ->
+ process_flag(trap_exit, true),
+ SuggestedOptions = Req#tftp_msg_req.options,
+ UdpOptions = Config#config.udp_options,
+ UdpOptions2 = lists:keydelete(fd, 1, UdpOptions),
+ Config1 = Config#config{udp_options = UdpOptions2},
+ Config2 = tftp_lib:parse_config(SuggestedOptions, Config1),
+ SuggestedOptions2 = Config2#config.user_options,
+ Req2 = Req#tftp_msg_req{options = SuggestedOptions2},
+ case open_free_port(Config2, server, Req2) of
+ {ok, Config3} ->
+ Filename = Req#tftp_msg_req.filename,
+ case match_callback(Filename, Config3#config.callbacks) of
+ {ok, Callback} ->
+ print_debug_info(Config3, server, match, Callback),
+ case pre_verify_options(Config3, Req2) of
+ ok ->
+ case callback({open, server_open}, Config3, Callback, Req2) of
+ {Callback2, {ok, AcceptedOptions}} ->
+ {LocalAccess, _} = local_file_access(Req2),
+ OptText = "Internal error. Not allowed to add new options.",
+ case post_verify_options(Config3, Req2, AcceptedOptions, OptText) of
+ {ok, Config4, Req3} when AcceptedOptions /= [] ->
+ Reply = #tftp_msg_oack{options = AcceptedOptions},
+ {Config5, Callback3, Next} =
+ transfer(Config4, Callback2, Req3, Reply, LocalAccess, undefined),
+ BlockNo =
+ case LocalAccess of
+ read -> 0;
+ write -> 1
+ end,
+ common_loop(Config5, Callback3, Req3, Next, LocalAccess, BlockNo);
+ {ok, Config4, Req3} when LocalAccess =:= write ->
+ BlockNo = 0,
+ common_ack(Config4, Callback2, Req3, LocalAccess, BlockNo, undefined);
+ {ok, Config4, Req3} when LocalAccess =:= read ->
+ BlockNo = 0,
+ common_read(Config4, Callback2, Req3, LocalAccess, BlockNo, BlockNo, undefined);
+ {error, {Code, Text}} ->
+ {undefined, Error} =
+ callback({abort, {Code, Text}}, Config3, Callback2, Req2),
+ send_msg(Config3, Req, Error),
+ terminate(Config3, Req2, ?ERROR(post_verify_options, Code, Text))
+ end;
+ {undefined, #tftp_msg_error{code = Code, text = Text} = Error} ->
+ send_msg(Config3, Req, Error),
+ terminate(Config3, Req, ?ERROR(server_open, Code, Text))
+ end;
+ {error, {Code, Text}} ->
+ {undefined, Error} =
+ callback({abort, {Code, Text}}, Config2, Callback, Req2),
+ send_msg(Config2, Req, Error),
+ terminate(Config2, Req2, ?ERROR(pre_verify_options, Code, Text))
+ end;
+ {error, #tftp_msg_error{code = Code, text = Text} = Error} ->
+ send_msg(Config3, Req, Error),
+ terminate(Config3, Req, ?ERROR(match_callback, Code, Text))
+ end;
+ #error{} = Error ->
+ terminate(Config2, Req, Error)
+ end.
+
+%%%-------------------------------------------------------------------
+%%% Client
+%%%-------------------------------------------------------------------
+
+%% LocalFilename = filename() | 'binary' | binary()
+%% Returns {ok, LastCallbackState} | {error, Reason}
+client_start(Access, RemoteFilename, LocalFilename, Options) ->
+ Config = tftp_lib:parse_config(Options),
+ Config2 = Config#config{parent_pid = self(),
+ udp_socket = undefined},
+ Req = #tftp_msg_req{access = Access,
+ filename = RemoteFilename,
+ mode = lookup_mode(Config2#config.user_options),
+ options = Config2#config.user_options,
+ local_filename = LocalFilename},
+ Args = [Config2, Req],
+ case proc_lib:start_link(?MODULE, client_init, Args, infinity) of
+ {ok, LastCallbackState} ->
+ {ok, LastCallbackState};
+ {error, Error} ->
+ {error, Error}
+ end.
+
+client_init(Config, Req) when is_record(Config, config),
+ is_pid(Config#config.parent_pid),
+ is_record(Req, tftp_msg_req) ->
+ process_flag(trap_exit, true),
+ case open_free_port(Config, client, Req) of
+ {ok, Config2} ->
+ Req2 =
+ case Config2#config.use_tsize of
+ true ->
+ SuggestedOptions = Req#tftp_msg_req.options,
+ SuggestedOptions2 = tftp_lib:replace_val("tsize", "0", SuggestedOptions),
+ Req#tftp_msg_req{options = SuggestedOptions2};
+ false ->
+ Req
+ end,
+ LocalFilename = Req2#tftp_msg_req.local_filename,
+ case match_callback(LocalFilename, Config2#config.callbacks) of
+ {ok, Callback} ->
+ print_debug_info(Config2, client, match, Callback),
+ client_prepare(Config2, Callback, Req2);
+ {error, #tftp_msg_error{code = Code, text = Text}} ->
+ terminate(Config, Req, ?ERROR(match, Code, Text))
+ end;
+ #error{} = Error ->
+ terminate(Config, Req, Error)
+ end.
+
+client_prepare(Config, Callback, Req) ->
+ case pre_verify_options(Config, Req) of
+ ok ->
+ case callback({open, client_prepare}, Config, Callback, Req) of
+ {Callback2, {ok, AcceptedOptions}} ->
+ OptText = "Internal error. Not allowed to add new options.",
+ case post_verify_options(Config, Req, AcceptedOptions, OptText) of
+ {ok, Config2, Req2} ->
+ {LocalAccess, _} = local_file_access(Req2),
+ {Config3, Callback3, Next} =
+ transfer(Config2, Callback2, Req2, Req2, LocalAccess, undefined),
+ client_open(Config3, Callback3, Req2, Next);
+ {error, {Code, Text}} ->
+ callback({abort, {Code, Text}}, Config, Callback2, Req),
+ terminate(Config, Req, ?ERROR(post_verify_options, Code, Text))
+ end;
+ {undefined, #tftp_msg_error{code = Code, text = Text}} ->
+ terminate(Config, Req, ?ERROR(client_prepare, Code, Text))
+ end;
+ {error, {Code, Text}} ->
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ terminate(Config, Req, ?ERROR(pre_verify_options, Code, Text))
+ end.
+
+client_open(Config, Callback, Req, Next) ->
+ {LocalAccess, _} = local_file_access(Req),
+ case Next of
+ {ok, DecodedMsg, undefined} ->
+ case DecodedMsg of
+ Msg when record(Msg, tftp_msg_oack) ->
+ ServerOptions = Msg#tftp_msg_oack.options,
+ OptText = "Protocol violation. Server is not allowed new options",
+ case post_verify_options(Config, Req, ServerOptions, OptText) of
+ {ok, Config2, Req2} ->
+ {Config3, Callback2, Req3} =
+ do_client_open(Config2, Callback, Req2),
+ case LocalAccess of
+ read ->
+ BlockNo = 0,
+ common_read(Config3, Callback2, Req3, LocalAccess, BlockNo, BlockNo, undefined);
+ write ->
+ BlockNo = 0,
+ common_ack(Config3, Callback2, Req3, LocalAccess, BlockNo, undefined)
+ end;
+ {error, {Code, Text}} ->
+ {undefined, Error} =
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ send_msg(Config, Req, Error),
+ terminate(Config, Req, ?ERROR(verify_server_options, Code, Text))
+ end;
+ #tftp_msg_ack{block_no = ActualBlockNo} when LocalAccess =:= read ->
+ Req2 = Req#tftp_msg_req{options = []},
+ {Config2, Callback2, Req2} = do_client_open(Config, Callback, Req2),
+ ExpectedBlockNo = 0,
+ common_read(Config2, Callback2, Req2, LocalAccess, ExpectedBlockNo, ActualBlockNo, undefined);
+ #tftp_msg_data{block_no = ActualBlockNo, data = Data} when LocalAccess =:= write ->
+ Req2 = Req#tftp_msg_req{options = []},
+ {Config2, Callback2, Req2} = do_client_open(Config, Callback, Req2),
+ ExpectedBlockNo = 1,
+ common_write(Config2, Callback2, Req2, LocalAccess, ExpectedBlockNo, ActualBlockNo, Data, undefined);
+ %% #tftp_msg_error{code = Code, text = Text} when Req#tftp_msg_req.options /= [] ->
+ %% %% Retry without options
+ %% callback({abort, {Code, Text}}, Config, Callback, Req),
+ %% Req2 = Req#tftp_msg_req{options = []},
+ %% client_prepare(Config, Callback, Req2);
+ #tftp_msg_error{code = Code, text = Text} ->
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ terminate(Config, Req, ?ERROR(client_open, Code, Text));
+ {'EXIT', #tftp_msg_error{code = Code, text = Text}} ->
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ terminate(Config, Req, ?ERROR(client_open, Code, Text));
+ Msg when is_tuple(Msg) ->
+ Code = badop,
+ Text = "Illegal TFTP operation",
+ {undefined, Error} =
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ send_msg(Config, Req, Error),
+ Text2 = lists:flatten([Text, ". ", io_lib:format("~p", [element(1, Msg)])]),
+ terminate(Config, Req, ?ERROR(client_open, Code, Text2))
+ end;
+ {error, #tftp_msg_error{code = Code, text = Text}} ->
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ terminate(Config, Req, ?ERROR(client_open, Code, Text))
+ end.
+
+do_client_open(Config, Callback, Req) ->
+ case callback({open, client_open}, Config, Callback, Req) of
+ {Callback2, {ok, FinalOptions}} ->
+ OptText = "Internal error. Not allowed to change options.",
+ case post_verify_options(Config, Req, FinalOptions, OptText) of
+ {ok, Config2, Req2} ->
+ {Config2, Callback2, Req2};
+ {error, {Code, Text}} ->
+ {undefined, Error} =
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ send_msg(Config, Req, Error),
+ terminate(Config, Req, ?ERROR(post_verify_options, Code, Text))
+ end;
+ {undefined, #tftp_msg_error{code = Code, text = Text} = Error} ->
+ send_msg(Config, Req, Error),
+ terminate(Config, Req, ?ERROR(client_open, Code, Text))
+ end.
+
+%%%-------------------------------------------------------------------
+%%% Common loop for both client and server
+%%%-------------------------------------------------------------------
+
+common_loop(Config, Callback, Req, Next, LocalAccess, ExpectedBlockNo) ->
+ case Next of
+ {ok, DecodedMsg, Prepared} ->
+ case DecodedMsg of
+ #tftp_msg_ack{block_no = ActualBlockNo} when LocalAccess =:= read ->
+ common_read(Config, Callback, Req, LocalAccess, ExpectedBlockNo, ActualBlockNo, Prepared);
+ #tftp_msg_data{block_no = ActualBlockNo, data = Data} when LocalAccess =:= write ->
+ common_write(Config, Callback, Req, LocalAccess, ExpectedBlockNo, ActualBlockNo, Data, Prepared);
+ #tftp_msg_error{code = Code, text = Text} ->
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ terminate(Config, Req, ?ERROR(common_loop, Code, Text));
+ {'EXIT', #tftp_msg_error{code = Code, text = Text} = Error} ->
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ send_msg(Config, Req, Error),
+ terminate(Config, Req, ?ERROR(common_loop, Code, Text));
+ Msg when is_tuple(Msg) ->
+ Code = badop,
+ Text = "Illegal TFTP operation",
+ {undefined, Error} =
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ send_msg(Config, Req, Error),
+ Text2 = lists:flatten([Text, ". ", io_lib:format("~p", [element(1, Msg)])]),
+ terminate(Config, Req, ?ERROR(common_loop, Code, Text2))
+ end;
+ {error, #tftp_msg_error{code = Code, text = Text} = Error} ->
+ send_msg(Config, Req, Error),
+ terminate(Config, Req, ?ERROR(transfer, Code, Text))
+ end.
+
+common_read(Config, _, Req, _, _, _, {terminate, Result}) ->
+ terminate(Config, Req, {ok, Result});
+common_read(Config, Callback, Req, LocalAccess, BlockNo, BlockNo, Prepared) ->
+ case early_read(Config, Callback, Req, LocalAccess, Prepared) of
+ {Callback2, {more, Data}} ->
+ do_common_read(Config, Callback2, Req, LocalAccess, BlockNo, Data, undefined);
+ {undefined, {last, Data, Result}} ->
+ do_common_read(Config, undefined, Req, LocalAccess, BlockNo, Data, {terminate, Result});
+ {undefined, #tftp_msg_error{code = Code, text = Text} = Reply} ->
+ send_msg(Config, Req, Reply),
+ terminate(Config, Req, ?ERROR(read, Code, Text))
+ end;
+common_read(Config, Callback, Req, LocalAccess, ExpectedBlockNo, ActualBlockNo, Prepared)
+ when ActualBlockNo < ExpectedBlockNo ->
+ do_common_read(Config, Callback, Req, LocalAccess, ExpectedBlockNo - 1, Prepared, undefined);
+common_read(Config, Callback, Req, _LocalAccess, ExpectedBlockNo, ActualBlockNo, _Prepared) ->
+ Code = badblk,
+ Text = "Unknown transfer ID = " ++
+ integer_to_list(ActualBlockNo) ++ " (" ++ integer_to_list(ExpectedBlockNo) ++ ")",
+ {undefined, Error} =
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ send_msg(Config, Req, Error),
+ terminate(Config, Req, ?ERROR(read, Code, Text)).
+
+do_common_read(Config, Callback, Req, LocalAccess, BlockNo, Data, Prepared) ->
+ NextBlockNo = BlockNo + 1,
+ case NextBlockNo =< 65535 of
+ true ->
+ Reply = #tftp_msg_data{block_no = NextBlockNo, data = Data},
+ {Config2, Callback2, Next} =
+ transfer(Config, Callback, Req, Reply, LocalAccess, Prepared),
+ common_loop(Config2, Callback2, Req, Next, LocalAccess, NextBlockNo);
+ false ->
+ Code = badblk,
+ Text = "Too big transfer ID = " ++
+ integer_to_list(NextBlockNo) ++ " > 65535",
+ {undefined, Error} =
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ send_msg(Config, Req, Error),
+ terminate(Config, Req, ?ERROR(read, Code, Text))
+ end.
+
+common_write(Config, _, Req, _, _, _, _, {terminate, Result}) ->
+ terminate(Config, Req, {ok, Result});
+common_write(Config, Callback, Req, LocalAccess, BlockNo, BlockNo, Data, undefined) ->
+ case callback({write, Data}, Config, Callback, Req) of
+ {Callback2, more} ->
+ common_ack(Config, Callback2, Req, LocalAccess, BlockNo, undefined);
+ {undefined, {last, Result}} ->
+ Config2 = pre_terminate(Config, Req, {ok, Result}),
+ common_ack(Config2, undefined, Req, LocalAccess, BlockNo, {terminate, Result});
+ {undefined, #tftp_msg_error{code = Code, text = Text} = Reply} ->
+ send_msg(Config, Req, Reply),
+ terminate(Config, Req, ?ERROR(write, Code, Text))
+ end;
+common_write(Config, Callback, Req, LocalAccess, ExpectedBlockNo, ActualBlockNo, _Data, undefined)
+ when ActualBlockNo < ExpectedBlockNo ->
+ common_ack(Config, Callback, Req, LocalAccess, ExpectedBlockNo - 1, undefined);
+common_write(Config, Callback, Req, _, ExpectedBlockNo, ActualBlockNo, _, _) ->
+ Code = badblk,
+ Text = "Unknown transfer ID = " ++
+ integer_to_list(ActualBlockNo) ++ " (" ++ integer_to_list(ExpectedBlockNo) ++ ")",
+ {undefined, Error} =
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ send_msg(Config, Req, Error),
+ terminate(Config, Req, ?ERROR(write, Code, Text)).
+
+common_ack(Config, Callback, Req, LocalAccess, BlockNo, Prepared) ->
+ Reply = #tftp_msg_ack{block_no = BlockNo},
+ {Config2, Callback2, Next} =
+ transfer(Config, Callback, Req, Reply, LocalAccess, Prepared),
+ NextBlockNo = BlockNo + 1,
+ case NextBlockNo =< 65535 of
+ true ->
+ common_loop(Config2, Callback2, Req, Next, LocalAccess, NextBlockNo);
+ false ->
+ Code = badblk,
+ Text = "Too big transfer ID = " ++
+ integer_to_list(NextBlockNo) ++ " > 65535",
+ {undefined, Error} =
+ callback({abort, {Code, Text}}, Config, Callback, Req),
+ send_msg(Config, Req, Error),
+ terminate(Config, Req, ?ERROR(read, Code, Text))
+ end.
+
+pre_terminate(Config, Req, Result) ->
+ if
+ Req#tftp_msg_req.local_filename /= undefined,
+ Config#config.parent_pid /= undefined ->
+ proc_lib:init_ack(Result),
+ unlink(Config#config.parent_pid),
+ Config#config{parent_pid = undefined, polite_ack = true};
+ true ->
+ Config#config{polite_ack = true}
+ end.
+
+terminate(Config, Req, Result) ->
+ Result2 =
+ case Result of
+ {ok, _} ->
+ Result;
+ #error{what = What, code = Code, text = Text} = Error ->
+ print_debug_info(Config, Req, What, Error),
+ {error, {What, Code, Text}}
+ end,
+ if
+ Config#config.parent_pid =:= undefined ->
+ close_port(Config, client, Req),
+ exit(normal);
+ Req#tftp_msg_req.local_filename /= undefined ->
+ %% Client
+ close_port(Config, client, Req),
+ proc_lib:init_ack(Result2),
+ unlink(Config#config.parent_pid),
+ exit(normal);
+ true ->
+ %% Server
+ close_port(Config, server, Req),
+ exit(shutdown)
+ end.
+
+close_port(Config, Who, Data) ->
+ case Config#config.udp_socket of
+ undefined ->
+ ignore;
+ Socket ->
+ print_debug_info(Config, Who, close, Data),
+ gen_udp:close(Socket)
+ end.
+
+open_free_port(Config, Who, Data) when is_record(Config, config) ->
+ UdpOptions = Config#config.udp_options,
+ case Config#config.port_policy of
+ random ->
+ %% BUGBUG: Should be a random port
+ case catch gen_udp:open(0, UdpOptions) of
+ {ok, Socket} ->
+ Config2 = Config#config{udp_socket = Socket},
+ print_debug_info(Config2, Who, open, Data),
+ {ok, Config2};
+ {error, Reason} ->
+ Text = lists:flatten(io_lib:format("UDP open ~p -> ~p", [[0 | UdpOptions], Reason])),
+ ?ERROR(open, undef, Text);
+ {'EXIT', _} = Reason ->
+ Text = lists:flatten(io_lib:format("UDP open ~p -> ~p", [[0 | UdpOptions], Reason])),
+ ?ERROR(open, undef, Text)
+ end;
+ {range, Port, Max} when Port =< Max ->
+ case catch gen_udp:open(Port, UdpOptions) of
+ {ok, Socket} ->
+ Config2 = Config#config{udp_socket = Socket},
+ print_debug_info(Config2, Who, open, Data),
+ {ok, Config2};
+ {error, eaddrinuse} ->
+ PortPolicy = {range, Port + 1, Max},
+ Config2 = Config#config{port_policy = PortPolicy},
+ open_free_port(Config2, Who, Data);
+ {error, Reason} ->
+ Text = lists:flatten(io_lib:format("UDP open ~p -> ~p", [[Port | UdpOptions], Reason])),
+ ?ERROR(open, undef, Text);
+ {'EXIT', _} = Reason->
+ Text = lists:flatten(io_lib:format("UDP open ~p -> ~p", [[Port | UdpOptions], Reason])),
+ ?ERROR(open, undef, Text)
+ end;
+ {range, Port, _Max} ->
+ Reason = "Port range exhausted",
+ Text = lists:flatten(io_lib:format("UDP open ~p -> ~p", [[Port | UdpOptions], Reason])),
+ ?ERROR(Who, undef, Text)
+ end.
+
+%%-------------------------------------------------------------------
+%% Transfer
+%%-------------------------------------------------------------------
+
+%% Returns {Config, Callback, Next}
+%% Next = {ok, Reply, Next} | {error, Error}
+%% Encode Msg once, then send it and wait for the peer's reply,
+%% retrying the send a single time on timeout or send failure.
+%% Returns {Config, Callback, Next} where
+%% Next = {ok, Reply, Prepared} | {error, #tftp_msg_error{}}.
+transfer(Config, Callback, Req, Msg, LocalAccess, Prepared) ->
+ IoList = tftp_lib:encode_msg(Msg),
+ do_transfer(Config, Callback, Req, Msg, IoList, LocalAccess, Prepared, true).
+
+%% Retry is true on the first attempt; a second failure gives up.
+do_transfer(Config, Callback, Req, Msg, IoList, LocalAccess, Prepared, Retry) ->
+ case do_send_msg(Config, Req, Msg, IoList) of
+ ok ->
+ %% Overlap network latency with the next local read (read access only).
+ {Callback2, Prepared2} =
+ early_read(Config, Callback, Req, LocalAccess, Prepared),
+ Code = undef,
+ Text = "Transfer timed out.",
+ case wait_for_msg(Config, Callback, Req) of
+ timeout when Config#config.polite_ack =:= true ->
+ %% Re-send the last message once more, then terminate
+ %% instead of waiting again.
+ do_send_msg(Config, Req, Msg, IoList),
+ case Prepared2 of
+ {terminate, Result} ->
+ terminate(Config, Req, {ok, Result});
+ _ ->
+ terminate(Config, Req, ?ERROR(transfer, Code, Text))
+ end;
+ timeout when Retry =:= true ->
+ %% First timeout: retransmit once.
+ Retry2 = false,
+ do_transfer(Config, Callback2, Req, Msg, IoList, LocalAccess, Prepared2, Retry2);
+ timeout ->
+ Error = #tftp_msg_error{code = Code, text = Text},
+ {Config, Callback, {error, Error}};
+ {Config2, Reply} ->
+ {Config2, Callback2, {ok, Reply, Prepared2}}
+ end;
+ {error, _Reason} when Retry =:= true ->
+ %% Send failed; try the whole send/wait cycle once more.
+ do_transfer(Config, Callback, Req, Msg, IoList, LocalAccess, Prepared, false);
+ {error, Reason} ->
+ Code = undef,
+ Text = lists:flatten(io_lib:format("Transfer failed twice - giving up -> ~p", [Reason])),
+ {Config, Callback, {error, #tftp_msg_error{code = Code, text = Text}}}
+ end.
+
+%% Encode and send Msg to the peer.  If encoding crashes, an internal
+%% error message (carrying the crash reason as details) is sent instead.
+send_msg(Config, Req, Msg) ->
+ case catch tftp_lib:encode_msg(Msg) of
+ {'EXIT', Reason} ->
+ Code = undef,
+ Text = "Internal error. Encode failed",
+ Msg2 = #tftp_msg_error{code = Code, text = Text, details = Reason},
+ send_msg(Config, Req, Msg2);
+ IoList ->
+ do_send_msg(Config, Req, Msg, IoList)
+ end.
+
+%% Send the already-encoded message to the peer host/port held in Config.
+%% Returns whatever gen_udp:send/4 returns (ok | {error, Reason}).
+do_send_msg(Config, Req, Msg, IoList) ->
+ print_debug_info(Config, Req, send, Msg),
+ gen_udp:send(Config#config.udp_socket,
+ Config#config.udp_host,
+ Config#config.udp_port,
+ IoList).
+
+%% Block until the next relevant event arrives, handling administrative
+%% requests (info, change_config, sys messages) in place.
+%% Returns {Config2, DecodedMsg} for a UDP packet, or the atom timeout
+%% after Config#config.timeout seconds.
+wait_for_msg(Config, Callback, Req) ->
+ receive
+ {info, Ref, FromPid} when is_pid(FromPid) ->
+ %% A local filename implies we are the client side.
+ Type =
+ case Req#tftp_msg_req.local_filename /= undefined of
+ true -> client;
+ false -> server
+ end,
+ Info = internal_info(Config, Type),
+ reply({ok, Info}, Ref, FromPid),
+ wait_for_msg(Config, Callback, Req);
+ {{change_config, Options}, Ref, FromPid} when is_pid(FromPid) ->
+ %% Re-parse options on the fly; keep the old config on failure.
+ case catch tftp_lib:parse_config(Options, Config) of
+ {'EXIT', Reason} ->
+ reply({error, Reason}, Ref, FromPid),
+ wait_for_msg(Config, Callback, Req);
+ Config2 when is_record(Config2, config) ->
+ reply(ok, Ref, FromPid),
+ wait_for_msg(Config2, Callback, Req)
+ end;
+ {udp, Socket, RemoteHost, RemotePort, Bin} when is_binary(Bin),
+ Callback#callback.block_no =:= undefined ->
+ %% Client prepare
+ %% First packet from the server: record its host/port for the
+ %% rest of the transfer and re-arm {active, once}.
+ inet:setopts(Socket, [{active, once}]),
+ Config2 = Config#config{udp_host = RemoteHost,
+ udp_port = RemotePort},
+ DecodedMsg = (catch tftp_lib:decode_msg(Bin)),
+ print_debug_info(Config2, Req, recv, DecodedMsg),
+ {Config2, DecodedMsg};
+ {udp, Socket, Host, Port, Bin} when is_binary(Bin),
+ Config#config.udp_host =:= Host,
+ Config#config.udp_port =:= Port ->
+ %% Packet from the known peer; packets from others fall through
+ %% to the catch-all clauses below and are only logged.
+ inet:setopts(Socket, [{active, once}]),
+ DecodedMsg = (catch tftp_lib:decode_msg(Bin)),
+ print_debug_info(Config, Req, recv, DecodedMsg),
+ {Config, DecodedMsg};
+ {system, From, Msg} ->
+ %% Hand over to sys so the process supports suspend/code change.
+ Misc = {wait_for_msg, [Config, Callback, Req]},
+ sys:handle_system_msg(Msg, From, Config#config.parent_pid, ?MODULE, [], Misc);
+ {'EXIT', Pid, _Reason} when Config#config.parent_pid =:= Pid ->
+ Code = undef,
+ Text = "Parent exited.",
+ terminate(Config, Req, ?ERROR(wait_for_msg, Code, Text));
+ Msg when Req#tftp_msg_req.local_filename /= undefined ->
+ %% Unexpected message on the client side: log and keep waiting.
+ error("Client received : ~p", [Msg]),
+ wait_for_msg(Config, Callback, Req);
+ Msg when Req#tftp_msg_req.local_filename =:= undefined ->
+ %% Unexpected message on the server side: log and keep waiting.
+ error("Server received : ~p", [Msg]),
+ wait_for_msg(Config, Callback, Req)
+ after Config#config.timeout * 1000 ->
+ print_debug_info(Config, Req, recv, timeout),
+ timeout
+ end.
+
+%% Prefetch the next block from the callback while waiting for the
+%% network, but only for read access, when nothing is already prepared
+%% and the callback has been opened (block_no set).  Otherwise a no-op.
+early_read(Config, Callback, Req, read, undefined)
+ when Callback#callback.block_no /= undefined ->
+ callback(read, Config, Callback, Req);
+early_read(_Config, Callback, _Req, _LocalAccess, Prepared) ->
+ {Callback, Prepared}.
+
+%%-------------------------------------------------------------------
+%% Callback
+%%-------------------------------------------------------------------
+
+%% Invoke the user callback module and trace the outcome.
+%% Returns {Callback2 | undefined, Result} as produced by do_callback/4.
+callback(Access, Config, Callback, Req) ->
+ {Callback2, Result} =
+ do_callback(Access, Config, Callback, Req),
+ print_debug_info(Config, Req, call, {Callback2, Result}),
+ {Callback2, Result}.
+
+%% Dispatch to the user callback module (read | {write, Bin} |
+%% {open, Type} | {abort, ...}).  Returns {Callback2, Result}; the
+%% callback record is replaced by 'undefined' once the transfer is
+%% finished or has failed.  A crashing or malformed callback return is
+%% converted into an abort with an internal-error message.
+do_callback(read = Fun, Config, Callback, Req)
+ when is_record(Config, config),
+ is_record(Callback, callback),
+ is_record(Req, tftp_msg_req) ->
+ Args = [Callback#callback.state],
+ case catch apply(Callback#callback.module, Fun, Args) of
+ {more, Bin, NewState} when is_binary(Bin) ->
+ %% Advance block number and byte count, then enforce max_tsize.
+ BlockNo = Callback#callback.block_no + 1,
+ Count = Callback#callback.count + size(Bin),
+ Callback2 = Callback#callback{state = NewState,
+ block_no = BlockNo,
+ count = Count},
+ verify_count(Config, Callback2, Req, {more, Bin});
+ {last, Data, Result} ->
+ {undefined, {last, Data, Result}};
+ {error, {Code, Text}} ->
+ {undefined, #tftp_msg_error{code = Code, text = Text}};
+ Details ->
+ %% Unexpected return value or crash from the callback module.
+ Code = undef,
+ Text = "Internal error. File handler error.",
+ callback({abort, {Code, Text, Details}}, Config, Callback, Req)
+ end;
+do_callback({write = Fun, Bin}, Config, Callback, Req)
+ when is_record(Config, config),
+ is_record(Callback, callback),
+ is_record(Req, tftp_msg_req),
+ is_binary(Bin) ->
+ Args = [Bin, Callback#callback.state],
+ case catch apply(Callback#callback.module, Fun, Args) of
+ {more, NewState} ->
+ BlockNo = Callback#callback.block_no + 1,
+ Count = Callback#callback.count + size(Bin),
+ Callback2 = Callback#callback{state = NewState,
+ block_no = BlockNo,
+ count = Count},
+ verify_count(Config, Callback2, Req, more);
+ {last, Result} ->
+ {undefined, {last, Result}};
+ {error, {Code, Text}} ->
+ {undefined, #tftp_msg_error{code = Code, text = Text}};
+ Details ->
+ Code = undef,
+ Text = "Internal error. File handler error.",
+ callback({abort, {Code, Text, Details}}, Config, Callback, Req)
+ end;
+do_callback({open, Type}, Config, Callback, Req)
+ when is_record(Config, config),
+ is_record(Callback, callback),
+ is_record(Req, tftp_msg_req) ->
+ {Access, Filename} = local_file_access(Req),
+ %% client_prepare maps to prepare/N; both open variants map to open/N.
+ {Fun, BlockNo} =
+ case Type of
+ client_prepare -> {prepare, undefined};
+ client_open -> {open, 0};
+ server_open -> {open, 0}
+ end,
+ Mod = Callback#callback.module,
+ Args = [Access,
+ Filename,
+ Req#tftp_msg_req.mode,
+ Req#tftp_msg_req.options,
+ Callback#callback.state],
+ PeerInfo = peer_info(Config),
+ code:ensure_loaded(Mod),
+ %% Prefer the newer arity that takes PeerInfo as first argument,
+ %% falling back to the older callback signature if not exported.
+ Args2 =
+ case erlang:function_exported(Mod, Fun, length(Args)) of
+ true -> Args;
+ false -> [PeerInfo | Args]
+ end,
+ case catch apply(Mod, Fun, Args2) of
+ {ok, AcceptedOptions, NewState} ->
+ Callback2 = Callback#callback{state = NewState,
+ block_no = BlockNo,
+ count = 0},
+ {Callback2, {ok, AcceptedOptions}};
+ {error, {Code, Text}} ->
+ {undefined, #tftp_msg_error{code = Code, text = Text}};
+ Details ->
+ Code = undef,
+ Text = "Internal error. File handler error.",
+ callback({abort, {Code, Text, Details}}, Config, Callback, Req)
+ end;
+%% The next two clauses normalize abort tuples into #tftp_msg_error{}.
+do_callback({abort, {Code, Text}}, Config, Callback, Req) ->
+ Error = #tftp_msg_error{code = Code, text = Text},
+ do_callback({abort, Error}, Config, Callback, Req);
+do_callback({abort, {Code, Text, Details}}, Config, Callback, Req) ->
+ Error = #tftp_msg_error{code = Code, text = Text, details = Details},
+ do_callback({abort, Error}, Config, Callback, Req);
+do_callback({abort = Fun, #tftp_msg_error{code = Code, text = Text} = Error}, Config, Callback, Req)
+ when is_record(Config, config),
+ is_record(Callback, callback),
+ is_record(Req, tftp_msg_req) ->
+ %% Give the callback module a chance to clean up; ignore its result.
+ Args = [Code, Text, Callback#callback.state],
+ catch apply(Callback#callback.module, Fun, Args),
+ {undefined, Error};
+do_callback({abort, Error}, _Config, undefined, _Req) when is_record(Error, tftp_msg_error) ->
+ %% No callback left to notify; just propagate the error.
+ {undefined, Error}.
+
+%% Describe the peer as {Family, HostString, Port}, where Family is
+%% inet for a 4-tuple address, inet6 for an 8-tuple address, and
+%% undefined for anything else (e.g. a hostname).
+peer_info(#config{udp_host = Host, udp_port = Port}) ->
+ if
+ is_tuple(Host), size(Host) =:= 4 ->
+ {inet, tftp_lib:host_to_string(Host), Port};
+ is_tuple(Host), size(Host) =:= 8 ->
+ {inet6, tftp_lib:host_to_string(Host), Port};
+ true ->
+ {undefined, Host, Port}
+ end.
+
+%% Pick the callback module for Filename.  Binary transfers always use
+%% tftp_binary; with no registered callbacks tftp_file is the default;
+%% otherwise the first callback whose regexp matches wins.
+match_callback(Filename, Callbacks) ->
+ if
+ Filename =:= binary ->
+ {ok, #callback{regexp = "",
+ internal = "",
+ module = tftp_binary,
+ state = []}};
+ is_binary(Filename) ->
+ {ok, #callback{regexp = "",
+ internal = "",
+ module = tftp_binary,
+ state = []}};
+ Callbacks =:= [] ->
+ {ok, #callback{regexp = "",
+ internal = "",
+ module = tftp_file,
+ state = []}};
+ true ->
+ do_match_callback(Filename, Callbacks)
+ end.
+
+%% Walk the callback list; a regexp:match crash is reported as an
+%% internal error rather than propagated.
+do_match_callback(Filename, [C | Tail]) when is_record(C, callback) ->
+ case catch regexp:match(Filename, C#callback.internal) of
+ {match, _, _} ->
+ {ok, C};
+ nomatch ->
+ do_match_callback(Filename, Tail);
+ Details ->
+ Code = baduser,
+ Text = "Internal error. File handler not found",
+ {error, #tftp_msg_error{code = Code, text = Text, details = Details}}
+ end;
+do_match_callback(Filename, []) ->
+ Code = baduser,
+ Text = "Internal error. File handler not found",
+ {error, #tftp_msg_error{code = Code, text = Text, details = Filename}}.
+
+%% Enforce the max_tsize limit: pass Result through while the byte
+%% count is within bounds, otherwise abort the transfer with enospc.
+verify_count(Config, Callback, Req, Result) ->
+ case Config#config.max_tsize of
+ infinity ->
+ {Callback, Result};
+ Max when Callback#callback.count =< Max ->
+ {Callback, Result};
+ _Max ->
+ Code = enospc,
+ Text = "Too large file.",
+ callback({abort, {Code, Text}}, Config, Callback, Req)
+ end.
+
+%%-------------------------------------------------------------------
+%% Miscellaneous
+%%-------------------------------------------------------------------
+
+%% Build the property list returned for info requests: connection
+%% endpoints, effective option values, plus the user's own options and
+%% registered callbacks.
+internal_info(Config, Type) ->
+ {ok, ActualPort} = inet:port(Config#config.udp_socket),
+ [
+ {type, Type},
+ {host, tftp_lib:host_to_string(Config#config.udp_host)},
+ {port, Config#config.udp_port},
+ {local_port, ActualPort},
+ {port_policy, Config#config.port_policy},
+ {udp, Config#config.udp_options},
+ {use_tsize, Config#config.use_tsize},
+ {max_tsize, Config#config.max_tsize},
+ {max_conn, Config#config.max_conn},
+ {rejected, Config#config.rejected},
+ {timeout, Config#config.timeout},
+ {polite_ack, Config#config.polite_ack},
+ {debug, Config#config.debug_level},
+ {parent_pid, Config#config.parent_pid}
+ ] ++ Config#config.user_options ++ Config#config.callbacks.
+
+%% Determine which file to touch locally and how.  On the server the
+%% requested access and filename apply directly; on the client the
+%% access is inverted (a network read means a local write and vice
+%% versa) and the local filename is used.
+local_file_access(#tftp_msg_req{access = Access,
+ local_filename = Local,
+ filename = Filename}) ->
+ case Local =:= undefined of
+ true ->
+ %% Server side
+ {Access, Filename};
+ false ->
+ %% Client side
+ case Access of
+ read ->
+ {write, Local};
+ write ->
+ {read, Local}
+ end
+ end.
+
+%% Validate a request's options before the callback sees them:
+%% the access mode must not be rejected, tsize must not exceed
+%% max_tsize, and blksize must fit in 0..65464.
+%% Returns ok | {error, {Code, Text}}.
+pre_verify_options(Config, Req) ->
+ Options = Req#tftp_msg_req.options,
+ %% verify_reject/3 throws {error, _} for a rejected option; the
+ %% catch turns that throw into the {error, Reason} clause below.
+ case catch verify_reject(Config, Req, Options) of
+ ok ->
+ case verify_integer("tsize", 0, Config#config.max_tsize, Options) of
+ true ->
+ case verify_integer("blksize", 0, 65464, Options) of
+ true ->
+ ok;
+ false ->
+ {error, {badopt, "Too large blksize"}}
+ end;
+ false ->
+ {error, {badopt, "Too large tsize"}}
+ end;
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+%% After negotiation: the callback may drop or change options but must
+%% not invent new ones.  On success the (possibly renegotiated) timeout
+%% is folded into Config and the request is updated.
+post_verify_options(Config, Req, NewOptions, Text) ->
+ OldOptions = Req#tftp_msg_req.options,
+ BadOptions =
+ [Key || {Key, _Val} <- NewOptions,
+ not lists:keymember(Key, 1, OldOptions)],
+ case BadOptions =:= [] of
+ true ->
+ {ok,
+ Config#config{timeout = lookup_timeout(NewOptions)},
+ Req#tftp_msg_req{options = NewOptions}};
+ false ->
+ {error, {badopt, Text}}
+ end.
+
+%% Reject the request if its access mode or any option key is listed in
+%% Config#config.rejected.  Throws {error, {badopt, _}} for a rejected
+%% option key (callers catch this); returns ok otherwise.
+verify_reject(Config, Req, Options) ->
+ Access = Req#tftp_msg_req.access,
+ Rejected = Config#config.rejected,
+ case lists:member(Access, Rejected) of
+ true ->
+ {error, {eacces, atom_to_list(Access) ++ " mode not allowed"}};
+ false ->
+ [throw({error, {badopt, Key ++ " not allowed"}}) ||
+ {Key, _} <- Options, lists:member(Key, Rejected)],
+ ok
+ end.
+
+%% Timeout option value in seconds; defaults to 3 when absent.
+lookup_timeout(Options) ->
+ case lists:keysearch("timeout", 1, Options) of
+ {value, {_, Val}} ->
+ list_to_integer(Val);
+ false ->
+ 3
+ end.
+
+%% Transfer mode option; defaults to "octet" when absent.
+lookup_mode(Options) ->
+ case lists:keysearch("mode", 1, Options) of
+ {value, {_, Val}} ->
+ Val;
+ false ->
+ "octet"
+ end.
+
+%% True if option Key is absent, or present with an integer value in
+%% [Min, Max] (Max may be the atom infinity).  False for out-of-range
+%% or unparsable values.
+verify_integer(Key, Min, Max, Options) ->
+ case lists:keysearch(Key, 1, Options) of
+ {value, {_, Val}} when is_list(Val) ->
+ case catch list_to_integer(Val) of
+ {'EXIT', _} ->
+ false;
+ Int when Int >= Min, is_integer(Min),
+ Max =:= infinity ->
+ true;
+ Int when Int >= Min, is_integer(Min),
+ Int =< Max, is_integer(Max) ->
+ true;
+ _ ->
+ false
+ end;
+ false ->
+ true
+ end.
+%% Log an unexpected event via the error logger, tagged with module and pid.
+error(F, A) ->
+ ok = error_logger:format("~p(~p): " ++ F ++ "~n", [?MODULE, self() | A]).
+
+%% Decide, from the configured debug level (none | error | brief |
+%% normal | all) and the event kind, whether to emit a trace line.
+%% Errors are always printed unless the level is none; open/close are
+%% printed from level error upwards; data/ack traffic only at the
+%% highest verbosity.
+print_debug_info(#config{debug_level = Level} = Config, Who, What, Data) ->
+ if
+ Level =:= none ->
+ ok;
+ is_record(Data, error) ->
+ do_print_debug_info(Config, Who, What, Data);
+ Level =:= error ->
+ ok;
+ Level =:= all ->
+ do_print_debug_info(Config, Who, What, Data);
+ What =:= open ->
+ do_print_debug_info(Config, Who, What, Data);
+ What =:= close ->
+ do_print_debug_info(Config, Who, What, Data);
+ Level =:= brief ->
+ ok;
+ What /= recv, What /= send ->
+ ok;
+ is_record(Data, tftp_msg_data), Level =:= normal ->
+ ok;
+ is_record(Data, tftp_msg_ack), Level =:= normal ->
+ ok;
+ true ->
+ do_print_debug_info(Config, Who, What, Data)
+ end.
+
+%% Format one trace line.  Data payloads and binary local filenames are
+%% first abbreviated so traces stay readable.
+do_print_debug_info(Config, Who, What, #tftp_msg_data{data = Bin} = Msg) when is_binary(Bin) ->
+ Msg2 = Msg#tftp_msg_data{data = {bytes, size(Bin)}},
+ do_print_debug_info(Config, Who, What, Msg2);
+do_print_debug_info(Config, Who, What, #tftp_msg_req{local_filename = Filename} = Msg) when is_binary(Filename) ->
+ Msg2 = Msg#tftp_msg_req{local_filename = binary},
+ do_print_debug_info(Config, Who, What, Msg2);
+do_print_debug_info(Config, Who, What, Data) ->
+ %% Local port; 0 if the socket is already gone.
+ Local =
+ case catch inet:port(Config#config.udp_socket) of
+ {'EXIT', _Reason} ->
+ 0;
+ {ok, Port} ->
+ Port
+ end,
+ %% Remote = Config#config.udp_port,
+ PeerInfo = peer_info(Config),
+ %% Who is either a request record (side derived from local_filename)
+ %% or an atom naming the caller directly.
+ Side =
+ if
+ is_record(Who, tftp_msg_req),
+ Who#tftp_msg_req.local_filename /= undefined ->
+ client;
+ is_record(Who, tftp_msg_req),
+ Who#tftp_msg_req.local_filename =:= undefined ->
+ server;
+ is_atom(Who) ->
+ Who
+ end,
+ case {What, Data} of
+ {_, #error{what = What, code = Code, text = Text}} ->
+ io:format("~p(~p): ~p ~p -> ~p: ~s\n", [Side, Local, self(), What, Code, Text]);
+ {open, #tftp_msg_req{filename = Filename}} ->
+ io:format("~p(~p): open ~p -> ~p ~p\n", [Side, Local, PeerInfo, self(), Filename]);
+ {close, #tftp_msg_req{filename = Filename}} ->
+ io:format("~p(~p): close ~p -> ~p ~p\n", [Side, Local, PeerInfo, self(), Filename]);
+ {recv, _} ->
+ io:format("~p(~p): recv ~p <- ~p\n", [Side, Local, PeerInfo, Data]);
+ {send, _} ->
+ io:format("~p(~p): send ~p -> ~p\n", [Side, Local, PeerInfo, Data]);
+ {match, _} when is_record(Data, callback) ->
+ Mod = Data#callback.module,
+ State = Data#callback.state,
+ io:format("~p(~p): match ~p ~p => ~p\n", [Side, Local, PeerInfo, Mod, State]);
+ {call, _} ->
+ case Data of
+ {Callback, _Result} when is_record(Callback, callback) ->
+ Mod = Callback#callback.module,
+ State = Callback#callback.state,
+ io:format("~p(~p): call ~p ~p => ~p\n", [Side, Local, PeerInfo, Mod, State]);
+ {undefined, Result} ->
+ io:format("~p(~p): call ~p result => ~p\n", [Side, Local, PeerInfo, Result])
+ end
+ end.
+
+
+%%-------------------------------------------------------------------
+%% System upgrade
+%%-------------------------------------------------------------------
+
+%% sys callback: resume the suspended loop by re-applying the saved
+%% {Fun, Args} continuation.
+system_continue(_Parent, _Debug, {Fun, Args}) ->
+ apply(?MODULE, Fun, Args).
+
+%% sys callback: terminate with the requested reason.
+system_terminate(Reason, _Parent, _Debug, {_Fun, _Args}) ->
+ exit(Reason).
+
+%% sys callback: no state transformation is needed on code change.
+system_code_change({Fun, Args}, _Module, _OldVsn, _Extra) ->
+ {ok, {Fun, Args}}.
diff --git a/src/couch_inets/tftp_file.erl b/src/couch_inets/tftp_file.erl
new file mode 100644
index 00000000..03e54047
--- /dev/null
+++ b/src/couch_inets/tftp_file.erl
@@ -0,0 +1,338 @@
+%%%-------------------------------------------------------------------
+%%% File : tftp_file.erl
+%%% Author : Hakan Mattsson <hakan@erix.ericsson.se>
+%%% Description :
+%%%
+%%% Created : 24 May 2004 by Hakan Mattsson <hakan@erix.ericsson.se>
+%%%-------------------------------------------------------------------
+
+-module(tftp_file).
+
+%%%-------------------------------------------------------------------
+%%% Interface
+%%%-------------------------------------------------------------------
+
+-behaviour(tftp).
+
+-export([prepare/6, open/6, read/1, write/2, abort/3]).
+-export([prepare/5, open/5]).
+
+%%%-------------------------------------------------------------------
+%%% Defines
+%%%-------------------------------------------------------------------
+
+-include_lib("kernel/include/file.hrl").
+
+-record(state, {access,
+ filename,
+ root_dir,
+ options,
+ blksize,
+ fd,
+ count,
+ buffer}).
+
+%%-------------------------------------------------------------------
+%% prepare(Peer, Access, Filename, Mode, SuggestedOptions, InitialState) ->
+%% {ok, AcceptedOptions, NewState} | {error, Code, Text}
+%%
+%% Peer = {PeerType, PeerHost, PeerPort}
+%% PeerType = inet | inet6
+%% PeerHost = ip_address()
+%% PeerPort = integer()
+%% Access = read | write
+%% Filename = string()
+%% Mode = string()
+%% SuggestedOptions = [{Key, Value}]
+%% AcceptedOptions = [{Key, Value}]
+%% Key = string()
+%% Value = string()
+%% InitialState = [] | [{root_dir, string()}]
+%% NewState = term()
+%% Code = undef | enoent | eacces | enospc |
+%% badop | eexist | baduser | badopt |
+%% integer()
+%% Text = string()
+%%
+%% Prepares open of a file on the client side.
+%%
+%% Will be followed by a call to open/4 before any read/write access
+%% is performed. The AcceptedOptions will be sent to the server which
+%% will reply with those options that it accepts. The options that are
+%% accepted by the server will be forwarded to open/4 as SuggestedOptions.
+%%
+%% No new options may be added, but the ones that are present as
+%% SuggestedOptions may be omitted or replaced with new values
+%% in the AcceptedOptions.
+%%-------------------------------------------------------------------
+
+%% Old arity without PeerInfo; delegates to prepare/5.
+prepare(_Peer, Access, Filename, Mode, SuggestedOptions, Initial) ->
+ %% Kept for backwards compatibility
+ prepare(Access, Filename, Mode, SuggestedOptions, Initial).
+
+%% Client-side prepare: validate mode/options and build the initial
+%% #state{}.  The file itself is not opened here; open/5 does that.
+prepare(Access, Filename, Mode, SuggestedOptions, Initial) when is_list(Initial) ->
+ %% Client side
+ case catch handle_options(Access, Filename, Mode, SuggestedOptions, Initial) of
+ {ok, Filename2, AcceptedOptions} ->
+ State = #state{access = Access,
+ filename = Filename2,
+ options = AcceptedOptions,
+ blksize = lookup_blksize(AcceptedOptions),
+ count = 0,
+ buffer = []},
+ {ok, AcceptedOptions, State};
+ {error, {Code, Text}} ->
+ {error, {Code, Text}}
+ end.
+
+%% ---------------------------------------------------------
+%% open(Peer, Access, Filename, Mode, SuggestedOptions, State) ->
+%% {ok, AcceptedOptions, NewState} | {error, Code, Text}
+%%
+%% Peer = {PeerType, PeerHost, PeerPort}
+%% PeerType = inet | inet6
+%% PeerHost = ip_address()
+%% PeerPort = integer()
+%% Access = read | write
+%% Filename = string()
+%% Mode = string()
+%% SuggestedOptions = [{Key, Value}]
+%% AcceptedOptions = [{Key, Value}]
+%% Key = string()
+%% Value = string()
+%% State = InitialState | #state{}
+%% InitialState = [] | [{root_dir, string()}]
+%% NewState = term()
+%% Code = undef | enoent | eacces | enospc |
+%% badop | eexist | baduser | badopt |
+%% integer()
+%% Text = string()
+%%
+%% Opens a file for read or write access.
+%%
+%% On the client side, where the open/4 call has been preceded by a
+%% call to prepare/4, all options must be accepted or rejected.
+%% On the server side, where there is no preceding prepare/4 call,
+%% no new options may be added, but the ones that are present as
+%% SuggestedOptions may be omitted or replaced with new values
+%% in the AcceptedOptions.
+%%-------------------------------------------------------------------
+
+%% Old arity without PeerInfo; delegates to open/5.
+open(_Peer, Access, Filename, Mode, SuggestedOptions, Initial) ->
+ %% Kept for backwards compatibility
+ open(Access, Filename, Mode, SuggestedOptions, Initial).
+
+%% Server side: no prepare/5 has run yet, so do it now and recurse with
+%% the resulting #state{}.
+open(Access, Filename, Mode, SuggestedOptions, Initial) when is_list(Initial) ->
+ %% Server side
+ case prepare(Access, Filename, Mode, SuggestedOptions, Initial) of
+ {ok, AcceptedOptions, State} ->
+ open(Access, Filename, Mode, AcceptedOptions, State);
+ {error, {Code, Text}} ->
+ {error, {Code, Text}}
+ end;
+%% Both sides: the negotiated options must match exactly what
+%% handle_options/5 computes before the file is actually opened.
+open(Access, Filename, Mode, NegotiatedOptions, State) when is_record(State, state) ->
+ %% Both sides
+ case catch handle_options(Access, Filename, Mode, NegotiatedOptions, State) of
+ {ok, _Filename2, Options}
+ when Options =:= NegotiatedOptions ->
+ do_open(State);
+ {error, {Code, Text}} ->
+ {error, {Code, Text}}
+ end.
+
+%% Open the file with the access-appropriate modes; POSIX errors are
+%% mapped to TFTP error codes via file_error/1.
+do_open(State) when is_record(State, state) ->
+ case file:open(State#state.filename, file_options(State)) of
+ {ok, Fd} ->
+ {ok, State#state.options, State#state{fd = Fd}};
+ {error, Reason} when is_atom(Reason) ->
+ {error, file_error(Reason)}
+ end.
+
+%% file:open/2 mode list for the transfer direction; both directions
+%% use raw binary I/O with buffering (read_ahead / delayed_write).
+file_options(State) ->
+ case State#state.access of
+ read -> [read, read_ahead, raw, binary];
+ write -> [write, delayed_write, raw, binary]
+ end.
+
+%% Map a POSIX error atom to a {Code, Text} pair for a TFTP error
+%% message.  eperm is reported as eacces; anything unrecognized becomes
+%% undef with the original atom appended to the description.
+file_error(Reason) when is_atom(Reason) ->
+ Details = file:format_error(Reason),
+ case Reason of
+ eexist -> {Reason, Details};
+ enoent -> {Reason, Details};
+ eacces -> {Reason, Details};
+ eperm -> {eacces, Details};
+ enospc -> {Reason, Details};
+ _ -> {undef, Details ++ " (" ++ atom_to_list(Reason) ++ ")"}
+ end.
+
+%%-------------------------------------------------------------------
+%% read(State) ->
+%% {more, Bin, NewState} | {last, Bin, FileSize} | {error, {Code, Text}}
+%%
+%% State = term()
+%% NewState = term()
+%% Bin = binary()
+%% FileSize = integer()
+%% Code = undef | enoent | eacces | enospc |
+%% badop | eexist | baduser | badopt |
+%% integer()
+%% Text = string()
+%%
+%% Reads a chunk from the file
+%%
+%% The file is automatically closed when the last chunk is read.
+%%-------------------------------------------------------------------
+
+%% Read the next blksize-sized chunk.  A full block means more data
+%% follows; a short block or eof is the last one and closes the file.
+%% Read errors also close the file before being reported.
+read(#state{access = read} = State) ->
+ BlkSize = State#state.blksize,
+ case file:read(State#state.fd, BlkSize) of
+ {ok, Bin} when is_binary(Bin), size(Bin) =:= BlkSize ->
+ Count = State#state.count + size(Bin),
+ {more, Bin, State#state{count = Count}};
+ {ok, Bin} when is_binary(Bin), size(Bin) < BlkSize ->
+ file:close(State#state.fd),
+ Count = State#state.count + size(Bin),
+ {last, Bin, Count};
+ eof ->
+ {last, <<>>, State#state.count};
+ {error, Reason} ->
+ file:close(State#state.fd),
+ {error, file_error(Reason)}
+ end.
+
+%%-------------------------------------------------------------------
+%% write(Bin, State) ->
+%% {more, NewState} | {last, FileSize} | {error, {Code, Text}}
+%%
+%% State = term()
+%% NewState = term()
+%% Bin = binary()
+%% FileSize = integer()
+%% Code = undef | enoent | eacces | enospc |
+%% badop | eexist | baduser | badopt |
+%% integer()
+%% Text = string()
+%%
+%% Writes a chunk to the file
+%%
+%% The file is automatically closed when the last chunk is written
+%%-------------------------------------------------------------------
+
+%% Write one chunk.  A full-size block means more data follows; a short
+%% block is the last one and closes the file.  On a write error the
+%% partial file is closed and deleted before the error is reported.
+write(Bin, #state{access = write} = State) when is_binary(Bin) ->
+ Size = size(Bin),
+ BlkSize = State#state.blksize,
+ case file:write(State#state.fd, Bin) of
+ ok when Size =:= BlkSize->
+ Count = State#state.count + Size,
+ {more, State#state{count = Count}};
+ ok when Size < BlkSize->
+ file:close(State#state.fd),
+ Count = State#state.count + Size,
+ {last, Count};
+ {error, Reason} ->
+ file:close(State#state.fd),
+ file:delete(State#state.filename),
+ {error, file_error(Reason)}
+ end.
+
+%%-------------------------------------------------------------------
+%% abort(Code, Text, State) -> ok
+%%
+%% State = term()
+%% Code = undef | enoent | eacces | enospc |
+%% badop | eexist | baduser | badopt |
+%% badblk | integer()
+%% Text = string()
+%%
+%% Aborts the file transfer
+%%-------------------------------------------------------------------
+
+%% Abort the transfer: close the file and, for an interrupted write,
+%% remove the partially written file.
+abort(_Code, _Text, #state{fd = Fd, access = Access} = State) ->
+ file:close(Fd),
+ case Access of
+ write ->
+ ok = file:delete(State#state.filename);
+ read ->
+ ok
+ end.
+
+%%-------------------------------------------------------------------
+%% Process options
+%%-------------------------------------------------------------------
+
+%% Validate mode and options; only "octet" mode is supported.
+%% Returns {ok, ResolvedFilename, AcceptedOptions} or throws/returns
+%% {error, {Code, Text}}.
+handle_options(Access, Filename, Mode, Options, InitialState) when Mode =:= "octet" ->
+ Filename2 = handle_filename(Filename, InitialState),
+ Options2 = do_handle_options(Access, Filename2, Options),
+ {ok, Filename2, Options2};
+handle_options(_Access, _Filename, Mode, _Options, _InitialState) ->
+ {error, {badop, "Illegal mode " ++ Mode}}.
+
+%% Resolve the filename: prepend root_dir from the initial options if
+%% configured; once a #state{} exists, reuse its already-resolved name.
+handle_filename(Filename, InitialState) when is_list(InitialState) ->
+ case lists:keysearch(root_dir, 1, InitialState) of
+ {value, {_, Dir}} ->
+ case catch filename_join(Dir, Filename) of
+ {'EXIT', _} ->
+ throw({error, {badop, "Internal error. root_dir is not a string"}});
+ Filename2 ->
+ Filename2
+ end;
+ false ->
+ Filename
+ end;
+handle_filename(_Filename, State) when is_record(State, state) ->
+ State#state.filename.
+
+%% Join Dir and Filename; an absolute Filename is first stripped of its
+%% leading root component so it stays confined under Dir.
+filename_join(Dir, Filename) ->
+ case filename:pathtype(Filename) of
+ absolute ->
+ [_ | RelFilename] = filename:split(Filename),
+ filename:join([Dir, RelFilename]);
+ _ ->
+ filename:join([Dir, Filename])
+ end.
+
+%% Filter/normalize negotiated options.  Unknown options are silently
+%% dropped; "tsize", "blksize" and "timeout" are range-checked.
+do_handle_options(Access, Filename, [{Key, Val} | T]) ->
+ case Key of
+ "tsize" ->
+ case Access of
+ read when Val =:= "0" ->
+ %% tsize 0 on a read asks us to fill in the actual
+ %% file size; drop the option if the file is unreadable.
+ case file:read_file_info(Filename) of
+ {ok, FI} ->
+ Tsize = integer_to_list(FI#file_info.size),
+ [{Key, Tsize} | do_handle_options(Access, Filename, T)];
+ {error, _} ->
+ do_handle_options(Access, Filename, T)
+ end;
+ _ ->
+ handle_integer(Access, Filename, Key, Val, T, 0, infinity)
+ end;
+ "blksize" ->
+ handle_integer(Access, Filename, Key, Val, T, 8, 65464);
+ "timeout" ->
+ handle_integer(Access, Filename, Key, Val, T, 1, 255);
+ _ ->
+ do_handle_options(Access, Filename, T)
+ end;
+do_handle_options(_Access, _Filename, []) ->
+ [].
+
+
+%% Keep {Key, Val} if Val parses as an integer in [Min, Max] (Max may
+%% be infinity); drop it if unparsable; throw badopt if out of range.
+handle_integer(Access, Filename, Key, Val, Options, Min, Max) ->
+ case catch list_to_integer(Val) of
+ {'EXIT', _} ->
+ do_handle_options(Access, Filename, Options);
+ Int when Int >= Min, Int =< Max ->
+ [{Key, Val} | do_handle_options(Access, Filename, Options)];
+ Int when Int >= Min, Max =:= infinity ->
+ [{Key, Val} | do_handle_options(Access, Filename, Options)];
+ _Int ->
+ throw({error, {badopt, "Illegal " ++ Key ++ " value " ++ Val}})
+ end.
+
+%% Negotiated block size; defaults to the TFTP standard 512 bytes.
+lookup_blksize(Options) ->
+ case lists:keysearch("blksize", 1, Options) of
+ {value, {_, Val}} ->
+ list_to_integer(Val);
+ false ->
+ 512
+ end.
diff --git a/src/couch_inets/tftp_lib.erl b/src/couch_inets/tftp_lib.erl
new file mode 100644
index 00000000..f73b7a68
--- /dev/null
+++ b/src/couch_inets/tftp_lib.erl
@@ -0,0 +1,418 @@
+%%%-------------------------------------------------------------------
+%%% File : tftp_lib.erl
+%%% Author : Hakan Mattsson <hakan@erix.ericsson.se>
+%%% Description : Option parsing, decode, encode etc.
+%%%
+%%% Created : 18 May 2004 by Hakan Mattsson <hakan@erix.ericsson.se>
+%%%-------------------------------------------------------------------
+
+-module(tftp_lib).
+
+%%-------------------------------------------------------------------
+%% Interface
+%%-------------------------------------------------------------------
+
+%% application internal functions
+-export([
+ parse_config/1,
+ parse_config/2,
+ decode_msg/1,
+ encode_msg/1,
+ replace_val/3,
+ to_lower/1,
+ host_to_string/1
+ ]).
+
+%%-------------------------------------------------------------------
+%% Defines
+%%-------------------------------------------------------------------
+
+-include("tftp.hrl").
+
+-define(LOWER(Char),
+ if
+ Char >= $A, Char =< $Z ->
+ Char - ($A - $a);
+ true ->
+ Char
+ end).
+
+%%-------------------------------------------------------------------
+%% Config
+%%-------------------------------------------------------------------
+
+parse_config(Options) ->
+ parse_config(Options, #config{}).
+
+parse_config(Options, Config) ->
+ do_parse_config(Options, Config).
+
+do_parse_config([{Key, Val} | Tail], Config) when is_record(Config, config) ->
+ case Key of
+ debug ->
+ case Val of
+ none ->
+ do_parse_config(Tail, Config#config{debug_level = Val});
+ error ->
+ do_parse_config(Tail, Config#config{debug_level = Val});
+ brief ->
+ do_parse_config(Tail, Config#config{debug_level = Val});
+ normal ->
+ do_parse_config(Tail, Config#config{debug_level = Val});
+ verbose ->
+ do_parse_config(Tail, Config#config{debug_level = Val});
+ all ->
+ do_parse_config(Tail, Config#config{debug_level = Val});
+ _ ->
+ exit({badarg, {Key, Val}})
+ end;
+ host ->
+ if
+ is_list(Val) ->
+ do_parse_config(Tail, Config#config{udp_host = Val});
+ is_tuple(Val), size(Val) =:= 4 ->
+ do_parse_config(Tail, Config#config{udp_host = Val});
+ is_tuple(Val), size(Val) =:= 8 ->
+ do_parse_config(Tail, Config#config{udp_host = Val});
+ true ->
+ exit({badarg, {Key, Val}})
+ end;
+ port ->
+ if
+ is_integer(Val), Val >= 0 ->
+ Config2 = Config#config{udp_port = Val, udp_options = Config#config.udp_options},
+ do_parse_config(Tail, Config2);
+ true ->
+ exit({badarg, {Key, Val}})
+ end;
+ port_policy ->
+ case Val of
+ random ->
+ do_parse_config(Tail, Config#config{port_policy = Val});
+ 0 ->
+ do_parse_config(Tail, Config#config{port_policy = random});
+ MinMax when is_integer(MinMax), MinMax > 0 ->
+ do_parse_config(Tail, Config#config{port_policy = {range, MinMax, MinMax}});
+ {range, Min, Max} when Max >= Min,
+ integer(Min), Min > 0,
+ integer(Max), Max > 0 ->
+ do_parse_config(Tail, Config#config{port_policy = Val});
+ true ->
+ exit({badarg, {Key, Val}})
+ end;
+ udp when is_list(Val) ->
+ Fun =
+ fun({K, V}, List) when K /= active ->
+ replace_val(K, V, List);
+ (V, List) when V /= list, V /= binary ->
+ List ++ [V];
+ (V, _List) ->
+ exit({badarg, {udp, [V]}})
+ end,
+ UdpOptions = lists:foldl(Fun, Config#config.udp_options, Val),
+ do_parse_config(Tail, Config#config{udp_options = UdpOptions});
+ use_tsize ->
+ case Val of
+ true ->
+ do_parse_config(Tail, Config#config{use_tsize = Val});
+ false ->
+ do_parse_config(Tail, Config#config{use_tsize = Val});
+ _ ->
+ exit({badarg, {Key, Val}})
+ end;
+ max_tsize ->
+ if
+ Val =:= infinity ->
+ do_parse_config(Tail, Config#config{max_tsize = Val});
+ integer(Val), Val >= 0 ->
+ do_parse_config(Tail, Config#config{max_tsize = Val});
+ true ->
+ exit({badarg, {Key, Val}})
+ end;
+ max_conn ->
+ if
+ Val =:= infinity ->
+ do_parse_config(Tail, Config#config{max_conn = Val});
+ integer(Val), Val > 0 ->
+ do_parse_config(Tail, Config#config{max_conn = Val});
+ true ->
+ exit({badarg, {Key, Val}})
+ end;
+ _ when is_list(Key), is_list(Val) ->
+ Key2 = to_lower(Key),
+ Val2 = to_lower(Val),
+ TftpOptions = replace_val(Key2, Val2, Config#config.user_options),
+ do_parse_config(Tail, Config#config{user_options = TftpOptions});
+ reject ->
+ case Val of
+ read ->
+ Rejected = [Val | Config#config.rejected],
+ do_parse_config(Tail, Config#config{rejected = Rejected});
+ write ->
+ Rejected = [Val | Config#config.rejected],
+ do_parse_config(Tail, Config#config{rejected = Rejected});
+ _ when is_list(Val) ->
+ Rejected = [Val | Config#config.rejected],
+ do_parse_config(Tail, Config#config{rejected = Rejected});
+ _ ->
+ exit({badarg, {Key, Val}})
+ end;
+ callback ->
+ case Val of
+ {RegExp, Mod, State} when is_list(RegExp), atom(Mod) ->
+ case regexp:parse(RegExp) of
+ {ok, Internal} ->
+ Callback = #callback{regexp = RegExp,
+ internal = Internal,
+ module = Mod,
+ state = State},
+ Callbacks = Config#config.callbacks ++ [Callback],
+ do_parse_config(Tail, Config#config{callbacks = Callbacks});
+ {error, Reason} ->
+ exit({badarg, {Key, Val}, Reason})
+ end;
+ _ ->
+ exit({badarg, {Key, Val}})
+ end;
+ _ ->
+ exit({badarg, {Key, Val}})
+ end;
+do_parse_config([], Config) when is_record(Config, config) ->
+ UdpOptions = Config#config.udp_options,
+ IsInet6 = lists:member(inet6, UdpOptions),
+ IsInet = lists:member(inet, UdpOptions),
+ Host = Config#config.udp_host,
+ Host2 =
+ if
+ (IsInet and not IsInet6); (not IsInet and not IsInet6) ->
+ case inet:getaddr(Host, inet) of
+ {ok, Addr} ->
+ Addr;
+ {error, Reason} ->
+ exit({badarg, {host, Reason}})
+ end;
+ (IsInet6 and not IsInet) ->
+ case inet:getaddr(Host, inet6) of
+ {ok, Addr} ->
+ Addr;
+ {error, Reason} ->
+ exit({badarg, {host, Reason}})
+ end;
+ true ->
+ %% Conflicting options
+ exit({badarg, {udp, [inet]}})
+ end,
+ UdpOptions2 = lists:reverse(UdpOptions),
+ TftpOptions = lists:reverse(Config#config.user_options),
+ Config#config{udp_host = Host2, udp_options = UdpOptions2, user_options = TftpOptions};
+do_parse_config(Options, Config) when is_record(Config, config) ->
+ exit({badarg, Options}).
+
+host_to_string(Host) ->
+ case Host of
+ String when is_list(String) ->
+ String;
+ {A1, A2, A3, A4} -> % inet
+ lists:concat([A1, ".", A2, ".", A3, ".",A4]);
+ {A1, A2, A3, A4, A5, A6, A7, A8} -> % inet6
+ lists:concat([
+ int16_to_hex(A1), "::",
+ int16_to_hex(A2), "::",
+ int16_to_hex(A3), "::",
+ int16_to_hex(A4), "::",
+ int16_to_hex(A5), "::",
+ int16_to_hex(A6), "::",
+ int16_to_hex(A7), "::",
+ int16_to_hex(A8)
+ ])
+ end.
+
+int16_to_hex(0) ->
+ [$0];
+int16_to_hex(I) ->
+ N1 = ((I bsr 8) band 16#ff),
+ N2 = (I band 16#ff),
+ [code_character(N1 div 16), code_character(N1 rem 16),
+ code_character(N2 div 16), code_character(N2 rem 16)].
+
+code_character(N) when N < 10 ->
+ $0 + N;
+code_character(N) ->
+ $A + (N - 10).
+
+%%-------------------------------------------------------------------
+%% Decode
+%%-------------------------------------------------------------------
+
+decode_msg(Bin) when is_binary(Bin) ->
+ case Bin of
+ <<?TFTP_OPCODE_RRQ:16/integer, Tail/binary>> ->
+ case decode_strings(Tail, [keep_case, lower_case]) of
+ [Filename, Mode | Strings] ->
+ Options = decode_options(Strings),
+ #tftp_msg_req{access = read,
+ filename = Filename,
+ mode = to_lower(Mode),
+ options = Options};
+ [_Filename | _Strings] ->
+ exit(#tftp_msg_error{code = undef,
+ text = "Missing mode"});
+ _ ->
+ exit(#tftp_msg_error{code = undef,
+ text = "Missing filename"})
+ end;
+ <<?TFTP_OPCODE_WRQ:16/integer, Tail/binary>> ->
+ case decode_strings(Tail, [keep_case, lower_case]) of
+ [Filename, Mode | Strings] ->
+ Options = decode_options(Strings),
+ #tftp_msg_req{access = write,
+ filename = Filename,
+ mode = to_lower(Mode),
+ options = Options};
+ [_Filename | _Strings] ->
+ exit(#tftp_msg_error{code = undef,
+ text = "Missing mode"});
+ _ ->
+ exit(#tftp_msg_error{code = undef,
+ text = "Missing filename"})
+ end;
+ <<?TFTP_OPCODE_DATA:16/integer, SeqNo:16/integer, Data/binary>> ->
+ #tftp_msg_data{block_no = SeqNo, data = Data};
+ <<?TFTP_OPCODE_ACK:16/integer, SeqNo:16/integer>> ->
+ #tftp_msg_ack{block_no = SeqNo};
+ <<?TFTP_OPCODE_ERROR:16/integer, ErrorCode:16/integer, Tail/binary>> ->
+ case decode_strings(Tail, [keep_case]) of
+ [ErrorText] ->
+ ErrorCode2 = decode_error_code(ErrorCode),
+ #tftp_msg_error{code = ErrorCode2,
+ text = ErrorText};
+ _ ->
+ exit(#tftp_msg_error{code = undef,
+ text = "Trailing garbage"})
+ end;
+ <<?TFTP_OPCODE_OACK:16/integer, Tail/binary>> ->
+ Strings = decode_strings(Tail, [lower_case]),
+ Options = decode_options(Strings),
+ #tftp_msg_oack{options = Options};
+ _ ->
+ exit(#tftp_msg_error{code = undef,
+ text = "Invalid syntax"})
+ end.
+
+decode_strings(Bin, Cases) when is_binary(Bin), is_list(Cases) ->
+ do_decode_strings(Bin, Cases, []).
+
+do_decode_strings(<<>>, _Cases, Strings) ->
+ lists:reverse(Strings);
+do_decode_strings(Bin, [Case | Cases], Strings) ->
+ {String, Tail} = decode_string(Bin, Case, []),
+ if
+ Cases =:= [] ->
+ do_decode_strings(Tail, [Case], [String | Strings]);
+ true ->
+ do_decode_strings(Tail, Cases, [String | Strings])
+ end.
+
+decode_string(<<Char:8/integer, Tail/binary>>, Case, String) ->
+ if
+ Char =:= 0 ->
+ {lists:reverse(String), Tail};
+ Case =:= keep_case ->
+ decode_string(Tail, Case, [Char | String]);
+ Case =:= lower_case ->
+ Char2 = ?LOWER(Char),
+ decode_string(Tail, Case, [Char2 | String])
+ end;
+decode_string(<<>>, _Case, _String) ->
+ exit(#tftp_msg_error{code = undef, text = "Trailing null missing"}).
+
+decode_options([Key, Value | Strings]) ->
+ [{to_lower(Key), Value} | decode_options(Strings)];
+decode_options([]) ->
+ [].
+
+decode_error_code(Int) ->
+ case Int of
+ ?TFTP_ERROR_UNDEF -> undef;
+ ?TFTP_ERROR_ENOENT -> enoent;
+ ?TFTP_ERROR_EACCES -> eacces;
+ ?TFTP_ERROR_ENOSPC -> enospc;
+ ?TFTP_ERROR_BADOP -> badop;
+ ?TFTP_ERROR_BADBLK -> badblk;
+ ?TFTP_ERROR_EEXIST -> eexist;
+ ?TFTP_ERROR_BADUSER -> baduser;
+ ?TFTP_ERROR_BADOPT -> badopt;
+ Int when is_integer(Int), Int >= 0, Int =< 65535 -> Int;
+ _ -> exit(#tftp_msg_error{code = undef, text = "Error code outside range."})
+ end.
+
+%%-------------------------------------------------------------------
+%% Encode
+%%-------------------------------------------------------------------
+
+encode_msg(#tftp_msg_req{access = Access,
+ filename = Filename,
+ mode = Mode,
+ options = Options}) ->
+ OpCode = case Access of
+ read -> ?TFTP_OPCODE_RRQ;
+ write -> ?TFTP_OPCODE_WRQ
+ end,
+ [
+ <<OpCode:16/integer>>,
+ Filename,
+ 0,
+ Mode,
+ 0,
+ [[Key, 0, Val, 0] || {Key, Val} <- Options]
+ ];
+encode_msg(#tftp_msg_data{block_no = BlockNo, data = Data}) when BlockNo =< 65535 ->
+ [
+ <<?TFTP_OPCODE_DATA:16/integer, BlockNo:16/integer>>,
+ Data
+ ];
+encode_msg(#tftp_msg_ack{block_no = BlockNo}) when BlockNo =< 65535 ->
+ <<?TFTP_OPCODE_ACK:16/integer, BlockNo:16/integer>>;
+encode_msg(#tftp_msg_error{code = Code, text = Text}) ->
+ IntCode = encode_error_code(Code),
+ [
+ <<?TFTP_OPCODE_ERROR:16/integer, IntCode:16/integer>>,
+ Text,
+ 0
+ ];
+encode_msg(#tftp_msg_oack{options = Options}) ->
+ [
+ <<?TFTP_OPCODE_OACK:16/integer>>,
+ [[Key, 0, Val, 0] || {Key, Val} <- Options]
+ ].
+
+encode_error_code(Code) ->
+ case Code of
+ undef -> ?TFTP_ERROR_UNDEF;
+ enoent -> ?TFTP_ERROR_ENOENT;
+ eacces -> ?TFTP_ERROR_EACCES;
+ enospc -> ?TFTP_ERROR_ENOSPC;
+ badop -> ?TFTP_ERROR_BADOP;
+ badblk -> ?TFTP_ERROR_BADBLK;
+ eexist -> ?TFTP_ERROR_EEXIST;
+ baduser -> ?TFTP_ERROR_BADUSER;
+ badopt -> ?TFTP_ERROR_BADOPT;
+ Int when is_integer(Int), Int >= 0, Int =< 65535 -> Int
+ end.
+
+%%-------------------------------------------------------------------
+%% Miscellaneous
+%%-------------------------------------------------------------------
+
+replace_val(Key, Val, List) ->
+ case lists:keysearch(Key, 1, List) of
+ false ->
+ List ++ [{Key, Val}];
+ {value, {_, OldVal}} when OldVal =:= Val ->
+ List;
+ {value, {_, _}} ->
+ lists:keyreplace(Key, 1, List, {Key, Val})
+ end.
+
+to_lower(Chars) ->
+ [?LOWER(Char) || Char <- Chars].
diff --git a/src/couch_inets/tftp_sup.erl b/src/couch_inets/tftp_sup.erl
new file mode 100644
index 00000000..5a176311
--- /dev/null
+++ b/src/couch_inets/tftp_sup.erl
@@ -0,0 +1,81 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+%%----------------------------------------------------------------------
+%% Purpose: The top supervisor for tftp hangs under inets_sup.
+%%----------------------------------------------------------------------
+
+-module(tftp_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/1]).
+-export([start_child/1]).
+
+%% Supervisor callback
+-export([init/1]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+start_link(TftpServices) ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, [TftpServices]).
+
+start_child(Args) ->
+ supervisor:start_child(?MODULE, Args).
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+
+init([Services]) when is_list(Services) ->
+ RestartStrategy = one_for_one,
+ MaxR = 10,
+ MaxT = 3600,
+ KillAfter = timer:seconds(3),
+ Children = [worker_spec(KillAfter, Options) || {tftpd, Options} <- Services],
+ {ok, {{RestartStrategy, MaxR, MaxT}, Children}}.
+
+%%%=========================================================================
+%%% Internal functions
+%%%=========================================================================
+
+worker_spec(KillAfter, Options) ->
+ Modules = [proc_lib, tftp, tftp_engine],
+ KA = supervisor_timeout(KillAfter),
+ Name = unique_name(Options),
+ {Name, {tftp, start, [Options]}, permanent, KA, worker, Modules}.
+
+unique_name(Options) ->
+ case lists:keysearch(port, 1, Options) of
+ {value, {_, Port}} when is_integer(Port), Port > 0 ->
+ {tftpd, Port};
+ _ ->
+ {tftpd, erlang:now()}
+ end.
+
+%% supervisor_spec(Name) ->
+%% {Name, {Name, start, []}, permanent, infinity, supervisor,
+%% [Name, supervisor]}.
+
+-ifdef(debug_shutdown).
+supervisor_timeout(_KillAfter) -> timer:hours(24).
+-else.
+supervisor_timeout(KillAfter) -> KillAfter.
+-endif.
diff --git a/src/couchdb/Makefile.am b/src/couchdb/Makefile.am
new file mode 100644
index 00000000..bf0c31bc
--- /dev/null
+++ b/src/couchdb/Makefile.am
@@ -0,0 +1,97 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+datarootdir = @prefix@/share
+
+ICU_LOCAL_FLAGS = $(ICU_LOCAL_CFLAGS) $(ICU_LOCAL_LDFLAGS)
+
+couchprivlibdir = $(erlanglibdir)/couch-$(version)/priv/lib
+
+couchprivlib_LTLIBRARIES = couch_erl_driver.la
+couch_erl_driver_la_SOURCES = couch_erl_driver.c
+couch_erl_driver_la_LDFLAGS = -module -avoid-version $(ICU_LOCAL_FLAGS)
+couch_erl_driver_la_CFLAGS = $(ICU_LOCAL_FLAGS)
+couch_erl_driver_la_LIBADD = -licuuc -licudata -licui18n
+
+libbin_PROGRAMS = couchjs
+couchjs_SOURCES = couch_js.c
+
+couchebindir = $(erlanglibdir)/couch-$(version)/ebin
+couchincludedir = $(erlanglibdir)/couch-$(version)/include
+
+couch_file_collection = \
+ cjson.erl \
+ couch_btree.erl \
+ couch_db.erl \
+ couch_db_update_notifier.erl \
+ couch_doc.erl \
+ couch_event_sup.erl \
+ couch_file.erl \
+ couch_ft_query.erl \
+ couch_key_tree.erl \
+ couch_log.erl \
+ couch_query_servers.erl \
+ couch_rep.erl \
+ couch_server.erl \
+ couch_server_sup.erl \
+ couch_stream.erl \
+ couch_util.erl \
+ couch_view.erl \
+ mod_couch.erl
+
+couchebin_DATA = \
+ cjson.beam \
+ couch.app \
+ couch_btree.beam \
+ couch_db.beam \
+ couch_db_update_notifier.beam \
+ couch_doc.beam \
+ couch_event_sup.beam \
+ couch_file.beam \
+ couch_ft_query.beam \
+ couch_key_tree.beam \
+ couch_log.beam \
+ couch_query_servers.beam \
+ couch_rep.beam \
+ couch_server.beam \
+ couch_server_sup.beam \
+ couch_stream.beam \
+ couch_util.beam \
+ couch_view.beam \
+ mod_couch.beam
+
+couchinclude_DATA = couch_db.hrl
+
+EXTRA_DIST = $(couch_file_collection) $(couchinclude_DATA)
+
+CLEANFILES = $(couchebin_DATA)
+
+couch.app: couch.app.tpl
+ sed -e "s|%package_name%|@package_name@|g" \
+ -e "s|%version%|@version@|g" > \
+ $@ < $<
+ chmod +x $@
+
+%.beam: %.erl
+ erlc $<
+
+install-data-hook:
+ if test -f "$(DESTDIR)/$(couchprivlibdir)/couch_erl_driver"; then \
+ rm -f "$(DESTDIR)/$(couchprivlibdir)/couch_erl_driver.so"; \
+ cd "$(DESTDIR)/$(couchprivlibdir)" && \
+ $(LN_S) couch_erl_driver couch_erl_driver.so; \
+ fi
+
+uninstall-local:
+ if test -f "$(DESTDIR)/$(couchprivlibdir)/couch_erl_driver"; then \
+ rm -f "$(DESTDIR)/$(couchprivlibdir)/couch_erl_driver.so"; \
+ fi
diff --git a/src/couchdb/cjson.erl b/src/couchdb/cjson.erl
new file mode 100644
index 00000000..042d5c41
--- /dev/null
+++ b/src/couchdb/cjson.erl
@@ -0,0 +1,567 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2006 Mochi Media, Inc.
+%%
+%% Permission is hereby granted, free of charge, to any person
+%% obtaining a copy of this software and associated documentation
+%% files (the "Software"), to deal in the Software without restriction,
+%% including without limitation the rights to use, copy, modify, merge,
+%% publish, distribute, sublicense, and/or sell copies of the Software,
+%% and to permit persons to whom the Software is furnished to do
+%% so, subject to the following conditions:
+%%
+%% The above copyright notice and this permission notice shall be included
+%% in all copies or substantial portions of the Software.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+%% MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+%% IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+%% CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+%% TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+%% SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+%% @doc Yet another JSON (RFC 4627) library for Erlang.
+
+-module(cjson).
+-author('bob@mochimedia.com').
+-export([encoder/1, encode/1]).
+-export([decoder/1, decode/1]).
+-export([test/0]).
+
+%
+% NOTE: This file was originally mochijson.erl and has been adapted for
+% use with CouchDB.
+%
+% The changes are:
+% {array, [...]}
+% is now
+% {...}
+% and:
+% {struct, [...]}
+% is now
+% {obj, [...]}
+%
+
+% This is a macro to placate syntax highlighters..
+-define(Q, $\").
+-define(ADV_COL(S, N), S#decoder{column=N+S#decoder.column}).
+-define(INC_COL(S), S#decoder{column=1+S#decoder.column}).
+-define(INC_LINE(S), S#decoder{column=1, line=1+S#decoder.line}).
+
+%% @type iolist() = [char() | binary() | iolist()]
+%% @type iodata() = iolist() | binary()
+%% @type json_string() = atom | string() | binary()
+%% @type json_number() = integer() | float()
+%% @type json_array() = {json_term()}
+%% @type json_object() = {obj, [{json_string(), json_term()}]}
+%% @type json_term() = json_string() | json_number() | json_array() |
+%% json_object()
+%% @type encoding() = utf8 | unicode
+%% @type encoder_option() = {input_encoding, encoding()} |
+%% {handler, function()}
+%% @type decoder_option() = {input_encoding, encoding()} |
+%% {object_hook, function()}
+
+-record(encoder, {input_encoding=utf8,
+ handler=null}).
+
+-record(decoder, {input_encoding=utf8,
+ object_hook=null,
+ line=1,
+ column=1,
+ state=null}).
+
+%% @spec encoder([encoder_option()]) -> function()
+%% @doc Create an encoder/1 with the given options.
+encoder(Options) ->
+ State = parse_encoder_options(Options, #encoder{}),
+ fun (O) -> json_encode(O, State) end.
+
+%% @spec encode(json_term()) -> iolist()
+%% @doc Encode the given as JSON to an iolist.
+encode(Any) ->
+ json_encode(Any, #encoder{}).
+
+%% @spec decoder([decoder_option()]) -> function()
+%% @doc Create a decoder/1 with the given options.
+decoder(Options) ->
+ State = parse_decoder_options(Options, #decoder{}),
+ fun (O) -> json_decode(O, State) end.
+
+%% @spec decode(iolist()) -> json_term()
+%% @doc Decode the given iolist to Erlang terms.
+decode(S) ->
+ json_decode(S, #decoder{}).
+
+test() ->
+ test_all().
+
+%% Internal API
+
+parse_encoder_options([], State) ->
+ State;
+parse_encoder_options([{input_encoding, Encoding} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{input_encoding=Encoding});
+parse_encoder_options([{handler, Handler} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{handler=Handler}).
+
+parse_decoder_options([], State) ->
+ State;
+parse_decoder_options([{input_encoding, Encoding} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{input_encoding=Encoding});
+parse_decoder_options([{object_hook, Hook} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{object_hook=Hook}).
+
+
+format_float(F) ->
+ format_float1(lists:reverse(float_to_list(F)), []).
+
+format_float1([$0, $0, _, $e | Rest], []) ->
+ strip_zeros(Rest, []);
+format_float1([Sign, $e | Rest], Acc) ->
+ strip_zeros(Rest, [$e, Sign | Acc]);
+format_float1([C | Rest], Acc) ->
+ format_float1(Rest, [C | Acc]).
+
+strip_zeros(L=[$0, $. | _], Acc) ->
+ lists:reverse(L, Acc);
+strip_zeros([$0 | Rest], Acc) ->
+ strip_zeros(Rest, Acc);
+strip_zeros(L, Acc) ->
+ lists:reverse(L, Acc).
+
+json_encode(true, _State) ->
+ "true";
+json_encode(false, _State) ->
+ "false";
+json_encode(null, _State) ->
+ "null";
+json_encode(I, _State) when is_integer(I) ->
+ integer_to_list(I);
+json_encode(F, _State) when is_float(F) ->
+ format_float(F);
+json_encode(L, State) when is_list(L); is_binary(L); is_atom(L) ->
+ json_encode_string(L, State);
+json_encode({obj, Props}, State) when is_list(Props) ->
+ json_encode_proplist(Props, State);
+json_encode(Array, State) when is_tuple(Array) ->
+ json_encode_array(Array, State);
+json_encode(Bad, #encoder{handler=null}) ->
+ exit({json_encode, {bad_term, Bad}});
+json_encode(Bad, State=#encoder{handler=Handler}) ->
+ json_encode(Handler(Bad), State).
+
+json_encode_array({}, _State) ->
+ "[]";
+json_encode_array(Tuple, State) ->
+ F = fun (O, Acc) ->
+ [$,, json_encode(O, State) | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "[", tuple_to_list(Tuple)),
+ lists:reverse([$\] | Acc1]).
+
+json_encode_proplist([], _State) ->
+ "{}";
+json_encode_proplist(Props, State) ->
+ F = fun ({K, V}, Acc) ->
+ KS = case K of
+ K when is_atom(K) ->
+ json_encode_string_utf8(atom_to_list(K), [?Q]);
+ K when is_integer(K) ->
+ json_encode_string(integer_to_list(K), State);
+ K when is_list(K); is_binary(K) ->
+ json_encode_string(K, State)
+ end,
+ VS = json_encode(V, State),
+ [$,, VS, $:, KS | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "{", Props),
+ lists:reverse([$\} | Acc1]).
+
+json_encode_string(A, _State) when is_atom(A) ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(atom_to_list(A)), [?Q]);
+json_encode_string(B, _State) when is_binary(B) ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(B), [?Q]);
+json_encode_string(S, #encoder{input_encoding=utf8}) ->
+ json_encode_string_utf8(S, [?Q]);
+json_encode_string(S, #encoder{input_encoding=unicode}) ->
+ json_encode_string_unicode(S, [?Q]).
+
+json_encode_string_utf8([], Acc) ->
+ lists:reverse([$\" | Acc]);
+json_encode_string_utf8(All=[C | Cs], Acc) ->
+ case C of
+ C when C >= 16#7f ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(All), Acc);
+ _ ->
+ Acc1 = case C of
+ ?Q ->
+ [?Q, $\\ | Acc];
+ $/ ->
+ [$/, $\\ | Acc];
+ $\\ ->
+ [$\\, $\\ | Acc];
+ $\b ->
+ [$b, $\\ | Acc];
+ $\f ->
+ [$f, $\\ | Acc];
+ $\n ->
+ [$n, $\\ | Acc];
+ $\r ->
+ [$r, $\\ | Acc];
+ $\t ->
+ [$t, $\\ | Acc];
+ C when C >= 0, C < $\s ->
+ [unihex(C) | Acc];
+ C when C >= $\s ->
+ [C | Acc];
+ _ ->
+ exit({json_encode, {bad_char, C}})
+ end,
+ json_encode_string_utf8(Cs, Acc1)
+ end.
+
+json_encode_string_unicode([], Acc) ->
+ lists:reverse([$\" | Acc]);
+json_encode_string_unicode([C | Cs], Acc) ->
+ Acc1 = case C of
+ ?Q ->
+ [?Q, $\\ | Acc];
+ $/ ->
+ [$/, $\\ | Acc];
+ $\\ ->
+ [$\\, $\\ | Acc];
+ $\b ->
+ [$b, $\\ | Acc];
+ $\f ->
+ [$f, $\\ | Acc];
+ $\n ->
+ [$n, $\\ | Acc];
+ $\r ->
+ [$r, $\\ | Acc];
+ $\t ->
+ [$t, $\\ | Acc];
+ C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
+ [unihex(C) | Acc];
+ C when C < 16#7f ->
+ [C | Acc];
+ _ ->
+ exit({json_encode, {bad_char, C}})
+ end,
+ json_encode_string_unicode(Cs, Acc1).
+
+dehex(C) when C >= $0, C =< $9 ->
+ C - $0;
+dehex(C) when C >= $a, C =< $f ->
+ C - $a + 10;
+dehex(C) when C >= $A, C =< $F ->
+ C - $A + 10.
+
+hexdigit(C) when C >= 0, C =< 9 ->
+ C + $0;
+hexdigit(C) when C =< 15 ->
+ C + $a - 10.
+
+unihex(C) when C < 16#10000 ->
+ <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
+ Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
+ [$\\, $u | Digits];
+unihex(C) when C =< 16#10FFFF ->
+ N = C - 16#10000,
+ S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
+ S2 = 16#dc00 bor (N band 16#3ff),
+ [unihex(S1), unihex(S2)].
+
+json_decode(B, S) when is_binary(B) ->
+ json_decode([B], S);
+json_decode(L, S) ->
+ {Res, L1, S1} = decode1(L, S),
+ {eof, [], _} = tokenize(L1, S1#decoder{state=trim}),
+ Res.
+
+decode1(L, S=#decoder{state=null}) ->
+ case tokenize(L, S#decoder{state=any}) of
+ {{const, C}, L1, S1} ->
+ {C, L1, S1};
+ {start_array, L1, S1} ->
+ decode_array(L1, S1#decoder{state=any}, []);
+ {start_object, L1, S1} ->
+ decode_object(L1, S1#decoder{state=key}, [])
+ end.
+
+make_object(V, #decoder{object_hook=null}) ->
+ V;
+make_object(V, #decoder{object_hook=Hook}) ->
+ Hook(V).
+
+decode_object(L, S=#decoder{state=key}, Acc) ->
+ case tokenize(L, S) of
+ {end_object, Rest, S1} ->
+ V = make_object({obj, lists:reverse(Acc)}, S1),
+ {V, Rest, S1#decoder{state=null}};
+ {{const, K}, Rest, S1} when is_list(K) ->
+ {colon, L2, S2} = tokenize(Rest, S1),
+ {V, L3, S3} = decode1(L2, S2#decoder{state=null}),
+ decode_object(L3, S3#decoder{state=comma}, [{K, V} | Acc])
+ end;
+decode_object(L, S=#decoder{state=comma}, Acc) ->
+ case tokenize(L, S) of
+ {end_object, Rest, S1} ->
+ V = make_object({obj, lists:reverse(Acc)}, S1),
+ {V, Rest, S1#decoder{state=null}};
+ {comma, Rest, S1} ->
+ decode_object(Rest, S1#decoder{state=key}, Acc)
+ end.
+
+decode_array(L, S=#decoder{state=any}, Acc) ->
+ case tokenize(L, S) of
+ {end_array, Rest, S1} ->
+ {list_to_tuple(lists:reverse(Acc)), Rest, S1#decoder{state=null}};
+ {start_array, Rest, S1} ->
+ {Array, Rest1, S2} = decode_array(Rest, S1#decoder{state=any}, []),
+ decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
+ {start_object, Rest, S1} ->
+ {Array, Rest1, S2} = decode_object(Rest, S1#decoder{state=key}, []),
+ decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
+ {{const, Const}, Rest, S1} ->
+ decode_array(Rest, S1#decoder{state=comma}, [Const | Acc])
+ end;
+decode_array(L, S=#decoder{state=comma}, Acc) ->
+ case tokenize(L, S) of
+ {end_array, Rest, S1} ->
+ {list_to_tuple(lists:reverse(Acc)), Rest, S1#decoder{state=null}};
+ {comma, Rest, S1} ->
+ decode_array(Rest, S1#decoder{state=any}, Acc)
+ end.
+
+tokenize_string(IoList=[C | _], S=#decoder{input_encoding=utf8}, Acc)
+ when is_list(C); is_binary(C); C >= 16#7f ->
+ List = xmerl_ucs:from_utf8(list_to_binary(lists:flatten(IoList))),
+ tokenize_string(List, S#decoder{input_encoding=unicode}, Acc);
+tokenize_string("\"" ++ Rest, S, Acc) ->
+ {lists:reverse(Acc), Rest, ?INC_COL(S)};
+tokenize_string("\\\"" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\" | Acc]);
+tokenize_string("\\\\" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\\ | Acc]);
+tokenize_string("\\/" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$/ | Acc]);
+tokenize_string("\\b" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\b | Acc]);
+tokenize_string("\\f" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\\ | Acc]);
+tokenize_string("\\n" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\n | Acc]);
+tokenize_string("\\r" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\r | Acc]);
+tokenize_string("\\t" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\t | Acc]);
+tokenize_string([$\\, $u, C3, C2, C1, C0 | Rest], S, Acc) ->
+ % coalesce UTF-16 surrogate pair?
+ C = dehex(C0) bor
+ (dehex(C1) bsl 4) bor
+ (dehex(C2) bsl 8) bor
+ (dehex(C3) bsl 12),
+ tokenize_string(Rest, ?ADV_COL(S, 6), [C | Acc]);
+tokenize_string([C | Rest], S, Acc) when C >= $\s; C < 16#10FFFF ->
+ tokenize_string(Rest, ?ADV_COL(S, 1), [C | Acc]).
+
+tokenize_number(IoList=[C | _], Mode, S=#decoder{input_encoding=utf8}, Acc)
+ when is_list(C); is_binary(C); C >= 16#7f ->
+ List = xmerl_ucs:from_utf8(list_to_binary(lists:flatten(IoList))),
+ tokenize_number(List, Mode, S#decoder{input_encoding=unicode}, Acc);
+tokenize_number([$- | Rest], sign, S, []) ->
+ tokenize_number(Rest, int, ?INC_COL(S), [$-]);
+tokenize_number(Rest, sign, S, []) ->
+ tokenize_number(Rest, int, S, []);
+tokenize_number([$0 | Rest], int, S, Acc) ->
+ tokenize_number(Rest, frac, ?INC_COL(S), [$0 | Acc]);
+tokenize_number([C | Rest], int, S, Acc) when C >= $1, C =< $9 ->
+ tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
+tokenize_number([C | Rest], int1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, int1, S, Acc) ->
+ tokenize_number(Rest, frac, S, Acc);
+tokenize_number([$., C | Rest], frac, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
+tokenize_number([E | Rest], frac, S, Acc) when E == $e; E == $E ->
+ tokenize_number(Rest, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
+tokenize_number(Rest, frac, S, Acc) ->
+ {{int, lists:reverse(Acc)}, Rest, S};
+tokenize_number([C | Rest], frac1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, frac1, ?INC_COL(S), [C | Acc]);
+tokenize_number([E | Rest], frac1, S, Acc) when E == $e; E == $E ->
+ tokenize_number(Rest, esign, ?INC_COL(S), [$e | Acc]);
+tokenize_number(Rest, frac1, S, Acc) ->
+ {{float, lists:reverse(Acc)}, Rest, S};
+tokenize_number([C | Rest], esign, S, Acc) when C == $-; C == $+ ->
+ tokenize_number(Rest, eint, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, esign, S, Acc) ->
+ tokenize_number(Rest, eint, S, Acc);
+tokenize_number([C | Rest], eint, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
+tokenize_number([C | Rest], eint1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, eint1, S, Acc) ->
+ {{float, lists:reverse(Acc)}, Rest, S}.
+
+tokenize([], S=#decoder{state=trim}) ->
+ {eof, [], S};
+tokenize([L | Rest], S) when is_list(L) ->
+ tokenize(L ++ Rest, S);
+tokenize([B | Rest], S) when is_binary(B) ->
+ tokenize(xmerl_ucs:from_utf8(B) ++ Rest, S);
+tokenize("\r\n" ++ Rest, S) ->
+ tokenize(Rest, ?INC_LINE(S));
+tokenize("\n" ++ Rest, S) ->
+ tokenize(Rest, ?INC_LINE(S));
+tokenize([C | Rest], S) when C == $\s; C == $\t ->
+ tokenize(Rest, ?INC_COL(S));
+tokenize("{" ++ Rest, S) ->
+ {start_object, Rest, ?INC_COL(S)};
+tokenize("}" ++ Rest, S) ->
+ {end_object, Rest, ?INC_COL(S)};
+tokenize("[" ++ Rest, S) ->
+ {start_array, Rest, ?INC_COL(S)};
+tokenize("]" ++ Rest, S) ->
+ {end_array, Rest, ?INC_COL(S)};
+tokenize("," ++ Rest, S) ->
+ {comma, Rest, ?INC_COL(S)};
+tokenize(":" ++ Rest, S) ->
+ {colon, Rest, ?INC_COL(S)};
+tokenize("null" ++ Rest, S) ->
+ {{const, null}, Rest, ?ADV_COL(S, 4)};
+tokenize("true" ++ Rest, S) ->
+ {{const, true}, Rest, ?ADV_COL(S, 4)};
+tokenize("false" ++ Rest, S) ->
+ {{const, false}, Rest, ?ADV_COL(S, 5)};
+tokenize("\"" ++ Rest, S) ->
+ {String, Rest1, S1} = tokenize_string(Rest, ?INC_COL(S), []),
+ {{const, xmerl_ucs:to_utf8(String)}, Rest1, S1};
+tokenize(L=[C | _], S) when C >= $0, C =< $9; C == $- ->
+ case tokenize_number(L, sign, S, []) of
+ {{int, Int}, Rest, S1} ->
+ {{const, list_to_integer(Int)}, Rest, S1};
+ {{float, Float}, Rest, S1} ->
+ {{const, list_to_float(Float)}, Rest, S1}
+ end.
+
+%% testing constructs borrowed from the Yaws JSON implementation.
+
+%% Create an object from a list of Key/Value pairs.
+
+obj_new() ->
+ {obj, []}.
+
+is_obj({obj, Props}) ->
+ F = fun ({K, _}) when is_list(K) ->
+ true;
+ (_) ->
+ false
+ end,
+ lists:all(F, Props).
+
+obj_from_list(Props) ->
+ Obj = {obj, Props},
+ case is_obj(Obj) of
+ true -> Obj;
+ false -> exit(json_bad_object)
+ end.
+
+%% Test for equivalence of Erlang terms.
+%% Due to arbitrary order of construction, equivalent objects might
+%% compare unequal as erlang terms, so we need to carefully recurse
+%% through aggregates (tuples and objects).
+
+equiv({obj, Props1}, {obj, Props2}) ->
+ equiv_object(Props1, Props2);
+equiv(T1, T2) when is_tuple(T1), is_tuple(T2) ->
+ equiv_list(tuple_to_list(T1), tuple_to_list(T2));
+equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
+equiv(S1, S2) when is_list(S1), is_list(S2) -> S1 == S2;
+equiv(true, true) -> true;
+equiv(false, false) -> true;
+equiv(null, null) -> true.
+
+%% Object representation and traversal order is unknown.
+%% Use the sledgehammer and sort property lists.
+
+equiv_object(Props1, Props2) ->
+ L1 = lists:keysort(1, Props1),
+ L2 = lists:keysort(1, Props2),
+ Pairs = lists:zip(L1, L2),
+ true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
+ equiv(K1, K2) and equiv(V1, V2)
+ end, Pairs).
+
+%% Recursively compare tuple elements for equivalence.
+
+equiv_list([], []) ->
+ true;
+equiv_list([V1 | L1], [V2 | L2]) ->
+ case equiv(V1, V2) of
+ true ->
+ equiv_list(L1, L2);
+ false ->
+ false
+ end.
+
+test_all() ->
+ test_one(e2j_test_vec(utf8), 1).
+
+test_one([], N) ->
+ io:format("~p tests passed~n", [N-1]),
+ ok;
+test_one([{E, J} | Rest], N) ->
+ io:format("[~p] ~p ~p~n", [N, E, J]),
+ true = equiv(E, decode(J)),
+ true = equiv(E, decode(encode(E))),
+ test_one(Rest, 1+N).
+
+e2j_test_vec(unicode) ->
+ [
+ {"foo" ++ [500] ++ "bar", [$", $f, $o, $o, 500, $b, $a, $r, $"]}
+ ];
+e2j_test_vec(utf8) ->
+ [
+ {1, "1"},
+ {3.1416, "3.14160"}, % text representation may truncate, trailing zeroes
+ {-1, "-1"},
+ {-3.1416, "-3.14160"},
+ {12.0e10, "1.20000e+11"},
+ {1.234E+10, "1.23400e+10"},
+ {-1.234E-10, "-1.23400e-10"},
+ {10.0, "1.0e+01"},
+ {123.456, "1.23456E+2"},
+ {10.0, "1e1"},
+ {"foo", "\"foo\""},
+ {"foo" ++ [5] ++ "bar", "\"foo\\u0005bar\""},
+ {"", "\"\""},
+ {[], "\"\""},
+ {"\n\n\n", "\"\\n\\n\\n\""},
+ {obj_new(), "{}"},
+ {obj_from_list([{"foo", "bar"}]), "{\"foo\":\"bar\"}"},
+ {obj_from_list([{"foo", "bar"}, {"baz", 123}]),
+ "{\"foo\":\"bar\",\"baz\":123}"},
+ {{}, "[]"},
+ {{{}}, "[[]]"},
+ {{1, "foo"}, "[1,\"foo\"]"},
+
+ % json array in a json object
+ {obj_from_list([{"foo", {123}}]),
+ "{\"foo\":[123]}"},
+
+ % json object in a json object
+ {obj_from_list([{"foo", obj_from_list([{"bar", true}])}]),
+ "{\"foo\":{\"bar\":true}}"},
+
+ % fold evaluation order
+ {obj_from_list([{"foo", {}},
+ {"bar", obj_from_list([{"baz", true}])},
+ {"alice", "bob"}]),
+ "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
+
+ % json object in a json array
+ {{-123, "foo", obj_from_list([{"bar", {}}]), null},
+ "[-123,\"foo\",{\"bar\":[]},null]"}
+ ].
diff --git a/src/couchdb/couch.app.tpl.in b/src/couchdb/couch.app.tpl.in
new file mode 100644
index 00000000..5ddf0989
--- /dev/null
+++ b/src/couchdb/couch.app.tpl.in
@@ -0,0 +1,29 @@
+{application,couch,
+ [{description,"@package_name@"},
+ {vsn,"@version@"},
+ {modules,[couch_btree,
+ cjson,
+ couch_db,
+ couch_doc,
+ couch_query_servers,
+ couch_file,
+ couch_server,
+ couch_server_sup,
+ couch_stream,
+ couch_key_tree,
+ couch_view,
+ couch_util,
+ mod_couch,
+ couch_event_sup,
+ couch_db_update_notifier,
+ couch_ft_query,
+ couch_log,
+ couch_rep]},
+ {registered,[couch_server,
+ couch_server_sup,
+ couch_util,
+ couch_view,
+ couch_query_servers,
+ couch_ft_query]},
+ {applications,[kernel,stdlib,xmerl,couch_inets]},
+ {mod,{couch_server,[]}}]}.
diff --git a/src/couchdb/couch_btree.erl b/src/couchdb/couch_btree.erl
new file mode 100644
index 00000000..2ae837dd
--- /dev/null
+++ b/src/couchdb/couch_btree.erl
@@ -0,0 +1,590 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_btree).
+
+-export([open/2, open/3, query_modify/4, add_remove/3, foldl/3, foldl/4]).
+-export([foldr/3, foldr/4, fold/4, fold/5, row_count/1]).
+-export([lookup/2, get_state/1, test/1, test/0]).
+
+-define(CHUNK_THRESHOLD, 16#fff).
+
+-record(btree,
+ {fd,
+ root,
+ extract_kv = fun({Key, Value}) -> {Key, Value} end,
+ assemble_kv = fun(Key, Value) -> {Key, Value} end,
+ less = fun(A, B) -> A < B end
+ }).
+
+extract(#btree{extract_kv=Extract}, Value) ->
+ Extract(Value).
+
+assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
+ Assemble(Key, Value).
+
+less(#btree{less=Less}, A, B) ->
+ Less(A, B).
+
+% pass in 'nil' for State if a new Btree.
+open(State, Fd) ->
+ {ok, #btree{root=State, fd=Fd}}.
+
+set_options(Bt, []) ->
+ Bt;
+set_options(Bt, [{split, Extract}|Rest]) ->
+ set_options(Bt#btree{extract_kv=Extract}, Rest);
+set_options(Bt, [{join, Assemble}|Rest]) ->
+ set_options(Bt#btree{assemble_kv=Assemble}, Rest);
+set_options(Bt, [{less, Less}|Rest]) ->
+ set_options(Bt#btree{less=Less}, Rest).
+
+open(State, Fd, Options) ->
+ {ok, set_options(#btree{root=State, fd=Fd}, Options)}.
+
+get_state(#btree{root=Root}) ->
+ Root.
+
+row_count(#btree{root=nil}) ->
+ 0;
+row_count(#btree{root={_RootPointer, Count}}) ->
+ Count.
+
+foldl(Bt, Fun, Acc) ->
+ fold(Bt, fwd, Fun, Acc).
+
+foldl(Bt, Key, Fun, Acc) ->
+ fold(Bt, Key, fwd, Fun, Acc).
+
+foldr(Bt, Fun, Acc) ->
+ fold(Bt, rev, Fun, Acc).
+
+foldr(Bt, Key, Fun, Acc) ->
+ fold(Bt, Key, rev, Fun, Acc).
+
+% wraps a 2 arity function with the proper 3 arity function
+convert_fun_arity(Fun) when is_function(Fun, 2) ->
+ fun(KV, _Offset, AccIn) -> Fun(KV, AccIn) end;
+convert_fun_arity(Fun) when is_function(Fun, 3) ->
+ Fun. % Already arity 3
+
+fold(Bt, Dir, Fun, Acc) ->
+ {_ContinueFlag, Acc2} = stream_node(Bt, 0, Bt#btree.root, nil, Dir, convert_fun_arity(Fun), Acc),
+ {ok, Acc2}.
+
+fold(Bt, Key, Dir, Fun, Acc) ->
+ {_ContinueFlag, Acc2} = stream_node(Bt, 0, Bt#btree.root, Key, Dir, convert_fun_arity(Fun), Acc),
+ {ok, Acc2}.
+
+add_remove(Bt, InsertKeyValues, RemoveKeys) ->
+ {Result, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
+ {Result, Bt2}.
+
+query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
+ #btree{root=Root} = Bt,
+ InsertActions = lists:map(
+ fun(KeyValue) ->
+ {Key, Value} = extract(Bt, KeyValue),
+ {insert, Key, Value}
+ end, InsertValues),
+ RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
+ FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
+ SortFun =
+ fun({OpA, A, _}, {OpB, B, _}) ->
+ case less(Bt, A, B) of
+ true -> true;
+ false ->
+ case less(Bt, B, A) of
+ true -> false;
+ false ->
+ % A and B are equal, sort by op.
+ op_order(OpA) < op_order(OpB)
+ end
+ end
+ end,
+ Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
+ {ok, KeyPointers, QueryResults, Bt2} = modify_node(Bt, Root, Actions, []),
+ {ok, NewRoot, Bt3} = complete_root(Bt2, KeyPointers),
+ {ok, QueryResults, Bt3#btree{root=NewRoot}}.
+
+% for ordering different operations with the same key.
+% fetch < remove < insert
+op_order(fetch) -> 1;
+op_order(remove) -> 2;
+op_order(insert) -> 3.
+
+lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
+ SortedKeys = lists:sort(Less, Keys),
+ {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
+ % We want to return the results in the same order as the keys were input
+ % but we may have changed the order when we sorted. So we need to put the
+ % order back into the results.
+ KeyDict = dict:from_list(SortedResults),
+ [dict:fetch(Key, KeyDict) || Key <- Keys].
+
+lookup(_Bt, nil, Keys) ->
+ {ok, [{Key, not_found} || Key <- Keys]};
+lookup(Bt, {Pointer, _Count}, Keys) ->
+ {NodeType, NodeList} = get_node(Bt, Pointer),
+ case NodeType of
+ kp_node ->
+ lookup_kpnode(Bt, NodeList, Keys, []);
+ kv_node ->
+ lookup_kvnode(Bt, NodeList, Keys, [])
+ end.
+
+
+lookup_kpnode(_Bt, [], Keys, Output) ->
+ {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
+
+lookup_kpnode(_Bt, _KPs, [], Output) ->
+ {ok, lists:reverse(Output)};
+
+lookup_kpnode(Bt, [{Key, PointerInfo} | RestKPs], LookupKeys, Output) ->
+ % Split the lookup keys into two lists: keys less than or
+ % equal to the current key, and keys greater than it
+ SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
+ case lists:splitwith(SplitFun, LookupKeys) of
+ {[], GreaterQueries} ->
+ lookup_kpnode(Bt, RestKPs, GreaterQueries, Output);
+ {LessEqQueries, GreaterQueries} ->
+ {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
+ lookup_kpnode(Bt, RestKPs, GreaterQueries, lists:reverse(Results, Output))
+ end.
+
+
+
+lookup_kvnode(_Bt, _KVs, [], Output) ->
+ {ok, lists:reverse(Output)};
+lookup_kvnode(_Bt, [], Keys, Output) ->
+ % keys not found
+ {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
+lookup_kvnode(Bt, [{Key, Value} | RestKVs], [LookupKey | RestLookupKeys], Output) ->
+ case less(Bt, LookupKey, Key) of
+ true ->
+ lookup_kvnode(Bt, [{Key, Value} | RestKVs], RestLookupKeys, [{LookupKey, not_found} | Output]);
+ false ->
+ case less(Bt, Key, LookupKey) of
+ true ->
+ % LookupKey is greater than Key
+ lookup_kvnode(Bt, RestKVs, [LookupKey | RestLookupKeys], Output);
+ false ->
+ % LookupKey is equal to Key
+ lookup_kvnode(Bt, RestKVs, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
+ end
+ end.
+
+
+complete_root(Bt, []) ->
+ {ok, nil, Bt};
+complete_root(Bt, [{_Key, PointerInfo}])->
+ {ok, PointerInfo, Bt};
+complete_root(Bt, KPs) ->
+ {ok, ResultKeyPointers, Bt2} = write_node(Bt, kp_node, KPs),
+ complete_root(Bt2, ResultKeyPointers).
+
+%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%%
+% It is inaccurate as it does not account for compression when blocks are
+% written. Plus with the "case size(term_to_binary(InList)) of" code it's
+% probably really inefficient.
+
+chunkify(_Bt, []) ->
+ [];
+chunkify(Bt, InList) ->
+ case size(term_to_binary(InList)) of
+ Size when Size > ?CHUNK_THRESHOLD ->
+ NumberOfChunksLikely = ((Size div ?CHUNK_THRESHOLD) + 1),
+ ChunkThreshold = Size div NumberOfChunksLikely,
+ chunkify(Bt, InList, ChunkThreshold, [], 0, []);
+ _Else ->
+ [InList]
+ end.
+
+chunkify(_Bt, [], _ChunkThreshold, [], 0, OutputChunks) ->
+ lists:reverse(OutputChunks);
+chunkify(_Bt, [], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
+ lists:reverse([lists:reverse(OutList) | OutputChunks]);
+chunkify(Bt, [InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
+ case size(term_to_binary(InElement)) of
+ Size when (Size + OutListSize) > ChunkThreshold ->
+ chunkify(Bt, RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
+ Size ->
+ chunkify(Bt, RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
+ end.
+
+modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
+ case RootPointerInfo of
+ nil ->
+ NodeType = kv_node,
+ NodeList = [];
+ {Pointer, _count} ->
+ {NodeType, NodeList} = get_node(Bt, Pointer)
+ end,
+ case NodeType of
+ kp_node ->
+ {ok, NewNodeList, QueryOutput2, Bt2} = modify_kpnode(Bt, NodeList, Actions, [], QueryOutput);
+ kv_node ->
+ {ok, NewNodeList, QueryOutput2, Bt2} = modify_kvnode(Bt, NodeList, Actions, [], QueryOutput)
+ end,
+ case NewNodeList of
+ [] -> % no nodes remain
+ {ok, [], QueryOutput2, Bt2};
+ NodeList -> % nothing changed
+ {LastKey, _LastValue} = lists:last(NodeList),
+ {ok, [{LastKey, RootPointerInfo}], QueryOutput2, Bt2};
+ _Else2 ->
+ {ok, ResultList, Bt3} = write_node(Bt2, NodeType, NewNodeList),
+ {ok, ResultList, QueryOutput2, Bt3}
+ end.
+
+
+count(kv_node, NodeList) ->
+ length(NodeList);
+count(kp_node, NodeList) ->
+ lists:foldl( fun({_Key, {_Pointer, Count}}, AccCount) ->
+ Count + AccCount
+ end,
+ 0, NodeList).
+
+
+get_node(#btree{fd = Fd}, NodePos) ->
+ {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
+ case NodeType of
+ kp_node ->
+ % Node pointers always point backward on disk.
+ % Validating this prevents infinite loops should
+ % a disk corruption occur.
+ [throw({error, disk_corruption})
+ || {_Key, {SubNodePos, _Count}}
+ <- NodeList, SubNodePos >= NodePos];
+ kv_node ->
+ ok
+ end,
+ {NodeType, NodeList}.
+
+write_node(Bt, NodeType, NodeList) ->
+ % split up nodes into smaller sizes
+ NodeListList = chunkify(Bt, NodeList),
+ % now write out each chunk and return the KeyPointer pairs for those nodes
+ ResultList = [
+ begin
+ {ok, Pointer} = couch_file:append_term(Bt#btree.fd, {NodeType, ANodeList}),
+ {LastKey, _} = lists:last(ANodeList),
+ {LastKey, {Pointer, count(NodeType, ANodeList)}}
+ end
+ ||
+ ANodeList <- NodeListList
+ ],
+ {ok, ResultList, Bt}.
+
+modify_kpnode(Bt, KPs, [], ResultNode, QueryOutput) ->
+ % processed all queries for the current tree
+ {ok, lists:reverse(ResultNode, KPs), QueryOutput, Bt};
+
+modify_kpnode(Bt, [], Actions, [{_Key, PointerInfo} | ResultNode], QueryOutput) ->
+ {ok, ChildKPs, QueryOutput2, Bt2} = modify_node(Bt, PointerInfo, Actions, QueryOutput),
+ {ok, lists:reverse(ResultNode, ChildKPs), QueryOutput2, Bt2};
+
+modify_kpnode(Bt, [{Key,PointerInfo} | RestKPs], Actions, ResultNode, QueryOutput) ->
+ % Split the actions into two lists: actions with keys less than
+ % or equal to the current key, and actions with greater keys
+ SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
+ not less(Bt, Key, ActionKey)
+ end,
+ case lists:splitwith(SplitFun, Actions) of
+ {[], GreaterQueries} ->
+ modify_kpnode(Bt, RestKPs, GreaterQueries, [{Key, PointerInfo} | ResultNode], QueryOutput);
+ {LessEqQueries, GreaterQueries} ->
+ {ok, ChildKPs, QueryOutput2, Bt2} = modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
+ modify_kpnode(Bt2, RestKPs, GreaterQueries, lists:reverse(ChildKPs, ResultNode), QueryOutput2)
+ end.
+
+modify_kvnode(Bt, KVs, [], ResultNode, QueryOutput) ->
+ {ok, lists:reverse(ResultNode, KVs), QueryOutput, Bt};
+modify_kvnode(Bt, [], [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) ->
+ case ActionType of
+ insert ->
+ modify_kvnode(Bt, [], RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+ remove ->
+ % just drop the action
+ modify_kvnode(Bt, [], RestActions, ResultNode, QueryOutput);
+ fetch ->
+ % the key/value must not exist in the tree
+ modify_kvnode(Bt, [], RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
+ end;
+modify_kvnode(Bt, [{Key, Value} | RestKVs], [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) ->
+ case less(Bt, ActionKey, Key) of
+ true ->
+ case ActionType of
+ insert ->
+ % ActionKey is less than the Key, so insert
+ modify_kvnode(Bt, [{Key, Value} | RestKVs], RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+ remove ->
+ % ActionKey is less than the Key, just drop the action
+ modify_kvnode(Bt, [{Key, Value} | RestKVs], RestActions, ResultNode, QueryOutput);
+ fetch ->
+ % ActionKey is less than the Key, the key/value must not exist in the tree
+ modify_kvnode(Bt, [{Key, Value} | RestKVs], RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
+ end;
+ false ->
+ case less(Bt, Key, ActionKey) of
+ true ->
+ % ActionKey is greater than Key
+ modify_kvnode(Bt, RestKVs, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput);
+ false ->
+ % InsertKey is equal to Key
+ case ActionType of
+ insert ->
+ % ActionKey is equal to the Key, so replace the value
+ modify_kvnode(Bt, RestKVs, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+ remove ->
+ modify_kvnode(Bt, RestKVs, RestActions, ResultNode, QueryOutput);
+ fetch ->
+ % ActionKey is equal to the Key, insert into the QueryOutput, but re-process the node
+ % since an identical action key can follow it.
+ modify_kvnode(Bt, [{Key, Value} | RestKVs], RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
+ end
+ end
+ end.
+
+adjust_dir(fwd, List) ->
+ List;
+adjust_dir(rev, List) ->
+ lists:reverse(List).
+
+stream_node(Bt, Offset, PointerInfo, nil, Dir, Fun, Acc) ->
+ stream_node(Bt, Offset, PointerInfo, Dir, Fun, Acc);
+stream_node(_Bt, _Offset, nil, _StartKey, _Dir, _Fun, Acc) ->
+ {ok, Acc};
+stream_node(Bt, Offset, {Pointer, _Count}, StartKey, Dir, Fun, Acc) ->
+ {NodeType, NodeList} = get_node(Bt, Pointer),
+ case NodeType of
+ kp_node ->
+ stream_kp_node(Bt, Offset, adjust_dir(Dir, NodeList), StartKey, Dir, Fun, Acc);
+ kv_node ->
+ stream_kv_node(Bt, Offset, adjust_dir(Dir, NodeList), StartKey, Dir, Fun, Acc)
+ end.
+
+stream_node(_Bt, _Offset, nil, _Dir, _Fun, Acc) ->
+ {ok, Acc};
+stream_node(Bt, Offset, {Pointer, _Count}, Dir, Fun, Acc) ->
+ {NodeType, NodeList} = get_node(Bt, Pointer),
+ case NodeType of
+ kp_node ->
+ stream_kp_node(Bt, Offset, adjust_dir(Dir, NodeList), Dir, Fun, Acc);
+ kv_node ->
+ stream_kv_node(Bt, Offset, adjust_dir(Dir, NodeList), Dir, Fun, Acc)
+ end.
+
+stream_kp_node(_Bt, _Offset, [], _Dir, _Fun, Acc) ->
+ {ok, Acc};
+stream_kp_node(Bt, Offset, [{_Key, {Pointer, Count}} | Rest], Dir, Fun, Acc) ->
+ case stream_node(Bt, Offset, {Pointer, Count}, Dir, Fun, Acc) of
+ {ok, Acc2} ->
+ stream_kp_node(Bt, Offset + Count, Rest, Dir, Fun, Acc2);
+ {stop, Acc2} ->
+ {stop, Acc2}
+ end.
+
+drop_nodes(_Bt, Offset, _StartKey, []) ->
+ {Offset, []};
+drop_nodes(Bt, Offset, StartKey, [{NodeKey, {Pointer, Count}} | RestKPs]) ->
+ case less(Bt, NodeKey, StartKey) of
+ true -> drop_nodes(Bt, Offset + Count, StartKey, RestKPs);
+ false -> {Offset, [{NodeKey, {Pointer, Count}} | RestKPs]}
+ end.
+
+stream_kp_node(Bt, Offset, KPs, StartKey, Dir, Fun, Acc) ->
+ {NewOffset, NodesToStream} =
+ case Dir of
+ fwd ->
+ % drop all nodes sorting before the key
+ drop_nodes(Bt, Offset, StartKey, KPs);
+ rev ->
+ % keep all nodes sorting before the key, AND the first node to sort after
+ RevKPs = lists:reverse(KPs),
+ case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
+ {_RevBefore, []} ->
+ % everything sorts before it
+ {Offset, KPs};
+ {RevBefore, [FirstAfter | Drop]} ->
+ {Offset + count(kp_node, Drop), [FirstAfter | lists:reverse(RevBefore)]}
+ end
+ end,
+ case NodesToStream of
+ [] ->
+ {ok, Acc};
+ [{_Key, PointerInfo} | Rest] ->
+ case stream_node(Bt, NewOffset, PointerInfo, StartKey, Dir, Fun, Acc) of
+ {ok, Acc2} ->
+ stream_kp_node(Bt, NewOffset, Rest, Dir, Fun, Acc2);
+ {stop, Acc2} ->
+ {stop, Acc2}
+ end
+ end.
+
+stream_kv_node(_Bt, _Offset, [], _Dir, _Fun, Acc) ->
+ {ok, Acc};
+stream_kv_node(Bt, Offset, [{K, V} | RestKVs], Dir, Fun, Acc) ->
+ case Fun(assemble(Bt, K, V), Offset, Acc) of
+ {ok, Acc2} ->
+ stream_kv_node(Bt, Offset + 1, RestKVs, Dir, Fun, Acc2);
+ {stop, Acc2} ->
+ {stop, Acc2}
+ end.
+
+stream_kv_node(Bt, Offset, KVs, StartKey, Dir, Fun, Acc) ->
+ DropFun =
+ case Dir of
+ fwd ->
+ fun({Key, _}) -> less(Bt, Key, StartKey) end;
+ rev ->
+ fun({Key, _}) -> less(Bt, StartKey, Key) end
+ end,
+ % drop all nodes preceding the key
+ GTEKVs = lists:dropwhile(DropFun, KVs),
+ LenSkipped = length(KVs) - length(GTEKVs),
+ stream_kv_node(Bt, Offset + LenSkipped, GTEKVs, Dir, Fun, Acc).
+
+
+
+
+test()->
+ test(1000).
+
+test(N) ->
+ KeyValues = [{random:uniform(), random:uniform()} || _Seq <- lists:seq(1, N)],
+ test_btree(KeyValues), % randomly distributed
+ Sorted = lists:sort(KeyValues),
+ test_btree(Sorted), % sorted regular
+ test_btree(lists:reverse(Sorted)). % sorted reverse
+
+
+test_btree(KeyValues) ->
+ {ok, Fd} = couch_file:open("foo", [create,overwrite]),
+ {ok, Btree} = open(nil, Fd),
+
+ % first dump in all the values in one go
+ {ok, Btree10} = add_remove(Btree, KeyValues, []),
+
+ ok = test_keys(Btree10, KeyValues),
+
+ % remove everything
+ {ok, Btree20} = test_remove(Btree10, KeyValues),
+
+ % make sure its empty
+ {ok, false} = foldl(Btree20, fun(_X, _Acc) ->
+ {ok, true} % change Acc to 'true'
+ end,
+ false),
+
+ % add everything back one at a time.
+ {ok, Btree30} = test_add(Btree20, KeyValues),
+
+ ok = test_keys(Btree30, KeyValues),
+
+ KeyValuesRev = lists:reverse(KeyValues),
+
+ % remove everything, in reverse order
+ {ok, Btree40} = test_remove(Btree30, KeyValuesRev),
+
+ % make sure its empty
+ {ok, false} = foldl(Btree40, fun(_X, _Acc) ->
+ {ok, true} % change Acc to 'true'
+ end,
+ false),
+
+
+ {A, B} = every_other(KeyValues),
+
+ % add everything back
+ {ok, Btree50} = test_add(Btree40,KeyValues),
+
+ ok = test_keys(Btree50, KeyValues),
+
+ % remove half the values
+ {ok, Btree60} = test_remove(Btree50, A),
+
+ % verify the remaining
+ ok = test_keys(Btree60, B),
+
+ % add A back
+ {ok, Btree70} = test_add(Btree60, A),
+
+ % verify
+ ok = test_keys(Btree70, KeyValues),
+
+ % remove B
+ {ok, Btree80} = test_remove(Btree70, B),
+
+ % verify the remaining
+ ok = test_keys(Btree80, A),
+
+ ok = couch_file:close(Fd).
+
+
+
+
+every_other(List) ->
+ every_other(List, [], [], 1).
+
+every_other([], AccA, AccB, _Flag) ->
+ {lists:reverse(AccA), lists:reverse(AccB)};
+every_other([H|T], AccA, AccB, 1) ->
+ every_other(T, [H|AccA], AccB, 0);
+every_other([H|T], AccA, AccB, 0) ->
+ every_other(T, AccA, [H|AccB], 1).
+
+test_keys(Btree, List) ->
+ FoldFun =
+ fun(Element, [HAcc|TAcc]) ->
+ Element = HAcc, % must match
+ {ok, TAcc}
+ end,
+ Sorted = lists:sort(List),
+ {ok, []} = foldl(Btree, FoldFun, Sorted),
+ {ok, []} = foldr(Btree, FoldFun, lists:reverse(Sorted)),
+
+ test_lookup(Btree, List).
+
+% Makes sure each key value pair is found in the btree
+test_lookup(_Btree, []) ->
+ ok;
+test_lookup(Btree, [{Key, Value} | Rest]) ->
+ [{ok,{Key, Value}}] = lookup(Btree, [Key]),
+ {ok, []} = foldl(Btree, Key, fun({KeyIn, ValueIn}, []) ->
+ KeyIn = Key,
+ ValueIn = Value,
+ {stop, []}
+ end,
+ []),
+ {ok, []} = foldr(Btree, Key, fun({KeyIn, ValueIn}, []) ->
+ KeyIn = Key,
+ ValueIn = Value,
+ {stop, []}
+ end,
+ []),
+ test_lookup(Btree, Rest).
+
+% removes each key one at a time from the btree
+test_remove(Btree, []) ->
+ {ok, Btree};
+test_remove(Btree, [{Key, _Value} | Rest]) ->
+ {ok, Btree2} = add_remove(Btree,[], [Key]),
+ test_remove(Btree2, Rest).
+
+% adds each key one at a time to the btree
+test_add(Btree, []) ->
+ {ok, Btree};
+test_add(Btree, [KeyValue | Rest]) ->
+ {ok, Btree2} = add_remove(Btree, [KeyValue], []),
+ test_add(Btree2, Rest).
diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl
new file mode 100644
index 00000000..e567d27b
--- /dev/null
+++ b/src/couchdb/couch_db.erl
@@ -0,0 +1,757 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_db).
+-behaviour(gen_server).
+
+-export([open/2,create/2,create/3,get_doc_info/2]).
+-export([save_docs/2, save_docs/3, get_db_info/1, update_doc/3, update_docs/2, update_docs/3]).
+-export([delete_doc/3,open_doc/2,open_doc/3,close/1,enum_docs_since/4,enum_docs_since/5]).
+-export([enum_docs/4,enum_docs/5, open_doc_revs/4, get_missing_revs/2]).
+-export([start_update_loop/1]).
+-export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
+
+-include("couch_db.hrl").
+
+-record(db_header,
+ {write_version = 0,
+ last_update_seq = 0,
+ summary_stream_state = nil,
+ docinfo_by_Id_btree_state = nil,
+ docinfo_by_seq_btree_state = nil,
+ local_docs_btree_state = nil,
+ doc_count=0,
+ doc_del_count=0
+ }).
+
+-record(db,
+ {main_pid,
+ update_pid,
+ fd,
+ header = #db_header{},
+ summary_stream,
+ docinfo_by_Id_btree,
+ docinfo_by_seq_btree,
+ local_docs_btree,
+ last_update_seq,
+ doc_count,
+ doc_del_count,
+ name
+ }).
+
+start_link(DbName, Filepath, Options) ->
+ case couch_file:open(Filepath, Options) of
+ {ok, Fd} ->
+ Result = gen_server:start_link(couch_db, {DbName, Fd, Options}, []),
+ unlink(Fd),
+ Result;
+ {error, enoent} ->
+ % couldn't find file
+ {error, not_found};
+ Else ->
+ Else
+ end.
+
+%%% Interface functions %%%
+
+create(Filepath, Options) ->
+ create(Filepath, Filepath, Options).
+
+create(DbName, Filepath, Options) when is_list(Options) ->
+ start_link(DbName, Filepath, [create | Options]).
+
+open(DbName, Filepath) ->
+ start_link(DbName, Filepath, []).
+
+delete_doc(MainPid, Id, Revisions) ->
+ DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
+ {ok, [Result]} = update_docs(MainPid, DeletedDocs, [new_edits]),
+ {ok, Result}.
+
+open_doc(MainPid, IdOrDocInfo) ->
+ open_doc(MainPid, IdOrDocInfo, []).
+
+open_doc(MainPid, Id, Options) ->
+ case open_doc_int(get_db(MainPid), Id, Options) of
+ {ok, #doc{deleted=true}=Doc} ->
+ case lists:member(deleted, Options) of
+ true ->
+ {ok, Doc};
+ false ->
+ {not_found, deleted}
+ end;
+ Else ->
+ Else
+ end.
+
+open_doc_revs(MainPid, Id, Revs, Options) ->
+ open_doc_revs_int(get_db(MainPid), Id, Revs, Options).
+
+get_missing_revs(MainPid, IdRevsList) ->
+ Ids = [Id1 || {Id1, _Revs} <- IdRevsList],
+ FullDocInfoResults = get_full_doc_infos(MainPid, Ids),
+ Results = lists:zipwith(
+ fun({Id, Revs}, FullDocInfoResult) ->
+ case FullDocInfoResult of
+ {ok, #full_doc_info{rev_tree=RevisionTree}} ->
+ {Id, couch_key_tree:find_missing(RevisionTree, Revs)};
+ not_found ->
+ {Id, Revs}
+ end
+ end,
+ IdRevsList, FullDocInfoResults),
+ {ok, Results}.
+
+get_doc_info(Db, Id) ->
+ case get_full_doc_info(Db, Id) of
+ {ok, DocInfo} ->
+ {ok, couch_doc:to_doc_info(DocInfo)};
+ Else ->
+ Else
+ end.
+
+% returns {ok, DocInfo} or not_found
+get_full_doc_info(Db, Id) ->
+ [Result] = get_full_doc_infos(Db, [Id]),
+ Result.
+
+
+get_full_doc_infos(MainPid, Ids) when is_pid(MainPid) ->
+ get_full_doc_infos(get_db(MainPid), Ids);
+get_full_doc_infos(#db{}=Db, Ids) ->
+ couch_btree:lookup(Db#db.docinfo_by_Id_btree, Ids).
+
+get_db_info(MainPid) when is_pid(MainPid) ->
+ get_db_info(get_db(MainPid));
+get_db_info(#db{doc_count=Count, doc_del_count=DelCount, last_update_seq=SeqNum}) ->
+ InfoList = [
+ {doc_count, Count},
+ {doc_del_count, DelCount},
+ {last_update_seq, SeqNum}
+ ],
+ {ok, InfoList}.
+
+update_doc(MainPid, Doc, Options) ->
+ {ok, [NewRev]} = update_docs(MainPid, [Doc], Options),
+ {ok, NewRev}.
+
+update_docs(MainPid, Docs) ->
+ update_docs(MainPid, Docs, []).
+
+% group_alike_docs groups the sorted documents into sublist buckets, by id.
+% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
+group_alike_docs(Docs) ->
+ Sorted = lists:sort(fun(#doc{id=A},#doc{id=B})-> A < B end, Docs),
+ group_alike_docs(Sorted, []).
+
+group_alike_docs([], Buckets) ->
+ lists:reverse(Buckets);
+group_alike_docs([Doc|Rest], []) ->
+ group_alike_docs(Rest, [[Doc]]);
+group_alike_docs([Doc|Rest], [Bucket|RestBuckets]) ->
+ [#doc{id=BucketId}|_] = Bucket,
+ case Doc#doc.id == BucketId of
+ true ->
+ % add to existing bucket
+ group_alike_docs(Rest, [[Doc|Bucket]|RestBuckets]);
+ false ->
+ % add to new bucket
+ group_alike_docs(Rest, [[Doc]|[Bucket|RestBuckets]])
+ end.
+
+
+prepare_doc_for_new_edit(Db, #doc{id=Id,revs=[NewRev|PrevRevs]}=Doc, OldFullDocInfo, LeafRevsDict) ->
+ case PrevRevs of
+ [PrevRev|_] ->
+ case dict:find(PrevRev, LeafRevsDict) of
+ {ok, {Deleted, Sp, DiskRevs}} ->
+ case couch_doc:has_stubs(Doc) of
+ true ->
+ DiskDoc = make_doc(Db, Id, Deleted, Sp, DiskRevs),
+ Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
+ Doc2#doc{revs=[NewRev|DiskRevs]};
+ false ->
+ Doc#doc{revs=[NewRev|DiskRevs]}
+ end;
+ error ->
+ throw(conflict)
+ end;
+ [] ->
+ % new doc, and we have existing revs.
+ OldDocInfo = couch_doc:to_doc_info(OldFullDocInfo),
+ if OldDocInfo#doc_info.deleted ->
+ % existing doc is a deletion
+ % allow this new doc to be a later revision.
+ {_Deleted, _Sp, Revs} = dict:fetch(OldDocInfo#doc_info.rev, LeafRevsDict),
+ Doc#doc{revs=[NewRev|Revs]};
+ true ->
+ throw(conflict)
+ end
+ end.
+
+update_docs(MainPid, Docs, Options) ->
+ Docs2 = lists:map(
+ fun(#doc{id=Id,revs=Revs}=Doc) ->
+ case Id of
+ ?LOCAL_DOC_PREFIX ++ _ ->
+ Rev = case Revs of [] -> 0; [Rev0|_] -> list_to_integer(Rev0) end,
+ Doc#doc{revs=[integer_to_list(Rev + 1)]};
+ _ ->
+ Doc#doc{revs=[integer_to_list(couch_util:rand32()) | Revs]}
+ end
+ end, Docs),
+ DocBuckets = group_alike_docs(Docs2),
+ Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
+ Db = get_db(MainPid),
+
+ % first things first, lookup the doc by id and get the most recent
+
+ ExistingDocs = get_full_doc_infos(Db, Ids),
+
+ DocBuckets2 = lists:zipwith(
+ fun(Bucket, not_found) ->
+ % no existing revs, make sure no old revision is specified.
+ [throw(conflict) || #doc{revs=[_NewRev, _OldRev | _]} <- Bucket],
+ Bucket;
+ (Bucket, {ok, #full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo}) ->
+ Leafs = couch_key_tree:get_all_leafs(OldRevTree),
+ LeafRevsDict = dict:from_list([{Rev, {Deleted, Sp, Revs}} || {Rev, {Deleted, Sp}, Revs} <- Leafs]),
+ [prepare_doc_for_new_edit(Db, Doc, OldFullDocInfo, LeafRevsDict) || Doc <- Bucket]
+ end,
+ DocBuckets, ExistingDocs),
+
+ % flush unwritten binaries to disk.
+ DocBuckets3 = [[doc_flush_binaries(Doc, Db#db.fd) || Doc <- Bucket] || Bucket <- DocBuckets2],
+
+ case gen_server:call(MainPid, {update_docs, DocBuckets3, Options}) of
+ ok ->
+ % return back the new rev ids, in the same order input.
+ {ok, [NewRev || #doc{revs=[NewRev|_]} <- Docs2]};
+ Else->
+ throw(Else)
+ end.
+
+save_docs(MainPid, Docs) ->
+ save_docs(MainPid, Docs, []).
+
+save_docs(MainPid, Docs, Options) ->
+ % flush unwritten binaries to disk.
+ Db = get_db(MainPid),
+ DocBuckets = group_alike_docs(Docs),
+ DocBuckets2 = [[doc_flush_binaries(Doc, Db#db.fd) || Doc <- Bucket] || Bucket <- DocBuckets],
+ ok = gen_server:call(MainPid, {update_docs, DocBuckets2, Options}).
+
+
+doc_flush_binaries(Doc, Fd) ->
+ % calc size of binaries to write out
+ Bins = Doc#doc.attachments,
+ PreAllocSize = lists:foldl(
+ fun(BinValue, SizeAcc) ->
+ case BinValue of
+ {_Key, {_Type, {Fd0, _StreamPointer, _Len}}} when Fd0 == Fd ->
+ % already written to our file, nothing to write
+ SizeAcc;
+ {_Key, {_Type, {_OtherFd, _StreamPointer, Len}}} ->
+ % written to a different file
+ SizeAcc + Len;
+ {_Key, {_Type, Bin}} when is_binary(Bin) ->
+ SizeAcc + size(Bin)
+ end
+ end,
+ 0, Bins),
+
+ {ok, OutputStream} = couch_stream:open(Fd),
+ ok = couch_stream:ensure_buffer(OutputStream, PreAllocSize),
+
+ NewBins = lists:map(
+ fun({Key, {Type, BinValue}}) ->
+ NewBinValue =
+ case BinValue of
+ {Fd0, StreamPointer, Len} when Fd0 == Fd ->
+ % already written to our file, nothing to write
+ {Fd, StreamPointer, Len};
+ {OtherFd, StreamPointer, Len} ->
+ % written to a different file (or a closed file
+ % instance, which will cause an error)
+ {ok, {NewStreamPointer, Len}, _EndSp} =
+ couch_stream:foldl(OtherFd, StreamPointer, Len,
+ fun(Bin, {BeginPointer, SizeAcc}) ->
+ {ok, Pointer} = couch_stream:write(OutputStream, Bin),
+ case SizeAcc of
+ 0 -> % this was the first write, record the pointer
+ {ok, {Pointer, size(Bin)}};
+ _ ->
+ {ok, {BeginPointer, SizeAcc + size(Bin)}}
+ end
+ end,
+ {{0,0}, 0}),
+ {Fd, NewStreamPointer, Len};
+ Bin when is_binary(Bin), size(Bin) > 0 ->
+ {ok, StreamPointer} = couch_stream:write(OutputStream, Bin),
+ {Fd, StreamPointer, size(Bin)}
+ end,
+ {Key, {Type, NewBinValue}}
+ end, Bins),
+
+ {ok, _FinalPos} = couch_stream:close(OutputStream),
+
+ Doc#doc{attachments = NewBins}.
+
+enum_docs_since(MainPid, SinceSeq, Direction, InFun, Ctx) ->
+ Db = get_db(MainPid),
+ couch_btree:fold(Db#db.docinfo_by_seq_btree, SinceSeq + 1, Direction, InFun, Ctx).
+
+enum_docs_since(MainPid, SinceSeq, InFun, Acc) ->
+ enum_docs_since(MainPid, SinceSeq, fwd, InFun, Acc).
+
+enum_docs(MainPid, StartId, Direction, InFun, InAcc) ->
+ Db = get_db(MainPid),
+ couch_btree:fold(Db#db.docinfo_by_Id_btree, StartId, Direction, InFun, InAcc).
+
+enum_docs(MainPid, StartId, InFun, Ctx) ->
+ enum_docs(MainPid, StartId, fwd, InFun, Ctx).
+
+close(MainPid) ->
+ Ref = erlang:monitor(process, MainPid),
+ unlink(MainPid),
+ exit(MainPid, normal),
+ receive
+ {'DOWN', Ref, process, MainPid, _Reason} ->
+ ok
+ end.
+
+
+% server functions
+
+init({DbName, Fd, Options}) ->
+ link(Fd),
+ case lists:member(create, Options) of
+ true ->
+ % create a new header and writes it to the file
+ Header = #db_header{},
+ ok = couch_file:write_header(Fd, <<$g, $m, $k, 0>>, Header),
+ ok = couch_file:sync(Fd),
+ init_main(DbName, Fd, Header);
+ false ->
+ {ok, Header} = couch_file:read_header(Fd, <<$g, $m, $k, 0>>),
+ init_main(DbName, Fd, Header)
+ end.
+
+btree_by_seq_split(DocInfo) ->
+ #doc_info{
+ id = Id,
+ rev = Rev,
+ update_seq = Seq,
+ summary_pointer = Sp,
+ conflict_revs = Conflicts,
+ deleted_conflict_revs = DelConflicts,
+ deleted = Deleted} = DocInfo,
+ {Seq,{Id, Rev, Sp, Conflicts, DelConflicts, Deleted}}.
+
+btree_by_seq_join(Seq,{Id, Rev, Sp, Conflicts, DelConflicts, Deleted}) ->
+ #doc_info{
+ id = Id,
+ rev = Rev,
+ update_seq = Seq,
+ summary_pointer = Sp,
+ conflict_revs = Conflicts,
+ deleted_conflict_revs = DelConflicts,
+ deleted = Deleted}.
+
+btree_by_name_split(#full_doc_info{id=Id, update_seq=Seq, rev_tree=Tree}) ->
+ {Id, {Seq, Tree}}.
+
+btree_by_name_join(Id, {Seq, Tree}) ->
+ #full_doc_info{id=Id, update_seq=Seq, rev_tree=Tree}.
+
+
+init_main(DbName, Fd, Header) ->
+ {ok, SummaryStream} = couch_stream:open(Header#db_header.summary_stream_state, Fd),
+ ok = couch_stream:set_min_buffer(SummaryStream, 10000),
+ {ok, IdBtree} = couch_btree:open(Header#db_header.docinfo_by_Id_btree_state, Fd,
+ [{split, fun(V) -> btree_by_name_split(V) end},
+ {join, fun(K,V) -> btree_by_name_join(K,V) end}] ),
+ {ok, SeqBtree} = couch_btree:open(Header#db_header.docinfo_by_seq_btree_state, Fd,
+ [{split, fun(V) -> btree_by_seq_split(V) end},
+ {join, fun(K,V) -> btree_by_seq_join(K,V) end}] ),
+ {ok, LocalDocsBtree} = couch_btree:open(Header#db_header.local_docs_btree_state, Fd),
+
+ Db = #db{
+ main_pid=self(),
+ fd=Fd,
+ header=Header,
+ summary_stream = SummaryStream,
+ docinfo_by_Id_btree = IdBtree,
+ docinfo_by_seq_btree = SeqBtree,
+ local_docs_btree = LocalDocsBtree,
+ last_update_seq = Header#db_header.last_update_seq,
+ doc_count = Header#db_header.doc_count,
+ doc_del_count = Header#db_header.doc_del_count,
+ name = DbName
+ },
+
+ UpdatePid = spawn_link(couch_db, start_update_loop, [Db]),
+
+ {ok, Db#db{update_pid=UpdatePid}}.
+
+terminate(_Reason, Db) ->
+ Db#db.update_pid ! close,
+ couch_file:close(Db#db.fd).
+
+handle_call({update_docs, DocActions, Options}, From, #db{update_pid=Updater}=Db) ->
+ Updater ! {From, update_docs, DocActions, Options},
+ {noreply, Db};
+handle_call(get_db, _From, Db) ->
+ {reply, {ok, Db}, Db};
+handle_call({db_updated, NewDb}, _From, _OldDb) ->
+ {reply, ok, NewDb}.
+
+
+handle_cast(foo, Main) ->
+ {noreply, Main}.
+
+%%% Internal function %%%
+
+start_update_loop(Db) ->
+ update_loop(Db#db{update_pid=self()}).
+
+update_loop(Db) ->
+ receive
+ {OrigFrom, update_docs, DocActions, Options} ->
+ case (catch update_docs_int(Db, DocActions, Options)) of
+ {ok, Db2} ->
+ ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+ gen_server:reply(OrigFrom, ok),
+ couch_db_update_notifier:notify({updated, Db2#db.name}),
+ update_loop(Db2);
+ conflict ->
+ gen_server:reply(OrigFrom, conflict),
+ update_loop(Db);
+ Error ->
+ exit(Error) % we crashed
+ end;
+ close ->
+ % terminate loop
+ exit(normal)
+ end.
+
+get_db(MainPid) ->
+ {ok, Db} = gen_server:call(MainPid, get_db),
+ Db.
+
+open_doc_revs_int(Db, Id, Revs, Options) ->
+ case get_full_doc_info(Db, Id) of
+ {ok, #full_doc_info{rev_tree=RevTree}} ->
+ {FoundRevs, MissingRevs} =
+ case Revs of
+ all ->
+ {couch_key_tree:get_all_leafs(RevTree), []};
+ _ ->
+ case lists:member(latest, Options) of
+ true ->
+ couch_key_tree:get_key_leafs(RevTree, Revs);
+ false ->
+ couch_key_tree:get(RevTree, Revs)
+ end
+ end,
+ FoundResults =
+ lists:map(fun({Rev, Value, FoundRevPath}) ->
+ case Value of
+ 0 ->
+ % we have the rev in our list but know nothing about it
+ {{not_found, missing}, Rev};
+ {IsDeleted, SummaryPtr} ->
+ {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
+ end
+ end, FoundRevs),
+ Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
+ {ok, Results};
+ not_found when Revs == all ->
+ {ok, []};
+ not_found ->
+ {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
+ end.
+
+open_doc_int(Db, ?LOCAL_DOC_PREFIX ++ _ = Id, _Options) ->
+ case couch_btree:lookup(Db#db.local_docs_btree, [Id]) of
+ [{ok, {_, {Rev, BodyData}}}] ->
+ {ok, #doc{id=Id, revs=[integer_to_list(Rev)], body=BodyData}};
+ [not_found] ->
+ {not_found, missing}
+ end;
+open_doc_int(Db, #doc_info{id=Id,rev=Rev,deleted=IsDeleted,summary_pointer=Sp}=DocInfo, Options) ->
+ Doc = make_doc(Db, Id, IsDeleted, Sp, [Rev]),
+ {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}};
+open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
+ #doc_info{deleted=IsDeleted,rev=Rev, summary_pointer=Sp} = DocInfo =
+ couch_doc:to_doc_info(FullDocInfo),
+ {[{_Rev,_Value, Revs}], []} = couch_key_tree:get(RevTree, [Rev]),
+ Doc = make_doc(Db, Id, IsDeleted, Sp, Revs),
+ {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}};
+open_doc_int(Db, Id, Options) ->
+ case get_full_doc_info(Db, Id) of
+ {ok, FullDocInfo} ->
+ open_doc_int(Db, FullDocInfo, Options);
+ not_found ->
+ throw({not_found, missing})
+ end.
+
+doc_meta_info(DocInfo, RevTree, Options) ->
+ case lists:member(revs_info, Options) of
+ false -> [];
+ true ->
+ {[RevPath],[]} =
+ couch_key_tree:get_full_key_paths(RevTree, [DocInfo#doc_info.rev]),
+ [{revs_info, [{Rev, Deleted} || {Rev, {Deleted, _Sp0}} <- RevPath]}]
+ end ++
+ case lists:member(conflicts, Options) of
+ false -> [];
+ true ->
+ case DocInfo#doc_info.conflict_revs of
+ [] -> [];
+ _ -> [{conflicts, DocInfo#doc_info.conflict_revs}]
+ end
+ end ++
+ case lists:member(deleted_conflicts, Options) of
+ false -> [];
+ true ->
+ case DocInfo#doc_info.deleted_conflict_revs of
+ [] -> [];
+ _ -> [{deleted_conflicts, DocInfo#doc_info.deleted_conflict_revs}]
+ end
+ end.
+
+% rev tree functions
+
+doc_to_tree(Doc) ->
+ doc_to_tree(Doc, lists:reverse(Doc#doc.revs)).
+
+doc_to_tree(Doc, [RevId]) ->
+ [{RevId, Doc, []}];
+doc_to_tree(Doc, [RevId | Rest]) ->
+ [{RevId, [], doc_to_tree(Doc, Rest)}].
+
+make_doc(Db, Id, Deleted, SummaryPointer, RevisionPath) ->
+ {BodyData, BinValues} =
+ case SummaryPointer of
+ nil ->
+ {[], []};
+ _ ->
+ {ok, {BodyData0, BinValues0}} = couch_stream:read_term(Db#db.summary_stream, SummaryPointer),
+ {BodyData0, [{Name, {Type, {Db#db.fd, Sp, Len}}} || {Name, {Type, Sp, Len}} <- BinValues0]}
+ end,
+ #doc{
+ id = Id,
+ revs = RevisionPath,
+ body = BodyData,
+ attachments = BinValues,
+ deleted = Deleted
+ }.
+
+flush_trees(_Db, [], AccFlushedTrees) ->
+ {ok, lists:reverse(AccFlushedTrees)};
+flush_trees(Db, [Unflushed | RestUnflushed], AccFlushed) ->
+ Flushed = couch_key_tree:map(
+ fun(_Rev, Value) ->
+ case Value of
+ #doc{attachments=Atts,deleted=IsDeleted}=Doc ->
+ % this node value is actually an unwritten document summary,
+ % write to disk.
+
+ % convert bins, removing the FD.
+ % All bins should have been flushed to disk already.
+ Bins = [{BinName, {BinType, BinSp, BinLen}} || {BinName, {BinType, {_Fd, BinSp, BinLen}}} <- Atts],
+ {ok, NewSummaryPointer} = couch_stream:write_term(Db#db.summary_stream, {Doc#doc.body, Bins}),
+ {IsDeleted, NewSummaryPointer};
+ _ ->
+ Value
+ end
+ end, Unflushed),
+ flush_trees(Db, RestUnflushed, [Flushed | AccFlushed]).
+
+merge_rev_trees(_NoConflicts, [], [], AccNewTrees) ->
+ {ok, lists:reverse(AccNewTrees)};
+merge_rev_trees(NoConflicts, [NewDocs | RestDocsList],
+ [OldTree | RestOldTrees], AccNewTrees) ->
+ UpdatesRevTree = lists:foldl(
+ fun(NewDoc, AccTree) ->
+ couch_key_tree:merge(AccTree, doc_to_tree(NewDoc))
+ end,
+ [], NewDocs),
+ NewRevTree = couch_key_tree:merge(OldTree, UpdatesRevTree),
+ if NoConflicts andalso OldTree == [] ->
+ OldConflicts = couch_key_tree:count_leafs(OldTree),
+ NewConflicts = couch_key_tree:count_leafs(NewRevTree),
+ if NewConflicts > OldConflicts ->
+ throw(conflict);
+ true -> ok
+ end;
+ true -> ok
+ end,
+ merge_rev_trees(NoConflicts, RestDocsList, RestOldTrees, [NewRevTree | AccNewTrees]).
+
+new_index_entries([], [], Seq, DocCount, DelCount, AccById, AccBySeq) ->
+ {ok, Seq, DocCount, DelCount, AccById, AccBySeq};
+new_index_entries([Id|RestIds], [RevTree|RestTrees], Seq0, DocCount, DelCount, AccById, AccBySeq) ->
+ Seq = Seq0 + 1,
+ FullDocInfo = #full_doc_info{id=Id, update_seq=Seq, rev_tree=RevTree},
+ #doc_info{deleted=Deleted} = DocInfo = couch_doc:to_doc_info(FullDocInfo),
+ {DocCount2, DelCount2} =
+ if Deleted -> {DocCount, DelCount + 1};
+ true -> {DocCount + 1, DelCount}
+ end,
+ new_index_entries(RestIds, RestTrees, Seq, DocCount2, DelCount2, [FullDocInfo|AccById], [DocInfo|AccBySeq]).
+
+update_docs_int(Db, DocsList, Options) ->
+ #db{
+ docinfo_by_Id_btree = DocInfoByIdBTree,
+ docinfo_by_seq_btree = DocInfoBySeqBTree,
+ last_update_seq = LastSeq,
+ doc_count = FullDocCount,
+ doc_del_count = FullDelCount
+ } = Db,
+
+ % separate out the NonRep documents from the rest of the documents
+ {DocsList2, NonRepDocs} = lists:foldl(
+ fun([#doc{id=Id}=Doc | Rest]=Docs, {DocsListAcc, NonRepDocsAcc}) ->
+ case Id of
+ ?LOCAL_DOC_PREFIX ++ _ when Rest==[] ->
+ % when saving NR (non rep) documents, you can only save a single rev
+ {DocsListAcc, [Doc | NonRepDocsAcc]};
+ Id->
+ {[Docs | DocsListAcc], NonRepDocsAcc}
+ end
+ end, {[], []}, DocsList),
+
+ Ids = [Id || [#doc{id=Id}|_] <- DocsList2],
+
+ % lookup up the existing documents, if they exist.
+ OldDocLookups = couch_btree:lookup(DocInfoByIdBTree, Ids),
+ OldDocTrees = lists:map(
+ fun({ok, #full_doc_info{rev_tree=OldRevTree}}) ->
+ OldRevTree;
+ (not_found) ->
+ []
+ end,
+ OldDocLookups),
+
+ {OldCount, OldDelCount} = lists:foldl(
+ fun({ok, FullDocInfo}, {OldCountAcc, OldDelCountAcc}) ->
+ case couch_doc:to_doc_info(FullDocInfo) of
+ #doc_info{deleted=false} ->
+ {OldCountAcc + 1, OldDelCountAcc};
+ _ ->
+ {OldCountAcc , OldDelCountAcc + 1}
+ end;
+ (not_found, Acc) ->
+ Acc
+ end, {0, 0}, OldDocLookups),
+
+ % Merge the new docs into the revision trees.
+ NoConflicts = lists:member(no_conflicts, Options),
+ {ok, NewRevTrees} = merge_rev_trees(NoConflicts, DocsList2, OldDocTrees, []),
+
+ RemoveSeqs = [ OldSeq || {ok, #full_doc_info{update_seq=OldSeq}} <- OldDocLookups],
+
+ % All regular documents are now ready to write.
+
+ % Try to write the local documents first, a conflict might be generated
+ {ok, Db2} = update_local_docs(Db, NonRepDocs),
+
+ % Write out the documents summaries (they are stored in the nodes of the rev trees)
+ {ok, FlushedRevTrees} = flush_trees(Db2, NewRevTrees, []),
+
+ {ok, NewSeq, NewDocsCount, NewDelCount, InfoById, InfoBySeq} =
+ new_index_entries(Ids, FlushedRevTrees, LastSeq, 0, 0, [], []),
+
+ % and the indexes to the documents
+ {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree, InfoBySeq, RemoveSeqs),
+ {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree, InfoById, []),
+
+ Db3 = Db2#db{
+ docinfo_by_Id_btree = DocInfoByIdBTree2,
+ docinfo_by_seq_btree = DocInfoBySeqBTree2,
+ last_update_seq = NewSeq,
+ doc_count = FullDocCount + NewDocsCount - OldCount,
+ doc_del_count = FullDelCount + NewDelCount - OldDelCount
+ },
+
+ case lists:member(delay_commit, Options) of
+ true ->
+ {ok, Db3};
+ false ->
+ commit_outstanding(Db3)
+ end.
+
+update_local_docs(#db{local_docs_btree=Btree}=Db, Docs) ->
+ Ids = [Id || #doc{id=Id} <- Docs],
+ OldDocLookups = couch_btree:lookup(Btree, Ids),
+ BtreeEntries = lists:zipwith(
+ fun(#doc{id=Id,deleted=Delete,revs=Revs,body=Body}, OldDocLookup) ->
+ BasedOnRev =
+ case Revs of
+ [] -> 0;
+ [RevStr|_] -> list_to_integer(RevStr) - 1
+ end,
+ OldRev =
+ case OldDocLookup of
+ {ok, {_, {OldRev0, _}}} -> OldRev0;
+ not_found -> 0
+ end,
+ case OldRev == BasedOnRev of
+ true ->
+ case Delete of
+ false -> {update, {Id, {OldRev+1, Body}}};
+ true -> {remove, Id}
+ end;
+ false ->
+ throw(conflict)
+ end
+
+ end, Docs, OldDocLookups),
+
+ BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
+ BtreeIdsUpdate = [ByIdDocInfo || {update, ByIdDocInfo} <- BtreeEntries],
+
+ {ok, Btree2} =
+ couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),
+
+ {ok, Db#db{local_docs_btree = Btree2}}.
+
+
+
+commit_outstanding(#db{fd=Fd, header=Header} = Db) ->
+ ok = couch_file:sync(Fd), % commit outstanding data
+ Header2 = Header#db_header{
+ last_update_seq = Db#db.last_update_seq,
+ summary_stream_state = couch_stream:get_state(Db#db.summary_stream),
+ docinfo_by_seq_btree_state = couch_btree:get_state(Db#db.docinfo_by_seq_btree),
+ docinfo_by_Id_btree_state = couch_btree:get_state(Db#db.docinfo_by_Id_btree),
+ local_docs_btree_state = couch_btree:get_state(Db#db.local_docs_btree),
+ doc_count = Db#db.doc_count,
+ doc_del_count = Db#db.doc_del_count
+ },
+ ok = couch_file:write_header(Fd, <<$g, $m, $k, 0>>, Header2),
+ ok = couch_file:sync(Fd), % commit header to disk
+ Db2 = Db#db{
+ header = Header2
+ },
+ {ok, Db2}.
+
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+
diff --git a/src/couchdb/couch_db.hrl b/src/couchdb/couch_db.hrl
new file mode 100644
index 00000000..51ee7af2
--- /dev/null
+++ b/src/couchdb/couch_db.hrl
@@ -0,0 +1,56 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-define(LOCAL_DOC_PREFIX, "_local/").
+-define(DESIGN_DOC_PREFIX0, "_design").
+-define(DESIGN_DOC_PREFIX, "_design/").
+
+-define(DEFAULT_ATTACHMENT_CONTENT_TYPE, "application/octet-stream").
+
+-record(doc_info,
+ {
+ id = "",
+ rev = "",
+ update_seq = 0,
+ summary_pointer = nil,
+ conflict_revs = [],
+ deleted_conflict_revs = [],
+ deleted = false
+ }).
+
+-record(full_doc_info,
+ {id = "",
+ update_seq = 0,
+ rev_tree = []
+ }).
+
+-record(doc,
+ {
+ id = "",
+ revs = [], % in the form [{RevId, IsAvailable}, ...]
+
+ % the json body object.
+ body = {obj, []},
+
+ % each attachment contains:
+ % {data, Type, <<binary>>}
+ % or:
+ % {pointer, Type, {FileHandle, StreamPointer, Length}}
+ attachments = [],
+
+ deleted = false,
+
+ % key/value tuple of meta information, provided when using special options:
+ % couch_db:open_doc(Db, Id, Options).
+ meta = []
+ }).
+
diff --git a/src/couchdb/couch_db_update_notifier.erl b/src/couchdb/couch_db_update_notifier.erl
new file mode 100644
index 00000000..96354620
--- /dev/null
+++ b/src/couchdb/couch_db_update_notifier.erl
@@ -0,0 +1,66 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%
+% This causes an OS process to spawned and it is notified every time a database
+% is updated.
+%
+% The notifications are in the form of a the database name sent as a line of
+% text to the OS processes stdout.
+%
+
+-module(couch_db_update_notifier).
+
+-behaviour(gen_event).
+
+-export([start_link/1, notify/1]).
+-export([init/1, terminate/2, handle_event/2, handle_call/2, handle_info/2, code_change/3,stop/1]).
+
+-define(ERR_HANDLE, {Port, {exit_status, Status}} -> {stop, {unknown_error, Status}, {unknown_error, Status}, Port}).
+
+start_link(Exec) ->
+ couch_event_sup:start_link(couch_db_update, {couch_db_update_notifier, make_ref()}, Exec).
+
+notify(Event) ->
+ gen_event:notify(couch_db_update, Event).
+
+stop(Pid) ->
+ couch_event_sup:stop(Pid).
+
+init(Exec) when is_list(Exec) -> % an exe
+ Port = open_port({spawn, Exec}, [stream, exit_status, hide]),
+ {ok, Port};
+init(Else) ->
+ {ok, Else}.
+
+terminate(_Reason, _Port) ->
+ ok.
+
+handle_event(Event, Fun) when is_function(Fun, 1) ->
+ Fun(Event),
+ {ok, Fun};
+handle_event(Event, {Fun, FunAcc}) ->
+ FunAcc2 = Fun(Event, FunAcc),
+ {ok, {Fun, FunAcc2}};
+handle_event({EventAtom, DbName}, Port) ->
+ Obj = {obj, [{type, atom_to_list(EventAtom)}, {db, DbName}]},
+ true = port_command(Port, cjson:encode(Obj) ++ "\n"),
+ {ok, Port}.
+
+handle_call(_Request, State) ->
+ {ok, ok, State}.
+
+handle_info({'EXIT', _, _Reason}, _Port) ->
+ remove_handler.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/src/couchdb/couch_doc.erl b/src/couchdb/couch_doc.erl
new file mode 100644
index 00000000..a9ef55f7
--- /dev/null
+++ b/src/couchdb/couch_doc.erl
@@ -0,0 +1,199 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_doc).
+
+-export([get_view_functions/1, is_special_doc/1,to_doc_info/1]).
+-export([bin_foldl/3,bin_size/1,bin_to_binary/1]).
+-export([from_json_obj/1,to_json_obj/2,has_stubs/1, merge_stubs/2]).
+
+-include("couch_db.hrl").
+
+to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs=Revs,meta=Meta}=Doc,Options)->
+ {obj, [{"_id", Id}] ++
+ case Revs of
+ [] -> [];
+ _ -> [{"_rev", lists:nth(1, Revs)}]
+ end ++
+ case Del of
+ false ->
+ {obj, BodyProps} = Body,
+ BodyProps;
+ true ->
+ [{"_deleted", true}]
+ end ++
+ case lists:member(revs, Options) of
+ false -> [];
+ true ->
+ [{"_revs", list_to_tuple(Revs)}]
+ end ++
+ lists:map(
+ fun({revs_info, RevsInfo}) ->
+ JsonRevsInfo =
+ [{obj, [{rev, Rev}, {status, atom_to_list(Status)}]} ||
+ {Rev, Status} <- RevsInfo],
+ {"_revs_info", list_to_tuple(JsonRevsInfo)};
+ ({conflicts, Conflicts}) ->
+ {"_conflicts", list_to_tuple(Conflicts)};
+ ({deleted_conflicts, Conflicts}) ->
+ {"_deleted_conflicts", list_to_tuple(Conflicts)}
+ end, Meta) ++
+ case lists:member(attachments, Options) of
+ true -> % return the full rev list and the binaries as strings.
+ BinProps = lists:map(
+ fun({Name, {Type, BinValue}}) ->
+ {Name, {obj, [{"content-type", Type},
+ {"data", couch_util:encodeBase64(bin_to_binary(BinValue))}]}}
+ end,
+ Doc#doc.attachments),
+ case BinProps of
+ [] -> [];
+ _ -> [{"_attachments", {obj, BinProps}}]
+ end;
+ false ->
+ BinProps = lists:map(
+ fun({Name, {Type, BinValue}}) ->
+ {Name, {obj, [{"stub", true}, {"content-type", Type},
+ {"length", bin_size(BinValue)}]}}
+ end,
+ Doc#doc.attachments),
+ case BinProps of
+ [] -> [];
+ _ -> [{"_attachments", {obj, BinProps}}]
+ end
+ end
+ }.
+
+from_json_obj({obj, Props}) ->
+ {obj,JsonBins} = proplists:get_value("_attachments", Props, {obj, []}),
+ Bins = lists:flatmap(fun({Name, {obj, BinProps}}) ->
+ case proplists:get_value("stub", BinProps) of
+ true ->
+ [{Name, stub}];
+ _ ->
+ Value = proplists:get_value("data", BinProps),
+ Type = proplists:get_value("content-type", BinProps,
+ ?DEFAULT_ATTACHMENT_CONTENT_TYPE),
+ [{Name, {Type, couch_util:decodeBase64(Value)}}]
+ end
+ end, JsonBins),
+ AllowedSpecialMembers = ["id", "revs", "rev", "attachments", "revs_info",
+ "conflicts", "deleted_conflicts", "deleted"],
+ [case lists:member(Name, AllowedSpecialMembers) of
+ true ->
+ ok;
+ false ->
+ throw({doc_validation, io_lib:format("Bad special document member: _~s", [Name])})
+ end
+ || {[$_|Name], _Value} <- Props],
+ Revs =
+ case tuple_to_list(proplists:get_value("_revs", Props, {})) of
+ [] ->
+ case proplists:get_value("_rev", Props) of
+ undefined -> [];
+ Rev -> [Rev]
+ end;
+ Revs0 ->
+ Revs0
+ end,
+ #doc{
+ id = proplists:get_value("_id", Props, ""),
+ revs = Revs,
+ deleted = proplists:get_value("_deleted", Props, false),
+ body = {obj, [{Key, Value} || {[FirstChar|_]=Key, Value} <- Props, FirstChar /= $_]},
+ attachments = Bins
+ }.
+
+
+to_doc_info(#full_doc_info{id=Id,update_seq=Seq,rev_tree=Tree}) ->
+ LeafRevs = couch_key_tree:get_all_leafs(Tree),
+ SortedLeafRevs =
+ lists:sort(fun({RevIdA, {IsDeletedA, _}, PathA}, {RevIdB, {IsDeletedB, _}, PathB}) ->
+ % sort descending by {not deleted, then Depth, then RevisionId}
+ A = {not IsDeletedA, length(PathA), RevIdA},
+ B = {not IsDeletedB, length(PathB), RevIdB},
+ A > B
+ end,
+ LeafRevs),
+
+ [{RevId, {IsDeleted, SummaryPointer}, _Path} | Rest] = SortedLeafRevs,
+
+ {ConflictRevTuples, DeletedConflictRevTuples} =
+ lists:splitwith(fun({_ConflictRevId, {IsDeleted1, _SummaryPointer}, _}) ->
+ not IsDeleted1
+ end, Rest),
+
+ ConflictRevs = [RevId1 || {RevId1, _, _} <- ConflictRevTuples],
+ DeletedConflictRevs = [RevId2 || {RevId2, _, _} <- DeletedConflictRevTuples],
+
+ #doc_info{
+ id=Id,
+ update_seq=Seq,
+ rev = RevId,
+ summary_pointer = SummaryPointer,
+ conflict_revs = ConflictRevs,
+ deleted_conflict_revs = DeletedConflictRevs,
+ deleted = IsDeleted
+ }.
+
+is_special_doc(?DESIGN_DOC_PREFIX ++ _ ) ->
+ true;
+is_special_doc(#doc{id=Id}) ->
+ is_special_doc(Id);
+is_special_doc(_) ->
+ false.
+
+bin_foldl(Bin, Fun, Acc) when is_binary(Bin) ->
+ case Fun(Bin, Acc) of
+ {ok, Acc2} -> {ok, Acc2};
+ {done, Acc2} -> {ok, Acc2}
+ end;
+bin_foldl({Fd, Sp, Len}, Fun, Acc) ->
+ {ok, Acc2, _Sp2} = couch_stream:foldl(Fd, Sp, Len, Fun, Acc),
+ {ok, Acc2}.
+
+bin_size(Bin) when is_binary(Bin) ->
+ size(Bin);
+bin_size({_Fd, _Sp, Len}) ->
+ Len.
+
+bin_to_binary(Bin) when is_binary(Bin) ->
+ Bin;
+bin_to_binary({Fd, Sp, Len}) ->
+ {ok, Bin, _Sp2} = couch_stream:read(Fd, Sp, Len),
+ Bin.
+
+get_view_functions(#doc{body={obj, Fields}}) ->
+ Lang = proplists:get_value("language", Fields, "text/javascript"),
+ {obj, Views} = proplists:get_value("views", Fields, {obj, []}),
+ {Lang, [{ViewName, Value} || {ViewName, Value} <- Views, is_list(Value)]};
+get_view_functions(_Doc) ->
+ none.
+
+has_stubs(#doc{attachments=Bins}) ->
+ has_stubs(Bins);
+has_stubs([]) ->
+ false;
+has_stubs([{_Name, stub}|_]) ->
+ true;
+has_stubs([_Bin|Rest]) ->
+ has_stubs(Rest).
+
+merge_stubs(#doc{attachments=MemBins}=StubsDoc, #doc{attachments=DiskBins}) ->
+ BinDict = dict:from_list(DiskBins),
+ MergedBins = lists:map(
+ fun({Name, stub}) ->
+ {Name, dict:fetch(Name, BinDict)};
+ ({Name, Value}) ->
+ {Name, Value}
+ end, MemBins),
+ StubsDoc#doc{attachments= MergedBins}.
diff --git a/src/couchdb/couch_erl_driver.c b/src/couchdb/couch_erl_driver.c
new file mode 100644
index 00000000..b5703f09
--- /dev/null
+++ b/src/couchdb/couch_erl_driver.c
@@ -0,0 +1,160 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+// This file is the C port driver for Erlang. It provides a low overhead
+// means of calling into C code, however unlike the Fabric engine, coding
+// errors in this module can crash the entire Erlang server.
+
+#include "erl_driver.h"
+#include "unicode/ucol.h"
+#include "unicode/ucasemap.h"
+#ifndef WIN32
+#include <string.h> // for memcpy
+#endif
+
+typedef struct {
+ ErlDrvPort port;
+ UCollator* collNoCase;
+ UCollator* coll;
+} couch_drv_data;
+
+static void couch_drv_stop(ErlDrvData data)
+{
+ couch_drv_data* pData = (couch_drv_data*)data;
+ if (pData->coll) {
+ ucol_close(pData->coll);
+ }
+ if (pData->collNoCase) {
+ ucol_close(pData->collNoCase);
+ }
+ driver_free((char*)pData);
+}
+
+static ErlDrvData couch_drv_start(ErlDrvPort port, char *buff)
+{
+ UErrorCode status = U_ZERO_ERROR;
+ couch_drv_data* pData = (couch_drv_data*)driver_alloc(sizeof(couch_drv_data));
+
+ if (pData == NULL)
+ return ERL_DRV_ERROR_GENERAL;
+
+ pData->port = port;
+ pData->coll = NULL;
+ pData->collNoCase = NULL;
+ pData->coll = ucol_open("", &status);
+
+ if (U_FAILURE(status)) {
+ couch_drv_stop((ErlDrvData)pData);
+ return ERL_DRV_ERROR_GENERAL;
+ }
+
+ pData->collNoCase = ucol_open("", &status);
+ if (U_FAILURE(status)) {
+ couch_drv_stop((ErlDrvData)pData);
+ return ERL_DRV_ERROR_GENERAL;
+ }
+
+ ucol_setAttribute(pData->collNoCase, UCOL_STRENGTH, UCOL_PRIMARY, &status);
+ if (U_FAILURE(status)) {
+ couch_drv_stop((ErlDrvData)pData);
+ return ERL_DRV_ERROR_GENERAL;
+ }
+
+ return (ErlDrvData)pData;
+}
+
+static int return_control_result(void* pLocalResult, int localLen, char **ppRetBuf, int returnLen)
+{
+ if (*ppRetBuf == NULL || localLen > returnLen) {
+ *ppRetBuf = (char*)driver_alloc_binary(localLen);
+ if(*ppRetBuf == NULL) {
+ return -1;
+ }
+ }
+ memcpy(*ppRetBuf, pLocalResult, localLen);
+ return localLen;
+}
+
+static int couch_drv_control(ErlDrvData drv_data, unsigned int command, const char *pBuf,
+ int bufLen, char **rbuf, int rlen)
+{
+ #define COLLATE 0
+ #define COLLATE_NO_CASE 1
+
+ couch_drv_data* pData = (couch_drv_data*)drv_data;
+
+ UErrorCode status = U_ZERO_ERROR;
+ int collResult;
+ char response;
+ UCharIterator iterA;
+ UCharIterator iterB;
+ int32_t length;
+
+ // 2 strings are in the buffer, consecutively
+ // The strings begin first with a 32 bit integer byte length, then the actual
+ // string bytes follow.
+
+ // first 32bits are the length
+ memcpy(&length, pBuf, sizeof(length));
+ pBuf += sizeof(length);
+
+ // point the iterator at it.
+ uiter_setUTF8(&iterA, pBuf, length);
+
+ pBuf += length; // now on to string b
+
+ // first 32bits are the length
+ memcpy(&length, pBuf, sizeof(length));
+ pBuf += sizeof(length);
+
+ // point the iterator at it.
+ uiter_setUTF8(&iterB, pBuf, length);
+
+ if (command == COLLATE)
+ collResult = ucol_strcollIter(pData->coll, &iterA, &iterB, &status);
+ else if (command == COLLATE_NO_CASE)
+ collResult = ucol_strcollIter(pData->collNoCase, &iterA, &iterB, &status);
+ else
+ return -1;
+
+ if (collResult < 0)
+ response = 0; //lt
+ else if (collResult > 0)
+ response = 1; //gt
+ else
+ response = 2; //eq
+
+ return return_control_result(&response, sizeof(response), rbuf, rlen);
+}
+
+ErlDrvEntry couch_driver_entry = {
+ NULL, /* F_PTR init, N/A */
+ couch_drv_start, /* L_PTR start, called when port is opened */
+ couch_drv_stop, /* F_PTR stop, called when port is closed */
+ NULL, /* F_PTR output, called when erlang has sent */
+ NULL, /* F_PTR ready_input, called when input descriptor ready */
+ NULL, /* F_PTR ready_output, called when output descriptor ready */
+ "couch_erl_driver", /* char *driver_name, the argument to open_port */
+ NULL, /* F_PTR finish, called when unloaded */
+ NULL, /* Not used */
+ couch_drv_control, /* F_PTR control, port_command callback */
+ NULL, /* F_PTR timeout, reserved */
+ NULL /* F_PTR outputv, reserved */
+};
+
+DRIVER_INIT(couch_erl_driver) /* must match name in driver_entry */
+{
+ return &couch_driver_entry;
+}
diff --git a/src/couchdb/couch_event_sup.erl b/src/couchdb/couch_event_sup.erl
new file mode 100644
index 00000000..72b17e5d
--- /dev/null
+++ b/src/couchdb/couch_event_sup.erl
@@ -0,0 +1,69 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%% The purpose of this module is to allow event handlers to particpate in Erlang
+%% supervisor trees. It provide a monitorable process that crashes if the event
+%% handler fails. The process, when shutdown, deregisters the event handler.
+
+-module(couch_event_sup).
+-behaviour(gen_server).
+
+-include("couch_db.hrl").
+
+-export([start_link/3,start_link/4, stop/1]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]).
+
+%
+% Instead calling the
+% ok = gen_event:add_sup_handler(error_logger, my_log, Args)
+%
+% do this:
+% {ok, LinkedPid} = couch_event_sup:start_link(error_logger, my_log, Args)
+%
+% The benefit is the event is now part of the process tree, and can be
+% started, restarted and shutdown consistently like the rest of the server
+% components.
+%
+% And now if the "event" crashes, the supervisor is notified and can restart
+% the event handler.
+%
+% Use this form to named process:
+% {ok, LinkedPid} = couch_event_sup:start_link({local, my_log}, error_logger, my_log, Args)
+%
+
+start_link(EventMgr, EventHandler, Args) ->
+ gen_server:start_link(couch_event_sup, {EventMgr, EventHandler, Args}, []).
+
+start_link(ServerName, EventMgr, EventHandler, Args) ->
+ gen_server:start_link(ServerName, couch_event_sup, {EventMgr, EventHandler, Args}, []).
+
+stop(Pid) ->
+ gen_server:cast(Pid, stop).
+
+init({EventMgr, EventHandler, Args}) ->
+ ok = gen_event:add_sup_handler(EventMgr, EventHandler, Args),
+ {ok, {EventMgr, EventHandler}}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+handle_call(_Whatever, _From, State) ->
+ {ok, State}.
+
+handle_cast(stop, State) ->
+ {stop, normal, State}.
+
+handle_info({gen_event_EXIT, _Handler, Reason}, State) ->
+ {stop, Reason, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/src/couchdb/couch_file.erl b/src/couchdb/couch_file.erl
new file mode 100644
index 00000000..6cbad44a
--- /dev/null
+++ b/src/couchdb/couch_file.erl
@@ -0,0 +1,323 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_file).
+-behaviour(gen_server).
+
+-define(HEADER_SIZE, 2048). % size of each segment of the doubly written header
+
+-export([open/1, open/2, close/1, pread/3, pwrite/3, expand/2, bytes/1, sync/1]).
+-export([append_term/2, pread_term/2,write_header/3, read_header/2, truncate/2]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
+
+%%----------------------------------------------------------------------
+%% Args: Valid Options are [create] and [create,overwrite].
+%% Files are opened in read/write mode.
+%% Returns: On success, {ok, Fd}
+%% or {error, Reason} if the file could not be opened.
+%%----------------------------------------------------------------------
+
+open(Filepath) ->
+ open(Filepath, []).
+
+open(Filepath, Options) ->
+ case gen_server:start_link(couch_file, {Filepath, Options, self()}, []) of
+ {ok, FdPid} ->
+ % we got back an ok, but that doesn't really mean it was successful.
+ % Instead the true status has been sent back to us as a message.
+ % We do this because if the gen_server doesn't initialize properly,
+ % it generates a crash report that will get logged. This avoids
+ % that mess, because we don't want crash reports generated
+ % every time a file cannot be found.
+ receive
+ {FdPid, ok} ->
+ {ok, FdPid};
+ {FdPid, Error} ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+
+%%----------------------------------------------------------------------
+%% Args: Pos is the offset from the beginning of the file, Bytes is
+%%    the number of bytes to read.
+%% Returns: {ok, Binary} where Binary is a binary data from disk
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+pread(Fd, Pos, Bytes) when Bytes > 0 ->
+ gen_server:call(Fd, {pread, Pos, Bytes}).
+
+
+%%----------------------------------------------------------------------
+%% Args: Pos is the offset from the beginning of the file, Bin is
+%%    the binary to write.
+%% Returns: ok
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+pwrite(Fd, Pos, Bin) ->
+ gen_server:call(Fd, {pwrite, Pos, Bin}).
+
+%%----------------------------------------------------------------------
+%% Purpose: To append a segment of zeros to the end of the file.
+%% Args: Bytes is the number of bytes to append to the file.
+%% Returns: {ok, Pos} where Pos is the file offset to the beginning of
+%% the new segments.
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+expand(Fd, Bytes) when Bytes > 0 ->
+ gen_server:call(Fd, {expand, Bytes}).
+
+
+%%----------------------------------------------------------------------
+%% Purpose: To append an Erlang term to the end of the file.
+%% Args: Erlang term to serialize and append to the file.
+%% Returns: {ok, Pos} where Pos is the file offset to the beginning of the
+%% serialized term. Use pread_term to read the term back.
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+append_term(Fd, Term) ->
+ gen_server:call(Fd, {append_term, Term}).
+
+
+%%----------------------------------------------------------------------
+%% Purpose: Reads a term from a file that was written with append_term
+%% Args: Pos, the offset into the file where the term is serialized.
+%% Returns: {ok, Term}
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+pread_term(Fd, Pos) ->
+ gen_server:call(Fd, {pread_term, Pos}).
+
+
+%%----------------------------------------------------------------------
+%% Purpose: The length of a file, in bytes.
+%% Returns: {ok, Bytes}
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+% length in bytes
+bytes(Fd) ->
+ gen_server:call(Fd, bytes).
+
+%%----------------------------------------------------------------------
+%% Purpose: Truncate a file to the number of bytes.
+%% Returns: ok
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+truncate(Fd, Pos) ->
+ gen_server:call(Fd, {truncate, Pos}).
+
+%%----------------------------------------------------------------------
+%% Purpose: Ensure all bytes written to the file are flushed to disk.
+%% Returns: ok
+%% or {error, Reason}.
+%%----------------------------------------------------------------------
+
+sync(Fd) ->
+ gen_server:call(Fd, sync).
+
+%%----------------------------------------------------------------------
+%% Purpose: Close the file. Closing is performed asynchronously.
+%% Returns: ok
+%%----------------------------------------------------------------------
+close(Fd) ->
+ gen_server:cast(Fd, close).
+
+
+write_header(Fd, Prefix, Data) ->
+ % The leading bytes in every db file, the sig and the file version:
+ %the actual header data
+ TermBin = term_to_binary(Data),
+ % the size of all the bytes written to the header, including the md5 signature (16 bytes)
+ FilledSize = size(Prefix) + size(TermBin) + 16,
+ case FilledSize > ?HEADER_SIZE of
+ true ->
+ % too big!
+ {error, error_header_too_large};
+ false ->
+ % pad out the header with zeros, then take the md5 hash
+ PadZeros = <<0:(8*(?HEADER_SIZE - FilledSize))>>,
+ Sig = erlang:md5([TermBin, PadZeros]),
+ % now we assemble the final header binary and write to disk
+ WriteBin = <<Prefix/binary, TermBin/binary, PadZeros/binary, Sig/binary>>,
+ ?HEADER_SIZE = size(WriteBin), % sanity check
+ DblWriteBin = [WriteBin, WriteBin],
+ ok = pwrite(Fd, 0, DblWriteBin)
+ end.
+
+
+read_header(Fd, Prefix) ->
+ {ok, Bin} = couch_file:pread(Fd, 0, 2*(?HEADER_SIZE)),
+ <<Bin1:(?HEADER_SIZE)/binary, Bin2:(?HEADER_SIZE)/binary>> = Bin,
+ % read the first header
+ case extract_header(Prefix, Bin1) of
+ {ok, Header1} ->
+ case extract_header(Prefix, Bin2) of
+ {ok, Header2} ->
+ case Header1 == Header2 of
+ true ->
+ % Everything is completely normal!
+ {ok, Header1};
+ false ->
+ % To get here we must have two different header versions with signatures intact.
+ % It's weird but possible (a commit failure right at the 2k boundary). Log it and take the first.
+ couch_log:info("Header version differences.~nPrimary Header: ~p~nSecondary Header: ~p", [Header1, Header2]),
+ {ok, Header1}
+ end;
+ {error, Error} ->
+ % error reading second header. It's ok, but log it.
+ couch_log:info("Secondary header corruption (error: ~p). Using primary header.", [Error]),
+ {ok, Header1}
+ end;
+ {error, Error} ->
+ % error reading primary header
+ case extract_header(Prefix, Bin2) of
+ {ok, Header2} ->
+ % log corrupt primary header. It's ok since the secondary is still good.
+ couch_log:info("Primary header corruption (error: ~p). Using secondary header.", [Error]),
+ {ok, Header2};
+ _ ->
+ % error reading secondary header too
+ % return the error, no need to log anything as the caller will be responsible for dealing with the error.
+ {error, Error}
+ end
+ end.
+
+
+extract_header(Prefix, Bin) ->
+ SizeOfPrefix = size(Prefix),
+ SizeOfTermBin = ?HEADER_SIZE -
+ SizeOfPrefix -
+ 16, % md5 sig
+
+ <<HeaderPrefix:SizeOfPrefix/binary, TermBin:SizeOfTermBin/binary, Sig:16/binary>> = Bin,
+
+ % check the header prefix
+ case HeaderPrefix of
+ Prefix ->
+ % check the integrity signature
+ case erlang:md5(TermBin) == Sig of
+ true ->
+ Header = binary_to_term(TermBin),
+ {ok, Header};
+ false ->
+ {error, header_corrupt}
+ end;
+ _ ->
+ {error, unknown_header_type}
+ end.
+
+
+
+init_status_ok(ReturnPid, Fd) ->
+ ReturnPid ! {self(), ok}, % signal back ok
+ {ok, Fd}.
+
+init_status_error(ReturnPid, Error) ->
+ ReturnPid ! {self(), Error}, % signal back error status
+ self() ! self_close, % tell ourself to close async
+ {ok, nil}.
+
+% server functions
+
+init({Filepath, Options, ReturnPid}) ->
+ case lists:member(create, Options) of
+ true ->
+ filelib:ensure_dir(Filepath),
+ case file:open(Filepath, [read, write, raw, binary]) of
+ {ok, Fd} ->
+ {ok, Length} = file:position(Fd, eof),
+ case Length > 0 of
+ true ->
+ % this means the file already exists and has data.
+            % FYI: We don't differentiate between empty files and non-existent
+ % files here.
+ case lists:member(overwrite, Options) of
+ true ->
+ {ok, 0} = file:position(Fd, 0),
+ ok = file:truncate(Fd),
+ init_status_ok(ReturnPid, Fd);
+ false ->
+ ok = file:close(Fd),
+ init_status_error(ReturnPid, {error, file_exists})
+ end;
+ false ->
+ init_status_ok(ReturnPid, Fd)
+ end;
+ Error ->
+ init_status_error(ReturnPid, Error)
+ end;
+ false ->
+ % open in read mode first, so we don't create the file if it doesn't exist.
+ case file:open(Filepath, [read, raw]) of
+ {ok, Fd_Read} ->
+ {ok, Fd} = file:open(Filepath, [read, write, raw, binary]),
+ ok = file:close(Fd_Read),
+ init_status_ok(ReturnPid, Fd);
+ Error ->
+ init_status_error(ReturnPid, Error)
+ end
+ end.
+
+
+terminate(_Reason, nil) ->
+ ok;
+terminate(_Reason, Fd) ->
+ file:close(Fd),
+ ok.
+
+
+handle_call({pread, Pos, Bytes}, _From, Fd) ->
+ {reply, file:pread(Fd, Pos, Bytes), Fd};
+handle_call({pwrite, Pos, Bin}, _From, Fd) ->
+ {reply, file:pwrite(Fd, Pos, Bin), Fd};
+handle_call({expand, Num}, _From, Fd) ->
+ {ok, Pos} = file:position(Fd, eof),
+ {reply, {file:pwrite(Fd, Pos + Num - 1, <<0>>), Pos}, Fd};
+handle_call(bytes, _From, Fd) ->
+ {reply, file:position(Fd, eof), Fd};
+handle_call(sync, _From, Fd) ->
+ {reply, file:sync(Fd), Fd};
+handle_call({truncate, Pos}, _From, Fd) ->
+ {ok, Pos} = file:position(Fd, Pos),
+ {reply, file:truncate(Fd), Fd};
+handle_call({append_term, Term}, _From, Fd) ->
+ Bin = term_to_binary(Term, [compressed]),
+ TermLen = size(Bin),
+ Bin2 = <<TermLen:32, Bin/binary>>,
+ {ok, Pos} = file:position(Fd, eof),
+ {reply, {file:pwrite(Fd, Pos, Bin2), Pos}, Fd};
+handle_call({pread_term, Pos}, _From, Fd) ->
+ {ok, <<TermLen:32>>}
+ = file:pread(Fd, Pos, 4),
+ {ok, Bin} = file:pread(Fd, Pos + 4, TermLen),
+ {reply, {ok, binary_to_term(Bin)}, Fd}.
+
+
+handle_cast(close, Fd) ->
+ {stop,normal,Fd}. % causes terminate to be called
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_info(self_close, State) ->
+ {stop,normal,State};
+handle_info(_Info, State) ->
+ {noreply, State}.
diff --git a/src/couchdb/couch_ft_query.erl b/src/couchdb/couch_ft_query.erl
new file mode 100644
index 00000000..2d1b9fc5
--- /dev/null
+++ b/src/couchdb/couch_ft_query.erl
@@ -0,0 +1,78 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_ft_query).
+-behaviour(gen_server).
+
+-export([start_link/1, execute/2]).
+
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3, stop/0]).
+
+-define(ERR_HANDLE, {Port, {exit_status, Status}} -> {stop, {unknown_error, Status}, {unknown_error, Status}, Port}).
+
+start_link(QueryExec) ->
+ gen_server:start_link({local, couch_ft_query}, couch_ft_query, QueryExec, []).
+
+stop() ->
+ exit(whereis(couch_ft_query), close).
+
+execute(DatabaseName, QueryString) ->
+ gen_server:call(couch_ft_query, {ft_query, DatabaseName, QueryString}).
+
+init(QueryExec) ->
+ Port = open_port({spawn, QueryExec}, [{line, 1000}, exit_status, hide]),
+ {ok, Port}.
+
+terminate(_Reason, _Server) ->
+ ok.
+
+handle_call({ft_query, Database, QueryText}, _From, Port) ->
+ %% send the database name
+ true = port_command(Port, Database ++ "\n"),
+ true = port_command(Port, QueryText ++ "\n"),
+ case get_line(Port) of
+ "ok" ->
+ DocIds = read_query_results(Port, []),
+ {reply, {ok, DocIds}, Port};
+ "error" ->
+ ErrorId = get_line(Port),
+ ErrorMsg = get_line(Port),
+ {reply, {list_to_atom(ErrorId), ErrorMsg}, Port}
+ end.
+
+read_query_results(Port, Acc) ->
+ case get_line(Port) of
+ "" -> % line by itself means all done
+ lists:reverse(Acc);
+ DocId ->
+ Score = get_line(Port),
+ read_query_results(Port, [{DocId, Score} | Acc])
+ end.
+
+
+get_line(Port) ->
+ receive
+ {Port, {data, {eol, Line}}} ->
+ Line;
+ ?ERR_HANDLE
+ end.
+
+handle_cast(_Whatever, State) ->
+ {noreply, State}.
+
+handle_info({Port, {exit_status, Status}}, Port) ->
+ {stop, {os_process_exited, Status}, Port};
+handle_info(_Whatever, State) ->
+ {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/src/couchdb/couch_js.c b/src/couchdb/couch_js.c
new file mode 100644
index 00000000..a234fda9
--- /dev/null
+++ b/src/couchdb/couch_js.c
@@ -0,0 +1,452 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+#include <stdio.h>
+#include <jsapi.h>
+
+int gExitCode = 0;
+int gStackChunkSize = 8L * 1024L;
+
+int
+EncodeChar(uint8 *utf8Buffer, uint32 ucs4Char) {
+ int utf8Length = 1;
+
+ if (ucs4Char < 0x80) {
+ *utf8Buffer = (uint8)ucs4Char;
+ } else {
+ int i;
+ uint32 a = ucs4Char >> 11;
+ utf8Length = 2;
+ while (a) {
+ a >>= 5;
+ utf8Length++;
+ }
+ i = utf8Length;
+ while (--i) {
+ utf8Buffer[i] = (uint8)((ucs4Char & 0x3F) | 0x80);
+ ucs4Char >>= 6;
+ }
+ *utf8Buffer = (uint8)(0x100 - (1 << (8-utf8Length)) + ucs4Char);
+ }
+ return utf8Length;
+}
+
+JSBool
+EncodeString(const jschar *src, size_t srclen, char *dst, size_t *dstlenp) {
+ size_t i, utf8Len, dstlen = *dstlenp, origDstlen = dstlen;
+ jschar c, c2;
+ uint32 v;
+ uint8 utf8buf[6];
+
+ if (!dst)
+ dstlen = origDstlen = (size_t) -1;
+
+ while (srclen) {
+ c = *src++;
+ srclen--;
+ if ((c >= 0xDC00) && (c <= 0xDFFF))
+ goto badSurrogate;
+ if (c < 0xD800 || c > 0xDBFF) {
+ v = c;
+ } else {
+ if (srclen < 1)
+ goto bufferTooSmall;
+ c2 = *src++;
+ srclen--;
+ if ((c2 < 0xDC00) || (c2 > 0xDFFF)) {
+ c = c2;
+ goto badSurrogate;
+ }
+ v = ((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000;
+ }
+ if (v < 0x0080) {
+ /* no encoding necessary - performance hack */
+ if (!dstlen)
+ goto bufferTooSmall;
+ if (dst)
+ *dst++ = (char) v;
+ utf8Len = 1;
+ } else {
+ utf8Len = EncodeChar(utf8buf, v);
+ if (utf8Len > dstlen)
+ goto bufferTooSmall;
+ if (dst) {
+ for (i = 0; i < utf8Len; i++)
+ *dst++ = (char) utf8buf[i];
+ }
+ }
+ dstlen -= utf8Len;
+ }
+ *dstlenp = (origDstlen - dstlen);
+ return JS_TRUE;
+
+badSurrogate:
+ *dstlenp = (origDstlen - dstlen);
+ return JS_FALSE;
+
+bufferTooSmall:
+ *dstlenp = (origDstlen - dstlen);
+ return JS_FALSE;
+}
+
+static uint32
+DecodeChar(const uint8 *utf8Buffer, int utf8Length) {
+ uint32 ucs4Char;
+ uint32 minucs4Char;
+ /* from Unicode 3.1, non-shortest form is illegal */
+ static const uint32 minucs4Table[] = {
+ 0x00000080, 0x00000800, 0x0001000, 0x0020000, 0x0400000
+ };
+
+ if (utf8Length == 1) {
+ ucs4Char = *utf8Buffer;
+ } else {
+ ucs4Char = *utf8Buffer++ & ((1<<(7-utf8Length))-1);
+ minucs4Char = minucs4Table[utf8Length-2];
+ while (--utf8Length) {
+ ucs4Char = ucs4Char<<6 | (*utf8Buffer++ & 0x3F);
+ }
+ if (ucs4Char < minucs4Char ||
+ ucs4Char == 0xFFFE || ucs4Char == 0xFFFF) {
+ ucs4Char = 0xFFFD;
+ }
+ }
+ return ucs4Char;
+}
+
+JSBool
+DecodeString(const char *src, size_t srclen, jschar *dst, size_t *dstlenp) {
+ uint32 v;
+ size_t offset = 0, j, n, dstlen = *dstlenp, origDstlen = dstlen;
+
+ if (!dst)
+ dstlen = origDstlen = (size_t) -1;
+
+ while (srclen) {
+ v = (uint8) *src;
+ n = 1;
+ if (v & 0x80) {
+ while (v & (0x80 >> n))
+ n++;
+ if (n > srclen)
+ goto bufferTooSmall;
+ if (n == 1 || n > 6)
+ goto badCharacter;
+ for (j = 1; j < n; j++) {
+ if ((src[j] & 0xC0) != 0x80)
+ goto badCharacter;
+ }
+ v = DecodeChar((const uint8 *) src, n);
+ if (v >= 0x10000) {
+ v -= 0x10000;
+ if (v > 0xFFFFF || dstlen < 2) {
+ *dstlenp = (origDstlen - dstlen);
+ return JS_FALSE;
+ }
+ if (dstlen < 2)
+ goto bufferTooSmall;
+ if (dst) {
+ *dst++ = (jschar)((v >> 10) + 0xD800);
+ v = (jschar)((v & 0x3FF) + 0xDC00);
+ }
+ dstlen--;
+ }
+ }
+ if (!dstlen)
+ goto bufferTooSmall;
+ if (dst)
+ *dst++ = (jschar) v;
+ dstlen--;
+ offset += n;
+ src += n;
+ srclen -= n;
+ }
+ *dstlenp = (origDstlen - dstlen);
+ return JS_TRUE;
+
+badCharacter:
+ *dstlenp = (origDstlen - dstlen);
+ return JS_FALSE;
+
+bufferTooSmall:
+ *dstlenp = (origDstlen - dstlen);
+ return JS_FALSE;
+}
+
+static JSBool
+EvalInContext(JSContext *context, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval) {
+ JSString *str;
+ JSObject *sandbox;
+ JSContext *sub_context;
+ const jschar *src;
+ size_t srclen;
+ JSBool ok;
+ jsval v;
+
+ sandbox = NULL;
+ if (!JS_ConvertArguments(context, argc, argv, "S / o", &str, &sandbox))
+ return JS_FALSE;
+
+ sub_context = JS_NewContext(JS_GetRuntime(context), gStackChunkSize);
+ if (!sub_context) {
+ JS_ReportOutOfMemory(context);
+ return JS_FALSE;
+ }
+
+ src = JS_GetStringChars(str);
+ srclen = JS_GetStringLength(str);
+
+ if (!sandbox) {
+ sandbox = JS_NewObject(sub_context, NULL, NULL, NULL);
+ if (!sandbox || !JS_InitStandardClasses(sub_context, sandbox)) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+
+ if (srclen == 0) {
+ *rval = OBJECT_TO_JSVAL(sandbox);
+ ok = JS_TRUE;
+ } else {
+ ok = JS_EvaluateUCScript(sub_context, sandbox, src, srclen, NULL, -1,
+ rval);
+ }
+
+out:
+ JS_DestroyContext(sub_context);
+ return ok;
+}
+
+static JSBool
+GC(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ JS_GC(context);
+ return JS_TRUE;
+}
+
+static JSBool
+Print(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ uintN i, n;
+ size_t cl, bl;
+ JSString *str;
+ jschar *chars;
+ char *bytes;
+
+ for (i = n = 0; i < argc; i++) {
+ str = JS_ValueToString(context, argv[i]);
+ if (!str)
+ return JS_FALSE;
+ chars = JS_GetStringChars(str);
+ cl = JS_GetStringLength(str);
+ if (!EncodeString(chars, cl, NULL, &bl))
+ return JS_FALSE;
+ bytes = JS_malloc(context, bl + 1);
+ bytes[bl] = '\0';
+ if (!EncodeString(chars, cl, bytes, &bl)) {
+ JS_free(context, bytes);
+ return JS_FALSE;
+ }
+ fprintf(stdout, "%s%s", i ? " " : "", bytes);
+ JS_free(context, bytes);
+ }
+ n++;
+ if (n)
+ fputc('\n', stdout);
+ fflush(stdout);
+ return JS_TRUE;
+}
+
+static JSBool
+Quit(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ JS_ConvertArguments(context, argc, argv, "/ i", &gExitCode);
+ return JS_FALSE;
+}
+
+static JSBool
+ReadLine(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ char *bytes, *tmp;
+ jschar *chars;
+ size_t bufsize, byteslen, charslen, readlen;
+ JSString *str;
+
+ JS_MaybeGC(context);
+
+ byteslen = 0;
+ bufsize = 256;
+ bytes = JS_malloc(context, bufsize);
+ if (!bytes)
+ return JS_FALSE;
+
+ while ((readlen = js_fgets(bytes + byteslen, bufsize - byteslen, stdin)) > 0) {
+ byteslen += readlen;
+
+ /* Are we done? */
+ if (bytes[byteslen - 1] == '\n') {
+ bytes[byteslen - 1] = '\0';
+ break;
+ }
+
+ /* Else, grow our buffer for another pass */
+ tmp = JS_realloc(context, bytes, bufsize * 2);
+ if (!tmp) {
+ JS_free(context, bytes);
+ return JS_FALSE;
+ }
+
+ bufsize *= 2;
+ bytes = tmp;
+ }
+
+ /* Treat the empty string specially */
+ if (byteslen == 0) {
+ *rval = JS_GetEmptyStringValue(context);
+ JS_free(context, bytes);
+ return JS_TRUE;
+ }
+
+ /* Shrink the buffer to the real size */
+ tmp = JS_realloc(context, bytes, byteslen);
+ if (!tmp) {
+ JS_free(context, bytes);
+ return JS_FALSE;
+ }
+ bytes = tmp;
+
+ /* Decode the string from UTF-8 */
+ if (!DecodeString(bytes, byteslen, NULL, &charslen)) {
+ JS_free(context, bytes);
+ return JS_FALSE;
+ }
+ chars = JS_malloc(context, (charslen + 1) * sizeof(jschar));
+ if (!DecodeString(bytes, byteslen, chars, &charslen)) {
+ JS_free(context, bytes);
+ JS_free(context, chars);
+ return JS_FALSE;
+ }
+ chars[charslen] = '\0';
+
+ /* Initialize a JSString object */
+ str = JS_NewUCString(context, chars, charslen - 1);
+ if (!str) {
+ JS_free(context, bytes);
+ JS_free(context, chars);
+ return JS_FALSE;
+ }
+
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+Seal(JSContext *context, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ JSObject *target;
+ JSBool deep = JS_FALSE;
+
+ if (!JS_ConvertArguments(context, argc, argv, "o/b", &target, &deep))
+ return JS_FALSE;
+ if (!target)
+ return JS_TRUE;
+ return JS_SealObject(context, target, deep);
+}
+
+static void
+ExecuteScript(JSContext *context, JSObject *obj, const char *filename) {
+ FILE *file;
+ JSScript *script;
+ jsval result;
+
+ if (!filename || strcmp(filename, "-") == 0) {
+ file = stdin;
+ } else {
+ file = fopen(filename, "r");
+ if (!file) {
+ fprintf(stderr, "could not open script file %s\n", filename);
+ gExitCode = 1;
+ return;
+ }
+ }
+
+ script = JS_CompileFileHandle(context, obj, filename, file);
+ if (script) {
+ JS_ExecuteScript(context, obj, script, &result);
+ JS_DestroyScript(context, script);
+ }
+}
+
+static uint32 gBranchCount = 0;
+static uint32 gBranchLimit = 100 * 1024;
+
+static JSBool
+BranchCallback(JSContext *context, JSScript *script) {
+ if (++gBranchCount == gBranchLimit) {
+ gBranchCount = 0;
+ return JS_FALSE;
+ }
+ if ((gBranchCount & 0x3fff) == 1) {
+ JS_MaybeGC(context);
+ }
+ return JS_TRUE;
+}
+
+static void
+PrintError(JSContext *context, const char *message, JSErrorReport *report) {
+ if (!report || !JSREPORT_IS_WARNING(report->flags))
+ fprintf(stderr, "%s\n", message);
+}
+
+int
+main(int argc, const char * argv[]) {
+ JSRuntime *runtime;
+ JSContext *context;
+ JSObject *global;
+
+ runtime = JS_NewRuntime(64L * 1024L * 1024L);
+ if (!runtime)
+ return 1;
+ context = JS_NewContext(runtime, gStackChunkSize);
+ if (!context)
+ return 1;
+ JS_SetErrorReporter(context, PrintError);
+ JS_SetBranchCallback(context, BranchCallback);
+ JS_ToggleOptions(context, JSOPTION_NATIVE_BRANCH_CALLBACK);
+ JS_ToggleOptions(context, JSOPTION_XML);
+
+ global = JS_NewObject(context, NULL, NULL, NULL);
+ if (!global)
+ return 1;
+ if (!JS_InitStandardClasses(context, global))
+ return 1;
+ if (!JS_DefineFunction(context, global, "evalcx", EvalInContext, 0, 0)
+ || !JS_DefineFunction(context, global, "gc", GC, 0, 0)
+ || !JS_DefineFunction(context, global, "print", Print, 0, 0)
+ || !JS_DefineFunction(context, global, "quit", Quit, 0, 0)
+ || !JS_DefineFunction(context, global, "readline", ReadLine, 0, 0)
+ || !JS_DefineFunction(context, global, "seal", Seal, 0, 0))
+ return 1;
+
+ if (argc != 2) {
+ fprintf(stderr, "incorrect number of arguments\n\n");
+ fprintf(stderr, "usage: %s <scriptfile>\n", argv[0]);
+ return 2;
+ }
+
+ ExecuteScript(context, global, argv[1]);
+
+ JS_DestroyContext(context);
+ JS_DestroyRuntime(runtime);
+ JS_ShutDown();
+
+ return gExitCode;
+}
diff --git a/src/couchdb/couch_key_tree.erl b/src/couchdb/couch_key_tree.erl
new file mode 100644
index 00000000..705365bd
--- /dev/null
+++ b/src/couchdb/couch_key_tree.erl
@@ -0,0 +1,139 @@
+% Copyright 2007, 2008 Damien Katz <damien_katz@yahoo.com>
+%
+% Licensed under the Apache License, Version 2.0 (the "License");
+% you may not use this file except in compliance with the License.
+% You may obtain a copy of the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS,
+% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+% See the License for the specific language governing permissions and
+% limitations under the License.
+
+-module(couch_key_tree).
+
+-export([merge/2, find_missing/2, get_key_leafs/2, get_full_key_paths/2, get/2]).
+-export([map/2, get_all_leafs/1, get_leaf_keys/1, count_leafs/1]).
+
+% a key tree looks like this:
+% Tree -> [] or [{Key, Value, Tree} | SiblingTree]
+% ChildTree -> Tree
+% SiblingTree -> [] or [{SiblingKey, Value, Tree} | Tree]
+% And each Key < SiblingKey
+
+
+
+% key tree functions
+
+% When the same key is found in the trees, the value in tree B is discarded.
+merge([], B) ->
+ B;
+merge(A, []) ->
+ A;
+merge([ATree | ANextTree], [BTree | BNextTree]) ->
+ {AKey, AValue, ASubTree} = ATree,
+ {BKey, _BValue, BSubTree} = BTree,
+ if
+ AKey == BKey ->
+ %same key
+ MergedSubTree = merge(ASubTree, BSubTree),
+ MergedNextTree = merge(ANextTree, BNextTree),
+ [{AKey, AValue, MergedSubTree} | MergedNextTree];
+ AKey < BKey ->
+ [ATree | merge(ANextTree, [BTree | BNextTree])];
+ true ->
+ [BTree | merge([ATree | ANextTree], BNextTree)]
+ end.
+
+find_missing(_Tree, []) ->
+ [];
+find_missing([], Keys) ->
+ Keys;
+find_missing([{Key, _, SubTree} | RestTree], Keys) ->
+ SrcKeys2 = Keys -- Key,
+ SrcKeys3 = find_missing(SubTree, SrcKeys2),
+ find_missing(RestTree, SrcKeys3).
+
+
+% get the leafs in the tree matching the keys. The matching key nodes can be
+% leafs or an inner nodes. If an inner node, then the leafs for that node
+% are returned.
+get_key_leafs(Tree, Keys) ->
+ get_key_leafs(Tree, Keys, []).
+
+get_key_leafs(_Tree, [], _KeyPathAcc) ->
+ {[], []};
+get_key_leafs([], KeysToGet, _KeyPathAcc) ->
+ {[], KeysToGet};
+get_key_leafs([{Key, _Value, SubTree}=Tree | RestTree], KeysToGet, KeyPathAcc) ->
+ case KeysToGet -- [Key] of
+ KeysToGet -> % same list, key not found
+ {LeafsFound, KeysToGet2} = get_key_leafs(SubTree, KeysToGet, [Key | KeyPathAcc]),
+ {RestLeafsFound, KeysRemaining} = get_key_leafs(RestTree, KeysToGet2, KeyPathAcc),
+ {LeafsFound ++ RestLeafsFound, KeysRemaining};
+ KeysToGet2 ->
+ LeafsFound = get_all_leafs([Tree], KeyPathAcc),
+ LeafKeysFound = [LeafKeyFound || {LeafKeyFound, _, _} <- LeafsFound],
+ KeysToGet2 = KeysToGet2 -- LeafKeysFound,
+ {RestLeafsFound, KeysRemaining} = get_key_leafs(RestTree, KeysToGet2, KeyPathAcc),
+ {LeafsFound ++ RestLeafsFound, KeysRemaining}
+ end.
+
+get(Tree, KeysToGet) ->
+ {KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet),
+ FixedResults = [ {Key, Value, [Key0 || {Key0, _} <- Path]} || [{Key, Value}|_] = Path <- KeyPaths],
+ {FixedResults, KeysNotFound}.
+
+get_full_key_paths(Tree, Keys) ->
+ get_full_key_paths(Tree, Keys, []).
+
+get_full_key_paths(_Tree, [], _KeyPathAcc) ->
+ {[], []};
+get_full_key_paths([], KeysToGet, _KeyPathAcc) ->
+ {[], KeysToGet};
+get_full_key_paths([{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPathAcc) ->
+ KeysToGet2 = KeysToGet -- [KeyId],
+ CurrentNodeResult =
+ case length(KeysToGet2) == length(KeysToGet) of
+ true -> % not in the key list.
+ [];
+        false -> % this node is in the key list. return it
+ [[{KeyId, Value} | KeyPathAcc]]
+ end,
+ {KeysGotten, KeysRemaining} = get_full_key_paths(SubTree, KeysToGet2, [{KeyId, Value} | KeyPathAcc]),
+ {KeysGotten2, KeysRemaining2} = get_full_key_paths(RestTree, KeysRemaining, KeyPathAcc),
+ {CurrentNodeResult ++ KeysGotten ++ KeysGotten2, KeysRemaining2}.
+
+get_all_leafs(Tree) ->
+ get_all_leafs(Tree, []).
+
+get_all_leafs([], _KeyPathAcc) ->
+ [];
+get_all_leafs([{KeyId, Value, []} | RestTree], KeyPathAcc) ->
+ [{KeyId, Value, [KeyId | KeyPathAcc]} | get_all_leafs(RestTree, KeyPathAcc)];
+get_all_leafs([{KeyId, _Value, SubTree} | RestTree], KeyPathAcc) ->
+ get_all_leafs(SubTree, [KeyId | KeyPathAcc]) ++ get_all_leafs(RestTree, KeyPathAcc).
+
+get_leaf_keys([]) ->
+ [];
+get_leaf_keys([{Key, _Value, []} | RestTree]) ->
+ [Key | get_leaf_keys(RestTree)];
+get_leaf_keys([{_Key, _Value, SubTree} | RestTree]) ->
+ get_leaf_keys(SubTree) ++ get_leaf_keys(RestTree).
+
+count_leafs([]) ->
+ 0;
+count_leafs([{_Key, _Value, []} | RestTree]) ->
+ 1 + count_leafs(RestTree);
+count_leafs([{_Key, _Value, SubTree} | RestTree]) ->
+ count_leafs(SubTree) + count_leafs(RestTree).
+
+
+map(_Fun, []) ->
+ [];
+map(Fun, [{Key, Value, SubTree} | RestTree]) ->
+ Value2 = Fun(Key, Value),
+ [{Key, Value2, map(Fun, SubTree)} | map(Fun, RestTree)].
+
diff --git a/src/couchdb/couch_log.erl b/src/couchdb/couch_log.erl
new file mode 100644
index 00000000..47e0114d
--- /dev/null
+++ b/src/couchdb/couch_log.erl
@@ -0,0 +1,130 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_log).
+-behaviour(gen_event).
+
+-export([start_link/2,stop/0]).
+-export([error/1,error/2,info/1,info/2,debug/1,debug/2,get_level/0,get_level_integer/0, set_level/1]).
+-export([init/1, handle_event/2, terminate/2, code_change/3, handle_info/2, handle_call/2]).
+
+-define(LEVEL_ERROR, 3).
+-define(LEVEL_INFO, 2).
+-define(LEVEL_DEBUG, 1).
+-define(LEVEL_TMI, 0).
+
+level_integer(error) -> ?LEVEL_ERROR;
+level_integer(info) -> ?LEVEL_INFO;
+level_integer(debug) -> ?LEVEL_DEBUG;
+level_integer(tmi) -> ?LEVEL_TMI;
+level_integer(_Else) -> ?LEVEL_ERROR. % anything else default to ERROR level
+
+level_atom(?LEVEL_ERROR) -> error;
+level_atom(?LEVEL_INFO) -> info;
+level_atom(?LEVEL_DEBUG) -> debug;
+level_atom(?LEVEL_TMI) -> tmi.
+
+
+start_link(Filename, Level) ->
+ couch_event_sup:start_link({local, couch_log}, error_logger, couch_log, {Filename, Level}).
+
+stop() ->
+ couch_event_sup:stop(couch_log).
+
+init({Filename, Level}) ->
+ {ok, Fd} = file:open(Filename, [append]),
+ {ok, {Fd, level_integer(Level)}}.
+
+error(Msg) ->
+ error("~s", [Msg]).
+
+error(Format, Args) ->
+ error_logger:error_report(couch_error, {Format, Args}).
+
+info(Msg) ->
+ info("~s", [Msg]).
+
+info(Format, Args) ->
+ case get_level_integer() =< ?LEVEL_INFO of
+ true ->
+ error_logger:info_report(couch_info, {Format, Args});
+ false ->
+ ok
+ end.
+
+debug(Msg) ->
+ debug("~s", [Msg]).
+
+debug(Format, Args) ->
+ case get_level_integer() =< ?LEVEL_DEBUG of
+ true ->
+ error_logger:info_report(couch_debug, {Format, Args});
+ false ->
+ ok
+ end.
+
+set_level(LevelAtom) ->
+ set_level_integer(level_integer(LevelAtom)).
+
+get_level() ->
+ level_atom(get_level_integer()).
+
+get_level_integer() ->
+ catch gen_event:call(error_logger, couch_log, get_level_integer).
+
+set_level_integer(Int) ->
+ gen_event:call(error_logger, couch_log, {set_level_integer, Int}).
+
+handle_event({error_report, _, {Pid, couch_error, {Format, Args}}}, {Fd, _LogLevel}=State) ->
+ log(Fd, Pid, error, Format, Args),
+ {ok, State};
+handle_event({error_report, _, {Pid, _, _}}=Event, {Fd, _LogLevel}=State) ->
+ log(Fd, Pid, error, "~p", [Event]),
+ {ok, State};
+handle_event({error, _, {Pid, Format, Args}}, {Fd, _LogLevel}=State) ->
+ log(Fd, Pid, error, Format, Args),
+ {ok, State};
+handle_event({info_report, _, {Pid, couch_info, {Format, Args}}}, {Fd, LogLevel}=State)
+when LogLevel =< ?LEVEL_INFO ->
+ log(Fd, Pid, info, Format, Args),
+ {ok, State};
+handle_event({info_report, _, {Pid, couch_debug, {Format, Args}}}, {Fd, LogLevel}=State)
+when LogLevel =< ?LEVEL_DEBUG ->
+ log(Fd, Pid, debug, Format, Args),
+ {ok, State};
+handle_event({_, _, {Pid, _, _}}=Event, {Fd, LogLevel}=State)
+when LogLevel =< ?LEVEL_TMI ->
+ % log every remaining event if tmi!
+ log(Fd, Pid, tmi, "~p", [Event]),
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_call(get_level_integer, {_Fd, LogLevel}=State) ->
+ {ok, LogLevel, State};
+handle_call({set_level_integer, NewLevel}, {Fd, _LogLevel}) ->
+ {ok, ok, {Fd, NewLevel}}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Arg, {Fd, _LoggingLevel}) ->
+ file:close(Fd).
+
+log(Fd, Pid, Level, Format, Args) ->
+ Msg = io_lib:format(Format, Args),
+ ok = io:format("[~s] [~p] ~s~n", [Level, Pid, Msg]), % dump to console too
+ {ok, Msg2, _} = regexp:gsub(lists:flatten(Msg),"\\r\\n|\\r|\\n", "\r\n"),
+ ok = io:format(Fd, "[~s] [~s] [~p] ~s\r~n\r~n", [httpd_util:rfc1123_date(), Level, Pid, Msg2]).
diff --git a/src/couchdb/couch_query_servers.erl b/src/couchdb/couch_query_servers.erl
new file mode 100644
index 00000000..19cba9bd
--- /dev/null
+++ b/src/couchdb/couch_query_servers.erl
@@ -0,0 +1,206 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_query_servers).
+-behaviour(gen_server).
+
+-export([start_link/1]).
+
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3,stop/0]).
+-export([start_doc_map/2, map_docs/2, stop_doc_map/1]).
+
+-export([test/0, test/1]).
+
+-include("couch_db.hrl").
+
+timeout() ->
+ % hardcoded 5 sec timeout per document
+ 5000.
+
+start_link(QueryServerList) ->
+ gen_server:start_link({local, couch_query_servers}, couch_query_servers, QueryServerList, []).
+
+stop() ->
+ exit(whereis(couch_query_servers), close).
+
+readline(Port) ->
+ readline(Port, []).
+
+readline(Port, Acc) ->
+ % Read one full line from the query-server port, guarded by the
+ % per-document timeout (see timeout/0). Throws {map_process_error, _}
+ % on port error or timeout.
+ Timer = erlang:send_after(timeout(), self(), timeout),
+ Result =
+ receive
+ {Port, {data, {noeol, Data}}} ->
+ readline(Port, [Data|Acc]); % partial line: keep accumulating chunks
+ {Port, {data, {eol, Data}}} ->
+ lists:flatten(lists:reverse(Acc, Data)); % line complete; Acc was built in reverse
+ {Port, Err} ->
+ catch port_close(Port),
+ erlang:cancel_timer(Timer),
+ throw({map_process_error, Err});
+ timeout ->
+ catch port_close(Port),
+ throw({map_process_error, "map function timed out"})
+ end,
+ case erlang:cancel_timer(Timer) of
+ false ->
+ % message already sent. clear it
+ receive timeout -> ok end;
+ _ ->
+ ok
+ end,
+ Result.
+
+read_json(Port) ->
+ % Read and decode one JSON response from the port, transparently
+ % handling any interleaved {"log": Msg} messages from the query server.
+ case cjson:decode(readline(Port)) of
+ {obj, [{"log", Msg}]} when is_list(Msg) ->
+ % we got a message to log. Log it and continue
+ couch_log:info("Query Server Log Message: ~s", [Msg]),
+ read_json(Port); % keep reading until a non-log response arrives
+ Else ->
+ Else % any other decoded term is the actual response for the caller
+ end.
+
+writeline(Port, String) ->
+ true = port_command(Port, String ++ "\n").
+
+% send command and get a response.
+prompt(Port, Json) ->
+ writeline(Port, cjson:encode(Json)),
+ read_json(Port).
+
+
+start_doc_map(Lang, Functions) ->
+ % Check out (or spawn) a query-server port for Lang, reset it, and load
+ % the given map function sources. Returns {ok, {Lang, Port}} for map_docs/2.
+ Port =
+ case gen_server:call(couch_query_servers, {get_port, Lang}) of
+ {ok, Port0} ->
+ link(Port0), % reuse an idle server; link so we notice if it dies
+ Port0;
+ {empty, Cmd} ->
+ couch_log:info("Spawning new ~s instance.", [Lang]),
+ open_port({spawn, Cmd}, [stream,
+ {line, 1000},
+ exit_status,
+ hide]);
+ Error ->
+ throw(Error)
+ end,
+ true = prompt(Port, {"reset"}), % clear any state left by a previous checkout
+ % send the functions as json strings
+ lists:foreach(fun(FunctionSource) ->
+ case prompt(Port, {"add_fun", FunctionSource}) of
+ true -> ok;
+ {obj, [{"error", Id}, {"reason", Reason}]} ->
+ throw({Id, Reason})
+ end
+ end,
+ Functions),
+ {ok, {Lang, Port}}.
+
+map_docs({_Lang, Port}, Docs) ->
+ % Run every doc through the map functions loaded by start_doc_map/2.
+ % Returns {ok, Results}: one entry per doc, each a list (one per map fun)
+ % of [Key, Value] pairs.
+ % send the documents
+ Results =
+ lists:map(
+ fun(Doc) ->
+ Json = couch_doc:to_json_obj(Doc, []),
+ case prompt(Port, {"map_doc", Json}) of
+ {obj, [{"error", Id}, {"reason", Reason}]} ->
+ throw({list_to_atom(Id),Reason});
+ {obj, [{"reason", Reason}, {"error", Id}]} ->
+ throw({list_to_atom(Id),Reason}); % same error, members in the other order
+ Results when is_tuple(Results) ->
+ % the results are a json array of function map yields like this:
+ % {FunResults1, FunResults2 ...}
+ % where funresults is are json arrays of key value pairs:
+ % {{Key1, Value1}, {Key2, Value2}}
+ % Convert to real lists, execept the key, value pairs
+ [tuple_to_list(FunResult) || FunResult <- tuple_to_list(Results)]
+ end
+ end,
+ Docs),
+ {ok, Results}.
+
+
+stop_doc_map(nil) ->
+ ok;
+stop_doc_map({Lang, Port}) ->
+ ok = gen_server:call(couch_query_servers, {return_port, {Lang, Port}}),
+ true = unlink(Port),
+ ok.
+
+init(QueryServerList) ->
+ {ok, {QueryServerList, []}}.
+
+terminate(_Reason, _Server) ->
+ ok.
+
+
+handle_call({get_port, Lang}, {FromPid, _}, {QueryServerList, LangPorts}) ->
+ % Hand an idle port for Lang to the caller, or tell it what command to
+ % spawn. State is {ConfiguredServers, IdlePorts}.
+ case lists:keysearch(Lang, 1, LangPorts) of
+ {value, {_, Port}=LangPort} ->
+ Result =
+ case catch port_connect(Port, FromPid) of
+ true ->
+ true = unlink(Port), % caller now owns and links the port
+ {ok, Port};
+ Error ->
+ catch port_close(Port), % port is unusable; discard it
+ Error
+ end,
+ {reply, Result, {QueryServerList, LangPorts -- [LangPort]}}; % remove from idle pool
+ false ->
+ case lists:keysearch(Lang, 1, QueryServerList) of
+ {value, {_, ServerCmd}} ->
+ {reply, {empty, ServerCmd}, {QueryServerList, LangPorts}}; % no idle port: caller spawns it
+ false -> % not a supported language
+ {reply, {query_language_unknown, Lang}, {QueryServerList, LangPorts}}
+ end
+ end;
+handle_call({return_port, {Lang, Port}}, _From, {QueryServerList, LangPorts}) ->
+ % Caller is done with the port; reclaim it into the idle pool if healthy.
+ case catch port_connect(Port, self()) of
+ true ->
+ {reply, ok, {QueryServerList, [{Lang, Port} | LangPorts]}};
+ _ ->
+ catch port_close(Port), % dead or foreign port: drop it
+ {reply, ok, {QueryServerList, LangPorts}}
+ end.
+
+handle_cast(_Whatever, {Cmd, Ports}) ->
+ {noreply, {Cmd, Ports}}.
+
+handle_info({Port, {exit_status, Status}}, {QueryServerList, LangPorts}) ->
+ % A linked query-server port exited; log abnormal exits and forget the port.
+ case lists:keysearch(Port, 2, LangPorts) of
+ {value, {Lang, _}} ->
+ case Status of
+ 0 -> ok;
+ _ -> couch_log:error("Abnormal shutdown of ~s query server process (exit_status: ~w).", [Lang, Status])
+ end,
+ {noreply, {QueryServerList, lists:keydelete(Port, 2, LangPorts)}}; % drop from idle pool
+ _ ->
+ % fix: this branch previously returned the result of couch_log:error/2,
+ % which is not a valid gen_server return value and would crash the server
+ couch_log:error("Unknown linked port/process crash: ~p", [Port]),
+ {noreply, {QueryServerList, LangPorts}}
+ end;
+handle_info(_Whatever, {Cmd, Ports}) ->
+ {noreply, {Cmd, Ports}}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+test() ->
+ test("../js/js -f main.js").
+
+test(Cmd) ->
+ start_link(Cmd),
+ {ok, DocMap} = start_doc_map("javascript", ["function(doc) {if (doc[0] == 'a') return doc[1];}"]),
+ {ok, Results} = map_docs(DocMap, [#doc{body={"a", "b"}}, #doc{body={"c", "d"}},#doc{body={"a", "c"}}]),
+ io:format("Results: ~w~n", [Results]),
+ stop_doc_map(DocMap),
+ ok.
diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl
new file mode 100644
index 00000000..9590d5c1
--- /dev/null
+++ b/src/couchdb/couch_rep.erl
@@ -0,0 +1,308 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rep).
+
+-include("couch_db.hrl").
+
+-export([replicate/2, replicate/3, test/0, test_write_docs/3]).
+
+-record(stats, {
+ docs_read=0,
+ read_errors=0,
+ docs_copied=0,
+ copy_errors=0
+ }).
+
+
+url_encode([H|T]) ->
+ % Percent-encode a string for use in a URL path. Alphanumerics and the
+ % characters _ . - : pass through untouched; everything else becomes %XX.
+ if
+ H >= $a, $z >= H ->
+ [H|url_encode(T)];
+ H >= $A, $Z >= H ->
+ [H|url_encode(T)];
+ H >= $0, $9 >= H ->
+ [H|url_encode(T)];
+ H == $_; H == $.; H == $-; H == $: ->
+ [H|url_encode(T)];
+ true ->
+ case lists:flatten(io_lib:format("~.16.0B", [H])) of
+ [X, Y] ->
+ [$%, X, Y | url_encode(T)];
+ [X] ->
+ [$%, $0, X | url_encode(T)] % pad a single hex digit to two
+ end
+ end;
+url_encode([]) ->
+ [].
+
+
+replicate(DbNameA, DbNameB) ->
+ replicate(DbNameA, DbNameB, []).
+
+replicate(Source, Target, Options) ->
+ {ok, DbSrc} = open_db(Source),
+ {ok, DbTgt} = open_db(Target),
+ {ok, HostName} = inet:gethostname(),
+
+ RepRecKey = ?LOCAL_DOC_PREFIX ++ HostName ++ ":" ++ Source ++ ":" ++ Target,
+ StartTime = httpd_util:rfc1123_date(),
+ RepRecSrc =
+ case open_doc(DbSrc, RepRecKey, []) of
+ {ok, SrcDoc} -> SrcDoc;
+ _ -> #doc{id=RepRecKey}
+ end,
+
+ RepRecTgt =
+ case open_doc(DbTgt, RepRecKey, []) of
+ {ok, TgtDoc} -> TgtDoc;
+ _ -> #doc{id=RepRecKey}
+ end,
+
+ #doc{body={obj,OldRepHistoryProps}} = RepRecSrc,
+ #doc{body={obj,OldRepHistoryPropsTrg}} = RepRecTgt,
+
+ SeqNum0 =
+ case OldRepHistoryProps == OldRepHistoryPropsTrg of
+ true ->
+ % if the records are identical, then we have a valid replication history
+ proplists:get_value("source_last_seq", OldRepHistoryProps, 0);
+ false ->
+ 0
+ end,
+
+ SeqNum =
+ case proplists:get_value(full, Options, false)
+ orelse proplists:get_value("full", Options, false) of
+ true -> 0;
+ false -> SeqNum0
+ end,
+
+ {NewSeqNum, Stats} = pull_rep(DbTgt, DbSrc, SeqNum, #stats{}),
+ case NewSeqNum == SeqNum andalso OldRepHistoryProps /= [] of
+ true ->
+ % nothing changed, don't record results
+ {ok, {obj, OldRepHistoryProps}};
+ false ->
+ HistEntries =[
+ {obj,
+ [{"start_time", StartTime},
+ {"end_time", httpd_util:rfc1123_date()},
+ {"start_last_seq", SeqNum},
+ {"end_last_seq", NewSeqNum},
+ {"docs_read", Stats#stats.docs_read},
+ {"read_errors", Stats#stats.read_errors},
+ {"docs_copied", Stats#stats.docs_copied},
+ {"copy_errors", Stats#stats.copy_errors}]}
+ | tuple_to_list(proplists:get_value("history", OldRepHistoryProps, {}))],
+ % something changed, record results
+ NewRepHistory =
+ {obj,
+ [{"session_id", couch_util:new_uuid()},
+ {"source_last_seq", NewSeqNum},
+ {"history", list_to_tuple(lists:sublist(HistEntries, 50))}]},
+
+ {ok, _} = update_doc(DbSrc, RepRecSrc#doc{body=NewRepHistory}, []),
+ {ok, _} = update_doc(DbTgt, RepRecTgt#doc{body=NewRepHistory}, []),
+ {ok, NewRepHistory}
+ end.
+
+pull_rep(DbTarget, DbSource, SourceSeqNum, Stats) ->
+ {ok, NewSeq} =
+ enum_docs_since(DbSource, SourceSeqNum,
+ fun(#doc_info{update_seq=Seq}=SrcDocInfo, _, {_, AccStats}) ->
+ Stats2 = maybe_save_docs(DbTarget, DbSource, SrcDocInfo, AccStats),
+ {ok, {Seq, Stats2}}
+ end, {SourceSeqNum, Stats}),
+ NewSeq.
+
+
+maybe_save_docs(DbTarget, DbSource,
+ #doc_info{id=Id, rev=Rev, conflict_revs=Conflicts, deleted_conflict_revs=DelConflicts},
+ Stats) ->
+ SrcRevs = [Rev | Conflicts] ++ DelConflicts,
+ {ok, [{Id, MissingRevs}]} = get_missing_revs(DbTarget, [{Id, SrcRevs}]),
+
+ case MissingRevs of
+ [] ->
+ Stats;
+ _Else ->
+ % the 'ok' below validates no unrecoverable errors (like network failure, etc).
+ {ok, DocResults} = open_doc_revs(DbSource, Id, MissingRevs, [latest]),
+
+ Docs = [RevDoc || {ok, RevDoc} <- DocResults], % only match successful loads
+
+ Stats2 = Stats#stats{
+ docs_read=Stats#stats.docs_read + length(Docs),
+ read_errors=Stats#stats.read_errors + length(DocResults) - length(Docs)},
+
+ case Docs of
+ [] ->
+ Stats2;
+ _ ->
+ % the 'ok' below validates no unrecoverable errors (like network failure, etc).
+ ok = save_docs(DbTarget, Docs, []),
+ Stats2#stats{docs_copied=Stats2#stats.docs_copied+length(Docs)}
+ end
+ end.
+
+
+do_http_request(Url, Action) ->
+ do_http_request(Url, Action, []).
+
+do_http_request(Url, Action, JsonBody) ->
+ % Issue an HTTP request (Action = get | put | post ...) and return the
+ % cjson-decoded response body.
+ couch_log:debug("couch_rep HTTP client request:"),
+ couch_log:debug("\tAction: ~p", [Action]),
+ couch_log:debug("\tUrl: ~p", [Url]),
+ Request =
+ case JsonBody of
+ [] ->
+ {Url, []}; % bodyless request (e.g. GET)
+ _ ->
+ {Url, [], "application/json; charset=utf-8", lists:flatten(cjson:encode(JsonBody))}
+ end,
+ {ok, {{_, ResponseCode,_},_Headers, ResponseBody}} = http:request(Action, Request, [], []),
+ if
+ ResponseCode >= 200, ResponseCode < 500 ->
+ % NOTE(review): a 5xx matches no if-clause and crashes the caller —
+ % presumably intentional fail-fast on server errors; confirm
+ cjson:decode(ResponseBody)
+ end.
+
+enum_docs0(_InFun, [], Acc) ->
+ Acc;
+enum_docs0(InFun, [DocInfo | Rest], Acc) ->
+ % Fold InFun over the DocInfo list, honoring the {stop, Acc} early-exit
+ % protocol used by couch_db:enum_docs_since/4.
+ case InFun(DocInfo, 0, Acc) of
+ {ok, Acc2} -> enum_docs0(InFun, Rest, Acc2);
+ {stop, Acc2} -> Acc2
+ end.
+
+open_db("http" ++ DbName)->
+ % Remote database: the "handle" is just the URL, normalized to end in "/".
+ case lists:last(DbName) of
+ $/ ->
+ {ok, "http" ++ DbName};
+ _ ->
+ {ok, "http" ++ DbName ++ "/"}
+ end;
+open_db(DbName)->
+ couch_server:open(DbName). % local database name
+
+
+enum_docs_since(DbUrl, StartSeq, InFun, InAcc) when is_list(DbUrl) ->
+ Url = DbUrl ++ "_all_docs_by_seq?startkey=" ++ integer_to_list(StartSeq),
+ {obj, Results} = do_http_request(Url, get),
+ DocInfoList=
+ lists:map(fun({obj, RowInfoList}) ->
+ {obj, RowValueProps} = proplists:get_value("value", RowInfoList),
+ #doc_info{
+ id=proplists:get_value("id", RowInfoList),
+ rev=proplists:get_value("rev", RowValueProps),
+ update_seq = proplists:get_value("key", RowInfoList),
+ conflict_revs =
+ tuple_to_list(proplists:get_value("conflicts", RowValueProps, {})),
+ deleted_conflict_revs =
+ tuple_to_list(proplists:get_value("deleted_conflicts", RowValueProps, {})),
+ deleted = proplists:get_value("deleted", RowValueProps, false)}
+ end, tuple_to_list(proplists:get_value("rows", Results))),
+ {ok, enum_docs0(InFun, DocInfoList, InAcc)};
+enum_docs_since(DbSource, StartSeq, Fun, Acc) ->
+ couch_db:enum_docs_since(DbSource, StartSeq, Fun, Acc).
+
+get_missing_revs(DbUrl, DocIdRevsList) when is_list(DbUrl) ->
+ JsonDocIdRevsList = {obj,
+ [{Id, list_to_tuple(RevList)} || {Id, RevList} <- DocIdRevsList]},
+ {obj, ResponseMembers} =
+ do_http_request(DbUrl ++ "_missing_revs",
+ post, JsonDocIdRevsList),
+ {obj, DocMissingRevsList} = proplists:get_value("missing_revs", ResponseMembers),
+ {ok, [{Id, tuple_to_list(MissingRevs)} || {Id, MissingRevs} <- DocMissingRevsList]};
+get_missing_revs(Db, DocId) ->
+ couch_db:get_missing_revs(Db, DocId).
+
+
+update_doc(DbUrl, #doc{id=DocId}=Doc, _Options) when is_list(DbUrl) ->
+ Url = DbUrl ++ url_encode(DocId),
+ {obj, ResponseMembers} =
+ do_http_request(Url, put, couch_doc:to_json_obj(Doc, [revs,attachments])),
+ RevId = proplists:get_value("_rev", ResponseMembers),
+ {ok, RevId};
+update_doc(Db, Doc, Options) ->
+ couch_db:update_doc(Db, Doc, Options).
+
+save_docs(_, [], _) ->
+ ok;
+save_docs(DbUrl, Docs, []) when is_list(DbUrl) ->
+ JsonDocs = [couch_doc:to_json_obj(Doc, [revs,attachments]) || Doc <- Docs],
+ {obj, Returned} =
+ do_http_request(DbUrl ++ "_bulk_docs", post, {obj, [{new_edits, false}, {docs, list_to_tuple(JsonDocs)}]}),
+ true = proplists:get_value("ok", Returned),
+ ok;
+save_docs(Db, Docs, Options) ->
+ couch_db:save_docs(Db, Docs, Options).
+
+
+open_doc(DbUrl, DocId, []) when is_list(DbUrl) ->
+ case do_http_request(DbUrl ++ url_encode(DocId), get) of
+ {obj, [{"error", ErrId}, {"reason", Reason}]} ->
+ {list_to_atom(ErrId), Reason};
+ Doc ->
+ {ok, couch_doc:from_json_obj(Doc)}
+ end;
+open_doc(Db, DocId, Options) when not is_list(Db) ->
+ couch_db:open_doc(Db, DocId, Options).
+
+
+open_doc_revs(DbUrl, DocId, Revs, Options) when is_list(DbUrl) ->
+ % Fetch the given revisions of DocId from a remote db. Returns
+ % {ok, [{ok, Doc} | {{not_found, missing}, Rev}]}.
+ QueryOptionStrs =
+ lists:map(fun(latest) ->
+ % latest is only option right now
+ "latest=true"
+ end, Options),
+ RevsQueryStrs = lists:flatten(cjson:encode(list_to_tuple(Revs))),
+ % fix: encode the doc id like open_doc/update_doc do, so ids containing
+ % reserved URL characters don't produce a malformed request path
+ Url = DbUrl ++ url_encode(DocId) ++ "?" ++ couch_util:implode(["revs=true", "attachments=true", "open_revs=" ++ RevsQueryStrs ] ++ QueryOptionStrs, "&"),
+ JsonResults = do_http_request(Url, get, []),
+ Results =
+ lists:map(
+ fun({obj, [{"missing", Rev}]}) ->
+ {{not_found, missing}, Rev}; % that revision isn't on the source
+ ({obj, [{"ok", JsonDoc}]}) ->
+ {ok, couch_doc:from_json_obj(JsonDoc)}
+ end, tuple_to_list(JsonResults)),
+ {ok, Results};
+open_doc_revs(Db, DocId, Revs, Options) ->
+ couch_db:open_doc_revs(Db, DocId, Revs, Options). % local db: delegate
+
+
+
+
+
+test() ->
+ couch_server:start(),
+ %{ok, LocalA} = couch_server:open("replica_a"),
+ {ok, LocalA} = couch_server:create("replica_a", [overwrite]),
+ {ok, _} = couch_server:create("replica_b", [overwrite]),
+ %DbA = "replica_a",
+ DbA = "http://localhost:5984/replica_a/",
+ %DbB = "replica_b",
+ DbB = "http://localhost:5984/replica_b/",
+ _DocUnids = test_write_docs(10, LocalA, []),
+ replicate(DbA, DbB),
+ %{ok, _Rev} = couch_db:delete_doc(LocalA, lists:nth(1, DocUnids), any),
+ % replicate(DbA, DbB),
+ ok.
+
+test_write_docs(0, _Db, Output) ->
+ lists:reverse(Output);
+test_write_docs(N, Db, Output) ->
+ Doc = #doc{
+ id=integer_to_list(N),
+ body={obj, [{"foo", integer_to_list(N)}, {"num", N}, {"bar", "blah"}]}},
+ couch_db:save_doc(Db, Doc, []),
+ test_write_docs(N-1, Db, [integer_to_list(N) | Output]).
diff --git a/src/couchdb/couch_server.erl b/src/couchdb/couch_server.erl
new file mode 100644
index 00000000..bb3617b2
--- /dev/null
+++ b/src/couchdb/couch_server.erl
@@ -0,0 +1,215 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_server).
+-behaviour(gen_server).
+-behaviour(application).
+
+-export([start/0,start/1,start/2,stop/0,stop/1]).
+-export([open/1,create/2,delete/1,all_databases/0,get_version/0]).
+-export([init/1, handle_call/3,sup_start_link/2]).
+-export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
+-export([dev_start/0,remote_restart/0]).
+
+-include("couch_db.hrl").
+
+-record(server,{
+ root_dir = [],
+ dbname_regexp,
+ options=[]
+ }).
+
+start() ->
+ start("").
+
+start(IniFile) when is_atom(IniFile) ->
+ couch_server_sup:start_link(atom_to_list(IniFile) ++ ".ini");
+start(IniNum) when is_integer(IniNum) ->
+ couch_server_sup:start_link("couch" ++ integer_to_list(IniNum) ++ ".ini");
+start(IniFile) ->
+ couch_server_sup:start_link(IniFile).
+
+start(_Type, _Args) ->
+ start().
+
+stop() ->
+ couch_server_sup:stop().
+
+stop(_Reason) ->
+ stop().
+
+dev_start() ->
+ stop(),
+ up_to_date = make:all([load, debug_info]),
+ start().
+
+get_version() ->
+ Apps = application:loaded_applications(),
+ case lists:keysearch(couch, 1, Apps) of
+ {value, {_, _, Vsn}} ->
+ Vsn;
+ false ->
+ "0.0.0"
+ end.
+
+sup_start_link(RootDir, Options) ->
+ gen_server:start_link({local, couch_server}, couch_server, {RootDir, Options}, []).
+
+open(Filename) ->
+ gen_server:call(couch_server, {open, Filename}).
+
+create(Filename, Options) ->
+ gen_server:call(couch_server, {create, Filename, Options}).
+
+delete(Filename) ->
+ gen_server:call(couch_server, {delete, Filename}).
+
+remote_restart() ->
+ gen_server:call(couch_server, remote_restart).
+
+init({RootDir, Options}) ->
+ {ok, RegExp} = regexp:parse("^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$"),
+ {ok, #server{root_dir=RootDir, dbname_regexp=RegExp, options=Options}}.
+
+check_filename(#server{dbname_regexp=RegExp}, Filename) ->
+ % Validate a database name against the allowed-name regexp built in init/1.
+ case regexp:match(Filename, RegExp) of
+ nomatch ->
+ {error, illegal_database_name};
+ _Match ->
+ ok
+ end.
+
+get_full_filename(Server, Filename) ->
+ filename:join([Server#server.root_dir, "./" ++ Filename ++ ".couch"]).
+
+
+terminate(_Reason, _Server) ->
+ ok.
+
+all_databases() ->
+ % List every database under the server root by scanning for *.couch files.
+ {ok, Root} = gen_server:call(couch_server, get_root),
+ Filenames =
+ filelib:fold_files(Root, "^[a-z0-9\\_\\$()\\+\\-]*[\\.]couch$", true,
+ fun(Filename, AccIn) ->
+ case Filename -- Root of
+ [$/ | RelativeFilename] -> ok; % strip Root plus a leading slash, if any
+ RelativeFilename -> ok % case only binds RelativeFilename; both arms fall through
+ end,
+ [filename:rootname(RelativeFilename, ".couch") | AccIn]
+ end, []),
+ {ok, Filenames}.
+
+
+handle_call(get_root, _From, #server{root_dir=Root}=Server) ->
+ {reply, {ok, Root}, Server};
+handle_call({open, Filename}, From, Server) ->
+ case check_filename(Server, Filename) of
+ {error, Error} ->
+ {reply, {error, Error}, Server};
+ ok ->
+ Filepath = get_full_filename(Server, Filename),
+ Result = supervisor:start_child(couch_server_sup,
+ {Filename,
+ {couch_db, open, [Filename, Filepath]},
+ transient ,
+ infinity,
+ supervisor,
+ [couch_db]}),
+ case Result of
+ {ok, Db} ->
+ {reply, {ok, Db}, Server};
+ {error, already_present} ->
+ ok = supervisor:delete_child(couch_server_sup, Filename),
+ % call self recursively
+ handle_call({open, Filename}, From, Server);
+ {error, {already_started, Db}} ->
+ {reply, {ok, Db}, Server};
+ {error, {not_found, _}} ->
+ {reply, not_found, Server};
+ {error, {Error, _}} ->
+ {reply, {error, Error}, Server}
+ end
+ end;
+handle_call({create, Filename, Options}, _From, Server) ->
+ case check_filename(Server, Filename) of
+ {error, Error} ->
+ {reply, {error, Error}, Server};
+ ok ->
+ Filepath = get_full_filename(Server, Filename),
+ ChildSpec = {Filename,
+ {couch_db, create, [Filename, Filepath, Options]},
+ transient,
+ infinity,
+ supervisor,
+ [couch_db]},
+ Result =
+ case supervisor:delete_child(couch_server_sup, Filename) of
+ ok ->
+ sup_start_child(couch_server_sup, ChildSpec);
+ {error, not_found} ->
+ sup_start_child(couch_server_sup, ChildSpec);
+ {error, running} ->
+ % a server process for this database already started. Maybe kill it
+ case lists:member(overwrite, Options) of
+ true ->
+ supervisor:terminate_child(couch_server_sup, Filename),
+ ok = supervisor:delete_child(couch_server_sup, Filename),
+ sup_start_child(couch_server_sup, ChildSpec);
+ false ->
+ {error, database_already_exists}
+ end
+ end,
+ case Result of
+ {ok, _Db} -> couch_db_update_notifier:notify({created, Filename});
+ _ -> ok
+ end,
+ {reply, Result, Server}
+ end;
+handle_call({delete, Filename}, _From, Server) ->
+ FullFilepath = get_full_filename(Server, Filename),
+ supervisor:terminate_child(couch_server_sup, Filename),
+ supervisor:delete_child(couch_server_sup, Filename),
+ case file:delete(FullFilepath) of
+ ok ->
+ couch_db_update_notifier:notify({deleted, Filename}),
+ {reply, ok, Server};
+ {error, enoent} ->
+ {reply, not_found, Server};
+ Else ->
+ {reply, Else, Server}
+ end;
+handle_call(remote_restart, _From, #server{options=Options}=Server) ->
+ case proplists:get_value(remote_restart, Options) of
+ true ->
+ exit(self(), restart);
+ _ ->
+ ok
+ end,
+ {reply, ok, Server}.
+
+% this function is just to strip out the child spec error stuff if hit
+sup_start_child(couch_server_sup, ChildSpec) ->
+ case supervisor:start_child(couch_server_sup, ChildSpec) of
+ {error, {Error, _ChildInfo}} ->
+ {error, Error}; % surface only the reason, not the child-spec details
+ Else ->
+ Else
+ end.
+
+handle_cast(_Msg, State) ->
+ {noreply,State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
diff --git a/src/couchdb/couch_server_sup.erl b/src/couchdb/couch_server_sup.erl
new file mode 100644
index 00000000..8b9889e7
--- /dev/null
+++ b/src/couchdb/couch_server_sup.erl
@@ -0,0 +1,185 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_server_sup).
+-behaviour(supervisor).
+
+-define(DEFAULT_INI, "couch.ini").
+
+-export([start_link/1,stop/0]).
+
+%% supervisor callbacks
+-export([init/1]).
+
+start_link(IniFilename) ->
+ case whereis(couch_server_sup) of
+ undefined ->
+ start_server(IniFilename);
+ _Else ->
+ {error, already_started}
+ end.
+
+start_server("") ->
+ % no ini file specified, check the command line args
+ IniFile =
+ case init:get_argument(couchini) of
+ {ok, [CmdLineIniFilename]} ->
+ CmdLineIniFilename;
+ _Else ->
+ ?DEFAULT_INI
+ end,
+ start_server(IniFile);
+start_server(InputIniFilename) ->
+
+ case init:get_argument(pidfile) of
+ {ok, [PidFile]} ->
+ case file:write_file(PidFile, os:getpid()) of
+ ok -> ok;
+ Error -> io:format("Failed to write PID file ~s, error: ~p", [PidFile, Error])
+ end;
+ _ -> ok
+ end,
+
+ {ok, Cwd} = file:get_cwd(),
+ IniFilename = couch_util:abs_pathname(InputIniFilename),
+ IniBin =
+ case file:read_file(IniFilename) of
+ {ok, IniBin0} ->
+ IniBin0;
+ {error, enoent} ->
+ Msg = io_lib:format("Couldn't find server configuration file ~s.", [InputIniFilename]),
+ io:format("~s~n", [Msg]),
+ throw({startup_error, Msg})
+ end,
+ {ok, Ini} = couch_util:parse_ini(binary_to_list(IniBin)),
+
+ ConsoleStartupMsg = proplists:get_value({"Couch", "ConsoleStartupMsg"}, Ini, "Apache CouchDB is starting."),
+ LogLevel = list_to_atom(proplists:get_value({"Couch", "LogLevel"}, Ini, "error")),
+ DbRootDir = proplists:get_value({"Couch", "DbRootDir"}, Ini, "."),
+ HttpConfigFile = proplists:get_value({"Couch", "HttpConfigFile"}, Ini, "couch_httpd.conf"),
+ LogFile = proplists:get_value({"Couch", "LogFile"}, Ini, "couchdb.log"),
+ UtilDriverDir = proplists:get_value({"Couch", "UtilDriverDir"}, Ini, ""),
+ UpdateNotifierExes = proplists:get_all_values({"Couch", "DbUpdateNotificationProcess"}, Ini),
+ FtSearchQueryServer = proplists:get_value({"Couch", "FullTextSearchQueryServer"}, Ini, ""),
+ RemoteRestart = list_to_atom(proplists:get_value({"Couch", "AllowRemoteRestart"}, Ini, "undefined")),
+ ServerOptions = [{remote_restart, RemoteRestart}],
+ QueryServers = [{Lang, QueryExe} || {{"Couch Query Servers", Lang}, QueryExe} <- Ini],
+
+ ChildProcesses =
+ [{couch_log,
+ {couch_log, start_link, [LogFile, LogLevel]},
+ permanent,
+ brutal_kill,
+ worker,
+ [couch_server]},
+ {couch_db_update_event,
+ {gen_event, start_link, [{local, couch_db_update}]},
+ permanent,
+ 1000,
+ supervisor,
+ dynamic},
+ {couch_server,
+ {couch_server, sup_start_link, [DbRootDir, ServerOptions]},
+ permanent,
+ brutal_kill,
+ worker,
+ [couch_server]},
+ {couch_util,
+ {couch_util, start_link, [UtilDriverDir]},
+ permanent,
+ brutal_kill,
+ worker,
+ [couch_util]},
+ {couch_query_servers,
+ {couch_query_servers, start_link, [QueryServers]},
+ permanent,
+ brutal_kill,
+ worker,
+ [couch_query_servers]},
+ {couch_view,
+ {couch_view, start_link, [DbRootDir]},
+ permanent,
+ brutal_kill,
+ worker,
+ [couch_view]},
+ {httpd,
+ {httpd, start_link, [HttpConfigFile]},
+ permanent,
+ 1000,
+ supervisor,
+ [httpd]}
+ ] ++
+ lists:map(fun(UpdateNotifierExe) ->
+ {UpdateNotifierExe,
+ {couch_db_update_notifier, start_link, [UpdateNotifierExe]},
+ permanent,
+ 1000,
+ supervisor,
+ [couch_db_update_notifier]}
+ end, UpdateNotifierExes)
+ ++
+ case FtSearchQueryServer of
+ "" ->
+ [];
+ _ ->
+ [{couch_ft_query,
+ {couch_ft_query, start_link, [FtSearchQueryServer]},
+ permanent,
+ 1000,
+ supervisor,
+ [httpd]}]
+ end,
+
+ io:format("couch ~s (LogLevel=~s)~n", [couch_server:get_version(), LogLevel]),
+ io:format("~s~n", [ConsoleStartupMsg]),
+
+ process_flag(trap_exit, true),
+ StartResult = (catch supervisor:start_link(
+ {local, couch_server_sup}, couch_server_sup, ChildProcesses)),
+
+ ConfigInfo = io_lib:format("Config Info ~s:~n\tCurrentWorkingDir=~s~n" ++
+ "\tDbRootDir=~s~n" ++
+ "\tHttpConfigFile=~s~n" ++
+ "\tLogFile=~s~n" ++
+ "\tUtilDriverDir=~s~n" ++
+ "\tDbUpdateNotificationProcesses=~s~n" ++
+ "\tFullTextSearchQueryServer=~s~n" ++
+ "~s",
+ [IniFilename,
+ Cwd,
+ DbRootDir,
+ HttpConfigFile,
+ LogFile,
+ UtilDriverDir,
+ UpdateNotifierExes,
+ FtSearchQueryServer,
+ [lists:flatten(io_lib:format("\t~s=~s~n", [Lang, QueryExe])) || {Lang, QueryExe} <- QueryServers]]),
+ couch_log:debug("~s", [ConfigInfo]),
+
+ case StartResult of
+ {ok,_} ->
+ % only output when startup was successful
+ io:format("Apache CouchDB has started. Time to relax.~n");
+ _ ->
+ % Since we failed startup, unconditionally dump configuration data to console
+ io:format("~s", [ConfigInfo]),
+ ok
+ end,
+ process_flag(trap_exit, false),
+ StartResult.
+
+stop() ->
+ catch exit(whereis(couch_server_sup), normal),
+ couch_log:stop().
+
+init(ChildProcesses) ->
+ {ok, {{one_for_one, 10, 3600}, ChildProcesses}}.
diff --git a/src/couchdb/couch_stream.erl b/src/couchdb/couch_stream.erl
new file mode 100644
index 00000000..d5157b4d
--- /dev/null
+++ b/src/couchdb/couch_stream.erl
@@ -0,0 +1,252 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_stream).
+-behaviour(gen_server).
+
+-export([test/1]).
+-export([open/1, open/2, close/1, read/3, read_term/2, write/2, write_term/2, get_state/1, foldl/5]).
+-export([copy/4]).
+-export([ensure_buffer/2, set_min_buffer/2]).
+-export([init/1, terminate/2, handle_call/3]).
+-export([handle_cast/2,code_change/3,handle_info/2]).
+
+-include("couch_db.hrl").
+
+-define(FILE_POINTER_BYTES, 8).
+-define(FILE_POINTER_BITS, 8*(?FILE_POINTER_BYTES)).
+
+-define(STREAM_OFFSET_BYTES, 4).
+-define(STREAM_OFFSET_BITS, 8*(?STREAM_OFFSET_BYTES)).
+
+-define(HUGE_CHUNK, 1000000000). % Huge chuck size when reading all in one go
+
+-define(DEFAULT_STREAM_CHUNK, 16#00100000). % 1 meg chunks when streaming data
+
+
+-record(write_stream,
+ {fd = 0,
+ current_pos = 0,
+ bytes_remaining = 0,
+ next_alloc = 0,
+ min_alloc = 16#00010000
+ }).
+
+-record(stream,
+ {
+ pid,
+ fd
+ }).
+
+
+%%% Interface functions %%%
+
+open(Fd) ->
+ open(nil, Fd).
+
+open(nil, Fd) ->
+ open({0,0}, Fd);
+open(State, Fd) ->
+ {ok, Pid} = gen_server:start_link(couch_stream, {State, Fd}, []),
+ {ok, #stream{pid = Pid, fd = Fd}}.
+
+close(#stream{pid = Pid, fd = _Fd}) ->
+ gen_server:call(Pid, close).
+
+get_state(#stream{pid = Pid, fd = _Fd}) ->
+ gen_server:call(Pid, get_state).
+
+ensure_buffer(#stream{pid = Pid, fd = _Fd}, Bytes) ->
+ gen_server:call(Pid, {ensure_buffer, Bytes}).
+
+set_min_buffer(#stream{pid = Pid, fd = _Fd}, Bytes) ->
+ gen_server:call(Pid, {set_min_buffer, Bytes}).
+
+read(#stream{pid = _Pid, fd = Fd}, Sp, Num) ->
+ read(Fd, Sp, Num);
+read(Fd, Sp, Num) ->
+ {ok, RevBin, Sp2} = stream_data(Fd, Sp, Num, ?HUGE_CHUNK, fun(Bin, Acc) -> {ok, [Bin | Acc]} end, []),
+ Bin = list_to_binary(lists:reverse(RevBin)),
+ {ok, Bin, Sp2}.
+
+copy(#stream{pid = _Pid, fd = Fd}, Sp, Num, DestStream) ->
+ copy(Fd, Sp, Num, DestStream); % unwrap the record and use the raw fd
+copy(Fd, Sp, Num, DestStream) ->
+ % Copy Num bytes starting at stream pointer Sp into DestStream; returns
+ % {ok, Pointer} where Pointer locates the first byte written.
+ {ok, NewSp, _Sp2} = stream_data(Fd, Sp, Num, ?HUGE_CHUNK,
+ fun(Bin, AccPointer) ->
+ % fix: write/2 takes (Stream, Bin) — the arguments were reversed —
+ % and the fold fun must return {ok, Acc} for stream_data/6 to
+ % continue (a bare value would hit its case_clause)
+ {ok, NewPointer} = write(DestStream, Bin),
+ {ok, if AccPointer == null -> NewPointer; true -> AccPointer end}
+ end,
+ null),
+ {ok, NewSp}.
+
+foldl(#stream{pid = _Pid, fd = Fd}, Sp, Num, Fun, Acc) ->
+ foldl(Fd, Sp, Num, Fun, Acc);
+foldl(Fd, Sp, Num, Fun, Acc) ->
+ {ok, _Acc, _Sp} = stream_data(Fd, Sp, Num, ?DEFAULT_STREAM_CHUNK, Fun, Acc).
+
+read_term(#stream{pid = _Pid, fd = Fd}, Sp) ->
+ read_term(Fd, Sp);
+read_term(Fd, Sp) ->
+ {ok, <<TermLen:(?STREAM_OFFSET_BITS)>>, Sp2}
+ = read(Fd, Sp, ?STREAM_OFFSET_BYTES),
+ {ok, Bin, _Sp3} = read(Fd, Sp2, TermLen),
+ {ok, binary_to_term(Bin)}.
+
+write_term(Stream, Term) ->
+ Bin = term_to_binary(Term),
+ Size = size(Bin),
+ Bin2 = <<Size:(?STREAM_OFFSET_BITS), Bin/binary>>,
+ write(Stream, Bin2).
+
+write(#stream{}, <<>>) ->
+ {ok, {0,0}};
+write(#stream{pid = Pid}, Bin) when is_binary(Bin) ->
+ gen_server:call(Pid, {write, Bin}).
+
+
+init({{Pos, BytesRemaining}, Fd}) ->
+ {ok, #write_stream
+ {fd = Fd,
+ current_pos = Pos,
+ bytes_remaining = BytesRemaining
+ }}.
+
+terminate(_Reason, _Stream) ->
+ ok.
+
+handle_call(get_state, _From, Stream) ->
+ #write_stream{current_pos = Pos, bytes_remaining = BytesRemaining} = Stream,
+ {reply, {Pos, BytesRemaining}, Stream};
+handle_call({set_min_buffer, MinBuffer}, _From, Stream) ->
+ {reply, ok, Stream#write_stream{min_alloc = MinBuffer}};
+handle_call({ensure_buffer, BufferSizeRequested}, _From, Stream) ->
+ #write_stream{bytes_remaining = BytesRemainingInCurrentBuffer} = Stream,
+ case BytesRemainingInCurrentBuffer < BufferSizeRequested of
+ true -> NextAlloc = BufferSizeRequested - BytesRemainingInCurrentBuffer;
+ false -> NextAlloc = 0 % enough room in current segment
+ end,
+ {reply, ok, Stream#write_stream{next_alloc = NextAlloc}};
+handle_call({write, Bin}, _From, Stream) ->
+ % ensure init is called first so we can get a pointer to the begining of the binary
+ {ok, Sp, Stream2} = write_data(Stream, Bin),
+ {reply, {ok, Sp}, Stream2};
+handle_call(close, _From, Stream) ->
+ #write_stream{current_pos=Pos, bytes_remaining = BytesRemaining} = Stream,
+ {stop, normal, {ok, {Pos, BytesRemaining}}, Stream}.
+
+handle_cast(_Msg, State) ->
+ {noreply,State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%%% Internal function %%%
+
+stream_data(_Fd, Sp, 0, _MaxChunk, _Fun, Acc) ->
+ {ok, Acc, Sp}; % all requested bytes consumed
+stream_data(Fd, {Pos, 0}, Num, MaxChunk, Fun, Acc) ->
+ % Current segment exhausted: read the {NextPos, NextOffset} pointer
+ % stored at its end and continue in the next segment.
+ {ok, <<NextPos:(?FILE_POINTER_BITS), NextOffset:(?STREAM_OFFSET_BITS)>>}
+ = couch_file:pread(Fd, Pos, ?FILE_POINTER_BYTES + ?STREAM_OFFSET_BYTES),
+ Sp = {NextPos, NextOffset},
+ % Check NextPos is past current Pos (this is always true in a stream)
+ % Guards against potential infinite loops caused by corruption.
+ case NextPos > Pos of
+ true -> ok;
+ false -> throw({error, stream_corruption})
+ end,
+ stream_data(Fd, Sp, Num, MaxChunk, Fun, Acc);
+stream_data(Fd, {Pos, Offset}, Num, MaxChunk, Fun, Acc) ->
+ ReadAmount = lists:min([MaxChunk, Num, Offset]), % never read past chunk, request, or segment end
+ {ok, Bin} = couch_file:pread(Fd, Pos, ReadAmount),
+ Sp = {Pos + ReadAmount, Offset - ReadAmount},
+ case Fun(Bin, Acc) of
+ {ok, Acc2} ->
+ stream_data(Fd, Sp, Num - ReadAmount, MaxChunk, Fun, Acc2);
+ {stop, Acc2} ->
+ {ok, Acc2, Sp} % caller asked to stop early
+ end.
+
+write_data(Stream, <<>>) ->
+ {ok, {0,0}, Stream};
+write_data(#write_stream{bytes_remaining=0} = Stream, Bin) ->
+ #write_stream {
+ fd = Fd,
+ current_pos = CurrentPos,
+ next_alloc = NextAlloc,
+ min_alloc = MinAlloc
+ }= Stream,
+
+ NewSize = lists:max([MinAlloc, NextAlloc, size(Bin)]),
+ % no space in the current segment, must alloc a new segment
+ {ok, NewPos} = couch_file:expand(Fd, NewSize + ?FILE_POINTER_BYTES + ?STREAM_OFFSET_BYTES),
+
+ case CurrentPos of
+ 0 ->
+ ok;
+ _ ->
+ ok = couch_file:pwrite(Fd, CurrentPos, <<NewPos:(?FILE_POINTER_BITS), NewSize:(?STREAM_OFFSET_BITS)>>)
+ end,
+ Stream2 = Stream#write_stream{
+ current_pos=NewPos,
+ bytes_remaining=NewSize,
+ next_alloc=0},
+ write_data(Stream2, Bin);
+write_data(#write_stream{fd=Fd, current_pos=Pos, bytes_remaining=BytesRemaining} = Stream, Bin) ->
+ BytesToWrite = lists:min([size(Bin), BytesRemaining]),
+ {WriteBin, Rest} = split_binary(Bin, BytesToWrite),
+ ok = couch_file:pwrite(Fd, Pos, WriteBin),
+ Stream2 = Stream#write_stream{
+ bytes_remaining=BytesRemaining - BytesToWrite,
+ current_pos=Pos + BytesToWrite
+ },
+ {ok, _, Stream3} = write_data(Stream2, Rest),
+ {ok, {Pos, BytesRemaining}, Stream3}.
+
+
+
+%%% Tests %%%
+
+
+test(Term) ->
+ {ok, Fd} = couch_file:open("foo", [write]),
+ {ok, Stream} = open({0,0}, Fd),
+ {ok, Pos} = write_term(Stream, Term),
+ {ok, Pos2} = write_term(Stream, {Term, Term}),
+ close(Stream),
+ couch_file:close(Fd),
+ {ok, Fd2} = couch_file:open("foo", [read, write]),
+ {ok, Stream2} = open({0,0}, Fd2),
+ {ok, Term1} = read_term(Fd2, Pos),
+ io:format("Term1: ~w ~n",[Term1]),
+ {ok, Term2} = read_term(Fd2, Pos2),
+ io:format("Term2: ~w ~n",[Term2]),
+ {ok, PointerList} = deep_write_test(Stream2, Term, 1000, []),
+ deep_read_test(Fd2, PointerList),
+ close(Stream2),
+ couch_file:close(Fd2).
+
+deep_read_test(_Fd, []) ->
+ ok;
+deep_read_test(Fd, [Pointer | RestPointerList]) ->
+ {ok, _Term} = read_term(Fd, Pointer),
+ deep_read_test(Fd, RestPointerList).
+
+deep_write_test(_Stream, _Term, 0, PointerList) ->
+ {ok, PointerList};
+deep_write_test(Stream, Term, N, PointerList) ->
+ WriteList = lists:duplicate(random:uniform(N), Term),
+ {ok, Pointer} = write_term(Stream, WriteList),
+ deep_write_test(Stream, Term, N-1, [Pointer | PointerList]).
diff --git a/src/couchdb/couch_util.erl b/src/couchdb/couch_util.erl
new file mode 100644
index 00000000..42845fe0
--- /dev/null
+++ b/src/couchdb/couch_util.erl
@@ -0,0 +1,316 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_util).
+-behaviour(gen_server).
+
+-export([start_link/0,start_link/1]).
+-export([parse_ini/1]).
+-export([new_uuid/0, rand32/0, implode/2, collate/2, collate/3]).
+-export([abs_pathname/1,abs_pathname/2, trim/1, ascii_lower/1, test/0]).
+-export([encodeBase64/1, decodeBase64/1]).
+
+-export([init/1, terminate/2, handle_call/3]).
+-export([handle_cast/2,code_change/3,handle_info/2]).
+
+
+% Starts the couch_util gen_server, first loading the couch_erl_driver
+% linked-in driver (used by collate/2,3 below). With no argument the
+% driver is loaded from the couch application's priv/lib directory.
+start_link() ->
+ start_link("").
+
+start_link("") ->
+ start_link(filename:join(code:priv_dir(couch), "lib"));
+start_link(LibDir) ->
+ % already_loaded is fine (e.g. a restart); any other failure aborts.
+ case erl_ddll:load_driver(LibDir, "couch_erl_driver") of
+ ok -> ok;
+ {error, already_loaded} -> ok;
+ {error, ErrorDesc} -> exit({error, ErrorDesc})
+ end,
+ gen_server:start_link({local, couch_util}, couch_util, [], []).
+
+
+% Returns a new 128-bit uuid as a 32-char hex string. Routed through the
+% gen_server so all callers share the single seeded RNG process.
+new_uuid() ->
+ gen_server:call(couch_util, new_uuid).
+
+% returns a random 32-bit unsigned integer (0..16#FFFFFFFF)
+rand32() ->
+ gen_server:call(couch_util, rand32).
+
+% given a pathname "../foo/bar/" it gives back the fully qualified
+% absolute pathname. Anything after an unescaped space is treated as
+% command-line arguments and re-appended untouched (see separate_cmd_args).
+abs_pathname(" " ++ Filename) ->
+ % strip leading whitespace
+ abs_pathname(Filename);
+abs_pathname([$/ |_]=Filename) ->
+ % already absolute
+ Filename;
+abs_pathname(Filename) ->
+ {ok, Cwd} = file:get_cwd(),
+ {Filename2, Args} = separate_cmd_args(Filename, ""),
+ abs_pathname(Filename2, Cwd) ++ Args.
+
+% Resolve Filename relative to Dir, collapsing "." and ".." components.
+abs_pathname(Filename, Dir) ->
+ Name = filename:absname(Filename, Dir ++ "/"),
+ OutFilename = filename:join(fix_path_list(filename:split(Name), [])),
+ % If the filename is a dir (last char slash), put back end slash
+ case string:right(Filename,1) of
+ "/" ->
+ OutFilename ++ "/";
+ "\\" ->
+ OutFilename ++ "/";
+ _Else->
+ OutFilename
+ end.
+
+% If this is an executable with arguments, separate out the arguments:
+% "./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
+% Backslash-escaped spaces remain part of the command.
+separate_cmd_args("", CmdAcc) ->
+ {lists:reverse(CmdAcc), ""};
+separate_cmd_args("\\ " ++ Rest, CmdAcc) -> % escaped space stays in the command
+ separate_cmd_args(Rest, " \\" ++ CmdAcc);
+separate_cmd_args(" " ++ Rest, CmdAcc) ->
+ % first unescaped space: everything after it is the argument string
+ {lists:reverse(CmdAcc), " " ++ Rest};
+separate_cmd_args([Char|Rest], CmdAcc) ->
+ separate_cmd_args(Rest, [Char | CmdAcc]).
+
+% lowercases string bytes that are the ascii characters A-Z.
+% All other characters/bytes are passed through unchanged.
+ascii_lower(String) ->
+ ascii_lower(String, []).
+
+ascii_lower([], Acc) ->
+ lists:reverse(Acc);
+% BUG FIX: the guard previously read `Char =< $B`, so only 'A' and 'B'
+% were ever lowercased; the documented intent is the full range A-Z.
+ascii_lower([Char | RestString], Acc) when Char >= $A, Char =< $Z ->
+ ascii_lower(RestString, [Char + ($a-$A) | Acc]);
+ascii_lower([Char | RestString], Acc)->
+ ascii_lower(RestString, [Char | Acc]).
+
+% Is a character whitespace? (space, tab, newline, carriage return)
+is_whitespace($\s)-> true;
+is_whitespace($\t)-> true;
+is_whitespace($\n)-> true;
+is_whitespace($\r)-> true;
+is_whitespace(_Else) -> false.
+
+
+% removes leading and trailing whitespace from a string
+trim(String) ->
+ String2 = lists:dropwhile(fun is_whitespace/1, String),
+ lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).
+
+% takes a hierarchical list of dirs and removes the dots ".", double dots
+% ".." and the corresponding parent dirs.
+% NOTE(review): a ".." with an empty accumulator (i.e. above the root)
+% falls through to the last clause and is kept literally — confirm callers
+% never pass such paths.
+fix_path_list([], Acc) ->
+ lists:reverse(Acc);
+fix_path_list([".."|Rest], [_PrevAcc|RestAcc]) ->
+ fix_path_list(Rest, RestAcc);
+fix_path_list(["."|Rest], Acc) ->
+ fix_path_list(Rest, Acc);
+fix_path_list([Dir | Rest], Acc) ->
+ fix_path_list(Rest, [Dir | Acc]).
+
+
+% Joins the elements of List with separator Sep and flattens the result
+% to a plain string, e.g. implode(["a","b"], "-") -> "a-b".
+implode(List, Sep) ->
+ lists:flatten(intersperse(List, Sep)).
+
+% Place Sep between consecutive elements; no leading/trailing separator.
+intersperse([], _Sep) ->
+ [];
+intersperse([Last], _Sep) ->
+ [Last];
+intersperse([Head | Rest], Sep) ->
+ [Head, Sep | intersperse(Rest, Sep)].
+
+
+% This is a simple ini parser. it just converts the string
+% contents of a file like this:
+%
+%; comments are ignored
+%;commentedoutvariable=foo
+%this is line that gets ignored because it has no equals sign
+%[this line gets ignored because it starts with a bracket but doesn't end with one
+%bloodtype=Ragu
+%[Some Section]
+%timeout=30
+%Default=zuh ; another comment (leading space or tab before a semi is necessary to be a comment if not at beginning of line)
+%[Another Section]
+%key with spaces=a value with stuff; and no comment
+%oops="it doesn't qet quoted strings with semis quite right ; it thinks it's part comment"
+%
+%And converts it into this:
+%[{{"","bloodtype"},"Ragu"},
+% {{"Some Section","timeout"},"30"},
+% {{"Some section","Default"}, "zuh"},
+% {{"Another Section", "key with spaces"}, "a value with stuff; and no comment"},
+% {{"Another Section", "oops"}, "\"it doesn't qet quoted strings with semis quite right"}]
+%
+
+% Parses ini-file text (see the grammar sketch above) into
+% {ok, [{{SectionName, KeyName}, Value}]} in file order.
+% NOTE(review): uses the old `regexp` module (removed in modern OTP in
+% favour of `re`) — fine for the OTP releases this targets.
+parse_ini(FileContents) ->
+ {ok, Lines} = regexp:split(FileContents, "\r\n|\n|\r|\032"),
+ {_, ParsedIniValues} =
+ lists:foldl(fun(Line, {AccSectionName, AccValues}) ->
+ case string:strip(Line) of
+ "[" ++ Rest ->
+ case regexp:split(Rest, "\\]") of
+ {ok, [NewSectionName, ""]} ->
+ {NewSectionName, AccValues};
+ _Else -> % end bracket not at end, ignore this line
+ {AccSectionName, AccValues}
+ end;
+ ";" ++ _Comment ->
+ {AccSectionName, AccValues};
+ Line2 ->
+ case regexp:split(Line2, "=") of
+ {ok, [_SingleElement]} -> % no "=" found, ignore this line
+ {AccSectionName, AccValues};
+ {ok, [""|_LineValues]} -> % line begins with "=", ignore
+ {AccSectionName, AccValues};
+ {ok, [ValueName|LineValues]} -> % yeehaw, got a line!
+ % re-join any "=" that were part of the value itself
+ RemainingLine = implode(LineValues, "="),
+ {ok, [LineValue | _Rest]} = regexp:split(RemainingLine, " ;|\t;"), % removes comments
+ {AccSectionName, [{{AccSectionName, ValueName}, LineValue} | AccValues]}
+ end
+ end
+ end, {"", []}, Lines),
+ {ok, lists:reverse(ParsedIniValues)}.
+
+% gen_server init: seed this process's RNG so new_uuid/rand32 results
+% differ between node restarts. The state is a dummy placeholder.
+init([]) ->
+ {A,B,C} = erlang:now(),
+ random:seed(A,B,C),
+ {ok, dummy_server}.
+
+terminate(_Reason, _Server) ->
+ ok.
+
+% Both calls are served in-process so they use the seeded RNG above.
+handle_call(new_uuid, _From, Server) ->
+ {reply, new_uuid_int(), Server};
+handle_call(rand32, _From, Server) ->
+ {reply, rand32_int(), Server}.
+
+handle_cast(_Msg, State) ->
+ {noreply,State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+
+% Builds a 128-bit pseudo-random uuid as four 32-bit words rendered as
+% 8 zero-padded uppercase hex digits each (32 chars total).
+new_uuid_int() ->
+ % eventually make this a C callout for a real guid (collisions are far less likely
+ % when using a proper generation function). For now we just fake it.
+ Num1 = random:uniform(16#FFFFFFFF + 1) - 1,
+ Num2 = random:uniform(16#FFFFFFFF + 1) - 1,
+ Num3 = random:uniform(16#FFFFFFFF + 1) - 1,
+ Num4 = random:uniform(16#FFFFFFFF + 1) - 1,
+ lists:flatten(io_lib:format("~8.16.0B~8.16.0B~8.16.0B~8.16.0B", [Num1, Num2, Num3, Num4])).
+
+
+
+% uniform over 0..16#FFFFFFFF inclusive (random:uniform/1 is 1..N)
+rand32_int() ->
+ random:uniform(16#FFFFFFFF + 1) - 1.
+
+% Opens (once) and caches a port to the collation driver in the calling
+% process's dictionary, so each process gets its own port.
+drv_port() ->
+ case get(couch_drv_port) of
+ undefined ->
+ Port = open_port({spawn, "couch_erl_driver"}, []),
+ put(couch_drv_port, Port),
+ Port;
+ Port ->
+ Port
+ end.
+
+% Collation-order comparison of two strings; returns -1, 0 or 1.
+collate(A, B) ->
+ collate(A, B, []).
+
+% Options: [nocase] for case-insensitive comparison.
+collate(A, B, Options) when is_list(A), is_list(B) ->
+ Operation =
+ case lists:member(nocase, Options) of
+ true -> 1; % Case insensitive
+ false -> 0 % Case sensitive
+ end,
+ Port = drv_port(),
+ % wire format: two length-prefixed (32-bit native) strings
+ LenA = length(A),
+ LenB = length(B),
+ Bin = list_to_binary([<<LenA:32/native>>, A, <<LenB:32/native>>, B]),
+ % driver replies with one byte: 0 => A<B, 1 => A>B, 2 => equal
+ case erlang:port_control(Port, Operation, Bin) of
+ [0] -> -1;
+ [1] -> 1;
+ [2] -> 0
+ end.
+
+
+
+
+%%% Purpose : Base 64 encoding and decoding.
+%%% Copied from ssl_base_64 to avoid using the
+%%% erlang ssl library
+
+-define(st(X,A), ((X-A+256) div 256)).
+-define(CHARS, 64).
+
+%% A PEM encoding consists of characters A-Z, a-z, 0-9, +, / and
+%% =. Each character encodes a 6 bits value from 0 to 63 (A = 0, / =
+%% 63); = is a padding character.
+%%
+
+%%
+%% encode64(Bytes|Binary) -> Chars
+%%
+%% Take 3 bytes a time (3 x 8 = 24 bits), and make 4 characters out of
+%% them (4 x 6 = 24 bits).
+%%
+% Base64-encode a list or binary; returns the encoded characters as a
+% list, padded with '=' to a multiple of 4 chars.
+% FIX: the old-style boolean BIF guard `list(Bs)` is deprecated (removed
+% in modern Erlang) — use the `is_list/1` guard test instead.
+encodeBase64(Bs) when is_list(Bs) ->
+ encodeBase64(list_to_binary(Bs));
+encodeBase64(<<B:3/binary, Bs/binary>>) ->
+ % 3 bytes (24 bits) -> 4 six-bit groups -> 4 output chars
+ <<C1:6, C2:6, C3:6, C4:6>> = B,
+ [enc(C1), enc(C2), enc(C3), enc(C4)| encodeBase64(Bs)];
+encodeBase64(<<B:2/binary>>) ->
+ % 2 trailing bytes: pad with one zero byte, emit one '='
+ <<C1:6, C2:6, C3:6, _:6>> = <<B/binary, 0>>,
+ [enc(C1), enc(C2), enc(C3), $=];
+encodeBase64(<<B:1/binary>>) ->
+ % 1 trailing byte: pad with two zero bytes, emit two '='
+ <<C1:6, C2:6, _:12>> = <<B/binary, 0, 0>>,
+ [enc(C1), enc(C2), $=, $=];
+encodeBase64(<<>>) ->
+ [].
+
+%%
+%% decodeBase64(Chars) -> Binary
+%% Inverse of encodeBase64/1. Input length must be a multiple of 4;
+%% no validation is performed on the characters themselves.
+%%
+decodeBase64(Cs) ->
+ list_to_binary(decode1(Cs)).
+
+decode1([C1, C2, $=, $=]) ->
+ <<B1, _:16>> = <<(dec(C1)):6, (dec(C2)):6, 0:12>>,
+ [B1];
+decode1([C1, C2, C3, $=]) ->
+ % dec(0) happens to evaluate to 0, so this is just 6 bits of padding
+ <<B1, B2, _:8>> = <<(dec(C1)):6, (dec(C2)):6, (dec(C3)):6, (dec(0)):6>>,
+ [B1, B2];
+decode1([C1, C2, C3, C4| Cs]) ->
+ Bin = <<(dec(C1)):6, (dec(C2)):6, (dec(C3)):6, (dec(C4)):6>>,
+ [Bin| decode1(Cs)];
+decode1([]) ->
+ [].
+
+%% enc/1 and dec/1
+%%
+%% Mapping: 0-25 -> A-Z, 26-51 -> a-z, 52-61 -> 0-9, 62 -> +, 63 -> /
+%%
+%% ?st(X,A) (defined above) is a branchless step function: 1 when X >= A,
+%% 0 otherwise (for byte-range X), so each term below shifts the output
+%% into the next alphabet range without conditionals.
+enc(C) ->
+ 65 + C + 6*?st(C,26) - 75*?st(C,52) -15*?st(C,62) + 3*?st(C,63).
+
+dec(C) ->
+ 62*?st(C,43) + ?st(C,47) + (C-59)*?st(C,48) - 69*?st(C,65) - 6*?st(C,97).
+
+
+
+% Manual smoke test: load the driver from "debug" and run one collation.
+test() ->
+ start_link("debug"),
+ collate("a","b",[]).
diff --git a/src/couchdb/couch_view.erl b/src/couchdb/couch_view.erl
new file mode 100644
index 00000000..612eb5fd
--- /dev/null
+++ b/src/couchdb/couch_view.erl
@@ -0,0 +1,616 @@
+% Copyright 2007, 2008 Damien Katz <damien_katz@yahoo.com>
+%
+% Licensed under the Apache License, Version 2.0 (the "License");
+% you may not use this file except in compliance with the License.
+% You may obtain a copy of the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS,
+% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+% See the License for the specific language governing permissions and
+% limitations under the License.
+
+-module(couch_view).
+-behaviour(gen_server).
+
+-export([start_link/1,fold/4,fold/5,less_json/2, start_update_loop/3, start_temp_update_loop/4]).
+-export([init/1,terminate/2,handle_call/3,handle_cast/2,handle_info/2,code_change/3]).
+
+-include("couch_db.hrl").
+
+% arbitrarily chosen amount of memory to use before flushing to disk
+-define(FLUSH_MAX_MEM, 10000000).
+
+% One view group: a design document's set of views sharing one index file.
+-record(group,
+ {db, % open db handle
+ fd, % index file handle (nil in the on-disk form)
+ name, % group/design doc id
+ def_lang, % view definition language, e.g. "javascript"
+ views, % list of #view{}
+ id_btree, % btree mapping doc id -> emitted view keys
+ current_seq, % db update_seq the index is computed through
+ query_server=nil % running couch_query_servers doc-map process, or nil
+ }).
+
+% One named view within a group.
+-record(view,
+ {id_num, % position of this view within the group
+ name,
+ btree, % key btree (state tuple on disk, opened btree in memory)
+ def % source text of the map function
+ }).
+
+% gen_server state: where index files live.
+-record(server,
+ {root_dir
+ }).
+
+% Starts the couch_view gen_server; RootDir is where index files live.
+start_link(RootDir) ->
+ gen_server:start_link({local, couch_view}, couch_view, RootDir, []).
+
+
+
+% Returns the pid of the updater process for an ad-hoc (temp) view,
+% spawning one via the server if none exists yet.
+get_temp_updater(DbName, Type, Src) ->
+ {ok, Pid} = gen_server:call(couch_view, {start_temp_updater, DbName, Type, Src}),
+ Pid.
+
+% Returns the pid of the updater process for a named view group.
+get_updater(DbName, GroupId) ->
+ {ok, Pid} = gen_server:call(couch_view, {start_updater, DbName, GroupId}),
+ Pid.
+
+% Asks an updater process for its current group state and waits for the
+% reply, monitoring the updater so a crash turns into a throw rather than
+% a hang. The inner receive after demonitor/1 manually flushes any 'DOWN'
+% message that raced in (demonitor/1 has no flush option here).
+get_updated_group(Pid) ->
+ Mref = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', Mref, _, _, Reason} ->
+ % updater already dead before we even asked
+ throw(Reason)
+ after 0 ->
+ Pid ! {self(), get_updated},
+ receive
+ {Pid, Response} ->
+ erlang:demonitor(Mref),
+ receive
+ {'DOWN', Mref, _, _, _} ->
+ Response
+ after 0 ->
+ Response
+ end;
+ {'DOWN', Mref, _, _, Reason} ->
+ throw(Reason)
+ end
+ end.
+
+% Folds over a view's rows; same as fold/5 with no start key.
+fold(ViewInfo, Dir, Fun, Acc) ->
+ fold(ViewInfo, nil, Dir, Fun, Acc).
+
+% Folds over either an ad-hoc (temp) view or a named view in a design
+% group, first waiting for its updater to bring the index current.
+fold({temp, DbName, Type, Src}, StartKey, Dir, Fun, Acc) ->
+ {ok, #group{views=[View]}} = get_updated_group(get_temp_updater(DbName, Type, Src)),
+ fold_view(View#view.btree, StartKey, Dir, Fun, Acc);
+fold({DbName, GroupId, ViewName}, StartKey, Dir, Fun, Acc) ->
+ {ok, #group{views=Views}} = get_updated_group(get_updater(DbName, GroupId)),
+ Btree = get_view_btree(Views, ViewName),
+ fold_view(Btree, StartKey, Dir, Fun, Acc).
+
+% Adapts the caller's 6-arity fun to couch_btree's fold, supplying the
+% total row count so callers can report it alongside each row.
+fold_view(Btree, StartKey, Dir, Fun, Acc) ->
+ TotalRowCount = couch_btree:row_count(Btree),
+ WrapperFun = fun({{Key, DocId}, Value}, Offset, WrapperAcc) ->
+ Fun(DocId, Key, Value, Offset, TotalRowCount, WrapperAcc)
+ end,
+ {ok, AccResult} = couch_btree:fold(Btree, StartKey, Dir, WrapperFun, Acc),
+ {ok, TotalRowCount, AccResult}.
+
+
+% Linear search for the btree of the view named ViewName; throws
+% {not_found, missing_named_view} when no view in the list matches.
+get_view_btree([], _ViewName) ->
+ throw({not_found, missing_named_view});
+get_view_btree([#view{name=Name, btree=Btree} | RemainingViews], ViewName) ->
+ case Name == ViewName of
+ true -> Btree;
+ false -> get_view_btree(RemainingViews, ViewName)
+ end.
+
+
+% gen_server init: subscribes to db create/delete notifications (either
+% event invalidates that db's indexes) and creates the bookkeeping ets
+% tables mapping dbs, names and updater pids to each other. trap_exit is
+% set so handle_info can observe updater deaths.
+init(RootDir) ->
+ UpdateNotifierFun =
+ fun({deleted, DbName}) ->
+ gen_server:cast(couch_view, {reset_indexes, DbName});
+ ({created, DbName}) ->
+ gen_server:cast(couch_view, {reset_indexes, DbName});
+ (_Else) ->
+ ok
+ end,
+ couch_db_update_notifier:start_link(UpdateNotifierFun),
+ ets:new(couch_views_by_db, [bag, private, named_table]),
+ ets:new(couch_views_by_name, [set, protected, named_table]),
+ ets:new(couch_views_by_updater, [set, private, named_table]),
+ ets:new(couch_views_temp_fd_by_db, [set, protected, named_table]),
+ process_flag(trap_exit, true),
+ {ok, #server{root_dir=RootDir}}.
+
+% Drop the bookkeeping tables; `catch` tolerates their already being gone.
+terminate(_Reason, _) ->
+ catch ets:delete(couch_views_by_name),
+ catch ets:delete(couch_views_by_updater),
+ catch ets:delete(couch_views_by_db),
+ catch ets:delete(couch_views_temp_fd_by_db).
+
+
+% Returns (spawning on demand) the updater pid for a temp view. Temp
+% views for one db share a single temp index file, refcounted in
+% couch_views_temp_fd_by_db; the view's name is derived from an md5 of
+% its language + source so identical queries share one updater.
+handle_call({start_temp_updater, DbName, Lang, Query}, _From, #server{root_dir=Root}=Server) ->
+ <<SigInt:128/integer>> = erlang:md5(Lang ++ Query),
+ Name = lists:flatten(io_lib:format("_temp_~.36B",[SigInt])),
+ Pid =
+ case ets:lookup(couch_views_by_name, {DbName, Name}) of
+ [] ->
+ % Fd and Count are bound in both branches of this case
+ case ets:lookup(couch_views_temp_fd_by_db, DbName) of
+ [] ->
+ FileName = Root ++ "/." ++ DbName ++ "_temp",
+ {ok, Fd} = couch_file:open(FileName, [create, overwrite]),
+ Count = 0;
+ [{_, Fd, Count}] ->
+ ok
+ end,
+ couch_log:debug("Spawning new temp update process for db ~s.", [DbName]),
+ NewPid = spawn_link(couch_view, start_temp_update_loop, [DbName, Fd, Lang, Query]),
+ true = ets:insert(couch_views_temp_fd_by_db, {DbName, Fd, Count + 1}),
+ add_to_ets(NewPid, DbName, Name),
+ NewPid;
+ [{_, ExistingPid0}] ->
+ ExistingPid0
+ end,
+ {reply, {ok, Pid}, Server};
+% Returns (spawning on demand) the updater pid for a named view group.
+handle_call({start_updater, DbName, GroupId}, _From, #server{root_dir=Root}=Server) ->
+ Pid =
+ case ets:lookup(couch_views_by_name, {DbName, GroupId}) of
+ [] ->
+ couch_log:debug("Spawning new update process for view group ~s in database ~s.", [GroupId, DbName]),
+ NewPid = spawn_link(couch_view, start_update_loop, [Root, DbName, GroupId]),
+ add_to_ets(NewPid, DbName, GroupId),
+ NewPid;
+ [{_, ExistingPid0}] ->
+ ExistingPid0
+ end,
+ {reply, {ok, Pid}, Server}.
+
+% A db was created or deleted: kill every updater for it (the receive of
+% 'EXIT' works because init set trap_exit), then remove its index files.
+handle_cast({reset_indexes, DbName}, #server{root_dir=Root}=Server) ->
+ % shutdown all the updaters
+ Names = ets:lookup(couch_views_by_db, DbName),
+ lists:foreach(
+ fun({_DbName, GroupId}) ->
+ couch_log:debug("Killing update process for view group ~s. in database ~s.", [GroupId, DbName]),
+ [{_, Pid}] = ets:lookup(couch_views_by_name, {DbName, GroupId}),
+ exit(Pid, kill),
+ receive {'EXIT', Pid, _} ->
+ delete_from_ets(Pid, DbName, GroupId)
+ end
+ end, Names),
+ delete_index_dir(Root, DbName),
+ file:delete(Root ++ "/." ++ DbName ++ "_temp"),
+ {noreply, Server}.
+
+% A linked process died. If it was an updater, clean up its ets entries;
+% for a temp updater also decrement the shared temp-file refcount and
+% delete the temp file when this was the last reference. Any other
+% linked process's death is propagated.
+handle_info({'EXIT', FromPid, Reason}, #server{root_dir=RootDir}=Server) ->
+ case ets:lookup(couch_views_by_updater, FromPid) of
+ [] -> % non-updater linked process must have died, we propagate the error
+ exit(Reason);
+ [{_, {DbName, "_temp_" ++ _ = GroupId}}] ->
+ delete_from_ets(FromPid, DbName, GroupId),
+ [{_, Fd, Count}] = ets:lookup(couch_views_temp_fd_by_db, DbName),
+ case Count of
+ 1 -> % Last ref
+ couch_file:close(Fd),
+ file:delete(RootDir ++ "/." ++ DbName ++ "_temp"),
+ true = ets:delete(couch_views_temp_fd_by_db, DbName);
+ _ ->
+ true = ets:insert(couch_views_temp_fd_by_db, {DbName, Fd, Count - 1})
+ end;
+ [{_, {DbName, GroupId}}] ->
+ delete_from_ets(FromPid, DbName, GroupId)
+ end,
+ {noreply, Server}.
+
+% Registers an updater in all three lookup tables (by pid, by name, by db).
+add_to_ets(Pid, DbName, GroupId) ->
+ true = ets:insert(couch_views_by_updater, {Pid, {DbName, GroupId}}),
+ true = ets:insert(couch_views_by_name, {{DbName, GroupId}, Pid}),
+ true = ets:insert(couch_views_by_db, {DbName, GroupId}).
+
+% Inverse of add_to_ets/3; delete_object is needed for the bag table.
+delete_from_ets(Pid, DbName, GroupId) ->
+ true = ets:delete(couch_views_by_updater, Pid),
+ true = ets:delete(couch_views_by_name, {DbName, GroupId}),
+ true = ets:delete_object(couch_views_by_db, {DbName, GroupId}).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+% Entry point for a spawned group updater (see handle_call).
+start_update_loop(RootDir, DbName, GroupId) ->
+ % wait for a notify request before doing anything. This way, we can just
+ % exit and any exits will be noticed by the callers.
+ start_update_loop(RootDir, DbName, GroupId, get_notify_pids(1000)).
+
+
+% Entry point for a spawned temp-view updater: builds a throwaway
+% single-view group over the shared temp Fd and starts its update loop.
+start_temp_update_loop(DbName, Fd, Lang, Query) ->
+ NotifyPids = get_notify_pids(1000),
+ case couch_server:open(DbName) of
+ {ok, Db} ->
+ View = #view{name="_temp", id_num=0, btree=nil, def=Query},
+ Group = #group{name="_temp",
+ db=Db,
+ views=[View],
+ current_seq=0,
+ def_lang=Lang,
+ id_btree=nil},
+ Group2 = disk_group_to_mem(Fd, Group),
+ temp_update_loop(Group2, NotifyPids);
+ Else ->
+ exit(Else)
+ end.
+
+% Update, reply to all waiters, then block for the next request; exits
+% (via get_notify_pids' timeout) when idle too long. Never writes a
+% header — temp indexes are rebuilt from scratch each time.
+temp_update_loop(Group, NotifyPids) ->
+ {ok, Group2} = update_group(Group),
+ [Pid ! {self(), {ok, Group2}} || Pid <- NotifyPids],
+ garbage_collect(),
+ temp_update_loop(Group2, get_notify_pids(100000)).
+
+% Opens the db and the group's design doc, loads (or resets) the index
+% file, and enters the update loop. Any failure to find db/doc/views
+% deletes the stale index file and exits, which callers observe via
+% their monitors. A thrown `restart` (design doc changed underneath us)
+% reopens everything while carrying over the waiting callers.
+start_update_loop(RootDir, DbName, GroupId, NotifyPids) ->
+ {Db, DefLang, Defs} =
+ case couch_server:open(DbName) of
+ {ok, Db0} ->
+ case couch_db:open_doc(Db0, GroupId) of
+ {ok, Doc} ->
+ case couch_doc:get_view_functions(Doc) of
+ none ->
+ delete_index_file(RootDir, DbName, GroupId),
+ exit({not_found, no_views_found});
+ {DefLang0, Defs0} ->
+ {Db0, DefLang0, Defs0}
+ end;
+ Else ->
+ delete_index_file(RootDir, DbName, GroupId),
+ exit(Else)
+ end;
+ Else ->
+ delete_index_file(RootDir, DbName, GroupId),
+ exit(Else)
+ end,
+ Group = open_index_file(RootDir, DbName, GroupId, DefLang, Defs),
+
+ try update_loop(Group#group{db=Db}, NotifyPids) of
+ _ -> ok
+ catch
+ restart ->
+ couch_file:close(Group#group.fd),
+ start_update_loop(RootDir, DbName, GroupId, NotifyPids ++ get_notify_pids())
+ end.
+
+% Update the index, persist a header so progress survives restarts,
+% reply to all waiters, then loop on the next batch of requests.
+update_loop(#group{fd=Fd}=Group, NotifyPids) ->
+ {ok, Group2} = update_group(Group),
+ ok = couch_file:write_header(Fd, <<$r, $c, $k, 0>>, mem_group_to_disk(Group2)),
+ [Pid ! {self(), {ok, Group2}} || Pid <- NotifyPids],
+ garbage_collect(),
+ update_loop(Group2).
+
+update_loop(Group) ->
+ update_loop(Group, get_notify_pids()).
+
+% wait for the first request to come in; exit the updater entirely if
+% nobody asks within Wait ms (callers' monitors will see the exit).
+get_notify_pids(Wait) ->
+ receive
+ {Pid, get_updated} ->
+ [Pid | get_notify_pids()]
+ after Wait ->
+ exit(wait_timeout)
+ end.
+% then keep getting all available and return.
+get_notify_pids() ->
+ receive
+ {Pid, get_updated} ->
+ [Pid | get_notify_pids()]
+ after 0 ->
+ []
+ end.
+
+% Brings a group's index up to date with its database: enumerates docs
+% changed since current_seq (process_doc may flush to disk mid-stream
+% when memory grows), maps the remaining batch through the query server,
+% and writes the resulting view rows. The query server is stopped and
+% cleared from the returned group either way.
+update_group(#group{db=Db,current_seq=CurrentSeq, views=Views}=Group) ->
+ ViewEmptyKVs = [{View, []} || View <- Views],
+ % compute on all docs modified since we last computed.
+ {ok, {UncomputedDocs, Group2, ViewKVsToAdd, DocIdViewIdKeys, NewSeq}}
+ = couch_db:enum_docs_since(
+ Db,
+ CurrentSeq,
+ fun(DocInfo, _, Acc) -> process_doc(Db, DocInfo, Acc) end,
+ {[], Group, ViewEmptyKVs, [], CurrentSeq}
+ ),
+
+ {Group3, Results} = view_compute(Group2, UncomputedDocs),
+ {ViewKVsToAdd2, DocIdViewIdKeys2} = view_insert_query_results(UncomputedDocs, Results, ViewKVsToAdd, DocIdViewIdKeys),
+ couch_query_servers:stop_doc_map(Group3#group.query_server),
+ % only rewrite the btrees when something actually changed
+ if CurrentSeq /= NewSeq ->
+ {ok, Group4} = write_changes(Group3, ViewKVsToAdd2, DocIdViewIdKeys2, NewSeq),
+ {ok, Group4#group{query_server=nil}};
+ true ->
+ {ok, Group3#group{query_server=nil}}
+ end.
+
+% Removes the whole design-index directory for a database.
+delete_index_dir(RootDir, DbName) ->
+ nuke_dir(RootDir ++ "/." ++ DbName ++ "_design").
+
+% Recursively deletes Dir and everything in it; a missing Dir is ok.
+nuke_dir(Dir) ->
+ case file:list_dir(Dir) of
+ {error, enoent} -> ok; % doesn't exist
+ {ok, Files} ->
+ lists:foreach(
+ fun(File)->
+ Full = Dir ++ "/" ++ File,
+ case file:delete(Full) of
+ ok -> ok;
+ % FIX: file:delete/1 on a directory returns eperm on some
+ % platforms but eisdir on others (e.g. Linux); handle both,
+ % otherwise subdirectories were never recursed into and the
+ % final del_dir crashed on a non-empty directory.
+ {error, eperm} ->
+ ok = nuke_dir(Full);
+ {error, eisdir} ->
+ ok = nuke_dir(Full)
+ end
+ end,
+ Files),
+ ok = file:del_dir(Dir)
+ end.
+
+% Removes one group's index file; result ignored (may not exist).
+delete_index_file(RootDir, DbName, GroupId) ->
+ file:delete(RootDir ++ "/." ++ DbName ++ GroupId ++ ".view").
+
+% Opens (or creates) a group's index file and returns an in-memory group.
+% A valid stored header whose view definitions still match the design
+% doc is reused; anything else (bad header, changed defs, fresh file)
+% resets the index from scratch.
+open_index_file(RootDir, DbName, GroupId, ViewLang, ViewDefs) ->
+ FileName = RootDir ++ "/." ++ DbName ++ GroupId ++".view",
+ case couch_file:open(FileName) of
+ {ok, Fd} ->
+ case couch_file:read_header(Fd, <<$r, $c, $k, 0>>) of
+ {ok, #group{views=Views}=Group} ->
+ % validate all the view definitions in the index are correct.
+ case same_view_def(Views, ViewDefs) of
+ true -> disk_group_to_mem(Fd, Group);
+ false -> reset_header(GroupId, Fd, ViewLang, ViewDefs)
+ end;
+ _ ->
+ reset_header(GroupId, Fd, ViewLang, ViewDefs)
+ end;
+ _ ->
+ case couch_file:open(FileName, [create]) of
+ {ok, Fd} ->
+ reset_header(GroupId, Fd, ViewLang, ViewDefs);
+ Error ->
+ throw(Error)
+ end
+ end.
+
+% True when the on-disk views and the design doc's {Name, Def} pairs
+% match pairwise, in order, with equal lengths.
+same_view_def([], []) ->
+ true;
+same_view_def(DiskViews, ViewDefs) when DiskViews == [] orelse ViewDefs == []->
+ false;
+same_view_def([#view{name=DiskName,def=DiskDef}|RestViews], [{Name, Def}|RestDefs]) ->
+ if DiskName == Name andalso DiskDef == Def ->
+ same_view_def(RestViews, RestDefs);
+ true ->
+ false
+ end.
+
+% Given a disk ready group structure, return an initialized, in-memory
+% version: the stored btree state tuples are opened against Fd, and view
+% btrees get the json collation ordering.
+disk_group_to_mem(Fd, #group{id_btree=IdState,views=Views}=Group) ->
+ {ok, IdBtree} = couch_btree:open(IdState, Fd),
+ Views2 = lists:map(
+ fun(#view{btree=BtreeState}=View) ->
+ {ok, Btree} = couch_btree:open(BtreeState, Fd, [{less, fun less_json/2}]),
+ View#view{btree=Btree}
+ end,
+ Views),
+ Group#group{fd=Fd, id_btree=IdBtree, views=Views2}.
+
+% Given an initialized, in-memory group structure, return a disk ready
+% version: btrees are collapsed to their serializable state and fd is
+% nilled out (it cannot be persisted).
+mem_group_to_disk(#group{id_btree=IdBtree,views=Views}=Group) ->
+ Views2 = lists:map(
+ fun(#view{btree=Btree}=View) ->
+ State = couch_btree:get_state(Btree),
+ View#view{btree=State}
+ end,
+ Views),
+ Group#group{fd=nil, id_btree=couch_btree:get_state(IdBtree), views=Views2}.
+
+% Truncates the index file and writes a fresh header for the given view
+% definitions (numbered in order), then returns the opened in-memory group.
+reset_header(GroupId, Fd, DefLanguage, NamedViews) ->
+ couch_file:truncate(Fd, 0),
+ {Views, _N} = lists:mapfoldl(
+ fun({Name, Definiton}, N) ->
+ {#view{name=Name, id_num=N, btree=nil, def=Definiton}, N+1}
+ end,
+ 0, NamedViews),
+ Group = #group{name=GroupId,
+ fd=Fd,
+ views=Views,
+ current_seq=0,
+ def_lang=DefLanguage,
+ id_btree=nil},
+ ok = couch_file:write_header(Fd, <<$r, $c, $k, 0>>, Group),
+ disk_group_to_mem(Fd, Group).
+
+
+
+% CouchDB view-key ordering: values of different types sort by type rank
+% (atoms < numbers < strings < arrays < objects < binaries); same-typed
+% values are compared by less_same_type/2.
+less_json(A, B) ->
+ TypeA = type_sort(A),
+ TypeB = type_sort(B),
+ if
+ TypeA == TypeB ->
+ less_same_type(A,B);
+ true ->
+ TypeA < TypeB
+ end.
+
+% Type rank. Integers and floats share a rank so they compare numerically.
+type_sort(V) when is_atom(V) -> 0;
+type_sort(V) when is_integer(V) -> 1;
+type_sort(V) when is_float(V) -> 1;
+type_sort(V) when is_list(V) -> 2;
+type_sort({obj, _}) -> 4; % must come before tuple test below
+type_sort(V) when is_tuple(V) -> 3;
+type_sort(V) when is_binary(V) -> 5.
+
+% Fixed ordering among the json-ish atoms: nil < null < false < true.
+atom_sort(nil) -> 0;
+atom_sort(null) -> 1;
+atom_sort(false) -> 2;
+atom_sort(true) -> 3;
+less_same_type(A,B) when is_atom(A) ->
+ atom_sort(A) < atom_sort(B);
+less_same_type(A,B) when is_list(A) ->
+ % strings compare via the ICU collation driver
+ couch_util:collate(A, B) < 0;
+less_same_type({obj, AProps}, {obj, BProps}) ->
+ less_props(AProps, BProps);
+less_same_type(A, B) when is_tuple(A) ->
+ less_list(tuple_to_list(A),tuple_to_list(B));
+less_same_type(A, B) ->
+ A < B.
+
+% Property keys may be atoms or strings; normalize to a string for collate.
+ensure_list(V) when is_list(V) -> V;
+ensure_list(V) when is_atom(V) -> atom_to_list(V).
+
+% Lexicographic comparison of object property lists: a strict prefix is
+% smaller; keys compare via collation, and equal keys fall through to the
+% values. less_json only reports strict "less than", so equality of two
+% values is detected by comparing in both directions.
+less_props([], [_|_]) ->
+ true;
+less_props(_, []) ->
+ false;
+less_props([{AKey, AValue}|RestA], [{BKey, BValue}|RestB]) ->
+ case couch_util:collate(ensure_list(AKey), ensure_list(BKey)) of
+ -1 -> true;
+ 1 -> false;
+ 0 ->
+ case less_json(AValue, BValue) of
+ true -> true;
+ false ->
+ case less_json(BValue, AValue) of
+ true -> false;
+ false ->
+ % values equal: decided by the remaining properties
+ less_props(RestA, RestB)
+ end
+ end
+ end.
+
+% Lexicographic comparison of arrays, same double-compare trick as above.
+less_list([], [_|_]) ->
+ true;
+less_list(_, []) ->
+ false;
+less_list([A|RestA], [B|RestB]) ->
+ case less_json(A,B) of
+ true -> true;
+ false ->
+ case less_json(B,A) of
+ true -> false;
+ false ->
+ less_list(RestA, RestB)
+ end
+ end.
+
+% enum_docs_since callback (see update_group): folds one changed doc into
+% the accumulator. The group's own design doc triggers a `restart` throw
+% when its definitions changed; other design docs are skipped; deleted
+% docs are recorded with an empty key list so their old rows get removed.
+% When process memory exceeds ?FLUSH_MAX_MEM the batch so far is computed
+% and written out, and the accumulator starts fresh.
+process_doc(Db, DocInfo, {Docs, #group{name=GroupId}=Group, ViewKVs, DocIdViewIdKeys, _LastSeq}) ->
+ % This fun computes once for each document
+ #doc_info{id=DocId, update_seq=Seq, deleted=Deleted} = DocInfo,
+ case DocId of
+ GroupId ->
+ % uh oh. this is the design doc with our definitions. See if
+ % anything in the definition changed.
+ case couch_db:open_doc(Db, DocInfo) of
+ {ok, Doc} ->
+ case couch_doc:get_view_functions(Doc) of
+ none ->
+ throw(restart);
+ {DefLang, NewDefs} ->
+ case Group#group.def_lang == DefLang andalso same_view_def(Group#group.views, NewDefs) of
+ true ->
+ % nothing changed, keeping on computing
+ {ok, {Docs, Group, ViewKVs, DocIdViewIdKeys, Seq}};
+ false ->
+ throw(restart)
+ end
+ end;
+ {not_found, deleted} ->
+ throw(restart)
+ end;
+ ?DESIGN_DOC_PREFIX ++ _ -> % we skip design docs
+ {ok, {Docs, Group, ViewKVs, DocIdViewIdKeys, Seq}};
+ _ ->
+ {Docs2, DocIdViewIdKeys2} =
+ if Deleted ->
+ {Docs, [{DocId, []} | DocIdViewIdKeys]};
+ true ->
+ {ok, Doc} = couch_db:open_doc(Db, DocInfo, [conflicts, deleted_conflicts]),
+ {[Doc | Docs], DocIdViewIdKeys}
+ end,
+ case process_info(self(), memory) of
+ {memory, Mem} when Mem > ?FLUSH_MAX_MEM ->
+ % flush: compute and persist everything accumulated so far
+ {Group1, Results} = view_compute(Group, Docs2),
+ {ViewKVs3, DocIdViewIdKeys3} = view_insert_query_results(Docs2, Results, ViewKVs, DocIdViewIdKeys2),
+ {ok, Group2} = write_changes(Group1, ViewKVs3, DocIdViewIdKeys3, Seq),
+ garbage_collect(),
+ ViewEmptyKeyValues = [{View, []} || View <- Group2#group.views],
+ {ok, {[], Group2, ViewEmptyKeyValues, [], Seq}};
+ _Else ->
+ {ok, {Docs2, Group, ViewKVs, DocIdViewIdKeys2, Seq}}
+ end
+ end.
+
+% Merges per-doc map results (one result list per doc, parallel lists)
+% into the per-view KV accumulators and records, per doc id, which
+% {ViewId, Key} pairs it emitted (used later to remove stale rows).
+view_insert_query_results([], [], ViewKVs, DocIdViewIdKeysAcc) ->
+ {ViewKVs, DocIdViewIdKeysAcc};
+view_insert_query_results([Doc|RestDocs], [QueryResults | RestResults], ViewKVs, DocIdViewIdKeysAcc) ->
+ {NewViewKVs, NewViewIdKeys} = view_insert_doc_query_results(Doc, QueryResults, ViewKVs, [], []),
+ NewDocIdViewIdKeys = [{Doc#doc.id, NewViewIdKeys} | DocIdViewIdKeysAcc],
+ view_insert_query_results(RestDocs, RestResults, NewViewKVs, NewDocIdViewIdKeys).
+
+
+% One doc's results: ResultKVs and ViewKVs are parallel (one entry per
+% view); keys are stored as {Key, DocId} so rows from different docs with
+% equal keys stay distinct in the btree.
+view_insert_doc_query_results(_Doc, [], [], ViewKVsAcc, ViewIdKeysAcc) ->
+ {lists:reverse(ViewKVsAcc), lists:reverse(ViewIdKeysAcc)};
+view_insert_doc_query_results(#doc{id=DocId}=Doc, [ResultKVs|RestResults], [{View, KVs}|RestViewKVs], ViewKVsAcc, ViewIdKeysAcc) ->
+ NewKVs = [{{Key, DocId}, Value} || {Key, Value} <- ResultKVs],
+ NewViewIdKeys = [{View#view.id_num, Key} || {Key, _Value} <- ResultKVs],
+ NewViewKVsAcc = [{View, NewKVs ++ KVs} | ViewKVsAcc],
+ NewViewIdKeysAcc = NewViewIdKeys ++ ViewIdKeysAcc,
+ view_insert_doc_query_results(Doc, RestResults, RestViewKVs, NewViewKVsAcc, NewViewIdKeysAcc).
+
+% Runs the group's map functions over Docs via the external query server,
+% lazily starting the doc-map process on first use and caching it in the
+% returned group. Returns one result list per doc.
+view_compute(Group, []) ->
+ {Group, []};
+view_compute(#group{def_lang=DefLang, query_server=QueryServerIn}=Group, Docs) ->
+ {ok, QueryServer} =
+ case QueryServerIn of
+ nil -> % doc map not started
+ Definitions = [View#view.def || View <- Group#group.views],
+ couch_query_servers:start_doc_map(DefLang, Definitions);
+ _ ->
+ {ok, QueryServerIn}
+ end,
+ {ok, Results} = couch_query_servers:map_docs(QueryServer, Docs),
+ {Group#group{query_server=QueryServer}, Results}.
+
+
+% Looks up Key in Dict, falling back to DefaultValue when absent.
+dict_find(Key, DefaultValue, Dict) ->
+ case dict:find(Key, Dict) of
+ error ->
+ DefaultValue;
+ {ok, Found} ->
+ Found
+ end.
+
+% Persists one batch of index changes: updates the docid->viewkeys btree
+% (docs with no remaining keys are removed), collects each doc's previous
+% {Key, DocId} rows per view from the lookup results so they can be
+% deleted, then applies adds and removals to every view btree and bumps
+% current_seq.
+write_changes(Group, ViewKeyValuesToAdd, DocIdViewIdKeys, NewSeq) ->
+ #group{id_btree=IdBtree} = Group,
+
+ AddDocIdViewIdKeys = [{DocId, ViewIdKeys} || {DocId, ViewIdKeys} <- DocIdViewIdKeys, ViewIdKeys /= []],
+ RemoveDocIds = [DocId || {DocId, ViewIdKeys} <- DocIdViewIdKeys, ViewIdKeys == []],
+ LookupDocIds = [DocId || {DocId, _ViewIdKeys} <- DocIdViewIdKeys],
+
+ {ok, LookupResults, IdBtree2}
+ = couch_btree:query_modify(IdBtree, LookupDocIds, AddDocIdViewIdKeys, RemoveDocIds),
+ % group the old keys of every touched doc by the view they belong to
+ KeysToRemoveByView = lists:foldl(
+ fun(LookupResult, KeysToRemoveByViewAcc) ->
+ case LookupResult of
+ {ok, {DocId, ViewIdKeys}} ->
+ lists:foldl(
+ fun({ViewId, Key}, KeysToRemoveByViewAcc2) ->
+ dict:append(ViewId, {Key, DocId}, KeysToRemoveByViewAcc2)
+ end,
+ KeysToRemoveByViewAcc, ViewIdKeys);
+ {not_found, _} ->
+ KeysToRemoveByViewAcc
+ end
+ end,
+ dict:new(), LookupResults),
+
+ Views2 = [
+ begin
+ KeysToRemove = dict_find(View#view.id_num, [], KeysToRemoveByView),
+ {ok, ViewBtree2} = couch_btree:add_remove(View#view.btree, AddKeyValues, KeysToRemove),
+ View#view{btree = ViewBtree2}
+ end
+ ||
+ {View, AddKeyValues} <- ViewKeyValuesToAdd
+ ],
+ Group2 = Group#group{views=Views2, current_seq=NewSeq, id_btree=IdBtree2},
+ {ok, Group2}.
diff --git a/src/couchdb/mod_couch.erl b/src/couchdb/mod_couch.erl
new file mode 100644
index 00000000..78c0853a
--- /dev/null
+++ b/src/couchdb/mod_couch.erl
@@ -0,0 +1,891 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mod_couch).
+
+-include("couch_db.hrl").
+
+-export([do/1, load/2, url_decode/1]).
+
+-include_lib("../couch_inets/httpd.hrl").
+
+% Decomposed request URI, filled in by parse_uri/1. All fields are
+% url-decoded strings; "" means the path segment was absent.
+-record(uri_parts,
+ {db = "",
+ doc = "",
+ attachment = "",
+ view = "",
+ querystr = ""}).
+
+% Document-level query-string arguments, produced by doc_parse_query/1.
+-record(doc_query_args,
+ {
+ options = [], % open_doc options (attachments, revs, conflicts, ...)
+ rev = "", % specific revision requested via ?rev=
+ open_revs = "" % list of revs (or the atom all) via ?open_revs=
+ }).
+
+%% do. This is the main entry point into Apache CouchDB from the HTTP server
+
+% do(Mod) — inets httpd callback. Static /_utils/ and favicon requests are
+% delegated to the next module via {real_name, ...}; everything else is
+% parsed and dispatched to do/2, with thrown errors converted to JSON
+% error responses. trap_exit is toggled around the request and restored.
+do(Mod) ->
+ #mod{request_uri=Uri,request_line=Request, parsed_header=Header,entity_body=Body} = Mod,
+ PrevTrapExit = process_flag(trap_exit, true),
+ Resp =
+ case Uri of
+ "/_utils/" ++ RestURI ->
+ % if the URI is the utils directory, then this
+ % tells mod_get (a std HTTP module) where to serve the file from
+ DocumentRoot = httpd_util:lookup(Mod#mod.config_db, document_root, ""),
+ {Path, AfterPath} = httpd_util:split_path(DocumentRoot ++ "/" ++ RestURI),
+
+ case RestURI of
+ "" ->
+ % bare /_utils/ serves the index page
+ Paths = httpd_util:split_path(DocumentRoot ++ "/index.html"),
+ {proceed, [{real_name, Paths} | Mod#mod.data]};
+ _ ->
+ case filelib:is_file(Path) of
+ true ->
+ {proceed, [{real_name, {Path, AfterPath}} | Mod#mod.data]};
+ false ->
+ case filelib:is_dir(Path) of
+ true ->
+ % TODO: this currently surfaces as an "Internal Server Error"; needs fixing.
+ {proceed, [{response,{403,"Forbidden"}}]};
+ false ->
+ {proceed, [{response,{404,"Not found"}}]}
+ end
+ end
+ end;
+ "/favicon.ico" ->
+ DocumentRoot = httpd_util:lookup(Mod#mod.config_db, document_root, ""),
+ RealName = DocumentRoot ++ "/" ++ Uri,
+ {Path, AfterPath} = httpd_util:split_path(RealName),
+ {proceed, [{real_name, {Path, AfterPath}} | Mod#mod.data]};
+ _ ->
+ couch_log:info("HTTP Request: ~s", [Request]),
+ couch_log:debug("Headers: ~p", [Header]),
+ couch_log:debug("Body: ~P", [Body, 100]),
+ % Both parse_uri/1 and do/2 signal failure by throwing; catch
+ % turns either into a JSON error response.
+ case (catch parse_uri(Uri)) of
+ {ok, Parts} ->
+ {ok, ResponseCode} =
+ case (catch do(Mod, Parts)) of
+ {ok, ResponseCode0} ->
+ {ok, ResponseCode0};
+ Error ->
+ send_error(Mod, Error)
+ end;
+ Error ->
+ {ok, ResponseCode} = send_error(Mod, Error)
+ end,
+ couch_log:info("HTTP Response Code:~p~n", [ResponseCode]),
+ % already_sent: the response body went out in chunks above
+ {proceed, [{response, {already_sent, ResponseCode, 0}} | Mod#mod.data]}
+ end,
+ process_flag(trap_exit, PrevTrapExit),
+ Resp.
+
+
+% parse_uri(RequestUri) -> {ok, #uri_parts{}}
+% Splits a request URI into db / doc / attachment / view / query-string
+% parts, recognizing the _design and _view path forms. Throws
+% {invalid_uri, Msg} for paths with too many segments.
+parse_uri(RequestUri) ->
+ % separate out the path and query portions and
+ % strip out leading slash and question mark.
+ case regexp:split(RequestUri, "\\?") of
+ {ok, [[$/|UriPath], QueryStr]} -> ok;
+ {ok, [[$/|UriPath]]} -> QueryStr = ""
+ end,
+ % lets try to parse out the UriPath.
+ {ok, UrlParts} = regexp:split(UriPath, "/"),
+
+ {DbName, Id, Attachment, View} =
+ case UrlParts of
+ [Db] ->
+ {Db, "", "", ""};
+ [Db, "_design", Doc] ->
+ {Db, "_design/" ++ Doc, "", ""};
+ [Db, "_design", Doc, Attachment0] ->
+ {Db, "_design/" ++ Doc, Attachment0, ""};
+ [Db, "_view", Doc, ViewName] ->
+ {Db, "_design/" ++ Doc, "", ViewName};
+ [Db, "_view%2f" ++ Doc, ViewName] ->
+ % percent-encoded slash variant: /db/_view%2fdoc/view
+ {Db, "_design/" ++ Doc, "", ViewName};
+ [Db, Doc] ->
+ {Db, Doc, "", ""};
+ [Db, Doc, Attachment0] ->
+ {Db, Doc, Attachment0, ""};
+ _ ->
+ throw({invalid_uri, lists:flatten(io_lib:format("Uri has too many parts: ~p", [UrlParts]))})
+ end,
+ {ok, #uri_parts{db=url_decode(DbName),
+ doc=url_decode(Id),
+ attachment=url_decode(Attachment),
+ view=url_decode(View),
+ querystr=url_decode(QueryStr)}}.
+
+% resp_json_header(Mod) — resp_json_header/2 with no options.
+resp_json_header(Mod) ->
+ resp_json_header(Mod, []).
+
+% return json doc header values list
+% Content type is application/json only when the client's Accept header
+% explicitly lists it; otherwise text/plain, which renders in browsers.
+resp_json_header(Mod, Options) ->
+ Types = string:tokens(proplists:get_value("accept", Mod#mod.parsed_header, ""), ", "),
+ case lists:member("application/json", Types) of
+ true ->
+ resp_header(Mod, Options) ++ [{"content-type","application/json"}];
+ false ->
+ resp_header(Mod, Options) ++ [{"content-type","text/plain;charset=utf-8"}]
+ end.
+
+% return doc header values list
+% Always disables caching. Unless the no_body option is given, chooses
+% chunked transfer (HTTP/1.1) or connection: close (older clients).
+resp_header(#mod{http_version=Version}, Options) ->
+ [{"cache-control", "no-cache"},
+ {"pragma", "no-cache"},
+ {"expires", httpd_util:rfc1123_date()}] ++
+ case lists:member(no_body, Options) of
+ true -> [];
+ false ->
+ case Version == "HTTP/1.1" of
+ true ->
+ [{"transfer-encoding", "chunked"}];
+ false ->
+ [{"connection", "close"}]
+ end
+ end.
+
+
+% url_decode(String) -> String
+% Decodes %XX percent-escapes; each decoded code point is expanded to its
+% UTF-8 byte sequence via xmerl_ucs. All other characters pass through.
+url_decode([$%, Hi, Lo | Tail]) ->
+ Hex = erlang:list_to_integer([Hi, Lo], 16),
+ xmerl_ucs:to_utf8([Hex]) ++ url_decode(Tail);
+url_decode([H|T]) ->
+ [H |url_decode(T)];
+url_decode([]) ->
+ [].
+
+
+% Thin wrappers over httpd_response for header / chunk / final-chunk output,
+% with debug logging of the headers.
+send_header(Mod, RespCode, Headers) ->
+ couch_log:debug("HTTP Response Headers (code ~w): ~p", [RespCode, Headers]),
+ httpd_response:send_header(Mod, RespCode, Headers).
+
+send_chunk(Mod, Data) ->
+ httpd_response:send_chunk(Mod, Data, false).
+
+send_final_chunk(Mod) ->
+ httpd_response:send_final_chunk(Mod, false).
+
+% GET / — the {"couchdb": "Welcome", "version": ...} greeting.
+show_couch_welcome(Mod) ->
+ send_header(Mod, 200, resp_json_header(Mod)),
+ send_chunk(Mod, "{\"couchdb\": \"Welcome\", "),
+ send_chunk(Mod, "\"version\": \"" ++ couch_server:get_version()),
+ send_chunk(Mod, "\"}\n"),
+ send_final_chunk(Mod),
+ {ok, 200}.
+
+
+% do(Mod, Parts) — method + path dispatch table. Clause order matters:
+% the special _-prefixed names (_all_dbs, _replicate, _restart,
+% _missing_revs, _bulk_docs, ...) must be matched before the generic
+% db/doc clauses below them. All handlers return {ok, HttpCode} or throw.
+do(#mod{method="GET"}=Mod, #uri_parts{db=""}) ->
+ show_couch_welcome(Mod);
+do(#mod{method="GET"}=Mod, #uri_parts{db="_all_dbs", doc=""}=Parts) ->
+ send_all_dbs(Mod, Parts);
+do(#mod{method="POST"}=Mod, #uri_parts{db="_replicate", doc=""}) ->
+ handle_replication_request(Mod);
+do(#mod{method="POST"}=Mod, #uri_parts{db="_restart", doc=""}) ->
+ couch_server:remote_restart(),
+ send_ok(Mod, 201);
+do(#mod{method="POST"}=Mod, #uri_parts{doc="_missing_revs"}=Parts) ->
+ handle_missing_revs_request(Mod, Parts);
+do(#mod{method="PUT"}=Mod, #uri_parts{doc=""}=Parts) ->
+ % PUT on a bare db name creates the database
+ handle_db_create(Mod, Parts);
+do(#mod{method="DELETE"}=Mod, #uri_parts{doc=""}=Parts) ->
+ handle_db_delete(Mod, Parts);
+do(#mod{method="POST"}=Mod, #uri_parts{doc="_bulk_docs"}=Parts) ->
+ handle_bulk_doc_update(Mod, Parts);
+do(#mod{method="POST"}=Mod, #uri_parts{doc=""}=Parts) ->
+ % POST to a db creates a doc with a server-assigned id
+ handle_doc_post(Mod, Parts);
+do(#mod{method="PUT"}=Mod, Parts) ->
+ handle_doc_put(Mod, Parts);
+do(#mod{method="DELETE"}=Mod, Parts) ->
+ handle_doc_delete(Mod, Parts);
+do(#mod{method="POST"}=Mod, #uri_parts{doc="_temp_view"}=Parts) ->
+ send_temp_view(Mod, Parts);
+do(#mod{method="GET"}=Mod, #uri_parts{doc="_all_docs"}=Parts) ->
+ send_all_docs(Mod, Parts);
+do(#mod{method="GET"}=Mod, #uri_parts{doc="_all_docs_by_seq"}=Parts) ->
+ send_all_docs_by_seq(Mod, Parts);
+do(#mod{method="GET"}=Mod, #uri_parts{doc=""}=Parts) ->
+ send_database_info(Mod, Parts);
+do(#mod{method=Method}=Mod, #uri_parts{attachment="",view=""}=Parts)
+ when Method == "GET" orelse Method == "HEAD" ->
+ % plain doc read: single doc unless ?open_revs= asks for multiple revs
+ #doc_query_args{open_revs=Revs} = doc_parse_query(Parts#uri_parts.querystr),
+ case Revs of
+ [] ->
+ send_doc(Mod, Parts);
+ _ ->
+ send_doc_revs(Mod, Parts)
+ end;
+do(#mod{method=Method}=Mod, #uri_parts{attachment=Att}=Parts)
+ when Att /= "", Method == "GET" orelse Method == "HEAD" ->
+ send_attachment(Mod, Parts);
+do(#mod{method="GET"}=Mod, #uri_parts{view=View}=Parts) when View /= "" ->
+ send_view(Mod, Parts).
+
+% PUT /db — create the database; 201 on success, throws on conflict/failure
+% (the throw is mapped to an HTTP error by error_to_json0/1).
+handle_db_create(Mod, #uri_parts{db=DbName}) ->
+ case couch_server:create(DbName, []) of
+ {ok, _Db} ->
+ send_ok(Mod, 201);
+ {error, database_already_exists} ->
+ Msg = io_lib:format("Database ~p already exists.", [DbName]),
+ throw({database_already_exists, Msg});
+ Error ->
+ Msg = io_lib:format("Error creating database ~p: ~p", [DbName, Error]),
+ throw({unknown_error, Msg})
+ end.
+
+% DELETE /db — 202 on success, otherwise rethrow the server's error term.
+handle_db_delete(Mod, #uri_parts{db=DbName}) ->
+ % delete with no doc specified, therefore database delete
+ case couch_server:delete(DbName) of
+ ok ->
+ send_ok(Mod, 202);
+ Error ->
+ throw(Error)
+ end.
+
+% POST /db/_bulk_docs — save many docs in one request. With new_edits=true
+% (the default) ids/revs are normalized and the new revision of each doc is
+% returned; with new_edits=false the docs are saved with their given revs
+% (replication-style) and only {"ok": true} is returned.
+handle_bulk_doc_update(#mod{entity_body=RawBody}=Mod, Parts) ->
+ Options = [], % put options here.
+ Db = open_db(Parts),
+ {obj, JsonProps} = cjson:decode(RawBody),
+ DocsArray = proplists:get_value("docs", JsonProps),
+ % convert all the doc elements to native docs
+ case proplists:get_value("new_edits", JsonProps, true) of
+ true ->
+ Docs = lists:map(
+ fun({obj, ObjProps} = JsonObj) ->
+ Doc = couch_doc:from_json_obj(JsonObj),
+
+ % assign a fresh uuid when the doc has no id
+ Id =
+ case Doc#doc.id of
+ "" -> couch_util:new_uuid();
+ Id0 -> Id0
+ end,
+ Revs =
+ case proplists:get_value("_rev", ObjProps) of
+ undefined -> [];
+ Rev -> [Rev]
+ end,
+ Doc#doc{id=Id,revs=Revs}
+ end,
+ tuple_to_list(DocsArray)),
+
+ {ok, ResultRevs} = couch_db:update_docs(Db, Docs, Options),
+
+ % output the results
+ DocResults = lists:zipwith(
+ fun(Doc, NewRev) ->
+ {obj, [{"id", Doc#doc.id}, {"rev", NewRev}]}
+ end,
+ Docs, ResultRevs),
+ send_ok(Mod, 201, [{new_revs, list_to_tuple(DocResults)}]);
+
+ false ->
+ Docs = [couch_doc:from_json_obj(JsonObj) || JsonObj <- tuple_to_list(DocsArray)],
+ ok = couch_db:save_docs(Db, Docs, Options),
+ send_ok(Mod, 201)
+ end.
+
+
+
+
+% doc_parse_query(QueryStr) -> #doc_query_args{}
+% Folds the parsed query string into doc-read arguments. Boolean flags
+% accumulate open_doc option atoms; rev= and open_revs= set their fields.
+% Unknown keys are silently ignored (unlike view_parse_query/1).
+doc_parse_query(QueryStr) ->
+ QueryList = httpd:parse_query(QueryStr),
+ lists:foldl(fun({Key,Value}, Args) ->
+ case {Key, Value} of
+ {"attachments", "true"} ->
+ Options = [attachments | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"meta", "true"} ->
+ % shorthand for revs_info + conflicts + deleted_conflicts
+ Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"revs", "true"} ->
+ Options = [revs | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"revs_info", "true"} ->
+ Options = [revs_info | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"conflicts", "true"} ->
+ Options = [conflicts | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"deleted_conflicts", "true"} ->
+ Options = [deleted_conflicts | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"rev", Rev} ->
+ Args#doc_query_args{rev=Rev};
+ {"open_revs", "all"} ->
+ Args#doc_query_args{open_revs=all};
+ {"open_revs", RevsJsonStr} ->
+ % open_revs is a JSON array of revision strings
+ JsonArray = cjson:decode(RevsJsonStr),
+ Args#doc_query_args{open_revs=tuple_to_list(JsonArray)};
+ _Else -> % unknown key value pair, ignore.
+ Args
+ end
+ end,
+ #doc_query_args{}, QueryList).
+
+
+% POST /db — create a doc with a server-generated uuid; responds 201 with
+% the new id/rev and an etag header carrying the new revision.
+handle_doc_post(#mod{entity_body=RawBody}=Mod, Parts) ->
+ Db = open_db(Parts),
+ Json = cjson:decode(RawBody),
+ Doc = couch_doc:from_json_obj(Json),
+ Id = couch_util:new_uuid(),
+ {ok, NewRevId} = couch_db:update_doc(Db, Doc#doc{id=Id, revs=[]}, []),
+ send_ok(Mod, 201, [{"id", Id}, {"rev", NewRevId}], [{"etag", NewRevId}]).
+
+% PUT /db/docid — update (or create) a doc at a known id. The previous
+% revision may come from the body's _rev or the If-Match header; if both
+% are given they must agree, otherwise the request is rejected.
+handle_doc_put(#mod{parsed_header=Headers}=Mod,
+ #uri_parts{doc=Id, querystr=QueryStr}=Parts) ->
+ #doc_query_args{options=SaveOptions} = doc_parse_query(QueryStr),
+ Db = open_db(Parts),
+ {obj, ObjProps} = Json = cjson:decode(Mod#mod.entity_body),
+ Doc = couch_doc:from_json_obj(Json),
+ Etag = proplists:get_value("if-match", Headers, ""),
+ DocRev = proplists:get_value("_rev", ObjProps, ""),
+
+ if DocRev /= "" andalso Etag /= "" andalso DocRev /= Etag ->
+ throw({invalid_request, "Document rev and etag have different values"});
+ true -> ok
+ end,
+ % empty Revs list means "create new"; a single rev means "replace that rev"
+ Revs =
+ if DocRev /= "" -> [DocRev];
+ Etag /= "" -> [Etag];
+ true -> []
+ end,
+
+ {ok, NewRevId} = couch_db:update_doc(Db, Doc#doc{id=Id, revs=Revs}, SaveOptions),
+ send_ok(Mod, 201, [{"id", Id}, {"rev", NewRevId}],[{"etag", NewRevId}]).
+
+% DELETE /db/docid — the revision to delete must be supplied via ?rev= or
+% If-Match (and must agree when both are present); responds 202 with the
+% id and the new (deletion stub) revision.
+handle_doc_delete(#mod{parsed_header=Headers}=Mod,
+ #uri_parts{doc=Id, querystr=QueryStr}=Parts) ->
+ Db = open_db(Parts),
+ #doc_query_args{rev=QueryRev} = doc_parse_query(QueryStr),
+ Etag = proplists:get_value("if-match", Headers, ""),
+ RevToDelete =
+ case {QueryRev, Etag} of
+ {"", ""} ->
+ throw({missing_rev, "Document rev/etag must be specified to delete"});
+ {_, ""} ->
+ QueryRev;
+ {"", _} ->
+ Etag;
+ _ when QueryRev == Etag ->
+ Etag;
+ _ ->
+ throw({invalid_request, "Document rev and etag have different values"})
+ end,
+ {ok, NewRev} = couch_db:delete_doc(Db, Id, [RevToDelete]),
+ send_ok(Mod, 202, [{"id", Id}, {"rev", NewRev}]).
+
+
+% View/_all_docs query arguments, produced by view_parse_query/1.
+% nil and <<>> act as "unbounded from the start/end" sentinels in the
+% view key collation (nil sorts low, <<>> sorts high).
+-record(query_args,
+ {start_key = nil,
+ end_key = <<>>,
+ count = 10000000000, % a huge huge default number. Picked so we don't have
+ % to do different logic for when there is no count limit
+ update = true,
+ direction = fwd, % fwd | rev
+ start_docid = nil,
+ end_docid = <<>>,
+ skip = 0
+ }).
+
+% Swap the low/high sentinel when the iteration direction is reversed;
+% real keys are left untouched.
+reverse_key_default(nil) -> <<>>;
+reverse_key_default(<<>>) -> nil;
+reverse_key_default(Key) -> Key.
+
+% view_parse_query(QueryStr) -> #query_args{}
+% Folds the parsed query string into view-fold arguments. A negative
+% count or descending=true flips the iteration direction and swaps the
+% start/end sentinels. Unknown keys throw {query_parse_error, Msg}.
+view_parse_query(QueryStr) ->
+ QueryList = httpd:parse_query(QueryStr),
+ lists:foldl(fun({Key,Value}, Args) ->
+ case {Key, Value} of
+ {"", _} ->
+ Args;
+ {"key", Value} ->
+ % exact-key query: start and end collapse to the same key
+ JsonKey = cjson:decode(Value),
+ Args#query_args{start_key=JsonKey,end_key=JsonKey};
+ {"startkey_docid", DocId} ->
+ Args#query_args{start_docid=DocId};
+ {"startkey", Value} ->
+ Args#query_args{start_key=cjson:decode(Value)};
+ {"endkey", Value} ->
+ Args#query_args{end_key=cjson:decode(Value)};
+ {"count", Value} ->
+ case (catch list_to_integer(Value)) of
+ Count when is_integer(Count) ->
+ if Count < 0 ->
+ % negative count means "walk backwards from here"
+ Args#query_args {
+ direction =
+ if Args#query_args.direction == rev -> fwd;
+ true -> rev
+ end,
+ count=Count,
+ start_key = reverse_key_default(Args#query_args.start_key),
+ start_docid = reverse_key_default(Args#query_args.start_docid),
+ end_key = reverse_key_default(Args#query_args.end_key),
+ end_docid = reverse_key_default(Args#query_args.end_docid)};
+ true ->
+ Args#query_args{count=Count}
+ end;
+ _Error ->
+ Msg = io_lib:format("Bad URL query value, number expected: count=~s", [Value]),
+ throw({query_parse_error, Msg})
+ end;
+ {"update", "false"} ->
+ Args#query_args{update=false};
+ {"descending", "true"} ->
+ case Args#query_args.direction of
+ fwd ->
+ Args#query_args {
+ direction = rev,
+ start_key = reverse_key_default(Args#query_args.start_key),
+ start_docid = reverse_key_default(Args#query_args.start_docid),
+ end_key = reverse_key_default(Args#query_args.end_key),
+ end_docid = reverse_key_default(Args#query_args.end_docid)};
+ _ ->
+ Args %already reversed
+ end;
+ {"skip", Value} ->
+ case (catch list_to_integer(Value)) of
+ Count when is_integer(Count) ->
+ Args#query_args{skip=Count};
+ _Error ->
+ Msg = lists:flatten(io_lib:format(
+ "Bad URL query value, number expected: skip=~s", [Value])),
+ throw({query_parse_error, Msg})
+ end;
+ _ -> % unknown key
+ Msg = lists:flatten(io_lib:format(
+ "Bad URL query key:~s", [Key])),
+ throw({query_parse_error, Msg})
+ end
+ end,
+ #query_args{}, QueryList).
+
+
+% returns db, otherwise throws exception. Note: no {ok,_}.
+open_db(#uri_parts{db=DbName}) ->
+ open_db(DbName);
+open_db(DbName) when is_list(DbName)->
+ case couch_server:open(DbName) of
+ {ok, Db} ->
+ Db;
+ Error ->
+ throw(Error)
+ end.
+
+% POST /db/_missing_revs — replication helper. Body is {docid: [revs]};
+% responds with the subset of revs the database does not have.
+handle_missing_revs_request(#mod{entity_body=RawJson}=Mod, Parts) ->
+ Db = open_db(Parts),
+ {obj, JsonDocIdRevs} = cjson:decode(RawJson),
+ DocIdRevs = [{Id, tuple_to_list(Revs)} || {Id, Revs} <- JsonDocIdRevs],
+ {ok, Results} = couch_db:get_missing_revs(Db, DocIdRevs),
+ JsonResults = [{Id, list_to_tuple(Revs)} || {Id, Revs} <- Results],
+ send_json(Mod, 200, {obj, [{missing_revs, {obj, JsonResults}}]}).
+
+% POST /_replicate — body carries "source", "target" and optional
+% "options"; the replicator's JSON result is merged into the 200 reply.
+handle_replication_request(#mod{entity_body=RawJson}=Mod) ->
+ {obj, Props} = cjson:decode(RawJson),
+ Src = proplists:get_value("source", Props),
+ Tgt = proplists:get_value("target", Props),
+ {obj, Options} = proplists:get_value("options", Props, {obj, []}),
+ {ok, {obj, JsonResults}} = couch_rep:replicate(Src, Tgt, Options),
+ send_ok(Mod, 200, JsonResults).
+
+
+
+% GET /db — reply with db name, doc count and last update sequence as a
+% hand-built JSON string.
+send_database_info(Mod, #uri_parts{db=DbName}=Parts) ->
+ Db = open_db(Parts),
+ {ok, InfoList} = couch_db:get_db_info(Db),
+ ok = send_header(Mod, 200, resp_json_header(Mod)),
+ DocCount = proplists:get_value(doc_count, InfoList),
+ LastUpdateSequence = proplists:get_value(last_update_seq, InfoList),
+ ok = send_chunk(Mod, "{\"db_name\": \"" ++ DbName ++
+ "\", \"doc_count\":" ++ integer_to_list(DocCount) ++
+ ", \"update_seq\":" ++ integer_to_list(LastUpdateSequence)++"}"),
+ ok = send_final_chunk(Mod),
+ {ok, 200}.
+
+% GET/HEAD /db/docid — send a single document as JSON. Without ?rev= the
+% latest revision is served with etag support (304 on If-None-Match hit);
+% with ?rev= that exact revision is opened. Options other than [] disable
+% the etag, since they change the response body for the same revision.
+send_doc(#mod{parsed_header=Headers}=Mod,
+ #uri_parts{doc=DocId,querystr=QueryStr}=Parts) ->
+ Db = open_db(Parts),
+ #doc_query_args{rev=Rev, options=Options} = doc_parse_query(QueryStr),
+ case Rev of
+ "" ->
+ % open most recent rev
+ case couch_db:open_doc(Db, DocId, Options) of
+ {ok, #doc{revs=[DocRev|_]}=Doc} ->
+ Etag = proplists:get_value("if-none-match", Headers),
+ if Options == [] andalso Etag == DocRev ->
+ ok = send_header(Mod, 304,
+ resp_header(Mod, [no_body]) ++ [{"etag", DocRev}]),
+ {ok, 304};
+ true ->
+ send_json(Mod, 200, couch_doc:to_json_obj(Doc, Options),
+ if Options == [] -> [{"etag", DocRev}]; true -> [] end)
+ end;
+ Error ->
+ throw(Error)
+ end;
+ _ ->
+ % open a specific rev (deletions come back as stubs)
+ case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
+ {ok, [{ok, Doc}]} ->
+ send_json(Mod, 200, couch_doc:to_json_obj(Doc, Options), [{"etag", Rev}]);
+ {ok, [Else]} ->
+ throw(Else)
+ end
+ end.
+
+% GET /db/docid?open_revs=... — stream a JSON array with one entry per
+% requested revision: {"ok": doc} for found revs, {"missing": rev} for
+% unknown ones.
+send_doc_revs(Mod, #uri_parts{doc=DocId,querystr=QueryStr}=Parts) ->
+ Db = open_db(Parts),
+ #doc_query_args{options=Options, open_revs=Revs} = doc_parse_query(QueryStr),
+ {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
+ ok = send_header(Mod, 200, resp_json_header(Mod)),
+ ok = send_chunk(Mod, "["),
+ % We loop through the docs. The first time through the separator
+ % is whitespace, then a comma on subsequent iterations.
+ lists:foldl(
+ fun(Result, AccSeparator) ->
+ case Result of
+ {ok, Doc} ->
+ JsonDoc= couch_doc:to_json_obj(Doc, Options),
+ ok = send_chunk(Mod, AccSeparator ++ lists:flatten(cjson:encode({obj, [{ok, JsonDoc}]})));
+ {{not_found, missing}, RevId} ->
+ Json = {obj, [{"missing", RevId}]},
+ ok = send_chunk(Mod, AccSeparator ++ lists:flatten(cjson:encode(Json)))
+ end,
+ "," % AccSeparator now has a comma
+ end,
+ "", Results),
+ ok = send_chunk(Mod, "]"),
+ ok = send_final_chunk(Mod),
+ {ok, 200}.
+
+% GET/HEAD /db/docid/attachment — stream an attachment's bytes with its
+% stored content type; HEAD sends headers only.
+send_attachment(#mod{method=Method} = Mod,
+ #uri_parts{doc=DocId,attachment=Attachment}=Parts) ->
+ Db = open_db(Parts),
+ case couch_db:open_doc(Db, DocId, []) of
+ {ok, #doc{attachments=Attachments}} ->
+ case proplists:get_value(Attachment, Attachments) of
+ undefined ->
+ throw({not_found, missing});
+ {Type, Bin} ->
+ % NOTE(review): Type (a content-type string) is passed as the
+ % Options argument of resp_header/2; lists:member(no_body, Type)
+ % is always false for a string so it happens to work, but this
+ % looks unintended — confirm whether [] was meant.
+ ok = send_header(Mod, 200, resp_header(Mod, Type) ++
+ [{"content-type", Type},
+ {"content-length", integer_to_list(couch_doc:bin_size(Bin))}]),
+ case Method of
+ "GET" ->
+ % stream the binary in segments
+ couch_doc:bin_foldl(Bin,
+ fun(BinSegment, []) ->
+ ok = send_chunk(Mod, BinSegment),
+ {ok, []}
+ end,
+ []);
+ "HEAD" ->
+ ok
+ end,
+ ok = send_final_chunk(Mod),
+ {ok, 200}
+ end;
+ Error ->
+ throw(Error)
+ end.
+
+
+% send_json(Mod, Code, JsonData[, Headers]) -> {ok, Code}
+% Encode JsonData with cjson and send it as the full response body;
+% HEAD requests get headers only.
+send_json(Mod, Code, JsonData) ->
+ send_json(Mod, Code, JsonData, []).
+
+send_json(#mod{method=Method}=Mod, Code, JsonData, Headers) ->
+ case Method of
+ "HEAD" ->
+ ok = send_header(Mod, Code, resp_json_header(Mod, [no_body]) ++ Headers);
+ _ ->
+ ok = send_header(Mod, Code, resp_json_header(Mod) ++ Headers),
+ ok = send_chunk(Mod, lists:flatten([cjson:encode(JsonData) | "\n"])),
+ ok = send_final_chunk(Mod)
+ end,
+ {ok, Code}.
+
+% send_ok — convenience wrappers that reply {"ok": true, ...extra props}.
+send_ok(Mod, Code) ->
+ send_ok(Mod, Code, []).
+
+send_ok(Mod, Code, AdditionalProps) ->
+ send_ok(Mod, Code, AdditionalProps, []).
+
+send_ok(Mod, Code, AdditionalProps, AdditionalHeaders) ->
+ send_json(Mod, Code, {obj, [{ok, true}|AdditionalProps]}, AdditionalHeaders).
+
+
+% make_view_fold_fun(Mod, QueryArgs) -> FoldFun
+% Builds the fold function used by the view senders. Accumulator is
+% {RowsLeft, SkipLeft, header_sent | header_not_sent, ReversedRows}.
+% The positive-count fun streams rows as they arrive; the negative-count
+% fun (used when count < 0, i.e. walking backwards) buffers rows in
+% AccRevRows so they can be emitted in forward order by finish_view_fold.
+make_view_fold_fun(Mod, QueryArgs) ->
+ #query_args{
+ end_key=EndKey,
+ end_docid=EndDocId,
+ direction=Dir,
+ count=Count
+ } = QueryArgs,
+
+ % end-of-range test, oriented by iteration direction
+ PassedEndFun =
+ case Dir of
+ fwd ->
+ fun(ViewKey, ViewId) ->
+ couch_view:less_json({EndKey,EndDocId}, {ViewKey,ViewId})
+ end;
+ rev->
+ fun(ViewKey, ViewId) ->
+ couch_view:less_json({ViewKey, ViewId}, {EndKey,EndDocId})
+ end
+ end,
+
+ NegCountFun =
+ fun(Id, Key, Value, Offset, TotalViewCount, {AccCount, AccSkip, HeaderSent, AccRevRows}) ->
+ PassedEnd = PassedEndFun(Key, Id),
+ case {PassedEnd, AccCount, AccSkip, HeaderSent} of
+ {true,_,_,_} ->
+ % The stop key has been passed, stop looping.
+ {stop, {AccCount, AccSkip, HeaderSent, AccRevRows}};
+ {_,0,_,_} ->
+ {stop, {0, 0, HeaderSent, AccRevRows}}; % we've done "count" rows, stop folding
+ {_,_,AccSkip,_} when AccSkip > 0 ->
+ {ok, {AccCount, AccSkip - 1, HeaderSent, AccRevRows}};
+ {_,AccCount,_,header_sent} ->
+ JsonObj = {obj, [{"key",Key},{"id",Id},{"value",Value}]},
+ {ok, {AccCount + 1, 0, header_sent, [cjson:encode(JsonObj), "," | AccRevRows]}};
+ {_,_,_,header_not_sent} ->
+ ok = send_header(Mod, 200, resp_json_header(Mod)),
+ % recompute the forward-order offset for the reversed walk
+ Offset2= TotalViewCount - Offset -
+ lists:min([TotalViewCount - Offset, - AccCount]),
+ JsonBegin = io_lib:format("{\"total_rows\":~w,\"offset\":~w,\"rows\":[",
+ [TotalViewCount, Offset2]),
+ JsonObj = {obj, [{"key",Key},{"id",Id},{"value",Value}]},
+ ok = send_chunk(Mod, lists:flatten(JsonBegin)),
+ {ok, {AccCount + 1, 0, header_sent, [cjson:encode(JsonObj) | AccRevRows]}}
+ end
+ end,
+
+ PosCountFun =
+ fun(Id, Key, Value, Offset, TotalViewCount, {AccCount, AccSkip, HeaderSent, AccRevRows}) ->
+ PassedEnd = PassedEndFun(Key, Id),
+ case {PassedEnd, AccCount, AccSkip, HeaderSent} of
+ {true,_,_,_} ->
+ % The stop key has been passed, stop looping.
+ {stop, {AccCount, AccSkip, HeaderSent, AccRevRows}};
+ {_,0,_,_} ->
+ {stop, {0, 0, HeaderSent, AccRevRows}}; % we've done "count" rows, stop folding
+ {_,_,AccSkip,_} when AccSkip > 0 ->
+ {ok, {AccCount, AccSkip - 1, HeaderSent, AccRevRows}};
+ {_,AccCount,_,header_sent} when (AccCount > 0) ->
+ JsonObj = {obj, [{"key",Key},{"id",Id},{"value",Value}]},
+ ok = send_chunk(Mod, "," ++ lists:flatten(cjson:encode(JsonObj))),
+ {ok, {AccCount - 1, 0, header_sent, AccRevRows}};
+ {_,_,_,header_not_sent} ->
+ % first row: emit the JSON preamble together with the row
+ ok = send_header(Mod, 200, resp_json_header(Mod)),
+ JsonBegin = io_lib:format("{\"total_rows\":~w,\"offset\":~w,\"rows\":[",
+ [TotalViewCount, Offset]),
+ JsonObj = {obj, [{"key",Key},{"id",Id},{"value",Value}]},
+ ok = send_chunk(Mod, lists:flatten(JsonBegin ++ cjson:encode(JsonObj))),
+ {ok, {AccCount - 1, 0, header_sent, AccRevRows}}
+ end
+ end,
+ case Count > 0 of
+ true -> PosCountFun;
+ false -> NegCountFun
+ end.
+
+% finish_view_fold(Mod, FoldResult) -> {ok, 200}
+% Closes out a view response: emits an empty result object if no row was
+% ever sent, otherwise flushes any buffered reverse-order rows and the
+% closing "]}" of the JSON. Non-ok fold results are rethrown.
+finish_view_fold(Mod, FoldResult) ->
+ case FoldResult of
+ {ok, TotalRows, {_, _, header_not_sent, _}} ->
+ % nothing found in the view, nothing has been returned
+ % send empty view
+ ok = send_header(Mod, 200, resp_json_header(Mod)),
+ JsonEmptyView = lists:flatten(
+ io_lib:format("{\"total_rows\":~w,\"rows\":[]}\n",
+ [TotalRows])),
+ ok = send_chunk(Mod, JsonEmptyView),
+ ok = send_final_chunk(Mod),
+ {ok, 200};
+ {ok, _TotalRows, {_, _, header_sent, AccRevRows}} ->
+ % end the view
+ ok = send_chunk(Mod, lists:flatten(AccRevRows) ++ "]}\n"),
+ ok = send_final_chunk(Mod),
+ {ok, 200};
+ Error ->
+ throw(Error)
+ end.
+
+
+% POST /db/_temp_view — run the request body as an ad-hoc map function;
+% the content-type (charset stripped) selects the view language.
+send_temp_view(#mod{entity_body=Body,parsed_header=Headers}=Mod,
+ #uri_parts{db=DbName, querystr=QueryStr}) ->
+ #query_args{
+ start_key=StartKey,
+ count=Count,
+ skip=SkipCount,
+ direction=Dir,
+ start_docid=StartDocId} = QueryArgs = view_parse_query(QueryStr),
+ Type0 = proplists:get_value("content-type", Headers, "text/javascript"),
+ % remove the charset ("...;charset=foo") if its there
+ {ok, [Type|_]} = regexp:split(Type0, ";"),
+ View = {temp, DbName, Type, Body},
+ Start = {StartKey, StartDocId},
+ FoldlFun = make_view_fold_fun(Mod, QueryArgs),
+ FoldAccInit = {Count, SkipCount, header_not_sent, []},
+ FoldResult = couch_view:fold(View, Start, Dir, FoldlFun, FoldAccInit),
+ finish_view_fold(Mod, FoldResult).
+
+% GET /db/_view/designdoc/viewname — fold a persistent view and stream
+% the rows through make_view_fold_fun/finish_view_fold.
+send_view(Mod, #uri_parts{db=DbName, doc=DesignDocId, view=ViewId, querystr=QueryStr}) ->
+ #query_args{
+ start_key=StartKey,
+ count=Count,
+ skip=SkipCount,
+ direction=Dir,
+ start_docid=StartDocId} = QueryArgs = view_parse_query(QueryStr),
+ View = {DbName, DesignDocId, ViewId},
+ Start = {StartKey, StartDocId},
+ FoldlFun = make_view_fold_fun(Mod, QueryArgs),
+ FoldAccInit = {Count, SkipCount, header_not_sent, []},
+ Result = couch_view:fold(View, Start, Dir, FoldlFun, FoldAccInit),
+ finish_view_fold(Mod, Result).
+
+
+% GET /db/_all_docs — enumerate docs by id, presenting each as a view row
+% {key: Id, id: Id, value: {rev: Rev}}; deleted docs are skipped.
+send_all_docs(Mod, #uri_parts{querystr=QueryStr}=Parts) ->
+ Db = open_db(Parts),
+ #query_args{
+ start_key=StartKey,
+ start_docid=StartDocId,
+ count=Count,
+ skip=SkipCount,
+ direction=Dir} = QueryArgs = view_parse_query(QueryStr),
+ {ok, Info} = couch_db:get_db_info(Db),
+ TotalRowCount = proplists:get_value(doc_count, Info),
+
+ % here the doc id doubles as the view key; startkey wins over startkey_docid
+ StartId =
+ if is_list(StartKey) -> StartKey;
+ true -> StartDocId
+ end,
+
+ FoldlFun = make_view_fold_fun(Mod, QueryArgs),
+ % adapt the doc enumerator's callback shape to the view fold fun's
+ AdapterFun =
+ fun(#full_doc_info{id=Id}=FullDocInfo, Offset, Acc) ->
+ case couch_doc:to_doc_info(FullDocInfo) of
+ #doc_info{deleted=false, rev=Rev} ->
+ FoldlFun(Id, Id, {obj, [{rev, Rev}]}, Offset, TotalRowCount, Acc);
+ #doc_info{deleted=true} ->
+ {ok, Acc}
+ end
+ end,
+ {ok, FoldResult} = couch_db:enum_docs(Db, StartId, Dir, AdapterFun,
+ {Count, SkipCount, header_not_sent, []}),
+ finish_view_fold(Mod, {ok, TotalRowCount, FoldResult}).
+
+% GET /db/_all_docs_by_seq — enumerate docs in update-sequence order.
+% Rows are keyed by update sequence and carry rev plus any conflict /
+% deleted-conflict / deleted markers in the value.
+send_all_docs_by_seq(Mod, #uri_parts{querystr=QueryStr}=Parts) ->
+ Db = open_db(Parts),
+ QueryArgs = view_parse_query(QueryStr),
+ #query_args{
+ start_key=StartKey,
+ count=Count,
+ skip=SkipCount,
+ direction=Dir} = QueryArgs,
+
+ {ok, Info} = couch_db:get_db_info(Db),
+ TotalRowCount = proplists:get_value(doc_count, Info),
+
+ FoldlFun = make_view_fold_fun(Mod, QueryArgs),
+
+ % map the nil/<<>> sentinels onto numeric sequence bounds
+ StartKey2 =
+ case StartKey of
+ nil -> 0;
+ <<>> -> 100000000000;
+ StartKey when is_integer(StartKey) -> StartKey
+ end,
+ {ok, FoldResult} =
+ couch_db:enum_docs_since(Db, StartKey2, Dir,
+ fun(DocInfo, Offset, Acc) ->
+ #doc_info{
+ id=Id,
+ rev=Rev,
+ update_seq=UpdateSeq,
+ deleted=Deleted,
+ conflict_revs=ConflictRevs,
+ deleted_conflict_revs=DelConflictRevs} = DocInfo,
+ % build the row value, including only the non-empty annotations
+ Json =
+ {obj,
+ [{"rev", Rev}] ++
+ case ConflictRevs of
+ [] -> [];
+ _ -> [{"conflicts", list_to_tuple(ConflictRevs)}]
+ end ++
+ case DelConflictRevs of
+ [] -> [];
+ _ -> [{"deleted_conflicts", list_to_tuple(DelConflictRevs)}]
+ end ++
+ case Deleted of
+ true -> [{"deleted", true}];
+ false -> []
+ end
+ },
+ FoldlFun(Id, UpdateSeq, Json, Offset, TotalRowCount, Acc)
+ end, {Count, SkipCount, header_not_sent, []}),
+ finish_view_fold(Mod, {ok, TotalRowCount, FoldResult}).
+
+
+
+% GET /_all_dbs — reply with a JSON array of all database names.
+send_all_dbs(Mod, _Parts)->
+ {ok, DbNames} = couch_server:all_databases(),
+ ok = send_header(Mod, 200, resp_json_header(Mod)),
+ ok = send_chunk(Mod, lists:flatten(cjson:encode(list_to_tuple(DbNames)))),
+ ok = send_final_chunk(Mod),
+ {ok, 200}.
+
+% Convert any thrown/caught error term into a JSON error response.
+send_error(Mod, Error) ->
+ {Json, Code} = error_to_json(Error),
+ couch_log:info("HTTP Error (code ~w): ~p", [Code, Json]),
+ send_json(Mod, Code, Json).
+
+
+
+% convert an error response into a json object and http error code.
+% convert an error response into a json object and http error code.
+% The reason is rendered as a string when possible, otherwise via ~p.
+error_to_json(Error) ->
+ {HttpCode, Atom, Reason} = error_to_json0(Error),
+ Reason1 =
+ case (catch io_lib:format("~s", [Reason])) of
+ Reason0 when is_list(Reason0) ->
+ lists:flatten(Reason0);
+ _ ->
+ lists:flatten(io_lib:format("~p", [Reason])) % else term to text
+ end,
+ Json =
+ {obj,
+ [{error, atom_to_list(Atom)},
+ {reason, Reason1}]},
+ {Json, HttpCode}.
+
+% error_to_json0(Error) -> {HttpCode, ErrorAtom, Reason}
+% Known error terms get specific status codes; anything else is a 500.
+error_to_json0(not_found) ->
+ {404, not_found, "missing"};
+error_to_json0({missing_rev, Msg}) ->
+ {412, missing_rev, Msg};
+error_to_json0({not_found, Reason}) ->
+ {404, not_found, Reason};
+error_to_json0({database_already_exists, Reason}) ->
+ {409, database_already_exists, Reason};
+error_to_json0(conflict) ->
+ {412, conflict, "Update conflict"};
+error_to_json0({doc_validation, Msg}) ->
+ {406, doc_validation, Msg};
+error_to_json0({Id, Reason}) when is_atom(Id) ->
+ {500, Id, Reason};
+error_to_json0(Error) ->
+ {500, error, Error}.
+
+%%
+%% Configuration
+%%
+
+%% load
+
+% load/2 — inets httpd configuration-directive callback; maps the
+% placeholder "Foo Bar" directive to a script_alias entry.
+load("Foo Bar", []) ->
+ {ok, [], {script_alias, {"foo", "bar"}}}.
diff --git a/src/fulltext/lucene/CouchConfig.java b/src/fulltext/lucene/CouchConfig.java
new file mode 100644
index 00000000..5f4d84ce
--- /dev/null
+++ b/src/fulltext/lucene/CouchConfig.java
@@ -0,0 +1,62 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+import java.util.*;
+
+
+/**
+ * Holds the indexer's configuration: the set of configuration documents
+ * (keyed by their "__couchdb_database" field) and the last update
+ * sequence the indexer has processed.
+ */
+class CouchConfig
+{
+/* private CouchDocument[] documents;
+*/
+ // database name -> com.fourspaces.couchdb.Document
+ private Hashtable documents;
+ // last CouchDB update sequence seen by the indexer
+ private long updateSequence;
+
+ public CouchConfig()
+ {
+ documents = new Hashtable();
+ updateSequence = 0;
+ }
+
+ public void setUpdateSequence(long newUpdateSequence)
+ {
+ updateSequence = newUpdateSequence;
+ }
+
+ public long getUpdateSequence()
+ {
+ return updateSequence;
+ }
+
+ // Registers a config document under its "__couchdb_database" field;
+ // documents without that field are silently ignored.
+ public void addDocument(com.fourspaces.couchdb.Document document)
+ {
+ String field;
+// System.out.println(document);
+ field = document.getString("__couchdb_database");
+// System.out.println(field);
+ if(field != null) {
+ documents.put(field, document);
+ }
+ }
+
+ public Hashtable getDocuments()
+ {
+ return documents;
+ }
+
+ // True when a config document exists for the given database name.
+ public boolean hasDb(String db)
+ {
+ return documents.containsKey(db);
+ }
+}
diff --git a/src/fulltext/lucene/CouchDbDirFilter.java b/src/fulltext/lucene/CouchDbDirFilter.java
new file mode 100644
index 00000000..6b002ce5
--- /dev/null
+++ b/src/fulltext/lucene/CouchDbDirFilter.java
@@ -0,0 +1,30 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+/*
+
+LuceneIndexer creates a Lucene index by incrementally fetching changes from an
+Apache CouchDB server. It is managed by the Apache CouchDB daemon.
+
+*/
+import java.io.*;
+
+/** FilenameFilter that accepts only regular files (skips subdirectories). */
+class CouchDbDirFilter implements FilenameFilter
+{
+ public boolean accept(File dir, String name)
+ {
+ return new File(dir, name).isFile();
+ }
+}
diff --git a/src/fulltext/lucene/LuceneIndexer.java b/src/fulltext/lucene/LuceneIndexer.java
new file mode 100644
index 00000000..07040610
--- /dev/null
+++ b/src/fulltext/lucene/LuceneIndexer.java
@@ -0,0 +1,355 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+/*
+
+LuceneIndexer creates a Lucene index by incrementally fetching changes from an
+Apache CouchDB server. It is managed by the Apache CouchDB daemon.
+
+I know this is Java and there should be a lot of OO going on, but it
+isn't. Sorry about that.
+
+*/
+
+//basics
+import java.io.*;
+import java.net.*;
+import java.util.*;
+import java.nio.channels.FileChannel;
+import java.nio.ByteBuffer;
+import java.lang.reflect.*;
+
+
+//couchdb4j
+//import com.fourspaces.couchdb.*;
+
+//xml
+import org.xml.sax.*;
+import org.xml.sax.helpers.*;
+import javax.xml.parsers.*;
+
+//lucene
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.SimpleAnalyzer;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Hits;
+import org.apache.lucene.search.TermQuery;
+
+public class LuceneIndexer
+{
+ private static CouchConfig configuration;
+ private static com.fourspaces.couchdb.Session s;
+
+ public static void main(String[] args) throws Exception
+ {
+/* BufferedWriter out = new BufferedWriter(new FileWriter("LuceneIndexer.log"));
+ out.write("indexer started");out.flush();
+*/
+ String db;
+/* out.write("indexer about to read config");out.flush();*/
+ connect();
+ readConfig();
+
+/* out.write("indexer read config: " + configuration.getDocuments());out.flush();*/
+
+ BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
+ try {
+ while((db = in.readLine()) != null) {
+/* out.write("indexer got a poke");out.flush();*/
+
+ if(db.equals("couchdbfulltext")) {
+/* System.out.println("refresh config");
+
+*/ readConfig();
+/* out.write("indexer refreshed config");out.flush();*/
+
+ }
+
+/* out.write("indexer has table: " + db + "?");*/
+
+ if(!configuration.hasDb(db)) {
+/* out.write("... no wait for input");out.flush();*/
+
+ continue;
+ }
+
+/* out.write("yeppa");out.flush();*/
+
+
+ createIndexDir(db);
+ indexChanges(db);
+/* System.out.println(db + " to revision: " + revision);*/
+ }
+ } catch (IOException e) {
+/* out.write("indexer caught IO exception: " + e.getMessage());out.flush();*/
+
+ }
+/* System.out.println("Lucene Indexer stopped");*/
+/* out.write("indexer stopped");out.flush();*/
+
+/* out.close();*/
+
+ }
+
+ public static void connect() throws Exception
+ {
+ s = null;
+ s = new com.fourspaces.couchdb.Session("localhost", 5984);
+ }
+
+ public static void readConfig() throws Exception
+ {
+ //get all docs in /$ftconfig
+ //return array of config docs
+ configuration = null;
+ configuration = new CouchConfig();
+ com.fourspaces.couchdb.Database db = s.getDatabase("couchdbfulltext");
+ com.fourspaces.couchdb.ViewResults changedDocuments = db.getAllDocuments(0);
+
+ for (com.fourspaces.couchdb.Document d: changedDocuments.getResults()) {
+ configuration.addDocument(d);
+ }
+
+/* for(int i = 0; i < changedDocuments.length; i++) {
+ CouchDocument document = changedDocuments[i];
+ document = loadDocumentData(document, "couchdbfulltext");
+ configuration.addDocument(document);
+ }
+*/ }
+
+ public static void indexChanges(String db) throws Exception
+ {
+// System.out.println("Updating index for '" + db + "' from revision: " + revision);
+ int sequence = -1;
+ try {
+ com.fourspaces.couchdb.Database _db = s.getDatabase(db);
+ sequence = _db.getUpdateSeq();
+ com.fourspaces.couchdb.ViewResults changedDocuments = _db.getAllDocuments(sequence);
+
+ if(changedDocuments.size() == 0) {
+// System.out.println("Index is up-to date at sequence_id: " + revision);
+ return;
+ }
+
+ boolean delete = false;
+
+ for (com.fourspaces.couchdb.Document d: changedDocuments.getResults()) {
+ delete = d.getBoolean("delete");
+ documentAddToIndex(db, d, delete);
+ }
+/* for(int idx = 0; idx < changedDocuments.length; idx++) {
+ com.fourspaces.couchdb.Document document = changedDocuments[idx];
+ sequence = document.getUpdateSequence();
+ delete = document.getDelete();
+// System.out.println("Doing: " + document + " with squence: " + sequence + " delete: "+document.getDelete() + " hash code:" + document.hashCode());
+
+ document = loadDocumentData(document, db);
+ // System.out.println(changedDocuments[idx]);
+ // remove from lucene if exists, add to lucene.
+
+ documentAddToIndex(db, document, delete);
+ }
+*/ // CouchDocument document = getDocumentByRevision(db, revision);
+ setRevisionForDb(db, sequence);
+ } catch(Exception e) {
+// System.out.println("Warning: " + db + " says: " + e.getMessage());
+ }
+ }
+
+ public static void documentAddToIndex(String db, com.fourspaces.couchdb.Document document, boolean delete) throws IOException
+ {
+ String index = "Lucene/Index/" + db;
+ boolean create = true;
+
+/* System.out.println("DEBUG: delete: " + delete);*/
+/* System.out.println("DEBUG: create index? " + create);*/
+
+ if(IndexReader.indexExists(index)) {
+ create = false;
+ Term term = new Term("__couchdb_document_id", document.getId());
+/* System.out.println("DEBUG: Deleting: " + document + " with term:" + term);*/
+ IndexReader reader = IndexReader.open(index);
+ reader.deleteDocuments(term);
+/* System.out.println("DEBUG: reader has deletions: " + reader.hasDeletions());*/
+
+ reader.close();
+ }
+
+ if(!delete) {
+ Analyzer analyzer = new SimpleAnalyzer();
+
+ IndexWriter writer = new IndexWriter(index, analyzer, create);
+ writer.setUseCompoundFile(true);
+
+/* Collection fields = document.keys();*/
+ Document luceneDocument = new Document();
+
+/* Set tmpKeys = fields.keySet();
+ Object keys[] = tmpKeys.toArray();
+*/ String keywords = "";
+
+ for (Iterator it = document.keys(); it.hasNext(); ) {
+ Object key = it.next();
+ String value = document.getString((String)key);
+
+ if(key.equals("__couchdb_document_id") || key.equals("__couchdb_document_revision")) {
+ luceneDocument.add(new Field((String)key, value, Field.Store.YES, Field.Index.UN_TOKENIZED));
+ } else {
+ luceneDocument.add(new Field((String)key, value, Field.Store.YES, Field.Index.TOKENIZED));
+ keywords = keywords + " " + value;
+ }
+ }
+ if(keywords.length() > 0) {
+ luceneDocument.add(new Field("__couchdb_keywords", keywords, Field.Store.YES, Field.Index.TOKENIZED));
+ }
+
+
+/* for(int idx = 0; idx < keys.length; idx++) {
+ // System.out.println("DEBUG: Add Field: "+ keys[idx] + " with value: " + fields.get(keys[idx]));
+ Hashtable field = (Hashtable)fields.get(keys[idx]);
+ if(field == null) {return;}
+ for(int fieldIdx = 0; fieldIdx < field.size(); fieldIdx++) {
+ String value = (String)field.get(fieldIdx);
+ if(value == null) {
+ value = "";
+ }
+ // System.out.println("DEBUG: fieldIdx:" + fieldIdx + " and value: "+ value);
+ String key = (String)keys[idx];
+ if(key.equals("__couchdb_document_id") || key.equals("__couchdb_document_revision")) {
+ luceneDocument.add(new Field(key, value, Field.Store.YES, Field.Index.UN_TOKENIZED));
+ } else {
+ luceneDocument.add(new Field(key, value, Field.Store.YES, Field.Index.TOKENIZED));
+ keywords = keywords + " " + value;
+ }
+ }
+*/// }
+ writer.addDocument(luceneDocument);
+ writer.optimize();
+ writer.close();
+ }
+ }
+
+
+ private static void setRevisionForDb(String db, long revision) throws Exception
+ {
+ File dbFile = new File("Lucene/State/" + db);
+
+ RandomAccessFile stateFile = new RandomAccessFile("Lucene/State/" + db, "rwd");
+ stateFile.writeBytes(String.valueOf(revision));
+ return;
+ }
+
+ private static String[] getDBs()
+ {
+ File dbRoot = new File("db_root");
+ if(!dbRoot.isDirectory()) {
+ return new String[0];
+ }
+
+ String[] dbs = dbRoot.list(new CouchDbDirFilter());
+
+ return dbs;
+ }
+
+ private static long getRevisionForDb(String db) throws Exception
+ {
+
+ File dbFile = new File("Lucene/State/" + db);
+ if(!dbFile.exists()) {
+ return 0;
+ }
+
+
+ RandomAccessFile stateFile = new RandomAccessFile("Lucene/State/" + db, "r");
+ String revision = stateFile.readLine();
+// System.out.println("rev: " + revision);
+ return (long)Integer.parseInt(revision);
+ }
+
+ private static void createIndexDir(String db)
+ {
+ File indexDir = new File("Lucene/Index/" + db);
+ if(!indexDir.exists()) {
+ indexDir.mkdirs();
+ System.out.println("Created Index Directory");
+ }
+
+ File stateDir = new File("Lucene/State");
+ if(!stateDir.exists()) {
+ stateDir.mkdirs();
+ System.out.println("Created State Directory");
+ }
+ }
+
+ private static XMLReader getParser(SAXCouchDocumentBuilder documentBuilder) throws Exception
+ {
+ SAXParserFactory factory = SAXParserFactory.newInstance();
+ SAXParser saxParser = factory.newSAXParser();
+ XMLReader parser = saxParser.getXMLReader();
+ parser.setContentHandler(documentBuilder);
+ return parser;
+ }
+
+ private static BufferedInputStream getUrlStream(String address) throws Exception
+ {
+ URL url = new URL(address);
+ InputStream inStream = url.openStream();
+ return new BufferedInputStream(inStream);
+ }
+
+ public static com.fourspaces.couchdb.ViewResults getChangedDocumentsSinceRevision(String db, int revision) throws Exception
+ {
+ //BufferedInputStream inBuffer = getUrlStream("http://localhost:5984/" + db + "/_all_docs_by_update_seq?startkey=" + revision);
+
+ com.fourspaces.couchdb.ViewResults newDocs = s.getDatabase(db).getAllDocuments(revision);
+
+ return newDocs;
+ //return CouchDocument[]
+
+/* CouchDocument[] returnValue = {};
+*/ //setup xml parser
+/* SAXCouchDocumentBuilder documentBuilder = new SAXCouchDocumentBuilder();
+ XMLReader parser = getParser(documentBuilder);
+ // Repeat until end of file
+ parser.parse(new InputSource(inBuffer));
+
+
+ return documentBuilder.getDocuments();
+*/ }
+
+
+ public static CouchDocument loadDocumentData(CouchDocument document, String db) throws Exception
+ {
+ BufferedInputStream inBuffer = getUrlStream("http://localhost:5984/" + db + "/" + document.getDocId() + "?rev=" + document.getRevision());
+
+ //setup xml parser
+ SAXCouchDocumentBuilder documentBuilder = new SAXCouchDocumentBuilder();
+ XMLReader parser = getParser(documentBuilder);
+
+ // Repeat until end of file
+ parser.parse(new InputSource(inBuffer));
+
+ return documentBuilder.getDocument();
+ }
+}
diff --git a/src/fulltext/lucene/LuceneSearcher.java b/src/fulltext/lucene/LuceneSearcher.java
new file mode 100644
index 00000000..a5ccbe89
--- /dev/null
+++ b/src/fulltext/lucene/LuceneSearcher.java
@@ -0,0 +1,90 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+/*
+
+LuceneSearcher searches a lucene index.
+
+It is managed by the Apache CouchDB daemon.
+
+*/
+
+//basics
+import java.io.*;
+
+//lucene
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader;
+
+import org.apache.lucene.document.Document;
+
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Hits;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.Query;
+
+/*
+protocol:
+Queries will look like this:
+
+databasename\n
+the full text query\n
+
+Then the java reader will read the lines and respond
+by outputing each document result:
+ok\n
+docid1\n
+score1\n
+docid2\n
+score2\n
+docid3\n
+score3\n
+\n
+
+or:
+
+error\n
+error_id\n
+error message\n
+
+*/
+public class LuceneSearcher
+{
+ public static void main(String[] args) throws Exception
+ {
+
+ BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
+
+ String db = "";
+ String queryString = "";
+
+ while(((db = in.readLine()) != null) && ((queryString = in.readLine()) != null)) {
+
+ IndexSearcher searcher = new IndexSearcher("Lucene/Index/" + db);
+
+ Query query = new TermQuery(new Term("__couchdb_keywords", queryString));
+
+ Hits hits = searcher.search(query);
+
+ System.out.println("ok");
+ for(int i = 0; i < hits.length(); i++) {
+ Document d = hits.doc(i);
+ System.out.println(d.get("__couchdb_document_id"));
+ System.out.println(hits.score(i));
+ }
+ System.out.println();
+ }
+ }
+}
diff --git a/src/fulltext/lucene/readme.txt b/src/fulltext/lucene/readme.txt
new file mode 100644
index 00000000..c115534c
--- /dev/null
+++ b/src/fulltext/lucene/readme.txt
@@ -0,0 +1,41 @@
+This is still work in progress and has not been integrated into the build
+process. Good luck though :)
+
+This document describes how to use the LuceneIndexer with Apache CouchDB.
+
+Requirements:
+Apache CouchDB 0.6.4 or newer.
+Java Development Kit (JDK) 1.5
+Lucene 2.0.0 or newer
+couchdb4j (http://code.google.com/p/couchdb4j/)
+
+
+If you don't already have it,
+download lucene-core-2.0.0.jar from a mirror
+A list of mirrors can be found at
+http://www.apache.org/dyn/closer.cgi/lucene/java/
+
+Add the following line to your couch.ini:
+LuceneServer=/usr/bin/java -cp "./bin/:./lib/lucene-core.jar" LuceneIndexer
+
+Adjust the version number and the path to java, if needed.
+If you have lucene installed already, remove the
+'-cp "./bin/:./lib/lucene-core.jar"' part.
+
+Put lucene-core.jar and couchdb4j.jar into $CouchDbDir/lib
+
+Launch Apache CouchDB.
+
+The indexer will populate $CouchDbDir/Lucene/Index with an index for
+all documents in all databases.
+(indexes per database will be added soon).
+
+To see that the data is actually stored in there,
+use luke from http://www.getopt.org/luke/
+
+To use the actual index, you could use the PHP 5 Lucene Demo in the Zend Framework
+(http://framework.zend.com) or any other Lucene implementation in your favourite
+language.
+
+If you have any questions, please visit:
+http://couchdb.com/CouchDB/CouchDBWeb.nsf/vDissByDate
diff --git a/var/Makefile.am b/var/Makefile.am
new file mode 100644
index 00000000..7d49a6cc
--- /dev/null
+++ b/var/Makefile.am
@@ -0,0 +1,25 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy
+## of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+datarootdir = @prefix@/share
+
+install-data-hook:
+ if test ! "$(mkdir_p)" = ""; then \
+ $(mkdir_p) "$(DESTDIR)$(pkgstatelibdir)"; \
+ $(mkdir_p) "$(DESTDIR)$(pkgstatelogdir)"; \
+ $(mkdir_p) "$(DESTDIR)$(localstatedir)/run"; \
+ else \
+ echo "WARNING: You may have to create these directories by hand."; \
+ mkdir -p "$(DESTDIR)$(pkgstatelibdir)"; \
+ mkdir -p "$(DESTDIR)$(pkgstatelogdir)"; \
+ mkdir -p "$(DESTDIR)$(localstatedir)/run"; \
+ fi