author    Micah Anderson <micah@leap.se>  2014-01-15 18:13:16 +0000
committer drebs <drebs@leap.se>           2014-01-17 08:48:11 -0200
commit    510c6d763fba74f95ae8f894408c3658bcef4f83
tree      d4dd0930b902cb1e5d46bea621ec83f801ea8ed6
parent    8bd863936ead4243f58fb99e11d1221e1af0a71e
embed dependencies that were previously pulled in by git during rebar build
-rw-r--r--  deps/chttpd/README.md | 16
-rwxr-xr-x  deps/chttpd/rebar | bin 0 -> 100732 bytes
-rw-r--r--  deps/chttpd/src/chttpd.app.src | 7
-rw-r--r--  deps/chttpd/src/chttpd.erl | 782
-rw-r--r--  deps/chttpd/src/chttpd_app.erl | 21
-rw-r--r--  deps/chttpd/src/chttpd_db.erl | 1264
-rw-r--r--  deps/chttpd/src/chttpd_external.erl | 174
-rw-r--r--  deps/chttpd/src/chttpd_misc.erl | 283
-rw-r--r--  deps/chttpd/src/chttpd_rewrite.erl | 421
-rw-r--r--  deps/chttpd/src/chttpd_show.erl | 314
-rw-r--r--  deps/chttpd/src/chttpd_sup.erl | 25
-rw-r--r--  deps/chttpd/src/chttpd_view.erl | 393
-rw-r--r--  deps/chttpd/test/chttpd_delayed_response_test.erl | 43
-rw-r--r--  deps/chttpd/test/mock_request.erl | 39
-rw-r--r--  deps/fabric/README.md | 24
-rw-r--r--  deps/fabric/include/fabric.hrl | 38
-rwxr-xr-x  deps/fabric/rebar | bin 0 -> 100732 bytes
-rw-r--r--  deps/fabric/rebar.config | 18
-rw-r--r--  deps/fabric/src/fabric.app.src | 6
-rw-r--r--  deps/fabric/src/fabric.erl | 460
-rw-r--r--  deps/fabric/src/fabric_db_create.erl | 161
-rw-r--r--  deps/fabric/src/fabric_db_delete.erl | 95
-rw-r--r--  deps/fabric/src/fabric_db_doc_count.erl | 68
-rw-r--r--  deps/fabric/src/fabric_db_info.erl | 104
-rw-r--r--  deps/fabric/src/fabric_db_meta.erl | 49
-rw-r--r--  deps/fabric/src/fabric_db_update_listener.erl | 114
-rw-r--r--  deps/fabric/src/fabric_dict.erl | 51
-rw-r--r--  deps/fabric/src/fabric_doc_attachments.erl | 131
-rw-r--r--  deps/fabric/src/fabric_doc_missing_revs.erl | 90
-rw-r--r--  deps/fabric/src/fabric_doc_open.erl | 139
-rw-r--r--  deps/fabric/src/fabric_doc_open_revs.erl | 307
-rw-r--r--  deps/fabric/src/fabric_doc_update.erl | 297
-rw-r--r--  deps/fabric/src/fabric_group_info.erl | 100
-rw-r--r--  deps/fabric/src/fabric_rpc.erl | 485
-rw-r--r--  deps/fabric/src/fabric_util.erl | 168
-rw-r--r--  deps/fabric/src/fabric_view.erl | 362
-rw-r--r--  deps/fabric/src/fabric_view_all_docs.erl | 181
-rw-r--r--  deps/fabric/src/fabric_view_changes.erl | 334
-rw-r--r--  deps/fabric/src/fabric_view_map.erl | 151
-rw-r--r--  deps/fabric/src/fabric_view_reduce.erl | 114
-rw-r--r--  deps/ibrowse/BSD_LICENSE | 10
-rw-r--r--  deps/ibrowse/LICENSE | 9
-rw-r--r--  deps/ibrowse/Makefile | 20
-rw-r--r--  deps/ibrowse/README | 531
-rw-r--r--  deps/ibrowse/doc/ibrowse.html | 472
-rw-r--r--  deps/ibrowse/doc/ibrowse_lib.html | 67
-rw-r--r--  deps/ibrowse/doc/short-desc | 1
-rw-r--r--  deps/ibrowse/include/ibrowse.hrl | 21
-rw-r--r--  deps/ibrowse/priv/ibrowse.conf | 18
-rw-r--r--  deps/ibrowse/rebar.config | 2
-rw-r--r--  deps/ibrowse/src/Emakefile.src | 7
-rw-r--r--  deps/ibrowse/src/ibrowse.app.src | 7
-rw-r--r--  deps/ibrowse/src/ibrowse.erl | 862
-rw-r--r--  deps/ibrowse/src/ibrowse_app.erl | 63
-rw-r--r--  deps/ibrowse/src/ibrowse_http_client.erl | 1855
-rw-r--r--  deps/ibrowse/src/ibrowse_lb.erl | 235
-rw-r--r--  deps/ibrowse/src/ibrowse_lib.erl | 391
-rw-r--r--  deps/ibrowse/src/ibrowse_sup.erl | 63
-rw-r--r--  deps/ibrowse/src/ibrowse_test.erl | 513
-rw-r--r--  deps/ibrowse/test/Makefile | 19
-rw-r--r--  deps/ibrowse/test/ibrowse_lib_tests.erl | 135
-rw-r--r--  deps/ibrowse/test/ibrowse_test_server.erl | 195
-rwxr-xr-x  deps/meck/.scripts/tag_with_changelog.sh | 41
-rw-r--r--  deps/meck/.travis.yml | 8
-rw-r--r--  deps/meck/CHANGELOG | 1
-rw-r--r--  deps/meck/LICENSE | 178
-rw-r--r--  deps/meck/Makefile | 14
-rw-r--r--  deps/meck/NOTICE | 5
-rw-r--r--  deps/meck/README.md | 210
-rw-r--r--  deps/meck/doc/overview.edoc | 25
-rw-r--r--  deps/meck/rebar.config | 6
-rw-r--r--  deps/meck/src/meck.app.src | 9
-rw-r--r--  deps/meck/src/meck.erl | 813
-rw-r--r--  deps/meck/src/meck_abstract.hrl | 19
-rw-r--r--  deps/meck/src/meck_cover.erl | 110
-rw-r--r--  deps/meck/src/meck_mod.erl | 118
-rw-r--r--  deps/meck/test/cover_test_module.dontcompile | 21
-rw-r--r--  deps/meck/test/include/cover_test.hrl | 1
-rw-r--r--  deps/meck/test/meck_performance_test.erl | 65
-rw-r--r--  deps/meck/test/meck_test_module.erl | 8
-rw-r--r--  deps/meck/test/meck_test_parametrized_module.erl | 7
-rw-r--r--  deps/meck/test/meck_tests.erl | 890
-rw-r--r--  deps/mem3/README.md | 33
-rw-r--r--  deps/mem3/include/mem3.hrl | 44
-rwxr-xr-x  deps/mem3/rebar | bin 0 -> 100732 bytes
-rw-r--r--  deps/mem3/rebar.config | 17
-rw-r--r--  deps/mem3/src/mem3.app.src | 13
-rw-r--r--  deps/mem3/src/mem3.erl | 238
-rw-r--r--  deps/mem3/src/mem3_app.erl | 23
-rw-r--r--  deps/mem3/src/mem3_cache.erl | 118
-rw-r--r--  deps/mem3/src/mem3_httpd.erl | 53
-rw-r--r--  deps/mem3/src/mem3_nodes.erl | 136
-rw-r--r--  deps/mem3/src/mem3_rep.erl | 144
-rw-r--r--  deps/mem3/src/mem3_rep_manager.erl | 627
-rw-r--r--  deps/mem3/src/mem3_sup.erl | 36
-rw-r--r--  deps/mem3/src/mem3_sync.erl | 267
-rw-r--r--  deps/mem3/src/mem3_sync_event.erl | 68
-rw-r--r--  deps/mem3/src/mem3_util.erl | 211
-rw-r--r--  deps/mem3/test/01-config-default.ini | 2
-rw-r--r--  deps/mem3/test/mem3_util_test.erl | 154
-rw-r--r--  deps/mochiweb/LICENSE | 9
-rw-r--r--  deps/mochiweb/Makefile | 20
-rw-r--r--  deps/mochiweb/README | 1
-rw-r--r--  deps/mochiweb/examples/https/https_store.erl | 146
-rw-r--r--  deps/mochiweb/examples/https/server_cert.pem | 19
-rw-r--r--  deps/mochiweb/examples/https/server_key.pem | 27
-rw-r--r--  deps/mochiweb/examples/keepalive/keepalive.erl | 81
-rw-r--r--  deps/mochiweb/priv/skel/Makefile | 20
-rw-r--r--  deps/mochiweb/priv/skel/priv/www/index.html | 8
-rw-r--r--  deps/mochiweb/priv/skel/src/Makefile | 33
-rw-r--r--  deps/mochiweb/priv/skel/src/skel.app | 14
-rw-r--r--  deps/mochiweb/priv/skel/src/skel.erl | 30
-rw-r--r--  deps/mochiweb/priv/skel/src/skel.hrl | 1
-rw-r--r--  deps/mochiweb/priv/skel/src/skel_app.erl | 30
-rw-r--r--  deps/mochiweb/priv/skel/src/skel_deps.erl | 92
-rw-r--r--  deps/mochiweb/priv/skel/src/skel_sup.erl | 62
-rw-r--r--  deps/mochiweb/priv/skel/src/skel_web.erl | 51
-rwxr-xr-x  deps/mochiweb/priv/skel/start-dev.sh | 12
-rwxr-xr-x  deps/mochiweb/priv/skel/start.sh | 3
-rw-r--r--  deps/mochiweb/priv/skel/support/include.mk | 40
-rwxr-xr-x  deps/mochiweb/priv/skel/support/run_tests.escript | 94
-rwxr-xr-x  deps/mochiweb/scripts/new_mochiweb.erl | 37
-rw-r--r--  deps/mochiweb/src/Makefile | 33
-rw-r--r--  deps/mochiweb/src/internal.hrl | 3
-rw-r--r--  deps/mochiweb/src/mochifmt.erl | 425
-rw-r--r--  deps/mochiweb/src/mochifmt_records.erl | 38
-rw-r--r--  deps/mochiweb/src/mochifmt_std.erl | 30
-rw-r--r--  deps/mochiweb/src/mochiglobal.erl | 107
-rw-r--r--  deps/mochiweb/src/mochihex.erl | 91
-rw-r--r--  deps/mochiweb/src/mochijson.erl | 531
-rw-r--r--  deps/mochiweb/src/mochijson2.erl | 802
-rw-r--r--  deps/mochiweb/src/mochilists.erl | 104
-rw-r--r--  deps/mochiweb/src/mochilogfile2.erl | 140
-rw-r--r--  deps/mochiweb/src/mochinum.erl | 331
-rw-r--r--  deps/mochiweb/src/mochitemp.erl | 310
-rw-r--r--  deps/mochiweb/src/mochiutf8.erl | 316
-rw-r--r--  deps/mochiweb/src/mochiweb.app.src | 9
-rw-r--r--  deps/mochiweb/src/mochiweb.erl | 289
-rw-r--r--  deps/mochiweb/src/mochiweb_acceptor.erl | 48
-rw-r--r--  deps/mochiweb/src/mochiweb_app.erl | 27
-rw-r--r--  deps/mochiweb/src/mochiweb_charref.erl | 308
-rw-r--r--  deps/mochiweb/src/mochiweb_cookies.erl | 309
-rw-r--r--  deps/mochiweb/src/mochiweb_cover.erl | 75
-rw-r--r--  deps/mochiweb/src/mochiweb_echo.erl | 38
-rw-r--r--  deps/mochiweb/src/mochiweb_headers.erl | 299
-rw-r--r--  deps/mochiweb/src/mochiweb_html.erl | 1061
-rw-r--r--  deps/mochiweb/src/mochiweb_http.erl | 273
-rw-r--r--  deps/mochiweb/src/mochiweb_io.erl | 46
-rw-r--r--  deps/mochiweb/src/mochiweb_mime.erl | 94
-rw-r--r--  deps/mochiweb/src/mochiweb_multipart.erl | 824
-rw-r--r--  deps/mochiweb/src/mochiweb_request.erl | 768
-rw-r--r--  deps/mochiweb/src/mochiweb_request_tests.erl | 63
-rw-r--r--  deps/mochiweb/src/mochiweb_response.erl | 64
-rw-r--r--  deps/mochiweb/src/mochiweb_skel.erl | 86
-rw-r--r--  deps/mochiweb/src/mochiweb_socket.erl | 84
-rw-r--r--  deps/mochiweb/src/mochiweb_socket_server.erl | 272
-rw-r--r--  deps/mochiweb/src/mochiweb_sup.erl | 41
-rw-r--r--  deps/mochiweb/src/mochiweb_util.erl | 973
-rw-r--r--  deps/mochiweb/src/reloader.erl | 161
-rw-r--r--  deps/mochiweb/support/include.mk | 41
-rwxr-xr-x  deps/mochiweb/support/make_app.escript | 86
-rwxr-xr-x  deps/mochiweb/support/run_tests.escript | 94
-rw-r--r--  deps/mochiweb/support/test-materials/test_ssl_cert.pem | 19
-rw-r--r--  deps/mochiweb/support/test-materials/test_ssl_key.pem | 27
-rw-r--r--  deps/oauth/Emakefile | 1
-rw-r--r--  deps/oauth/License.txt | 22
-rw-r--r--  deps/oauth/Makefile | 7
-rw-r--r--  deps/oauth/README.txt | 36
-rw-r--r--  deps/oauth/src/oauth.app.src | 21
-rw-r--r--  deps/oauth/src/oauth.erl | 107
-rw-r--r--  deps/oauth/src/oauth_client.erl | 149
-rw-r--r--  deps/oauth/src/oauth_hmac_sha1.erl | 11
-rw-r--r--  deps/oauth/src/oauth_http.erl | 22
-rw-r--r--  deps/oauth/src/oauth_plaintext.erl | 10
-rw-r--r--  deps/oauth/src/oauth_rsa_sha1.erl | 30
-rw-r--r--  deps/oauth/src/oauth_unix.erl | 16
-rw-r--r--  deps/oauth/src/oauth_uri.erl | 98
-rw-r--r--  deps/rexi/README.md | 23
-rw-r--r--  deps/rexi/include/rexi.hrl | 22
-rwxr-xr-x  deps/rexi/rebar | bin 0 -> 100732 bytes
-rw-r--r--  deps/rexi/rebar.config | 17
-rw-r--r--  deps/rexi/src/rexi.app.src | 7
-rw-r--r--  deps/rexi/src/rexi.erl | 125
-rw-r--r--  deps/rexi/src/rexi_app.erl | 25
-rw-r--r--  deps/rexi/src/rexi_monitor.erl | 66
-rw-r--r--  deps/rexi/src/rexi_server.erl | 190
-rw-r--r--  deps/rexi/src/rexi_sup.erl | 29
-rw-r--r--  deps/rexi/src/rexi_utils.erl | 52
-rw-r--r--  deps/twig/README.md | 11
-rwxr-xr-x  deps/twig/rebar | bin 0 -> 101028 bytes
-rw-r--r--  deps/twig/src/trunc_io.erl | 215
-rw-r--r--  deps/twig/src/twig.app.src | 8
-rw-r--r--  deps/twig/src/twig.erl | 55
-rw-r--r--  deps/twig/src/twig_app.erl | 23
-rw-r--r--  deps/twig/src/twig_event_handler.erl | 164
-rw-r--r--  deps/twig/src/twig_int.hrl | 26
-rw-r--r--  deps/twig/src/twig_monitor.erl | 48
-rw-r--r--  deps/twig/src/twig_sup.erl | 26
-rw-r--r--  deps/twig/src/twig_util.erl | 82
199 files changed, 30372 insertions, 0 deletions
diff --git a/deps/chttpd/README.md b/deps/chttpd/README.md
new file mode 100644
index 00000000..784bedd7
--- /dev/null
+++ b/deps/chttpd/README.md
@@ -0,0 +1,16 @@
+## chttpd
+
+chttpd is a cluster-aware http layer for [CouchDB][1]. It is used in [BigCouch][2] as the http front-end.
+
+### License
+[Apache 2.0][3]
+
+### Contact
+ * [http://cloudant.com][4]
+ * [info@cloudant.com][5]
+
+[1]: http://couchdb.apache.org
+[2]: http://github.com/cloudant/bigcouch
+[3]: http://www.apache.org/licenses/LICENSE-2.0.html
+[4]: http://cloudant.com
+[5]: mailto:info@cloudant.com
diff --git a/deps/chttpd/rebar b/deps/chttpd/rebar
new file mode 100755
index 00000000..30c43ba5
--- /dev/null
+++ b/deps/chttpd/rebar
Binary files differ
diff --git a/deps/chttpd/src/chttpd.app.src b/deps/chttpd/src/chttpd.app.src
new file mode 100644
index 00000000..6897076c
--- /dev/null
+++ b/deps/chttpd/src/chttpd.app.src
@@ -0,0 +1,7 @@
+{application, chttpd, [
+ {description, "HTTP interface for CouchDB cluster"},
+ {vsn, git},
+ {registered, [chttpd_sup, chttpd]},
+ {applications, [kernel, stdlib, couch, fabric]},
+ {mod, {chttpd_app,[]}}
+]}.
\ No newline at end of file
diff --git a/deps/chttpd/src/chttpd.erl b/deps/chttpd/src/chttpd.erl
new file mode 100644
index 00000000..a4f053aa
--- /dev/null
+++ b/deps/chttpd/src/chttpd.erl
@@ -0,0 +1,782 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd).
+-include_lib("couch/include/couch_db.hrl").
+
+-export([start_link/0, start_link/1, start_link/2,
+ stop/0, handle_request/1, config_change/2,
+ primary_header_value/2, header_value/2, header_value/3, qs_value/2,
+ qs_value/3, qs/1, path/1, absolute_uri/2, body_length/1,
+ verify_is_server_admin/1, unquote/1, quote/1, recv/2, recv_chunked/4,
+ error_info/1, parse_form/1, json_body/1, json_body_obj/1, body/1,
+ doc_etag/1, make_etag/1, etag_respond/3, partition/1, serve_file/3,
+ server_header/0, start_chunked_response/3,send_chunk/2,
+ start_response_length/4, send/2, start_json_response/2,
+ start_json_response/3, end_json_response/1, send_response/4,
+ send_method_not_allowed/2, send_error/2, send_error/4, send_redirect/2,
+ send_chunked_error/2, send_json/2,send_json/3,send_json/4]).
+
+-export([start_delayed_json_response/2, start_delayed_json_response/3,
+ start_delayed_json_response/4,
+ start_delayed_chunked_response/3, start_delayed_chunked_response/4,
+ send_delayed_chunk/2, send_delayed_last_chunk/1,
+ send_delayed_error/2, end_delayed_json_response/1,
+ get_delayed_req/1]).
+
+-record(delayed_resp, {
+ start_fun,
+ req,
+ code,
+ headers,
+ first_chunk
+}).
+
+start_link() ->
+ start_link(http).
+start_link(http) ->
+ Port = couch_config:get("chttpd", "port", "5984"),
+ start_link(?MODULE, [{port, Port}]);
+
+start_link(https) ->
+ Port = couch_config:get("chttps", "port", "6984"),
+ CertFile = couch_config:get("chttps", "cert_file", nil),
+ KeyFile = couch_config:get("chttps", "key_file", nil),
+ Options = case CertFile /= nil andalso KeyFile /= nil of
+ true ->
+ SslOpts = [{certfile, CertFile}, {keyfile, KeyFile}],
+
+ %% set password if one is needed for the cert
+ SslOpts1 = case couch_config:get("chttps", "password", nil) of
+ nil -> SslOpts;
+ Password ->
+ SslOpts ++ [{password, Password}]
+ end,
+ % do we verify certificates ?
+ FinalSslOpts = case couch_config:get("chttps",
+ "verify_ssl_certificates", "false") of
+ "false" -> SslOpts1;
+ "true" ->
+ case couch_config:get("chttps",
+ "cacert_file", nil) of
+ nil ->
+ io:format("Verify SSL certificate "
+ ++"enabled but file containing "
+ ++"PEM encoded CA certificates is "
+ ++"missing", []),
+ throw({error, missing_cacerts});
+ CaCertFile ->
+ Depth = list_to_integer(couch_config:get("chttps",
+ "ssl_certificate_max_depth",
+ "1")),
+ FinalOpts = [
+ {cacertfile, CaCertFile},
+ {depth, Depth},
+ {verify, verify_peer}],
+ % allows custom verify fun.
+ case couch_config:get("chttps",
+ "verify_fun", nil) of
+ nil -> FinalOpts;
+ SpecStr ->
+ FinalOpts
+ ++ [{verify_fun, couch_httpd:make_arity_3_fun(SpecStr)}]
+ end
+ end
+ end,
+
+ [{port, Port},
+ {ssl, true},
+ {ssl_opts, FinalSslOpts}];
+ false ->
+ io:format("SSL enabled but PEM certificates are missing.", []),
+ throw({error, missing_certs})
+ end,
+ start_link(https, Options).
+
+start_link(Name, Options) ->
+ Options1 = Options ++ [
+ {loop, fun ?MODULE:handle_request/1},
+ {name, Name},
+ {ip, couch_config:get("chttpd", "bind_address", any)}
+ ],
+ ServerOptsCfg = couch_config:get("chttpd", "server_options", "[]"),
+ {ok, ServerOpts} = couch_util:parse_term(ServerOptsCfg),
+ Options2 = lists:keymerge(1, lists:sort(Options1), lists:sort(ServerOpts)),
+ case mochiweb_http:start(Options2) of
+ {ok, Pid} ->
+ ok = couch_config:register(fun ?MODULE:config_change/2, Pid),
+ {ok, Pid};
+ {error, Reason} ->
+ io:format("Failure to start Mochiweb: ~s~n", [Reason]),
+ {error, Reason}
+ end.
+
+config_change("chttpd", "bind_address") ->
+ ?MODULE:stop();
+config_change("chttpd", "port") ->
+ ?MODULE:stop();
+config_change("chttpd", "backlog") ->
+ ?MODULE:stop();
+config_change("chttpd", "server_options") ->
+ ?MODULE:stop().
+
+stop() ->
+ catch mochiweb_http:stop(https),
+ mochiweb_http:stop(?MODULE).
+
+handle_request(MochiReq) ->
+ Begin = now(),
+
+ case couch_config:get("chttpd", "socket_options") of
+ undefined ->
+ ok;
+ SocketOptsCfg ->
+ {ok, SocketOpts} = couch_util:parse_term(SocketOptsCfg),
+ ok = mochiweb_socket:setopts(MochiReq:get(socket), SocketOpts)
+ end,
+
+ AuthenticationFuns = [
+ fun couch_httpd_auth:cookie_authentication_handler/1,
+ fun couch_httpd_auth:default_authentication_handler/1
+ ],
+
+ % for the path, use the raw path with the query string and fragment
+ % removed, but URL quoting left intact
+ RawUri = MochiReq:get(raw_path),
+ {"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
+ {HandlerKey, _, _} = mochiweb_util:partition(Path, "/"),
+
+ Peer = MochiReq:get(peer),
+ LogForClosedSocket = io_lib:format("mochiweb_recv_error for ~s - ~p ~s", [
+ Peer,
+ MochiReq:get(method),
+ RawUri
+ ]),
+
+ Method1 =
+ case MochiReq:get(method) of
+ % already an atom
+ Meth when is_atom(Meth) -> Meth;
+
+ % Non standard HTTP verbs aren't atoms (COPY, MOVE etc) so convert when
+ % possible (if any module references the atom, then it's existing).
+ Meth -> couch_util:to_existing_atom(Meth)
+ end,
+ increment_method_stats(Method1),
+ % alias HEAD to GET as mochiweb takes care of stripping the body
+ Method = case Method1 of
+ 'HEAD' -> 'GET';
+ Other -> Other
+ end,
+
+ HttpReq = #httpd{
+ mochi_req = MochiReq,
+ method = Method,
+ path_parts = [list_to_binary(chttpd:unquote(Part))
+ || Part <- string:tokens(Path, "/")],
+ db_url_handlers = db_url_handlers(),
+ design_url_handlers = design_url_handlers()
+ },
+
+ % put small token on heap to keep requests synced to backend calls
+ erlang:put(nonce, couch_util:to_hex(crypto:rand_bytes(4))),
+
+ Result =
+ try
+ case authenticate_request(HttpReq, AuthenticationFuns) of
+ #httpd{} = Req ->
+ HandlerFun = url_handler(HandlerKey),
+ HandlerFun(possibly_hack(Req));
+ Response ->
+ Response
+ end
+ catch
+ throw:{http_head_abort, Resp0} ->
+ {ok, Resp0};
+ throw:{http_abort, Resp0, Reason0} ->
+ {aborted, Resp0, Reason0};
+ throw:{invalid_json, S} ->
+ ?LOG_ERROR("attempted upload of invalid JSON ~s", [S]),
+ send_error(HttpReq, {bad_request, "invalid UTF-8 JSON"});
+ exit:{mochiweb_recv_error, E} ->
+ ?LOG_INFO(LogForClosedSocket ++ " - ~p", [E]),
+ exit(normal);
+ throw:Error ->
+ send_error(HttpReq, Error);
+ error:database_does_not_exist ->
+ send_error(HttpReq, database_does_not_exist);
+ Tag:Error ->
+ Stack = erlang:get_stacktrace(),
+ ?LOG_ERROR("Uncaught error in HTTP request: ~p",[{Tag, Error}]),
+ ?LOG_INFO("Stacktrace: ~p",[Stack]),
+ send_error(HttpReq, {Error, nil, Stack})
+ end,
+
+ RequestTime = timer:now_diff(now(), Begin)/1000,
+ {Status, Code} = case Result of
+ {ok, Resp} ->
+ {ok, Resp:get(code)};
+ {aborted, Resp, _} ->
+ {aborted, Resp:get(code)}
+ end,
+ Host = MochiReq:get_header_value("Host"),
+ ?LOG_INFO("~s ~s ~s ~s ~B ~p ~B", [Peer, Host,
+ atom_to_list(Method1), RawUri, Code, Status, round(RequestTime)]),
+ couch_stats_collector:record({couchdb, request_time}, RequestTime),
+ case Result of
+ {ok, _} ->
+ couch_stats_collector:increment({httpd, requests}),
+ {ok, Resp};
+ {aborted, _, Reason} ->
+ couch_stats_collector:increment({httpd, aborted_requests}),
+ ?LOG_ERROR("Response abnormally terminated: ~p", [Reason]),
+ exit(normal)
+ end.
+
+%% HACK: replication currently handles two forms of input, #db{} style
+%% and #http_db style. We need a third that makes use of fabric. #db{}
+%% works fine for replicating the dbs and nodes database because they
+%% aren't sharded. So for now when a local db is specified as the source or
+%% the target, it's hacked to make it a full url and treated as a remote.
+possibly_hack(#httpd{path_parts=[<<"_replicate">>]}=Req) ->
+ {Props0} = couch_httpd:json_body_obj(Req),
+ Props1 = fix_uri(Req, Props0, <<"source">>),
+ Props2 = fix_uri(Req, Props1, <<"target">>),
+ put(post_body, {Props2}),
+ Req;
+possibly_hack(Req) ->
+ Req.
+
+fix_uri(Req, Props, Type) ->
+ case is_http(replication_uri(Type, Props)) of
+ true ->
+ Props;
+ false ->
+ Uri = make_uri(Req,replication_uri(Type, Props)),
+ [{Type,Uri}|proplists:delete(Type,Props)]
+ end.
+
+replication_uri(Type, PostProps) ->
+ case couch_util:get_value(Type, PostProps) of
+ {Props} ->
+ couch_util:get_value(<<"url">>, Props);
+ Else ->
+ Else
+ end.
+
+is_http(<<"http://", _/binary>>) ->
+ true;
+is_http(<<"https://", _/binary>>) ->
+ true;
+is_http(_) ->
+ false.
+
+make_uri(Req, Raw) ->
+ Url = list_to_binary(["http://", couch_config:get("httpd", "bind_address"),
+ ":", couch_config:get("chttpd", "port"), "/", Raw]),
+ Headers = [
+ {<<"authorization">>, ?l2b(header_value(Req,"authorization",""))},
+ {<<"cookie">>, ?l2b(header_value(Req,"cookie",""))}
+ ],
+ {[{<<"url">>,Url}, {<<"headers">>,{Headers}}]}.
+%%% end hack
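+%
+% Illustration with example values only: a _replicate body such as
+%   {"source":"mydb", "target":"http://example.org/otherdb"}
+% keeps its "target" (already a URL) but has "source" rewritten by fix_uri/3
+% to {"url":"http://127.0.0.1:5984/mydb", "headers":{...}}, carrying the
+% caller's authorization and cookie headers, so the sharded local db is
+% replicated through its own HTTP front door.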
+
+
+% Try authentication handlers in order until one returns a result
+authenticate_request(#httpd{user_ctx=#user_ctx{}} = Req, _AuthFuns) ->
+ Req;
+authenticate_request(#httpd{} = Req, [AuthFun|Rest]) ->
+ authenticate_request(AuthFun(Req), Rest);
+authenticate_request(#httpd{} = Req, []) ->
+ case couch_config:get("chttpd", "require_valid_user", "false") of
+ "true" ->
+ throw({unauthorized, <<"Authentication required.">>});
+ "false" ->
+ case couch_config:get("admins") of
+ [] ->
+ Ctx = #user_ctx{roles=[<<"_reader">>, <<"_writer">>, <<"_admin">>]},
+ Req#httpd{user_ctx = Ctx};
+ _ ->
+ Req#httpd{user_ctx=#user_ctx{}}
+ end
+ end;
+authenticate_request(Response, _AuthFuns) ->
+ Response.
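+
+% Note that with no server admins configured and require_valid_user left at
+% "false", unauthenticated requests fall through with the _reader, _writer
+% and _admin roles, i.e. the classic CouchDB "admin party" default.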
+
+increment_method_stats(Method) ->
+ couch_stats_collector:increment({httpd_request_methods, Method}).
+
+url_handler("") -> fun chttpd_misc:handle_welcome_req/1;
+url_handler("favicon.ico") -> fun chttpd_misc:handle_favicon_req/1;
+url_handler("_utils") -> fun chttpd_misc:handle_utils_dir_req/1;
+url_handler("_all_dbs") -> fun chttpd_misc:handle_all_dbs_req/1;
+url_handler("_active_tasks") -> fun chttpd_misc:handle_task_status_req/1;
+url_handler("_config") -> fun chttpd_misc:handle_config_req/1;
+url_handler("_replicate") -> fun chttpd_misc:handle_replicate_req/1;
+url_handler("_uuids") -> fun chttpd_misc:handle_uuids_req/1;
+url_handler("_log") -> fun chttpd_misc:handle_log_req/1;
+url_handler("_sleep") -> fun chttpd_misc:handle_sleep_req/1;
+url_handler("_session") -> fun couch_httpd_auth:handle_session_req/1;
+url_handler("_oauth") -> fun couch_httpd_oauth:handle_oauth_req/1;
+%% showroom_http module missing in bigcouch
+url_handler("_restart") -> fun showroom_http:handle_restart_req/1;
+url_handler("_membership") -> fun mem3_httpd:handle_membership_req/1;
+url_handler(_) -> fun chttpd_db:handle_request/1.
+
+db_url_handlers() ->
+ [
+ {<<"_view_cleanup">>, fun chttpd_db:handle_view_cleanup_req/2},
+ {<<"_compact">>, fun chttpd_db:handle_compact_req/2},
+ {<<"_design">>, fun chttpd_db:handle_design_req/2},
+ {<<"_temp_view">>, fun chttpd_view:handle_temp_view_req/2},
+ {<<"_changes">>, fun chttpd_db:handle_changes_req/2},
+ {<<"_search">>, fun chttpd_external:handle_search_req/2}
+ ].
+
+design_url_handlers() ->
+ [
+ {<<"_view">>, fun chttpd_view:handle_view_req/3},
+ {<<"_show">>, fun chttpd_show:handle_doc_show_req/3},
+ {<<"_list">>, fun chttpd_show:handle_view_list_req/3},
+ {<<"_update">>, fun chttpd_show:handle_doc_update_req/3},
+ {<<"_info">>, fun chttpd_db:handle_design_info_req/3},
+ {<<"_rewrite">>, fun chttpd_rewrite:handle_rewrite_req/3}
+ ].
+
+% Utilities
+
+partition(Path) ->
+ mochiweb_util:partition(Path, "/").
+
+header_value(#httpd{mochi_req=MochiReq}, Key) ->
+ MochiReq:get_header_value(Key).
+
+header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
+ case MochiReq:get_header_value(Key) of
+ undefined -> Default;
+ Value -> Value
+ end.
+
+primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
+ MochiReq:get_primary_header_value(Key).
+
+serve_file(#httpd{mochi_req=MochiReq}=Req, RelativePath, DocumentRoot) ->
+ {ok, MochiReq:serve_file(RelativePath, DocumentRoot,
+ server_header() ++ couch_httpd_auth:cookie_auth_header(Req, []))}.
+
+qs_value(Req, Key) ->
+ qs_value(Req, Key, undefined).
+
+qs_value(Req, Key, Default) ->
+ couch_util:get_value(Key, qs(Req), Default).
+
+qs(#httpd{mochi_req=MochiReq}) ->
+ MochiReq:parse_qs().
+
+path(#httpd{mochi_req=MochiReq}) ->
+ MochiReq:get(path).
+
+absolute_uri(#httpd{mochi_req=MochiReq}, Path) ->
+ XHost = couch_config:get("httpd", "x_forwarded_host", "X-Forwarded-Host"),
+ Host = case MochiReq:get_header_value(XHost) of
+ undefined ->
+ case MochiReq:get_header_value("Host") of
+ undefined ->
+ {ok, {Address, Port}} = inet:sockname(MochiReq:get(socket)),
+ inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
+ Value1 ->
+ Value1
+ end;
+ Value -> Value
+ end,
+ XSsl = couch_config:get("httpd", "x_forwarded_ssl", "X-Forwarded-Ssl"),
+ Scheme = case MochiReq:get_header_value(XSsl) of
+ "on" -> "https";
+ _ ->
+ XProto = couch_config:get("httpd", "x_forwarded_proto",
+ "X-Forwarded-Proto"),
+ case MochiReq:get_header_value(XProto) of
+ % Restrict to "https" and "http" schemes only
+ "https" -> "https";
+ _ ->
+ case MochiReq:get(scheme) of
+ https ->
+ "https";
+ http ->
+ "http"
+ end
+ end
+ end,
+ Scheme ++ "://" ++ Host ++ Path.
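+
+% Example (hypothetical proxy headers): with "X-Forwarded-Host:
+% couch.example.com" and "X-Forwarded-Proto: https" set,
+% absolute_uri(Req, "/mydb") returns "https://couch.example.com/mydb".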
+
+unquote(UrlEncodedString) ->
+ mochiweb_util:unquote(UrlEncodedString).
+
+quote(UrlDecodedString) ->
+ mochiweb_util:quote_plus(UrlDecodedString).
+
+parse_form(#httpd{mochi_req=MochiReq}) ->
+ mochiweb_multipart:parse_form(MochiReq).
+
+recv(#httpd{mochi_req=MochiReq}, Len) ->
+ MochiReq:recv(Len).
+
+recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
+ % Fun is called once with each chunk
+ % Fun({Length, Binary}, State)
+ % called with Length == 0 on the last time.
+ MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
+
+body_length(Req) ->
+ case header_value(Req, "Transfer-Encoding") of
+ undefined ->
+ case header_value(Req, "Content-Length") of
+ undefined -> undefined;
+ Length -> list_to_integer(Length)
+ end;
+ "chunked" -> chunked;
+ Unknown -> {unknown_transfer_encoding, Unknown}
+ end.
+
+body(Req) ->
+ couch_httpd:body(Req).
+
+json_body(Httpd) ->
+ ?JSON_DECODE(body(Httpd)).
+
+json_body_obj(Httpd) ->
+ case json_body(Httpd) of
+ {Props} -> {Props};
+ _Else ->
+ throw({bad_request, "Request body must be a JSON object"})
+ end.
+
+
+doc_etag(#doc{revs={Start, [DiskRev|_]}}) ->
+ "\"" ++ ?b2l(couch_doc:rev_to_str({Start, DiskRev})) ++ "\"".
+
+make_etag(Term) ->
+ <<SigInt:128/integer>> = erlang:md5(term_to_binary(Term)),
+ list_to_binary(io_lib:format("\"~.36B\"",[SigInt])).
+
+etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
+ etag_match(Req, binary_to_list(CurrentEtag));
+
+etag_match(Req, CurrentEtag) ->
+ EtagsToMatch = string:tokens(
+ chttpd:header_value(Req, "If-None-Match", ""), ", "),
+ lists:member(CurrentEtag, EtagsToMatch).
+
+etag_respond(Req, CurrentEtag, RespFun) ->
+ case etag_match(Req, CurrentEtag) of
+ true ->
+ % the client has this in their cache.
+ chttpd:send_response(Req, 304, [{"Etag", CurrentEtag}], <<>>);
+ false ->
+ % Run the function.
+ RespFun()
+ end.
+
+verify_is_server_admin(#httpd{user_ctx=#user_ctx{roles=Roles}}) ->
+ case lists:member(<<"_admin">>, Roles) of
+ true -> ok;
+ false -> throw({unauthorized, <<"You are not a server admin.">>})
+ end.
+
+start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Length) ->
+ couch_stats_collector:increment({httpd_status_codes, Code}),
+ Resp = MochiReq:start_response_length({Code, Headers ++ server_header() ++
+ couch_httpd_auth:cookie_auth_header(Req, Headers), Length}),
+ case MochiReq:get(method) of
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
+ end,
+ {ok, Resp}.
+
+send(Resp, Data) ->
+ Resp:send(Data),
+ {ok, Resp}.
+
+start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers) ->
+ couch_stats_collector:increment({httpd_status_codes, Code}),
+ Resp = MochiReq:respond({Code, Headers ++ server_header() ++
+ couch_httpd_auth:cookie_auth_header(Req, Headers), chunked}),
+ case MochiReq:get(method) of
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
+ end,
+ {ok, Resp}.
+
+send_chunk(Resp, Data) ->
+ Resp:write_chunk(Data),
+ {ok, Resp}.
+
+send_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) ->
+ couch_stats_collector:increment({httpd_status_codes, Code}),
+ if Code >= 400 ->
+ ?LOG_DEBUG("httpd ~p error response:~n ~s", [Code, Body]);
+ true -> ok
+ end,
+ {ok, MochiReq:respond({Code, Headers ++ server_header() ++
+ couch_httpd_auth:cookie_auth_header(Req, Headers), Body})}.
+
+send_method_not_allowed(Req, Methods) ->
+ send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>,
+ ?l2b("Only " ++ Methods ++ " allowed")).
+
+send_json(Req, Value) ->
+ send_json(Req, 200, Value).
+
+send_json(Req, Code, Value) ->
+ send_json(Req, Code, [], Value).
+
+send_json(Req, Code, Headers, Value) ->
+ couch_httpd:send_json(Req, Code, [reqid() | Headers], Value).
+
+start_json_response(Req, Code) ->
+ start_json_response(Req, Code, []).
+
+start_json_response(Req, Code, Headers) ->
+ couch_httpd:start_json_response(Req, Code, [reqid() | Headers]).
+
+end_json_response(Resp) ->
+ couch_httpd:end_json_response(Resp).
+
+start_delayed_json_response(Req, Code) ->
+ start_delayed_json_response(Req, Code, []).
+
+start_delayed_json_response(Req, Code, Headers) ->
+ start_delayed_json_response(Req, Code, Headers, "").
+
+start_delayed_json_response(Req, Code, Headers, FirstChunk) ->
+ {ok, #delayed_resp{
+ start_fun = fun start_json_response/3,
+ req = Req,
+ code = Code,
+ headers = Headers,
+ first_chunk = FirstChunk}}.
+
+start_delayed_chunked_response(Req, Code, Headers) ->
+ start_delayed_chunked_response(Req, Code, Headers, "").
+
+start_delayed_chunked_response(Req, Code, Headers, FirstChunk) ->
+ {ok, #delayed_resp{
+ start_fun = fun start_chunked_response/3,
+ req = Req,
+ code = Code,
+ headers = Headers,
+ first_chunk = FirstChunk}}.
+
+send_delayed_chunk(Resp, Chunk) ->
+ {ok, Resp1} = start_delayed_response(Resp),
+ send_chunk(Resp1, Chunk).
+
+send_delayed_last_chunk(Req) ->
+ send_delayed_chunk(Req, []).
+
+send_delayed_error(#httpd{}=Req, Reason) ->
+ {Code, ErrorStr, ReasonStr} = error_info(Reason),
+ send_error(Req, Code, ErrorStr, ReasonStr);
+send_delayed_error(#delayed_resp{req=Req}, Reason) ->
+ {Code, ErrorStr, ReasonStr} = error_info(Reason),
+ send_error(Req, Code, ErrorStr, ReasonStr);
+send_delayed_error(Resp, Reason) ->
+ throw({http_abort, Resp, Reason}).
+
+end_delayed_json_response(Resp) ->
+ {ok, Resp1} = start_delayed_response(Resp),
+ end_json_response(Resp1).
+
+get_delayed_req(#delayed_resp{req=#httpd{mochi_req=MochiReq}}) ->
+ MochiReq;
+get_delayed_req(Resp) ->
+ Resp:get(request).
+
+start_delayed_response(#delayed_resp{start_fun=StartFun, req=Req, code=Code,
+ headers=Headers, first_chunk=FirstChunk}) ->
+ {ok, Resp} = StartFun(Req, Code, Headers),
+ case FirstChunk of
+ "" -> {ok, Resp};
+ _ -> send_chunk(Resp, FirstChunk)
+ end;
+start_delayed_response(Resp) ->
+ {ok, Resp}.
+
+error_info({Error, Reason}) when is_list(Reason) ->
+ error_info({Error, couch_util:to_binary(Reason)});
+error_info(bad_request) ->
+ {400, <<"bad_request">>, <<>>};
+error_info({bad_request, Reason}) ->
+ {400, <<"bad_request">>, Reason};
+error_info({query_parse_error, Reason}) ->
+ {400, <<"query_parse_error">>, Reason};
+error_info(database_does_not_exist) ->
+ {404, <<"not_found">>, <<"Database does not exist.">>};
+error_info(not_found) ->
+ {404, <<"not_found">>, <<"missing">>};
+error_info({not_found, Reason}) ->
+ {404, <<"not_found">>, Reason};
+error_info({not_acceptable, Reason}) ->
+ {406, <<"not_acceptable">>, Reason};
+error_info(conflict) ->
+ {409, <<"conflict">>, <<"Document update conflict.">>};
+error_info({conflict, _}) ->
+ {409, <<"conflict">>, <<"Document update conflict.">>};
+error_info({forbidden, Msg}) ->
+ {403, <<"forbidden">>, Msg};
+error_info({forbidden, Error, Msg}) ->
+ {403, Error, Msg};
+error_info({unauthorized, Msg}) ->
+ {401, <<"unauthorized">>, Msg};
+error_info(file_exists) ->
+ {412, <<"file_exists">>, <<"The database could not be "
+ "created, the file already exists.">>};
+error_info({r_quorum_not_met, Reason}) ->
+ {412, <<"read_quorum_not_met">>, Reason};
+error_info({w_quorum_not_met, Reason}) ->
+ {500, <<"write_quorum_not_met">>, Reason};
+error_info({bad_ctype, Reason}) ->
+ {415, <<"bad_content_type">>, Reason};
+error_info(requested_range_not_satisfiable) ->
+ {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
+error_info({error, illegal_database_name}) ->
+ {400, <<"illegal_database_name">>, <<"Only lowercase characters (a-z), "
+ "digits (0-9), and any of the characters _, $, (, ), +, -, and / "
+ "are allowed">>};
+error_info({missing_stub, Reason}) ->
+ {412, <<"missing_stub">>, Reason};
+error_info(not_implemented) ->
+ {501, <<"not_implemented">>, <<"this feature is not yet implemented">>};
+error_info({Error, null}) ->
+ {500, couch_util:to_binary(Error), null};
+error_info({Error, Reason}) ->
+ {500, couch_util:to_binary(Error), couch_util:to_binary(Reason)};
+error_info({Error, nil, _Stack}) ->
+ error_info(Error);
+error_info({Error, Reason, _Stack}) ->
+ error_info({Error, Reason});
+error_info(Error) ->
+ {500, couch_util:to_binary(Error), null}.
+
+error_headers(#httpd{mochi_req=MochiReq}=Req, 401=Code, ErrorStr, ReasonStr) ->
+ % this is where the basic auth popup is triggered
+ case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
+ undefined ->
+ case couch_config:get("httpd", "WWW-Authenticate", nil) of
+ nil ->
+ % If the client is a browser and the basic auth popup isn't turned on
+ % redirect to the session page.
+ case ErrorStr of
+ <<"unauthorized">> ->
+ case couch_config:get("couch_httpd_auth", "authentication_redirect", nil) of
+ nil -> {Code, []};
+ AuthRedirect ->
+ case couch_config:get("couch_httpd_auth", "require_valid_user", "false") of
+ "true" ->
+ % send the browser popup header no matter what if we are require_valid_user
+ {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
+ _False ->
+ % if the accept header matches html, then do the redirect. else proceed as usual.
+ Accepts = case MochiReq:get_header_value("Accept") of
+ undefined ->
+ % According to the HTTP 1.1 spec, if the Accept
+ % header is missing, it means the client accepts
+ % all media types.
+ "html";
+ Else ->
+ Else
+ end,
+ case re:run(Accepts, "\\bhtml\\b",
+ [{capture, none}, caseless]) of
+ nomatch ->
+ {Code, []};
+ match ->
+ AuthRedirectBin = ?l2b(AuthRedirect),
+ % Redirect to the path the user requested, not
+ % the one that is used internally.
+ UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of
+ undefined -> MochiReq:get(path);
+ VHostPath -> VHostPath
+ end,
+ UrlReturn = ?l2b(couch_util:url_encode(UrlReturnRaw)),
+ UrlReason = ?l2b(couch_util:url_encode(ReasonStr)),
+ {302, [{"Location", couch_httpd:absolute_uri(Req, <<AuthRedirectBin/binary,"?return=",UrlReturn/binary,"&reason=",UrlReason/binary>>)}]}
+ end
+ end
+ end;
+ _Else ->
+ {Code, []}
+ end;
+ Type ->
+ {Code, [{"WWW-Authenticate", Type}]}
+ end;
+ Type ->
+ {Code, [{"WWW-Authenticate", Type}]}
+ end;
+error_headers(_, Code, _, _) ->
+ {Code, []}.
+
+send_error(_Req, {already_sent, Resp, _Error}) ->
+ {ok, Resp};
+
+send_error(Req, Error) ->
+ {Code, ErrorStr, ReasonStr} = error_info(Error),
+ {Code1, Headers} = error_headers(Req, Code, ErrorStr, ReasonStr),
+ send_error(Req, Code1, Headers, ErrorStr, ReasonStr, json_stack(Error)).
+
+send_error(Req, Code, ErrorStr, ReasonStr) ->
+ send_error(Req, Code, [], ErrorStr, ReasonStr, []).
+
+send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
+ send_error(Req, Code, Headers, ErrorStr, ReasonStr, []).
+
+send_error(Req, Code, Headers, ErrorStr, ReasonStr, Stack) ->
+ send_json(Req, Code, Headers,
+ {[{<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr} |
+ case Stack of [] -> []; _ -> [{stack, Stack}] end
+ ]}).
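+
+% The resulting body is a JSON object along the lines of (example values)
+%   {"error":"not_found","reason":"missing"}
+% with an additional "stack" member when a stacktrace was captured.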
+
+% give the option for list functions to output html or other raw errors
+send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
+ send_chunk(Resp, Reason),
+ send_chunk(Resp, []);
+
+send_chunked_error(Resp, Error) ->
+ {Code, ErrorStr, ReasonStr} = error_info(Error),
+ JsonError = {[{<<"code">>, Code},
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr} |
+ case json_stack(Error) of [] -> []; Stack -> [{stack, Stack}] end
+ ]},
+ send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
+ send_chunk(Resp, []).
+
+send_redirect(Req, Path) ->
+ Headers = [{"Location", chttpd:absolute_uri(Req, Path)}],
+ send_response(Req, 301, Headers, <<>>).
+
+server_header() ->
+ couch_httpd:server_header().
+
+reqid() ->
+ {"X-Couch-Request-ID", get(nonce)}.
+
+json_stack({_Error, _Reason, Stack}) ->
+ lists:map(fun({M,F,A0}) ->
+ A = if is_integer(A0) -> A0; is_list(A0) -> length(A0); true -> 0 end,
+ list_to_binary(io_lib:format("~s:~s/~B", [M,F,A]));
+ (_) ->
+ <<"bad entry in stacktrace">>
+ end, Stack);
+json_stack(_) ->
+ [].
diff --git a/deps/chttpd/src/chttpd_app.erl b/deps/chttpd/src/chttpd_app.erl
new file mode 100644
index 00000000..d7a5aef8
--- /dev/null
+++ b/deps/chttpd/src/chttpd_app.erl
@@ -0,0 +1,21 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_app).
+-behaviour(application).
+-export([start/2, stop/1]).
+
+start(_Type, StartArgs) ->
+ chttpd_sup:start_link(StartArgs).
+
+stop(_State) ->
+ ok.
diff --git a/deps/chttpd/src/chttpd_db.erl b/deps/chttpd/src/chttpd_db.erl
new file mode 100644
index 00000000..463aaa10
--- /dev/null
+++ b/deps/chttpd/src/chttpd_db.erl
@@ -0,0 +1,1264 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_db).
+-include_lib("couch/include/couch_db.hrl").
+
+-export([handle_request/1, handle_compact_req/2, handle_design_req/2,
+ db_req/2, couch_doc_open/4,handle_changes_req/2,
+ update_doc_result_to_json/1, update_doc_result_to_json/2,
+ handle_design_info_req/3, handle_view_cleanup_req/2]).
+
+-import(chttpd,
+ [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
+ start_json_response/2,send_chunk/2,end_json_response/1,
+ start_chunked_response/3, absolute_uri/2, send/2,
+ start_response_length/4]).
+
+-record(doc_query_args, {
+ options = [],
+ rev = nil,
+ open_revs = [],
+ update_type = interactive_edit,
+ atts_since = nil
+}).
+
+% Database request handlers
+handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
+ db_url_handlers=DbUrlHandlers}=Req)->
+ case {Method, RestParts} of
+ {'PUT', []} ->
+ create_db_req(Req, DbName);
+ {'DELETE', []} ->
+ % if we get ?rev=... the user is using a faulty script where the
+ % document id is empty by accident. Let them recover safely.
+ case couch_httpd:qs_value(Req, "rev", false) of
+ false -> delete_db_req(Req, DbName);
+ _Rev -> throw({bad_request,
+ "You tried to DELETE a database with a ?=rev parameter. "
+ ++ "Did you mean to DELETE a document instead?"})
+ end;
+ {_, []} ->
+ do_db_req(Req, fun db_req/2);
+ {_, [SecondPart|_]} ->
+ Handler = couch_util:get_value(SecondPart, DbUrlHandlers, fun db_req/2),
+ do_db_req(Req, Handler)
+ end.
+
+handle_changes_req(#httpd{method='GET'}=Req, Db) ->
+ #changes_args{filter=Raw, style=Style} = Args0 = parse_changes_query(Req),
+ ChangesArgs = Args0#changes_args{
+ filter = couch_changes:configure_filter(Raw, Style, Req, Db)
+ },
+ case ChangesArgs#changes_args.feed of
+ "normal" ->
+ T0 = now(),
+ {ok, Info} = fabric:get_db_info(Db),
+ Etag = chttpd:make_etag(Info),
+ DeltaT = timer:now_diff(now(), T0) / 1000,
+ couch_stats_collector:record({couchdb, dbinfo}, DeltaT),
+ chttpd:etag_respond(Req, Etag, fun() ->
+ fabric:changes(Db, fun changes_callback/2, {"normal", {"Etag",Etag}, Req},
+ ChangesArgs)
+ end);
+ Feed ->
+ % "longpoll" or "continuous"
+ fabric:changes(Db, fun changes_callback/2, {Feed, Req}, ChangesArgs)
+ end;
+handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+% callbacks for continuous feed (newline-delimited JSON Objects)
+changes_callback(start, {"continuous", Req}) ->
+ {ok, Resp} = chttpd:start_delayed_json_response(Req, 200),
+ {ok, {"continuous", Resp}};
+changes_callback({change, Change}, {"continuous", Resp}) ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]),
+ {ok, {"continuous", Resp1}};
+changes_callback({stop, EndSeq0}, {"continuous", Resp}) ->
+ EndSeq = case is_old_couch(Resp) of true -> 0; false -> EndSeq0 end,
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp,
+ [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]),
+ chttpd:end_delayed_json_response(Resp1);
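+%
+% A continuous feed thus arrives as newline-delimited JSON, e.g. (example
+% sequence numbers and revs):
+%   {"seq":3,"id":"mydoc","changes":[{"rev":"1-abc"}]}
+%   {"last_seq":3}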
+
+% callbacks for longpoll and normal (single JSON Object)
+changes_callback(start, {"normal", {"Etag", Etag}, Req}) ->
+ FirstChunk = "{\"results\":[\n",
+ {ok, Resp} = chttpd:start_delayed_json_response(Req, 200,
+ [{"Etag",Etag}], FirstChunk),
+ {ok, {"", Resp}};
+changes_callback(start, {_, Req}) ->
+ FirstChunk = "{\"results\":[\n",
+ {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [], FirstChunk),
+ {ok, {"", Resp}};
+changes_callback({change, Change}, {Prepend, Resp}) ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]),
+ {ok, {",\r\n", Resp1}};
+changes_callback({stop, EndSeq}, {_, Resp}) ->
+ {ok, Resp1} = case is_old_couch(Resp) of
+ true ->
+ chttpd:send_delayed_chunk(Resp, "\n],\n\"last_seq\":0}\n");
+ false ->
+ chttpd:send_delayed_chunk(Resp,
+ ["\n],\n\"last_seq\":", ?JSON_ENCODE(EndSeq), "}\n"])
+ end,
+ chttpd:end_delayed_json_response(Resp1);
+
+changes_callback(timeout, {Prepend, Resp}) ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "\n"),
+ {ok, {Prepend, Resp1}};
+changes_callback({error, Reason}, {_, Resp}) ->
+ chttpd:send_delayed_error(Resp, Reason).
+
+is_old_couch(Resp) ->
+ MochiReq = chttpd:get_delayed_req(Resp),
+ case MochiReq:get_header_value("user-agent") of
+ undefined ->
+ false;
+ "CouchDB/1.0.0" ->
+ true;
+ UserAgent ->
+ string:str(UserAgent, "CouchDB/0") > 0
+ end.
+
+handle_compact_req(Req, _) ->
+ Msg = <<"Compaction must be triggered on a per-shard basis in BigCouch">>,
+ couch_httpd:send_error(Req, 403, forbidden, Msg).
+
+handle_view_cleanup_req(Req, Db) ->
+ ok = fabric:cleanup_index_files(Db),
+ send_json(Req, 202, {[{ok, true}]}).
+
+handle_design_req(#httpd{
+ path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest],
+ design_url_handlers = DesignUrlHandlers
+ }=Req, Db) ->
+ case fabric:open_doc(Db, <<"_design/", Name/binary>>, []) of
+ {ok, DDoc} ->
+ Handler = couch_util:get_value(Action, DesignUrlHandlers,
+ fun bad_action_req/3),
+ Handler(Req, Db, DDoc);
+ Error ->
+ throw(Error)
+ end;
+
+handle_design_req(Req, Db) ->
+ db_req(Req, Db).
+
+bad_action_req(#httpd{path_parts=[_, _, Name|FileNameParts]}=Req, Db, _DDoc) ->
+ db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts).
+
+handle_design_info_req(#httpd{method='GET'}=Req, Db, #doc{id=Id} = DDoc) ->
+ {ok, GroupInfoList} = fabric:get_view_group_info(Db, DDoc),
+ send_json(Req, 200, {[
+ {name, Id},
+ {view_index, {GroupInfoList}}
+ ]});
+
+handle_design_info_req(Req, _Db, _DDoc) ->
+ send_method_not_allowed(Req, "GET").
+
+create_db_req(#httpd{}=Req, DbName) ->
+ couch_httpd:verify_is_server_admin(Req),
+ N = couch_httpd:qs_value(Req, "n", couch_config:get("cluster", "n", "3")),
+ Q = couch_httpd:qs_value(Req, "q", couch_config:get("cluster", "q", "8")),
+ DocUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
+ case fabric:create_db(DbName, [{n,N}, {q,Q}]) of
+ ok ->
+ send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
+ accepted ->
+ send_json(Req, 202, [{"Location", DocUrl}], {[{ok, true}]});
+ {error, file_exists} ->
+ chttpd:send_error(Req, file_exists);
+ Error ->
+ throw(Error)
+ end.
+
+delete_db_req(#httpd{}=Req, DbName) ->
+ couch_httpd:verify_is_server_admin(Req),
+ case fabric:delete_db(DbName, []) of
+ ok ->
+ send_json(Req, 200, {[{ok, true}]});
+ accepted ->
+ send_json(Req, 202, {[{ok, true}]});
+ Error ->
+ throw(Error)
+ end.
+
+do_db_req(#httpd{path_parts=[DbName|_], user_ctx=Ctx}=Req, Fun) ->
+ fabric:get_security(DbName, [{user_ctx,Ctx}]), % calls check_is_reader
+ Fun(Req, #db{name=DbName, user_ctx=Ctx}).
+
+db_req(#httpd{method='GET',path_parts=[DbName]}=Req, _Db) ->
+ % measure the time required to generate the etag, see if it's worth it
+ T0 = now(),
+ {ok, DbInfo} = fabric:get_db_info(DbName),
+ DeltaT = timer:now_diff(now(), T0) / 1000,
+ couch_stats_collector:record({couchdb, dbinfo}, DeltaT),
+ send_json(Req, {DbInfo});
+
+db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+
+ W = couch_httpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
+ Options = [{user_ctx,Ctx}, {w,W}],
+
+ Doc = couch_doc:from_json_obj(couch_httpd:json_body(Req)),
+ Doc2 = case Doc#doc.id of
+ <<"">> ->
+ Doc#doc{id=couch_uuids:new(), revs={0, []}};
+ _ ->
+ Doc
+ end,
+ DocId = Doc2#doc.id,
+ case couch_httpd:qs_value(Req, "batch") of
+ "ok" ->
+ % async_batching
+ spawn(fun() ->
+ case catch(fabric:update_doc(Db, Doc2, Options)) of
+ {ok, _} -> ok;
+ {accepted, _} -> ok;
+ Error ->
+ ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
+ end
+ end),
+
+ send_json(Req, 202, [], {[
+ {ok, true},
+ {id, DocId}
+ ]});
+ _Normal ->
+ % normal
+ DocUrl = absolute_uri(Req, [$/, DbName, $/, DocId]),
+ case fabric:update_doc(Db, Doc2, Options) of
+ {ok, NewRev} ->
+ HttpCode = 201;
+ {accepted, NewRev} ->
+ HttpCode = 202
+ end,
+ send_json(Req, HttpCode, [{"Location", DocUrl}], {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(NewRev)}
+ ]})
+ end;
+
+db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
+ send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
+ send_json(Req, 201, {[
+ {ok, true},
+ {instance_start_time, <<"0">>}
+ ]});
+
+db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req, Db) ->
+ couch_stats_collector:increment({httpd, bulk_requests}),
+ couch_httpd:validate_ctype(Req, "application/json"),
+ {JsonProps} = chttpd:json_body_obj(Req),
+ DocsArray = couch_util:get_value(<<"docs">>, JsonProps),
+ W = couch_httpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
+ case chttpd:header_value(Req, "X-Couch-Full-Commit") of
+ "true" ->
+ Options = [full_commit, {user_ctx,Ctx}, {w,W}];
+ "false" ->
+ Options = [delay_commit, {user_ctx,Ctx}, {w,W}];
+ _ ->
+ Options = [{user_ctx,Ctx}, {w,W}]
+ end,
+ case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
+ true ->
+ Docs = lists:map(
+ fun({ObjProps} = JsonObj) ->
+ Doc = couch_doc:from_json_obj(JsonObj),
+ validate_attachment_names(Doc),
+ Id = case Doc#doc.id of
+ <<>> -> couch_uuids:new();
+ Id0 -> Id0
+ end,
+ case couch_util:get_value(<<"_rev">>, ObjProps) of
+ undefined ->
+ Revs = {0, []};
+ Rev ->
+ {Pos, RevId} = couch_doc:parse_rev(Rev),
+ Revs = {Pos, [RevId]}
+ end,
+ Doc#doc{id=Id,revs=Revs}
+ end,
+ DocsArray),
+ Options2 =
+ case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
+ true -> [all_or_nothing|Options];
+ _ -> Options
+ end,
+ case fabric:update_docs(Db, Docs, Options2) of
+ {ok, Results} ->
+ % output the results
+ DocResults = lists:zipwith(fun update_doc_result_to_json/2,
+ Docs, Results),
+ send_json(Req, 201, DocResults);
+ {accepted, Results} ->
+ % output the results
+ DocResults = lists:zipwith(fun update_doc_result_to_json/2,
+ Docs, Results),
+ send_json(Req, 202, DocResults);
+ {aborted, Errors} ->
+ ErrorsJson =
+ lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 417, ErrorsJson)
+ end;
+ false ->
+ Docs = [couch_doc:from_json_obj(JsonObj) || JsonObj <- DocsArray],
+ [validate_attachment_names(D) || D <- Docs],
+ case fabric:update_docs(Db, Docs, [replicated_changes|Options]) of
+ {ok, Errors} ->
+ ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 201, ErrorsJson);
+ {accepted, Errors} ->
+ ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 202, ErrorsJson)
+ end
+ end;
+
+db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ {IdsRevs} = chttpd:json_body_obj(Req),
+ IdsRevs2 = [{Id, couch_doc:parse_revs(Revs)} || {Id, Revs} <- IdsRevs],
+ case fabric:purge_docs(Db, IdsRevs2) of
+ {ok, PurgeSeq, PurgedIdsRevs} ->
+ PurgedIdsRevs2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs}
+ <- PurgedIdsRevs],
+ send_json(Req, 200, {[
+ {<<"purge_seq">>, PurgeSeq},
+ {<<"purged">>, {PurgedIdsRevs2}}
+ ]});
+ Error ->
+ throw(Error)
+ end;
+
+db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='GET',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
+ all_docs_view(Req, Db, nil);
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_all_docs">>]}=Req, Db) ->
+ {Fields} = chttpd:json_body_obj(Req),
+ case couch_util:get_value(<<"keys">>, Fields, nil) of
+ Keys when is_list(Keys) ->
+ all_docs_view(Req, Db, Keys);
+ nil ->
+ all_docs_view(Req, Db, nil);
+ _ ->
+ throw({bad_request, "`keys` body member must be an array."})
+ end;
+
+db_req(#httpd{path_parts=[_,<<"_all_docs">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "GET,HEAD,POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
+ {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
+ {ok, Results} = fabric:get_missing_revs(Db, JsonDocIdRevs),
+ Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs, _} <- Results],
+ send_json(Req, {[
+ {missing_revs, {Results2}}
+ ]});
+
+db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
+ {JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
+ {ok, Results} = fabric:get_missing_revs(Db, JsonDocIdRevs),
+ Results2 =
+ lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
+ {Id,
+ {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
+ if PossibleAncestors == [] ->
+ [];
+ true ->
+ [{possible_ancestors,
+ couch_doc:revs_to_strs(PossibleAncestors)}]
+ end}}
+ end, Results),
+ send_json(Req, {Results2});
+
+db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "POST");
+
+db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>],user_ctx=Ctx}=Req,
+ Db) ->
+ SecObj = couch_httpd:json_body(Req),
+ ok = fabric:set_security(Db, SecObj, [{user_ctx,Ctx}]),
+ send_json(Req, {[{<<"ok">>, true}]});
+
+db_req(#httpd{method='GET',path_parts=[_,<<"_security">>],user_ctx=Ctx}=Req, Db) ->
+ send_json(Req, fabric:get_security(Db, [{user_ctx,Ctx}]));
+
+db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "PUT,GET");
+
+db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>],user_ctx=Ctx}=Req,
+ Db) ->
+ Limit = chttpd:json_body(Req),
+ ok = fabric:set_revs_limit(Db, Limit, [{user_ctx,Ctx}]),
+ send_json(Req, {[{<<"ok">>, true}]});
+
+db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
+ send_json(Req, fabric:get_revs_limit(Db));
+
+db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
+ send_method_not_allowed(Req, "PUT,GET");
+
+% vanilla CouchDB sends a 301 here, but we just handle the request
+db_req(#httpd{path_parts=[DbName,<<"_design/",Name/binary>>|Rest]}=Req, Db) ->
+ db_req(Req#httpd{path_parts=[DbName, <<"_design">>, Name | Rest]}, Db);
+
+% Special case to enable using an unencoded slash in the URL of design docs,
+% as slashes in document IDs must otherwise be URL encoded.
+db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
+ db_doc_req(Req, Db, <<"_design/",Name/binary>>);
+
+db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
+ db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
+
+
+% Special case to allow for accessing local documents without %2F
+% encoding the docid. Throws out requests that don't have the second
+% path part or that specify an attachment name.
+db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
+ throw({bad_request, <<"Invalid _local document id.">>});
+
+db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
+ throw({bad_request, <<"Invalid _local document id.">>});
+
+db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
+ db_doc_req(Req, Db, <<"_local/", Name/binary>>);
+
+db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
+ throw({bad_request, <<"_local documents do not accept attachments.">>});
+
+db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
+ db_doc_req(Req, Db, DocId);
+
+db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
+ db_attachment_req(Req, Db, DocId, FileNameParts).
+
+all_docs_view(Req, Db, Keys) ->
+ % measure the time required to generate the etag, see if it's worth it
+ T0 = now(),
+ {ok, Info} = fabric:get_db_info(Db),
+ Etag = couch_httpd:make_etag(Info),
+ DeltaT = timer:now_diff(now(), T0) / 1000,
+ couch_stats_collector:record({couchdb, dbinfo}, DeltaT),
+ QueryArgs = chttpd_view:parse_view_params(Req, Keys, map),
+ chttpd:etag_respond(Req, Etag, fun() ->
+ {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"Etag",Etag}]),
+ fabric:all_docs(Db, fun all_docs_callback/2, {nil, Resp}, QueryArgs)
+ end).
+
+all_docs_callback({total_and_offset, Total, Offset}, {_, Resp}) ->
+ Chunk = "{\"total_rows\":~p,\"offset\":~p,\"rows\":[\r\n",
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, io_lib:format(Chunk, [Total, Offset])),
+ {ok, {"", Resp1}};
+all_docs_callback({row, Row}, {Prepend, Resp}) ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, [Prepend, ?JSON_ENCODE(Row)]),
+ {ok, {",\r\n", Resp1}};
+all_docs_callback(complete, {_, Resp}) ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "\r\n]}"),
+ chttpd:end_delayed_json_response(Resp1);
+all_docs_callback({error, Reason}, {_, Resp}) ->
+ chttpd:send_delayed_error(Resp, Reason).
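+
+% Together these callbacks stream a body shaped like (example values):
+%   {"total_rows":100,"offset":0,"rows":[
+%   {"id":"mydoc","key":"mydoc","value":{"rev":"1-abc"}}
+%   ]}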
+
+db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
+ % check for the existence of the doc to handle the 404 case.
+ couch_doc_open(Db, DocId, nil, []),
+ case chttpd:qs_value(Req, "rev") of
+ undefined ->
+ Body = {[{<<"_deleted">>,true}]};
+ Rev ->
+ Body = {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}
+ end,
+ update_doc(Req, Db, DocId, couch_doc_from_req(Req, DocId, Body));
+
+db_doc_req(#httpd{method='GET'}=Req, Db, DocId) ->
+ #doc_query_args{
+ rev = Rev,
+ open_revs = Revs,
+ options = Options,
+ atts_since = AttsSince
+ } = parse_doc_query(Req),
+ case Revs of
+ [] ->
+ Options2 =
+ if AttsSince /= nil ->
+ [{atts_since, AttsSince}, attachments | Options];
+ true -> Options
+ end,
+ Doc = couch_doc_open(Db, DocId, Rev, Options2),
+ send_doc(Req, Doc, Options2);
+ _ ->
+ {ok, Results} = fabric:open_revs(Db, DocId, Revs, Options),
+ AcceptedTypes = case couch_httpd:header_value(Req, "Accept") of
+ undefined -> [];
+ AcceptHeader -> string:tokens(AcceptHeader, ", ")
+ end,
+ case lists:member("multipart/mixed", AcceptedTypes) of
+ false ->
+ {ok, Resp} = start_json_response(Req, 200),
+ send_chunk(Resp, "["),
+ % We loop through the docs. The first time through the separator
+ % is whitespace, then a comma on subsequent iterations.
+ lists:foldl(
+ fun(Result, AccSeparator) ->
+ case Result of
+ {ok, Doc} ->
+ JsonDoc = couch_doc:to_json_obj(Doc, Options),
+ Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
+ send_chunk(Resp, AccSeparator ++ Json);
+ {{not_found, missing}, RevId} ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
+ send_chunk(Resp, AccSeparator ++ Json)
+ end,
+ "," % AccSeparator now has a comma
+ end,
+ "", Results),
+ send_chunk(Resp, "]"),
+ end_json_response(Resp);
+ true ->
+ send_docs_multipart(Req, Results, Options)
+ end
+ end;
+
+db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
+ couch_httpd:validate_referer(Req),
+ couch_doc:validate_docid(DocId),
+ couch_httpd:validate_ctype(Req, "multipart/form-data"),
+
+ W = couch_httpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
+ Options = [{user_ctx,Ctx}, {w,W}],
+
+ Form = couch_httpd:parse_form(Req),
+ case proplists:is_defined("_doc", Form) of
+ true ->
+ Json = ?JSON_DECODE(couch_util:get_value("_doc", Form)),
+ Doc = couch_doc_from_req(Req, DocId, Json);
+ false ->
+ Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
+ {ok, [{ok, Doc}]} = fabric:open_revs(Db, DocId, [Rev], [])
+ end,
+ UpdatedAtts = [
+ #att{name=validate_attachment_name(Name),
+ type=list_to_binary(ContentType),
+ data=Content} ||
+ {Name, {ContentType, _}, Content} <-
+ proplists:get_all_values("_attachments", Form)
+ ],
+ #doc{atts=OldAtts} = Doc,
+ OldAtts2 = lists:flatmap(
+ fun(#att{name=OldName}=Att) ->
+ case [1 || A <- UpdatedAtts, A#att.name == OldName] of
+ [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
+ _ -> [] % the attachment was in the UpdatedAtts, drop it
+ end
+ end, OldAtts),
+ NewDoc = Doc#doc{
+ atts = UpdatedAtts ++ OldAtts2
+ },
+ case fabric:update_doc(Db, NewDoc, Options) of
+ {ok, NewRev} ->
+ HttpCode = 201;
+ {accepted, NewRev} ->
+ HttpCode = 202
+ end,
+ send_json(Req, HttpCode, [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewRev)) ++ "\""}], {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(NewRev)}
+ ]});
+
+db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
+ #doc_query_args{
+ update_type = UpdateType
+ } = parse_doc_query(Req),
+ couch_doc:validate_docid(DocId),
+
+ W = couch_httpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
+ Options = [{user_ctx,Ctx}, {w,W}],
+
+ Loc = absolute_uri(Req, [$/, Db#db.name, $/, DocId]),
+ RespHeaders = [{"Location", Loc}],
+ case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
+ ("multipart/related;" ++ _) = ContentType ->
+ {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(ContentType,
+ fun() -> receive_request_data(Req) end),
+ Doc = couch_doc_from_req(Req, DocId, Doc0),
+ try
+ Result = update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType),
+ WaitFun(),
+ Result
+ catch throw:Err ->
+ % Document rejected by a validate_doc_update function.
+ couch_doc:abort_multi_part_stream(Parser),
+ throw(Err)
+ end;
+ _Else ->
+ case couch_httpd:qs_value(Req, "batch") of
+ "ok" ->
+ % batch
+ Doc = couch_doc_from_req(Req, DocId, couch_httpd:json_body(Req)),
+
+ spawn(fun() ->
+ case catch(fabric:update_doc(Db, Doc, Options)) of
+ {ok, _} -> ok;
+ {accepted, _} -> ok;
+ Error ->
+ ?LOG_INFO("Batch doc error (~s): ~p",[DocId, Error])
+ end
+ end),
+ send_json(Req, 202, [], {[
+ {ok, true},
+ {id, DocId}
+ ]});
+ _Normal ->
+ % normal
+ Body = couch_httpd:json_body(Req),
+ Doc = couch_doc_from_req(Req, DocId, Body),
+ update_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType)
+ end
+ end;
+
+db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
+ SourceRev =
+ case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
+ missing_rev -> nil;
+ Rev -> Rev
+ end,
+ {TargetDocId, TargetRevs} = parse_copy_destination_header(Req),
+ % open old doc
+ Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
+ % save new doc
+ case fabric:update_doc(Db,
+ Doc#doc{id=TargetDocId, revs=TargetRevs}, [{user_ctx,Ctx}]) of
+ {ok, NewTargetRev} ->
+ HttpCode = 201;
+ {accepted, NewTargetRev} ->
+ HttpCode = 202
+ end,
+ % respond
+ {PartRes} = update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}),
+ send_json(Req, HttpCode,
+ [{"Etag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}],
+ {[{ok, true}] ++ PartRes});
+
+db_doc_req(Req, _Db, _DocId) ->
+ send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
+
+send_doc(Req, Doc, Options) ->
+ case Doc#doc.meta of
+ [] ->
+ DiskEtag = couch_httpd:doc_etag(Doc),
+ % output etag only when we have no meta
+ chttpd:etag_respond(Req, DiskEtag, fun() ->
+ send_doc_efficiently(Req, Doc, [{"Etag", DiskEtag}], Options)
+ end);
+ _ ->
+ send_doc_efficiently(Req, Doc, [], Options)
+ end.
+
+send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+send_doc_efficiently(Req, #doc{atts=Atts}=Doc, Headers, Options) ->
+ case lists:member(attachments, Options) of
+ true ->
+ AcceptedTypes = case couch_httpd:header_value(Req, "Accept") of
+ undefined -> [];
+ AcceptHeader -> string:tokens(AcceptHeader, ", ")
+ end,
+ case lists:member("multipart/related", AcceptedTypes) of
+ false ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+ true ->
+ Boundary = couch_uuids:random(),
+ JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
+ [attachments, follows|Options])),
+ {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
+ Boundary,JsonBytes, Atts, true),
+ CType = {<<"Content-Type">>, ContentType},
+ {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
+ couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
+ fun(Data) -> couch_httpd:send(Resp, Data) end, true)
+ end;
+ false ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
+ end.
+
+send_docs_multipart(Req, Results, Options1) ->
+ OuterBoundary = couch_uuids:random(),
+ InnerBoundary = couch_uuids:random(),
+ Options = [attachments, follows, att_encoding_info | Options1],
+ CType = {"Content-Type",
+ "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
+ {ok, Resp} = start_chunked_response(Req, 200, [CType]),
+ couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
+ lists:foreach(
+ fun({ok, #doc{atts=Atts}=Doc}) ->
+ JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
+ {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
+ InnerBoundary, JsonBytes, Atts, true),
+ couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
+ ContentType/binary, "\r\n\r\n">>),
+ couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
+ fun(Data) -> couch_httpd:send_chunk(Resp, Data)
+ end, true),
+ couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
+ ({{not_found, missing}, RevId}) ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE({[{"missing", RevStr}]}),
+ couch_httpd:send_chunk(Resp,
+ [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
+ Json,
+ <<"\r\n--", OuterBoundary/binary>>])
+ end, Results),
+ couch_httpd:send_chunk(Resp, <<"--">>),
+ couch_httpd:last_chunk(Resp).
+
+receive_request_data(Req) ->
+ receive_request_data(Req, chttpd:body_length(Req)).
+
+receive_request_data(Req, LenLeft) when LenLeft > 0 ->
+ Len = erlang:min(4096, LenLeft),
+ Data = chttpd:recv(Req, Len),
+ {Data, fun() -> receive_request_data(Req, LenLeft - iolist_size(Data)) end};
+receive_request_data(_Req, _) ->
+ throw(<<"expected more data">>).
+
+update_doc_result_to_json({{Id, Rev}, Error}) ->
+ {_Code, Err, Msg} = chttpd:error_info(Error),
+ {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
+ {error, Err}, {reason, Msg}]}.
+
+update_doc_result_to_json(#doc{id=DocId}, Result) ->
+ update_doc_result_to_json(DocId, Result);
+update_doc_result_to_json(DocId, {ok, NewRev}) ->
+ {[{id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
+update_doc_result_to_json(DocId, {accepted, NewRev}) ->
+ {[{id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}, {accepted, true}]};
+update_doc_result_to_json(DocId, Error) ->
+ {_Code, ErrorStr, Reason} = chttpd:error_info(Error),
+ {[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
+
+
+update_doc(Req, Db, DocId, Json) ->
+ update_doc(Req, Db, DocId, Json, []).
+
+update_doc(Req, Db, DocId, Doc, Headers) ->
+ update_doc(Req, Db, DocId, Doc, Headers, interactive_edit).
+
+update_doc(#httpd{user_ctx=Ctx} = Req, Db, DocId, #doc{deleted=Deleted}=Doc,
+ Headers, UpdateType) ->
+ W = couch_httpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
+ Options =
+ case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
+ "true" ->
+ [full_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
+ "false" ->
+ [delay_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
+ _ ->
+ [UpdateType, {user_ctx,Ctx}, {w,W}]
+ end,
+ {_, Ref} = spawn_monitor(fun() -> exit(fabric:update_doc(Db, Doc, Options)) end),
+ Result = receive {'DOWN', Ref, _, _, Res} -> Res end,
+ case Result of
+ {{nocatch, Exception}, _Reason} ->
+        % Exceptions thrown in the spawned process come back as exit
+        % reasons; rethrow them here.
+ throw(Exception);
+ _ ->
+ ok
+ end,
+
+ case Result of
+ {ok, NewRev} ->
+ Accepted = false;
+ {accepted, NewRev} ->
+ Accepted = true
+ end,
+ NewRevStr = couch_doc:rev_to_str(NewRev),
+ ResponseHeaders = [{"Etag", <<"\"", NewRevStr/binary, "\"">>} | Headers],
+ case {Accepted, Deleted} of
+ {true, _} ->
+ HttpCode = 202;
+ {false, true} ->
+ HttpCode = 200;
+ {false, false} ->
+ HttpCode = 201
+ end,
+ send_json(Req, HttpCode, ResponseHeaders, {[
+ {ok, true},
+ {id, DocId},
+ {rev, NewRevStr}
+ ]}).
+
+couch_doc_from_req(Req, DocId, #doc{revs=Revs} = Doc) ->
+ validate_attachment_names(Doc),
+ ExplicitDocRev =
+ case Revs of
+ {Start,[RevId|_]} -> {Start, RevId};
+ _ -> undefined
+ end,
+ case extract_header_rev(Req, ExplicitDocRev) of
+ missing_rev ->
+ Revs2 = {0, []};
+ ExplicitDocRev ->
+ Revs2 = Revs;
+ {Pos, Rev} ->
+ Revs2 = {Pos, [Rev]}
+ end,
+ Doc#doc{id=DocId, revs=Revs2};
+couch_doc_from_req(Req, DocId, Json) ->
+ couch_doc_from_req(Req, DocId, couch_doc:from_json_obj(Json)).
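+
+% couch_doc_from_req/3 reconciles the rev embedded in the doc body with
+% the rev from the If-Match header (see extract_header_rev/2); when
+% neither is present the doc starts a new edit branch with revs = {0, []}.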
+
+
+% Useful for debugging
+% couch_doc_open(Db, DocId) ->
+% couch_doc_open(Db, DocId, nil, []).
+
+couch_doc_open(Db, DocId, Rev, Options) ->
+ case Rev of
+ nil -> % open most recent rev
+ case fabric:open_doc(Db, DocId, Options) of
+ {ok, Doc} ->
+ Doc;
+ Error ->
+ throw(Error)
+ end;
+ _ -> % open a specific rev (deletions come back as stubs)
+ case fabric:open_revs(Db, DocId, [Rev], Options) of
+ {ok, [{ok, Doc}]} ->
+ Doc;
+ {ok, [{{not_found, missing}, Rev}]} ->
+ throw(not_found);
+ {ok, [Else]} ->
+ throw(Else)
+ end
+ end.
+
+% Attachment request handlers
+
+db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) ->
+ FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1,
+ FileNameParts),"/")),
+ #doc_query_args{
+ rev=Rev,
+ options=Options
+ } = parse_doc_query(Req),
+ #doc{
+ atts=Atts
+ } = Doc = couch_doc_open(Db, DocId, Rev, Options),
+ case [A || A <- Atts, A#att.name == FileName] of
+ [] ->
+ throw({not_found, "Document is missing attachment"});
+ [#att{type=Type, encoding=Enc, disk_len=DiskLen, att_len=AttLen}=Att] ->
+ Etag = chttpd:doc_etag(Doc),
+ ReqAcceptsAttEnc = lists:member(
+ atom_to_list(Enc),
+ couch_httpd:accepted_encodings(Req)
+ ),
+ Headers = [
+ {"ETag", Etag},
+ {"Cache-Control", "must-revalidate"},
+ {"Content-Type", binary_to_list(Type)}
+ ] ++ case ReqAcceptsAttEnc of
+ true when Enc =/= identity ->
+            % RFC 2616 says that the 'identity' encoding should not be used
+            % in the Content-Encoding header
+ [{"Content-Encoding", atom_to_list(Enc)}];
+ _ ->
+ []
+ end ++ case Enc of
+ identity ->
+ [{"Accept-Ranges", "bytes"}];
+ _ ->
+ [{"Accept-Ranges", "none"}]
+ end,
+ Len = case {Enc, ReqAcceptsAttEnc} of
+ {identity, _} ->
+ % stored and served in identity form
+ DiskLen;
+ {_, false} when DiskLen =/= AttLen ->
+ % Stored encoded, but client doesn't accept the encoding we used,
+ % so we need to decode on the fly. DiskLen is the identity length
+ % of the attachment.
+ DiskLen;
+ {_, true} ->
+ % Stored and served encoded. AttLen is the encoded length.
+ AttLen;
+ _ ->
+ % We received an encoded attachment and stored it as such, so we
+ % don't know the identity length. The client doesn't accept the
+ % encoding, and since we cannot serve a correct Content-Length
+ % header we'll fall back to a chunked response.
+ undefined
+ end,
+ AttFun = case ReqAcceptsAttEnc of
+ false ->
+ fun couch_doc:att_foldl_decode/3;
+ true ->
+ fun couch_doc:att_foldl/3
+ end,
+ chttpd:etag_respond(
+ Req,
+ Etag,
+ fun() ->
+ case Len of
+ undefined ->
+ {ok, Resp} = start_chunked_response(Req, 200, Headers),
+ AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
+ couch_httpd:last_chunk(Resp);
+ _ ->
+ Ranges = parse_ranges(MochiReq:get(range), Len),
+ case {Enc, Ranges} of
+ {identity, [{From, To}]} ->
+ Headers1 = [{<<"Content-Range">>, make_content_range(From, To, Len)}]
+ ++ Headers,
+ {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1),
+ couch_doc:range_att_foldl(Att, From, To + 1,
+ fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp});
+ {identity, Ranges} when is_list(Ranges) ->
+ send_ranges_multipart(Req, Type, Len, Att, Ranges);
+ _ ->
+ Headers1 = Headers ++
+ if Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
+ [{"Content-MD5", base64:encode(Att#att.md5)}];
+ true ->
+ []
+ end,
+ {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
+ AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
+ end
+ end
+ end
+ )
+ end;
+
+
+db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNameParts)
+ when (Method == 'PUT') or (Method == 'DELETE') ->
+ FileName = validate_attachment_name(
+ mochiweb_util:join(
+ lists:map(fun binary_to_list/1,
+ FileNameParts),"/")),
+
+ NewAtt = case Method of
+ 'DELETE' ->
+ [];
+ _ ->
+ [#att{
+ name=FileName,
+ type = case couch_httpd:header_value(Req,"Content-Type") of
+ undefined ->
+                % We could throw an error here, or guess the type from the
+                % FileName. For now just fall back to a default.
+ <<"application/octet-stream">>;
+ CType ->
+ list_to_binary(CType)
+ end,
+ data = fabric:att_receiver(Req, chttpd:body_length(Req)),
+ att_len = case couch_httpd:header_value(Req,"Content-Length") of
+ undefined ->
+ undefined;
+ Length ->
+ list_to_integer(Length)
+ end,
+ md5 = get_md5_header(Req),
+ encoding = case string:to_lower(string:strip(
+ couch_httpd:header_value(Req,"Content-Encoding","identity")
+ )) of
+ "identity" ->
+ identity;
+ "gzip" ->
+ gzip;
+ _ ->
+ throw({
+ bad_ctype,
+ "Only gzip and identity content-encodings are supported"
+ })
+ end
+ }]
+ end,
+
+ Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
+ missing_rev -> % make the new doc
+ couch_doc:validate_docid(DocId),
+ #doc{id=DocId};
+ Rev ->
+ case fabric:open_revs(Db, DocId, [Rev], []) of
+ {ok, [{ok, Doc0}]} -> Doc0;
+ {ok, [Error]} -> throw(Error)
+ end
+ end,
+
+ #doc{atts=Atts, revs = {Pos, Revs}} = Doc,
+ DocEdited = Doc#doc{
+ % prune revision list as a workaround for key tree bug (COUCHDB-902)
+ revs = {Pos, case Revs of [] -> []; [Hd|_] -> [Hd] end},
+ atts = NewAtt ++ [A || A <- Atts, A#att.name /= FileName]
+ },
+ case fabric:update_doc(Db, DocEdited, [{user_ctx,Ctx}]) of
+ {ok, UpdatedRev} ->
+ HttpCode = 201;
+ {accepted, UpdatedRev} ->
+ HttpCode = 202
+ end,
+ erlang:put(mochiweb_request_recv, true),
+ #db{name=DbName} = Db,
+
+ {Status, Headers} = case Method of
+ 'DELETE' ->
+ {200, []};
+ _ ->
+ {HttpCode, [{"Location", absolute_uri(Req, [$/, DbName, $/, DocId, $/,
+ FileName])}]}
+ end,
+ send_json(Req,Status, Headers, {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(UpdatedRev)}
+ ]});
+
+db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
+ send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
+
+send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
+ Boundary = couch_uuids:random(),
+ CType = {"Content-Type",
+ "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
+ {ok, Resp} = start_chunked_response(Req, 206, [CType]),
+ couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
+ lists:foreach(fun({From, To}) ->
+ ContentRange = make_content_range(From, To, Len),
+ couch_httpd:send_chunk(Resp,
+ <<"\r\nContent-Type: ", ContentType/binary, "\r\n",
+ "Content-Range: ", ContentRange/binary, "\r\n",
+ "\r\n">>),
+ couch_doc:range_att_foldl(Att, From, To + 1,
+ fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
+ couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
+ end, Ranges),
+ couch_httpd:send_chunk(Resp, <<"--">>),
+ couch_httpd:last_chunk(Resp),
+ {ok, Resp}.
+
+parse_ranges(undefined, _Len) ->
+ undefined;
+parse_ranges(fail, _Len) ->
+ undefined;
+parse_ranges(Ranges, Len) ->
+ parse_ranges(Ranges, Len, []).
+
+parse_ranges([], _Len, Acc) ->
+ lists:reverse(Acc);
+parse_ranges([{From, To}|_], _Len, _Acc)
+ when is_integer(From) andalso is_integer(To) andalso To < From ->
+ throw(requested_range_not_satisfiable);
+parse_ranges([{From, To}|Rest], Len, Acc)
+ when is_integer(To) andalso To >= Len ->
+ parse_ranges([{From, Len-1}] ++ Rest, Len, Acc);
+parse_ranges([{none, To}|Rest], Len, Acc) ->
+ parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
+parse_ranges([{From, none}|Rest], Len, Acc) ->
+ parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
+parse_ranges([{From,To}|Rest], Len, Acc) ->
+ parse_ranges(Rest, Len, [{From, To}] ++ Acc).
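+
+% A few illustrative evaluations, with Len = 100:
+%
+%   parse_ranges([{0,49}], 100)    -> [{0,49}]
+%   parse_ranges([{none,30}], 100) -> [{70,99}]  (suffix range)
+%   parse_ranges([{50,none}], 100) -> [{50,99}]  (open-ended range)
+%   parse_ranges([{0,500}], 100)   -> [{0,99}]   (clamped to Len-1)
+%   parse_ranges([{50,10}], 100)   throws requested_range_not_satisfiable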
+
+make_content_range(From, To, Len) ->
+ ?l2b(io_lib:format("bytes ~B-~B/~B", [From, To, Len])).
+
+get_md5_header(Req) ->
+ ContentMD5 = couch_httpd:header_value(Req, "Content-MD5"),
+ Length = couch_httpd:body_length(Req),
+ Trailer = couch_httpd:header_value(Req, "Trailer"),
+ case {ContentMD5, Length, Trailer} of
+ _ when is_list(ContentMD5) orelse is_binary(ContentMD5) ->
+ base64:decode(ContentMD5);
+ {_, chunked, undefined} ->
+ <<>>;
+ {_, chunked, _} ->
+ case re:run(Trailer, "\\bContent-MD5\\b", [caseless]) of
+ {match, _} ->
+ md5_in_footer;
+ _ ->
+ <<>>
+ end;
+ _ ->
+ <<>>
+ end.
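+
+% get_md5_header/1 returns the decoded Content-MD5 value when the header
+% is present, the atom md5_in_footer when a chunked request promises
+% Content-MD5 in its Trailer, and <<>> otherwise.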
+
+parse_doc_query(Req) ->
+ lists:foldl(fun({Key,Value}, Args) ->
+ case {Key, Value} of
+ {"attachments", "true"} ->
+ Options = [attachments | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"meta", "true"} ->
+ Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"revs", "true"} ->
+ Options = [revs | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"local_seq", "true"} ->
+ Options = [local_seq | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"revs_info", "true"} ->
+ Options = [revs_info | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"conflicts", "true"} ->
+ Options = [conflicts | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"deleted_conflicts", "true"} ->
+ Options = [deleted_conflicts | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"rev", Rev} ->
+ Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
+ {"open_revs", "all"} ->
+ Args#doc_query_args{open_revs=all};
+ {"open_revs", RevsJsonStr} ->
+ JsonArray = ?JSON_DECODE(RevsJsonStr),
+ Args#doc_query_args{open_revs=[couch_doc:parse_rev(Rev) || Rev <- JsonArray]};
+ {"atts_since", RevsJsonStr} ->
+ JsonArray = ?JSON_DECODE(RevsJsonStr),
+ Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
+ {"new_edits", "false"} ->
+ Args#doc_query_args{update_type=replicated_changes};
+ {"new_edits", "true"} ->
+ Args#doc_query_args{update_type=interactive_edit};
+ {"att_encoding_info", "true"} ->
+ Options = [att_encoding_info | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"r", R} ->
+ Options = [{r,R} | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ {"w", W} ->
+ Options = [{w,W} | Args#doc_query_args.options],
+ Args#doc_query_args{options=Options};
+ _Else -> % unknown key value pair, ignore.
+ Args
+ end
+ end, #doc_query_args{}, chttpd:qs(Req)).
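+
+% For example, a request with ?rev=1-abc&attachments=true parses to
+% (illustrative rev value):
+%
+%   #doc_query_args{rev = {1, <<"abc">>}, options = [attachments]}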
+
+parse_changes_query(Req) ->
+ lists:foldl(fun({Key, Value}, Args) ->
+ case {Key, Value} of
+ {"feed", _} ->
+ Args#changes_args{feed=Value};
+ {"descending", "true"} ->
+ Args#changes_args{dir=rev};
+ {"since", _} ->
+ Args#changes_args{since=Value};
+ {"limit", _} ->
+ Args#changes_args{limit=list_to_integer(Value)};
+ {"style", _} ->
+ Args#changes_args{style=list_to_existing_atom(Value)};
+ {"heartbeat", "true"} ->
+ Args#changes_args{heartbeat=true};
+ {"heartbeat", _} ->
+ Args#changes_args{heartbeat=list_to_integer(Value)};
+ {"timeout", _} ->
+ Args#changes_args{timeout=list_to_integer(Value)};
+ {"include_docs", "true"} ->
+ Args#changes_args{include_docs=true};
+ {"conflicts", "true"} ->
+ Args#changes_args{conflicts=true};
+ {"filter", _} ->
+ Args#changes_args{filter=Value};
+ _Else -> % unknown key value pair, ignore.
+ Args
+ end
+ end, #changes_args{}, couch_httpd:qs(Req)).
+
+extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
+ extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
+extract_header_rev(Req, ExplicitRev) ->
+ Etag = case chttpd:header_value(Req, "If-Match") of
+ undefined -> undefined;
+ Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
+ end,
+ case {ExplicitRev, Etag} of
+ {undefined, undefined} -> missing_rev;
+ {_, undefined} -> ExplicitRev;
+ {undefined, _} -> Etag;
+ _ when ExplicitRev == Etag -> Etag;
+ _ ->
+ throw({bad_request, "Document rev and etag have different values"})
+ end.
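+
+% Rev resolution precedence: the explicit rev wins when If-Match is
+% absent, the If-Match etag wins when no explicit rev is given, the two
+% must agree when both are present, and missing_rev is returned when
+% neither is set.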
+
+
+parse_copy_destination_header(Req) ->
+ Destination = chttpd:header_value(Req, "Destination"),
+ case re:run(Destination, "\\?", [{capture, none}]) of
+ nomatch ->
+ {list_to_binary(Destination), {0, []}};
+ match ->
+ [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
+ [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
+ {Pos, RevId} = couch_doc:parse_rev(Rev),
+ {list_to_binary(DocId), {Pos, [RevId]}}
+ end.
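+
+% For example (illustrative):
+%
+%   Destination: newdoc            -> {<<"newdoc">>, {0, []}}
+%   Destination: newdoc?rev=1-abc  -> {<<"newdoc">>, {1, [<<"abc">>]}}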
+
+validate_attachment_names(Doc) ->
+ lists:foreach(fun(#att{name=Name}) ->
+ validate_attachment_name(Name)
+ end, Doc#doc.atts).
+
+validate_attachment_name(Name) when is_list(Name) ->
+ validate_attachment_name(list_to_binary(Name));
+validate_attachment_name(<<"_",_/binary>>) ->
+ throw({bad_request, <<"Attachment name can't start with '_'">>});
+validate_attachment_name(Name) ->
+ case is_valid_utf8(Name) of
+ true -> Name;
+ false -> throw({bad_request, <<"Attachment name is not UTF-8 encoded">>})
+ end.
+
+%% borrowed from mochijson2:json_bin_is_safe()
+is_valid_utf8(<<>>) ->
+ true;
+is_valid_utf8(<<C, Rest/binary>>) ->
+ case C of
+ $\" ->
+ false;
+ $\\ ->
+ false;
+ $\b ->
+ false;
+ $\f ->
+ false;
+ $\n ->
+ false;
+ $\r ->
+ false;
+ $\t ->
+ false;
+ C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
+ false;
+ C when C < 16#7f ->
+ is_valid_utf8(Rest);
+ _ ->
+ false
+ end.
diff --git a/deps/chttpd/src/chttpd_external.erl b/deps/chttpd/src/chttpd_external.erl
new file mode 100644
index 00000000..df27a299
--- /dev/null
+++ b/deps/chttpd/src/chttpd_external.erl
@@ -0,0 +1,174 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_external).
+
+-export([handle_external_req/2, handle_external_req/3]).
+-export([send_external_response/2, json_req_obj/2, json_req_obj/3]).
+-export([default_or_content_type/2, parse_external_response/1]).
+-export([handle_search_req/2]).
+
+-import(chttpd,[send_error/4]).
+
+-include_lib("couch/include/couch_db.hrl").
+
+handle_search_req(Req, Db) ->
+ process_external_req(Req, Db, <<"search">>).
+
+% handle_external_req/2
+% for the old type of config usage:
+% _external = {chttpd_external, handle_external_req}
+% with urls like
+% /db/_external/action/design/name
+handle_external_req(#httpd{
+ path_parts=[_DbName, _External, UrlName | _Path]
+ }=HttpReq, Db) ->
+ process_external_req(HttpReq, Db, UrlName);
+handle_external_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
+ send_error(Req, 404, <<"external_server_error">>, <<"No server name specified.">>);
+handle_external_req(Req, _) ->
+ send_error(Req, 404, <<"external_server_error">>, <<"Broken assumption">>).
+
+% handle_external_req/3
+% for this type of config usage:
+% _action = {chttpd_external, handle_external_req, <<"action">>}
+% with urls like
+% /db/_action/design/name
+handle_external_req(HttpReq, Db, Name) ->
+ process_external_req(HttpReq, Db, Name).
+
+process_external_req(HttpReq, Db, Name) ->
+
+ Response = couch_external_manager:execute(binary_to_list(Name),
+ json_req_obj(HttpReq, Db)),
+
+ case Response of
+ {unknown_external_server, Msg} ->
+ send_error(HttpReq, 404, <<"external_server_error">>, Msg);
+ _ ->
+ send_external_response(HttpReq, Response)
+ end.
+
+json_req_obj(Req, Db) -> json_req_obj(Req, Db, null).
+json_req_obj(#httpd{mochi_req=Req,
+ method=Method,
+ path_parts=Path,
+ req_body=ReqBody
+ }, Db, DocId) ->
+ Body = case ReqBody of
+ undefined -> Req:recv_body();
+ Else -> Else
+ end,
+ ParsedForm = case Req:get_primary_header_value("content-type") of
+ "application/x-www-form-urlencoded" ++ _ when Method =:= 'POST' ->
+ mochiweb_util:parse_qs(Body);
+ _ ->
+ []
+ end,
+ Headers = Req:get(headers),
+ Hlist = mochiweb_headers:to_list(Headers),
+ {ok, Info} = fabric:get_db_info(Db),
+
+    % assemble the JSON request object passed to the external process
+ {[{<<"info">>, {Info}},
+ {<<"uuid">>, couch_uuids:new()},
+ {<<"id">>, DocId},
+ {<<"method">>, Method},
+ {<<"path">>, Path},
+ {<<"query">>, json_query_keys(to_json_terms(Req:parse_qs()))},
+ {<<"headers">>, to_json_terms(Hlist)},
+ {<<"body">>, Body},
+ {<<"peer">>, ?l2b(Req:get(peer))},
+ {<<"form">>, to_json_terms(ParsedForm)},
+ {<<"cookie">>, to_json_terms(Req:parse_cookie())},
+ {<<"userCtx">>, couch_util:json_user_ctx(Db)}]}.
+
+to_json_terms(Data) ->
+ to_json_terms(Data, []).
+to_json_terms([], Acc) ->
+ {lists:reverse(Acc)};
+to_json_terms([{Key, Value} | Rest], Acc) when is_atom(Key) ->
+ to_json_terms(Rest, [{list_to_binary(atom_to_list(Key)), list_to_binary(Value)} | Acc]);
+to_json_terms([{Key, Value} | Rest], Acc) ->
+ to_json_terms(Rest, [{list_to_binary(Key), list_to_binary(Value)} | Acc]).
+
+json_query_keys({Json}) ->
+ json_query_keys(Json, []).
+json_query_keys([], Acc) ->
+ {lists:reverse(Acc)};
+json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
+ json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)}|Acc]);
+json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
+ json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)}|Acc]);
+json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
+ json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)}|Acc]);
+json_query_keys([Term | Rest], Acc) ->
+ json_query_keys(Rest, [Term|Acc]).
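+
+% json_query_keys/1 JSON-decodes only the key-like query parameters,
+% e.g. (illustrative):
+%
+%   json_query_keys({[{<<"startkey">>, <<"[\"a\"]">>}, {<<"limit">>, <<"10">>}]})
+%   -> {[{<<"startkey">>, [<<"a">>]}, {<<"limit">>, <<"10">>}]}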
+
+send_external_response(#httpd{mochi_req=MochiReq}, Response) ->
+ #extern_resp_args{
+ code = Code,
+ data = Data,
+ ctype = CType,
+ headers = Headers
+ } = parse_external_response(Response),
+ Resp = MochiReq:respond({Code,
+ default_or_content_type(CType, Headers ++ chttpd:server_header()), Data}),
+ {ok, Resp}.
+
+parse_external_response({Response}) ->
+ lists:foldl(fun({Key,Value}, Args) ->
+ case {Key, Value} of
+ {"", _} ->
+ Args;
+ {<<"code">>, Value} ->
+ Args#extern_resp_args{code=Value};
+ {<<"stop">>, true} ->
+ Args#extern_resp_args{stop=true};
+ {<<"json">>, Value} ->
+ Args#extern_resp_args{
+ data=?JSON_ENCODE(Value),
+ ctype="application/json"};
+ {<<"body">>, Value} ->
+ Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
+ {<<"base64">>, Value} ->
+ Args#extern_resp_args{
+ data=base64:decode(Value),
+ ctype="application/binary"
+ };
+ {<<"headers">>, {Headers}} ->
+ NewHeaders = lists:map(fun({Header, HVal}) ->
+ {binary_to_list(Header), binary_to_list(HVal)}
+ end, Headers),
+ Args#extern_resp_args{headers=NewHeaders};
+ _ -> % unknown key
+ Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
+ throw({external_response_error, Msg})
+ end
+ end, #extern_resp_args{}, Response).
+
+default_or_content_type(DefaultContentType, Headers) ->
+ {ContentType, OtherHeaders} = lists:partition(
+ fun({HeaderName, _}) ->
+ HeaderName == "Content-Type"
+ end, Headers),
+
+ % XXX: What happens if we were passed multiple content types? We add another?
+ case ContentType of
+ [{"Content-Type", SetContentType}] ->
+ TrueContentType = SetContentType;
+ _Else ->
+ TrueContentType = DefaultContentType
+ end,
+
+ HeadersWithContentType = lists:append(OtherHeaders, [{"Content-Type", TrueContentType}]),
+ HeadersWithContentType.
diff --git a/deps/chttpd/src/chttpd_misc.erl b/deps/chttpd/src/chttpd_misc.erl
new file mode 100644
index 00000000..1d0556d5
--- /dev/null
+++ b/deps/chttpd/src/chttpd_misc.erl
@@ -0,0 +1,283 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_misc).
+
+-export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2,
+ handle_all_dbs_req/1,handle_replicate_req/1,handle_restart_req/1,
+ handle_uuids_req/1,handle_config_req/1,handle_log_req/1,
+ handle_task_status_req/1,handle_sleep_req/1,handle_welcome_req/1,
+ handle_utils_dir_req/1, handle_favicon_req/1, handle_system_req/1]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+-import(chttpd,
+ [send_json/2,send_json/3,send_method_not_allowed/2,
+ send_chunk/2,start_chunked_response/3]).
+
+% httpd global handlers
+
+handle_welcome_req(Req) ->
+ handle_welcome_req(Req, <<"Welcome">>).
+
+handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
+ send_json(Req, {[
+ {couchdb, WelcomeMessage},
+ {version, list_to_binary(couch:version())},
+ {bigcouch, get_version()}
+ ]});
+handle_welcome_req(Req, _) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+get_version() ->
+ Releases = release_handler:which_releases(),
+ Version = case [V || {"bigcouch", V, _, current} <- Releases] of
+ [] ->
+ case [V || {"bigcouch", V, _, permanent} <- Releases] of
+ [] ->
+ "dev";
+ [Permanent] ->
+ Permanent
+ end;
+ [Current] ->
+ Current
+ end,
+ list_to_binary(Version).
+
+handle_favicon_req(Req) ->
+ handle_favicon_req(Req, couch_config:get("chttpd", "docroot")).
+
+handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
+ chttpd:serve_file(Req, "favicon.ico", DocumentRoot);
+handle_favicon_req(Req, _) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+handle_utils_dir_req(Req) ->
+ handle_utils_dir_req(Req, couch_config:get("chttpd", "docroot")).
+
+handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
+ "/" ++ UrlPath = chttpd:path(Req),
+ case chttpd:partition(UrlPath) of
+ {_ActionKey, "/", RelativePath} ->
+ % GET /_utils/path or GET /_utils/
+ chttpd:serve_file(Req, RelativePath, DocumentRoot);
+ {_ActionKey, "", _RelativePath} ->
+ % GET /_utils
+ RedirectPath = chttpd:path(Req) ++ "/",
+ chttpd:send_redirect(Req, RedirectPath)
+ end;
+handle_utils_dir_req(Req, _) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+handle_sleep_req(#httpd{method='GET'}=Req) ->
+ Time = list_to_integer(chttpd:qs_value(Req, "time")),
+ receive snicklefart -> ok after Time -> ok end,
+ send_json(Req, {[{ok, true}]});
+handle_sleep_req(Req) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+handle_all_dbs_req(#httpd{method='GET'}=Req) ->
+ ShardDbName = couch_config:get("mem3", "shard_db", "dbs"),
+    %% shard_db is not itself sharded, but mem3:shards treats it as an edge
+    %% case so it can be pushed through fabric
+ {ok, Info} = fabric:get_db_info(ShardDbName),
+ Etag = couch_httpd:make_etag({Info}),
+ chttpd:etag_respond(Req, Etag, fun() ->
+ {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"Etag",Etag}]),
+ fabric:all_docs(ShardDbName, fun all_dbs_callback/2,
+ {nil, Resp}, #view_query_args{})
+ end);
+handle_all_dbs_req(Req) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+all_dbs_callback({total_and_offset, _Total, _Offset}, {_, Resp}) ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "["),
+ {ok, {"", Resp1}};
+all_dbs_callback({row, {Row}}, {Prepend, Resp}) ->
+ case couch_util:get_value(id, Row) of <<"_design", _/binary>> ->
+ {ok, {Prepend, Resp}};
+ DbName ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, [Prepend, ?JSON_ENCODE(DbName)]),
+ {ok, {",", Resp1}}
+ end;
+all_dbs_callback(complete, {_, Resp}) ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, "]"),
+ chttpd:end_delayed_json_response(Resp1);
+all_dbs_callback({error, Reason}, {_, Resp}) ->
+ chttpd:send_delayed_error(Resp, Reason).
+
+handle_task_status_req(#httpd{method='GET'}=Req) ->
+ {Replies, _BadNodes} = gen_server:multi_call(couch_task_status, all),
+ Response = lists:flatmap(fun({Node, Tasks}) ->
+ [{[{node,Node} | Task]} || Task <- Tasks]
+ end, Replies),
+ send_json(Req, lists:sort(Response));
+handle_task_status_req(Req) ->
+ send_method_not_allowed(Req, "GET,HEAD").
+
+handle_replicate_req(#httpd{method='POST', user_ctx=Ctx} = Req) ->
+ couch_httpd:validate_ctype(Req, "application/json"),
+ %% see HACK in chttpd.erl about replication
+ PostBody = get(post_body),
+ try replicate(PostBody, Ctx, mem3_rep_manager) of
+ {ok, {continuous, RepId}} ->
+ send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
+ {ok, {cancelled, RepId}} ->
+ send_json(Req, 200, {[{ok, true}, {<<"_local_id">>, RepId}]});
+ {ok, {JsonResults}} ->
+ send_json(Req, {[{ok, true} | JsonResults]});
+ {error, {Type, Details}} ->
+ send_json(Req, 500, {[{error, Type}, {reason, Details}]});
+ {error, not_found} ->
+ send_json(Req, 404, {[{error, not_found}]});
+ {error, Reason} ->
+ try
+ send_json(Req, 500, {[{error, Reason}]})
+ catch
+ exit:{json_encode, _} ->
+ send_json(Req, 500, {[{error, couch_util:to_binary(Reason)}]})
+ end
+ catch
+ throw:{db_not_found, Msg} ->
+ send_json(Req, 404, {[{error, db_not_found}, {reason, Msg}]});
+ throw:{unauthorized, Msg} ->
+ send_json(Req, 404, {[{error, unauthorized}, {reason, Msg}]})
+ end;
+handle_replicate_req(Req) ->
+ send_method_not_allowed(Req, "POST").
+
+replicate({Props} = PostBody, Ctx, Module) ->
+ Node = choose_node([
+ couch_util:get_value(<<"source">>, Props),
+ couch_util:get_value(<<"target">>, Props)
+ ]),
+ case rpc:call(Node, couch_rep, replicate, [PostBody, Ctx, Module]) of
+ {badrpc, Reason} ->
+ erlang:error(Reason);
+ Res ->
+ Res
+ end.
+
+choose_node(Key) when is_binary(Key) ->
+ Checksum = erlang:crc32(Key),
+ Nodes = lists:sort([node()|erlang:nodes()]),
+ lists:nth(1 + Checksum rem length(Nodes), Nodes);
+choose_node(Key) ->
+ choose_node(term_to_binary(Key)).
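+
+% choose_node/1 hashes the replication key with crc32 so a given
+% {source, target} pair always lands on the same member of the sorted
+% node list, e.g. (illustrative): with three nodes and a checksum of 7,
+% lists:nth(1 + 7 rem 3, Nodes) picks the second node.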
+
+handle_restart_req(#httpd{method='POST'}=Req) ->
+ couch_server_sup:restart_core_server(),
+ send_json(Req, 200, {[{ok, true}]});
+handle_restart_req(Req) ->
+ send_method_not_allowed(Req, "POST").
+
+
+handle_uuids_req(Req) ->
+ couch_httpd_misc_handlers:handle_uuids_req(Req).
+
+
+% Config request handler
+
+
+% GET /_config/
+% GET /_config
+handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
+ Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
+ case dict:is_key(Section, Acc) of
+ true ->
+ dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
+ false ->
+ dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
+ end
+ end, dict:new(), couch_config:all()),
+ KVs = dict:fold(fun(Section, Values, Acc) ->
+ [{list_to_binary(Section), {Values}} | Acc]
+ end, [], Grouped),
+ send_json(Req, 200, {KVs});
+% GET /_config/Section
+handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) ->
+ KVs = [{list_to_binary(Key), list_to_binary(Value)}
+ || {Key, Value} <- couch_config:get(Section)],
+ send_json(Req, 200, {KVs});
+% PUT /_config/Section/Key
+% "value"
+handle_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req) ->
+ Value = chttpd:json_body(Req),
+ Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
+ OldValue = couch_config:get(Section, Key, ""),
+ ok = couch_config:set(Section, Key, ?b2l(Value), Persist),
+ send_json(Req, 200, list_to_binary(OldValue));
+% GET /_config/Section/Key
+handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) ->
+ case couch_config:get(Section, Key, null) of
+ null ->
+ throw({not_found, unknown_config_value});
+ Value ->
+ send_json(Req, 200, list_to_binary(Value))
+ end;
+% DELETE /_config/Section/Key
+handle_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req) ->
+ Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
+ case couch_config:get(Section, Key, null) of
+ null ->
+ throw({not_found, unknown_config_value});
+ OldValue ->
+ couch_config:delete(Section, Key, Persist),
+ send_json(Req, 200, list_to_binary(OldValue))
+ end;
+handle_config_req(Req) ->
+ send_method_not_allowed(Req, "GET,PUT,DELETE").
+
+% httpd log handlers
+
+handle_log_req(#httpd{method='GET'}=Req) ->
+ Bytes = list_to_integer(chttpd:qs_value(Req, "bytes", "1000")),
+ Offset = list_to_integer(chttpd:qs_value(Req, "offset", "0")),
+ Chunk = couch_log:read(Bytes, Offset),
+ {ok, Resp} = start_chunked_response(Req, 200, [
+ % send a plaintext response
+ {"Content-Type", "text/plain; charset=utf-8"},
+ {"Content-Length", integer_to_list(length(Chunk))}
+ ]),
+ send_chunk(Resp, Chunk),
+ send_chunk(Resp, "");
+handle_log_req(Req) ->
+ send_method_not_allowed(Req, "GET").
+
+% Note: this resource is exposed on the backdoor interface, but it lives in
+% chttpd because it's not in couch trunk
+handle_system_req(Req) ->
+ Other = erlang:memory(system) - lists:sum([X || {_,X} <-
+ erlang:memory([atom, code, binary, ets])]),
+ Memory = [{other, Other} | erlang:memory([atom, atom_used, processes,
+ processes_used, binary, code, ets])],
+ {NumberOfGCs, WordsReclaimed, _} = statistics(garbage_collection),
+ {{input, Input}, {output, Output}} = statistics(io),
+ {message_queue_len, MessageQueueLen} = process_info(whereis(couch_server),
+ message_queue_len),
+ send_json(Req, {[
+ {uptime, element(1,statistics(wall_clock)) div 1000},
+ {memory, {Memory}},
+ {run_queue, statistics(run_queue)},
+ {ets_table_count, length(ets:all())},
+ {context_switches, element(1, statistics(context_switches))},
+ {reductions, element(1, statistics(reductions))},
+ {garbage_collection_count, NumberOfGCs},
+ {words_reclaimed, WordsReclaimed},
+ {io_input, Input},
+ {io_output, Output},
+ {os_proc_count, couch_proc_manager:get_proc_count()},
+ {process_count, erlang:system_info(process_count)},
+ {process_limit, erlang:system_info(process_limit)},
+ {message_queue_len, MessageQueueLen}
+ ]}).
diff --git a/deps/chttpd/src/chttpd_rewrite.erl b/deps/chttpd/src/chttpd_rewrite.erl
new file mode 100644
index 00000000..f512ba5f
--- /dev/null
+++ b/deps/chttpd/src/chttpd_rewrite.erl
@@ -0,0 +1,421 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+%
+% bind_path is based on bind method from Webmachine
+
+
+%% @doc Module for URL rewriting by pattern matching.
+
+-module(chttpd_rewrite).
+-export([handle_rewrite_req/3]).
+-include_lib("couch/include/couch_db.hrl").
+
+-define(SEPARATOR, $\/).
+-define(MATCH_ALL, {bind, <<"*">>}).
+
+
+%% @doc The http rewrite handler. All rewriting is done from
+%% /dbname/_design/ddocname/_rewrite by default.
+%%
+%% Each rule should be in the rewrites member of the design doc.
+%% Example of a complete rule:
+%%
+%% {
+%% ....
+%% "rewrites": [
+%% {
+%% "from": "",
+%% "to": "index.html",
+%% "method": "GET",
+%% "query": {}
+%% }
+%% ]
+%% }
+%%
+%% from: the path rule used to bind the current uri to the rule. It
+%% uses pattern matching for that.
+%%
+%% to: the rule used to rewrite a url. It can contain variables depending on
+%% binding variables discovered during pattern matching and query args (url
+%% args and the query member).
+%%
+%% method: binds the request method to the rule. By default "*".
+%% query: query args you want to define; they can contain dynamic variables
+%% bound by key to the bindings.
+%%
+%%
+%% to and from are paths with patterns. A pattern can be a string starting
+%% with ":" or "*". Example:
+%% /somepath/:var/*
+%%
+%% This path is converted into an erlang list by splitting on "/". Each var
+%% is converted to an atom, and "*" is converted to the '*' atom. The
+%% pattern matching is done by splitting the request url on "/" into a list
+%% of tokens. A string pattern matches an equal token. The star atom ('*' in
+%% single quotes) matches any number of tokens, but may only be present as
+%% the last pathterm in a pathspec. If all tokens are matched and all
+%% pathterms are used, then the pathspec matches. It works like webmachine.
+%% Each identified token is reused in the to rule and in the query.
+%%
+%% The pattern matching is done by first matching the request method to a
+%% rule. By default all methods match a rule (method is equal to "*" by
+%% default). Then it tries to match the path to one rule. If no rule
+%% matches, a 404 error is returned.
+%%
+%% Once a rule is found we rewrite the request url using the "to" and
+%% "query" members. The identified tokens are matched to the rule and
+%% replace each var. If '*' is found in the rule it will contain the
+%% remaining part, if any.
+%%
+%% Examples:
+%%
+%% Dispatch rule URL TO Tokens
+%%
+%% {"from": "/a/b", /a/b?k=v /some/b?k=v var =:= b
+%% "to": "/some/"} k = v
+%%
+%% {"from": "/a/b", /a/b /some/b?var=b var =:= b
+%% "to": "/some/:var"}
+%%
+%% {"from": "/a", /a /some
+%% "to": "/some/*"}
+%%
+%% {"from": "/a/*", /a/b/c /some/b/c
+%% "to": "/some/*"}
+%%
+%% {"from": "/a", /a /some
+%% "to": "/some/*"}
+%%
+%% {"from": "/a/:foo/*", /a/b/c /some/b/c?foo=b foo =:= b
+%% "to": "/some/:foo/*"}
+%%
+%% {"from": "/a/:foo", /a/b /some/?k=b&foo=b foo =:= b
+%% "to": "/some",
+%% "query": {
+%% "k": ":foo"
+%% }}
+%%
+%% {"from": "/a", /a?foo=b /some/b foo =:= b
+%% "to": "/some/:foo",
+%% }}
+
+
+
+handle_rewrite_req(#httpd{
+ path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts],
+ method=Method,
+ mochi_req=MochiReq}=Req, _Db, DDoc) ->
+
+ % we are in a design handler
+ DesignId = <<"_design/", DesignName/binary>>,
+ Prefix = <<"/", DbName/binary, "/", DesignId/binary>>,
+ QueryList = couch_httpd:qs(Req),
+ QueryList1 = [{to_binding(K), V} || {K, V} <- QueryList],
+
+ #doc{body={Props}} = DDoc,
+
+ % get rules from ddoc
+ case couch_util:get_value(<<"rewrites">>, Props) of
+ undefined ->
+ couch_httpd:send_error(Req, 404, <<"rewrite_error">>,
+ <<"Invalid path.">>);
+ Bin when is_binary(Bin) ->
+ couch_httpd:send_error(Req, 400, <<"rewrite_error">>,
+ <<"Rewrite rules are a String. They must be a JSON Array.">>);
+ Rules ->
+ % create dispatch list from rules
+ DispatchList = [make_rule(Rule) || {Rule} <- Rules],
+
+ %% get raw path by matching url to a rule.
+ RawPath = case try_bind_path(DispatchList, couch_util:to_binary(Method), PathParts,
+ QueryList1) of
+ no_dispatch_path ->
+ throw(not_found);
+ {NewPathParts, Bindings} ->
+ Parts = [quote_plus(X) || X <- NewPathParts],
+
+                    % build the new path and re-encode the query args,
+                    % converting them to json where needed
+ Path = lists:append(
+ string:join(Parts, [?SEPARATOR]),
+ case Bindings of
+ [] -> [];
+ _ -> [$?, encode_query(Bindings)]
+ end),
+
+ % if path is relative detect it and rewrite path
+ case mochiweb_util:safe_relative_path(Path) of
+ undefined ->
+ ?b2l(Prefix) ++ "/" ++ Path;
+ P1 ->
+ ?b2l(Prefix) ++ "/" ++ P1
+ end
+
+ end,
+
+ % normalize final path (fix levels "." and "..")
+ RawPath1 = ?b2l(iolist_to_binary(normalize_path(RawPath))),
+
+ ?LOG_DEBUG("rewrite to ~p ~n", [RawPath1]),
+
+ % build a new mochiweb request
+ MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
+ MochiReq:get(method),
+ RawPath1,
+ MochiReq:get(version),
+ MochiReq:get(headers)),
+
+            % cleanup; this forces mochiweb to reparse the raw uri
+ MochiReq1:cleanup(),
+
+ chttpd:handle_request(MochiReq1)
+ end.
+
+quote_plus({bind, X}) ->
+ mochiweb_util:quote_plus(X);
+quote_plus(X) ->
+ mochiweb_util:quote_plus(X).
+
+%% @doc Try to find a rule matching the current url. If none is found,
+%% no_dispatch_path is returned and the caller raises a 404 not_found.
+try_bind_path([], _Method, _PathParts, _QueryList) ->
+ no_dispatch_path;
+try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
+ [{PathParts1, Method1}, RedirectPath, QueryArgs] = Dispatch,
+ case bind_method(Method1, Method) of
+ true ->
+ case bind_path(PathParts1, PathParts, []) of
+ {ok, Remaining, Bindings} ->
+ Bindings1 = Bindings ++ QueryList,
+                    % parse query args from the rule and fill them in
+                    % with binding vars where needed
+ QueryArgs1 = make_query_list(QueryArgs, Bindings1, []),
+                    % drop bindings in Bindings1 that are already present
+                    % in QueryArgs1
+ Bindings2 = lists:foldl(fun({K, V}, Acc) ->
+ K1 = to_binding(K),
+ KV = case couch_util:get_value(K1, QueryArgs1) of
+ undefined -> [{K1, V}];
+ _V1 -> []
+ end,
+ Acc ++ KV
+ end, [], Bindings1),
+
+ FinalBindings = Bindings2 ++ QueryArgs1,
+ NewPathParts = make_new_path(RedirectPath, FinalBindings,
+ Remaining, []),
+ {NewPathParts, FinalBindings};
+ fail ->
+ try_bind_path(Rest, Method, PathParts, QueryList)
+ end;
+ false ->
+ try_bind_path(Rest, Method, PathParts, QueryList)
+ end.
+
+%% Dynamically rewrite the query list given as the query member in
+%% rewrites. Each value is replaced by a binding or an argument
+%% passed in the url.
+make_query_list([], _Bindings, Acc) ->
+ Acc;
+make_query_list([{Key, {Value}}|Rest], Bindings, Acc) ->
+ Value1 = to_json({Value}),
+ make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]);
+make_query_list([{Key, Value}|Rest], Bindings, Acc) when is_binary(Value) ->
+ Value1 = replace_var(Key, Value, Bindings),
+ make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]);
+make_query_list([{Key, Value}|Rest], Bindings, Acc) when is_list(Value) ->
+ Value1 = replace_var(Key, Value, Bindings),
+ make_query_list(Rest, Bindings, [{to_binding(Key), Value1}|Acc]);
+make_query_list([{Key, Value}|Rest], Bindings, Acc) ->
+ make_query_list(Rest, Bindings, [{to_binding(Key), Value}|Acc]).
+
+replace_var(Key, Value, Bindings) ->
+ case Value of
+ <<":", Var/binary>> ->
+ get_var(Var, Bindings, Value);
+ _ when is_list(Value) ->
+ Value1 = lists:foldr(fun(V, Acc) ->
+ V1 = case V of
+ <<":", VName/binary>> ->
+ case get_var(VName, Bindings, V) of
+ V2 when is_list(V2) ->
+ iolist_to_binary(V2);
+ V2 -> V2
+ end;
+ _ ->
+ V
+ end,
+ [V1|Acc]
+ end, [], Value),
+ to_json(Value1);
+ _ when is_binary(Value) ->
+ Value;
+ _ ->
+ case Key of
+ <<"key">> -> to_json(Value);
+ <<"startkey">> -> to_json(Value);
+ <<"endkey">> -> to_json(Value);
+ _ ->
+ lists:flatten(?JSON_ENCODE(Value))
+ end
+ end.
+
+
+get_var(VarName, Props, Default) ->
+ VarName1 = to_binding(VarName),
+ couch_util:get_value(VarName1, Props, Default).
+
+%% @doc build the new path from bindings. Bindings are query args
+%% (+ dynamic query rewritten if needed) and bindings found in the
+%% bind_path step.
+make_new_path([], _Bindings, _Remaining, Acc) ->
+ lists:reverse(Acc);
+make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
+ Acc1 = lists:reverse(Acc) ++ Remaining,
+ Acc1;
+make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
+ Acc1 = lists:reverse(Acc) ++ Remaining,
+ Acc1;
+make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
+ P2 = case couch_util:get_value({bind, P}, Bindings) of
+ undefined -> << "undefined">>;
+ P1 -> P1
+ end,
+ make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
+make_new_path([P|Rest], Bindings, Remaining, Acc) ->
+ make_new_path(Rest, Bindings, Remaining, [P|Acc]).
+
+
+%% @doc Check whether the request method fits the rule method. If the
+%% method rule is '*', which is the default, any
+%% request method will bind. This allows rules that
+%% depend on the HTTP method.
+bind_method(?MATCH_ALL, _Method) ->
+ true;
+bind_method({bind, Method}, Method) ->
+ true;
+bind_method(_, _) ->
+ false.
+
+
+%% @doc bind path. Using the from rule we try to bind variables in
+%% the current url by pattern matching
+bind_path([], [], Bindings) ->
+ {ok, [], Bindings};
+bind_path([?MATCH_ALL], Rest, Bindings) when is_list(Rest) ->
+ {ok, Rest, Bindings};
+bind_path(_, [], _) ->
+ fail;
+bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
+ bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
+bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
+ bind_path(RestToken, RestMatch, Bindings);
+bind_path(_, _, _) ->
+ fail.
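+
+%% For example (illustrative):
+%%
+%%   bind_path([{bind, <<"foo">>}, ?MATCH_ALL],
+%%             [<<"b">>, <<"c">>, <<"d">>], [])
+%%   -> {ok, [<<"c">>, <<"d">>], [{{bind, <<"foo">>}, <<"b">>}]}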
+
+
+%% normalize path.
+normalize_path(Path) ->
+ "/" ++ string:join(normalize_path1(string:tokens(Path,
+ "/"), []), [?SEPARATOR]).
+
+
+normalize_path1([], Acc) ->
+ lists:reverse(Acc);
+normalize_path1([".."|Rest], Acc) ->
+ Acc1 = case Acc of
+ [] -> [".."|Acc];
+ [T|_] when T =:= ".." -> [".."|Acc];
+ [_|R] -> R
+ end,
+ normalize_path1(Rest, Acc1);
+normalize_path1(["."|Rest], Acc) ->
+ normalize_path1(Rest, Acc);
+normalize_path1([Path|Rest], Acc) ->
+ normalize_path1(Rest, [Path|Acc]).
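+
+%% For example (illustrative):
+%%
+%%   normalize_path("/a/./b/../c") -> "/a/c"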
+
+
+%% @doc transform a json rule into erlang terms for pattern matching
+make_rule(Rule) ->
+ Method = case couch_util:get_value(<<"method">>, Rule) of
+ undefined -> ?MATCH_ALL;
+ M -> to_binding(M)
+ end,
+ QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
+ undefined -> [];
+ {Args} -> Args
+ end,
+ FromParts = case couch_util:get_value(<<"from">>, Rule) of
+ undefined -> [?MATCH_ALL];
+ From ->
+ parse_path(From)
+ end,
+ ToParts = case couch_util:get_value(<<"to">>, Rule) of
+ undefined ->
+ throw({error, invalid_rewrite_target});
+ To ->
+ parse_path(To)
+ end,
+ [{FromParts, Method}, ToParts, QueryArgs].
+
+parse_path(Path) ->
+ {ok, SlashRE} = re:compile(<<"\\/">>),
+ path_to_list(re:split(Path, SlashRE), [], 0).
+
+%% @doc convert a path rule (from or to) to an erlang list.
+%% "*" and path variables starting with ":" are converted
+%% to binding terms.
+path_to_list([], Acc, _DotDotCount) ->
+ lists:reverse(Acc);
+path_to_list([<<>>|R], Acc, DotDotCount) ->
+ path_to_list(R, Acc, DotDotCount);
+path_to_list([<<"*">>|R], Acc, DotDotCount) ->
+ path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
+path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
+ case couch_config:get("httpd", "secure_rewrites", "true") of
+ "false" ->
+ path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
+ _Else ->
+ ?LOG_INFO("insecure_rewrite_rule ~p blocked", [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
+ throw({insecure_rewrite_rule, "too many ../.. segments"})
+ end;
+path_to_list([<<"..">>|R], Acc, DotDotCount) ->
+ path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
+path_to_list([P|R], Acc, DotDotCount) ->
+ P1 = case P of
+ <<":", Var/binary>> ->
+ to_binding(Var);
+ _ -> P
+ end,
+ path_to_list(R, [P1|Acc], DotDotCount).
+
+encode_query(Props) ->
+ Props1 = lists:foldl(fun ({{bind, K}, V}, Acc) ->
+ V1 = case is_list(V) orelse is_binary(V) of
+ true -> V;
+ false ->
+ % probably it's a number
+ quote_plus(V)
+ end,
+ [{K, V1} | Acc]
+ end, [], Props),
+ lists:flatten(mochiweb_util:urlencode(Props1)).
+
+to_binding({bind, V}) ->
+ {bind, V};
+to_binding(V) when is_list(V) ->
+ to_binding(?l2b(V));
+to_binding(V) ->
+ {bind, V}.
+
+to_json(V) ->
+ iolist_to_binary(?JSON_ENCODE(V)).
diff --git a/deps/chttpd/src/chttpd_show.erl b/deps/chttpd/src/chttpd_show.erl
new file mode 100644
index 00000000..dadf9c7a
--- /dev/null
+++ b/deps/chttpd/src/chttpd_show.erl
@@ -0,0 +1,314 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_show).
+
+-export([handle_doc_show_req/3, handle_doc_update_req/3, handle_view_list_req/3]).
+
+-include_lib("couch/include/couch_db.hrl").
+
+-record(lacc, {
+ req,
+ resp = nil,
+ qserver,
+ lname,
+ db,
+ etag
+}).
+
+% /db/_design/foo/_show/bar/docid
+% show converts a json doc to a response of any content-type.
+% it looks up the doc and then passes it to the query server.
+% then it sends the response from the query server to the http client.
+
+maybe_open_doc(Db, DocId) ->
+ case fabric:open_doc(Db, DocId, [conflicts]) of
+ {ok, Doc} ->
+ Doc;
+ {not_found, _} ->
+ nil
+ end.
+
+handle_doc_show_req(#httpd{
+ path_parts=[_, _, _, _, ShowName, DocId]
+ }=Req, Db, DDoc) ->
+
+ % open the doc
+ Doc = maybe_open_doc(Db, DocId),
+
+ % we don't handle revs here b/c they are an internal api
+    % a missing doc is passed to the show function as nil
+ handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
+
+handle_doc_show_req(#httpd{
+ path_parts=[_, _, _, _, ShowName, DocId|Rest]
+ }=Req, Db, DDoc) ->
+
+ DocParts = [DocId|Rest],
+ DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
+
+ % open the doc
+ Doc = maybe_open_doc(Db, DocId1),
+
+ % we don't handle revs here b/c they are an internal api
+ % pass 404 docs to the show function
+ handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
+
+handle_doc_show_req(#httpd{
+ path_parts=[_, _, _, _, ShowName]
+ }=Req, Db, DDoc) ->
+ % with no docid the doc is nil
+ handle_doc_show(Req, Db, DDoc, ShowName, nil);
+
+handle_doc_show_req(Req, _Db, _DDoc) ->
+ chttpd:send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
+
+handle_doc_show(Req, Db, DDoc, ShowName, Doc) ->
+ handle_doc_show(Req, Db, DDoc, ShowName, Doc, null).
+
+handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
+ % get responder for ddoc/showname
+ CurrentEtag = show_etag(Req, Doc, DDoc, []),
+ chttpd:etag_respond(Req, CurrentEtag, fun() ->
+ JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
+ JsonDoc = couch_query_servers:json_doc(Doc),
+ [<<"resp">>, ExternalResp] =
+ couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName],
+ [JsonDoc, JsonReq]),
+ JsonResp = apply_etag(ExternalResp, CurrentEtag),
+ chttpd_external:send_external_response(Req, JsonResp)
+ end).
+
+
+show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
+ Accept = chttpd:header_value(Req, "Accept"),
+ DocPart = case Doc of
+ nil -> nil;
+ Doc -> chttpd:doc_etag(Doc)
+ end,
+ couch_httpd:make_etag({couch_httpd:doc_etag(DDoc), DocPart, Accept,
+ UserCtx#user_ctx.roles, More}).
+
+% /db/_design/foo/update/bar/docid
+% updates a doc based on a request
+% handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
+% % anything but GET
+% send_method_not_allowed(Req, "POST,PUT,DELETE,ETC");
+
+handle_doc_update_req(#httpd{
+ path_parts=[_, _, _, _, UpdateName, DocId]
+ }=Req, Db, DDoc) ->
+ Doc = maybe_open_doc(Db, DocId),
+ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);
+
+handle_doc_update_req(#httpd{
+ path_parts=[_, _, _, _, UpdateName]
+ }=Req, Db, DDoc) ->
+ send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);
+
+handle_doc_update_req(Req, _Db, _DDoc) ->
+ chttpd:send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).
+
+send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
+ JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
+ JsonDoc = couch_query_servers:json_doc(Doc),
+ Cmd = [<<"updates">>, UpdateName],
+ case couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]) of
+ [<<"up">>, {NewJsonDoc}, JsonResp] ->
+ case chttpd:header_value(Req, "X-Couch-Full-Commit", "false") of
+ "true" ->
+ Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
+ _ ->
+ Options = [{user_ctx, Req#httpd.user_ctx}]
+ end,
+ NewDoc = couch_doc:from_json_obj({NewJsonDoc}),
+ case fabric:update_doc(Db, NewDoc, Options) of
+ {ok, _} ->
+ Code = 201;
+ {accepted, _} ->
+ Code = 202
+ end;
+ [<<"up">>, _Other, JsonResp] ->
+ Code = 200
+ end,
+ JsonResp2 = json_apply_field({<<"code">>, Code}, JsonResp),
+ % todo set location field
+ chttpd_external:send_external_response(Req, JsonResp2).
+
+
+% view-list request with view and list from same design doc.
+handle_view_list_req(#httpd{method='GET',
+ path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
+ handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, nil);
+
+% view-list request with view and list from different design docs.
+handle_view_list_req(#httpd{method='GET',
+ path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc) ->
+ handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, nil);
+
+handle_view_list_req(#httpd{method='GET'}=Req, _Db, _DDoc) ->
+ chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
+
+handle_view_list_req(#httpd{method='POST',
+ path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
+ ReqBody = couch_httpd:body(Req),
+ {Props2} = ?JSON_DECODE(ReqBody),
+ Keys = proplists:get_value(<<"keys">>, Props2, nil),
+ handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
+ {DesignName, ViewName}, Keys);
+
+handle_view_list_req(#httpd{method='POST',
+ path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc) ->
+ ReqBody = couch_httpd:body(Req),
+ {Props2} = ?JSON_DECODE(ReqBody),
+ Keys = proplists:get_value(<<"keys">>, Props2, nil),
+ handle_view_list(Req#httpd{req_body=ReqBody}, Db, DDoc, ListName,
+ {DesignName, ViewName}, Keys);
+
+handle_view_list_req(#httpd{method='POST'}=Req, _Db, _DDoc) ->
+ chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
+
+handle_view_list_req(Req, _Db, _DDoc) ->
+ chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
+
+handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) ->
+ {ok, VDoc} = fabric:open_doc(Db, <<"_design/", ViewDesignName/binary>>, []),
+ Group = couch_view_group:design_doc_to_view_group(VDoc),
+ IsReduce = chttpd_view:get_reduce_type(Req),
+ ViewType = chttpd_view:extract_view_type(ViewName, Group#group.views,
+ IsReduce),
+ QueryArgs = chttpd_view:parse_view_params(Req, Keys, ViewType),
+ CB = fun list_callback/2,
+ Etag = couch_uuids:new(),
+ chttpd:etag_respond(Req, Etag, fun() ->
+ couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
+ Acc0 = #lacc{
+ lname = LName,
+ req = Req,
+ qserver = QServer,
+ db = Db,
+ etag = Etag
+ },
+ fabric:query_view(Db, VDoc, ViewName, CB, Acc0, QueryArgs)
+ end)
+ end).
+
+list_callback({total_and_offset, Total, Offset}, #lacc{resp=nil} = Acc) ->
+ start_list_resp({[{<<"total_rows">>, Total}, {<<"offset">>, Offset}]}, Acc);
+list_callback({total_and_offset, _, _}, Acc) ->
+ % a sorted=false view where the message came in late. Ignore.
+ {ok, Acc};
+list_callback({row, Row}, #lacc{resp=nil} = Acc) ->
+ % first row of a reduce view, or a sorted=false view
+ {ok, NewAcc} = start_list_resp({[]}, Acc),
+ send_list_row(Row, NewAcc);
+list_callback({row, Row}, Acc) ->
+ send_list_row(Row, Acc);
+list_callback(complete, Acc) ->
+ #lacc{qserver = {Proc, _}, resp = Resp0} = Acc,
+ if Resp0 =:= nil ->
+ {ok, #lacc{resp = Resp}} = start_list_resp({[]}, Acc);
+ true ->
+ Resp = Resp0
+ end,
+ try couch_query_servers:proc_prompt(Proc, [<<"list_end">>]) of
+ [<<"end">>, Chunk] ->
+ {ok, Resp1} = send_non_empty_chunk(Resp, Chunk),
+ chttpd:send_delayed_last_chunk(Resp1)
+ catch Error ->
+ {ok, Resp1} = chttpd:send_delayed_error(Resp, Error),
+ {stop, Resp1}
+ end;
+list_callback({error, Reason}, #lacc{resp=Resp}) ->
+ chttpd:send_delayed_error(Resp, Reason).
+
+start_list_resp(Head, Acc) ->
+ #lacc{
+ req = Req,
+ db = Db,
+ qserver = QServer,
+ lname = LName,
+ etag = Etag
+ } = Acc,
+
+ % use a separate process because we're already in a receive loop, and
+ % json_req_obj calls fabric:get_db_info()
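+ % (get_db_info performs its own receives, which could otherwise consume
+ % messages intended for this loop)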
+ spawn_monitor(fun() -> exit(chttpd_external:json_req_obj(Req, Db)) end),
+ receive {'DOWN', _, _, _, JsonReq} -> ok end,
+
+ [<<"start">>,Chunk,JsonResp] = couch_query_servers:ddoc_proc_prompt(QServer,
+ [<<"lists">>, LName], [Head, JsonReq]),
+ JsonResp2 = apply_etag(JsonResp, Etag),
+ #extern_resp_args{
+ code = Code,
+ ctype = CType,
+ headers = ExtHeaders
+ } = couch_httpd_external:parse_external_response(JsonResp2),
+ JsonHeaders = couch_httpd_external:default_or_content_type(CType, ExtHeaders),
+ {ok, Resp} = chttpd:start_delayed_chunked_response(Req, Code,
+ JsonHeaders, Chunk),
+ {ok, Acc#lacc{resp=Resp}}.
+
+send_list_row(Row, #lacc{qserver = {Proc, _}, resp = Resp} = Acc) ->
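+ % the list function replies [<<"chunks">>, Data] to keep streaming,
+ % or [<<"end">>, Data] to close the response early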
+ try couch_query_servers:proc_prompt(Proc, [<<"list_row">>, Row]) of
+ [<<"chunks">>, Chunk] ->
+ {ok, Resp1} = send_non_empty_chunk(Resp, Chunk),
+ {ok, Acc#lacc{resp=Resp1}};
+ [<<"end">>, Chunk] ->
+ {ok, Resp1} = send_non_empty_chunk(Resp, Chunk),
+ {ok, Resp2} = chttpd:send_delayed_last_chunk(Resp1),
+ {stop, Resp2}
+ catch Error ->
+ {ok, Resp1} = chttpd:send_delayed_error(Resp, Error),
+ {stop, Resp1}
+ end.
+
+send_non_empty_chunk(Resp, []) ->
+ {ok, Resp};
+send_non_empty_chunk(Resp, Chunk) ->
+ chttpd:send_delayed_chunk(Resp, Chunk).
+
+% Maybe this is in the proplists API
+% todo move to couch_util
+json_apply_field(H, {L}) ->
+ json_apply_field(H, L, []).
+json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
+ % drop matching keys
+ json_apply_field({Key, NewValue}, Headers, Acc);
+json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
+ % something else is next, leave it alone.
+ json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
+json_apply_field({Key, NewValue}, [], Acc) ->
+ % end of list, add ours
+ {[{Key, NewValue}|Acc]}.
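+% For example (illustrative values):
+%   json_apply_field({<<"code">>, 201}, {[{<<"code">>, 500}, {<<"ok">>, true}]})
+% returns {[{<<"code">>, 201}, {<<"ok">>, true}]}.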
+
+apply_etag({ExternalResponse}, CurrentEtag) ->
+ % Here we embark on the delicate task of replacing or creating the
+ % headers on the JsonResponse object. We need to control the Etag and
+ % Vary headers. If the external function controls the Etag, we'd have to
+ % run it to check for a match, which sort of defeats the purpose.
+ case couch_util:get_value(<<"headers">>, ExternalResponse, nil) of
+ nil ->
+ % no JSON headers
+ % add our Etag and Vary headers to the response
+ {[{<<"headers">>, {[{<<"Etag">>, CurrentEtag}, {<<"Vary">>, <<"Accept">>}]}} | ExternalResponse]};
+ JsonHeaders ->
+ {[case Field of
+ {<<"headers">>, JsonHeaders} -> % add our headers
+ JsonHeadersEtagged = json_apply_field({<<"Etag">>, CurrentEtag}, JsonHeaders),
+ JsonHeadersVaried = json_apply_field({<<"Vary">>, <<"Accept">>}, JsonHeadersEtagged),
+ {<<"headers">>, JsonHeadersVaried};
+ _ -> % skip non-header fields
+ Field
+ end || Field <- ExternalResponse]}
+ end.
+
diff --git a/deps/chttpd/src/chttpd_sup.erl b/deps/chttpd/src/chttpd_sup.erl
new file mode 100644
index 00000000..bfe6be90
--- /dev/null
+++ b/deps/chttpd/src/chttpd_sup.erl
@@ -0,0 +1,25 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_sup).
+-behaviour(supervisor).
+-export([init/1]).
+
+-export([start_link/1]).
+
+start_link(Args) ->
+ supervisor:start_link({local,?MODULE}, ?MODULE, Args).
+
+init([]) ->
+ Mod = chttpd,
+ Spec = {Mod, {Mod,start_link,[]}, permanent, 100, worker, [Mod]},
+ {ok, {{one_for_one, 3, 10}, [Spec]}}.
diff --git a/deps/chttpd/src/chttpd_view.erl b/deps/chttpd/src/chttpd_view.erl
new file mode 100644
index 00000000..7e13c356
--- /dev/null
+++ b/deps/chttpd/src/chttpd_view.erl
@@ -0,0 +1,393 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_view).
+-include_lib("couch/include/couch_db.hrl").
+
+-export([handle_view_req/3, handle_temp_view_req/2, get_reduce_type/1,
+ parse_view_params/3, view_group_etag/2, view_group_etag/3,
+ parse_bool_param/1, extract_view_type/3]).
+
+
+multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
+ Group = couch_view_group:design_doc_to_view_group(DDoc),
+ IsReduce = get_reduce_type(Req),
+ ViewType = extract_view_type(ViewName, Group#group.views, IsReduce),
+ % TODO proper calculation of etag
+ % Etag = view_group_etag(ViewGroup, Db, Queries),
+ Etag = couch_uuids:new(),
+ DefaultParams = lists:flatmap(fun({K,V}) -> parse_view_param(K,V) end,
+ chttpd:qs(Req)),
+ [couch_stats_collector:increment({httpd, view_reads}) || _I <- Queries],
+ chttpd:etag_respond(Req, Etag, fun() ->
+ FirstChunk = "{\"results\":[",
+ {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"Etag",Etag}], FirstChunk),
+ {_, Resp1} = lists:foldl(fun({QueryProps}, {Chunk, RespAcc}) ->
+ if Chunk =/= nil -> chttpd:send_delayed_chunk(Resp, Chunk); true -> ok end,
+ ThisQuery = lists:flatmap(fun parse_json_view_param/1, QueryProps),
+ FullParams = lists:ukeymerge(1, ThisQuery, DefaultParams),
+ {ok, RespAcc1} = fabric:query_view(
+ Db,
+ DDoc,
+ ViewName,
+ fun view_callback/2,
+ {nil, RespAcc},
+ parse_view_params(FullParams, nil, ViewType)
+ ),
+ {",\n", RespAcc1}
+ end, {nil,Resp}, Queries),
+ chttpd:send_delayed_chunk(Resp1, "]}"),
+ chttpd:end_delayed_json_response(Resp1)
+ end).
+
+design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
+ Group = couch_view_group:design_doc_to_view_group(DDoc),
+ IsReduce = get_reduce_type(Req),
+ ViewType = extract_view_type(ViewName, Group#group.views, IsReduce),
+ QueryArgs = parse_view_params(Req, Keys, ViewType),
+ % TODO proper calculation of etag
+ % Etag = view_group_etag(ViewGroup, Db, Keys),
+ Etag = couch_uuids:new(),
+ couch_stats_collector:increment({httpd, view_reads}),
+ chttpd:etag_respond(Req, Etag, fun() ->
+ {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"Etag",Etag}]),
+ CB = fun view_callback/2,
+ {ok, Resp1} = fabric:query_view(Db, DDoc, ViewName, CB, {nil, Resp}, QueryArgs),
+ chttpd:end_delayed_json_response(Resp1)
+ end).
+
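+% The accumulator is {Prepend, Resp}: nil means the row header has not
+% been sent yet; once rows flow, Prepend is the separator for the next row.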
+view_callback({total_and_offset, Total, Offset}, {nil, Resp}) ->
+ Chunk = "{\"total_rows\":~p,\"offset\":~p,\"rows\":[\r\n",
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, io_lib:format(Chunk, [Total, Offset])),
+ {ok, {"", Resp1}};
+view_callback({total_and_offset, _, _}, Acc) ->
+ % a sorted=false view where the message came in late. Ignore.
+ {ok, Acc};
+view_callback({row, Row}, {nil, Resp}) ->
+ % first row of a reduce view, or a sorted=false view
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, ["{\"rows\":[\r\n", ?JSON_ENCODE(Row)]),
+ {ok, {",\r\n", Resp1}};
+view_callback({row, Row}, {Prepend, Resp}) ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, [Prepend, ?JSON_ENCODE(Row)]),
+ {ok, {",\r\n", Resp1}};
+view_callback(complete, {nil, Resp}) ->
+ chttpd:send_delayed_chunk(Resp, "{\"rows\":[]}");
+view_callback(complete, {_, Resp}) ->
+ chttpd:send_delayed_chunk(Resp, "\r\n]}");
+view_callback({error, Reason}, {_, Resp}) ->
+ chttpd:send_delayed_error(Resp, Reason).
+
+extract_view_type(_ViewName, [], _IsReduce) ->
+ throw({not_found, missing_named_view});
+extract_view_type(ViewName, [View|Rest], IsReduce) ->
+ case lists:member(ViewName, [Name || {Name, _} <- View#view.reduce_funs]) of
+ true ->
+ if IsReduce -> reduce; true -> red_map end;
+ false ->
+ case lists:member(ViewName, View#view.map_names) of
+ true -> map;
+ false -> extract_view_type(ViewName, Rest, IsReduce)
+ end
+ end.
+
+handle_view_req(#httpd{method='GET',
+ path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
+ design_doc_view(Req, Db, DDoc, ViewName, nil);
+
+handle_view_req(#httpd{method='POST',
+ path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
+ {Fields} = chttpd:json_body_obj(Req),
+ Queries = couch_util:get_value(<<"queries">>, Fields),
+ Keys = couch_util:get_value(<<"keys">>, Fields),
+ case {Queries, Keys} of
+ {Queries, undefined} when is_list(Queries) ->
+ multi_query_view(Req, Db, DDoc, ViewName, Queries);
+ {undefined, Keys} when is_list(Keys) ->
+ design_doc_view(Req, Db, DDoc, ViewName, Keys);
+ {undefined, undefined} ->
+ throw({bad_request, "POST body must contain `keys` or `queries` field"});
+ {undefined, _} ->
+ throw({bad_request, "`keys` body member must be an array"});
+ {_, undefined} ->
+ throw({bad_request, "`queries` body member must be an array"});
+ {_, _} ->
+ throw({bad_request, "`keys` and `queries` are mutually exclusive"})
+ end;
+
+handle_view_req(Req, _Db, _DDoc) ->
+ chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
+
+handle_temp_view_req(Req, _Db) ->
+ Msg = <<"Temporary views are not supported in BigCouch">>,
+ chttpd:send_error(Req, 403, forbidden, Msg).
+
+reverse_key_default(?MIN_STR) -> ?MAX_STR;
+reverse_key_default(?MAX_STR) -> ?MIN_STR;
+reverse_key_default(Key) -> Key.
+
+get_reduce_type(Req) ->
+ list_to_existing_atom(chttpd:qs_value(Req, "reduce", "true")).
+
+parse_view_params(Req, Keys, ViewType) when not is_list(Req) ->
+ QueryParams = lists:flatmap(fun({K,V}) -> parse_view_param(K,V) end,
+ chttpd:qs(Req)),
+ parse_view_params(QueryParams, Keys, ViewType);
+parse_view_params(QueryParams, Keys, ViewType) ->
+ IsMultiGet = (Keys =/= nil),
+ Args = #view_query_args{
+ view_type=ViewType,
+ multi_get=IsMultiGet,
+ keys=Keys
+ },
+ QueryArgs = lists:foldl(fun({K, V}, Args2) ->
+ validate_view_query(K, V, Args2)
+ end, Args, QueryParams),
+
+ GroupLevel = QueryArgs#view_query_args.group_level,
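+ % this case is evaluated only for its side effect: it throws on an
+ % invalid reduce/multi-get combination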
+ case {ViewType, GroupLevel, IsMultiGet} of
+ {reduce, exact, true} ->
+ QueryArgs;
+ {reduce, _, false} ->
+ QueryArgs;
+ {reduce, _, _} ->
+ Msg = <<"Multi-key fetchs for reduce "
+ "view must include `group=true`">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ QueryArgs
+ end,
+ QueryArgs.
+
+parse_json_view_param({<<"key">>, V}) ->
+ [{start_key, V}, {end_key, V}];
+parse_json_view_param({<<"startkey_docid">>, V}) ->
+ [{start_docid, V}];
+parse_json_view_param({<<"endkey_docid">>, V}) ->
+ [{end_docid, V}];
+parse_json_view_param({<<"startkey">>, V}) ->
+ [{start_key, V}];
+parse_json_view_param({<<"endkey">>, V}) ->
+ [{end_key, V}];
+parse_json_view_param({<<"limit">>, V}) when is_integer(V), V > 0 ->
+ [{limit, V}];
+parse_json_view_param({<<"stale">>, <<"ok">>}) ->
+ [{stale, ok}];
+parse_json_view_param({<<"stale">>, <<"update_after">>}) ->
+ [{stale, update_after}];
+parse_json_view_param({<<"descending">>, V}) when is_boolean(V) ->
+ [{descending, V}];
+parse_json_view_param({<<"skip">>, V}) when is_integer(V) ->
+ [{skip, V}];
+parse_json_view_param({<<"group">>, true}) ->
+ [{group_level, exact}];
+parse_json_view_param({<<"group">>, false}) ->
+ [{group_level, 0}];
+parse_json_view_param({<<"group_level">>, V}) when is_integer(V), V > 0 ->
+ [{group_level, V}];
+parse_json_view_param({<<"inclusive_end">>, V}) when is_boolean(V) ->
+ [{inclusive_end, V}];
+parse_json_view_param({<<"reduce">>, V}) when is_boolean(V) ->
+ [{reduce, V}];
+parse_json_view_param({<<"include_docs">>, V}) when is_boolean(V) ->
+ [{include_docs, V}];
+parse_json_view_param({<<"conflicts">>, V}) when is_boolean(V) ->
+ [{conflicts, V}];
+parse_json_view_param({<<"list">>, V}) ->
+ [{list, couch_util:to_binary(V)}];
+parse_json_view_param({<<"sorted">>, V}) when is_boolean(V) ->
+ [{sorted, V}];
+parse_json_view_param({K, V}) ->
+ [{extra, {K, V}}].
+
+parse_view_param("", _) ->
+ [];
+parse_view_param("key", Value) ->
+ JsonKey = ?JSON_DECODE(Value),
+ [{start_key, JsonKey}, {end_key, JsonKey}];
+parse_view_param("startkey_docid", Value) ->
+ [{start_docid, ?l2b(Value)}];
+parse_view_param("endkey_docid", Value) ->
+ [{end_docid, ?l2b(Value)}];
+parse_view_param("startkey", Value) ->
+ [{start_key, ?JSON_DECODE(Value)}];
+parse_view_param("endkey", Value) ->
+ [{end_key, ?JSON_DECODE(Value)}];
+parse_view_param("limit", Value) ->
+ [{limit, parse_positive_int_param(Value)}];
+parse_view_param("count", _Value) ->
+ throw({query_parse_error, <<"Query parameter 'count' is now 'limit'.">>});
+parse_view_param("stale", "ok") ->
+ [{stale, ok}];
+parse_view_param("stale", "update_after") ->
+ [{stale, update_after}];
+parse_view_param("stale", _Value) ->
+ throw({query_parse_error,
+ <<"stale only available as stale=ok or as stale=update_after">>});
+parse_view_param("update", _Value) ->
+ throw({query_parse_error, <<"update=false is now stale=ok">>});
+parse_view_param("descending", Value) ->
+ [{descending, parse_bool_param(Value)}];
+parse_view_param("skip", Value) ->
+ [{skip, parse_int_param(Value)}];
+parse_view_param("group", Value) ->
+ case parse_bool_param(Value) of
+ true -> [{group_level, exact}];
+ false -> [{group_level, 0}]
+ end;
+parse_view_param("group_level", Value) ->
+ [{group_level, parse_positive_int_param(Value)}];
+parse_view_param("inclusive_end", Value) ->
+ [{inclusive_end, parse_bool_param(Value)}];
+parse_view_param("reduce", Value) ->
+ [{reduce, parse_bool_param(Value)}];
+parse_view_param("include_docs", Value) ->
+ [{include_docs, parse_bool_param(Value)}];
+parse_view_param("conflicts", Value) ->
+ [{conflicts, parse_bool_param(Value)}];
+parse_view_param("list", Value) ->
+ [{list, ?l2b(Value)}];
+parse_view_param("callback", _) ->
+ []; % Verified in the JSON response functions
+parse_view_param("sorted", Value) ->
+ [{sorted, parse_bool_param(Value)}];
+parse_view_param(Key, Value) ->
+ [{extra, {Key, Value}}].
+
+validate_view_query(start_key, Value, Args) ->
+ case Args#view_query_args.multi_get of
+ true ->
+ Msg = <<"Query parameter `start_key` is "
+ "not compatiible with multi-get">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ Args#view_query_args{start_key=Value}
+ end;
+validate_view_query(start_docid, Value, Args) ->
+ Args#view_query_args{start_docid=Value};
+validate_view_query(end_key, Value, Args) ->
+ case Args#view_query_args.multi_get of
+ true ->
+ Msg = <<"Query parameter `end_key` is "
+ "not compatible with multi-get">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ Args#view_query_args{end_key=Value}
+ end;
+validate_view_query(end_docid, Value, Args) ->
+ Args#view_query_args{end_docid=Value};
+validate_view_query(limit, Value, Args) ->
+ Args#view_query_args{limit=Value};
+validate_view_query(list, Value, Args) ->
+ Args#view_query_args{list=Value};
+validate_view_query(stale, Value, Args) ->
+ Args#view_query_args{stale=Value};
+validate_view_query(descending, true, Args) ->
+ case Args#view_query_args.direction of
+ rev -> Args; % Already reversed
+ fwd ->
+ Args#view_query_args{
+ direction = rev,
+ start_docid =
+ reverse_key_default(Args#view_query_args.start_docid),
+ end_docid =
+ reverse_key_default(Args#view_query_args.end_docid)
+ }
+ end;
+validate_view_query(descending, false, Args) ->
+ Args; % Ignore default condition
+validate_view_query(skip, Value, Args) ->
+ Args#view_query_args{skip=Value};
+validate_view_query(group_level, Value, Args) ->
+ case Args#view_query_args.view_type of
+ reduce ->
+ Args#view_query_args{group_level=Value};
+ _ ->
+ Msg = <<"Invalid URL parameter 'group' or "
+ " 'group_level' for non-reduce view.">>,
+ throw({query_parse_error, Msg})
+ end;
+validate_view_query(inclusive_end, Value, Args) ->
+ Args#view_query_args{inclusive_end=Value};
+validate_view_query(reduce, false, Args) ->
+ Args;
+validate_view_query(reduce, _, Args) ->
+ case Args#view_query_args.view_type of
+ map ->
+ Msg = <<"Invalid URL parameter `reduce` for map view.">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ Args
+ end;
+validate_view_query(include_docs, true, Args) ->
+ case Args#view_query_args.view_type of
+ reduce ->
+ Msg = <<"Query paramter `include_docs` "
+ "is invalid for reduce views.">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ Args#view_query_args{include_docs=true}
+ end;
+validate_view_query(include_docs, _Value, Args) ->
+ Args;
+validate_view_query(conflicts, true, Args) ->
+ case Args#view_query_args.view_type of
+ reduce ->
+ Msg = <<"Query parameter `conflicts` "
+ "is invalid for reduce views.">>,
+ throw({query_parse_error, Msg});
+ _ ->
+ Args#view_query_args{conflicts = true}
+ end;
+validate_view_query(conflicts, _Value, Args) ->
+ Args;
+validate_view_query(sorted, false, Args) ->
+ Args#view_query_args{sorted=false};
+validate_view_query(sorted, _Value, Args) ->
+ Args;
+validate_view_query(extra, _Value, Args) ->
+ Args.
+
+view_group_etag(Group, Db) ->
+ view_group_etag(Group, Db, nil).
+
+view_group_etag(#group{sig=Sig,current_seq=CurrentSeq}, _Db, Extra) ->
+ % ?LOG_ERROR("Group ~p",[Group]),
+ % This is not as granular as it could be.
+ % If there are updates to the db that do not affect the view index,
+ % they will change the Etag. For more granular Etags we'd need to keep
+ % track of the last Db seq that caused an index change.
+ chttpd:make_etag({Sig, CurrentSeq, Extra}).
+
+parse_bool_param("true") -> true;
+parse_bool_param("false") -> false;
+parse_bool_param(Val) ->
+ Msg = io_lib:format("Invalid value for boolean paramter: ~p", [Val]),
+ throw({query_parse_error, ?l2b(Msg)}).
+
+parse_int_param(Val) ->
+ case (catch list_to_integer(Val)) of
+ IntVal when is_integer(IntVal) ->
+ IntVal;
+ _ ->
+ Msg = io_lib:format("Invalid value for integer parameter: ~p", [Val]),
+ throw({query_parse_error, ?l2b(Msg)})
+ end.
+
+parse_positive_int_param(Val) ->
+ case parse_int_param(Val) of
+ IntVal when IntVal >= 0 ->
+ IntVal;
+ _ ->
+ Fmt = "Invalid value for positive integer parameter: ~p",
+ Msg = io_lib:format(Fmt, [Val]),
+ throw({query_parse_error, ?l2b(Msg)})
+ end.
diff --git a/deps/chttpd/test/chttpd_delayed_response_test.erl b/deps/chttpd/test/chttpd_delayed_response_test.erl
new file mode 100644
index 00000000..f6509dd2
--- /dev/null
+++ b/deps/chttpd/test/chttpd_delayed_response_test.erl
@@ -0,0 +1,43 @@
+%% Copyright 2011 Cloudant
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+%% use this file except in compliance with the License. You may obtain a copy of
+%% the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+%% License for the specific language governing permissions and limitations under
+%% the License.
+
+-module(chttpd_delayed_response_test).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+all_test_() ->
+ {foreach,
+ fun() -> application:load(couch) end,
+ fun(_) -> application:unload(couch) end,
+ [
+ fun delayed_chunked_response/1,
+ fun delayed_chunked_response_after_error/1
+ ]}.
+
+delayed_chunked_response(_) ->
+ {"sending an error first should be ok",
+ fun() ->
+ Req = #httpd{mochi_req=mock_request:new(nil, get, "/", {1, 1}, [])},
+ {ok, Resp} = chttpd:start_delayed_chunked_response(Req, 200, []),
+ ?assertMatch({ok, _}, chttpd:send_delayed_error(Resp, bad_request))
+ end}.
+
+delayed_chunked_response_after_error(_) ->
+ {"sending an error midstream should throw http_abort",
+ fun() ->
+ Req = #httpd{mochi_req=mock_request:new(nil, get, "/", {1, 1}, [])},
+ {ok, Resp} = chttpd:start_delayed_chunked_response(Req, 200, []),
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp, <<>>),
+ ?assertThrow({http_abort, _, _}, chttpd:send_delayed_error(Resp1, bad_request))
+ end}.
diff --git a/deps/chttpd/test/mock_request.erl b/deps/chttpd/test/mock_request.erl
new file mode 100644
index 00000000..e1f8b0ad
--- /dev/null
+++ b/deps/chttpd/test/mock_request.erl
@@ -0,0 +1,39 @@
+%% Copyright 2011 Cloudant
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+%% use this file except in compliance with the License. You may obtain a copy of
+%% the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+%% License for the specific language governing permissions and limitations under
+%% the License.
+
+-module(mock_request, [Socket, Method, RawPath, Version, Headers]).
+
+-compile(export_all).
+
+get_header_value(_) ->
+ undefined.
+
+parse_qs() ->
+ [].
+
+get(method) ->
+ Method;
+get(raw_path) ->
+ RawPath;
+get(version) ->
+ Version.
+
+should_close() ->
+ false.
+
+respond({Code, ResponseHeaders, _}) ->
+ mochiweb:new_response({THIS, Code, ResponseHeaders}).
+
+send(_) ->
+ ok.
diff --git a/deps/fabric/README.md b/deps/fabric/README.md
new file mode 100644
index 00000000..ab710ca9
--- /dev/null
+++ b/deps/fabric/README.md
@@ -0,0 +1,24 @@
+## fabric
+
+Fabric is a collection of proxy functions for [CouchDB][1] operations in a cluster. These functions are used in [BigCouch][2] as the remote procedure endpoints on each of the cluster nodes.
+
+For example, creating a database is a straightforward task in standalone CouchDB, but for BigCouch, each node that will store a shard/partition for the database needs to receive and execute a fabric function. The node handling the request also needs to compile the results from each of the nodes and respond accordingly to the client.
+
+Fabric is used in conjunction with 'Rexi' which is also an application within BigCouch.
+
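+As a minimal sketch of the API (the database name and the q/n values below
+are illustrative only):
+
+```erlang
+%% create a clustered database with 8 shards and 3 copies of each document;
+%% returns ok, or accepted if only a majority of nodes responded
+fabric:create_db(<<"mydb">>, [{q, "8"}, {n, "3"}]),
+{ok, Info} = fabric:get_db_info(<<"mydb">>),
+fabric:delete_db(<<"mydb">>, []).
+```
+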
+### Getting Started
+Fabric requires Erlang/OTP R13B03 or newer and can be built with [rebar][6], which comes bundled in the repository.
+
+### License
+[Apache 2.0][3]
+
+### Contact
+ * [http://cloudant.com][4]
+ * [info@cloudant.com][5]
+
+[1]: http://couchdb.apache.org
+[2]: http://github.com/cloudant/bigcouch
+[3]: http://www.apache.org/licenses/LICENSE-2.0.html
+[4]: http://cloudant.com
+[5]: mailto:info@cloudant.com
+[6]: http://github.com/basho/rebar
diff --git a/deps/fabric/include/fabric.hrl b/deps/fabric/include/fabric.hrl
new file mode 100644
index 00000000..c8de0f8f
--- /dev/null
+++ b/deps/fabric/include/fabric.hrl
@@ -0,0 +1,38 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-include_lib("eunit/include/eunit.hrl").
+
+-record(collector, {
+ db_name=nil,
+ query_args,
+ callback,
+ counters,
+ buffer_size,
+ blocked = [],
+ total_rows = 0,
+ offset = 0,
+ rows = [],
+ skip,
+ limit,
+ keys,
+ os_proc,
+ reducer,
+ lang,
+ sorted,
+ user_acc
+}).
+
+-record(view_row, {key, id, value, doc, worker}).
+-record(change, {key, id, value, deleted=false, doc, worker}).
diff --git a/deps/fabric/rebar b/deps/fabric/rebar
new file mode 100755
index 00000000..30c43ba5
--- /dev/null
+++ b/deps/fabric/rebar
Binary files differ
diff --git a/deps/fabric/rebar.config b/deps/fabric/rebar.config
new file mode 100644
index 00000000..bb4d2fe9
--- /dev/null
+++ b/deps/fabric/rebar.config
@@ -0,0 +1,18 @@
+% Copyright 2011 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{deps, [
+ {meck, ".*", {git, "https://github.com/eproxus/meck.git", {tag, "0.7.2"}}},
+ {twig, ".*", {git, "https://github.com/cloudant/twig.git", {tag, "0.2.1"}}}
+]}.
diff --git a/deps/fabric/src/fabric.app.src b/deps/fabric/src/fabric.app.src
new file mode 100644
index 00000000..a1cbb2c8
--- /dev/null
+++ b/deps/fabric/src/fabric.app.src
@@ -0,0 +1,6 @@
+{application, fabric, [
+ {description, "Routing and proxying layer for CouchDB cluster"},
+ {vsn, git},
+ {registered, []},
+ {applications, [kernel, stdlib, couch, rexi, mem3, twig]}
+]}.
diff --git a/deps/fabric/src/fabric.erl b/deps/fabric/src/fabric.erl
new file mode 100644
index 00000000..a8e3b2ea
--- /dev/null
+++ b/deps/fabric/src/fabric.erl
@@ -0,0 +1,460 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric).
+
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(ADMIN_CTX, {user_ctx, #user_ctx{roles = [<<"_admin">>]}}).
+
+% DBs
+-export([all_dbs/0, all_dbs/1, create_db/1, create_db/2, delete_db/1,
+ delete_db/2, get_db_info/1, get_doc_count/1, set_revs_limit/3,
+ set_security/3, get_revs_limit/1, get_security/1, get_security/2]).
+
+% Documents
+-export([open_doc/3, open_revs/4, get_missing_revs/2, get_missing_revs/3,
+ update_doc/3, update_docs/3, purge_docs/2, att_receiver/2]).
+
+% Views
+-export([all_docs/4, changes/4, query_view/3, query_view/4, query_view/6,
+ get_view_group_info/2]).
+
+% miscellany
+-export([design_docs/1, reset_validation_funs/1, cleanup_index_files/0,
+ cleanup_index_files/1]).
+
+-include("fabric.hrl").
+
+-type dbname() :: (iodata() | #db{}).
+-type docid() :: iodata().
+-type revision() :: {integer(), binary()}.
+-type callback() :: fun((any(), any()) -> {ok | stop, any()}).
+-type json_obj() :: {[{binary() | atom(), any()}]}.
+-type option() :: atom() | {atom(), any()}.
+
+%% db operations
+%% @equiv all_dbs(<<>>)
+all_dbs() ->
+ all_dbs(<<>>).
+
+%% @doc returns a list of all database names
+-spec all_dbs(Prefix::iodata()) -> {ok, [binary()]}.
+all_dbs(Prefix) when is_binary(Prefix) ->
+ Length = byte_size(Prefix),
+ MatchingDbs = ets:foldl(fun(#shard{dbname=DbName}, Acc) ->
+ case DbName of
+ <<Prefix:Length/binary, _/binary>> ->
+ [DbName | Acc];
+ _ ->
+ Acc
+ end
+ end, [], partitions),
+ {ok, lists:usort(MatchingDbs)};
+
+%% @equiv all_dbs(list_to_binary(Prefix))
+all_dbs(Prefix) when is_list(Prefix) ->
+ all_dbs(list_to_binary(Prefix)).
+
+%% @doc returns a property list of interesting properties
+%% about the database such as `doc_count', `disk_size',
+%% etc.
+-spec get_db_info(dbname()) ->
+ {ok, [
+ {instance_start_time, binary()} |
+ {doc_count, non_neg_integer()} |
+ {doc_del_count, non_neg_integer()} |
+ {purge_seq, non_neg_integer()} |
+ {compact_running, boolean()} |
+ {disk_size, non_neg_integer()} |
+ {disk_format_version, pos_integer()}
+ ]}.
+get_db_info(DbName) ->
+ fabric_db_info:go(dbname(DbName)).
+
+%% @doc the number of docs in a database
+-spec get_doc_count(dbname()) -> {ok, non_neg_integer()}.
+get_doc_count(DbName) ->
+ fabric_db_doc_count:go(dbname(DbName)).
+
+%% @equiv create_db(DbName, [])
+create_db(DbName) ->
+ create_db(DbName, []).
+
+%% @doc creates a database with the given name.
+%%
+%% Options can include values for q and n,
+%% for example `{q, "8"}' and `{n, "3"}', which
+%% control how many shards to split a database into
+%% and how many nodes each doc is copied to respectively.
+%%
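+%% A hypothetical call, with illustrative values:
+%%   fabric:create_db(<<"mydb">>, [{q, "8"}, {n, "3"}])
+%% returns `ok' once every shard reports success, or `accepted' if only
+%% a majority of nodes did.
+%%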
+-spec create_db(dbname(), [option()]) -> ok | accepted | {error, atom()}.
+create_db(DbName, Options) ->
+ fabric_db_create:go(dbname(DbName), opts(Options)).
+
+%% @equiv delete_db([])
+delete_db(DbName) ->
+ delete_db(DbName, []).
+
+%% @doc delete a database
+-spec delete_db(dbname(), [option()]) -> ok | accepted | {error, atom()}.
+delete_db(DbName, Options) ->
+ fabric_db_delete:go(dbname(DbName), opts(Options)).
+
+%% @doc provide an upper bound for the number of tracked document revisions
+-spec set_revs_limit(dbname(), pos_integer(), [option()]) -> ok.
+set_revs_limit(DbName, Limit, Options) when is_integer(Limit), Limit > 0 ->
+ fabric_db_meta:set_revs_limit(dbname(DbName), Limit, opts(Options)).
+
+%% @doc retrieves the maximum number of document revisions
+-spec get_revs_limit(dbname()) -> pos_integer() | no_return().
+get_revs_limit(DbName) ->
+ {ok, Db} = fabric_util:get_db(dbname(DbName), [?ADMIN_CTX]),
+ try couch_db:get_revs_limit(Db) after catch couch_db:close(Db) end.
+
+%% @doc sets the readers/writers/admin permissions for a database
+-spec set_security(dbname(), SecObj::json_obj(), [option()]) -> ok.
+set_security(DbName, SecObj, Options) ->
+ fabric_db_meta:set_security(dbname(DbName), SecObj, opts(Options)).
+
+get_security(DbName) ->
+ get_security(DbName, [?ADMIN_CTX]).
+
+%% @doc retrieve the security object for a database
+-spec get_security(dbname()) -> json_obj() | no_return().
+get_security(DbName, Options) ->
+ {ok, Db} = fabric_util:get_db(dbname(DbName), opts(Options)),
+ try couch_db:get_security(Db) after catch couch_db:close(Db) end.
+
+% doc operations
+
+%% @doc retrieve the doc with a given id
+-spec open_doc(dbname(), docid(), [option()]) ->
+ {ok, #doc{}} |
+ {not_found, missing | deleted} |
+ {timeout, any()} |
+ {error, any()} |
+ {error, any(), any()}.
+open_doc(DbName, Id, Options) ->
+ fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options)).
+
+%% @doc retrieve a collection of revisions, possibly all
+-spec open_revs(dbname(), docid(), [revision()] | all, [option()]) ->
+ {ok, [{ok, #doc{}} | {{not_found,missing}, revision()}]} |
+ {timeout, any()} |
+ {error, any()} |
+ {error, any(), any()}.
+open_revs(DbName, Id, Revs, Options) ->
+ fabric_doc_open_revs:go(dbname(DbName), docid(Id), Revs, opts(Options)).
+
+%% @equiv get_missing_revs(DbName, IdsRevs, [])
+get_missing_revs(DbName, IdsRevs) ->
+ get_missing_revs(DbName, IdsRevs, []).
+
+%% @doc retrieve missing revisions for a list of `{Id, Revs}'
+-spec get_missing_revs(dbname(),[{docid(), [revision()]}], [option()]) ->
+ {ok, [{docid(), any(), [any()]}]}.
+get_missing_revs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
+ Sanitized = [idrevs(IdR) || IdR <- IdsRevs],
+ fabric_doc_missing_revs:go(dbname(DbName), Sanitized, opts(Options)).
+
+%% @doc update a single doc
+%% @equiv update_docs(DbName,[Doc],Options)
+-spec update_doc(dbname(), #doc{}, [option()]) ->
+ {ok, any()} | any().
+update_doc(DbName, Doc, Options) ->
+ case update_docs(DbName, [Doc], opts(Options)) of
+ {ok, [{ok, NewRev}]} ->
+ {ok, NewRev};
+ {accepted, [{accepted, NewRev}]} ->
+ {accepted, NewRev};
+ {ok, [{{_Id, _Rev}, Error}]} ->
+ throw(Error);
+ {ok, [Error]} ->
+ throw(Error);
+ {ok, []} ->
+ % replicated (new_edits=false) updates return an empty list on success
+ #doc{revs = {Pos, [RevId | _]}} = doc(Doc),
+ {ok, {Pos, RevId}}
+ end.
+
+%% @doc update a list of docs
+-spec update_docs(dbname(), [#doc{}], [option()]) ->
+ {ok, any()} | any().
+update_docs(DbName, Docs, Options) ->
+ try
+ fabric_doc_update:go(dbname(DbName), docs(Docs), opts(Options)) of
+ {ok, Results} ->
+ {ok, Results};
+ {accepted, Results} ->
+ {accepted, Results};
+ Error ->
+ throw(Error)
+ catch {aborted, PreCommitFailures} ->
+ {aborted, PreCommitFailures}
+ end.
+
+purge_docs(_DbName, _IdsRevs) ->
+ not_implemented.
+
+%% @doc spawns a process to upload attachment data and
+%% returns a function that shards can use to communicate
+%% with the spawned middleman process
+-spec att_receiver(#httpd{}, Length :: undefined | chunked | pos_integer() |
+ {unknown_transfer_encoding, any()}) ->
+ function() | binary().
+att_receiver(Req, Length) ->
+ fabric_doc_attachments:receiver(Req, Length).
+
+%% @doc retrieves all docs. Additional query parameters, such as `limit',
+%% `start_key' and `end_key', `descending', and `include_docs', can
+%% also be passed to further constrain the query. See <a href=
+%% "http://wiki.apache.org/couchdb/HTTP_Document_API#All_Documents">
+%% all_docs</a> for details
+-spec all_docs(dbname(), callback(), [] | tuple(), #view_query_args{}) ->
+ {ok, [any()]}.
+all_docs(DbName, Callback, Acc0, #view_query_args{} = QueryArgs) when
+ is_function(Callback, 2) ->
+ fabric_view_all_docs:go(dbname(DbName), QueryArgs, Callback, Acc0);
+
+%% @doc convenience function that takes a keylist rather than a record
+%% @equiv all_docs(DbName, Callback, Acc0, kl_to_query_args(QueryArgs))
+all_docs(DbName, Callback, Acc0, QueryArgs) ->
+ all_docs(DbName, Callback, Acc0, kl_to_query_args(QueryArgs)).
+
+
+-spec changes(dbname(), callback(), any(), #changes_args{} | [{atom(),any()}]) ->
+ {ok, any()}.
+changes(DbName, Callback, Acc0, #changes_args{}=Options) ->
+ Feed = Options#changes_args.feed,
+ fabric_view_changes:go(dbname(DbName), Feed, Options, Callback, Acc0);
+
+%% @doc convenience function, takes keylist instead of record
+%% @equiv changes(DbName, Callback, Acc0, kl_to_changes_args(Options))
+changes(DbName, Callback, Acc0, Options) ->
+ changes(DbName, Callback, Acc0, kl_to_changes_args(Options)).
+
+%% @equiv query_view(DbName, DesignName, ViewName, #view_query_args{})
+query_view(DbName, DesignName, ViewName) ->
+ query_view(DbName, DesignName, ViewName, #view_query_args{}).
+
+%% @equiv query_view(DbName, DesignName,
+%% ViewName, fun default_callback/2, [], QueryArgs)
+query_view(DbName, DesignName, ViewName, QueryArgs) ->
+ Callback = fun default_callback/2,
+ query_view(DbName, DesignName, ViewName, Callback, [], QueryArgs).
+
+%% @doc execute a given view.
+%% There are many additional query args that can be passed to a view,
+%% see <a href="http://wiki.apache.org/couchdb/HTTP_view_API#Querying_Options">
+%% query args</a> for details.
+-spec query_view(dbname(), #doc{} | binary(), iodata(), callback(), any(),
+ #view_query_args{}) ->
+ any().
+query_view(DbName, Design, ViewName, Callback, Acc0, QueryArgs) ->
+ Db = dbname(DbName), View = name(ViewName),
+ case is_reduce_view(Db, Design, View, QueryArgs) of
+ true ->
+ Mod = fabric_view_reduce;
+ false ->
+ Mod = fabric_view_map
+ end,
+ Mod:go(Db, Design, View, QueryArgs, Callback, Acc0).
+
+%% @doc retrieve info about a view group, disk size, language, whether compaction
+%% is running and so forth
+-spec get_view_group_info(dbname(), #doc{} | docid()) ->
+ {ok, [
+ {signature, binary()} |
+ {language, binary()} |
+ {disk_size, non_neg_integer()} |
+ {compact_running, boolean()} |
+ {updater_running, boolean()} |
+ {waiting_commit, boolean()} |
+ {waiting_clients, non_neg_integer()} |
+ {update_seq, pos_integer()} |
+ {purge_seq, non_neg_integer()}
+ ]}.
+get_view_group_info(DbName, DesignId) ->
+ fabric_group_info:go(dbname(DbName), design_doc(DesignId)).
+
+%% @doc retrieve all the design docs from a database
+-spec design_docs(dbname()) -> {ok, [json_obj()]}.
+design_docs(DbName) ->
+ QueryArgs = #view_query_args{start_key = <<"_design/">>, include_docs=true},
+ Callback = fun({total_and_offset, _, _}, []) ->
+ {ok, []};
+ ({row, {Props}}, Acc) ->
+ case couch_util:get_value(id, Props) of
+ <<"_design/", _/binary>> ->
+ {ok, [couch_util:get_value(doc, Props) | Acc]};
+ _ ->
+ {stop, Acc}
+ end;
+ (complete, Acc) ->
+ {ok, lists:reverse(Acc)};
+ ({error, Reason}, _Acc) ->
+ {error, Reason}
+ end,
+ fabric:all_docs(dbname(DbName), Callback, [], QueryArgs).
+
+%% @doc forces a reload of validation functions; this is performed after
+%% design docs are updated
+%% NOTE: This function probably doesn't belong here as part of the API
+-spec reset_validation_funs(dbname()) -> [reference()].
+reset_validation_funs(DbName) ->
+ [rexi:cast(Node, {fabric_rpc, reset_validation_funs, [Name]}) ||
+ #shard{node=Node, name=Name} <- mem3:shards(DbName)].
+
+%% @doc clean up index files for all Dbs
+-spec cleanup_index_files() -> [ok].
+cleanup_index_files() ->
+ {ok, Dbs} = fabric:all_dbs(),
+ [cleanup_index_files(Db) || Db <- Dbs].
+
+%% @doc clean up index files for a specific db
+-spec cleanup_index_files(dbname()) -> ok.
+cleanup_index_files(DbName) ->
+ {ok, DesignDocs} = fabric:design_docs(DbName),
+
+ ActiveSigs = lists:map(fun(#doc{id = GroupId}) ->
+ {ok, Info} = fabric:get_view_group_info(DbName, GroupId),
+ binary_to_list(couch_util:get_value(signature, Info))
+ end, [couch_doc:from_json_obj(DD) || DD <- DesignDocs]),
+
+ FileList = filelib:wildcard([couch_config:get("couchdb", "view_index_dir"),
+ "/.shards/*/", couch_util:to_list(dbname(DbName)), ".[0-9]*_design/*"]),
+
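+ % keep any index file whose name matches an active signature;
+ % everything else is stale and safe to delete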
+ DeleteFiles = if ActiveSigs =:= [] -> FileList; true ->
+ {ok, RegExp} = re:compile([$(, string:join(ActiveSigs, "|"), $)]),
+ lists:filter(fun(FilePath) ->
+ re:run(FilePath, RegExp, [{capture, none}]) == nomatch
+ end, FileList)
+ end,
+ [file:delete(File) || File <- DeleteFiles],
+ ok.
+
+%% some simple type validation and transcoding
+
+dbname(DbName) when is_list(DbName) ->
+ list_to_binary(DbName);
+dbname(DbName) when is_binary(DbName) ->
+ DbName;
+dbname(#db{name=Name}) ->
+ Name;
+dbname(DbName) ->
+ erlang:error({illegal_database_name, DbName}).
+
+name(Thing) ->
+ couch_util:to_binary(Thing).
+
+docid(DocId) when is_list(DocId) ->
+ list_to_binary(DocId);
+docid(DocId) when is_binary(DocId) ->
+ DocId;
+docid(DocId) ->
+ erlang:error({illegal_docid, DocId}).
+
+docs(Docs) when is_list(Docs) ->
+ [doc(D) || D <- Docs];
+docs(Docs) ->
+ erlang:error({illegal_docs_list, Docs}).
+
+doc(#doc{} = Doc) ->
+ Doc;
+doc({_} = Doc) ->
+ couch_doc:from_json_obj(Doc);
+doc(Doc) ->
+ erlang:error({illegal_doc_format, Doc}).
+
+design_doc(#doc{} = DDoc) ->
+ DDoc;
+design_doc(DocId) when is_list(DocId) ->
+ design_doc(list_to_binary(DocId));
+design_doc(<<"_design/", _/binary>> = DocId) ->
+ DocId;
+design_doc(GroupName) ->
+ <<"_design/", GroupName/binary>>.
+
+idrevs({Id, Revs}) when is_list(Revs) ->
+ {docid(Id), [rev(R) || R <- Revs]}.
+
+rev(Rev) when is_list(Rev); is_binary(Rev) ->
+ couch_doc:parse_rev(Rev);
+rev({Seq, Hash} = Rev) when is_integer(Seq), is_binary(Hash) ->
+ Rev.
+
+%% @doc convenience method, useful when testing or calling fabric from the shell
+opts(Options) ->
+ add_option(user_ctx, add_option(io_priority, Options)).
+
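+% if the caller did not supply Key, fall back to a value stashed in the
+% process dictionary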
+add_option(Key, Options) ->
+ case couch_util:get_value(Key, Options) of
+ undefined ->
+ case erlang:get(Key) of
+ undefined ->
+ Options;
+ Value ->
+ [{Key, Value} | Options]
+ end;
+ _ ->
+ Options
+ end.
+
+default_callback(complete, Acc) ->
+ {ok, lists:reverse(Acc)};
+default_callback(Row, Acc) ->
+ {ok, [Row | Acc]}.
+
+is_reduce_view(_, _, _, #view_query_args{view_type=Reduce}) ->
+ Reduce =:= reduce.
+
+%% @doc convenience method for use in the shell, converts a keylist
+%% to a `changes_args' record
+kl_to_changes_args(KeyList) ->
+ kl_to_record(KeyList, changes_args).
+
+%% @doc convenience method for use in the shell, converts a keylist
+%% to a `view_query_args' record
+kl_to_query_args(KeyList) ->
+ kl_to_record(KeyList, view_query_args).
+
+%% @doc finds the index of the given Key in the record.
+%% note that record_info/2 is only resolved at compile time,
+%% so the code must be written this way; add a case clause
+%% for each new record type
+lookup_index(Key,RecName) ->
+ Indexes =
+ case RecName of
+ changes_args ->
+ lists:zip(record_info(fields, changes_args),
+ lists:seq(2, record_info(size, changes_args)));
+ view_query_args ->
+ lists:zip(record_info(fields, view_query_args),
+ lists:seq(2, record_info(size, view_query_args)))
+ end,
+ couch_util:get_value(Key, Indexes).
+
+%% @doc convert a keylist to record with given `RecName'
+%% @see lookup_index
+kl_to_record(KeyList,RecName) ->
+ Acc0 = case RecName of
+ changes_args -> #changes_args{};
+ view_query_args -> #view_query_args{}
+ end,
+ lists:foldl(fun({Key, Value}, Acc) ->
+ Index = lookup_index(couch_util:to_existing_atom(Key),RecName),
+ setelement(Index, Acc, Value)
+ end, Acc0, KeyList).
diff --git a/deps/fabric/src/fabric_db_create.erl b/deps/fabric/src/fabric_db_create.erl
new file mode 100644
index 00000000..080517be
--- /dev/null
+++ b/deps/fabric/src/fabric_db_create.erl
@@ -0,0 +1,161 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_db_create).
+-export([go/2]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(DBNAME_REGEX, "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*$").
+
+%% @doc Create a new database, and all its partition files across the cluster.
+%% Options is a proplist that may contain user_ctx, n, q, and validate_name.
+go(DbName, Options) ->
+ case validate_dbname(DbName, Options) of
+ ok ->
+ {Shards, Doc} = generate_shard_map(DbName, Options),
+ case {create_shard_files(Shards), create_shard_db_doc(Doc)} of
+ {ok, {ok, Status}} ->
+ Status;
+ {file_exists, {ok, _}} ->
+ {error, file_exists};
+ {_, Error} ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+validate_dbname(DbName, Options) ->
+ case couch_util:get_value(validate_name, Options, true) of
+ false ->
+ ok;
+ true ->
+ case re:run(DbName, ?DBNAME_REGEX, [{capture,none}]) of
+ match ->
+ ok;
+ nomatch when DbName =:= <<"_users">> ->
+ ok;
+ nomatch when DbName =:= <<"_replicator">> ->
+ ok;
+ nomatch ->
+ {error, illegal_database_name}
+ end
+ end.
+
+generate_shard_map(DbName, Options) ->
+ {MegaSecs, Secs, _} = now(),
+ Suffix = "." ++ integer_to_list(MegaSecs*1000000 + Secs),
+ Shards = mem3:choose_shards(DbName, [{shard_suffix,Suffix} | Options]),
+ case mem3_util:open_db_doc(DbName) of
+ {ok, Doc} ->
+ % the DB already exists, and may have a different Suffix
+ ok;
+ {not_found, _} ->
+ Doc = make_document(Shards, Suffix)
+ end,
+ {Shards, Doc}.
+
+create_shard_files(Shards) ->
+ Workers = fabric_util:submit_jobs(Shards, create_db, []),
+ RexiMon = fabric_util:create_monitors(Shards),
+ try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Workers) of
+ {error, file_exists} ->
+ file_exists;
+ _ ->
+ ok
+ after
+ rexi_monitor:stop(RexiMon)
+ end.
+
+handle_message(file_exists, _, _) ->
+ {error, file_exists};
+
+handle_message({rexi_DOWN, _, {_, Node}, _}, _, Workers) ->
+ case lists:filter(fun(S) -> S#shard.node =/= Node end, Workers) of
+ [] ->
+ {stop, ok};
+ RemainingWorkers ->
+ {ok, RemainingWorkers}
+ end;
+
+handle_message(_, Worker, Workers) ->
+ case lists:delete(Worker, Workers) of
+ [] ->
+ {stop, ok};
+ RemainingWorkers ->
+ {ok, RemainingWorkers}
+ end.
+
+create_shard_db_doc(Doc) ->
+ Shards = [#shard{node=N} || N <- mem3:nodes()],
+ RexiMon = fabric_util:create_monitors(Shards),
+ Workers = fabric_util:submit_jobs(Shards, create_shard_db_doc, [Doc]),
+ Acc0 = {length(Shards), fabric_dict:init(Workers, nil)},
+ try fabric_util:recv(Workers, #shard.ref, fun handle_db_update/3, Acc0) of
+ {timeout, _} ->
+ {error, timeout};
+ Else ->
+ Else
+ after
+ rexi_monitor:stop(RexiMon)
+ end.
+
+handle_db_update({rexi_DOWN, _, {_, Node}, _}, _Worker, {W, Counters}) ->
+ New = fabric_dict:filter(fun(S, _) -> S#shard.node =/= Node end, Counters),
+ maybe_stop(W, New);
+
+handle_db_update({rexi_EXIT, _Reason}, Worker, {W, Counters}) ->
+ maybe_stop(W, fabric_dict:erase(Worker, Counters));
+
+handle_db_update(conflict, _, _) ->
+ % just fail when we get any conflicts
+ {error, conflict};
+
+handle_db_update(Msg, Worker, {W, Counters}) ->
+ maybe_stop(W, fabric_dict:store(Worker, Msg, Counters)).
+
+maybe_stop(W, Counters) ->
+ case fabric_dict:any(nil, Counters) of
+ true ->
+ {ok, {W, Counters}};
+ false ->
+ case lists:sum([1 || {_, ok} <- Counters]) of
+ W ->
+ {stop, ok};
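+ % a simple majority (more than half of W) of ok replies is
+ % enough to accept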
+ NumOk when NumOk >= (W div 2 + 1) ->
+ {stop, accepted};
+ _ ->
+ {error, internal_server_error}
+ end
+ end.
+
+make_document([#shard{dbname=DbName}|_] = Shards, Suffix) ->
+ {RawOut, ByNodeOut, ByRangeOut} =
+ lists:foldl(fun(#shard{node=N, range=[B,E]}, {Raw, ByNode, ByRange}) ->
+ Range = ?l2b([couch_util:to_hex(<<B:32/integer>>), "-",
+ couch_util:to_hex(<<E:32/integer>>)]),
+ Node = couch_util:to_binary(N),
+ {[[<<"add">>, Range, Node] | Raw], orddict:append(Node, Range, ByNode),
+ orddict:append(Range, Node, ByRange)}
+ end, {[], [], []}, Shards),
+ #doc{id=DbName, body = {[
+ {<<"shard_suffix">>, Suffix},
+ {<<"changelog">>, lists:sort(RawOut)},
+ {<<"by_node">>, {[{K,lists:sort(V)} || {K,V} <- ByNodeOut]}},
+ {<<"by_range">>, {[{K,lists:sort(V)} || {K,V} <- ByRangeOut]}}
+ ]}}.
+
diff --git a/deps/fabric/src/fabric_db_delete.erl b/deps/fabric/src/fabric_db_delete.erl
new file mode 100644
index 00000000..9283d0b2
--- /dev/null
+++ b/deps/fabric/src/fabric_db_delete.erl
@@ -0,0 +1,95 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_db_delete).
+-export([go/2]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+
+%% @doc Options aren't currently used by couch on delete, but are kept here
+%% for consistency with fabric_db_create and possible future use
+%% @see couch_server:delete_db
+%%
+go(DbName, _Options) ->
+ Shards = mem3:shards(DbName),
+ % delete doc from shard_db
+ try delete_shard_db_doc(DbName) of
+ {ok, ok} ->
+ ok;
+ {ok, accepted} ->
+ accepted;
+ {ok, not_found} ->
+ erlang:error(database_does_not_exist, DbName);
+ Error ->
+ Error
+ after
+ % delete the shard files
+ fabric_util:submit_jobs(Shards, delete_db, [])
+ end.
+
+delete_shard_db_doc(Doc) ->
+ Shards = [#shard{node=N} || N <- mem3:nodes()],
+ RexiMon = fabric_util:create_monitors(Shards),
+ Workers = fabric_util:submit_jobs(Shards, delete_shard_db_doc, [Doc]),
+ Acc0 = {length(Shards), fabric_dict:init(Workers, nil)},
+ try fabric_util:recv(Workers, #shard.ref, fun handle_db_update/3, Acc0) of
+ {timeout, _} ->
+ {error, timeout};
+ Else ->
+ Else
+ after
+ rexi_monitor:stop(RexiMon)
+ end.
+
+handle_db_update({rexi_DOWN, _, {_, Node}, _}, _Worker, {W, Counters}) ->
+ New = fabric_dict:filter(fun(S, _) -> S#shard.node =/= Node end, Counters),
+ maybe_stop(W, New);
+
+handle_db_update({rexi_EXIT, _Reason}, Worker, {W, Counters}) ->
+ maybe_stop(W, fabric_dict:erase(Worker, Counters));
+
+handle_db_update(conflict, _, _) ->
+ % just fail when we get any conflicts
+ {error, conflict};
+
+handle_db_update(Msg, Worker, {W, Counters}) ->
+ maybe_stop(W, fabric_dict:store(Worker, Msg, Counters)).
+
+maybe_stop(W, Counters) ->
+ case fabric_dict:any(nil, Counters) of
+ true ->
+ {ok, {W, Counters}};
+ false ->
+ {Ok,NotFound} = fabric_dict:fold(fun count_replies/3, {0,0}, Counters),
+ case {Ok + NotFound, Ok, NotFound} of
+ {W, 0, W} ->
+ {#shard{dbname=Name}, _} = hd(Counters),
+ twig:log(warn, "~p not_found ~s", [?MODULE, Name]),
+ {stop, not_found};
+ {W, _, _} ->
+ {stop, ok};
+ {N, M, _} when N >= (W div 2 + 1), M > 0 ->
+ {stop, accepted};
+ _ ->
+ {error, internal_server_error}
+ end
+ end.
+
+count_replies(_, ok, {Ok, NotFound}) ->
+ {Ok+1, NotFound};
+count_replies(_, not_found, {Ok, NotFound}) ->
+ {Ok, NotFound+1};
+count_replies(_, _, Acc) ->
+ Acc.
diff --git a/deps/fabric/src/fabric_db_doc_count.erl b/deps/fabric/src/fabric_db_doc_count.erl
new file mode 100644
index 00000000..107f212a
--- /dev/null
+++ b/deps/fabric/src/fabric_db_doc_count.erl
@@ -0,0 +1,68 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_db_doc_count).
+
+-export([go/1]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+go(DbName) ->
+ Shards = mem3:shards(DbName),
+ Workers = fabric_util:submit_jobs(Shards, get_doc_count, []),
+ RexiMon = fabric_util:create_monitors(Shards),
+ Acc0 = {fabric_dict:init(Workers, nil), 0},
+ try
+ fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0)
+ after
+ rexi_monitor:stop(RexiMon)
+ end.
+
+handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {Counters, Acc}) ->
+ case fabric_util:remove_down_workers(Counters, NodeRef) of
+ {ok, NewCounters} ->
+ {ok, {NewCounters, Acc}};
+ error ->
+ {error, {nodedown, <<"progress not possible">>}}
+ end;
+
+handle_message({rexi_EXIT, Reason}, Shard, {Counters, Acc}) ->
+ NewCounters = lists:keydelete(Shard, 1, Counters),
+ case fabric_view:is_progress_possible(NewCounters) of
+ true ->
+ {ok, {NewCounters, Acc}};
+ false ->
+ {error, Reason}
+ end;
+
+handle_message({ok, Count}, Shard, {Counters, Acc}) ->
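+ % count each shard range exactly once; overlapping copies of a range
+ % are dropped as replies arrive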
+ case fabric_dict:lookup_element(Shard, Counters) of
+ undefined ->
+ % already heard from someone else in this range
+ {ok, {Counters, Acc}};
+ nil ->
+ C1 = fabric_dict:store(Shard, ok, Counters),
+ C2 = fabric_view:remove_overlapping_shards(Shard, C1),
+ case fabric_dict:any(nil, C2) of
+ true ->
+ {ok, {C2, Count+Acc}};
+ false ->
+ {stop, Count+Acc}
+ end
+ end;
+handle_message(_, _, Acc) ->
+ {ok, Acc}.
+
diff --git a/deps/fabric/src/fabric_db_info.erl b/deps/fabric/src/fabric_db_info.erl
new file mode 100644
index 00000000..63fb44a5
--- /dev/null
+++ b/deps/fabric/src/fabric_db_info.erl
@@ -0,0 +1,104 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_db_info).
+
+-export([go/1]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+
+go(DbName) ->
+ Shards = mem3:shards(DbName),
+ Workers = fabric_util:submit_jobs(Shards, get_db_info, []),
+ RexiMon = fabric_util:create_monitors(Shards),
+ Acc0 = {fabric_dict:init(Workers, nil), []},
+ try
+ fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0)
+ after
+ rexi_monitor:stop(RexiMon)
+ end.
+
+handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {Counters, Acc}) ->
+ case fabric_util:remove_down_workers(Counters, NodeRef) of
+ {ok, NewCounters} ->
+ {ok, {NewCounters, Acc}};
+ error ->
+ {error, {nodedown, <<"progress not possible">>}}
+ end;
+
+handle_message({rexi_EXIT, Reason}, Shard, {Counters, Acc}) ->
+ NewCounters = lists:keydelete(Shard, 1, Counters),
+ case fabric_view:is_progress_possible(NewCounters) of
+ true ->
+ {ok, {NewCounters, Acc}};
+ false ->
+ {error, Reason}
+ end;
+
+handle_message({ok, Info}, #shard{dbname=Name} = Shard, {Counters, Acc}) ->
+ case fabric_dict:lookup_element(Shard, Counters) of
+ undefined ->
+ % already heard from someone else in this range
+ {ok, {Counters, Acc}};
+ nil ->
+ Seq = couch_util:get_value(update_seq, Info),
+ C1 = fabric_dict:store(Shard, Seq, Counters),
+ C2 = fabric_view:remove_overlapping_shards(Shard, C1),
+ case fabric_dict:any(nil, C2) of
+ true ->
+ {ok, {C2, [Info|Acc]}};
+ false ->
+ {stop, [
+ {db_name,Name},
+ {update_seq, fabric_view_changes:pack_seqs(C2)} |
+ merge_results(lists:flatten([Info|Acc]))
+ ]}
+ end
+ end;
+handle_message(_, _, Acc) ->
+ {ok, Acc}.
+
+merge_results(Info) ->
+ Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
+ orddict:new(), Info),
+ orddict:fold(fun
+ (doc_count, X, Acc) ->
+ [{doc_count, lists:sum(X)} | Acc];
+ (doc_del_count, X, Acc) ->
+ [{doc_del_count, lists:sum(X)} | Acc];
+ (purge_seq, X, Acc) ->
+ [{purge_seq, lists:sum(X)} | Acc];
+ (compact_running, X, Acc) ->
+ [{compact_running, lists:member(true, X)} | Acc];
+ (disk_size, X, Acc) ->
+ [{disk_size, lists:sum(X)} | Acc];
+ (other, X, Acc) ->
+ [{other, {merge_other_results(X)}} | Acc];
+ (disk_format_version, X, Acc) ->
+ [{disk_format_version, lists:max(X)} | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [{instance_start_time, <<"0">>}], Dict).
+
+merge_other_results(Results) ->
+ Dict = lists:foldl(fun({Props}, D) ->
+ lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end, D, Props)
+ end, orddict:new(), Results),
+ orddict:fold(fun
+ (data_size, X, Acc) ->
+ [{data_size, lists:sum(X)} | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [], Dict).
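+
+% Illustrative merge (hypothetical shard infos): combining
+%   [{doc_count,10},{doc_del_count,1},{disk_size,100},{compact_running,false}]
+% with
+%   [{doc_count,5},{doc_del_count,0},{disk_size,50},{compact_running,true}]
+% yields doc_count 15, doc_del_count 1, disk_size 150 and compact_running
+% true; disk_format_version takes the maximum across shards.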
diff --git a/deps/fabric/src/fabric_db_meta.erl b/deps/fabric/src/fabric_db_meta.erl
new file mode 100644
index 00000000..87721555
--- /dev/null
+++ b/deps/fabric/src/fabric_db_meta.erl
@@ -0,0 +1,49 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_db_meta).
+
+-export([set_revs_limit/3, set_security/3]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+
+set_revs_limit(DbName, Limit, Options) ->
+ Shards = mem3:shards(DbName),
+ Workers = fabric_util:submit_jobs(Shards, set_revs_limit, [Limit, Options]),
+ Waiting = length(Workers) - 1,
+ case fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Waiting) of
+ {ok, ok} ->
+ ok;
+ Error ->
+ Error
+ end.
+
+set_security(DbName, SecObj, Options) ->
+ Shards = mem3:shards(DbName),
+ Workers = fabric_util:submit_jobs(Shards, set_security, [SecObj, Options]),
+ Waiting = length(Workers) - 1,
+ case fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Waiting) of
+ {ok, ok} ->
+ ok;
+ Error ->
+ Error
+ end.
+
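+% The countdown below implements quorum-of-all: with Q shards, Waiting starts
+% at Q - 1, each ok reply decrements it, and the reply that arrives at 0
+% stops the fold with ok (e.g. for a hypothetical Q = 3: 2 -> 1 -> stop).
+% Any other reply aborts with {error, Reply}.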
+handle_message(ok, _, 0) ->
+ {stop, ok};
+handle_message(ok, _, Waiting) ->
+ {ok, Waiting - 1};
+handle_message(Error, _, _Waiting) ->
+    {error, Error}.
\ No newline at end of file
diff --git a/deps/fabric/src/fabric_db_update_listener.erl b/deps/fabric/src/fabric_db_update_listener.erl
new file mode 100644
index 00000000..e29f3ec7
--- /dev/null
+++ b/deps/fabric/src/fabric_db_update_listener.erl
@@ -0,0 +1,114 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_db_update_listener).
+
+-export([go/4, start_update_notifier/1, stop/1, wait_db_updated/1]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+
+go(Parent, ParentRef, DbName, Timeout) ->
+ Notifiers = start_update_notifiers(DbName),
+ MonRefs = lists:usort([{rexi_server, Node} || {Node, _Ref} <- Notifiers]),
+ RexiMon = rexi_monitor:start(MonRefs),
+ %% Add calling controller node as rexi end point as this controller will
+ %% receive messages from it
+ Workers = [{Parent, ParentRef} | Notifiers],
+ try
+ receive_results(Workers, {Workers, Parent, unset}, Timeout)
+ after
+ rexi_monitor:stop(RexiMon),
+ stop_update_notifiers(Notifiers)
+ end.
+
+start_update_notifiers(DbName) ->
+ lists:map(fun(#shard{node=Node, name=Name}) ->
+ {Node, rexi:cast(Node, {?MODULE, start_update_notifier, [Name]})}
+ end, mem3:shards(DbName)).
+
+% rexi endpoint
+start_update_notifier(DbName) ->
+ {Caller, Ref} = get(rexi_from),
+    Fun = fun
+        ({_, X}) when X == DbName -> erlang:send(Caller, {Ref, db_updated});
+        (_) -> ok
+    end,
+ Id = {couch_db_update_notifier, make_ref()},
+ ok = gen_event:add_sup_handler(couch_db_update, Id, Fun),
+ receive {gen_event_EXIT, Id, Reason} ->
+ rexi:reply({gen_event_EXIT, DbName, Reason})
+ end.
+
+stop_update_notifiers(Notifiers) ->
+ [rexi:kill(Node, Ref) || {Node, Ref} <- Notifiers].
+
+stop({Pid, Ref}) ->
+ erlang:send(Pid, {Ref, done}).
+
+wait_db_updated({Pid, Ref}) ->
+ erlang:send(Pid, {Ref, get_state}),
+ receive
+ Any ->
+ Any
+ end.
+
+receive_results(Workers, State, Timeout) ->
+ case rexi_utils:recv(Workers, 2, fun handle_message/3, State,
+ infinity, Timeout) of
+ {timeout, {NewWorkers, Parent, State1}} ->
+ erlang:send(Parent, timeout),
+ State2 =
+ case State1 of
+ waiting ->
+ unset;
+ Any -> Any
+ end,
+ receive_results(NewWorkers, {NewWorkers, Parent, State2}, Timeout);
+ {_, NewState} ->
+ {ok, NewState}
+ end.
+
+
+handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, {Workers, Parent, State}) ->
+    NewWorkers = lists:filter(fun({Node, _Ref}) -> Node =/= NodeRef end, Workers),
+ case NewWorkers of
+ [] ->
+ {error, {nodedown, <<"progress not possible">>}};
+ _ ->
+ {ok, {NewWorkers, Parent, State}}
+ end;
+handle_message({rexi_EXIT, Reason}, Worker, {Workers, Parent, State}) ->
+ NewWorkers = lists:delete(Worker,Workers),
+ case NewWorkers of
+ [] ->
+ {error, Reason};
+ _ ->
+ {ok, {NewWorkers, Parent, State}}
+ end;
+handle_message(db_updated, {_Worker, _From}, {Workers, Parent, waiting}) ->
+ % propagate message to calling controller
+ erlang:send(Parent, updated),
+ {ok, {Workers, Parent, unset}};
+handle_message(db_updated, _Worker, {Workers, Parent, State})
+ when State == unset orelse State == updated ->
+ {ok, {Workers, Parent, updated}};
+handle_message(get_state, {_Worker, _From}, {Workers, Parent, unset}) ->
+ {ok, {Workers, Parent, waiting}};
+handle_message(get_state, {_Worker, _From}, {Workers, Parent, State}) ->
+ erlang:send(Parent, State),
+ {ok, {Workers, Parent, unset}};
+handle_message(done, _, _) ->
+ {stop, ok}.
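+
+% Hedged usage sketch (names hypothetical): a caller typically spawns
+%   Pid = spawn(?MODULE, go, [self(), Ref, DbName, Timeout])
+% and then polls with wait_db_updated({Pid, Ref}), receiving `updated` once
+% any shard has fired or `timeout` if nothing arrived within Timeout;
+% stop({Pid, Ref}) tears the listener down. The third element of the state
+% (unset | waiting | updated) buffers at most one update between polls.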
+
+
+
diff --git a/deps/fabric/src/fabric_dict.erl b/deps/fabric/src/fabric_dict.erl
new file mode 100644
index 00000000..cea537ca
--- /dev/null
+++ b/deps/fabric/src/fabric_dict.erl
@@ -0,0 +1,51 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_dict).
+-compile(export_all).
+
+% Instead of ets, let's use an ordered keylist. We'll need to revisit if we
+% have >> 100 shards, so a private interface is a good idea. - APK June 2010
+
+init(Keys, InitialValue) ->
+ orddict:from_list([{Key, InitialValue} || Key <- Keys]).
+
+
+decrement_all(Dict) ->
+ [{K,V-1} || {K,V} <- Dict].
+
+store(Key, Value, Dict) ->
+ orddict:store(Key, Value, Dict).
+
+erase(Key, Dict) ->
+ orddict:erase(Key, Dict).
+
+update_counter(Key, Incr, Dict0) ->
+ orddict:update_counter(Key, Incr, Dict0).
+
+
+lookup_element(Key, Dict) ->
+ couch_util:get_value(Key, Dict).
+
+size(Dict) ->
+ orddict:size(Dict).
+
+any(Value, Dict) ->
+ lists:keymember(Value, 2, Dict).
+
+filter(Fun, Dict) ->
+ orddict:filter(Fun, Dict).
+
+fold(Fun, Acc0, Dict) ->
+ orddict:fold(Fun, Acc0, Dict).
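+
+% Minimal usage sketch (illustrative values only):
+%   D0 = fabric_dict:init([S1, S2], nil),      % [{S1,nil},{S2,nil}]
+%   D1 = fabric_dict:store(S1, ok, D0),
+%   ok = fabric_dict:lookup_element(S1, D1),
+%   true = fabric_dict:any(nil, D1)            % S2 is still nil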
diff --git a/deps/fabric/src/fabric_doc_attachments.erl b/deps/fabric/src/fabric_doc_attachments.erl
new file mode 100644
index 00000000..a45ba7c7
--- /dev/null
+++ b/deps/fabric/src/fabric_doc_attachments.erl
@@ -0,0 +1,131 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_doc_attachments).
+
+-include("fabric.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+%% couch api calls
+-export([receiver/2]).
+
+receiver(_Req, undefined) ->
+ <<"">>;
+receiver(_Req, {unknown_transfer_encoding, Unknown}) ->
+ exit({unknown_transfer_encoding, Unknown});
+receiver(Req, chunked) ->
+ MiddleMan = spawn(fun() -> middleman(Req, chunked) end),
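+    % this fun head deliberately matches only the expected 4096-byte chunk size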
+ fun(4096, ChunkFun, ok) ->
+ write_chunks(MiddleMan, ChunkFun)
+ end;
+receiver(_Req, 0) ->
+ <<"">>;
+receiver(Req, Length) when is_integer(Length) ->
+ maybe_send_continue(Req),
+ Middleman = spawn(fun() -> middleman(Req, Length) end),
+ fun() ->
+ Middleman ! {self(), gimme_data},
+ receive {Middleman, Data} -> Data end
+ end;
+receiver(_Req, Length) ->
+ exit({length_not_integer, Length}).
+
+%%
+%% internal
+%%
+
+maybe_send_continue(#httpd{mochi_req = MochiReq} = Req) ->
+ case couch_httpd:header_value(Req, "expect") of
+ undefined ->
+ ok;
+ Expect ->
+ case string:to_lower(Expect) of
+ "100-continue" ->
+ MochiReq:start_raw_response({100, gb_trees:empty()});
+ _ ->
+ ok
+ end
+ end.
+
+write_chunks(MiddleMan, ChunkFun) ->
+ MiddleMan ! {self(), gimme_data},
+ receive
+ {MiddleMan, {0, _Footers}} ->
+ % MiddleMan ! {self(), done},
+ ok;
+ {MiddleMan, ChunkRecord} ->
+ ChunkFun(ChunkRecord, ok),
+ write_chunks(MiddleMan, ChunkFun)
+ end.
+
+receive_unchunked_attachment(_Req, 0) ->
+ ok;
+receive_unchunked_attachment(Req, Length) ->
+ receive {MiddleMan, go} ->
+ Data = couch_httpd:recv(Req, 0),
+ MiddleMan ! {self(), Data}
+ end,
+ receive_unchunked_attachment(Req, Length - size(Data)).
+
+middleman(Req, chunked) ->
+ % spawn a process to actually receive the uploaded data
+ RcvFun = fun(ChunkRecord, ok) ->
+ receive {From, go} -> From ! {self(), ChunkRecord} end, ok
+ end,
+ Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req,4096,RcvFun,ok) end),
+
+ % take requests from the DB writers and get data from the receiver
+ N = erlang:list_to_integer(couch_config:get("cluster","n")),
+ middleman_loop(Receiver, N, dict:new(), 0, []);
+
+middleman(Req, Length) ->
+ Receiver = spawn(fun() -> receive_unchunked_attachment(Req, Length) end),
+ N = erlang:list_to_integer(couch_config:get("cluster","n")),
+ middleman_loop(Receiver, N, dict:new(), 0, []).
+
+middleman_loop(Receiver, N, Counters, Offset, ChunkList) ->
+ receive {From, gimme_data} ->
+ % figure out how far along this writer (From) is in the list
+ {NewCounters, WhichChunk} = case dict:find(From, Counters) of
+ {ok, I} ->
+ {dict:update_counter(From, 1, Counters), I};
+ error ->
+ {dict:store(From, 2, Counters), 1}
+ end,
+ ListIndex = WhichChunk - Offset,
+
+ % talk to the receiver to get another chunk if necessary
+ ChunkList1 = if ListIndex > length(ChunkList) ->
+ Receiver ! {self(), go},
+ receive {Receiver, ChunkRecord} -> ChunkList ++ [ChunkRecord] end;
+ true -> ChunkList end,
+
+ % reply to the writer
+ From ! {self(), lists:nth(ListIndex, ChunkList1)},
+
+ % check if we can drop a chunk from the head of the list
+ SmallestIndex = dict:fold(fun(_, Val, Acc) -> lists:min([Val,Acc]) end,
+ WhichChunk+1, NewCounters),
+ Size = dict:size(NewCounters),
+
+ {NewChunkList, NewOffset} =
+ if Size == N andalso (SmallestIndex - Offset) == 2 ->
+ {tl(ChunkList1), Offset+1};
+ true ->
+ {ChunkList1, Offset}
+ end,
+ middleman_loop(Receiver, N, NewCounters, NewOffset, NewChunkList)
+ after 10000 ->
+ ok
+ end.
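+
+% Hedged walk-through of the bookkeeping above (hypothetical N = 2 writers):
+% Counters maps each writer pid to the index of the next chunk it wants, and
+% Offset counts chunks already dropped from the head of ChunkList. Once every
+% writer has consumed the oldest retained chunk (SmallestIndex - Offset == 2
+% with all N writers heard from), that chunk is discarded, so memory stays
+% bounded by the slowest writer rather than the attachment size.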
diff --git a/deps/fabric/src/fabric_doc_missing_revs.erl b/deps/fabric/src/fabric_doc_missing_revs.erl
new file mode 100644
index 00000000..2dd04a70
--- /dev/null
+++ b/deps/fabric/src/fabric_doc_missing_revs.erl
@@ -0,0 +1,90 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_doc_missing_revs).
+
+-export([go/2, go/3]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+
+go(DbName, AllIdsRevs) ->
+ go(DbName, AllIdsRevs, []).
+
+go(DbName, AllIdsRevs, Options) ->
+ Workers = lists:map(fun({#shard{name=Name, node=Node} = Shard, IdsRevs}) ->
+ Ref = rexi:cast(Node, {fabric_rpc, get_missing_revs, [Name, IdsRevs,
+ Options]}),
+ Shard#shard{ref=Ref}
+ end, group_idrevs_by_shard(DbName, AllIdsRevs)),
+ ResultDict = dict:from_list([{Id, {{nil,Revs},[]}} || {Id, Revs} <- AllIdsRevs]),
+ RexiMon = fabric_util:create_monitors(Workers),
+ Acc0 = {length(Workers), ResultDict, Workers},
+ try
+ fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0)
+ after
+ rexi_monitor:stop(RexiMon)
+ end.
+
+handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {_WorkerLen, ResultDict, Workers}) ->
+ NewWorkers = [W || #shard{node=Node} = W <- Workers, Node =/= NodeRef],
+ skip_message({fabric_dict:size(NewWorkers), ResultDict, NewWorkers});
+handle_message({rexi_EXIT, _}, Worker, {W, D, Workers}) ->
+ skip_message({W-1,D,lists:delete(Worker, Workers)});
+handle_message({ok, Results}, _Worker, {1, D0, _}) ->
+ D = update_dict(D0, Results),
+ {stop, dict:fold(fun force_reply/3, [], D)};
+handle_message({ok, Results}, Worker, {WaitingCount, D0, Workers}) ->
+ D = update_dict(D0, Results),
+ case dict:fold(fun maybe_reply/3, {stop, []}, D) of
+ continue ->
+ % still haven't heard about some Ids
+ {ok, {WaitingCount - 1, D, lists:delete(Worker,Workers)}};
+ {stop, FinalReply} ->
+ % finished, stop the rest of the jobs
+ fabric_util:cleanup(lists:delete(Worker,Workers)),
+ {stop, FinalReply}
+ end.
+
+force_reply(Id, {{nil,Revs}, Anc}, Acc) ->
+ % never heard about this ID, assume it's missing
+ [{Id, Revs, Anc} | Acc];
+force_reply(_, {[], _}, Acc) ->
+ Acc;
+force_reply(Id, {Revs, Anc}, Acc) ->
+ [{Id, Revs, Anc} | Acc].
+
+maybe_reply(_, _, continue) ->
+ continue;
+maybe_reply(_, {{nil, _}, _}, _) ->
+ continue;
+maybe_reply(_, {[], _}, {stop, Acc}) ->
+ {stop, Acc};
+maybe_reply(Id, {Revs, Anc}, {stop, Acc}) ->
+ {stop, [{Id, Revs, Anc} | Acc]}.
+
+group_idrevs_by_shard(DbName, IdsRevs) ->
+ dict:to_list(lists:foldl(fun({Id, Revs}, D0) ->
+ lists:foldl(fun(Shard, D1) ->
+ dict:append(Shard, {Id, Revs}, D1)
+ end, D0, mem3:shards(DbName,Id))
+ end, dict:new(), IdsRevs)).
+
+update_dict(D0, KVs) ->
+ lists:foldl(fun({K,V,A}, D1) -> dict:store(K, {V,A}, D1) end, D0, KVs).
+
+skip_message({0, Dict, _Workers}) ->
+ {stop, dict:fold(fun force_reply/3, [], Dict)};
+skip_message(Acc) ->
+ {ok, Acc}.
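+
+% Result shape, as assembled by force_reply/3 above: a list of
+%   {Id, MissingRevs, PossibleAncestors}
+% e.g. (hypothetical) {<<"docid">>, [{2,<<"bbb">>}], [{1,<<"aaa">>}]};
+% ids with no missing revs are dropped from the reply.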
diff --git a/deps/fabric/src/fabric_doc_open.erl b/deps/fabric/src/fabric_doc_open.erl
new file mode 100644
index 00000000..9e466b7a
--- /dev/null
+++ b/deps/fabric/src/fabric_doc_open.erl
@@ -0,0 +1,139 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_doc_open).
+
+-export([go/3]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+go(DbName, Id, Options) ->
+ Workers = fabric_util:submit_jobs(mem3:shards(DbName,Id), open_doc,
+ [Id, [deleted|Options]]),
+ SuppressDeletedDoc = not lists:member(deleted, Options),
+ N = mem3:n(DbName),
+ R = couch_util:get_value(r, Options, integer_to_list(mem3:quorum(DbName))),
+ RepairOpts = [{r, integer_to_list(N)} | Options],
+ Acc0 = {Workers, erlang:min(N, list_to_integer(R)), []},
+ RexiMon = fabric_util:create_monitors(Workers),
+ try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
+ {ok, Reply} ->
+ format_reply(Reply, SuppressDeletedDoc);
+ {error, needs_repair, Reply} ->
+ spawn(fabric, open_revs, [DbName, Id, all, RepairOpts]),
+ format_reply(Reply, SuppressDeletedDoc);
+ {error, needs_repair} ->
+ % we couldn't determine the correct reply, so we'll run a sync repair
+ {ok, Results} = fabric:open_revs(DbName, Id, all, RepairOpts),
+ case lists:partition(fun({ok, #doc{deleted=Del}}) -> Del end, Results) of
+ {[], []} ->
+ {not_found, missing};
+ {_DeletedDocs, []} when SuppressDeletedDoc ->
+ {not_found, deleted};
+ {DeletedDocs, []} ->
+ lists:last(lists:sort(DeletedDocs));
+ {_, LiveDocs} ->
+ lists:last(lists:sort(LiveDocs))
+ end;
+ Error ->
+ Error
+ after
+ rexi_monitor:stop(RexiMon)
+ end.
+
+format_reply({ok, #doc{deleted=true}}, true) ->
+ {not_found, deleted};
+format_reply(Else, _) ->
+ Else.
+
+handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, {Workers, R, Replies}) ->
+ NewWorkers = lists:keydelete(NodeRef, #shard.node, Workers),
+ case NewWorkers of
+ [] ->
+ {error, needs_repair};
+ _ ->
+ {ok, {NewWorkers, R, Replies}}
+ end;
+handle_message({rexi_EXIT, _Reason}, Worker, Acc0) ->
+ skip_message(Worker, Acc0);
+handle_message(Reply, Worker, {Workers, R, Replies}) ->
+ NewReplies = fabric_util:update_counter(Reply, 1, Replies),
+ case lists:dropwhile(fun({_,{_, Count}}) -> Count < R end, NewReplies) of
+ [{_,{QuorumReply, _}} | _] ->
+ fabric_util:cleanup(lists:delete(Worker,Workers)),
+ case {NewReplies, fabric_util:remove_ancestors(NewReplies, [])} of
+ {[_], [_]} ->
+ % complete agreement amongst all copies
+ {stop, QuorumReply};
+ {[_|_], [{_, {QuorumReply, _}}]} ->
+ % any divergent replies are ancestors of the QuorumReply
+ {error, needs_repair, QuorumReply};
+ _Else ->
+ % real disagreement amongst the workers, block for the repair
+ {error, needs_repair}
+ end;
+ [] ->
+ if length(Workers) =:= 1 ->
+ {error, needs_repair};
+ true ->
+ {ok, {lists:delete(Worker,Workers), R, NewReplies}}
+ end
+ end.
+
+skip_message(_Worker, {Workers, _R, _Replies}) when length(Workers) =:= 1 ->
+ {error, needs_repair};
+skip_message(Worker, {Workers, R, Replies}) ->
+ {ok, {lists:delete(Worker,Workers), R, Replies}}.
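+
+% Note on the Replies accumulator: entries have the shape {Key, {Reply, Count}}
+% (per the dropwhile in handle_message/3 and fabric_util:kv/2 in the tests
+% below), so a quorum is any entry whose Count reaches R.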
+
+
+open_doc_test() ->
+ Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
+ Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
+ Bar1 = {ok, #doc{revs = {1,[<<"bar">>]}}},
+ Baz1 = {ok, #doc{revs = {1,[<<"baz">>]}}},
+ NF = {not_found, missing},
+ State0 = {[nil, nil, nil], 2, []},
+ State1 = {[nil, nil], 2, [fabric_util:kv(Foo1,1)]},
+ State2 = {[nil], 2, [fabric_util:kv(Bar1,1), fabric_util:kv(Foo1,1)]},
+ State3 = {[nil], 2, [fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,1)]},
+ ?assertEqual({ok, State1}, handle_message(Foo1, nil, State0)),
+
+ % normal case - quorum reached, no disagreement
+ ?assertEqual({stop, Foo1}, handle_message(Foo1, nil, State1)),
+
+ % 2nd worker disagrees, voting continues
+ ?assertEqual({ok, State2}, handle_message(Bar1, nil, State1)),
+
+ % 3rd worker resolves voting, but repair is needed
+ ?assertEqual({error, needs_repair}, handle_message(Foo1, nil, State2)),
+
+ % 2nd worker comes up with descendant of Foo1, voting continues
+ ?assertEqual({ok, State3}, handle_message(Foo2, nil, State1)),
+
+ % 3rd worker is also a descendant so run repair async
+ ?assertEqual({error, needs_repair, Foo2}, handle_message(Foo2, nil,
+ State3)),
+
+ % We only run async repair when every revision is part of the same branch
+ ?assertEqual({error, needs_repair}, handle_message(Bar1, nil, State3)),
+
+ % not_found is considered to be an ancestor of everybody
+ {ok, State4} = handle_message(NF, nil, State1),
+ ?assertEqual({error, needs_repair, Foo1}, handle_message(Foo1, nil,
+ State4)),
+
+ % 3 distinct edit branches result in quorum failure
+ ?assertEqual({error, needs_repair}, handle_message(Baz1, nil, State2)).
diff --git a/deps/fabric/src/fabric_doc_open_revs.erl b/deps/fabric/src/fabric_doc_open_revs.erl
new file mode 100644
index 00000000..395789ca
--- /dev/null
+++ b/deps/fabric/src/fabric_doc_open_revs.erl
@@ -0,0 +1,307 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_doc_open_revs).
+
+-export([go/4]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-record(state, {
+ dbname,
+ worker_count,
+ workers,
+ reply_count = 0,
+ r,
+ revs,
+ latest,
+ replies = []
+}).
+
+go(DbName, Id, Revs, Options) ->
+ Workers = fabric_util:submit_jobs(mem3:shards(DbName,Id), open_revs,
+ [Id, Revs, Options]),
+ R = couch_util:get_value(r, Options, integer_to_list(mem3:quorum(DbName))),
+ State = #state{
+ dbname = DbName,
+ worker_count = length(Workers),
+ workers = Workers,
+ r = list_to_integer(R),
+ revs = Revs,
+ latest = lists:member(latest, Options),
+ replies = case Revs of all -> []; Revs -> [{Rev,[]} || Rev <- Revs] end
+ },
+ RexiMon = fabric_util:create_monitors(Workers),
+ try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, State) of
+ {ok, {ok, Reply}} ->
+ {ok, Reply};
+ Else ->
+ Else
+ after
+ rexi_monitor:stop(RexiMon)
+ end.
+
+handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, #state{workers=Workers}=State) ->
+ NewWorkers = lists:keydelete(NodeRef, #shard.node, Workers),
+ skip(State#state{workers=NewWorkers});
+handle_message({rexi_EXIT, _}, Worker, #state{workers=Workers}=State) ->
+ skip(State#state{workers=lists:delete(Worker,Workers)});
+handle_message({ok, RawReplies}, Worker, #state{revs = all} = State) ->
+ #state{
+ dbname = DbName,
+ reply_count = ReplyCount,
+ worker_count = WorkerCount,
+ workers = Workers,
+ replies = All0,
+ r = R
+ } = State,
+ All = lists:foldl(fun(Reply,D) -> fabric_util:update_counter(Reply,1,D) end,
+ All0, RawReplies),
+ Reduced = fabric_util:remove_ancestors(All, []),
+ Complete = (ReplyCount =:= (WorkerCount - 1)),
+ QuorumMet = lists:all(fun({_,{_, C}}) -> C >= R end, Reduced),
+    case Reduced of
+        All when QuorumMet andalso ReplyCount =:= (R-1) ->
+            Repair = false;
+        _ ->
+            Repair = [D || {_,{{ok,D}, _}} <- Reduced]
+    end,
+ case maybe_reply(DbName, Reduced, Complete, Repair, R) of
+ noreply ->
+ {ok, State#state{replies = All, reply_count = ReplyCount+1,
+ workers = lists:delete(Worker,Workers)}};
+ {reply, FinalReply} ->
+ fabric_util:cleanup(lists:delete(Worker,Workers)),
+ {stop, FinalReply}
+ end;
+handle_message({ok, RawReplies0}, Worker, State) ->
+ % we've got an explicit revision list, but if latest=true the workers may
+ % return a descendant of the requested revision. Take advantage of the
+ % fact that revisions are returned in order to keep track.
+ RawReplies = strip_not_found_missing(RawReplies0),
+ #state{
+ dbname = DbName,
+ reply_count = ReplyCount,
+ worker_count = WorkerCount,
+ workers = Workers,
+ replies = All0,
+ r = R
+ } = State,
+ All = lists:zipwith(fun({Rev, D}, Reply) ->
+ if Reply =:= error -> {Rev, D}; true ->
+ {Rev, fabric_util:update_counter(Reply, 1, D)}
+ end
+ end, All0, RawReplies),
+ Reduced = [fabric_util:remove_ancestors(X, []) || {_, X} <- All],
+ FinalReplies = [choose_winner(X, R) || X <- Reduced, X =/= []],
+ Complete = (ReplyCount =:= (WorkerCount - 1)),
+ case is_repair_needed(All, FinalReplies) of
+ true ->
+ Repair = [D || {_,{{ok,D}, _}} <- lists:flatten(Reduced)];
+ false ->
+ Repair = false
+ end,
+ case maybe_reply(DbName, FinalReplies, Complete, Repair, R) of
+ noreply ->
+ {ok, State#state{replies = All, reply_count = ReplyCount+1,
+ workers=lists:delete(Worker,Workers)}};
+ {reply, FinalReply} ->
+ fabric_util:cleanup(lists:delete(Worker,Workers)),
+ {stop, FinalReply}
+ end.
+
+skip(#state{revs=all} = State) ->
+ handle_message({ok, []}, nil, State);
+skip(#state{revs=Revs} = State) ->
+ handle_message({ok, [error || _Rev <- Revs]}, nil, State).
+
+maybe_reply(_, [], false, _, _) ->
+ noreply;
+maybe_reply(DbName, ReplyDict, Complete, RepairDocs, R) ->
+ case Complete orelse lists:all(fun({_,{_, C}}) -> C >= R end, ReplyDict) of
+ true ->
+ maybe_execute_read_repair(DbName, RepairDocs),
+ {reply, unstrip_not_found_missing(extract_replies(ReplyDict))};
+ false ->
+ noreply
+ end.
+
+extract_replies(Replies) ->
+ lists:map(fun({_,{Reply,_}}) -> Reply end, Replies).
+
+choose_winner(Options, R) ->
+ case lists:dropwhile(fun({_,{_Reply, C}}) -> C < R end, Options) of
+ [] ->
+ case [Elem || {_,{{ok, #doc{}}, _}} = Elem <- Options] of
+ [] ->
+ hd(Options);
+ Docs ->
+ lists:last(lists:sort(Docs))
+ end;
+ [QuorumMet | _] ->
+ QuorumMet
+ end.
+
+% repair needed if any reply other than the winner has been received for a rev
+is_repair_needed([], []) ->
+ false;
+is_repair_needed([{_Rev, [Reply]} | Tail1], [Reply | Tail2]) ->
+ is_repair_needed(Tail1, Tail2);
+is_repair_needed(_, _) ->
+ true.
+
+maybe_execute_read_repair(_Db, false) ->
+ ok;
+maybe_execute_read_repair(Db, Docs) ->
+ [#doc{id=Id} | _] = Docs,
+ Ctx = #user_ctx{roles=[<<"_admin">>]},
+ Res = fabric:update_docs(Db, Docs, [replicated_changes, {user_ctx,Ctx}]),
+ twig:log(notice, "read_repair ~s ~s ~p", [Db, Id, Res]).
+
+% hackery required so that not_found sorts first
+strip_not_found_missing([]) ->
+ [];
+strip_not_found_missing([{{not_found, missing}, Rev} | Rest]) ->
+ [{not_found, Rev} | strip_not_found_missing(Rest)];
+strip_not_found_missing([Else | Rest]) ->
+ [Else | strip_not_found_missing(Rest)].
+
+unstrip_not_found_missing([]) ->
+ [];
+unstrip_not_found_missing([{not_found, Rev} | Rest]) ->
+ [{{not_found, missing}, Rev} | unstrip_not_found_missing(Rest)];
+unstrip_not_found_missing([Else | Rest]) ->
+ [Else | unstrip_not_found_missing(Rest)].
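+
+% Round-trip sketch: strip_not_found_missing([{{not_found, missing}, R}])
+% gives [{not_found, R}], and since the atom not_found sorts before the atom
+% ok, these entries sort ahead of {ok, #doc{}} replies; winners are mapped
+% back through unstrip_not_found_missing/1 before being returned.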
+
+all_revs_test() ->
+ couch_config:start_link([]),
+ meck:new(fabric),
+ meck:expect(fabric, dbname, fun(Name) -> Name end),
+ meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
+ State0 = #state{worker_count = 3, workers=[nil,nil,nil], r = 2, revs = all},
+ Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
+ Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
+ Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
+
+ % an empty worker response does not count as meeting quorum
+ ?assertMatch(
+ {ok, #state{workers=[nil,nil]}},
+ handle_message({ok, []}, nil, State0)
+ ),
+
+ ?assertMatch(
+ {ok, #state{workers=[nil, nil]}},
+ handle_message({ok, [Foo1, Bar1]}, nil, State0)
+ ),
+ {ok, State1} = handle_message({ok, [Foo1, Bar1]}, nil, State0),
+
+ % the normal case - workers agree
+ ?assertEqual(
+ {stop, [Bar1, Foo1]},
+ handle_message({ok, [Foo1, Bar1]}, nil, State1)
+ ),
+
+ % a case where the 2nd worker has a newer Foo - currently we're considering
+ % Foo to have reached quorum and execute_read_repair()
+ ?assertEqual(
+ {stop, [Bar1, Foo2]},
+ handle_message({ok, [Foo2, Bar1]}, nil, State1)
+ ),
+
+ % a case where quorum has not yet been reached for Foo
+ ?assertMatch(
+ {ok, #state{}},
+ handle_message({ok, [Bar1]}, nil, State1)
+ ),
+ {ok, State2} = handle_message({ok, [Bar1]}, nil, State1),
+
+ % still no quorum, but all workers have responded. We include Foo1 in the
+ % response and execute_read_repair()
+ ?assertEqual(
+ {stop, [Bar1, Foo1]},
+ handle_message({ok, [Bar1]}, nil, State2)
+ ),
+ meck:unload(fabric),
+ couch_config:stop().
+
+specific_revs_test() ->
+ couch_config:start_link([]),
+ meck:new(fabric),
+ meck:expect(fabric, dbname, fun(Name) -> Name end),
+ meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, nil} end),
+ Revs = [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}],
+ State0 = #state{
+ worker_count = 3,
+ workers = [nil, nil, nil],
+ r = 2,
+ revs = Revs,
+ latest = false,
+ replies = [{Rev,[]} || Rev <- Revs]
+ },
+ Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
+ Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
+ Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
+ Baz1 = {{not_found, missing}, {1,<<"baz">>}},
+ Baz2 = {ok, #doc{revs = {1, [<<"baz">>]}}},
+
+ ?assertMatch(
+ {ok, #state{}},
+ handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State0)
+ ),
+ {ok, State1} = handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State0),
+
+ % the normal case - workers agree
+ ?assertEqual(
+ {stop, [Foo1, Bar1, Baz1]},
+ handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State1)
+ ),
+
+ % latest=true, worker responds with Foo2 and we return it
+ State0L = State0#state{latest = true},
+ ?assertMatch(
+ {ok, #state{}},
+ handle_message({ok, [Foo2, Bar1, Baz1]}, nil, State0L)
+ ),
+ {ok, State1L} = handle_message({ok, [Foo2, Bar1, Baz1]}, nil, State0L),
+ ?assertEqual(
+ {stop, [Foo2, Bar1, Baz1]},
+ handle_message({ok, [Foo2, Bar1, Baz1]}, nil, State1L)
+ ),
+
+ % Foo1 is included in the read quorum for Foo2
+ ?assertEqual(
+ {stop, [Foo2, Bar1, Baz1]},
+ handle_message({ok, [Foo1, Bar1, Baz1]}, nil, State1L)
+ ),
+
+ % {not_found, missing} is included in the quorum for any found revision
+ ?assertEqual(
+ {stop, [Foo2, Bar1, Baz2]},
+ handle_message({ok, [Foo2, Bar1, Baz2]}, nil, State1L)
+ ),
+
+ % a worker failure is skipped
+ ?assertMatch(
+ {ok, #state{}},
+ handle_message({rexi_EXIT, foo}, nil, State1L)
+ ),
+ {ok, State2L} = handle_message({rexi_EXIT, foo}, nil, State1L),
+ ?assertEqual(
+ {stop, [Foo2, Bar1, Baz2]},
+ handle_message({ok, [Foo2, Bar1, Baz2]}, nil, State2L)
+ ),
+ meck:unload(fabric),
+ couch_config:stop().
diff --git a/deps/fabric/src/fabric_doc_update.erl b/deps/fabric/src/fabric_doc_update.erl
new file mode 100644
index 00000000..3ac4e185
--- /dev/null
+++ b/deps/fabric/src/fabric_doc_update.erl
@@ -0,0 +1,297 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_doc_update).
+
+-export([go/3]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+go(_, [], _) ->
+ {ok, []};
+go(DbName, AllDocs, Opts) ->
+ validate_atomic_update(DbName, AllDocs, lists:member(all_or_nothing, Opts)),
+ Options = lists:delete(all_or_nothing, Opts),
+ GroupedDocs = lists:map(fun({#shard{name=Name, node=Node} = Shard, Docs}) ->
+ Ref = rexi:cast(Node, {fabric_rpc, update_docs, [Name, Docs, Options]}),
+ {Shard#shard{ref=Ref}, Docs}
+ end, group_docs_by_shard(DbName, AllDocs)),
+ {Workers, _} = lists:unzip(GroupedDocs),
+ RexiMon = fabric_util:create_monitors(Workers),
+ W = couch_util:get_value(w, Options, integer_to_list(mem3:quorum(DbName))),
+ Acc0 = {length(Workers), length(AllDocs), list_to_integer(W), GroupedDocs,
+ dict:from_list([{Doc,[]} || Doc <- AllDocs])},
+ try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
+ {ok, {Health, Results}} when Health =:= ok; Health =:= accepted ->
+ {Health, [R || R <- couch_util:reorder_results(AllDocs, Results), R =/= noreply]};
+ {timeout, Acc} ->
+ {_, _, W1, _, DocReplDict} = Acc,
+ {Health, _, Resp} = dict:fold(fun force_reply/3, {ok, W1, []},
+ DocReplDict),
+ {Health, [R || R <- couch_util:reorder_results(AllDocs, Resp), R =/= noreply]};
+ Else ->
+ Else
+ after
+ rexi_monitor:stop(RexiMon)
+ end.
+
+handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, Acc0) ->
+ {_, LenDocs, W, GroupedDocs, DocReplyDict} = Acc0,
+ NewGrpDocs = [X || {#shard{node=N}, _} = X <- GroupedDocs, N =/= NodeRef],
+ skip_message({length(NewGrpDocs), LenDocs, W, NewGrpDocs, DocReplyDict});
+
+handle_message({rexi_EXIT, _}, Worker, Acc0) ->
+ {WC,LenDocs,W,GrpDocs,DocReplyDict} = Acc0,
+ NewGrpDocs = lists:keydelete(Worker,1,GrpDocs),
+ skip_message({WC-1,LenDocs,W,NewGrpDocs,DocReplyDict});
+handle_message(internal_server_error, Worker, Acc0) ->
+ % happens when we fail to load validation functions in an RPC worker
+ {WC,LenDocs,W,GrpDocs,DocReplyDict} = Acc0,
+ NewGrpDocs = lists:keydelete(Worker,1,GrpDocs),
+ skip_message({WC-1,LenDocs,W,NewGrpDocs,DocReplyDict});
+handle_message({ok, Replies}, Worker, Acc0) ->
+ {WaitingCount, DocCount, W, GroupedDocs, DocReplyDict0} = Acc0,
+ {value, {_, Docs}, NewGrpDocs} = lists:keytake(Worker, 1, GroupedDocs),
+ DocReplyDict = append_update_replies(Docs, Replies, DocReplyDict0),
+ case {WaitingCount, dict:size(DocReplyDict)} of
+ {1, _} ->
+ % last message has arrived, we need to conclude things
+ {Health, W, Reply} = dict:fold(fun force_reply/3, {ok, W, []},
+ DocReplyDict),
+ {stop, {Health, Reply}};
+ {_, DocCount} ->
+ % we've got at least one reply for each document, let's take a look
+ case dict:fold(fun maybe_reply/3, {stop,W,[]}, DocReplyDict) of
+ continue ->
+ {ok, {WaitingCount - 1, DocCount, W, NewGrpDocs, DocReplyDict}};
+ {stop, W, FinalReplies} ->
+ {stop, {ok, FinalReplies}}
+ end
+ end;
+handle_message({missing_stub, Stub}, _, _) ->
+ throw({missing_stub, Stub});
+handle_message({not_found, no_db_file} = X, Worker, Acc0) ->
+ {_, _, _, GroupedDocs, _} = Acc0,
+ Docs = couch_util:get_value(Worker, GroupedDocs),
+ handle_message({ok, [X || _D <- Docs]}, Worker, Acc0).
+
+force_reply(Doc, [], {_, W, Acc}) ->
+ {error, W, [{Doc, {error, internal_server_error}} | Acc]};
+force_reply(Doc, [FirstReply|_] = Replies, {Health, W, Acc}) ->
+ case update_quorum_met(W, Replies) of
+ {true, Reply} ->
+ {Health, W, [{Doc,Reply} | Acc]};
+ false ->
+ twig:log(warn, "write quorum (~p) failed for ~s", [W, Doc#doc.id]),
+ case [Reply || {ok, Reply} <- Replies] of
+ [] ->
+ % check if all errors are identical, if so inherit health
+ case lists:all(fun(E) -> E =:= FirstReply end, Replies) of
+ true ->
+ {Health, W, [{Doc, FirstReply} | Acc]};
+ false ->
+ {error, W, [{Doc, FirstReply} | Acc]}
+ end;
+ [AcceptedRev | _] ->
+ NewHealth = case Health of ok -> accepted; _ -> Health end,
+ {NewHealth, W, [{Doc, {accepted,AcceptedRev}} | Acc]}
+ end
+ end.
+
+maybe_reply(_, _, continue) ->
+ % we didn't meet quorum for all docs, so we're fast-forwarding the fold
+ continue;
+maybe_reply(Doc, Replies, {stop, W, Acc}) ->
+ case update_quorum_met(W, Replies) of
+ {true, Reply} ->
+ {stop, W, [{Doc, Reply} | Acc]};
+ false ->
+ continue
+ end.
+
+update_quorum_met(W, Replies) ->
+ Counters = lists:foldl(fun(R,D) -> orddict:update_counter(R,1,D) end,
+ orddict:new(), Replies),
+ case lists:dropwhile(fun({_, Count}) -> Count < W end, Counters) of
+ [] ->
+ false;
+ [{FinalReply, _} | _] ->
+ {true, FinalReply}
+ end.
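+
+% Illustrative sketch (RevA and Err are hypothetical): with W = 2 and
+% Replies = [{ok,RevA}, {ok,RevA}, Err], the fold counts the identical
+% replies, the dropwhile finds {ok,RevA} with Count 2 >= W, and the result
+% is {true, {ok,RevA}}; with a single {ok,RevA} the result is false.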
+
+-spec group_docs_by_shard(binary(), [#doc{}]) -> [{#shard{}, [#doc{}]}].
+group_docs_by_shard(DbName, Docs) ->
+ dict:to_list(lists:foldl(fun(#doc{id=Id} = Doc, D0) ->
+ lists:foldl(fun(Shard, D1) ->
+ dict:append(Shard, Doc, D1)
+ end, D0, mem3:shards(DbName,Id))
+ end, dict:new(), Docs)).
+
+append_update_replies([], [], DocReplyDict) ->
+ DocReplyDict;
+append_update_replies([Doc|Rest], [], Dict0) ->
+    % icky: under replicated_changes, only errors show up in the result
+ append_update_replies(Rest, [], dict:append(Doc, noreply, Dict0));
+append_update_replies([Doc|Rest1], [Reply|Rest2], Dict0) ->
+ % TODO what if the same document shows up twice in one update_docs call?
+ append_update_replies(Rest1, Rest2, dict:append(Doc, Reply, Dict0)).
+
+skip_message({0, _, W, _, DocReplyDict}) ->
+ {Health, W, Reply} = dict:fold(fun force_reply/3, {ok, W, []}, DocReplyDict),
+ {stop, {Health, Reply}};
+skip_message(Acc0) ->
+ {ok, Acc0}.
+
+validate_atomic_update(_, _, false) ->
+ ok;
+validate_atomic_update(_DbName, AllDocs, true) ->
+ % TODO actually perform the validation. This requires some hackery, we need
+ % to basically extract the prep_and_validate_updates function from couch_db
+ % and only run that, without actually writing in case of a success.
+ Error = {not_implemented, <<"all_or_nothing is not supported yet">>},
+ PreCommitFailures = lists:map(fun(#doc{id=Id, revs = {Pos,Revs}}) ->
+ case Revs of [] -> RevId = <<>>; [RevId|_] -> ok end,
+ {{Id, {Pos, RevId}}, Error}
+ end, AllDocs),
+ throw({aborted, PreCommitFailures}).
+
+% eunit tests
+doc_update1_test() ->
+ Doc1 = #doc{revs = {1,[<<"foo">>]}},
+ Doc2 = #doc{revs = {1,[<<"bar">>]}},
+ Docs = [Doc1],
+ Docs2 = [Doc2, Doc1],
+ Dict = dict:from_list([{Doc,[]} || Doc <- Docs]),
+ Dict2 = dict:from_list([{Doc,[]} || Doc <- Docs2]),
+
+ Shards =
+ mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+ GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+
+
+ % test for W = 2
+ AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+ Dict},
+
+ {ok,{WaitingCountW2_1,_,_,_,_}=AccW2_1} =
+ handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW2),
+ ?assertEqual(WaitingCountW2_1,2),
+ {stop, FinalReplyW2 } =
+ handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW2_1),
+ ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW2),
+
+ % test for W = 3
+ AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs,
+ Dict},
+
+ {ok,{WaitingCountW3_1,_,_,_,_}=AccW3_1} =
+ handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW3),
+ ?assertEqual(WaitingCountW3_1,2),
+
+ {ok,{WaitingCountW3_2,_,_,_,_}=AccW3_2} =
+ handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW3_1),
+ ?assertEqual(WaitingCountW3_2,1),
+
+ {stop, FinalReplyW3 } =
+ handle_message({ok, [{ok, Doc1}]},lists:nth(3,Shards),AccW3_2),
+ ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW3),
+
+    % test a W quorum greater than the number of shards; quorum can never be
+    % met, so the first reply should conclude the fold immediately
+
+ Shards2 = mem3_util:create_partition_map("foo",1,1,["node1"]),
+ GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>,Shards2,Docs),
+
+ AccW4 =
+ {length(Shards2), length(Docs), list_to_integer("2"), GroupedDocs2, Dict},
+ Bool =
+ case handle_message({ok, [{ok, Doc1}]},hd(Shards2),AccW4) of
+ {stop, _Reply} ->
+ true;
+ _ -> false
+ end,
+ ?assertEqual(Bool,true),
+
+ % Docs with no replies should end up as {error, internal_server_error}
+ SA1 = #shard{node=a, range=1},
+ SB1 = #shard{node=b, range=1},
+ SA2 = #shard{node=a, range=2},
+ SB2 = #shard{node=b, range=2},
+ GroupedDocs3 = [{SA1,[Doc1]}, {SB1,[Doc1]}, {SA2,[Doc2]}, {SB2,[Doc2]}],
+ StW5_0 = {length(GroupedDocs3), length(Docs2), 2, GroupedDocs3, Dict2},
+ {ok, StW5_1} = handle_message({ok, [{ok, "A"}]}, SA1, StW5_0),
+ {ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1),
+ {ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
+ {stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
+ ?assertEqual(
+ {error, [{Doc1,{accepted,"A"}},{Doc2,{error,internal_server_error}}]},
+ ReplyW5
+ ).
+
+
+doc_update2_test() ->
+ Doc1 = #doc{revs = {1,[<<"foo">>]}},
+ Doc2 = #doc{revs = {1,[<<"bar">>]}},
+ Docs = [Doc2, Doc1],
+ Shards =
+ mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+ GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+ Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+ dict:from_list([{Doc,[]} || Doc <- Docs])},
+
+ {ok,{WaitingCount1,_,_,_,_}=Acc1} =
+ handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
+ ?assertEqual(WaitingCount1,2),
+
+ {ok,{WaitingCount2,_,_,_,_}=Acc2} =
+ handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
+ ?assertEqual(WaitingCount2,1),
+
+ {stop, Reply} =
+ handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
+
+ ?assertEqual({accepted, [{Doc1,{accepted,Doc2}}, {Doc2,{accepted,Doc1}}]},
+ Reply).
+
+doc_update3_test() ->
+ Doc1 = #doc{revs = {1,[<<"foo">>]}},
+ Doc2 = #doc{revs = {1,[<<"bar">>]}},
+ Docs = [Doc2, Doc1],
+ Shards =
+ mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
+ GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
+ Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
+ dict:from_list([{Doc,[]} || Doc <- Docs])},
+
+ {ok,{WaitingCount1,_,_,_,_}=Acc1} =
+ handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
+ ?assertEqual(WaitingCount1,2),
+
+ {ok,{WaitingCount2,_,_,_,_}=Acc2} =
+ handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
+ ?assertEqual(WaitingCount2,1),
+
+ {stop, Reply} =
+ handle_message({ok, [{ok, Doc1},{ok, Doc2}]},lists:nth(3,Shards),Acc2),
+
+ ?assertEqual({ok, [{Doc1, {ok, Doc2}},{Doc2, {ok,Doc1}}]},Reply).
+
+% needed for testing to avoid having to start the mem3 application
+group_docs_by_shard_hack(_DbName, Shards, Docs) ->
+ dict:to_list(lists:foldl(fun(#doc{id=_Id} = Doc, D0) ->
+ lists:foldl(fun(Shard, D1) ->
+ dict:append(Shard, Doc, D1)
+ end, D0, Shards)
+ end, dict:new(), Docs)).
diff --git a/deps/fabric/src/fabric_group_info.erl b/deps/fabric/src/fabric_group_info.erl
new file mode 100644
index 00000000..27b0f839
--- /dev/null
+++ b/deps/fabric/src/fabric_group_info.erl
@@ -0,0 +1,100 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_group_info).
+
+-export([go/2]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+go(DbName, GroupId) when is_binary(GroupId) ->
+ {ok, DDoc} = fabric:open_doc(DbName, GroupId, []),
+ go(DbName, DDoc);
+
+go(DbName, #doc{} = DDoc) ->
+ Group = couch_view_group:design_doc_to_view_group(DDoc),
+ Shards = mem3:shards(DbName),
+ Workers = fabric_util:submit_jobs(Shards, group_info, [Group]),
+ RexiMon = fabric_util:create_monitors(Shards),
+ Acc0 = {fabric_dict:init(Workers, nil), []},
+ try
+ fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0)
+ after
+ rexi_monitor:stop(RexiMon)
+ end.
+
+handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {Counters, Acc}) ->
+ case fabric_util:remove_down_workers(Counters, NodeRef) of
+ {ok, NewCounters} ->
+ {ok, {NewCounters, Acc}};
+ error ->
+ {error, {nodedown, <<"progress not possible">>}}
+ end;
+
+handle_message({rexi_EXIT, Reason}, Shard, {Counters, Acc}) ->
+    NewCounters = lists:keydelete(Shard, 1, Counters),
+    case fabric_view:is_progress_possible(NewCounters) of
+ true ->
+ {ok, {NewCounters, Acc}};
+ false ->
+ {error, Reason}
+ end;
+
+handle_message({ok, Info}, Shard, {Counters, Acc}) ->
+ case fabric_dict:lookup_element(Shard, Counters) of
+ undefined ->
+ % already heard from someone else in this range
+ {ok, {Counters, Acc}};
+ nil ->
+ C1 = fabric_dict:store(Shard, ok, Counters),
+ C2 = fabric_view:remove_overlapping_shards(Shard, C1),
+ case fabric_dict:any(nil, C2) of
+ true ->
+ {ok, {C2, [Info|Acc]}};
+ false ->
+ {stop, merge_results(lists:flatten([Info|Acc]))}
+ end
+ end;
+handle_message(_, _, Acc) ->
+ {ok, Acc}.
+
+merge_results(Info) ->
+ Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
+ orddict:new(), Info),
+ orddict:fold(fun
+ (signature, [X|_], Acc) ->
+ [{signature, X} | Acc];
+ (language, [X|_], Acc) ->
+ [{language, X} | Acc];
+ (disk_size, X, Acc) ->
+ [{disk_size, lists:sum(X)} | Acc];
+ (data_size, X, Acc) ->
+ [{data_size, lists:sum(X)} | Acc];
+ (compact_running, X, Acc) ->
+ [{compact_running, lists:member(true, X)} | Acc];
+ (updater_running, X, Acc) ->
+ [{updater_running, lists:member(true, X)} | Acc];
+ (waiting_commit, X, Acc) ->
+ [{waiting_commit, lists:member(true, X)} | Acc];
+ (waiting_clients, X, Acc) ->
+ [{waiting_clients, lists:sum(X)} | Acc];
+ (update_seq, X, Acc) ->
+ [{update_seq, lists:sum(X)} | Acc];
+ (purge_seq, X, Acc) ->
+ [{purge_seq, lists:sum(X)} | Acc];
+ (_, _, Acc) ->
+ Acc
+ end, [], Dict).
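+
+% Merge semantics sketch: per-shard group_info proplists are combined so that
+% signature and language take the first value seen, the size and sequence
+% fields are summed, and the boolean flags (compact_running, updater_running,
+% waiting_commit) are true if any shard reports true.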
diff --git a/deps/fabric/src/fabric_rpc.erl b/deps/fabric/src/fabric_rpc.erl
new file mode 100644
index 00000000..3f25dfd7
--- /dev/null
+++ b/deps/fabric/src/fabric_rpc.erl
@@ -0,0 +1,485 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_rpc).
+
+-export([get_db_info/1, get_doc_count/1, get_update_seq/1]).
+-export([open_doc/3, open_revs/4, get_missing_revs/2, get_missing_revs/3,
+ update_docs/3]).
+-export([all_docs/2, changes/3, map_view/4, reduce_view/4, group_info/2]).
+-export([create_db/1, delete_db/1, reset_validation_funs/1, set_security/3,
+ set_revs_limit/3, create_shard_db_doc/2, delete_shard_db_doc/2]).
+
+-include("fabric.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-record (view_acc, {
+ db,
+ limit,
+ include_docs,
+ conflicts,
+ doc_info = nil,
+ offset = nil,
+ total_rows,
+ reduce_fun = fun couch_db:enum_docs_reduce_to_count/1,
+ group_level = 0
+}).
+
+%% rpc endpoints
+%% a call to with_db/3 supplies your M:F with a #db{} followed by the
+%% remaining args
+
+all_docs(DbName, #view_query_args{keys=nil} = QueryArgs) ->
+ {ok, Db} = get_or_create_db(DbName, []),
+ #view_query_args{
+ start_key = StartKey,
+ start_docid = StartDocId,
+ end_key = EndKey,
+ end_docid = EndDocId,
+ limit = Limit,
+ skip = Skip,
+ include_docs = IncludeDocs,
+ conflicts = Conflicts,
+ direction = Dir,
+ inclusive_end = Inclusive,
+ extra = Extra
+ } = QueryArgs,
+ set_io_priority(DbName, Extra),
+ {ok, Total} = couch_db:get_doc_count(Db),
+ Acc0 = #view_acc{
+ db = Db,
+ include_docs = IncludeDocs,
+ conflicts = Conflicts,
+ limit = Limit+Skip,
+ total_rows = Total
+ },
+ EndKeyType = if Inclusive -> end_key; true -> end_key_gt end,
+ Options = [
+ {dir, Dir},
+ {start_key, if is_binary(StartKey) -> StartKey; true -> StartDocId end},
+ {EndKeyType, if is_binary(EndKey) -> EndKey; true -> EndDocId end}
+ ],
+ {ok, _, Acc} = couch_db:enum_docs(Db, fun view_fold/3, Acc0, Options),
+ final_response(Total, Acc#view_acc.offset).
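+
+% Reply protocol sketch (inferred from view_fold/3 and final_response/2
+% below): each worker first streams {total_and_offset, Total, Offset} via
+% rexi:sync_reply/1, then one #view_row{} per row, and finally signals
+% rexi:reply(complete); the coordinator may answer stop to abort the fold.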
+
+changes(DbName, Args, StartSeq) ->
+ erlang:put(io_priority, {interactive, DbName}),
+ #changes_args{dir=Dir} = Args,
+ case get_or_create_db(DbName, []) of
+ {ok, Db} ->
+ Enum = fun changes_enumerator/2,
+ Opts = [{dir,Dir}],
+ Acc0 = {Db, StartSeq, Args},
+ try
+ {ok, {_, LastSeq, _}} =
+ couch_db:changes_since(Db, StartSeq, Enum, Opts, Acc0),
+ rexi:reply({complete, LastSeq})
+ after
+ couch_db:close(Db)
+ end;
+ Error ->
+ rexi:reply(Error)
+ end.
+
+map_view(DbName, DDoc, ViewName, QueryArgs) ->
+ {ok, Db} = get_or_create_db(DbName, []),
+ #view_query_args{
+ limit = Limit,
+ skip = Skip,
+ keys = Keys,
+ include_docs = IncludeDocs,
+ conflicts = Conflicts,
+ stale = Stale,
+ view_type = ViewType,
+ extra = Extra
+ } = QueryArgs,
+ set_io_priority(DbName, Extra),
+ {LastSeq, MinSeq} = calculate_seqs(Db, Stale),
+ Group0 = couch_view_group:design_doc_to_view_group(DDoc),
+ {ok, Pid} = gen_server:call(couch_view, {get_group_server, DbName, Group0}),
+ {ok, Group} = couch_view_group:request_group(Pid, MinSeq),
+ maybe_update_view_group(Pid, LastSeq, Stale),
+ erlang:monitor(process, Group#group.fd),
+ View = fabric_view:extract_view(Pid, ViewName, Group#group.views, ViewType),
+ {ok, Total} = couch_view:get_row_count(View),
+ Acc0 = #view_acc{
+ db = Db,
+ include_docs = IncludeDocs,
+ conflicts = Conflicts,
+ limit = Limit+Skip,
+ total_rows = Total,
+ reduce_fun = fun couch_view:reduce_to_count/1
+ },
+ case Keys of
+ nil ->
+ Options = couch_httpd_view:make_key_options(QueryArgs),
+ {ok, _, Acc} = couch_view:fold(View, fun view_fold/3, Acc0, Options);
+ _ ->
+ Acc = lists:foldl(fun(Key, AccIn) ->
+ KeyArgs = QueryArgs#view_query_args{start_key=Key, end_key=Key},
+ Options = couch_httpd_view:make_key_options(KeyArgs),
+ {_Go, _, Out} = couch_view:fold(View, fun view_fold/3, AccIn,
+ Options),
+ Out
+ end, Acc0, Keys)
+ end,
+ final_response(Total, Acc#view_acc.offset).
+
+reduce_view(DbName, Group0, ViewName, QueryArgs) ->
+ erlang:put(io_priority, {interactive, DbName}),
+ {ok, Db} = get_or_create_db(DbName, []),
+ #view_query_args{
+ group_level = GroupLevel,
+ limit = Limit,
+ skip = Skip,
+ keys = Keys,
+ stale = Stale,
+ extra = Extra
+ } = QueryArgs,
+ set_io_priority(DbName, Extra),
+ GroupFun = group_rows_fun(GroupLevel),
+ {LastSeq, MinSeq} = calculate_seqs(Db, Stale),
+ {ok, Pid} = gen_server:call(couch_view, {get_group_server, DbName, Group0}),
+ {ok, Group} = couch_view_group:request_group(Pid, MinSeq),
+ maybe_update_view_group(Pid, LastSeq, Stale),
+ #group{views=Views, def_lang=Lang, fd=Fd} = Group,
+ erlang:monitor(process, Fd),
+ {NthRed, View} = fabric_view:extract_view(Pid, ViewName, Views, reduce),
+ ReduceView = {reduce, NthRed, Lang, View},
+ Acc0 = #view_acc{group_level = GroupLevel, limit = Limit+Skip},
+ case Keys of
+ nil ->
+ Options0 = couch_httpd_view:make_key_options(QueryArgs),
+ Options = [{key_group_fun, GroupFun} | Options0],
+ couch_view:fold_reduce(ReduceView, fun reduce_fold/3, Acc0, Options);
+ _ ->
+ lists:map(fun(Key) ->
+ KeyArgs = QueryArgs#view_query_args{start_key=Key, end_key=Key},
+ Options0 = couch_httpd_view:make_key_options(KeyArgs),
+ Options = [{key_group_fun, GroupFun} | Options0],
+ couch_view:fold_reduce(ReduceView, fun reduce_fold/3, Acc0, Options)
+ end, Keys)
+ end,
+ rexi:reply(complete).
+
+calculate_seqs(Db, Stale) ->
+ LastSeq = couch_db:get_update_seq(Db),
+ if
+ Stale == ok orelse Stale == update_after ->
+ {LastSeq, 0};
+ true ->
+ {LastSeq, LastSeq}
+ end.
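+
+% stale=ok and stale=update_after ask for MinSeq 0, so the view group answers
+% with whatever it already has (request_group/2 is assumed to block until the
+% group reaches MinSeq); otherwise MinSeq = LastSeq forces the group to catch
+% up to the database's current update_seq before answering.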
+
+maybe_update_view_group(GroupPid, LastSeq, update_after) ->
+ couch_view_group:trigger_group_update(GroupPid, LastSeq);
+maybe_update_view_group(_, _, _) ->
+ ok.
+
+create_db(DbName) ->
+ rexi:reply(case couch_server:create(DbName, []) of
+ {ok, _} ->
+ ok;
+ Error ->
+ Error
+ end).
+
+create_shard_db_doc(_, Doc) ->
+ rexi:reply(mem3_util:write_db_doc(Doc)).
+
+delete_db(DbName) ->
+ couch_server:delete(DbName, []).
+
+delete_shard_db_doc(_, DocId) ->
+ rexi:reply(mem3_util:delete_db_doc(DocId)).
+
+get_db_info(DbName) ->
+ with_db(DbName, [], {couch_db, get_db_info, []}).
+
+get_doc_count(DbName) ->
+ with_db(DbName, [], {couch_db, get_doc_count, []}).
+
+get_update_seq(DbName) ->
+ with_db(DbName, [], {couch_db, get_update_seq, []}).
+
+set_security(DbName, SecObj, Options) ->
+ with_db(DbName, Options, {couch_db, set_security, [SecObj]}).
+
+set_revs_limit(DbName, Limit, Options) ->
+ with_db(DbName, Options, {couch_db, set_revs_limit, [Limit]}).
+
+open_doc(DbName, DocId, Options) ->
+ with_db(DbName, Options, {couch_db, open_doc, [DocId, Options]}).
+
+open_revs(DbName, Id, Revs, Options) ->
+ with_db(DbName, Options, {couch_db, open_doc_revs, [Id, Revs, Options]}).
+
+get_missing_revs(DbName, IdRevsList) ->
+ get_missing_revs(DbName, IdRevsList, []).
+
+get_missing_revs(DbName, IdRevsList, Options) ->
+ % reimplement here so we get [] for Ids with no missing revs in response
+ set_io_priority(DbName, Options),
+ rexi:reply(case get_or_create_db(DbName, Options) of
+ {ok, Db} ->
+ Ids = [Id1 || {Id1, _Revs} <- IdRevsList],
+ {ok, lists:zipwith(fun({Id, Revs}, FullDocInfoResult) ->
+ case FullDocInfoResult of
+ {ok, #full_doc_info{rev_tree=RevisionTree} = FullInfo} ->
+ MissingRevs = couch_key_tree:find_missing(RevisionTree, Revs),
+ {Id, MissingRevs, possible_ancestors(FullInfo, MissingRevs)};
+ not_found ->
+ {Id, Revs, []}
+ end
+ end, IdRevsList, couch_btree:lookup(Db#db.id_tree, Ids))};
+ Error ->
+ Error
+ end).
+
+update_docs(DbName, Docs0, Options) ->
+ case proplists:get_value(replicated_changes, Options) of
+ true ->
+ X = replicated_changes;
+ _ ->
+ X = interactive_edit
+ end,
+ Docs = make_att_readers(Docs0),
+ with_db(DbName, Options, {couch_db, update_docs, [Docs, Options, X]}).
+
+group_info(DbName, Group0) ->
+ {ok, Pid} = gen_server:call(couch_view, {get_group_server, DbName, Group0}),
+ rexi:reply(couch_view_group:request_group_info(Pid)).
+
+reset_validation_funs(DbName) ->
+ case get_or_create_db(DbName, []) of
+ {ok, #db{main_pid = Pid}} ->
+ gen_server:cast(Pid, {load_validation_funs, undefined});
+ _ ->
+ ok
+ end.
+
+%%
+%% internal
+%%
+
+with_db(DbName, Options, {M,F,A}) ->
+ set_io_priority(DbName, Options),
+ case get_or_create_db(DbName, Options) of
+ {ok, Db} ->
+ rexi:reply(try
+ apply(M, F, [Db | A])
+ catch Exception ->
+ Exception;
+ error:Reason ->
+ twig:log(error, "rpc ~p:~p/~p ~p ~p", [M, F, length(A)+1, Reason,
+ clean_stack()]),
+ {error, Reason}
+ end);
+ Error ->
+ rexi:reply(Error)
+ end.
+
+get_or_create_db(DbName, Options) ->
+ case couch_db:open_int(DbName, Options) of
+ {not_found, no_db_file} ->
+ twig:log(warn, "~p creating ~s", [?MODULE, DbName]),
+ couch_server:create(DbName, Options);
+ Else ->
+ Else
+ end.
+
+view_fold(#full_doc_info{} = FullDocInfo, OffsetReds, Acc) ->
+ % matches for _all_docs and translates #full_doc_info{} -> KV pair
+ case couch_doc:to_doc_info(FullDocInfo) of
+ #doc_info{id=Id, revs=[#rev_info{deleted=false, rev=Rev}|_]} = DI ->
+ Value = {[{rev,couch_doc:rev_to_str(Rev)}]},
+ view_fold({{Id,Id}, Value}, OffsetReds, Acc#view_acc{doc_info=DI});
+ #doc_info{revs=[#rev_info{deleted=true}|_]} ->
+ {ok, Acc}
+ end;
+view_fold(KV, OffsetReds, #view_acc{offset=nil, total_rows=Total} = Acc) ->
+ % calculates the offset for this shard
+ #view_acc{reduce_fun=Reduce} = Acc,
+ Offset = Reduce(OffsetReds),
+ case rexi:sync_reply({total_and_offset, Total, Offset}) of
+ ok ->
+ view_fold(KV, OffsetReds, Acc#view_acc{offset=Offset});
+ stop ->
+ exit(normal);
+ timeout ->
+ exit(timeout)
+ end;
+view_fold(_KV, _Offset, #view_acc{limit=0} = Acc) ->
+ % we scanned through limit+skip local rows
+ {stop, Acc};
+view_fold({{Key,Id}, Value}, _Offset, Acc) ->
+ % the normal case
+ #view_acc{
+ db = Db,
+ doc_info = DocInfo,
+ limit = Limit,
+ conflicts = Conflicts,
+ include_docs = IncludeDocs
+ } = Acc,
+    case Value of
+        {Props} ->
+            LinkedDocs = (couch_util:get_value(<<"_id">>, Props) =/= undefined);
+        _ ->
+            LinkedDocs = false
+    end,
+ if LinkedDocs ->
+ % we'll embed this at a higher level b/c the doc may be non-local
+ Doc = undefined;
+ IncludeDocs ->
+ IdOrInfo = if DocInfo =/= nil -> DocInfo; true -> Id end,
+ Options = if Conflicts -> [conflicts]; true -> [] end,
+ case couch_db:open_doc(Db, IdOrInfo, Options) of
+ {not_found, deleted} ->
+ Doc = null;
+ {not_found, missing} ->
+ Doc = undefined;
+ {ok, Doc0} ->
+ Doc = couch_doc:to_json_obj(Doc0, [])
+ end;
+ true ->
+ Doc = undefined
+ end,
+ case rexi:sync_reply(#view_row{key=Key, id=Id, value=Value, doc=Doc}) of
+ ok ->
+ {ok, Acc#view_acc{limit=Limit-1}};
+ timeout ->
+ exit(timeout)
+ end.
+
+final_response(Total, nil) ->
+    case rexi:sync_reply({total_and_offset, Total, Total}) of
+        ok ->
+            rexi:reply(complete);
+        stop ->
+            ok;
+        timeout ->
+            exit(timeout)
+    end;
+final_response(_Total, _Offset) ->
+ rexi:reply(complete).
+
+%% TODO: handle case of bogus group level
+group_rows_fun(exact) ->
+ fun({Key1,_}, {Key2,_}) -> Key1 == Key2 end;
+group_rows_fun(0) ->
+ fun(_A, _B) -> true end;
+group_rows_fun(GroupLevel) when is_integer(GroupLevel) ->
+ fun({[_|_] = Key1,_}, {[_|_] = Key2,_}) ->
+ lists:sublist(Key1, GroupLevel) == lists:sublist(Key2, GroupLevel);
+ ({Key1,_}, {Key2,_}) ->
+ Key1 == Key2
+ end.
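+
+% Grouping sketch: with GroupLevel = 1, rows keyed [<<"a">>,1] and [<<"a">>,2]
+% share a 1-element prefix and are reduced together; `exact` groups only
+% identical keys, and level 0 collapses everything into a single row.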
+
+reduce_fold(_Key, _Red, #view_acc{limit=0} = Acc) ->
+ {stop, Acc};
+reduce_fold(_Key, Red, #view_acc{group_level=0} = Acc) ->
+ send(null, Red, Acc);
+reduce_fold(Key, Red, #view_acc{group_level=exact} = Acc) ->
+ send(Key, Red, Acc);
+reduce_fold(K, Red, #view_acc{group_level=I} = Acc) when I > 0, is_list(K) ->
+ send(lists:sublist(K, I), Red, Acc);
+reduce_fold(K, Red, #view_acc{group_level=I} = Acc) when I > 0 ->
+ send(K, Red, Acc).
+
+
+send(Key, Value, #view_acc{limit=Limit} = Acc) ->
+ case rexi:sync_reply(#view_row{key=Key, value=Value}) of
+ ok ->
+ {ok, Acc#view_acc{limit=Limit-1}};
+ stop ->
+ exit(normal);
+ timeout ->
+ exit(timeout)
+ end.
+
+changes_enumerator(DocInfo, {Db, _Seq, Args}) ->
+ #changes_args{
+ include_docs = IncludeDocs,
+ filter = Acc,
+ conflicts = Conflicts
+ } = Args,
+ #doc_info{high_seq=Seq, revs=[#rev_info{deleted=Del}|_]} = DocInfo,
+ case [X || X <- couch_changes:filter(DocInfo, Acc), X /= null] of
+ [] ->
+ {ok, {Db, Seq, Args}};
+ Results ->
+ Opts = if Conflicts -> [conflicts]; true -> [] end,
+ ChangesRow = changes_row(Db, DocInfo, Results, Del, IncludeDocs, Opts),
+ Go = rexi:sync_reply(ChangesRow),
+ {Go, {Db, Seq, Args}}
+ end.
+
+changes_row(Db, #doc_info{id=Id, high_seq=Seq}=DI, Results, Del, true, Opts) ->
+ Doc = doc_member(Db, DI, Opts),
+ #change{key=Seq, id=Id, value=Results, doc=Doc, deleted=Del};
+changes_row(_, #doc_info{id=Id, high_seq=Seq}, Results, true, _, _) ->
+ #change{key=Seq, id=Id, value=Results, deleted=true};
+changes_row(_, #doc_info{id=Id, high_seq=Seq}, Results, _, _, _) ->
+ #change{key=Seq, id=Id, value=Results}.
+
+doc_member(Shard, DocInfo, Opts) ->
+ case couch_db:open_doc(Shard, DocInfo, [deleted | Opts]) of
+ {ok, Doc} ->
+ couch_doc:to_json_obj(Doc, []);
+ Error ->
+ Error
+ end.
+
+possible_ancestors(_FullInfo, []) ->
+ [];
+possible_ancestors(FullInfo, MissingRevs) ->
+ #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
+ LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
+ % Find the revs that are possible parents of this rev
+ lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
+ % this leaf is a "possible ancestor" of the missing
+ % revs if this LeafPos is less than any of the missing revs
+ case lists:any(fun({MissingPos, _}) ->
+ LeafPos < MissingPos end, MissingRevs) of
+ true ->
+ [{LeafPos, LeafRevId} | Acc];
+ false ->
+ Acc
+ end
+ end, [], LeafRevs).
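+
+% Illustrative: a leaf at position 2 is reported as a possible ancestor
+% of a missing rev at position 3 (2 < 3), but not of one at position 1.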
+
+make_att_readers([]) ->
+ [];
+make_att_readers([#doc{atts=Atts0} = Doc | Rest]) ->
+ % go through the attachments looking for 'follows' in the data, and
+ % replace it with a function that reads the data from the MIME stream.
+ Atts = [Att#att{data=make_att_reader(D)} || #att{data=D} = Att <- Atts0],
+ [Doc#doc{atts = Atts} | make_att_readers(Rest)].
+
+make_att_reader({follows, Parser}) ->
+ fun() ->
+ Parser ! {get_bytes, self()},
+ receive {bytes, Bytes} -> Bytes end
+ end;
+make_att_reader(Else) ->
+ Else.
+
+clean_stack() ->
+ lists:map(fun({M,F,A}) when is_list(A) -> {M,F,length(A)}; (X) -> X end,
+ erlang:get_stacktrace()).
+
+set_io_priority(DbName, Options) ->
+ case lists:keyfind(io_priority, 1, Options) of
+ {io_priority, Pri} ->
+ erlang:put(io_priority, Pri);
+ false ->
+ erlang:put(io_priority, {interactive, DbName})
+ end.
diff --git a/deps/fabric/src/fabric_util.erl b/deps/fabric/src/fabric_util.erl
new file mode 100644
index 00000000..42fe900f
--- /dev/null
+++ b/deps/fabric/src/fabric_util.erl
@@ -0,0 +1,168 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_util).
+
+-export([submit_jobs/3, cleanup/1, recv/4, get_db/1, get_db/2, error_info/1,
+ update_counter/3, remove_ancestors/2, create_monitors/1, kv/2,
+ remove_down_workers/2]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+remove_down_workers(Workers, BadNode) ->
+ Filter = fun(#shard{node = Node}, _) -> Node =/= BadNode end,
+ NewWorkers = fabric_dict:filter(Filter, Workers),
+ case fabric_view:is_progress_possible(NewWorkers) of
+ true ->
+ {ok, NewWorkers};
+ false ->
+ error
+ end.
+
+submit_jobs(Shards, EndPoint, ExtraArgs) ->
+ lists:map(fun(#shard{node=Node, name=ShardName} = Shard) ->
+ Ref = rexi:cast(Node, {fabric_rpc, EndPoint, [ShardName | ExtraArgs]}),
+ Shard#shard{ref = Ref}
+ end, Shards).
+
+cleanup(Workers) ->
+ [rexi:kill(Node, Ref) || #shard{node=Node, ref=Ref} <- Workers].
+
+recv(Workers, Keypos, Fun, Acc0) ->
+ Timeout = case couch_config:get("fabric", "request_timeout", "60000") of
+ "infinity" -> infinity;
+ N -> list_to_integer(N)
+ end,
+ rexi_utils:recv(Workers, Keypos, Fun, Acc0, Timeout, infinity).
+
+
+get_db(DbName) ->
+ get_db(DbName, []).
+
+get_db(DbName, Options) ->
+ % prefer local shards
+ {Local, Remote} = lists:partition(fun(S) -> S#shard.node =:= node() end,
+ mem3:shards(DbName)),
+ % suppress shards from down nodes
+ Nodes = erlang:nodes(),
+ Live = [S || #shard{node = N} = S <- Remote, lists:member(N, Nodes)],
+ % sort the live remote shards so that we don't repeatedly try the same node
+ get_shard(Local ++ lists:keysort(#shard.name, Live), Options, 100).
+
+get_shard([], _Opts, _Timeout) ->
+ erlang:error({internal_server_error, "No DB shards could be opened."});
+get_shard([#shard{node = Node, name = Name} | Rest], Opts, Timeout) ->
+ case rpc:call(Node, couch_db, open, [Name, [{timeout, Timeout} | Opts]]) of
+ {ok, Db} ->
+ {ok, Db};
+ {unauthorized, _} = Error ->
+ throw(Error);
+ {badrpc, {'EXIT', {timeout, _}}} ->
+ get_shard(Rest, Opts, 2*Timeout);
+ _Else ->
+ get_shard(Rest, Opts, Timeout)
+ end.
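+
+% Illustrative backoff: get_db/2 starts with a 100 msec timeout; on an
+% rpc timeout the remaining shards are tried with the timeout doubled
+% (100 -> 200 -> 400 msec) until the list is exhausted.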
+
+error_info({{<<"reduce_overflow_error">>, _} = Error, _Stack}) ->
+ Error;
+error_info({{timeout, _} = Error, _Stack}) ->
+ Error;
+error_info({{Error, Reason}, Stack}) ->
+ {Error, Reason, Stack};
+error_info({Error, Stack}) ->
+ {Error, nil, Stack}.
+
+update_counter(Item, Incr, D) ->
+ UpdateFun = fun ({Old, Count}) -> {Old, Count + Incr} end,
+ orddict:update(make_key(Item), UpdateFun, {Item, Incr}, D).
+
+make_key({ok, L}) when is_list(L) ->
+ make_key(L);
+make_key([]) ->
+ [];
+make_key([{ok, #doc{revs= {Pos,[RevId | _]}}} | Rest]) ->
+ [{ok, {Pos, RevId}} | make_key(Rest)];
+make_key([{{not_found, missing}, Rev} | Rest]) ->
+ [{not_found, Rev} | make_key(Rest)];
+make_key({ok, #doc{id=Id,revs=Revs}}) ->
+ {Id, Revs};
+make_key(Else) ->
+ Else.
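+
+% Illustrative (hypothetical values): make_key/1 reduces a reply to just
+% the parts that identify it, e.g.
+%   make_key({ok, #doc{id = <<"a">>, revs = {1, [<<"x">>]}}})
+%   -> {<<"a">>, {1, [<<"x">>]}}
+% so update_counter/3 can group identical replies while ignoring bodies
+% (see update_counter_test below).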
+
+% this presumes the incoming list is sorted, i.e. shorter revlists come first
+remove_ancestors([], Acc) ->
+ lists:reverse(Acc);
+remove_ancestors([{_, {{not_found, _}, Count}} = Head | Tail], Acc) ->
+ % any doc found in the tail is treated as a descendant of this missing rev
+ case lists:filter(fun({_,{{ok, #doc{}}, _}}) -> true; (_) -> false end, Tail) of
+ [{_,{{ok, #doc{}} = Descendant, _}} | _] ->
+ remove_ancestors(update_counter(Descendant, Count, Tail), Acc);
+ [] ->
+ remove_ancestors(Tail, [Head | Acc])
+ end;
+remove_ancestors([{_,{{ok, #doc{revs = {Pos, Revs}}}, Count}} = Head | Tail], Acc) ->
+ Descendants = lists:dropwhile(fun
+ ({_,{{ok, #doc{revs = {Pos2, Revs2}}}, _}}) ->
+ case lists:nthtail(erlang:min(Pos2 - Pos, length(Revs2)), Revs2) of
+ [] ->
+ % impossible to tell if Revs2 is a descendant - assume no
+ true;
+ History ->
+ % if Revs2 is a descendant, History is a prefix of Revs
+ not lists:prefix(History, Revs)
+ end
+ end, Tail),
+ case Descendants of [] ->
+ remove_ancestors(Tail, [Head | Acc]);
+ [{Descendant, _} | _] ->
+ remove_ancestors(update_counter(Descendant, Count, Tail), Acc)
+ end;
+remove_ancestors([Error | Tail], Acc) ->
+ remove_ancestors(Tail, [Error | Acc]).
+
+create_monitors(Shards) ->
+ MonRefs = lists:usort([{rexi_server, N} || #shard{node=N} <- Shards]),
+ rexi_monitor:start(MonRefs).
+
+%% verify only id and rev are used in key.
+update_counter_test() ->
+ Reply = {ok, #doc{id = <<"id">>, revs = <<"rev">>,
+ body = <<"body">>, atts = <<"atts">>}},
+ ?assertEqual([{{<<"id">>,<<"rev">>}, {Reply, 1}}],
+ update_counter(Reply, 1, [])).
+
+remove_ancestors_test() ->
+ Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
+ Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
+ Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
+ Bar2 = {not_found, {1,<<"bar">>}},
+ ?assertEqual(
+ [kv(Bar1,1), kv(Foo1,1)],
+ remove_ancestors([kv(Bar1,1), kv(Foo1,1)], [])
+ ),
+ ?assertEqual(
+ [kv(Bar1,1), kv(Foo2,2)],
+ remove_ancestors([kv(Bar1,1), kv(Foo1,1), kv(Foo2,1)], [])
+ ),
+ ?assertEqual(
+ [kv(Bar1,2)],
+ remove_ancestors([kv(Bar2,1), kv(Bar1,1)], [])
+ ).
+
+%% test function
+kv(Item, Count) ->
+ {make_key(Item), {Item,Count}}.
diff --git a/deps/fabric/src/fabric_view.erl b/deps/fabric/src/fabric_view.erl
new file mode 100644
index 00000000..fa2127e7
--- /dev/null
+++ b/deps/fabric/src/fabric_view.erl
@@ -0,0 +1,362 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_view).
+
+-export([is_progress_possible/1, remove_overlapping_shards/2, maybe_send_row/1,
+ maybe_pause_worker/3, maybe_resume_worker/2, transform_row/1, keydict/1,
+ extract_view/4, get_shards/2, remove_down_shards/2]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-spec remove_down_shards(#collector{}, node()) ->
+ {ok, #collector{}} | {error, any()}.
+remove_down_shards(Collector, BadNode) ->
+ #collector{callback=Callback, counters=Counters, user_acc=Acc} = Collector,
+ case fabric_util:remove_down_workers(Counters, BadNode) of
+ {ok, NewCounters} ->
+ {ok, Collector#collector{counters = NewCounters}};
+ error ->
+ Reason = {nodedown, <<"progress not possible">>},
+ Callback({error, Reason}, Acc)
+ end.
+
+%% @doc looks for a fully covered keyrange in the list of counters
+-spec is_progress_possible([{#shard{}, term()}]) -> boolean().
+is_progress_possible([]) ->
+ false;
+is_progress_possible(Counters) ->
+ Ranges = fabric_dict:fold(fun(#shard{range=[X,Y]}, _, A) -> [{X,Y}|A] end,
+ [], Counters),
+ [{Start, Tail0} | Rest] = lists:ukeysort(1, Ranges),
+ Result = lists:foldl(fun
+ (_, fail) ->
+ % we've already declared failure
+ fail;
+ (_, complete) ->
+ % this is the success condition, we can fast-forward
+ complete;
+ ({X,_}, Tail) when X > (Tail+1) ->
+ % gap in the keyrange, we're dead
+ fail;
+ ({_,Y}, Tail) ->
+ case erlang:max(Tail, Y) of
+ End when (End+1) =:= (2 bsl 31) ->
+ complete;
+ Else ->
+ % the normal condition, adding to the tail
+ Else
+ end
+ end, if (Tail0+1) =:= (2 bsl 31) -> complete; true -> Tail0 end, Rest),
+ (Start =:= 0) andalso (Result =:= complete).
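+
+% Note: (2 bsl 31) is 2^32, the exclusive upper bound of the hash ring.
+% E.g. ranges [0,10],[11,20],[21,2^32-1] cover the keyspace (complete),
+% while [0,10],[12,2^32-1] leave a gap at 11 and fail.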
+
+-spec remove_overlapping_shards(#shard{}, [{#shard{}, any()}]) ->
+ [{#shard{}, any()}].
+remove_overlapping_shards(#shard{range=[A,B]} = Shard0, Shards) ->
+ fabric_dict:filter(fun(#shard{range=[X,Y], node=Node, ref=Ref} = Shard, _) ->
+ if Shard =:= Shard0 ->
+ % we can't remove ourselves
+ true;
+ A < B, X >= A, X < B ->
+ % lower bound is inside our range
+ rexi:kill(Node, Ref),
+ false;
+ A < B, Y > A, Y =< B ->
+ % upper bound is inside our range
+ rexi:kill(Node, Ref),
+ false;
+ B < A, X >= A orelse B < A, X < B ->
+ % target shard wraps the key range, lower bound is inside
+ rexi:kill(Node, Ref),
+ false;
+ B < A, Y > A orelse B < A, Y =< B ->
+ % target shard wraps the key range, upper bound is inside
+ rexi:kill(Node, Ref),
+ false;
+ true ->
+ true
+ end
+ end, Shards).
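+
+% Illustrative: with Shard0 covering [11,20], a redundant worker covering
+% [0,20] is killed (its upper bound falls inside (11,20]) while a worker
+% covering [0,10] is kept.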
+
+maybe_pause_worker(Worker, From, State) ->
+ #collector{buffer_size = BufferSize, counters = Counters} = State,
+ case fabric_dict:lookup_element(Worker, Counters) of
+ BufferSize ->
+ State#collector{blocked = [{Worker,From} | State#collector.blocked]};
+ _Count ->
+ gen_server:reply(From, ok),
+ State
+ end.
+
+maybe_resume_worker(Worker, State) ->
+ #collector{buffer_size = Buffer, counters = C, blocked = B} = State,
+ case fabric_dict:lookup_element(Worker, C) of
+ Count when Count < Buffer/2 ->
+ case couch_util:get_value(Worker, B) of
+ undefined ->
+ State;
+ From ->
+ gen_server:reply(From, ok),
+ State#collector{blocked = lists:keydelete(Worker, 1, B)}
+ end;
+ _Other ->
+ State
+ end.
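+
+% Illustrative flow control, assuming the default map_buffer_size of 2
+% (see fabric_view_map): a worker is paused by maybe_pause_worker/3 once
+% it has 2 unconsumed rows buffered, and is resumed here only when its
+% count drops below Buffer/2, i.e. when all of its rows are consumed.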
+
+maybe_send_row(#collector{limit=0} = State) ->
+ #collector{counters=Counters, user_acc=AccIn, callback=Callback} = State,
+ case fabric_dict:any(0, Counters) of
+ true ->
+ % we still need to send the total/offset header
+ {ok, State};
+ false ->
+ {_, Acc} = Callback(complete, AccIn),
+ {stop, State#collector{user_acc=Acc}}
+ end;
+maybe_send_row(State) ->
+ #collector{
+ callback = Callback,
+ counters = Counters,
+ skip = Skip,
+ limit = Limit,
+ user_acc = AccIn
+ } = State,
+ case fabric_dict:any(0, Counters) of
+ true ->
+ {ok, State};
+ false ->
+ try get_next_row(State) of
+ {_, NewState} when Skip > 0 ->
+ maybe_send_row(NewState#collector{skip=Skip-1});
+ {Row, NewState} ->
+ case Callback(transform_row(possibly_embed_doc(NewState,Row)), AccIn) of
+ {stop, Acc} ->
+ {stop, NewState#collector{user_acc=Acc, limit=Limit-1}};
+ {ok, Acc} ->
+ maybe_send_row(NewState#collector{user_acc=Acc, limit=Limit-1})
+ end
+ catch complete ->
+ {_, Acc} = Callback(complete, AccIn),
+ {stop, State#collector{user_acc=Acc}}
+ end
+ end.
+
+%% if include_docs=true is used and the emitted values contain an "_id"
+%% field, use those "_id"s to retrieve the linked documents and embed
+%% them in the result
+possibly_embed_doc(_State,
+ #view_row{id=reduced}=Row) ->
+ Row;
+possibly_embed_doc(_State,
+ #view_row{value=undefined}=Row) ->
+ Row;
+possibly_embed_doc(#collector{db_name=DbName, query_args=Args},
+ #view_row{key=_Key, id=_Id, value=Value, doc=_Doc}=Row) ->
+ #view_query_args{include_docs=IncludeDocs} = Args,
+ case IncludeDocs andalso is_tuple(Value) of
+ true ->
+ {Props} = Value,
+ Rev0 = couch_util:get_value(<<"_rev">>, Props),
+ case couch_util:get_value(<<"_id">>,Props) of
+ undefined -> Row;
+ IncId ->
+ % use a separate process to call fabric:open_doc
+ % so we don't interfere with the current call
+ {Pid, Ref} = spawn_monitor(fun() ->
+ exit(
+ case Rev0 of
+ undefined ->
+ case fabric:open_doc(DbName, IncId, []) of
+ {ok, NewDoc} ->
+ Row#view_row{doc=couch_doc:to_json_obj(NewDoc,[])};
+ {not_found, _} ->
+ Row#view_row{doc=null}
+ end;
+ Rev0 ->
+ Rev = couch_doc:parse_rev(Rev0),
+ case fabric:open_revs(DbName, IncId, [Rev], []) of
+ {ok, [{ok, NewDoc}]} ->
+ Row#view_row{doc=couch_doc:to_json_obj(NewDoc,[])};
+ {ok, [{{not_found, _}, Rev}]} ->
+ Row#view_row{doc=null}
+ end
+ end) end),
+ receive {'DOWN',Ref,process,Pid, Resp} ->
+ Resp
+ end
+ end;
+ _ -> Row
+ end.
+
+
+keydict(nil) ->
+ undefined;
+keydict(Keys) ->
+ {Dict,_} = lists:foldl(fun(K, {D,I}) -> {dict:store(K,I,D), I+1} end,
+ {dict:new(),0}, Keys),
+ Dict.
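+
+% Illustrative: keydict([<<"b">>, <<"a">>]) maps <<"b">> -> 0 and
+% <<"a">> -> 1, so rows can be sorted in the order the keys were
+% requested rather than in collation order.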
+
+%% internal %%
+
+get_next_row(#collector{rows = []}) ->
+ throw(complete);
+get_next_row(#collector{reducer = RedSrc} = St) when RedSrc =/= undefined ->
+ #collector{
+ query_args = #view_query_args{direction=Dir},
+ keys = Keys,
+ rows = RowDict,
+ os_proc = Proc,
+ counters = Counters0
+ } = St,
+ {Key, RestKeys} = find_next_key(Keys, Dir, RowDict),
+ case dict:find(Key, RowDict) of
+ {ok, Records} ->
+ NewRowDict = dict:erase(Key, RowDict),
+ Counters = lists:foldl(fun(#view_row{worker=Worker}, CountersAcc) ->
+ fabric_dict:update_counter(Worker, -1, CountersAcc)
+ end, Counters0, Records),
+ Wrapped = [[V] || #view_row{value=V} <- Records],
+ {ok, [Reduced]} = couch_query_servers:rereduce(Proc, [RedSrc], Wrapped),
+ NewSt = St#collector{keys=RestKeys, rows=NewRowDict, counters=Counters},
+ NewState = lists:foldl(fun(#view_row{worker=Worker}, StateAcc) ->
+ maybe_resume_worker(Worker, StateAcc)
+ end, NewSt, Records),
+ {#view_row{key=Key, id=reduced, value=Reduced}, NewState};
+ error ->
+ get_next_row(St#collector{keys=RestKeys})
+ end;
+get_next_row(State) ->
+ #collector{rows = [Row|Rest], counters = Counters0} = State,
+ Worker = Row#view_row.worker,
+ Counters1 = fabric_dict:update_counter(Worker, -1, Counters0),
+ NewState = maybe_resume_worker(Worker, State#collector{counters=Counters1}),
+ {Row, NewState#collector{rows = Rest}}.
+
+find_next_key(nil, Dir, RowDict) ->
+ case lists:sort(sort_fun(Dir), dict:fetch_keys(RowDict)) of
+ [] ->
+ throw(complete);
+ [Key|_] ->
+ {Key, nil}
+ end;
+find_next_key([], _, _) ->
+ throw(complete);
+find_next_key([Key|Rest], _, _) ->
+ {Key, Rest}.
+
+transform_row(#view_row{key=Key, id=reduced, value=Value}) ->
+ {row, {[{key,Key}, {value,Value}]}};
+transform_row(#view_row{key=Key, id=undefined}) ->
+ {row, {[{key,Key}, {error,not_found}]}};
+transform_row(#view_row{key=Key, id=Id, value=Value, doc=undefined}) ->
+ {row, {[{id,Id}, {key,Key}, {value,Value}]}};
+transform_row(#view_row{key=Key, id=Id, value=Value, doc={error,Reason}}) ->
+ {row, {[{id,Id}, {key,Key}, {value,Value}, {error,Reason}]}};
+transform_row(#view_row{key=Key, id=Id, value=Value, doc=Doc}) ->
+ {row, {[{id,Id}, {key,Key}, {value,Value}, {doc,Doc}]}}.
+
+
+sort_fun(fwd) ->
+ fun(A,A) -> true; (A,B) -> couch_view:less_json(A,B) end;
+sort_fun(rev) ->
+ fun(A,A) -> true; (A,B) -> couch_view:less_json(B,A) end.
+
+extract_view(Pid, ViewName, [], _ViewType) ->
+ twig:log(error, "missing_named_view ~p", [ViewName]),
+ exit(Pid, kill),
+ exit(missing_named_view);
+extract_view(Pid, ViewName, [View|Rest], ViewType) ->
+ case lists:member(ViewName, view_names(View, ViewType)) of
+ true ->
+ if ViewType == reduce ->
+ {index_of(ViewName, view_names(View, reduce)), View};
+ true ->
+ View
+ end;
+ false ->
+ extract_view(Pid, ViewName, Rest, ViewType)
+ end.
+
+view_names(View, Type) when Type == red_map; Type == reduce ->
+ [Name || {Name, _} <- View#view.reduce_funs];
+view_names(View, map) ->
+ View#view.map_names.
+
+index_of(X, List) ->
+ index_of(X, List, 1).
+
+index_of(_X, [], _I) ->
+ not_found;
+index_of(X, [X|_Rest], I) ->
+ I;
+index_of(X, [_|Rest], I) ->
+ index_of(X, Rest, I+1).
+
+get_shards(DbName, #view_query_args{stale=Stale})
+ when Stale == ok orelse Stale == update_after ->
+ mem3:ushards(DbName);
+get_shards(DbName, #view_query_args{stale=false}) ->
+ mem3:shards(DbName).
+
+% unit test
+is_progress_possible_test() ->
+ EndPoint = 2 bsl 31,
+ T1 = [[0, EndPoint-1]],
+ ?assertEqual(is_progress_possible(mk_cnts(T1)),true),
+ T2 = [[0,10],[11,20],[21,EndPoint-1]],
+ ?assertEqual(is_progress_possible(mk_cnts(T2)),true),
+ % gap
+ T3 = [[0,10],[12,EndPoint-1]],
+ ?assertEqual(is_progress_possible(mk_cnts(T3)),false),
+ % outside range
+ T4 = [[1,10],[11,20],[21,EndPoint-1]],
+ ?assertEqual(is_progress_possible(mk_cnts(T4)),false),
+ % outside range
+ T5 = [[0,10],[11,20],[21,EndPoint]],
+ ?assertEqual(is_progress_possible(mk_cnts(T5)),false).
+
+remove_overlapping_shards_test() ->
+ EndPoint = 2 bsl 31,
+ T1 = [[0,10],[11,20],[21,EndPoint-1]],
+ Shards = mk_cnts(T1,3),
+ ?assertEqual(orddict:size(
+ remove_overlapping_shards(#shard{name=list_to_atom("node-3"),
+ node=list_to_atom("node-3"),
+ range=[11,20]},
+ Shards)),7).
+
+mk_cnts(Ranges) ->
+ Shards = lists:map(fun(Range) ->
+ #shard{range=Range}
+ end,
+ Ranges),
+ orddict:from_list([{Shard,nil} || Shard <- Shards]).
+
+mk_cnts(Ranges, NoNodes) ->
+ orddict:from_list([{Shard,nil}
+ || Shard <-
+ lists:flatten(lists:map(
+ fun(Range) ->
+ mk_shards(NoNodes,Range,[])
+ end, Ranges))]
+ ).
+
+mk_shards(0,_Range,Shards) ->
+ Shards;
+mk_shards(NoNodes,Range,Shards) ->
+ NodeName = list_to_atom("node-" ++ integer_to_list(NoNodes)),
+ mk_shards(NoNodes-1,Range,
+ [#shard{name=NodeName, node=NodeName, range=Range} | Shards]).
diff --git a/deps/fabric/src/fabric_view_all_docs.erl b/deps/fabric/src/fabric_view_all_docs.erl
new file mode 100644
index 00000000..a769aedc
--- /dev/null
+++ b/deps/fabric/src/fabric_view_all_docs.erl
@@ -0,0 +1,181 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_view_all_docs).
+
+-export([go/4]).
+-export([open_doc/3]). % exported for spawn
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+go(DbName, #view_query_args{keys=nil} = QueryArgs, Callback, Acc0) ->
+ Workers = fabric_util:submit_jobs(mem3:shards(DbName),all_docs,[QueryArgs]),
+ BufferSize = couch_config:get("fabric", "map_buffer_size", "2"),
+ #view_query_args{limit = Limit, skip = Skip} = QueryArgs,
+ State = #collector{
+ query_args = QueryArgs,
+ callback = Callback,
+ buffer_size = list_to_integer(BufferSize),
+ counters = fabric_dict:init(Workers, 0),
+ skip = Skip,
+ limit = Limit,
+ user_acc = Acc0
+ },
+ RexiMon = fabric_util:create_monitors(Workers),
+ try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
+ State, infinity, 5000) of
+ {ok, NewState} ->
+ {ok, NewState#collector.user_acc};
+ {timeout, NewState} ->
+ Callback({error, timeout}, NewState#collector.user_acc);
+ {error, Resp} ->
+ {ok, Resp}
+ after
+ rexi_monitor:stop(RexiMon),
+ fabric_util:cleanup(Workers)
+ end;
+
+
+go(DbName, QueryArgs, Callback, Acc0) ->
+ #view_query_args{
+ direction = Dir,
+ include_docs = IncludeDocs,
+ limit = Limit0,
+ skip = Skip0,
+ keys = Keys
+ } = QueryArgs,
+ {_, Ref0} = spawn_monitor(fun() -> exit(fabric:get_doc_count(DbName)) end),
+ Monitors0 = [spawn_monitor(?MODULE, open_doc, [DbName, Id, IncludeDocs]) ||
+ Id <- Keys],
+ Monitors = if Dir=:=fwd -> Monitors0; true -> lists:reverse(Monitors0) end,
+ receive {'DOWN', Ref0, _, _, {ok, TotalRows}} ->
+ {ok, Acc1} = Callback({total_and_offset, TotalRows, 0}, Acc0),
+ {ok, Acc2} = doc_receive_loop(Monitors, Skip0, Limit0, Callback, Acc1),
+ Callback(complete, Acc2)
+ after 10000 ->
+ Callback(timeout, Acc0)
+ end.
+
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
+ fabric_view:remove_down_shards(State, NodeRef);
+
+handle_message({rexi_EXIT, Reason}, Worker, State) ->
+ #collector{callback=Callback, counters=Counters0, user_acc=Acc} = State,
+ Counters = fabric_dict:erase(Worker, Counters0),
+ case fabric_view:is_progress_possible(Counters) of
+ true ->
+ {ok, State#collector{counters = Counters}};
+ false ->
+ {ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc),
+ {error, Resp}
+ end;
+
+handle_message({total_and_offset, Tot, Off}, {Worker, From}, State) ->
+ #collector{
+ callback = Callback,
+ counters = Counters0,
+ total_rows = Total0,
+ offset = Offset0,
+ user_acc = AccIn
+ } = State,
+ case fabric_dict:lookup_element(Worker, Counters0) of
+ undefined ->
+ % this worker lost the race with other partition copies, terminate
+ gen_server:reply(From, stop),
+ {ok, State};
+ 0 ->
+ gen_server:reply(From, ok),
+ Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
+ Counters2 = fabric_view:remove_overlapping_shards(Worker, Counters1),
+ Total = Total0 + Tot,
+ Offset = Offset0 + Off,
+ case fabric_dict:any(0, Counters2) of
+ true ->
+ {ok, State#collector{
+ counters = Counters2,
+ total_rows = Total,
+ offset = Offset
+ }};
+ false ->
+ FinalOffset = erlang:min(Total, Offset+State#collector.skip),
+ {Go, Acc} = Callback({total_and_offset, Total, FinalOffset}, AccIn),
+ {Go, State#collector{
+ counters = fabric_dict:decrement_all(Counters2),
+ total_rows = Total,
+ offset = FinalOffset,
+ user_acc = Acc
+ }}
+ end
+ end;
+
+handle_message(#view_row{} = Row, {Worker, From}, State) ->
+ #collector{query_args = Args, counters = Counters0, rows = Rows0} = State,
+ Dir = Args#view_query_args.direction,
+ Rows = merge_row(Dir, Row#view_row{worker=Worker}, Rows0),
+ Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
+ State1 = State#collector{rows=Rows, counters=Counters1},
+ State2 = fabric_view:maybe_pause_worker(Worker, From, State1),
+ fabric_view:maybe_send_row(State2);
+
+handle_message(complete, Worker, State) ->
+ Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
+ fabric_view:maybe_send_row(State#collector{counters = Counters}).
+
+
+merge_row(fwd, Row, Rows) ->
+ lists:keymerge(#view_row.id, [Row], Rows);
+merge_row(rev, Row, Rows) ->
+ lists:rkeymerge(#view_row.id, [Row], Rows).
+
+doc_receive_loop([], _, _, _, Acc) ->
+ {ok, Acc};
+doc_receive_loop(_, _, 0, _, Acc) ->
+ {ok, Acc};
+doc_receive_loop([{Pid,Ref}|Rest], Skip, Limit, Callback, Acc) when Skip > 0 ->
+ receive {'DOWN', Ref, process, Pid, #view_row{}} ->
+ doc_receive_loop(Rest, Skip-1, Limit-1, Callback, Acc)
+ after 10000 ->
+ timeout
+ end;
+doc_receive_loop([{Pid,Ref}|Rest], 0, Limit, Callback, AccIn) ->
+ receive {'DOWN', Ref, process, Pid, #view_row{} = Row} ->
+ case Callback(fabric_view:transform_row(Row), AccIn) of
+ {ok, Acc} ->
+ doc_receive_loop(Rest, 0, Limit-1, Callback, Acc);
+ {stop, Acc} ->
+ {ok, Acc}
+ end
+ after 10000 ->
+ timeout
+ end.
+
+open_doc(DbName, Id, IncludeDocs) ->
+ Row = case fabric:open_doc(DbName, Id, [deleted]) of
+ {not_found, missing} ->
+ Doc = undefined,
+ #view_row{key=Id};
+ {ok, #doc{deleted=true, revs=Revs}} ->
+ Doc = null,
+ {RevPos, [RevId|_]} = Revs,
+ Value = {[{rev,couch_doc:rev_to_str({RevPos, RevId})}, {deleted,true}]},
+ #view_row{key=Id, id=Id, value=Value};
+ {ok, #doc{revs=Revs} = Doc0} ->
+ Doc = couch_doc:to_json_obj(Doc0, []),
+ {RevPos, [RevId|_]} = Revs,
+ Value = {[{rev,couch_doc:rev_to_str({RevPos, RevId})}]},
+ #view_row{key=Id, id=Id, value=Value}
+ end,
+ exit(if IncludeDocs -> Row#view_row{doc=Doc}; true -> Row end).
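+
+% Note: the row is delivered as the exit reason of this spawned process;
+% doc_receive_loop/5 above harvests it from the corresponding
+% {'DOWN', Ref, process, Pid, #view_row{}} monitor message.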
diff --git a/deps/fabric/src/fabric_view_changes.erl b/deps/fabric/src/fabric_view_changes.erl
new file mode 100644
index 00000000..41347095
--- /dev/null
+++ b/deps/fabric/src/fabric_view_changes.erl
@@ -0,0 +1,334 @@
+% Copyright 2012 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_view_changes).
+
+-export([go/5, pack_seqs/1]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-import(fabric_db_update_listener, [wait_db_updated/1, stop/1]).
+
+go(DbName, Feed, Options, Callback, Acc0) when Feed == "continuous" orelse
+ Feed == "longpoll" ->
+ Args = make_changes_args(Options),
+ Since = get_start_seq(DbName, Args),
+ case validate_start_seq(DbName, Since) of
+ ok ->
+ {ok, Acc} = Callback(start, Acc0),
+ {Timeout, _} = couch_changes:get_changes_timeout(Args, Callback),
+ Ref = make_ref(),
+ Parent = self(),
+ UpdateListener = {spawn_link(fabric_db_update_listener, go,
+ [Parent, Ref, DbName, Timeout]),
+ Ref},
+ try
+ keep_sending_changes(
+ DbName,
+ Args,
+ Callback,
+ Since,
+ Acc,
+ Timeout,
+ UpdateListener
+ )
+ after
+ stop(UpdateListener)
+ end;
+ Error ->
+ Callback(Error, Acc0)
+ end;
+
+go(DbName, "normal", Options, Callback, Acc0) ->
+ Args = make_changes_args(Options),
+ Since = get_start_seq(DbName, Args),
+ case validate_start_seq(DbName, Since) of
+ ok ->
+ {ok, Acc} = Callback(start, Acc0),
+ {ok, #collector{counters=Seqs, user_acc=AccOut}} = send_changes(
+ DbName,
+ Args,
+ Callback,
+ Since,
+ Acc,
+ 5000
+ ),
+ Callback({stop, pack_seqs(Seqs)}, AccOut);
+ Error ->
+ Callback(Error, Acc0)
+ end.
+
+keep_sending_changes(DbName, Args, Callback, Seqs, AccIn, Timeout, UpListen) ->
+ #changes_args{limit=Limit, feed=Feed, heartbeat=Heartbeat} = Args,
+ {ok, Collector} = send_changes(DbName, Args, Callback, Seqs, AccIn, Timeout),
+ #collector{limit=Limit2, counters=NewSeqs, user_acc=AccOut} = Collector,
+ LastSeq = pack_seqs(NewSeqs),
+ if Limit > Limit2, Feed == "longpoll" ->
+ Callback({stop, LastSeq}, AccOut);
+ true ->
+ case {Heartbeat, wait_db_updated(UpListen)} of
+ {undefined, timeout} ->
+ Callback({stop, LastSeq}, AccOut);
+ _ ->
+ {ok, AccTimeout} = Callback(timeout, AccOut),
+ keep_sending_changes(
+ DbName,
+ Args#changes_args{limit=Limit2},
+ Callback,
+ LastSeq,
+ AccTimeout,
+ Timeout,
+ UpListen
+ )
+ end
+ end.
+
+send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn, Timeout) ->
+ AllShards = mem3:shards(DbName),
+ Seqs = lists:flatmap(fun({#shard{name=Name, node=N} = Shard, Seq}) ->
+ case lists:member(Shard, AllShards) of
+ true ->
+ Ref = rexi:cast(N, {fabric_rpc, changes, [Name,ChangesArgs,Seq]}),
+ [{Shard#shard{ref = Ref}, Seq}];
+ false ->
+ % Find some replacement shards to cover the missing range
+ % TODO It's possible in rare cases of shard merging to end up
+ % with overlapping shard ranges from this technique
+ lists:map(fun(#shard{name=Name2, node=N2} = NewShard) ->
+ Ref = rexi:cast(N2, {fabric_rpc, changes, [Name2,ChangesArgs,0]}),
+ {NewShard#shard{ref = Ref}, 0}
+ end, find_replacement_shards(Shard, AllShards))
+ end
+ end, unpack_seqs(PackedSeqs, DbName)),
+ {Workers, _} = lists:unzip(Seqs),
+ RexiMon = fabric_util:create_monitors(Workers),
+ State = #collector{
+ query_args = ChangesArgs,
+ callback = Callback,
+ counters = orddict:from_list(Seqs),
+ user_acc = AccIn,
+ limit = ChangesArgs#changes_args.limit,
+ rows = Seqs % store sequence positions instead
+ },
+ %% TODO: errors need to be handled here
+ try
+ receive_results(Workers, State, Timeout, Callback, AccIn)
+ after
+ rexi_monitor:stop(RexiMon),
+ fabric_util:cleanup(Workers)
+ end.
+
+receive_results(Workers, State, Timeout, Callback, AccIn) ->
+ case rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, State,
+ infinity, Timeout) of
+ {timeout, NewState0} ->
+ {ok, AccOut} = Callback(timeout, NewState0#collector.user_acc),
+ NewState = NewState0#collector{user_acc = AccOut},
+ receive_results(Workers, NewState, Timeout, Callback, AccOut);
+ {_, NewState} ->
+ {ok, NewState}
+ end.
+
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, nil, State) ->
+ fabric_view:remove_down_shards(State, NodeRef);
+
+handle_message({rexi_EXIT, Reason}, Worker, State) ->
+ #collector{
+ callback=Callback,
+ counters=Counters0,
+ rows = Seqs0,
+ user_acc=Acc
+ } = State,
+ Counters = fabric_dict:erase(Worker, Counters0),
+ Seqs = fabric_dict:erase(Worker, Seqs0),
+ case fabric_view:is_progress_possible(Counters) of
+ true ->
+ {ok, State#collector{counters = Counters, rows=Seqs}};
+ false ->
+ {ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc),
+ {error, Resp}
+ end;
+
+handle_message(_, _, #collector{limit=0} = State) ->
+ {stop, State};
+
+handle_message(#change{key=Seq} = Row0, {Worker, From}, St) ->
+ #collector{
+ query_args = #changes_args{include_docs=IncludeDocs},
+ callback = Callback,
+ counters = S0,
+ limit = Limit,
+ user_acc = AccIn
+ } = St,
+ case fabric_dict:lookup_element(Worker, S0) of
+ undefined ->
+ % this worker lost the race with other partition copies, terminate it
+ gen_server:reply(From, stop),
+ {ok, St};
+ _ ->
+ S1 = fabric_dict:store(Worker, Seq, S0),
+ S2 = fabric_view:remove_overlapping_shards(Worker, S1),
+ Row = Row0#change{key = pack_seqs(S2)},
+ {Go, Acc} = Callback(changes_row(Row, IncludeDocs), AccIn),
+ gen_server:reply(From, Go),
+ {Go, St#collector{counters=S2, limit=Limit-1, user_acc=Acc}}
+ end;
+
+handle_message({complete, EndSeq}, Worker, State) ->
+ #collector{
+ counters = S0,
+ total_rows = Completed % override
+ } = State,
+ case fabric_dict:lookup_element(Worker, S0) of
+ undefined ->
+ {ok, State};
+ _ ->
+ S1 = fabric_dict:store(Worker, EndSeq, S0),
+ % unlikely to have overlaps here, but possible w/ filters
+ S2 = fabric_view:remove_overlapping_shards(Worker, S1),
+ NewState = State#collector{counters=S2, total_rows=Completed+1},
+ case fabric_dict:size(S2) =:= (Completed+1) of
+ true ->
+ {stop, NewState};
+ false ->
+ {ok, NewState}
+ end
+ end.
+
+make_changes_args(#changes_args{style=Style, filter=undefined}=Args) ->
+ Args#changes_args{filter = Style};
+make_changes_args(Args) ->
+ Args.
+
+get_start_seq(_DbName, #changes_args{dir=fwd, since=Since}) ->
+ Since;
+get_start_seq(DbName, #changes_args{dir=rev}) ->
+ Shards = mem3:shards(DbName),
+ Workers = fabric_util:submit_jobs(Shards, get_update_seq, []),
+ {ok, Since} = fabric_util:recv(Workers, #shard.ref,
+ fun collect_update_seqs/3, fabric_dict:init(Workers, -1)),
+ Since.
+
+collect_update_seqs(Seq, Shard, Counters) when is_integer(Seq) ->
+ case fabric_dict:lookup_element(Shard, Counters) of
+ undefined ->
+ % already heard from someone else in this range
+ {ok, Counters};
+ -1 ->
+ C1 = fabric_dict:store(Shard, Seq, Counters),
+ C2 = fabric_view:remove_overlapping_shards(Shard, C1),
+ case fabric_dict:any(-1, C2) of
+ true ->
+ {ok, C2};
+ false ->
+ {stop, pack_seqs(C2)}
+ end
+ end.
+
+pack_seqs(Workers) ->
+ SeqList = [{N,R,S} || {#shard{node=N, range=R}, S} <- Workers],
+ SeqSum = lists:sum(element(2, lists:unzip(Workers))),
+ Opaque = couch_util:encodeBase64Url(term_to_binary(SeqList, [compressed])),
+ [SeqSum, Opaque].
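+
+% Illustrative (hypothetical shards): packing
+%   [{#shard{node='a@db1', range=[0,10]}, 3},
+%    {#shard{node='b@db1', range=[11,20]}, 7}]
+% yields [10, Opaque], where 10 is the sum of the shard sequences and
+% Opaque is the base64url-encoded compressed term
+% [{'a@db1',[0,10],3}, {'b@db1',[11,20],7}].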
+
+unpack_seqs(0, DbName) ->
+ fabric_dict:init(mem3:shards(DbName), 0);
+
+unpack_seqs("0", DbName) ->
+ fabric_dict:init(mem3:shards(DbName), 0);
+
+unpack_seqs([_SeqNum, Opaque], DbName) ->
+ do_unpack_seqs(Opaque, DbName);
+
+unpack_seqs(Packed, DbName) ->
+ NewPattern = "^\\[[0-9]+,\"(?<opaque>.*)\"\\]$",
+ OldPattern = "^([0-9]+-)?(?<opaque>.*)$",
+ Options = [{capture, [opaque], binary}],
+ Opaque = case re:run(Packed, NewPattern, Options) of
+ {match, Match} ->
+ Match;
+ nomatch ->
+ {match, Match} = re:run(Packed, OldPattern, Options),
+ Match
+ end,
+ do_unpack_seqs(Opaque, DbName).
+
+do_unpack_seqs(Opaque, DbName) ->
+ % TODO relies on internal structure of fabric_dict as keylist
+ lists:map(fun({Node, [A,B], Seq}) ->
+ Match = #shard{node=Node, range=[A,B], dbname=DbName, _ = '_'},
+ case ets:match_object(partitions, Match) of
+ [Shard] ->
+ {Shard, Seq};
+ [] ->
+ {Match, Seq} % will be replaced in find_replacement_shards
+ end
+ end, binary_to_term(couch_util:decodeBase64Url(Opaque))).
+
+changes_row(#change{key=Seq, id=Id, value=Value, deleted=true, doc=Doc}, true) ->
+ {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {deleted, true}, {doc, Doc}]}};
+changes_row(#change{key=Seq, id=Id, value=Value, deleted=true}, false) ->
+ {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {deleted, true}]}};
+changes_row(#change{key=Seq, id=Id, value=Value, doc={error,Reason}}, true) ->
+ {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {error,Reason}]}};
+changes_row(#change{key=Seq, id=Id, value=Value, doc=Doc}, true) ->
+ {change, {[{seq,Seq}, {id,Id}, {changes,Value}, {doc,Doc}]}};
+changes_row(#change{key=Seq, id=Id, value=Value}, false) ->
+ {change, {[{seq,Seq}, {id,Id}, {changes,Value}]}}.
+
+find_replacement_shards(#shard{range=Range}, AllShards) ->
+ % TODO make this moar betta -- we might have split or merged the partition
+ [Shard || Shard <- AllShards, Shard#shard.range =:= Range].
+
+validate_start_seq(DbName, Seq) ->
+ try unpack_seqs(Seq, DbName) of _Any ->
+ ok
+ catch _:_ ->
+ Reason = <<"Malformed sequence supplied in 'since' parameter.">>,
+ {error, {bad_request, Reason}}
+ end.
+
+unpack_seqs_test() ->
+ ets:new(partitions, [named_table]),
+
+ % BigCouch 0.3 style.
+ assert_shards("23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+ "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+ "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"),
+
+ % BigCouch 0.4 style.
+ assert_shards([23423,<<"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+ "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+ "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA">>]),
+
+ % BigCouch 0.4 style (as string).
+ assert_shards("[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+ "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+ "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+
+ % with internal hyphen
+ assert_shards("651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
+ "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
+ "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"),
+ assert_shards([651,"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
+ "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
+ "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"]),
+
+ ets:delete(partitions).
+
+assert_shards(Packed) ->
+ ?assertMatch([{#shard{},_}|_], unpack_seqs(Packed, <<"foo">>)).
diff --git a/deps/fabric/src/fabric_view_map.erl b/deps/fabric/src/fabric_view_map.erl
new file mode 100644
index 00000000..96741a8e
--- /dev/null
+++ b/deps/fabric/src/fabric_view_map.erl
@@ -0,0 +1,151 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_view_map).
+
+-export([go/6]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+go(DbName, GroupId, View, Args, Callback, Acc0) when is_binary(GroupId) ->
+ {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
+ go(DbName, DDoc, View, Args, Callback, Acc0);
+
+go(DbName, DDoc, View, Args, Callback, Acc0) ->
+ Shards = fabric_view:get_shards(DbName, Args),
+ Workers = fabric_util:submit_jobs(Shards, map_view, [DDoc, View, Args]),
+ BufferSize = couch_config:get("fabric", "map_buffer_size", "2"),
+ #view_query_args{limit = Limit, skip = Skip, keys = Keys} = Args,
+ State = #collector{
+ db_name=DbName,
+ query_args = Args,
+ callback = Callback,
+ buffer_size = list_to_integer(BufferSize),
+ counters = fabric_dict:init(Workers, 0),
+ skip = Skip,
+ limit = Limit,
+ keys = fabric_view:keydict(Keys),
+ sorted = Args#view_query_args.sorted,
+ user_acc = Acc0
+ },
+ RexiMon = fabric_util:create_monitors(Workers),
+ try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
+ State, infinity, 1000 * 60 * 60) of
+ {ok, NewState} ->
+ {ok, NewState#collector.user_acc};
+ {timeout, NewState} ->
+ Callback({error, timeout}, NewState#collector.user_acc);
+ {error, Resp} ->
+ {ok, Resp}
+ after
+ rexi_monitor:stop(RexiMon),
+ fabric_util:cleanup(Workers)
+ end.
+
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
+ fabric_view:remove_down_shards(State, NodeRef);
+
+handle_message({rexi_EXIT, Reason}, Worker, State) ->
+ #collector{callback=Callback, counters=Counters0, user_acc=Acc} = State,
+ Counters = fabric_dict:erase(Worker, Counters0),
+ case fabric_view:is_progress_possible(Counters) of
+ true ->
+ {ok, State#collector{counters = Counters}};
+ false ->
+ {ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc),
+ {error, Resp}
+ end;
+
+handle_message({total_and_offset, Tot, Off}, {Worker, From}, State) ->
+ #collector{
+ callback = Callback,
+ counters = Counters0,
+ total_rows = Total0,
+ offset = Offset0,
+ user_acc = AccIn
+ } = State,
+ case fabric_dict:lookup_element(Worker, Counters0) of
+ undefined ->
+ % this worker lost the race with other partition copies, terminate
+ gen_server:reply(From, stop),
+ {ok, State};
+ 0 ->
+ gen_server:reply(From, ok),
+ Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
+ Counters2 = fabric_view:remove_overlapping_shards(Worker, Counters1),
+ Total = Total0 + Tot,
+ Offset = Offset0 + Off,
+ case fabric_dict:any(0, Counters2) of
+ true ->
+ {ok, State#collector{
+ counters = Counters2,
+ total_rows = Total,
+ offset = Offset
+ }};
+ false ->
+ FinalOffset = erlang:min(Total, Offset+State#collector.skip),
+ {Go, Acc} = Callback({total_and_offset, Total, FinalOffset}, AccIn),
+ {Go, State#collector{
+ counters = fabric_dict:decrement_all(Counters2),
+ total_rows = Total,
+ offset = FinalOffset,
+ user_acc = Acc
+ }}
+ end
+ end;
+
+handle_message(#view_row{}, {_, _}, #collector{limit=0} = State) ->
+ #collector{callback=Callback} = State,
+ {_, Acc} = Callback(complete, State#collector.user_acc),
+ {stop, State#collector{user_acc=Acc}};
+
+handle_message(#view_row{} = Row, {_,From}, #collector{sorted=false} = St) ->
+ #collector{callback=Callback, user_acc=AccIn, limit=Limit} = St,
+ {Go, Acc} = Callback(fabric_view:transform_row(Row), AccIn),
+ gen_server:reply(From, ok),
+ {Go, St#collector{user_acc=Acc, limit=Limit-1}};
+
+handle_message(#view_row{} = Row, {Worker, From}, State) ->
+ #collector{
+ query_args = #view_query_args{direction=Dir},
+ counters = Counters0,
+ rows = Rows0,
+ keys = KeyDict
+ } = State,
+ Rows = merge_row(Dir, KeyDict, Row#view_row{worker=Worker}, Rows0),
+ Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
+ State1 = State#collector{rows=Rows, counters=Counters1},
+ State2 = fabric_view:maybe_pause_worker(Worker, From, State1),
+ fabric_view:maybe_send_row(State2);
+
+handle_message(complete, Worker, State) ->
+ Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
+ fabric_view:maybe_send_row(State#collector{counters = Counters}).
+
+merge_row(fwd, undefined, Row, Rows) ->
+ lists:merge(fun(#view_row{key=KeyA, id=IdA}, #view_row{key=KeyB, id=IdB}) ->
+ couch_view:less_json([KeyA, IdA], [KeyB, IdB])
+ end, [Row], Rows);
+merge_row(rev, undefined, Row, Rows) ->
+ lists:merge(fun(#view_row{key=KeyA, id=IdA}, #view_row{key=KeyB, id=IdB}) ->
+ couch_view:less_json([KeyB, IdB], [KeyA, IdA])
+ end, [Row], Rows);
+merge_row(_, KeyDict, Row, Rows) ->
+ lists:merge(fun(#view_row{key=A, id=IdA}, #view_row{key=B, id=IdB}) ->
+ if A =:= B -> IdA < IdB; true ->
+ dict:fetch(A, KeyDict) < dict:fetch(B, KeyDict)
+ end
+ end, [Row], Rows).
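+
+% Illustrative: when a keys list was requested, rows are ordered by the
+% position of their key in that list (the KeyDict built by
+% fabric_view:keydict/1), with the doc id breaking ties between equal keys.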
diff --git a/deps/fabric/src/fabric_view_reduce.erl b/deps/fabric/src/fabric_view_reduce.erl
new file mode 100644
index 00000000..58438573
--- /dev/null
+++ b/deps/fabric/src/fabric_view_reduce.erl
@@ -0,0 +1,114 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_view_reduce).
+
+-export([go/6]).
+
+-include("fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+go(DbName, GroupId, View, Args, Callback, Acc0) when is_binary(GroupId) ->
+ {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
+ go(DbName, DDoc, View, Args, Callback, Acc0);
+
+go(DbName, DDoc, VName, Args, Callback, Acc0) ->
+ #group{def_lang=Lang, views=Views} = Group =
+ couch_view_group:design_doc_to_view_group(DDoc),
+ {NthRed, View} = fabric_view:extract_view(nil, VName, Views, reduce),
+ {VName, RedSrc} = lists:nth(NthRed, View#view.reduce_funs),
+ Workers = lists:map(fun(#shard{name=Name, node=N} = Shard) ->
+ Ref = rexi:cast(N, {fabric_rpc, reduce_view, [Name,Group,VName,Args]}),
+ Shard#shard{ref = Ref}
+ end, fabric_view:get_shards(DbName, Args)),
+ RexiMon = fabric_util:create_monitors(Workers),
+ BufferSize = couch_config:get("fabric", "reduce_buffer_size", "20"),
+ #view_query_args{limit = Limit, skip = Skip} = Args,
+ State = #collector{
+ db_name = DbName,
+ query_args = Args,
+ callback = Callback,
+ buffer_size = list_to_integer(BufferSize),
+ counters = fabric_dict:init(Workers, 0),
+ keys = Args#view_query_args.keys,
+ skip = Skip,
+ limit = Limit,
+ lang = Group#group.def_lang,
+ os_proc = couch_query_servers:get_os_process(Lang),
+ reducer = RedSrc,
+ rows = dict:new(),
+ user_acc = Acc0
+ },
+ try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
+ State, infinity, 1000 * 60 * 60) of
+ {ok, NewState} ->
+ {ok, NewState#collector.user_acc};
+ {timeout, NewState} ->
+ Callback({error, timeout}, NewState#collector.user_acc);
+ {error, Resp} ->
+ {ok, Resp}
+ after
+ rexi_monitor:stop(RexiMon),
+ fabric_util:cleanup(Workers),
+ catch couch_query_servers:ret_os_process(State#collector.os_proc)
+ end.
+
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
+ fabric_view:remove_down_shards(State, NodeRef);
+
+handle_message({rexi_EXIT, Reason}, Worker, State) ->
+ #collector{callback=Callback, counters=Counters0, user_acc=Acc} = State,
+ Counters = fabric_dict:erase(Worker, Counters0),
+ case fabric_view:is_progress_possible(Counters) of
+ true ->
+ {ok, State#collector{counters = Counters}};
+ false ->
+ {ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc),
+ {error, Resp}
+ end;
+
+handle_message(#view_row{key=Key} = Row, {Worker, From}, State) ->
+ #collector{counters = Counters0, rows = Rows0} = State,
+ case fabric_dict:lookup_element(Worker, Counters0) of
+ undefined ->
+ % this worker lost the race with other partition copies, terminate it
+ gen_server:reply(From, stop),
+ {ok, State};
+ _ ->
+ Rows = dict:append(Key, Row#view_row{worker=Worker}, Rows0),
+ C1 = fabric_dict:update_counter(Worker, 1, Counters0),
+ % TODO time this call, if slow don't do it every time
+ C2 = fabric_view:remove_overlapping_shards(Worker, C1),
+ State1 = State#collector{rows=Rows, counters=C2},
+ State2 = fabric_view:maybe_pause_worker(Worker, From, State1),
+ fabric_view:maybe_send_row(State2)
+ end;
+
+handle_message(complete, Worker, State) ->
+ C1 = fabric_dict:update_counter(Worker, 1, State#collector.counters),
+ C2 = fabric_view:remove_overlapping_shards(Worker, C1),
+ fabric_view:maybe_send_row(State#collector{counters = C2}).
+
+complete_worker_test() ->
+ Shards =
+ mem3_util:create_partition_map("foo",3,3,[node(),node(),node()]),
+ Workers = lists:map(fun(#shard{} = Shard) ->
+ Ref = make_ref(),
+ Shard#shard{ref = Ref}
+ end,
+ Shards),
+ State = #collector{counters=fabric_dict:init(Workers,0)},
+ {ok, NewState} = handle_message(complete, lists:nth(2,Workers), State),
+ ?assertEqual(orddict:size(NewState#collector.counters),length(Workers) - 2).
diff --git a/deps/ibrowse/BSD_LICENSE b/deps/ibrowse/BSD_LICENSE
new file mode 100644
index 00000000..bc06a2c7
--- /dev/null
+++ b/deps/ibrowse/BSD_LICENSE
@@ -0,0 +1,10 @@
+Copyright (c) 2010, Chandrashekhar Mullaparthi
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ * Neither the name of the T-Mobile nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/ibrowse/LICENSE b/deps/ibrowse/LICENSE
new file mode 100644
index 00000000..c5c0c8d9
--- /dev/null
+++ b/deps/ibrowse/LICENSE
@@ -0,0 +1,9 @@
+ibrowse - a HTTP client written in erlang
+Copyright (C) 2005-2010 Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
+
+This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
diff --git a/deps/ibrowse/Makefile b/deps/ibrowse/Makefile
new file mode 100644
index 00000000..4021c5b9
--- /dev/null
+++ b/deps/ibrowse/Makefile
@@ -0,0 +1,20 @@
+IBROWSE_VSN = $(shell sed -n 's/.*{vsn,.*"\(.*\)"}.*/\1/p' src/ibrowse.app.src)
+
+all:
+ ./rebar compile
+
+clean:
+ ./rebar clean
+
+install: all
+ mkdir -p $(DESTDIR)/lib/ibrowse-$(IBROWSE_VSN)/
+ cp -r ebin $(DESTDIR)/lib/ibrowse-$(IBROWSE_VSN)/
+
+test: all
+ ./rebar eunit
+ (cd test; make)
+ erl -noshell -pa ebin -pa test -s ibrowse -s ibrowse_test unit_tests \
+ -s ibrowse_test verify_chunked_streaming \
+ -s ibrowse_test test_chunked_streaming_once \
+ -s erlang halt
+
diff --git a/deps/ibrowse/README b/deps/ibrowse/README
new file mode 100644
index 00000000..b14822f7
--- /dev/null
+++ b/deps/ibrowse/README
@@ -0,0 +1,531 @@
+ibrowse is an HTTP client. The following is a list of its features.
+ - RFC2616 compliant (AFAIK)
+ - supports GET, POST, OPTIONS, HEAD, PUT, DELETE, TRACE,
+ MKCOL, PROPFIND, PROPPATCH, LOCK, UNLOCK, MOVE and COPY
+ - Understands HTTP/0.9, HTTP/1.0 and HTTP/1.1
+ - Understands chunked encoding
+ - Can generate requests using Chunked Transfer-Encoding
+ - Pools of connections to each webserver
+ - Pipelining support
+ - Download to file
+ - Asynchronous requests. Responses are streamed to a process
+ - Basic authentication
+ - Supports proxy authentication
+ - Can talk to Secure webservers using SSL
+ - any other features in the code not listed here :)
+
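+A minimal usage sketch (the URL is illustrative):
+
+  ibrowse:start(),
+  {ok, Status, Headers, Body} =
+      ibrowse:send_req("http://www.example.com/", [], get).
+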
+ibrowse is available under two different licenses. LGPL or the BSD license.
+
+Comments to : Chandrashekhar.Mullaparthi@gmail.com
+
+Version : 2.2.0
+
+Latest version : git://github.com/cmullaparthi/ibrowse.git
+
+CONTRIBUTORS
+============
+The following people have helped make ibrowse better by reporting bugs,
+supplying patches and also asking for new features. Please write to me if you
+have contributed and I've missed you out.
+
+In alphabetical order:
+
+Adam Kocoloski
+Andrew Tunnell-Jones
+Anthony Molinaro
+Benoit Chesneau
+Chris Newcombe
+Dan Kelley
+Derek Upham
+Eric Merritt
+Erik Reitsma
+Filipe David Manana
+Geoff Cant
+Jeroen Koops
+João Lopes
+Joseph Wayne Norton
+Karol Skocik
+Kostis Sagonas
+Matthew Reilly
+Oscar Hellström
+Paul J. Davis
+Peter Kristensen
+Ram Krishnan
+Richard Cameron
+Ryan Zezeski
+Sean Hinde
+Seth Falcon
+Steve Vinoski
+Thomas Lindgren
+Younès Hafri
+fholzhauser (https://github.com/fholzhauser/)
+tholschuh (https://github.com/tholschuh/)
+
+CONTRIBUTIONS & CHANGE HISTORY
+==============================
+13-04-2011 - v2.2.0
+ * Filipe David Manana added IPv6 support. This is a major new
+ feature. Thank you Filipe!
+ * Joseph Wayne Norton contributed tweaks to .gitignore
+
+09-02-2011 - v2.1.4
+ * Fixed a bug reported by Ryan Zezeski with the
+ save_response_to_file option.
+ https://github.com/cmullaparthi/ibrowse/issues#issue/33
+
+16-01-2011 - v2.1.3
+ * Fixed issues with streaming and chunked responses when using
+ the 'caller controls socket' feature. See following links for
+ details. Contributed by Filipe David Manana.
+ https://github.com/cmullaparthi/ibrowse/pull/24
+ https://github.com/cmullaparthi/ibrowse/pull/25
+ https://github.com/cmullaparthi/ibrowse/pull/27
+ https://github.com/cmullaparthi/ibrowse/pull/28
+ https://github.com/cmullaparthi/ibrowse/pull/29
+
+ * Fix for issue 32 reported by fholzhauser
+ https://github.com/cmullaparthi/ibrowse/issues#issue/32
+
+ * Fixed some dialyzer warnings. Thanks to Kostis for reporting
+ them.
+
+20-12-2010 - v2.1.2
+ * Pipelining wasn't working when used in conjunction with the
+ {stream_to, {self(), once}} option. Bug report by
+ Filipe David Manana.
+
+10-12-2010 - v2.1.1
+ * Fix for https://github.com/cmullaparthi/ibrowse/issues/issue/20
+ by Filipe David Manana
+
+ * Fix for https://github.com/cmullaparthi/ibrowse/issues/issue/21
+ by Filipe David Manana
+
+ * Fix for https://github.com/cmullaparthi/ibrowse/issues/issue/23
+ by Filipe David Manana
+
+ * Fix for bugs when using SSL by João Lopes
+
+25-10-2010 - v2.1.0
+ * Fixed build on OpenSolaris. Bug report and patch from
+ tholschuh.
+ http://github.com/cmullaparthi/ibrowse/issues/issue/10
+
+ * Fixed behaviour of inactivity_timeout option. Reported by
+ João Lopes.
+ http://github.com/cmullaparthi/ibrowse/issues/issue/11
+
+ * Prevent atom table pollution when bogus URLs are input to
+ ibrowse. Bug report by João Lopes.
+ http://github.com/cmullaparthi/ibrowse/issues/issue/13
+
+ * Automatically do Chunked-Transfer encoding of request body
+ when the body is generated by a fun. Patch provided by
+ Filipe David Manana.
+ http://github.com/cmullaparthi/ibrowse/issues/issue/14
+
+ * Depending on input options, ibrowse sometimes included multiple
+ Content-Length headers. Bug reported by Paul J. Davis
+ http://github.com/cmullaparthi/ibrowse/issues/issue/15
+
+ * Deal with webservers which do not provide a Reason-Phrase on the
+ response Status-Line. Patch provided by Jeroen Koops.
+ http://github.com/cmullaparthi/ibrowse/issues/issue/16
+
+ * Fixed http://github.com/cmullaparthi/ibrowse/issues/issue/17
+ This was reported by Filipe David Manana.
+
+ * Fixed http://github.com/cmullaparthi/ibrowse/issues/issue/19
+ This was reported by Dan Kelley and Filipe David Manana.
+
+ * Added ibrowse:stream_close/1 to close the connection
+ associated with a certain response stream. Patch provided by
+ João Lopes.
+
+ * Prevent port number being included in the Host header when port
+ 443 is intended. Bug reported by Andrew Tunnell-Jones
+
+24-09-2010 - v2.0.1
+ * Removed a spurious io:format statement
+
+22-09-2010 - v2.0.0
+
+ * Added option preserve_chunked_encoding. This allows the
+ caller to get the raw HTTP response when the
+ Transfer-Encoding is Chunked. This feature was requested
+ by Benoit Chesneau who wanted to write an HTTP proxy using
+ ibrowse.
+
+ * Fixed bug with the {stream_to, {Pid, once}} option. Bug
+ report and lot of help from Filipe David Manana. Thank
+ you Filipe.
+
+ * The {error, conn_failed} and {error, send_failed} return
+ values are now of the form {error, {conn_failed, Err}}
+ and {error, {send_failed, Err}}. This is so that the
+ specific socket error can be returned to the caller. I
+ think it looks a bit ugly, but that is the best
+ compromise I could come up with.
+
+ * Added application configuration parameters
+ default_max_sessions and default_max_pipeline_size. These
+ were previously hard coded to 10.
+
+ * Versioning of ibrowse now follows the Semantic Versioning
+ principles. See http://semver.org. Thanks to Anthony
+ Molinaro for nudging me in this direction.
+
+ * The connect_timeout option now only applies to the
+ connection setup phase. In previous versions, the time
+ taken to setup the connection was deducted from the
+ specified timeout value for the request.
+
+17-07-2010 - * Merged change made by Filipe David Manana to use the base64
+ module for encoding/decoding.
+
+11-06-2010 - * Removed use of deprecated concat_binary. Patch supplied by
+ Steve Vinoski
+
+10-06-2010 - * Fixed bug in https requests not going via the proxy
+
+12-05-2010 - * Added support for the CONNECT method to tunnel HTTPS through
+ a proxy. When an https URL is requested through a proxy,
+ ibrowse will automatically use the CONNECT method to first
+ setup a tunnel through the proxy. Once this succeeds, the
+ actual request is dispatched. Successfully tested with the
+ new SSL implementation in R13B-03
+ * Added SSL support for direct connections.
+ See ibrowse:spawn_worker_process/1 and
+ ibrowse:spawn_link_worker_process/1
+ * Added option to return raw status line and raw unparsed headers
+
+23-04-2010 - * Fixes to URL parsing by Karol Skocik
+
+08-11-2009 - * Added option headers_as_is
+
+04-10-2009 - * Patch from Kostis Sagonas to cleanup some code and suppress
+ dialyzer warnings
+
+24-09-2009 - * When a filename was supplied with the 'save_response_to_file'
+ option, the option was being ignored. Bug report from
+ Adam Kocoloski
+
+05-09-2009 - * Introduced option to allow caller to set socket options.
+
+29-07-2009 - * The ETS table created for load balancing of requests was not
+ being deleted which led to the node not being able to create
+ any more ETS tables if queries were made to a large number of
+ webservers. ibrowse now deletes the ETS table it creates once the
+ last connection to a webserver is dropped.
+ Reported by Seth Falcon.
+ * Spurious data being returned at end of body in certain cases of
+ chunked encoded responses from the server.
+ Reported by Chris Newcombe.
+
+03-07-2009 - Added option {stream_to, {Pid, once}} which allows the caller
+ to control when it wants to receive more data. If this option
+ is used, the call ibrowse:stream_next(Req_id) should be used
+ to get more data.
+ - Patch submitted by Steve Vinoski to remove compiler warnings
+ about the use of obsolete guards
+
+29-06-2009 - * Fixed following issues reported by Oscar Hellström
+ - Use {active, once} instead of {active, true}
+ - Fix 'dodgy' timeout handling
+ - Use binaries internally instead of lists to reduce memory
+ consumption on 64 bit platforms. The default response format
+ is still 'list' to maintain backwards compatibility. Use the
+ option {response_format, binary} to get responses as binaries.
+ * Fixed chunking bug (reported by Adam Kocoloski)
+ * Added new option {inactivity_timeout, Milliseconds} to timeout
+ requests if no data is received on the link for the specified
+ interval. Useful when responses are large and links are flaky.
+ * Added ibrowse:all_trace_off/0 to turn off all tracing
+ * Change to the way responses to asynchronous requests are
+ returned. The following messages have been removed.
+ * {ibrowse_async_response, Req_id, {chunk_start, Chunk_size}}
+ * {ibrowse_async_response, Req_id, chunk_end}
+ * Fixed Makefiles as part of Debian packaging
+ (thanks to Thomas Lindgren)
+ * Moved repository from Sourceforge to Github
+
+11-06-2009 - * Added option to control size of streamed chunks. Also added
+ option for the client to receive responses in binary format.
+
+21-05-2008 - * Fixed bug in reading some options from the ibrowse.conf file.
+ Reported by Erik Reitsma on the erlyaws mailing list
+ * Fixed bug when cleaning up closing connections
+
+27-03-2008 - * Major rewrite of the load balancing feature. Additional module,
+ ibrowse_lb.erl, introduced to achieve this.
+ * Can now get a handle to a connection process which is not part of
+ the load balancing pool. Useful when an application is making
+ requests to a webserver which are time consuming (such as
+ uploading a large file). Such requests can be put on a separate
+ connection, and all other smaller/quicker requests can use the
+ load balancing pool. See ibrowse:spawn_worker_process/2 and
+ ibrowse:spawn_link_worker_process/2
+ * Ram Krishnan sent a patch to enable a client to send a lot of
+ data in a request by providing a fun which is invoked by the
+ connection handling process. This fun can fetch the data from
+ anywhere. This is useful when trying to upload a large file
+ to a webserver.
+ * Use the TCP_NODELAY option on every socket by default
+ * Rudimentary support for load testing of ibrowse. Undocumented,
+ but see ibrowse_test:load_test/3. Use the source, Luke!
+ * New function ibrowse:show_dest_status/2 to view state of
+ connections/pipelines to a web server
+
+20-02-2008 - Ram Krishnan sent another patch for another hidden bug in the
+ save_response_to_file feature.
+
+07-02-2008 - Ram Krishnan (kriyative _at_ gmail dot com) sent a simple patch to
+ enable specifying the filename in the save_response_to_file option.
+ When testing the patch, I realised that my original implementation
+ of this feature was quite flaky and a lot of corner cases were
+ not covered. Fixed all of them. Thanks Ram!
+
+17-10-2007 - Matthew Reilly (matthew dot reilly _at_ sipphone dot com)
+ sent a bug report and a fix. If the chunk trailer spans two TCP
+ packets, then ibrowse fails to recognise that the chunked transfer
+ has ended.
+
+29-08-2007 - Bug report by Peter Kristensen (ptx _at_ daimi dot au dot dk).
+ ibrowse crashes when the webserver returns just the Status line
+ and nothing else.
+
+28-06-2007 - Added host_header option to enable connection to secure sites
+ via stunnel
+
+20-04-2007 - Geoff Cant sent a patch to remove URL encoding for digits in
+ ibrowse_lib:url_encode/1.
+ ibrowse had a dependency on the inets application because the
+ ibrowse_http_client.erl invoked httpd_util:encode_base64/1. This
+ dependency is now removed and encode_base64/1 has been
+ implemented in ibrowse_lib.erl
+
+06-03-2007 - Eric Merritt sent a patch to support WebDAV requests.
+
+12-01-2007 - Derek Upham sent in a bug fix. The reset_state function was not
+ behaving correctly when the transfer encoding was not chunked.
+
+13-11-2006 - Youns Hafri reported a bug where ibrowse was not returning the
+ temporary filename when the server was closing the connection
+ after sending the data (as in HTTP/1.0).
+ Released ibrowse under the BSD license
+
+12-10-2006 - Chris Newcombe reported bug in dealing with requests where no
+ body is expected in the response. The first request would succeed
+ and the next request would hang.
+
+24-May-2006 - Sean Hinde reported a bug. Async responses with pipelining were
+ returning the wrong results.
+
+08-Dec-2005 - Richard Cameron (camster@citeulike.org). Patch to ibrowse to
+ prevent port number being included in the Host header when port
+ 80 is intended.
+
+22-Nov-2005 - Added ability to generate requests using the Chunked
+ Transfer-Encoding.
+
+08-May-2005 - Youns Hafri made a CRUX LINUX port of ibrowse.
+ http://yhafri.club.fr/crux/index.html
+
+Here are some usage examples. Enjoy!
+
+5> ibrowse:start().
+{ok,<0.94.0>}
+
+%% A simple GET
+6> ibrowse:send_req("http://intranet/messenger/", [], get).
+{ok,"200",
+ [{"Server","Microsoft-IIS/5.0"},
+ {"Content-Location","http://intranet/messenger/index.html"},
+ {"Date","Fri, 17 Dec 2004 15:16:19 GMT"},
+ {"Content-Type","text/html"},
+ {"Accept-Ranges","bytes"},
+ {"Last-Modified","Fri, 17 Dec 2004 08:38:21 GMT"},
+ {"Etag","\"aa7c9dc313e4c41:d77\""},
+ {"Content-Length","953"}],
+ "<html>\r\n\r\n<head>\r\n<title>Messenger</title>\r\n<meta name=\"GENERATOR\" content=\"Microsoft FrontPage 5.0\">\r\n<meta name=\"ProgId\" content=\"FrontPage.Editor.Document\">\r\n<meta name=\"description\" content=\"Messenger Home Page\">\r\n</head>\r\n\r\n<frameset border=\"0\" frameborder=\"0\" rows=\"60,*\">\r\n <frame src=\"/messenger/images/topnav.html\" name=\"mFrameTopNav\" scrolling=\"NO\" target=\"mFrameMain\">\r\n <frameset cols=\"18%,*\">\r\n <frameset rows=\"*,120\">\r\n <frame src=\"index-toc.html\" name=\"mFrameTOC\" target=\"mFrameMain\" scrolling=\"auto\" noresize=\"true\">\r\n <frame src=\"/shared/search/namesearch.html\" name=\"mFrameNameSearch\" scrolling=\"NO\" target=\"mFrameMain\">\r\n </frameset>\r\n <frame src=\"home/16-12-04-xmascardsmms.htm\" name=\"mFrameMain\" scrolling=\"auto\" target=\"mFrameMain\" id=\"mFrameMain\">\r\n </frameset>\r\n <noframes>\r\n <body>\r\n\r\n <p><i>This site requires a browser that can view frames.</i></p>\r\n\r\n </body>\r\n </noframes>\r\n</frameset>\r\n\r\n</html>"}
+
+%% =============================================================================
+%% A GET using a proxy
+7> ibrowse:send_req("http://www.google.com/", [], get, [],
+ [{proxy_user, "XXXXX"},
+ {proxy_password, "XXXXX"},
+ {proxy_host, "proxy"},
+ {proxy_port, 8080}], 1000).
+{ok,"302",
+ [{"Date","Fri, 17 Dec 2004 15:22:56 GMT"},
+ {"Content-Length","217"},
+ {"Content-Type","text/html"},
+ {"Set-Cookie",
+ "PREF=ID=f58155c797f96096:CR=1:TM=1103296999:LM=1103296999:S=FiWdtAqQvhQ0TvHq; expires=Sun, 17-Jan-2038 19:14:07 GMT; path=/; domain=.google.com"},
+ {"Server","GWS/2.1"},
+ {"Location",
+ "http://www.google.co.uk/cxfer?c=PREF%3D:TM%3D1103296999:S%3Do8bEY2FIHwdyGenS&prev=/"},
+ {"Via","1.1 netapp01 (NetCache NetApp/5.5R2)"}],
+ "<HTML><HEAD><TITLE>302 Moved</TITLE></HEAD><BODY>\n<H1>302 Moved</H1>\nThe document has moved\n<A HREF=\"http://www.google.co.uk/cxfer?c=PREF%3D:TM%3D1103296999:S%3Do8bEY2FIHwdyGenS&amp;prev=/\">here</A>.\r\n</BODY></HTML>\r\n"}
+
+%% =============================================================================
+%% A GET response saved to file. A temporary file is created and the
+%% filename returned. The response will only be saved to file if the
+%% status code is in the 200 range. The directory to download to can
+%% be set using the application env var 'download_dir' - the default
+%% is the current working directory.
+8> ibrowse:send_req("http://www.erlang.se/", [], get, [],
+ [{proxy_user, "XXXXX"},
+ {proxy_password, "XXXXX"},
+ {proxy_host, "proxy"},
+ {proxy_port, 8080},
+ {save_response_to_file, true}], 1000).
+{error,req_timedout}
+
+%% =============================================================================
+9> ibrowse:send_req("http://www.erlang.se/", [], get, [],
+ [{proxy_user, "XXXXX"},
+ {proxy_password, "XXXXX"},
+ {proxy_host, "proxy"},
+ {proxy_port, 8080},
+ {save_response_to_file, true}], 5000).
+{ok,"200",
+ [{"Transfer-Encoding","chunked"},
+ {"Date","Fri, 17 Dec 2004 15:24:36 GMT"},
+ {"Content-Type","text/html"},
+ {"Server","Apache/1.3.9 (Unix)"},
+ {"Via","1.1 netapp01 (NetCache NetApp/5.5R2)"}],
+ {file,"/Users/chandru/code/ibrowse/src/ibrowse_tmp_file_1103297041125854"}}
+
+%% =============================================================================
+%% Setting the size of the connection pool and pipeline. This sets the
+%% maximum number of connections to this server to 10 and the pipeline
+%% size to 1. Connections are set up as required.
+11> ibrowse:set_dest("www.hotmail.com", 80, [{max_sessions, 10},
+ {max_pipeline_size, 1}]).
+ok
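+
+%% Note: set_dest/3 is deprecated. A sketch of the equivalent calls using the
+%% current API (set_max_sessions/3 and set_max_pipeline_size/3; both return ok):
+12> ibrowse:set_max_sessions("www.hotmail.com", 80, 10).
+ok
+13> ibrowse:set_max_pipeline_size("www.hotmail.com", 80, 1).
+ok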
+
+%% =============================================================================
+%% Example using the HEAD method
+56> ibrowse:send_req("http://www.erlang.org", [], head).
+{ok,"200",
+ [{"Date","Mon, 28 Feb 2005 04:40:53 GMT"},
+ {"Server","Apache/1.3.9 (Unix)"},
+ {"Last-Modified","Thu, 10 Feb 2005 09:31:23 GMT"},
+ {"Etag","\"8d71d-1efa-420b29eb\""},
+ {"Accept-ranges","bytes"},
+ {"Content-Length","7930"},
+ {"Content-Type","text/html"}],
+ []}
+
+%% =============================================================================
+%% Example using the OPTIONS method
+62> ibrowse:send_req("http://www.sun.com", [], options).
+{ok,"200",
+ [{"Server","Sun Java System Web Server 6.1"},
+ {"Date","Mon, 28 Feb 2005 04:44:39 GMT"},
+ {"Content-Length","0"},
+ {"P3p",
+ "policyref=\"http://www.sun.com/p3p/Sun_P3P_Policy.xml\", CP=\"CAO DSP COR CUR ADMa DEVa TAIa PSAa PSDa CONi TELi OUR SAMi PUBi IND PHY ONL PUR COM NAV INT DEM CNT STA POL PRE GOV\""},
+ {"Set-Cookie",
+ "SUN_ID=X.X.X.X:169191109565879; EXPIRES=Wednesday, 31-Dec-2025 23:59:59 GMT; DOMAIN=.sun.com; PATH=/"},
+ {"Allow",
+ "HEAD, GET, PUT, POST, DELETE, TRACE, OPTIONS, MOVE, INDEX, MKDIR, RMDIR"}],
+ []}
+
+%% =============================================================================
+%% Example of using Asynchronous requests
+18> ibrowse:send_req("http://www.google.com", [], get, [],
+ [{proxy_user, "XXXXX"},
+ {proxy_password, "XXXXX"},
+ {proxy_host, "proxy"},
+ {proxy_port, 8080},
+ {stream_to, self()}]).
+{ibrowse_req_id,{1115,327256,389608}}
+19> flush().
+Shell got {ibrowse_async_headers,{1115,327256,389608},
+ "302",
+ [{"Date","Thu, 05 May 2005 21:06:41 GMT"},
+ {"Content-Length","217"},
+ {"Content-Type","text/html"},
+ {"Set-Cookie",
+ "PREF=ID=b601f16bfa32f071:CR=1:TM=1115327201:LM=1115327201:S=OX5hSB525AMjUUu7; expires=Sun, 17-Jan-2038 19:14:07 GMT; path=/; domain=.google.com"},
+ {"Server","GWS/2.1"},
+ {"Location",
+ "http://www.google.co.uk/cxfer?c=PREF%3D:TM%3D1115327201:S%3DDS9pDJ4IHcAuZ_AS&prev=/"},
+ {"Via",
+ "1.1 hatproxy01 (NetCache NetApp/5.6.2)"}]}
+Shell got {ibrowse_async_response,{1115,327256,389608},
+ "<HTML><HEAD><TITLE>302 Moved</TITLE></HEAD><BODY>\n<H1>302 Moved</H1>\nThe document has moved\n<A HREF=\"http://www.google.co.uk/cxfer?c=PREF%3D:TM%3D1115327201:S%3DDS9pDJ4IHcAuZ_AS&amp;prev=/\">here</A>.\r\n</BODY></HTML>\r\n"}
+Shell got {ibrowse_async_response_end,{1115,327256,389608}}
+ok
+
+%% =============================================================================
+%% Another example of using async requests
+24> ibrowse:send_req("http://yaws.hyber.org/simple_ex2.yaws", [], get, [],
+ [{proxy_user, "XXXXX"},
+ {proxy_password, "XXXXX"},
+ {proxy_host, "proxy"},
+ {proxy_port, 8080},
+ {stream_to, self()}]).
+{ibrowse_req_id,{1115,327430,512314}}
+25> flush().
+Shell got {ibrowse_async_headers,{1115,327430,512314},
+ "200",
+ [{"Date","Thu, 05 May 2005 20:58:08 GMT"},
+ {"Content-Length","64"},
+ {"Content-Type","text/html;charset="},
+ {"Server",
+ "Yaws/1.54 Yet Another Web Server"},
+ {"Via",
+ "1.1 hatproxy01 (NetCache NetApp/5.6.2)"}]}
+Shell got {ibrowse_async_response,{1115,327430,512314},
+ "<html>\n\n\n<h1> Yesssssss </h1>\n\n<h2> Hello again </h2>\n\n\n</html>\n"}
+Shell got {ibrowse_async_response_end,{1115,327430,512314}}
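+
+%% =============================================================================
+%% A sketch (not a captured session) of flow-controlled streaming using the
+%% {stream_to, {self(), once}} option. After each message, the caller must
+%% call ibrowse:stream_next/1 to receive the next one. The URL is illustrative.
+%%
+%% {ibrowse_req_id, Req_id} =
+%%     ibrowse:send_req("http://www.example.com/", [], get, [],
+%%                      [{stream_to, {self(), once}}]),
+%% stream_loop(Req_id).
+
+stream_loop(Req_id) ->
+    receive
+        {ibrowse_async_headers, Req_id, Status, Headers} ->
+            io:format("Status: ~s~nHeaders: ~p~n", [Status, Headers]),
+            ok = ibrowse:stream_next(Req_id),
+            stream_loop(Req_id);
+        {ibrowse_async_response, Req_id, Data} ->
+            io:format("Chunk: ~p~n", [Data]),
+            ok = ibrowse:stream_next(Req_id),
+            stream_loop(Req_id);
+        {ibrowse_async_response_end, Req_id} ->
+            done
+    end.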
+
+%% =============================================================================
+%% Example of request which fails when using the async option. Here
+%% the {ibrowse_req_id, ReqId} is not returned. Instead the error code is
+%% returned.
+68> ibrowse:send_req("http://www.earlyriser.org", [], get, [], [{stream_to, self()}]).
+{error,conn_failed}
+
+%% =============================================================================
+%% Example of a request using both Proxy-Authorization and authorization by
+%% the final webserver.
+17> ibrowse:send_req("http://www.erlang.se/lic_area/protected/patches/erl_756_otp_beam.README",
+ [], get, [],
+ [{proxy_user, "XXXXX"},
+ {proxy_password, "XXXXX"},
+ {proxy_host, "proxy"},
+ {proxy_port, 8080},
+ {basic_auth, {"XXXXX", "XXXXXX"}}]).
+{ok,"200",
+ [{"Accept-Ranges","bytes"},
+ {"Date","Thu, 05 May 2005 21:02:09 GMT"},
+ {"Content-Length","2088"},
+ {"Content-Type","text/plain"},
+ {"Server","Apache/1.3.9 (Unix)"},
+ {"Last-Modified","Tue, 03 May 2005 15:08:18 GMT"},
+ {"ETag","\"1384c8-828-427793e2\""},
+ {"Via","1.1 hatproxy01 (NetCache NetApp/5.6.2)"}],
+ "Patch Id:\t\terl_756_otp_beam\nLabel:\t\t\tinets patch\nDate:\t\t\t2005-05-03\nTrouble Report Id:\tOTP-5513, OTP-5514, OTP-5516, OTP-5517, OTP-5521, OTP-5537\nSeq num:\t\tseq9806\nSystem:\t\t\totp\nRelease:\t\tR10B\nOperating System:\tall\nArchitecture:\t\tall\nErlang machine:\t\tBEAM\nApplication:\t\tinets-4.4\nFiles:\t\t\tall\n\nDescription:\n\n OTP-5513 The server did not handle HTTP-0.9 messages with an implicit\n\t version.\n\n OTP-5514 An internal server timeout killed the request handling\n\t process without sending a message back to the client. As this\n\t timeout only affects a single request it has been set to\n\t infinity (if the main server process dies the request\n\t handling process will also die and the client will receive an\n\t error). This might make a client that does not use a timeout\n\t hang for a longer period of time, but that is an expected\n\t behavior!\n\n OTP-5516 That a third party closes the http servers accept socket is\n\t recoverable for inets, hence intes will only produce an info\n\t report as there was no error in inets but measures where\n\t taken to avoid failure due to errors elsewhere.\n\n OTP-5517 The HTTP client proxy settings where ignored. Bug introduced\n\t in inets-4.3.\n\n OTP-5521 Inets only sent the \"WWW-Authenticate\" header at the first\n\t attempt to get a page, if the user supplied the wrong\n\t user/password combination the header was not sent again. This\n\t forces the user to kill the browser entirely after a failed\n\t login attempt, before the user may try to login again. Inets\n\t now always send the authentication header.\n\n OTP-5537 A major rewrite of big parts of the HTTP server code was\n\t performed. There where many things that did not work\n\t satisfactory. Cgi script handling can never have worked\n\t properly and the cases when it did sort of work, a big\n\t unnecessary delay was enforced. Headers where not always\n\t treated as expected and HTTP version handling did not work,\n\t all responses where sent as version HTTP/1.1 no matter what.\n\n\n"}
+
+%% =============================================================================
+%% Example of a TRACE request. Very interesting! yaws.hyber.org didn't
+%% support this. Nor did www.google.com. But good old BBC supports
+%% this.
+37> ibrowse:send_req("http://www.bbc.co.uk/", [], trace, [],
+ [{proxy_user, "XXXXX"},
+ {proxy_password, "XXXXX"},
+ {proxy_host, "proxy"},
+ {proxy_port, 8080}]).
+{ok,"200",
+ [{"Transfer-Encoding","chunked"},
+ {"Date","Thu, 05 May 2005 21:40:27 GMT"},
+ {"Content-Type","message/http"},
+ {"Server","Apache/2.0.51 (Unix)"},
+ {"Set-Cookie",
+ "BBC-UID=7452e72a29424c5b0b232c7131c7d9395d209b7170e8604072e0fcb3630467300; expires=Mon, 04-May-09 21:40:27 GMT; path=/; domain=bbc.co.uk;"},
+ {"Set-Cookie",
+ "BBC-UID=7452e72a29424c5b0b232c7131c7d9395d209b7170e8604072e0fcb3630467300; expires=Mon, 04-May-09 21:40:27 GMT; path=/; domain=bbc.co.uk;"},
+ {"Via","1.1 hatproxy01 (NetCache NetApp/5.6.2)"}],
+ "TRACE / HTTP/1.1\r\nHost: www.bbc.co.uk\r\nConnection: keep-alive\r\nX-Forwarded-For: 172.24.28.29\r\nVia: 1.1 hatproxy01 (NetCache NetApp/5.6.2)\r\nCookie: BBC-UID=7452e72a29424c5b0b232c7131c7d9395d209b7170e8604072e0fcb3630467300\r\n\r\n"}
diff --git a/deps/ibrowse/doc/ibrowse.html b/deps/ibrowse/doc/ibrowse.html
new file mode 100644
index 00000000..1594d741
--- /dev/null
+++ b/deps/ibrowse/doc/ibrowse.html
@@ -0,0 +1,472 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+<title>Module ibrowse</title>
+<link rel="stylesheet" type="text/css" href="stylesheet.css" title="EDoc">
+</head>
+<body bgcolor="white">
+<div class="navbar"><a name="#navbar_top"></a><table width="100%" border="0" cellspacing="0" cellpadding="2" summary="navigation bar"><tr><td><a href="overview-summary.html" target="overviewFrame">Overview</a></td><td><a href="http://www.erlang.org/"><img src="erlang.png" align="right" border="0" alt="erlang logo"></a></td></tr></table></div>
+<hr>
+
+<h1>Module ibrowse</h1>
+<ul class="index"><li><a href="#description">Description</a></li><li><a href="#index">Function Index</a></li><li><a href="#functions">Function Details</a></li></ul>The ibrowse application implements an HTTP 1.1 client in erlang.
+<p>Copyright © 2005-2010 Chandrashekhar Mullaparthi</p>
+
+<p><b>Version:</b> 2.1.2</p>
+<p><b>Behaviours:</b> <a href="gen_server.html"><tt>gen_server</tt></a>.</p>
+<p><b>Authors:</b> Chandrashekhar Mullaparthi (<a href="mailto:chandrashekhar dot mullaparthi at gmail dot com"><tt>chandrashekhar dot mullaparthi at gmail dot com</tt></a>).</p>
+
+<h2><a name="description">Description</a></h2><p>The ibrowse application implements an HTTP 1.1 client in erlang. This
+module implements the API of the HTTP client. There is one named
+process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
+one process to handle one TCP connection to a webserver
+(implemented in the module ibrowse_http_client). Multiple connections to a
+webserver are setup based on the settings for each webserver. The
+ibrowse process also determines which connection to pipeline a
+certain request on. The functions to call are send_req/3,
+send_req/4, send_req/5, send_req/6.</p>
+
+ <p>Here are a few sample invocations.</p>
+
+ <code>
+ ibrowse:send_req("http://intranet/messenger/", [], get).
+ <br><br>
+
+ ibrowse:send_req("http://www.google.com/", [], get, [],
+ [{proxy_user, "XXXXX"},
+ {proxy_password, "XXXXX"},
+ {proxy_host, "proxy"},
+ {proxy_port, 8080}], 1000).
+ <br><br>
+
+ ibrowse:send_req("http://www.erlang.org/download/otp_src_R10B-3.tar.gz", [], get, [],
+ [{proxy_user, "XXXXX"},
+ {proxy_password, "XXXXX"},
+ {proxy_host, "proxy"},
+ {proxy_port, 8080},
+ {save_response_to_file, true}], 1000).
+ <br><br>
+
+ ibrowse:send_req("http://www.erlang.org", [], head).
+
+ <br><br>
+ ibrowse:send_req("http://www.sun.com", [], options).
+
+ <br><br>
+ ibrowse:send_req("http://www.bbc.co.uk", [], trace).
+
+ <br><br>
+ ibrowse:send_req("http://www.google.com", [], get, [],
+ [{stream_to, self()}]).
+ </code>
+
+<h2><a name="index">Function Index</a></h2>
+<table width="100%" border="1" cellspacing="0" cellpadding="2" summary="function index"><tr><td valign="top"><a href="#all_trace_off-0">all_trace_off/0</a></td><td>Turn Off ALL tracing.</td></tr>
+<tr><td valign="top"><a href="#code_change-3">code_change/3</a></td><td></td></tr>
+<tr><td valign="top"><a href="#get_config_value-1">get_config_value/1</a></td><td>Internal export.</td></tr>
+<tr><td valign="top"><a href="#get_config_value-2">get_config_value/2</a></td><td>Internal export.</td></tr>
+<tr><td valign="top"><a href="#handle_call-3">handle_call/3</a></td><td></td></tr>
+<tr><td valign="top"><a href="#handle_cast-2">handle_cast/2</a></td><td></td></tr>
+<tr><td valign="top"><a href="#handle_info-2">handle_info/2</a></td><td></td></tr>
+<tr><td valign="top"><a href="#init-1">init/1</a></td><td></td></tr>
+<tr><td valign="top"><a href="#rescan_config-0">rescan_config/0</a></td><td>Clear current configuration for ibrowse and load from the file
+ ibrowse.conf in the IBROWSE_EBIN/../priv directory.</td></tr>
+<tr><td valign="top"><a href="#rescan_config-1">rescan_config/1</a></td><td></td></tr>
+<tr><td valign="top"><a href="#send_req-3">send_req/3</a></td><td>This is the basic function to send a HTTP request.</td></tr>
+<tr><td valign="top"><a href="#send_req-4">send_req/4</a></td><td>Same as send_req/3.</td></tr>
+<tr><td valign="top"><a href="#send_req-5">send_req/5</a></td><td>Same as send_req/4.</td></tr>
+<tr><td valign="top"><a href="#send_req-6">send_req/6</a></td><td>Same as send_req/5.</td></tr>
+<tr><td valign="top"><a href="#send_req_direct-4">send_req_direct/4</a></td><td>Same as send_req/3 except that the first argument is the PID
+ returned by spawn_worker_process/2 or spawn_link_worker_process/2.</td></tr>
+<tr><td valign="top"><a href="#send_req_direct-5">send_req_direct/5</a></td><td>Same as send_req/4 except that the first argument is the PID
+ returned by spawn_worker_process/2 or spawn_link_worker_process/2.</td></tr>
+<tr><td valign="top"><a href="#send_req_direct-6">send_req_direct/6</a></td><td>Same as send_req/5 except that the first argument is the PID
+ returned by spawn_worker_process/2 or spawn_link_worker_process/2.</td></tr>
+<tr><td valign="top"><a href="#send_req_direct-7">send_req_direct/7</a></td><td>Same as send_req/6 except that the first argument is the PID
+ returned by spawn_worker_process/2 or spawn_link_worker_process/2.</td></tr>
+<tr><td valign="top"><a href="#set_dest-3">set_dest/3</a></td><td>Deprecated.</td></tr>
+<tr><td valign="top"><a href="#set_max_pipeline_size-3">set_max_pipeline_size/3</a></td><td>Set the maximum pipeline size for each connection to a specific Host:Port.</td></tr>
+<tr><td valign="top"><a href="#set_max_sessions-3">set_max_sessions/3</a></td><td>Set the maximum number of connections allowed to a specific Host:Port.</td></tr>
+<tr><td valign="top"><a href="#show_dest_status-0">show_dest_status/0</a></td><td>Shows some internal information about load balancing.</td></tr>
+<tr><td valign="top"><a href="#show_dest_status-2">show_dest_status/2</a></td><td>Shows some internal information about load balancing to a
+ specified Host:Port.</td></tr>
+<tr><td valign="top"><a href="#spawn_link_worker_process-1">spawn_link_worker_process/1</a></td><td>Same as spawn_worker_process/1 except the the calling process
+ is linked to the worker process which is spawned.</td></tr>
+<tr><td valign="top"><a href="#spawn_link_worker_process-2">spawn_link_worker_process/2</a></td><td>Same as spawn_worker_process/2 except the the calling process
+ is linked to the worker process which is spawned.</td></tr>
+<tr><td valign="top"><a href="#spawn_worker_process-1">spawn_worker_process/1</a></td><td>Creates a HTTP client process to the specified Host:Port which
+ is not part of the load balancing pool.</td></tr>
+<tr><td valign="top"><a href="#spawn_worker_process-2">spawn_worker_process/2</a></td><td>Same as spawn_worker_process/1 but takes as input a Host and Port
+ instead of a URL.</td></tr>
+<tr><td valign="top"><a href="#start-0">start/0</a></td><td>Starts the ibrowse process without linking.</td></tr>
+<tr><td valign="top"><a href="#start_link-0">start_link/0</a></td><td>Starts the ibrowse process linked to the calling process.</td></tr>
+<tr><td valign="top"><a href="#stop-0">stop/0</a></td><td>Stop the ibrowse process.</td></tr>
+<tr><td valign="top"><a href="#stop_worker_process-1">stop_worker_process/1</a></td><td>Terminate a worker process spawned using
+ spawn_worker_process/2 or spawn_link_worker_process/2.</td></tr>
+<tr><td valign="top"><a href="#stream_close-1">stream_close/1</a></td><td>Tell ibrowse to close the connection associated with the
+ specified stream.</td></tr>
+<tr><td valign="top"><a href="#stream_next-1">stream_next/1</a></td><td>Tell ibrowse to stream the next chunk of data to the
+ caller.</td></tr>
+<tr><td valign="top"><a href="#terminate-2">terminate/2</a></td><td></td></tr>
+<tr><td valign="top"><a href="#trace_off-0">trace_off/0</a></td><td>Turn tracing off for the ibrowse process.</td></tr>
+<tr><td valign="top"><a href="#trace_off-2">trace_off/2</a></td><td>Turn tracing OFF for all connections to the specified HTTP
+ server.</td></tr>
+<tr><td valign="top"><a href="#trace_on-0">trace_on/0</a></td><td>Turn tracing on for the ibrowse process.</td></tr>
+<tr><td valign="top"><a href="#trace_on-2">trace_on/2</a></td><td>Turn tracing on for all connections to the specified HTTP
+ server.</td></tr>
+</table>
+
+<h2><a name="functions">Function Details</a></h2>
+
+<h3 class="function"><a name="all_trace_off-0">all_trace_off/0</a></h3>
+<div class="spec">
+<p><tt>all_trace_off() -&gt; ok</tt></p>
+</div><p>Turn Off ALL tracing</p>
+
+<h3 class="function"><a name="code_change-3">code_change/3</a></h3>
+<div class="spec">
+<p><tt>code_change(OldVsn, State, Extra) -&gt; any()</tt></p>
+</div>
+
+<h3 class="function"><a name="get_config_value-1">get_config_value/1</a></h3>
+<div class="spec">
+<p><tt>get_config_value(Key) -&gt; any()</tt></p>
+</div><p>Internal export</p>
+
+<h3 class="function"><a name="get_config_value-2">get_config_value/2</a></h3>
+<div class="spec">
+<p><tt>get_config_value(Key, DefVal) -&gt; any()</tt></p>
+</div><p>Internal export</p>
+
+<h3 class="function"><a name="handle_call-3">handle_call/3</a></h3>
+<div class="spec">
+<p><tt>handle_call(Request, From, State) -&gt; any()</tt></p>
+</div>
+
+<h3 class="function"><a name="handle_cast-2">handle_cast/2</a></h3>
+<div class="spec">
+<p><tt>handle_cast(Msg, State) -&gt; any()</tt></p>
+</div>
+
+<h3 class="function"><a name="handle_info-2">handle_info/2</a></h3>
+<div class="spec">
+<p><tt>handle_info(Info, State) -&gt; any()</tt></p>
+</div>
+
+<h3 class="function"><a name="init-1">init/1</a></h3>
+<div class="spec">
+<p><tt>init(X1) -&gt; any()</tt></p>
+</div>
+
+<h3 class="function"><a name="rescan_config-0">rescan_config/0</a></h3>
+<div class="spec">
+<p><tt>rescan_config() -&gt; any()</tt></p>
+</div><p>Clear current configuration for ibrowse and load from the file
+ ibrowse.conf in the IBROWSE_EBIN/../priv directory. Current
+ configuration is cleared only if the ibrowse.conf file is readable
+ using file:consult/1</p>
+
+<h3 class="function"><a name="rescan_config-1">rescan_config/1</a></h3>
+<div class="spec">
+<p><tt>rescan_config(File) -&gt; any()</tt></p>
+</div>
+
+<h3 class="function"><a name="send_req-3">send_req/3</a></h3>
+<div class="spec">
+<p><tt>send_req(Url::string(), Headers::<a href="#type-headerList">headerList()</a>, Method::<a href="#type-method">method()</a>) -&gt; <a href="#type-response">response()</a></tt>
+<ul class="definitions"><li><tt><a name="type-headerList">headerList()</a> = [{<a href="#type-header">header()</a>, <a href="#type-value">value()</a>}]</tt></li>
+<li><tt><a name="type-header">header()</a> = atom() | string()</tt></li>
+<li><tt><a name="type-value">value()</a> = term()</tt></li>
+<li><tt><a name="type-method">method()</a> = get | post | head | options | put | delete | trace | mkcol | propfind | proppatch | lock | unlock | move | copy</tt></li>
+<li><tt>Status = string()</tt></li>
+<li><tt>ResponseHeaders = [<a href="#type-respHeader">respHeader()</a>]</tt></li>
+<li><tt><a name="type-respHeader">respHeader()</a> = {<a href="#type-headerName">headerName()</a>, <a href="#type-headerValue">headerValue()</a>}</tt></li>
+<li><tt><a name="type-headerName">headerName()</a> = string()</tt></li>
+<li><tt><a name="type-headerValue">headerValue()</a> = string()</tt></li>
+<li><tt><a name="type-response">response()</a> = {ok, Status, ResponseHeaders, ResponseBody} | {ibrowse_req_id, <a href="#type-req_id">req_id()</a>} | {error, Reason}</tt></li>
+<li><tt><a name="type-req_id">req_id()</a> = term()</tt></li>
+<li><tt>ResponseBody = string() | {file, Filename}</tt></li>
+<li><tt>Reason = term()</tt></li>
+</ul></p>
+</div><p>This is the basic function to send an HTTP request.
+ The Status return value indicates the HTTP status code returned by the webserver</p>
+
+<h3 class="function"><a name="send_req-4">send_req/4</a></h3>
+<div class="spec">
+<p><tt>send_req(Url, Headers, Method::<a href="#type-method">method()</a>, Body::<a href="#type-body">body()</a>) -&gt; <a href="#type-response">response()</a></tt>
+<ul class="definitions"><li><tt><a name="type-body">body()</a> = [] | string() | binary() | <a href="#type-fun_arity_0">fun_arity_0()</a> | {<a href="#type-fun_arity_1">fun_arity_1()</a>, <a href="#type-initial_state">initial_state()</a>}</tt></li>
+<li><tt><a name="type-initial_state">initial_state()</a> = term()</tt></li>
+</ul></p>
+</div><p>Same as send_req/3.
+ If a list is specified for the body it has to be a flat list. The body can also be a fun/0 or a fun/1. <br>
+ If fun/0, the connection handling process will repeatedly call the fun until it returns an error or eof. <pre>Fun() = {ok, Data} | eof</pre><br>
+ If fun/1, the connection handling process will repeatedly call the fun with the supplied state until it returns an error or eof. <pre>Fun(State) = {ok, Data} | {ok, Data, NewState} | eof</pre></p>
+
+<h3 class="function"><a name="send_req-5">send_req/5</a></h3>
+<div class="spec">
+<p><tt>send_req(Url::string(), Headers::<a href="#type-headerList">headerList()</a>, Method::<a href="#type-method">method()</a>, Body::<a href="#type-body">body()</a>, Options::<a href="#type-optionList">optionList()</a>) -&gt; <a href="#type-response">response()</a></tt>
+<ul class="definitions"><li><tt><a name="type-optionList">optionList()</a> = [<a href="#type-option">option()</a>]</tt></li>
+<li><tt><a name="type-option">option()</a> = {max_sessions, integer()} | {response_format, <a href="#type-response_format">response_format()</a>} | {stream_chunk_size, integer()} | {max_pipeline_size, integer()} | {trace, <a href="#type-boolean">boolean()</a>} | {is_ssl, <a href="#type-boolean">boolean()</a>} | {ssl_options, [SSLOpt]} | {pool_name, atom()} | {proxy_host, string()} | {proxy_port, integer()} | {proxy_user, string()} | {proxy_password, string()} | {use_absolute_uri, <a href="#type-boolean">boolean()</a>} | {basic_auth, {<a href="#type-username">username()</a>, <a href="#type-password">password()</a>}} | {cookie, string()} | {content_length, integer()} | {content_type, string()} | {save_response_to_file, <a href="#type-srtf">srtf()</a>} | {stream_to, <a href="#type-stream_to">stream_to()</a>} | {http_vsn, {MajorVsn, MinorVsn}} | {host_header, string()} | {inactivity_timeout, integer()} | {connect_timeout, integer()} | {socket_options, Sock_opts} | {transfer_encoding, {chunked, ChunkSize}} | {headers_as_is, <a href="#type-boolean">boolean()</a>} | {give_raw_headers, <a href="#type-boolean">boolean()</a>} | {preserve_chunked_encoding, <a href="#type-boolean">boolean()</a>}</tt></li>
+<li><tt><a name="type-stream_to">stream_to()</a> = <a href="#type-process">process()</a> | {<a href="#type-process">process()</a>, once}</tt></li>
+<li><tt><a name="type-process">process()</a> = pid() | atom()</tt></li>
+<li><tt><a name="type-username">username()</a> = string()</tt></li>
+<li><tt><a name="type-password">password()</a> = string()</tt></li>
+<li><tt>SSLOpt = term()</tt></li>
+<li><tt>Sock_opts = [Sock_opt]</tt></li>
+<li><tt>Sock_opt = term()</tt></li>
+<li><tt>ChunkSize = integer()</tt></li>
+<li><tt><a name="type-srtf">srtf()</a> = <a href="#type-boolean">boolean()</a> | <a href="#type-filename">filename()</a></tt></li>
+<li><tt><a name="type-filename">filename()</a> = string()</tt></li>
+<li><tt><a name="type-response_format">response_format()</a> = list | binary</tt></li>
+</ul></p>
+</div><p>Same as send_req/4.
+ For a description of SSL Options, look in the <a href="http://www.erlang.org/doc/apps/ssl/index.html">ssl</a> manpage. If the
+ HTTP Version to use is not specified, the default is 1.1.
+ <br>
+ <ul>
+ <li>The <code>host_header</code> option is useful in the case where ibrowse is
+ connecting to a component such as <a href="http://www.stunnel.org">stunnel</a> which then sets up a
+ secure connection to a webserver. In this case, the URL supplied to
+ ibrowse must have the stunnel host/port details, but that won't
+ make sense to the destination webserver. This option can then be
+ used to specify what should go in the <code>Host</code> header in
+ the request.</li>
+ <li>The <code>stream_to</code> option can be used to have the HTTP
+ response streamed to a process as messages as data arrives on the
+ socket. If the calling process wishes to control the rate at which
+ data is received from the server, the option <code>{stream_to,
+ {process(), once}}</code> can be specified. The calling process
+ will have to invoke <code>ibrowse:stream_next(Request_id)</code> to
+ receive the next packet.</li>
+
+ <li>When both the options <code>save_response_to_file</code> and <code>stream_to</code>
+ are specified, the former takes precedence.</li>
+
+ <li>For the <code>save_response_to_file</code> option, the response body is saved to
+ file only if the status code is in the 200-299 range. If not, the response body is returned
+ as a string.</li>
+ <li>Whenever an error occurs in the processing of a request, ibrowse will return as much
+ information as it has, such as HTTP Status Code and HTTP Headers. When this happens, the response
+ is of the form <code>{error, {Reason, {stat_code, StatusCode}, HTTP_headers}}</code></li>
+
+ <li><p>The <code>inactivity_timeout</code> option is useful when
+dealing with large response bodies and/or slow links. In these
+cases, it might be hard to estimate how long a request will take to
+complete. In such cases, the client might want to timeout if no
+data has been received on the link for a certain time interval.</p>
+
+ This value is also used to close connections which are not in use for
+ the specified timeout value.
+ </li>
+
+ <li>
+ The <code>connect_timeout</code> option is to specify how long the
+ client process should wait for connection establishment. This is
+ useful in scenarios where connections to servers are usually setup
+ very fast, but responses might take much longer compared to
+ connection setup. In such cases, it is better for the calling
+ process to timeout faster if there is a problem (DNS lookup
+ delays/failures, network routing issues, etc). The total timeout
+ value specified for the request will be enforced. To illustrate using
+ an example:
+ <code>
+ ibrowse:send_req("http://www.example.com/cgi-bin/request", [], get, [], [{connect_timeout, 100}], 1000).
+ </code>
+ In the above invocation, if the connection isn't established within
+ 100 milliseconds, the request will fail with
+ <code>{error, conn_failed}</code>.<br>
+ If connection setup succeeds, the total time allowed for the
+ request to complete will be 1000 milliseconds minus the time taken
+ for connection setup.
+ </li>
+
+ <li> The <code>socket_options</code> option can be used to set
+ specific options on the socket. The <code>{active, true | false | once}</code>
+ and <code>{packet_type, Packet_type}</code> options will be filtered out by ibrowse. </li>
+
+ <li> The <code>headers_as_is</code> option is to enable the caller
+ to send headers exactly as specified in the request without ibrowse
+ adding some of its own. Required for some picky servers apparently. </li>
+
+ <li>The <code>give_raw_headers</code> option is to enable the
+ caller to get access to the raw status line and raw unparsed
+ headers. Not quite sure why someone would want this, but one of my
+ users asked for it, so here it is. </li>
+
+ <li> The <code>preserve_chunked_encoding</code> option enables the caller
+ to receive the raw data stream when the Transfer-Encoding of the server
+ response is Chunked.
+ </li>
+ </ul>
+ </p>
+
+<h3 class="function"><a name="send_req-6">send_req/6</a></h3>
+<div class="spec">
+<p><tt>send_req(Url, Headers::<a href="#type-headerList">headerList()</a>, Method::<a href="#type-method">method()</a>, Body::<a href="#type-body">body()</a>, Options::<a href="#type-optionList">optionList()</a>, Timeout) -&gt; <a href="#type-response">response()</a></tt>
+<ul class="definitions"><li><tt>Timeout = integer() | infinity</tt></li>
+</ul></p>
+</div><p>Same as send_req/5.
+ All timeout values are in milliseconds.</p>
+
+<h3 class="function"><a name="send_req_direct-4">send_req_direct/4</a></h3>
+<div class="spec">
+<p><tt>send_req_direct(Conn_pid, Url, Headers, Method) -&gt; any()</tt></p>
+</div><p>Same as send_req/3 except that the first argument is the PID
+ returned by spawn_worker_process/2 or spawn_link_worker_process/2</p>
+
+<h3 class="function"><a name="send_req_direct-5">send_req_direct/5</a></h3>
+<div class="spec">
+<p><tt>send_req_direct(Conn_pid, Url, Headers, Method, Body) -&gt; any()</tt></p>
+</div><p>Same as send_req/4 except that the first argument is the PID
+ returned by spawn_worker_process/2 or spawn_link_worker_process/2</p>
+
+<h3 class="function"><a name="send_req_direct-6">send_req_direct/6</a></h3>
+<div class="spec">
+<p><tt>send_req_direct(Conn_pid, Url, Headers, Method, Body, Options) -&gt; any()</tt></p>
+</div><p>Same as send_req/5 except that the first argument is the PID
+ returned by spawn_worker_process/2 or spawn_link_worker_process/2</p>
+
+<h3 class="function"><a name="send_req_direct-7">send_req_direct/7</a></h3>
+<div class="spec">
+<p><tt>send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, Timeout) -&gt; any()</tt></p>
+</div><p>Same as send_req/6 except that the first argument is the PID
+ returned by spawn_worker_process/2 or spawn_link_worker_process/2</p>
+
+<h3 class="function"><a name="set_dest-3">set_dest/3</a></h3>
+<div class="spec">
+<p><tt>set_dest(Host, Port, T) -&gt; any()</tt></p>
+</div><p>Deprecated. Use set_max_sessions/3 and set_max_pipeline_size/3
+ for achieving the same effect.</p>
+
+<h3 class="function"><a name="set_max_pipeline_size-3">set_max_pipeline_size/3</a></h3>
+<div class="spec">
+<p><tt>set_max_pipeline_size(Host::string(), Port::integer(), Max::integer()) -&gt; ok</tt></p>
+</div><p>Set the maximum pipeline size for each connection to a specific Host:Port.</p>
+
+<h3 class="function"><a name="set_max_sessions-3">set_max_sessions/3</a></h3>
+<div class="spec">
+<p><tt>set_max_sessions(Host::string(), Port::integer(), Max::integer()) -&gt; ok</tt></p>
+</div><p>Set the maximum number of connections allowed to a specific Host:Port.</p>
+
+<h3 class="function"><a name="show_dest_status-0">show_dest_status/0</a></h3>
+<div class="spec">
+<p><tt>show_dest_status() -&gt; any()</tt></p>
+</div><p>Shows some internal information about load balancing. Info
+ about workers spawned using spawn_worker_process/2 or
+ spawn_link_worker_process/2 is not included.</p>
+
+<h3 class="function"><a name="show_dest_status-2">show_dest_status/2</a></h3>
+<div class="spec">
+<p><tt>show_dest_status(Host, Port) -&gt; any()</tt></p>
+</div><p>Shows some internal information about load balancing to a
+ specified Host:Port. Info about workers spawned using
+ spawn_worker_process/2 or spawn_link_worker_process/2 is not
+ included.</p>
+
+<h3 class="function"><a name="spawn_link_worker_process-1">spawn_link_worker_process/1</a></h3>
+<div class="spec">
+<p><tt>spawn_link_worker_process(Url::string()) -&gt; {ok, pid()}</tt></p>
+</div><p>Same as spawn_worker_process/1 except that the calling process
+ is linked to the worker process which is spawned.</p>
+
+<h3 class="function"><a name="spawn_link_worker_process-2">spawn_link_worker_process/2</a></h3>
+<div class="spec">
+<p><tt>spawn_link_worker_process(Host::string(), Port::integer()) -&gt; {ok, pid()}</tt></p>
+</div><p>Same as spawn_worker_process/2 except that the calling process
+ is linked to the worker process which is spawned.</p>
+
+<h3 class="function"><a name="spawn_worker_process-1">spawn_worker_process/1</a></h3>
+<div class="spec">
+<p><tt>spawn_worker_process(Url::string()) -&gt; {ok, pid()}</tt></p>
+</div><p>Creates an HTTP client process to the specified Host:Port which
+ is not part of the load balancing pool. This is useful in cases
+ where some requests to a webserver might take a long time whereas
+ some might take a very short time. To avoid getting these quick
+ requests stuck in the pipeline behind time consuming requests, use
+ this function to get a handle to a connection process. <br>
+ <b>Note:</b> Calling this function only creates a worker process. No connection
+ is setup. The connection attempt is made only when the first
+ request is sent via any of the send_req_direct/4,5,6,7 functions.<br>
+ <b>Note:</b> It is the responsibility of the calling process to control
+ pipeline size on such connections.
+ </p>
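+<p>A sketch of typical usage (the URL is illustrative):</p>
+ <code>
+ {ok, Conn_pid} = ibrowse:spawn_worker_process("http://www.example.com/"),<br>
+ Res = ibrowse:send_req_direct(Conn_pid, "http://www.example.com/big_file", [], get),<br>
+ ok = ibrowse:stop_worker_process(Conn_pid).
+ </code>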
+
+<h3 class="function"><a name="spawn_worker_process-2">spawn_worker_process/2</a></h3>
+<div class="spec">
+<p><tt>spawn_worker_process(Host::string(), Port::integer()) -&gt; {ok, pid()}</tt></p>
+</div><p>Same as spawn_worker_process/1 but takes as input a Host and Port
+ instead of a URL.</p>
+
+<h3 class="function"><a name="start-0">start/0</a></h3>
+<div class="spec">
+<p><tt>start() -&gt; any()</tt></p>
+</div><p>Starts the ibrowse process without linking. Useful when testing using the shell</p>
+
+<h3 class="function"><a name="start_link-0">start_link/0</a></h3>
+<div class="spec">
+<p><tt>start_link() -&gt; {ok, pid()}</tt></p>
+</div><p>Starts the ibrowse process linked to the calling process. Usually invoked by the supervisor ibrowse_sup</p>
+
+<h3 class="function"><a name="stop-0">stop/0</a></h3>
+<div class="spec">
+<p><tt>stop() -&gt; any()</tt></p>
+</div><p>Stop the ibrowse process. Useful when testing using the shell.</p>
+
+<h3 class="function"><a name="stop_worker_process-1">stop_worker_process/1</a></h3>
+<div class="spec">
+<p><tt>stop_worker_process(Conn_pid::pid()) -&gt; ok</tt></p>
+</div><p>Terminate a worker process spawned using
+ spawn_worker_process/2 or spawn_link_worker_process/2. Requests in
+ progress will get the error response <pre>{error, closing_on_request}</pre></p>
+
+<h3 class="function"><a name="stream_close-1">stream_close/1</a></h3>
+<div class="spec">
+<p><tt>stream_close(Req_id::<a href="#type-req_id">req_id()</a>) -&gt; ok | {error, unknown_req_id}</tt></p>
+</div><p>Tell ibrowse to close the connection associated with the
+ specified stream. Should be used in conjunction with the
+ <code>stream_to</code> option. Note that all requests in progress on
+ the connection which is serving this Req_id will be aborted, and an
+ error returned.</p>
+
+<h3 class="function"><a name="stream_next-1">stream_next/1</a></h3>
+<div class="spec">
+<p><tt>stream_next(Req_id::<a href="#type-req_id">req_id()</a>) -&gt; ok | {error, unknown_req_id}</tt></p>
+</div><p>Tell ibrowse to stream the next chunk of data to the
+ caller. Should be used in conjunction with the
+ <code>stream_to</code> option</p>
+
+<h3 class="function"><a name="terminate-2">terminate/2</a></h3>
+<div class="spec">
+<p><tt>terminate(Reason, State) -&gt; any()</tt></p>
+</div>
+
+<h3 class="function"><a name="trace_off-0">trace_off/0</a></h3>
+<div class="spec">
+<p><tt>trace_off() -&gt; any()</tt></p>
+</div><p>Turn tracing off for the ibrowse process</p>
+
+<h3 class="function"><a name="trace_off-2">trace_off/2</a></h3>
+<div class="spec">
+<p><tt>trace_off(Host, Port) -&gt; ok</tt></p>
+</div><p>Turn tracing OFF for all connections to the specified HTTP
+ server.</p>
+
+<h3 class="function"><a name="trace_on-0">trace_on/0</a></h3>
+<div class="spec">
+<p><tt>trace_on() -&gt; any()</tt></p>
+</div><p>Turn tracing on for the ibrowse process</p>
+
+<h3 class="function"><a name="trace_on-2">trace_on/2</a></h3>
+<div class="spec">
+<p><tt>trace_on(Host, Port) -&gt; ok</tt>
+<ul class="definitions"><li><tt>Host = string()</tt></li>
+<li><tt>Port = integer()</tt></li>
+</ul></p>
+</div><p>Turn tracing on for all connections to the specified HTTP
+ server. Host is whatever is specified as the domain name in the URL</p>
+<hr>
+
+<div class="navbar"><a name="#navbar_bottom"></a><table width="100%" border="0" cellspacing="0" cellpadding="2" summary="navigation bar"><tr><td><a href="overview-summary.html" target="overviewFrame">Overview</a></td><td><a href="http://www.erlang.org/"><img src="erlang.png" align="right" border="0" alt="erlang logo"></a></td></tr></table></div>
+<p><i>Generated by EDoc, Nov 10 2010, 06:04:33.</i></p>
+</body>
+</html>
diff --git a/deps/ibrowse/doc/ibrowse_lib.html b/deps/ibrowse/doc/ibrowse_lib.html
new file mode 100644
index 00000000..7a00d4f8
--- /dev/null
+++ b/deps/ibrowse/doc/ibrowse_lib.html
@@ -0,0 +1,67 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+<title>Module ibrowse_lib</title>
+<link rel="stylesheet" type="text/css" href="stylesheet.css">
+</head>
+<body bgcolor="white">
+
+<h1>Module ibrowse_lib</h1>
+Module with a few useful functions.
+<ul><li><a href="#description">Description</a></li><li><a href="#index">Function Index</a></li><li><a href="#functions">Function Details</a></li></ul>
+
+<h2><a name="description">Description</a></h2>Module with a few useful functions
+<h2><a name="index">Function Index</a></h2>
+<table width="100%" border="1"><tr><td valign="top"><a href="#dec2hex-2">dec2hex/2</a></td><td>dec2hex taken from gtk.erl in std dist
+ M = integer() -- number of hex digits required
+ N = integer() -- the number to represent as hex.</td></tr>
+<tr><td valign="top"><a href="#decode_base64-1">decode_base64/1</a></td><td>Implements the base64 decoding algorithm.</td></tr>
+<tr><td valign="top"><a href="#decode_rfc822_date-1">decode_rfc822_date/1</a></td><td/></tr>
+<tr><td valign="top"><a href="#drv_ue-1">drv_ue/1</a></td><td/></tr>
+<tr><td valign="top"><a href="#drv_ue-2">drv_ue/2</a></td><td/></tr>
+<tr><td valign="top"><a href="#encode_base64-1">encode_base64/1</a></td><td>Implements the base64 encoding algorithm.</td></tr>
+<tr><td valign="top"><a href="#status_code-1">status_code/1</a></td><td>Given a status code, returns an atom describing the status code.</td></tr>
+<tr><td valign="top"><a href="#url_encode-1">url_encode/1</a></td><td>URL-encodes a string based on RFC 1738.</td></tr>
+</table>
+
+<h2><a name="functions">Function Details</a></h2>
+
+<h3><a name="dec2hex-2">dec2hex/2</a></h3>
+<p><tt>dec2hex(M::integer(), N::integer()) -&gt; string()</tt></p>
+<p>dec2hex taken from gtk.erl in std dist
+ M = integer() -- number of hex digits required
+ N = integer() -- the number to represent as hex</p>
+
+<h3><a name="decode_base64-1">decode_base64/1</a></h3>
+<p><tt>decode_base64(List::In) -&gt; Out | <a href="#type-exit">exit({error, invalid_input})</a></tt>
+<ul><li><tt>In = string() | binary()</tt></li><li><tt>Out = string() | binary()</tt></li></ul></p>
+<p>Implements the base64 decoding algorithm. The output data type matches the input data type.</p>
+
+<h3><a name="decode_rfc822_date-1">decode_rfc822_date/1</a></h3>
+<tt>decode_rfc822_date(String) -&gt; term()
+</tt>
+
+<h3><a name="drv_ue-1">drv_ue/1</a></h3>
+<tt>drv_ue(Str) -&gt; term()
+</tt>
+
+<h3><a name="drv_ue-2">drv_ue/2</a></h3>
+<tt>drv_ue(Str, Port) -&gt; term()
+</tt>
+
+<h3><a name="encode_base64-1">encode_base64/1</a></h3>
+<p><tt>encode_base64(List::In) -&gt; Out</tt>
+<ul><li><tt>In = string() | binary()</tt></li><li><tt>Out = string() | binary()</tt></li></ul></p>
+<p>Implements the base64 encoding algorithm. The output data type matches the input data type.</p>
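+<p>For example (a sketch): <tt>encode_base64("abc")</tt> returns <tt>"YWJj"</tt>, and
+<tt>decode_base64(&lt;&lt;"YWJj"&gt;&gt;)</tt> returns <tt>&lt;&lt;"abc"&gt;&gt;</tt>.</p>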
+
+<h3><a name="status_code-1">status_code/1</a></h3>
+<p><tt>status_code(StatusCode::<a href="#type-status_code">status_code()</a>) -&gt; StatusDescription</tt>
+<ul><li><tt><a name="type-status_code">status_code()</a> = string() | integer()</tt></li><li><tt>StatusDescription = atom()</tt></li></ul></p>
+<p>Given a status code, returns an atom describing the status code.</p>
+
+<h3><a name="url_encode-1">url_encode/1</a></h3>
+<p><tt>url_encode(Str) -&gt; UrlEncodedStr</tt>
+<ul><li><tt>Str = string()</tt></li><li><tt>UrlEncodedStr = string()</tt></li></ul></p>
+<p>URL-encodes a string based on RFC 1738. Returns a flat list.</p>
+</body>
+</html>
diff --git a/deps/ibrowse/doc/short-desc b/deps/ibrowse/doc/short-desc
new file mode 100644
index 00000000..56fa9091
--- /dev/null
+++ b/deps/ibrowse/doc/short-desc
@@ -0,0 +1 @@
+A powerful HTTP/1.1 client written in erlang
diff --git a/deps/ibrowse/include/ibrowse.hrl b/deps/ibrowse/include/ibrowse.hrl
new file mode 100644
index 00000000..18dde827
--- /dev/null
+++ b/deps/ibrowse/include/ibrowse.hrl
@@ -0,0 +1,21 @@
+-ifndef(IBROWSE_HRL).
+-define(IBROWSE_HRL, "ibrowse.hrl").
+
+-record(url, {
+ abspath,
+ host,
+ port,
+ username,
+ password,
+ path,
+ protocol,
+ host_type % 'hostname', 'ipv4_address' or 'ipv6_address'
+}).
+
+-record(lb_pid, {host_port, pid}).
+
+-record(client_conn, {key, cur_pipeline_size = 0, reqs_served = 0}).
+
+-record(ibrowse_conf, {key, value}).
+
+-endif.
diff --git a/deps/ibrowse/priv/ibrowse.conf b/deps/ibrowse/priv/ibrowse.conf
new file mode 100644
index 00000000..83412d7b
--- /dev/null
+++ b/deps/ibrowse/priv/ibrowse.conf
@@ -0,0 +1,18 @@
+%% Configuration file for specifying settings for HTTP servers which this
+%% client will connect to.
+%% The format of each entry is (one per line)
+%% {dest, Hostname, Portnumber, MaxSessions, MaxPipelineSize, Options}.
+%%
+%% where Hostname = string()
+%% Portnumber = integer()
+%% MaxSessions = integer()
+%% MaxPipelineSize = integer()
+%% Options = [{Tag, Val} | ...]
+%% Tag = term()
+%% Value = term()
+%% e.g.
+%% {dest, "covig02", 8000, 10, 10, [{is_ssl, true}, {ssl_options, [option()]}]}.
+%% If SSL is to be used, both the options, is_ssl and ssl_options MUST be specified
+%% where option() is all options supported in the ssl module
+
+{{options, "www.google.co.uk", 80}, [{proxy_host, "proxy"}, {proxy_port, 8080}, {proxy_user, "cmullaparthi"}, {proxy_password, "20nov99"}]}.
diff --git a/deps/ibrowse/rebar.config b/deps/ibrowse/rebar.config
new file mode 100644
index 00000000..a23b6e1d
--- /dev/null
+++ b/deps/ibrowse/rebar.config
@@ -0,0 +1,2 @@
+{erl_opts, [debug_info, warn_unused_vars, nowarn_shadow_vars, warn_unused_import]}.
+{eunit_opts, [verbose]}. \ No newline at end of file
diff --git a/deps/ibrowse/src/Emakefile.src b/deps/ibrowse/src/Emakefile.src
new file mode 100644
index 00000000..ff46b784
--- /dev/null
+++ b/deps/ibrowse/src/Emakefile.src
@@ -0,0 +1,7 @@
+'../src/ibrowse'.
+'../src/ibrowse_http_client'.
+'../src/ibrowse_app'.
+'../src/ibrowse_sup'.
+'../src/ibrowse_lib'.
+'../src/ibrowse_lb'.
+'../src/ibrowse_test'.
diff --git a/deps/ibrowse/src/ibrowse.app.src b/deps/ibrowse/src/ibrowse.app.src
new file mode 100644
index 00000000..3dcbef8f
--- /dev/null
+++ b/deps/ibrowse/src/ibrowse.app.src
@@ -0,0 +1,7 @@
+{application, ibrowse,
+ [{description, "HTTP client application"},
+ {vsn, git},
+ {registered, []},
+ {applications, [kernel,stdlib,sasl]},
+ {env, []},
+ {mod, {ibrowse_app, []}}]}.
diff --git a/deps/ibrowse/src/ibrowse.erl b/deps/ibrowse/src/ibrowse.erl
new file mode 100644
index 00000000..d219212b
--- /dev/null
+++ b/deps/ibrowse/src/ibrowse.erl
@@ -0,0 +1,862 @@
+%%%-------------------------------------------------------------------
+%%% File : ibrowse.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description : Load balancer process for HTTP client connections.
+%%%
+%%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+%% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
+%% @copyright 2005-2011 Chandrashekhar Mullaparthi
+%% @doc The ibrowse application implements an HTTP 1.1 client in erlang. This
+%% module implements the API of the HTTP client. There is one named
+%% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
+%% one process to handle one TCP connection to a webserver
+%% (implemented in the module ibrowse_http_client). Multiple connections to a
+%% webserver are setup based on the settings for each webserver. The
+%% ibrowse process also determines which connection to pipeline a
+%% certain request on. The functions to call are send_req/3,
+%% send_req/4, send_req/5, send_req/6.
+%%
+%% <p>Here are a few sample invocations.</p>
+%%
+%% <code>
+%% ibrowse:send_req("http://intranet/messenger/", [], get).
+%% <br/><br/>
+%%
+%% ibrowse:send_req("http://www.google.com/", [], get, [],
+%% [{proxy_user, "XXXXX"},
+%% {proxy_password, "XXXXX"},
+%% {proxy_host, "proxy"},
+%% {proxy_port, 8080}], 1000).
+%% <br/><br/>
+%%
+%% ibrowse:send_req("http://www.erlang.org/download/otp_src_R10B-3.tar.gz", [], get, [],
+%% [{proxy_user, "XXXXX"},
+%% {proxy_password, "XXXXX"},
+%% {proxy_host, "proxy"},
+%% {proxy_port, 8080},
+%% {save_response_to_file, true}], 1000).
+%% <br/><br/>
+%%
+%% ibrowse:send_req("http://www.erlang.org", [], head).
+%%
+%% <br/><br/>
+%% ibrowse:send_req("http://www.sun.com", [], options).
+%%
+%% <br/><br/>
+%% ibrowse:send_req("http://www.bbc.co.uk", [], trace).
+%%
+%% <br/><br/>
+%% ibrowse:send_req("http://www.google.com", [], get, [],
+%% [{stream_to, self()}]).
+%% </code>
+%%
+
+-module(ibrowse).
+-behaviour(gen_server).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+-export([start_link/0, start/0, stop/0]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% API interface
+-export([
+ rescan_config/0,
+ rescan_config/1,
+ get_config_value/1,
+ get_config_value/2,
+ spawn_worker_process/1,
+ spawn_worker_process/2,
+ spawn_link_worker_process/1,
+ spawn_link_worker_process/2,
+ stop_worker_process/1,
+ send_req/3,
+ send_req/4,
+ send_req/5,
+ send_req/6,
+ send_req_direct/4,
+ send_req_direct/5,
+ send_req_direct/6,
+ send_req_direct/7,
+ stream_next/1,
+ stream_close/1,
+ set_max_sessions/3,
+ set_max_pipeline_size/3,
+ set_dest/3,
+ trace_on/0,
+ trace_off/0,
+ trace_on/2,
+ trace_off/2,
+ all_trace_off/0,
+ show_dest_status/0,
+ show_dest_status/2
+ ]).
+
+-ifdef(debug).
+-compile(export_all).
+-endif.
+
+-import(ibrowse_lib, [
+ parse_url/1,
+ get_value/3,
+ do_trace/2
+ ]).
+
+-record(state, {trace = false}).
+
+-include("ibrowse.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+
+-define(DEF_MAX_SESSIONS,10).
+-define(DEF_MAX_PIPELINE_SIZE,10).
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link/0
+%% Description: Starts the server
+%%--------------------------------------------------------------------
+%% @doc Starts the ibrowse process linked to the calling process. Usually invoked by the supervisor ibrowse_sup
+%% @spec start_link() -> {ok, pid()}
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%% @doc Starts the ibrowse process without linking. Useful when testing using the shell
+start() ->
+ gen_server:start({local, ?MODULE}, ?MODULE, [], [{debug, []}]).
+
+%% @doc Stop the ibrowse process. Useful when testing using the shell.
+stop() ->
+ catch gen_server:call(ibrowse, stop).
+
+%% @doc This is the basic function to send an HTTP request.
+%% The Status return value indicates the HTTP status code returned by the webserver.
+%% @spec send_req(Url::string(), Headers::headerList(), Method::method()) -> response()
+%% headerList() = [{header(), value()}]
+%% header() = atom() | string()
+%% value() = term()
+%% method() = get | post | head | options | put | delete | trace | mkcol | propfind | proppatch | lock | unlock | move | copy
+%% Status = string()
+%% ResponseHeaders = [respHeader()]
+%% respHeader() = {headerName(), headerValue()}
+%% headerName() = string()
+%% headerValue() = string()
+%% response() = {ok, Status, ResponseHeaders, ResponseBody} | {ibrowse_req_id, req_id() } | {error, Reason}
+%% req_id() = term()
+%% ResponseBody = string() | {file, Filename}
+%% Reason = term()
+send_req(Url, Headers, Method) ->
+ send_req(Url, Headers, Method, [], []).
+
+%% @doc Same as send_req/3.
+%% If a list is specified for the body, it has to be a flat list. The body can also be a fun/0 or a fun/1. <br/>
+%% If fun/0, the connection handling process will repeatedly call the fun until it returns an error or eof. <pre>Fun() = {ok, Data} | eof</pre><br/>
+%% If fun/1, the connection handling process will repeatedly call the fun with the supplied state until it returns an error or eof. <pre>Fun(State) = {ok, Data} | {ok, Data, NewState} | eof</pre>
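+%%
+%% A minimal sketch of a fun/1 body which streams a file in 64 KB chunks
+%% (Fd0 is an assumed, already opened raw file descriptor); ibrowse calls
+%% the fun with the supplied state until it returns eof:
+%% <pre>
+%% Body = {fun(Fd) ->
+%%             case file:read(Fd, 65536) of
+%%                 {ok, Data} -> {ok, Data, Fd};
+%%                 eof        -> eof
+%%             end
+%%         end, Fd0},
+%% ibrowse:send_req(Url, [], put, Body).
+%% </pre>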
+%% @spec send_req(Url, Headers, Method::method(), Body::body()) -> response()
+%% body() = [] | string() | binary() | fun_arity_0() | {fun_arity_1(), initial_state()}
+%% initial_state() = term()
+send_req(Url, Headers, Method, Body) ->
+ send_req(Url, Headers, Method, Body, []).
+
+%% @doc Same as send_req/4.
+%% For a description of SSL Options, look in the <a href="http://www.erlang.org/doc/apps/ssl/index.html">ssl</a> manpage. If the
+%% HTTP Version to use is not specified, the default is 1.1.
+%% <br/>
+%% <ul>
+%% <li>The <code>host_header</code> option is useful in the case where ibrowse is
+%% connecting to a component such as <a
+%% href="http://www.stunnel.org">stunnel</a> which then sets up a
+%% secure connection to a webserver. In this case, the URL supplied to
+%% ibrowse must have the stunnel host/port details, but that won't
+%% make sense to the destination webserver. This option can then be
+%% used to specify what should go in the <code>Host</code> header in
+%% the request.</li>
+%% <li>The <code>stream_to</code> option can be used to have the HTTP
+%% response streamed to a process as messages as data arrives on the
+%% socket. If the calling process wishes to control the rate at which
+%% data is received from the server, the option <code>{stream_to,
+%% {process(), once}}</code> can be specified. The calling process
+%% will have to invoke <code>ibrowse:stream_next(Request_id)</code> to
+%% receive the next packet.</li>
+%%
+%% <li>When both the options <code>save_response_to_file</code> and <code>stream_to</code>
+%% are specified, the former takes precedence.</li>
+%%
+%% <li>For the <code>save_response_to_file</code> option, the response body is saved to
+%% file only if the status code is in the 200-299 range. If not, the response body is returned
+%% as a string.</li>
+%% <li>Whenever an error occurs in the processing of a request, ibrowse will return as much
+%% information as it has, such as HTTP Status Code and HTTP Headers. When this happens, the response
+%% is of the form <code>{error, {Reason, {stat_code, StatusCode}, HTTP_headers}}</code></li>
+%%
+%% <li>The <code>inactivity_timeout</code> option is useful when
+%% dealing with large response bodies and/or slow links. In these
+%% cases, it might be hard to estimate how long a request will take to
+%% complete. In such cases, the client might want to timeout if no
+%% data has been received on the link for a certain time interval.
+%%
+%% This value is also used to close connections which are not in use for
+%% the specified timeout value.
+%% </li>
+%%
+%% <li>
+%% The <code>connect_timeout</code> option is to specify how long the
+%% client process should wait for connection establishment. This is
+%% useful in scenarios where connections to servers are usually setup
+%% very fast, but responses might take much longer compared to
+%% connection setup. In such cases, it is better for the calling
+%% process to timeout faster if there is a problem (DNS lookup
+%% delays/failures, network routing issues, etc). The total timeout
+%% value specified for the request will be enforced. To illustrate using
+%% an example:
+%% <code>
+%% ibrowse:send_req("http://www.example.com/cgi-bin/request", [], get, [], [{connect_timeout, 100}], 1000).
+%% </code>
+%% In the above invocation, if the connection isn't established within
+%% 100 milliseconds, the request will fail with
+%% <code>{error, conn_failed}</code>.<br/>
+%% If connection setup succeeds, the total time allowed for the
+%% request to complete will be 1000 milliseconds minus the time taken
+%% for connection setup.
+%% </li>
+%%
+%% <li> The <code>socket_options</code> option can be used to set
+%% specific options on the socket. The <code>{active, true | false | once}</code>
+%% and <code>{packet, Packet_type}</code> options will be filtered out by ibrowse. </li>
+%%
+%% <li> The <code>headers_as_is</code> option is to enable the caller
+%% to send headers exactly as specified in the request without ibrowse
+%% adding some of its own. Some strict servers require the headers to
+%% arrive exactly as supplied. </li>
+%%
+%% <li>The <code>give_raw_headers</code> option is to enable the
+%% caller to get access to the raw status line and raw unparsed
+%% headers, which is useful when the response has to be inspected or
+%% relayed exactly as it arrived on the wire. </li>
+%%
+%% <li> The <code>preserve_chunked_encoding</code> option enables the caller
+%% to receive the raw data stream when the Transfer-Encoding of the server
+%% response is Chunked.
+%% </li>
+%% </ul>
+%%
+%% @spec send_req(Url::string(), Headers::headerList(), Method::method(), Body::body(), Options::optionList()) -> response()
+%% optionList() = [option()]
+%% option() = {max_sessions, integer()} |
+%% {response_format,response_format()}|
+%% {stream_chunk_size, integer()} |
+%% {max_pipeline_size, integer()} |
+%% {trace, boolean()} |
+%% {is_ssl, boolean()} |
+%% {ssl_options, [SSLOpt]} |
+%% {pool_name, atom()} |
+%% {proxy_host, string()} |
+%% {proxy_port, integer()} |
+%% {proxy_user, string()} |
+%% {proxy_password, string()} |
+%% {use_absolute_uri, boolean()} |
+%% {basic_auth, {username(), password()}} |
+%% {cookie, string()} |
+%% {content_length, integer()} |
+%% {content_type, string()} |
+%% {save_response_to_file, srtf()} |
+%% {stream_to, stream_to()} |
+%% {http_vsn, {MajorVsn, MinorVsn}} |
+%% {host_header, string()} |
+%% {inactivity_timeout, integer()} |
+%% {connect_timeout, integer()} |
+%% {socket_options, Sock_opts} |
+%% {transfer_encoding, {chunked, ChunkSize}} |
+%% {headers_as_is, boolean()} |
+%% {give_raw_headers, boolean()} |
+%% {preserve_chunked_encoding,boolean()}
+%%
+%% stream_to() = process() | {process(), once}
+%% process() = pid() | atom()
+%% username() = string()
+%% password() = string()
+%% SSLOpt = term()
+%% Sock_opts = [Sock_opt]
+%% Sock_opt = term()
+%% ChunkSize = integer()
+%% srtf() = boolean() | filename()
+%% filename() = string()
+%% response_format() = list | binary
+send_req(Url, Headers, Method, Body, Options) ->
+ send_req(Url, Headers, Method, Body, Options, 30000).
+
+%% @doc Same as send_req/5.
+%% All timeout values are in milliseconds.
+%% @spec send_req(Url, Headers::headerList(), Method::method(), Body::body(), Options::optionList(), Timeout) -> response()
+%% Timeout = integer() | infinity
+send_req(Url, Headers, Method, Body, Options, Timeout) ->
+ case catch parse_url(Url) of
+ #url{host = Host,
+ port = Port,
+ protocol = Protocol} = Parsed_url ->
+ Lb_pid = case ets:lookup(ibrowse_lb, {Host, Port}) of
+ [] ->
+ get_lb_pid(Parsed_url);
+ [#lb_pid{pid = Lb_pid_1}] ->
+ Lb_pid_1
+ end,
+ Max_sessions = get_max_sessions(Host, Port, Options),
+ Max_pipeline_size = get_max_pipeline_size(Host, Port, Options),
+ Options_1 = merge_options(Host, Port, Options),
+ {SSLOptions, IsSSL} =
+ case (Protocol == https) orelse
+ get_value(is_ssl, Options_1, false) of
+ false -> {[], false};
+ true -> {get_value(ssl_options, Options_1, []), true}
+ end,
+ try_routing_request(Lb_pid, Parsed_url,
+ Max_sessions,
+ Max_pipeline_size,
+ {SSLOptions, IsSSL},
+ Headers, Method, Body, Options_1, Timeout, 0);
+ Err ->
+ {error, {url_parsing_failed, Err}}
+ end.
+
+try_routing_request(Lb_pid, Parsed_url,
+ Max_sessions,
+ Max_pipeline_size,
+ {SSLOptions, IsSSL},
+ Headers, Method, Body, Options_1, Timeout, Try_count) when Try_count < 3 ->
+ case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
+ Max_sessions,
+ Max_pipeline_size,
+ {SSLOptions, IsSSL}) of
+ {ok, Conn_Pid} ->
+ case do_send_req(Conn_Pid, Parsed_url, Headers,
+ Method, Body, Options_1, Timeout) of
+ {error, sel_conn_closed} ->
+ try_routing_request(Lb_pid, Parsed_url,
+ Max_sessions,
+ Max_pipeline_size,
+ {SSLOptions, IsSSL},
+ Headers, Method, Body, Options_1, Timeout, Try_count + 1);
+ Res ->
+ Res
+ end;
+ Err ->
+ Err
+ end;
+try_routing_request(_, _, _, _, _, _, _, _, _, _, _) ->
+ {error, retry_later}.
+
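+%% Fold the per-destination configuration into the caller's options.
+%% Options supplied by the caller take precedence; configured values are
+%% added only for keys the caller did not specify.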
+merge_options(Host, Port, Options) ->
+ Config_options = get_config_value({options, Host, Port}, []),
+ lists:foldl(
+ fun({Key, Val}, Acc) ->
+ case lists:keysearch(Key, 1, Options) of
+ false ->
+ [{Key, Val} | Acc];
+ _ ->
+ Acc
+ end
+ end, Options, Config_options).
+
+get_lb_pid(Url) ->
+ gen_server:call(?MODULE, {get_lb_pid, Url}).
+
+get_max_sessions(Host, Port, Options) ->
+ get_value(max_sessions, Options,
+ get_config_value({max_sessions, Host, Port},
+ default_max_sessions())).
+
+get_max_pipeline_size(Host, Port, Options) ->
+ get_value(max_pipeline_size, Options,
+ get_config_value({max_pipeline_size, Host, Port},
+ default_max_pipeline_size())).
+
+default_max_sessions() ->
+ safe_get_env(ibrowse, default_max_sessions, ?DEF_MAX_SESSIONS).
+
+default_max_pipeline_size() ->
+ safe_get_env(ibrowse, default_max_pipeline_size, ?DEF_MAX_PIPELINE_SIZE).
+
+safe_get_env(App, Key, Def_val) ->
+ case application:get_env(App, Key) of
+ undefined ->
+ Def_val;
+ {ok, Val} ->
+ Val
+ end.
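+
+%% The defaults above are read from the ibrowse application environment.
+%% A sample sys.config entry (the values are purely illustrative):
+%%
+%%   [{ibrowse, [{default_max_sessions, 50},
+%%               {default_max_pipeline_size, 1}]}].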
+
+%% @doc Deprecated. Use set_max_sessions/3 and set_max_pipeline_size/3
+%% to achieve the same effect.
+set_dest(Host, Port, [{max_sessions, Max} | T]) ->
+ set_max_sessions(Host, Port, Max),
+ set_dest(Host, Port, T);
+set_dest(Host, Port, [{max_pipeline_size, Max} | T]) ->
+ set_max_pipeline_size(Host, Port, Max),
+ set_dest(Host, Port, T);
+set_dest(Host, Port, [{trace, Bool} | T]) when Bool == true; Bool == false ->
+    %% forward the actual Bool so that {trace, false} turns tracing off
+    ibrowse ! {trace, Bool, Host, Port},
+ set_dest(Host, Port, T);
+set_dest(_Host, _Port, [H | _]) ->
+ exit({invalid_option, H});
+set_dest(_, _, []) ->
+ ok.
+
+%% @doc Set the maximum number of connections allowed to a specific Host:Port.
+%% @spec set_max_sessions(Host::string(), Port::integer(), Max::integer()) -> ok
+set_max_sessions(Host, Port, Max) when is_integer(Max), Max > 0 ->
+ gen_server:call(?MODULE, {set_config_value, {max_sessions, Host, Port}, Max}).
+
+%% @doc Set the maximum pipeline size for each connection to a specific Host:Port.
+%% @spec set_max_pipeline_size(Host::string(), Port::integer(), Max::integer()) -> ok
+set_max_pipeline_size(Host, Port, Max) when is_integer(Max), Max > 0 ->
+ gen_server:call(?MODULE, {set_config_value, {max_pipeline_size, Host, Port}, Max}).
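+
+%% For example, ibrowse:set_max_sessions("www.example.com", 80, 100)
+%% combined with ibrowse:set_max_pipeline_size("www.example.com", 80, 1)
+%% allows up to 100 parallel connections to that destination while
+%% effectively disabling pipelining on each of them (an illustrative
+%% setting, not a recommendation).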
+
+do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
+ case catch ibrowse_http_client:send_req(Conn_Pid, Parsed_url,
+ Headers, Method, ensure_bin(Body),
+ Options, Timeout) of
+ {'EXIT', {timeout, _}} ->
+ {error, req_timedout};
+ {'EXIT', {noproc, {gen_server, call, [Conn_Pid, _, _]}}} ->
+ {error, sel_conn_closed};
+ {error, connection_closed} ->
+ {error, sel_conn_closed};
+ {'EXIT', Reason} ->
+ {error, {'EXIT', Reason}};
+        %% Fresh variable names are used in this clause on purpose:
+        %% Headers and Body are already bound in the function head, so
+        %% reusing them here would match against the request values
+        %% instead of binding the response values.
+        {ok, St_code, Resp_headers, Resp_body} = Ret when is_binary(Resp_body) ->
+            case get_value(response_format, Options, list) of
+                list ->
+                    {ok, St_code, Resp_headers, binary_to_list(Resp_body)};
+                binary ->
+                    Ret
+            end;
+ Ret ->
+ Ret
+ end.
+
+ensure_bin(L) when is_list(L) -> list_to_binary(L);
+ensure_bin(B) when is_binary(B) -> B;
+ensure_bin(Fun) when is_function(Fun) -> Fun;
+ensure_bin({Fun}) when is_function(Fun) -> Fun;
+ensure_bin({Fun, _} = Body) when is_function(Fun) -> Body.
+
+%% @doc Creates an HTTP client process to the specified Host:Port which
+%% is not part of the load balancing pool. This is useful in cases
+%% where some requests to a webserver might take a long time whereas
+%% some might take a very short time. To avoid getting these quick
+%% requests stuck in the pipeline behind time-consuming requests, use
+%% this function to get a handle to a connection process. <br/>
+%% <b>Note:</b> Calling this function only creates a worker process. No connection
+%% is setup. The connection attempt is made only when the first
+%% request is sent via any of the send_req_direct/4,5,6,7 functions.<br/>
+%% <b>Note:</b> It is the responsibility of the calling process to control
+%% pipeline size on such connections.
+%%
+%% @spec spawn_worker_process(Url::string()) -> {ok, pid()}
+spawn_worker_process(Url) ->
+ ibrowse_http_client:start(Url).
+
+%% @doc Same as spawn_worker_process/1 but takes as input a Host and Port
+%% instead of a URL.
+%% @spec spawn_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
+spawn_worker_process(Host, Port) ->
+ ibrowse_http_client:start({Host, Port}).
+
+%% @doc Same as spawn_worker_process/1 except that the calling process
+%% is linked to the worker process which is spawned.
+%% @spec spawn_link_worker_process(Url::string()) -> {ok, pid()}
+spawn_link_worker_process(Url) ->
+ ibrowse_http_client:start_link(Url).
+
+%% @doc Same as spawn_worker_process/2 except that the calling process
+%% is linked to the worker process which is spawned.
+%% @spec spawn_link_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
+spawn_link_worker_process(Host, Port) ->
+ ibrowse_http_client:start_link({Host, Port}).
+
+%% @doc Terminate a worker process spawned using
+%% spawn_worker_process/2 or spawn_link_worker_process/2. Requests in
+%% progress will get the error response <pre>{error, closing_on_request}</pre>
+%% @spec stop_worker_process(Conn_pid::pid()) -> ok
+stop_worker_process(Conn_pid) ->
+ ibrowse_http_client:stop(Conn_pid).
+
+%% @doc Same as send_req/3 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method) ->
+ send_req_direct(Conn_pid, Url, Headers, Method, [], []).
+
+%% @doc Same as send_req/4 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method, Body) ->
+ send_req_direct(Conn_pid, Url, Headers, Method, Body, []).
+
+%% @doc Same as send_req/5 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method, Body, Options) ->
+ send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, 30000).
+
+%% @doc Same as send_req/6 except that the first argument is the PID
+%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
+send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, Timeout) ->
+ case catch parse_url(Url) of
+ #url{host = Host,
+ port = Port} = Parsed_url ->
+ Options_1 = merge_options(Host, Port, Options),
+ case do_send_req(Conn_pid, Parsed_url, Headers, Method, Body, Options_1, Timeout) of
+ {error, {'EXIT', {noproc, _}}} ->
+ {error, worker_is_dead};
+ Ret ->
+ Ret
+ end;
+ Err ->
+ {error, {url_parsing_failed, Err}}
+ end.
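+
+%% A minimal sketch tying the direct-connection API together (the URL is
+%% illustrative):
+%% <pre>
+%% {ok, Conn_pid} = ibrowse:spawn_worker_process("http://example.com/"),
+%% {ok, "200", _Headers, _Body} =
+%%     ibrowse:send_req_direct(Conn_pid, "http://example.com/", [], get),
+%% ok = ibrowse:stop_worker_process(Conn_pid).
+%% </pre>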
+
+%% @doc Tell ibrowse to stream the next chunk of data to the
+%% caller. Should be used in conjunction with the
+%% <code>stream_to</code> option
+%% @spec stream_next(Req_id :: req_id()) -> ok | {error, unknown_req_id}
+stream_next(Req_id) ->
+ case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
+ [] ->
+ {error, unknown_req_id};
+ [{_, Pid}] ->
+ catch Pid ! {stream_next, Req_id},
+ ok
+ end.
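+
+%% A minimal sketch of flow-controlled streaming with
+%% {stream_to, {self(), once}}; recv_stream is a hypothetical helper, and
+%% the ibrowse_async_* tuples are the standard asynchronous response
+%% messages delivered to the stream_to process:
+%% <pre>
+%% {ibrowse_req_id, Req_id} =
+%%     ibrowse:send_req(Url, [], get, [], [{stream_to, {self(), once}}]),
+%% recv_stream(Req_id).
+%%
+%% recv_stream(Req_id) ->
+%%     receive
+%%         {ibrowse_async_headers, Req_id, _Status, _Headers} ->
+%%             ok = ibrowse:stream_next(Req_id),
+%%             recv_stream(Req_id);
+%%         {ibrowse_async_response, Req_id, _Data} ->
+%%             ok = ibrowse:stream_next(Req_id),
+%%             recv_stream(Req_id);
+%%         {ibrowse_async_response_end, Req_id} ->
+%%             ok
+%%     end.
+%% </pre>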
+
+%% @doc Tell ibrowse to close the connection associated with the
+%% specified stream. Should be used in conjunction with the
+%% <code>stream_to</code> option. Note that all requests in progress on
+%% the connection which is serving this Req_id will be aborted, and an
+%% error returned.
+%% @spec stream_close(Req_id :: req_id()) -> ok | {error, unknown_req_id}
+stream_close(Req_id) ->
+ case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
+ [] ->
+ {error, unknown_req_id};
+ [{_, Pid}] ->
+ catch Pid ! {stream_close, Req_id},
+ ok
+ end.
+
+%% @doc Turn tracing on for the ibrowse process
+trace_on() ->
+ ibrowse ! {trace, true}.
+%% @doc Turn tracing off for the ibrowse process
+trace_off() ->
+ ibrowse ! {trace, false}.
+
+%% @doc Turn tracing on for all connections to the specified HTTP
+%% server. Host is whatever is specified as the domain name in the URL
+%% @spec trace_on(Host, Port) -> ok
+%% Host = string()
+%% Port = integer()
+trace_on(Host, Port) ->
+ ibrowse ! {trace, true, Host, Port},
+ ok.
+
+%% @doc Turn tracing OFF for all connections to the specified HTTP
+%% server.
+%% @spec trace_off(Host, Port) -> ok
+trace_off(Host, Port) ->
+ ibrowse ! {trace, false, Host, Port},
+ ok.
+
+%% @doc Turn Off ALL tracing
+%% @spec all_trace_off() -> ok
+all_trace_off() ->
+ ibrowse ! all_trace_off,
+ ok.
+
+%% @doc Shows some internal information about load balancing. Info
+%% about workers spawned using spawn_worker_process/2 or
+%% spawn_link_worker_process/2 is not included.
+show_dest_status() ->
+ Dests = lists:filter(fun({lb_pid, {Host, Port}, _}) when is_list(Host),
+ is_integer(Port) ->
+ true;
+ (_) ->
+ false
+ end, ets:tab2list(ibrowse_lb)),
+ All_ets = ets:all(),
+ io:format("~-40.40s | ~-5.5s | ~-10.10s | ~s~n",
+ ["Server:port", "ETS", "Num conns", "LB Pid"]),
+ io:format("~80.80.=s~n", [""]),
+ lists:foreach(fun({lb_pid, {Host, Port}, Lb_pid}) ->
+ case lists:dropwhile(
+ fun(Tid) ->
+ ets:info(Tid, owner) /= Lb_pid
+ end, All_ets) of
+ [] ->
+                                io:format("~-40.40s | ~-5.5s | ~-10.10s | ~s~n",
+ [Host ++ ":" ++ integer_to_list(Port),
+ "",
+ "",
+ io_lib:format("~p", [Lb_pid])]
+ );
+ [Tid | _] ->
+ catch (
+ begin
+ Size = ets:info(Tid, size),
+                                      io:format("~-40.40s | ~-5.5s | ~-10.10s | ~s~n",
+ [Host ++ ":" ++ integer_to_list(Port),
+ io_lib:format("~p", [Tid]),
+ integer_to_list(Size),
+ io_lib:format("~p", [Lb_pid])]
+ )
+ end
+ )
+ end
+ end, Dests).
+
+%% @doc Shows some internal information about load balancing to a
+%% specified Host:Port. Info about workers spawned using
+%% spawn_worker_process/2 or spawn_link_worker_process/2 is not
+%% included.
+show_dest_status(Host, Port) ->
+ case ets:lookup(ibrowse_lb, {Host, Port}) of
+ [] ->
+ no_active_processes;
+ [#lb_pid{pid = Lb_pid}] ->
+ io:format("Load Balancer Pid : ~p~n", [Lb_pid]),
+ io:format("LB process msg q size : ~p~n", [(catch process_info(Lb_pid, message_queue_len))]),
+ case lists:dropwhile(
+ fun(Tid) ->
+ ets:info(Tid, owner) /= Lb_pid
+ end, ets:all()) of
+ [] ->
+ io:format("Couldn't locate ETS table for ~p~n", [Lb_pid]);
+ [Tid | _] ->
+ First = ets:first(Tid),
+ Last = ets:last(Tid),
+ Size = ets:info(Tid, size),
+ io:format("LB ETS table id : ~p~n", [Tid]),
+ io:format("Num Connections : ~p~n", [Size]),
+ case Size of
+ 0 ->
+ ok;
+ _ ->
+ {First_p_sz, _} = First,
+ {Last_p_sz, _} = Last,
+ io:format("Smallest pipeline : ~1000.p~n", [First_p_sz]),
+ io:format("Largest pipeline : ~1000.p~n", [Last_p_sz])
+ end
+ end
+ end.
+
+%% @doc Clear current configuration for ibrowse and load from the file
+%% ibrowse.conf in the IBROWSE_EBIN/../priv directory. Current
+%% configuration is cleared only if the ibrowse.conf file is readable
+%% using file:consult/1
+rescan_config() ->
+ gen_server:call(?MODULE, rescan_config).
+
+%% @doc Clear current configuration for ibrowse and load from the specified
+%% file. Current configuration is cleared only if the specified
+%% file is readable using file:consult/1
+rescan_config(File) when is_list(File) ->
+ gen_server:call(?MODULE, {rescan_config, File}).
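+
+%% A sample ibrowse.conf in file:consult/1 format (host and numbers are
+%% illustrative); the accepted terms are exactly those handled by
+%% import_config/1 below:
+%%
+%%   {dest, "www.example.com", 80, 20, 50, [{is_ssl, false}]}.
+%%   {download_dir, "/tmp"}.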
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init/1
+%% Description: Initiates the server
+%% Returns: {ok, State} |
+%% {ok, State, Timeout} |
+%% ignore |
+%% {stop, Reason}
+%%--------------------------------------------------------------------
+init(_) ->
+ process_flag(trap_exit, true),
+ State = #state{},
+ put(my_trace_flag, State#state.trace),
+ put(ibrowse_trace_token, "ibrowse"),
+ ibrowse_lb = ets:new(ibrowse_lb, [named_table, public, {keypos, 2}]),
+ ibrowse_conf = ets:new(ibrowse_conf, [named_table, protected, {keypos, 2}]),
+ ibrowse_stream = ets:new(ibrowse_stream, [named_table, public]),
+ import_config(),
+ {ok, #state{}}.
+
+import_config() ->
+ case code:priv_dir(ibrowse) of
+ {error, _} ->
+ ok;
+ PrivDir ->
+ Filename = filename:join(PrivDir, "ibrowse.conf"),
+ import_config(Filename)
+ end.
+
+import_config(Filename) ->
+ case file:consult(Filename) of
+ {ok, Terms} ->
+ ets:delete_all_objects(ibrowse_conf),
+ Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options})
+ when is_list(Host), is_integer(Port),
+ is_integer(MaxSess), MaxSess > 0,
+ is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
+ I = [{{max_sessions, Host, Port}, MaxSess},
+ {{max_pipeline_size, Host, Port}, MaxPipe},
+ {{options, Host, Port}, Options}],
+ lists:foreach(
+ fun({X, Y}) ->
+ ets:insert(ibrowse_conf,
+ #ibrowse_conf{key = X,
+ value = Y})
+ end, I);
+ ({K, V}) ->
+ ets:insert(ibrowse_conf,
+ #ibrowse_conf{key = K,
+ value = V});
+ (X) ->
+ io:format("Skipping unrecognised term: ~p~n", [X])
+ end,
+ lists:foreach(Fun, Terms);
+ _Err ->
+ ok
+ end.
+
+%% @doc Internal export
+get_config_value(Key) ->
+ [#ibrowse_conf{value = V}] = ets:lookup(ibrowse_conf, Key),
+ V.
+
+%% @doc Internal export
+get_config_value(Key, DefVal) ->
+ case ets:lookup(ibrowse_conf, Key) of
+ [] ->
+ DefVal;
+ [#ibrowse_conf{value = V}] ->
+ V
+ end.
+
+set_config_value(Key, Val) ->
+ ets:insert(ibrowse_conf, #ibrowse_conf{key = Key, value = Val}).
+%%--------------------------------------------------------------------
+%% Function: handle_call/3
+%% Description: Handling call messages
+%% Returns: {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} | (terminate/2 is called)
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_call({get_lb_pid, #url{host = Host, port = Port} = Url}, _From, State) ->
+ Pid = do_get_connection(Url, ets:lookup(ibrowse_lb, {Host, Port})),
+ {reply, Pid, State};
+
+handle_call(stop, _From, State) ->
+ do_trace("IBROWSE shutting down~n", []),
+ ets:foldl(fun(#lb_pid{pid = Pid}, Acc) ->
+ ibrowse_lb:stop(Pid),
+ Acc
+ end, [], ibrowse_lb),
+ {stop, normal, ok, State};
+
+handle_call({set_config_value, Key, Val}, _From, State) ->
+ set_config_value(Key, Val),
+ {reply, ok, State};
+
+handle_call(rescan_config, _From, State) ->
+ Ret = (catch import_config()),
+ {reply, Ret, State};
+
+handle_call({rescan_config, File}, _From, State) ->
+ Ret = (catch import_config(File)),
+ {reply, Ret, State};
+
+handle_call(Request, _From, State) ->
+ Reply = {unknown_request, Request},
+ {reply, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast/2
+%% Description: Handling cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info/2
+%% Description: Handling all non call/cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_info(all_trace_off, State) ->
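+    %% Select the {Host, Port} of every ibrowse_conf entry of the form
+    %% {trace, Host, Port} whose value is true, i.e. all destinations
+    %% with tracing currently enabled.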
+ Mspec = [{{ibrowse_conf,{trace,'$1','$2'},true},[],[{{'$1','$2'}}]}],
+ Trace_on_dests = ets:select(ibrowse_conf, Mspec),
+ Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _) ->
+ case lists:member({H, P}, Trace_on_dests) of
+ false ->
+ ok;
+ true ->
+ catch Pid ! {trace, false}
+ end;
+ (_, Acc) ->
+ Acc
+ end,
+ ets:foldl(Fun, undefined, ibrowse_lb),
+ ets:select_delete(ibrowse_conf, [{{ibrowse_conf,{trace,'$1','$2'},true},[],['true']}]),
+ {noreply, State};
+
+handle_info({trace, Bool}, State) ->
+ put(my_trace_flag, Bool),
+ {noreply, State};
+
+handle_info({trace, Bool, Host, Port}, State) ->
+ Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _)
+ when H == Host,
+ P == Port ->
+ catch Pid ! {trace, Bool};
+ (_, Acc) ->
+ Acc
+ end,
+ ets:foldl(Fun, undefined, ibrowse_lb),
+ ets:insert(ibrowse_conf, #ibrowse_conf{key = {trace, Host, Port},
+ value = Bool}),
+ {noreply, State};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: terminate/2
+%% Description: Shutdown the server
+%% Returns: any (ignored by gen_server)
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Convert process state when code is changed
+%% Returns: {ok, NewState}
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+do_get_connection(#url{host = Host, port = Port}, []) ->
+ {ok, Pid} = ibrowse_lb:start_link([Host, Port]),
+ ets:insert(ibrowse_lb, #lb_pid{host_port = {Host, Port}, pid = Pid}),
+ Pid;
+do_get_connection(_Url, [#lb_pid{pid = Pid}]) ->
+ Pid.
diff --git a/deps/ibrowse/src/ibrowse_app.erl b/deps/ibrowse/src/ibrowse_app.erl
new file mode 100644
index 00000000..d3a0f7bb
--- /dev/null
+++ b/deps/ibrowse/src/ibrowse_app.erl
@@ -0,0 +1,63 @@
+%%%-------------------------------------------------------------------
+%%% File : ibrowse_app.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description : Application callback module for the ibrowse application
+%%%
+%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_app).
+
+-behaviour(application).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+%%--------------------------------------------------------------------
+-export([
+ start/2,
+ stop/1
+ ]).
+
+%%--------------------------------------------------------------------
+%% Internal exports
+%%--------------------------------------------------------------------
+-export([
+ ]).
+
+%%--------------------------------------------------------------------
+%% Macros
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Records
+%%--------------------------------------------------------------------
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Func: start/2
+%% Returns: {ok, Pid} |
+%% {ok, Pid, State} |
+%% {error, Reason}
+%%--------------------------------------------------------------------
+start(_Type, _StartArgs) ->
+ case ibrowse_sup:start_link() of
+ {ok, Pid} ->
+ {ok, Pid};
+ Error ->
+ Error
+ end.
+
+%%--------------------------------------------------------------------
+%% Func: stop/1
+%% Returns: any
+%%--------------------------------------------------------------------
+stop(_State) ->
+ ok.
+
+%%====================================================================
+%% Internal functions
+%%====================================================================
diff --git a/deps/ibrowse/src/ibrowse_http_client.erl b/deps/ibrowse/src/ibrowse_http_client.erl
new file mode 100644
index 00000000..eb2bf315
--- /dev/null
+++ b/deps/ibrowse/src/ibrowse_http_client.erl
@@ -0,0 +1,1855 @@
+%%%-------------------------------------------------------------------
+%%% File : ibrowse_http_client.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description : Handles a single TCP connection to a webserver
+%%%
+%%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_http_client).
+-behaviour(gen_server).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+-export([
+ start_link/1,
+ start/1,
+ stop/1,
+ send_req/7
+ ]).
+
+-ifdef(debug).
+-compile(export_all).
+-endif.
+
+%% gen_server callbacks
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+ ]).
+
+-include("ibrowse.hrl").
+-include_lib("kernel/include/inet.hrl").
+
+-record(state, {host, port, connect_timeout,
+ inactivity_timer_ref,
+ use_proxy = false, proxy_auth_digest,
+ ssl_options = [], is_ssl = false, socket,
+ proxy_tunnel_setup = false,
+ tunnel_setup_queue = [],
+ reqs=queue:new(), cur_req, status=idle, http_status_code,
+ reply_buffer = <<>>, rep_buf_size=0, streamed_size = 0,
+ recvd_headers=[],
+ status_line, raw_headers,
+ is_closing, send_timer, content_length,
+ deleted_crlf = false, transfer_encoding,
+ chunk_size, chunk_size_buffer = <<>>,
+ recvd_chunk_size, interim_reply_sent = false,
+ lb_ets_tid, cur_pipeline_size = 0, prev_req_id
+ }).
+
+-record(request, {url, method, options, from,
+ stream_to, caller_controls_socket = false,
+ caller_socket_options = [],
+ req_id,
+ stream_chunk_size,
+ save_response_to_file = false,
+ tmp_file_name, tmp_file_fd, preserve_chunked_encoding,
+ response_format}).
+
+-import(ibrowse_lib, [
+ get_value/2,
+ get_value/3,
+ do_trace/2
+ ]).
+
+-define(DEFAULT_STREAM_CHUNK_SIZE, 1024*1024).
+-define(dec2hex(X), erlang:integer_to_list(X, 16)).
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start/1, start_link/1
+%% Description: Starts the server
+%%--------------------------------------------------------------------
+start(Args) ->
+ gen_server:start(?MODULE, Args, []).
+
+start_link(Args) ->
+ gen_server:start_link(?MODULE, Args, []).
+
+stop(Conn_pid) ->
+ case catch gen_server:call(Conn_pid, stop) of
+ {'EXIT', {timeout, _}} ->
+ exit(Conn_pid, kill),
+ ok;
+ _ ->
+ ok
+ end.
+
+send_req(Conn_Pid, Url, Headers, Method, Body, Options, Timeout) ->
+ gen_server:call(
+ Conn_Pid,
+ {send_req, {Url, Headers, Method, Body, Options, Timeout}}, Timeout).
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init/1
+%% Description: Initiates the server
+%% Returns: {ok, State} |
+%% {ok, State, Timeout} |
+%% ignore |
+%% {stop, Reason}
+%%--------------------------------------------------------------------
+init({Lb_Tid, #url{host = Host, port = Port}, {SSLOptions, Is_ssl}}) ->
+ State = #state{host = Host,
+ port = Port,
+ ssl_options = SSLOptions,
+ is_ssl = Is_ssl,
+ lb_ets_tid = Lb_Tid},
+ put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
+ put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
+ {ok, State};
+init(Url) when is_list(Url) ->
+ case catch ibrowse_lib:parse_url(Url) of
+ #url{protocol = Protocol} = Url_rec ->
+ init({undefined, Url_rec, {[], Protocol == https}});
+        {'EXIT', _} ->
+            %% {stop, Reason} is the error return init/1 must use; it makes
+            %% gen_server:start report {error, invalid_url} to the caller
+            {stop, invalid_url}
+ end;
+init({Host, Port}) ->
+ State = #state{host = Host,
+ port = Port},
+ put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
+ put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_call/3
+%% Description: Handling call messages
+%% Returns: {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} | (terminate/2 is called)
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+%% Received a request when the remote server has already sent us a
+%% Connection: Close header
+handle_call({send_req, _}, _From, #state{is_closing = true} = State) ->
+ {reply, {error, connection_closing}, State};
+
+handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
+ From, State) ->
+ send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State);
+
+handle_call(stop, _From, State) ->
+ do_close(State),
+ do_error_reply(State, closing_on_request),
+ {stop, normal, ok, State};
+
+handle_call(Request, _From, State) ->
+ Reply = {unknown_request, Request},
+ {reply, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast/2
+%% Description: Handling cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info/2
+%% Description: Handling all non call/cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_info({tcp, _Sock, Data}, #state{status = Status} = State) ->
+%% io:format("Recvd data: ~p~n", [Data]),
+ do_trace("Data recvd in state: ~p. Size: ~p. ~p~n~n", [Status, size(Data), Data]),
+ handle_sock_data(Data, State);
+handle_info({ssl, _Sock, Data}, State) ->
+ handle_sock_data(Data, State);
+
+handle_info({stream_next, Req_id}, #state{socket = Socket,
+ cur_req = #request{req_id = Req_id}} = State) ->
+ %% io:format("Client process set {active, once}~n", []),
+ do_setopts(Socket, [{active, once}], State),
+ {noreply, set_inac_timer(State)};
+
+handle_info({stream_next, _Req_id}, State) ->
+ _Cur_req_id = case State#state.cur_req of
+ #request{req_id = Cur} ->
+ Cur;
+ _ ->
+ undefined
+ end,
+%% io:format("Ignoring stream_next as ~1000.p is not cur req (~1000.p)~n",
+%% [_Req_id, _Cur_req_id]),
+ {noreply, State};
+
+handle_info({stream_close, _Req_id}, State) ->
+ shutting_down(State),
+ do_close(State),
+ do_error_reply(State, closing_on_request),
+ {stop, normal, State};
+
+handle_info({tcp_closed, _Sock}, State) ->
+ do_trace("TCP connection closed by peer!~n", []),
+ handle_sock_closed(State),
+ {stop, normal, State};
+handle_info({ssl_closed, _Sock}, State) ->
+ do_trace("SSL connection closed by peer!~n", []),
+ handle_sock_closed(State),
+ {stop, normal, State};
+
+handle_info({tcp_error, _Sock, Reason}, State) ->
+ do_trace("Error on connection to ~1000.p:~1000.p -> ~1000.p~n",
+ [State#state.host, State#state.port, Reason]),
+ handle_sock_closed(State),
+ {stop, normal, State};
+handle_info({ssl_error, _Sock, Reason}, State) ->
+ do_trace("Error on SSL connection to ~1000.p:~1000.p -> ~1000.p~n",
+ [State#state.host, State#state.port, Reason]),
+ handle_sock_closed(State),
+ {stop, normal, State};
+
+handle_info({req_timedout, From}, State) ->
+ case lists:keymember(From, #request.from, queue:to_list(State#state.reqs)) of
+ false ->
+ {noreply, State};
+ true ->
+ shutting_down(State),
+ do_error_reply(State, req_timedout),
+ {stop, normal, State}
+ end;
+
+handle_info(timeout, State) ->
+ do_trace("Inactivity timeout triggered. Shutting down connection~n", []),
+ shutting_down(State),
+ do_error_reply(State, req_timedout),
+ {stop, normal, State};
+
+handle_info({trace, Bool}, State) ->
+ put(my_trace_flag, Bool),
+ {noreply, State};
+
+handle_info(Info, State) ->
+ io:format("Unknown message recvd for ~1000.p:~1000.p -> ~p~n",
+ [State#state.host, State#state.port, Info]),
+ io:format("Recvd unknown message ~p when in state: ~p~n", [Info, State]),
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: terminate/2
+%% Description: Shutdown the server
+%% Returns: any (ignored by gen_server)
+%%--------------------------------------------------------------------
+terminate(_Reason, State) ->
+ do_close(State),
+ ok.
+
+%%--------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Convert process state when code is changed
+%% Returns: {ok, NewState}
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Handles data recvd on the socket
+%%--------------------------------------------------------------------
+handle_sock_data(Data, #state{status=idle}=State) ->
+    do_trace("Data recvd on socket in state idle! ~1000.p~n", [Data]),
+ shutting_down(State),
+ do_error_reply(State, data_in_status_idle),
+ do_close(State),
+ {stop, normal, State};
+
+handle_sock_data(Data, #state{status = get_header}=State) ->
+ case parse_response(Data, State) of
+ {error, _Reason} ->
+ shutting_down(State),
+ {stop, normal, State};
+ #state{socket = Socket, status = Status, cur_req = CurReq} = State_1 ->
+ case {Status, CurReq} of
+ {get_header, #request{caller_controls_socket = true}} ->
+ do_setopts(Socket, [{active, once}], State_1);
+ _ ->
+ active_once(State_1)
+ end,
+ {noreply, set_inac_timer(State_1)}
+ end;
+
+handle_sock_data(Data, #state{status = get_body,
+ socket = Socket,
+ content_length = CL,
+ http_status_code = StatCode,
+ recvd_headers = Headers,
+ chunk_size = CSz} = State) ->
+ case (CL == undefined) and (CSz == undefined) of
+ true ->
+ case accumulate_response(Data, State) of
+ {error, Reason} ->
+ shutting_down(State),
+ fail_pipelined_requests(State,
+ {error, {Reason, {stat_code, StatCode}, Headers}}),
+ {stop, normal, State};
+ State_1 ->
+ active_once(State_1),
+ State_2 = set_inac_timer(State_1),
+ {noreply, State_2}
+ end;
+ _ ->
+ case parse_11_response(Data, State) of
+ {error, Reason} ->
+ shutting_down(State),
+ fail_pipelined_requests(State,
+ {error, {Reason, {stat_code, StatCode}, Headers}}),
+ {stop, normal, State};
+ #state{cur_req = #request{caller_controls_socket = Ccs},
+ interim_reply_sent = Irs} = State_1 ->
+ case Irs of
+ true ->
+ active_once(State_1);
+ false when Ccs == true ->
+ do_setopts(Socket, [{active, once}], State);
+ false ->
+ active_once(State_1)
+ end,
+ State_2 = State_1#state{interim_reply_sent = false},
+ case Ccs of
+ true ->
+ cancel_timer(State_2#state.inactivity_timer_ref, {eat_message, timeout}),
+ {noreply, State_2#state{inactivity_timer_ref = undefined}};
+ _ ->
+ {noreply, set_inac_timer(State_2)}
+ end;
+ State_1 ->
+ active_once(State_1),
+ State_2 = set_inac_timer(State_1),
+ {noreply, State_2}
+ end
+ end.
+
+accumulate_response(Data,
+ #state{
+ cur_req = #request{save_response_to_file = Srtf,
+ tmp_file_fd = undefined} = CurReq,
+ http_status_code=[$2 | _]}=State) when Srtf /= false ->
+ TmpFilename = make_tmp_filename(Srtf),
+ case file:open(TmpFilename, [write, delayed_write, raw]) of
+ {ok, Fd} ->
+ accumulate_response(Data, State#state{
+ cur_req = CurReq#request{
+ tmp_file_fd = Fd,
+ tmp_file_name = TmpFilename}});
+ {error, Reason} ->
+ {error, {file_open_error, Reason}}
+ end;
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf,
+ tmp_file_fd = Fd},
+ transfer_encoding=chunked,
+ reply_buffer = Reply_buf,
+ http_status_code=[$2 | _]
+ } = State) when Srtf /= false ->
+ case file:write(Fd, [Reply_buf, Data]) of
+ ok ->
+ State#state{reply_buffer = <<>>};
+ {error, Reason} ->
+ {error, {file_write_error, Reason}}
+ end;
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf,
+ tmp_file_fd = Fd},
+ reply_buffer = RepBuf,
+ http_status_code=[$2 | _]
+ } = State) when Srtf /= false ->
+ case file:write(Fd, [RepBuf, Data]) of
+ ok ->
+ State#state{reply_buffer = <<>>};
+ {error, Reason} ->
+ {error, {file_write_error, Reason}}
+ end;
+accumulate_response(Data, #state{reply_buffer = RepBuf,
+ rep_buf_size = RepBufSize,
+ streamed_size = Streamed_size,
+ cur_req = CurReq}=State) ->
+ #request{stream_to = StreamTo,
+ req_id = ReqId,
+ stream_chunk_size = Stream_chunk_size,
+ response_format = Response_format,
+ caller_controls_socket = Caller_controls_socket} = CurReq,
+ RepBuf_1 = <<RepBuf/binary, Data/binary>>,
+ New_data_size = RepBufSize - Streamed_size,
+ case StreamTo of
+ undefined ->
+ State#state{reply_buffer = RepBuf_1};
+ _ when Caller_controls_socket == true ->
+ do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
+ State#state{reply_buffer = <<>>,
+ interim_reply_sent = true,
+ streamed_size = Streamed_size + size(RepBuf_1)};
+ _ when New_data_size >= Stream_chunk_size ->
+ {Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
+ do_interim_reply(StreamTo, Response_format, ReqId, Stream_chunk),
+ State_1 = State#state{
+ reply_buffer = <<>>,
+ interim_reply_sent = true,
+ streamed_size = Streamed_size + Stream_chunk_size},
+ case Rem_data of
+ <<>> ->
+ State_1;
+ _ ->
+ accumulate_response(Rem_data, State_1)
+ end;
+ _ ->
+ State#state{reply_buffer = RepBuf_1}
+ end.
+
+make_tmp_filename(true) ->
+ DownloadDir = ibrowse:get_config_value(download_dir, filename:absname("./")),
+ {A,B,C} = now(),
+ filename:join([DownloadDir,
+ "ibrowse_tmp_file_"++
+ integer_to_list(A) ++
+ integer_to_list(B) ++
+ integer_to_list(C)]);
+make_tmp_filename(File) when is_list(File) ->
+ File.
+
+
+%%--------------------------------------------------------------------
+%% Handles the case when the server closes the socket
+%%--------------------------------------------------------------------
+handle_sock_closed(#state{status=get_header} = State) ->
+ shutting_down(State),
+ do_error_reply(State, connection_closed);
+
+handle_sock_closed(#state{cur_req=undefined} = State) ->
+ shutting_down(State);
+
+%% We check for IsClosing because the server could have sent a
+%% Connection: Close header and closed the socket to indicate the end
+%% of the response. There may be pipelined requests which still need a
+%% response.
+handle_sock_closed(#state{reply_buffer = Buf, reqs = Reqs, http_status_code = SC,
+ is_closing = IsClosing,
+ cur_req = #request{tmp_file_name=TmpFilename,
+ tmp_file_fd=Fd} = CurReq,
+ status = get_body,
+ recvd_headers = Headers,
+ status_line = Status_line,
+ raw_headers = Raw_headers
+ }=State) ->
+ #request{from=From, stream_to=StreamTo, req_id=ReqId,
+ response_format = Resp_format,
+ options = Options} = CurReq,
+ case IsClosing of
+ true ->
+ {_, Reqs_1} = queue:out(Reqs),
+ Body = case TmpFilename of
+ undefined ->
+ Buf;
+ _ ->
+ ok = file:close(Fd),
+ {file, TmpFilename}
+ end,
+ Reply = case get_value(give_raw_headers, Options, false) of
+ true ->
+ {ok, Status_line, Raw_headers, Body};
+ false ->
+ {ok, SC, Headers, Buf}
+ end,
+ State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+ ok = do_error_reply(State_1#state{reqs = Reqs_1}, connection_closed),
+ State_1;
+ _ ->
+ ok = do_error_reply(State, connection_closed),
+ State
+ end.
+
+do_connect(Host, Port, Options, #state{is_ssl = true,
+ use_proxy = false,
+ ssl_options = SSLOptions},
+ Timeout) ->
+ ssl:connect(Host, Port, get_sock_options(Host, Options, SSLOptions), Timeout);
+do_connect(Host, Port, Options, _State, Timeout) ->
+ gen_tcp:connect(Host, Port, get_sock_options(Host, Options, []), Timeout).
+
+get_sock_options(Host, Options, SSLOptions) ->
+ Caller_socket_options = get_value(socket_options, Options, []),
+ Ipv6Options = case is_ipv6_host(Host) of
+ true ->
+ [inet6];
+ false ->
+ []
+ end,
+ Other_sock_options = filter_sock_options(SSLOptions ++ Caller_socket_options ++ Ipv6Options),
+ case lists:keysearch(nodelay, 1, Other_sock_options) of
+ false ->
+ [{nodelay, true}, binary, {active, false} | Other_sock_options];
+ {value, _} ->
+ [binary, {active, false} | Other_sock_options]
+ end.
+
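+%% Returns true for literal IPv6 addresses (e.g. "::1") and for hostnames
+%% which resolve to an inet6 address; false for IPv4 (e.g. "127.0.0.1").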
+is_ipv6_host(Host) ->
+ case inet_parse:address(Host) of
+ {ok, {_, _, _, _, _, _, _, _}} ->
+ true;
+ {ok, {_, _, _, _}} ->
+ false;
+ _ ->
+ case inet:gethostbyname(Host) of
+ {ok, #hostent{h_addrtype = inet6}} ->
+ true;
+ _ ->
+ false
+ end
+ end.
+
+%% We don't want the caller to specify certain options
+filter_sock_options(Opts) ->
+ lists:filter(fun({active, _}) ->
+ false;
+ ({packet, _}) ->
+ false;
+ (list) ->
+ false;
+ (_) ->
+ true
+ end, Opts).
+
+do_send(Req, #state{socket = Sock,
+ is_ssl = true,
+ use_proxy = true,
+ proxy_tunnel_setup = Pts}) when Pts /= done -> gen_tcp:send(Sock, Req);
+do_send(Req, #state{socket = Sock, is_ssl = true}) -> ssl:send(Sock, Req);
+do_send(Req, #state{socket = Sock, is_ssl = false}) -> gen_tcp:send(Sock, Req).
+
+%% @spec do_send_body(Sock::socket_descriptor(), Source::source_descriptor(), IsSSL::boolean()) -> ok | error()
+%% source_descriptor() = fun_arity_0 |
+%% {fun_arity_0} |
+%% {fun_arity_1, term()}
+%% error() = term()
+do_send_body(Source, State, TE) when is_function(Source) ->
+ do_send_body({Source}, State, TE);
+do_send_body({Source}, State, TE) when is_function(Source) ->
+ do_send_body1(Source, Source(), State, TE);
+do_send_body({Source, Source_state}, State, TE) when is_function(Source) ->
+ do_send_body1(Source, Source(Source_state), State, TE);
+do_send_body(Body, State, _TE) ->
+ do_send(Body, State).
+
+do_send_body1(Source, Resp, State, TE) ->
+ case Resp of
+ {ok, Data} ->
+ do_send(maybe_chunked_encode(Data, TE), State),
+ do_send_body({Source}, State, TE);
+ {ok, Data, New_source_state} ->
+ do_send(maybe_chunked_encode(Data, TE), State),
+ do_send_body({Source, New_source_state}, State, TE);
+ eof when TE == true ->
+ do_send(<<"0\r\n\r\n">>, State),
+ ok;
+ eof ->
+ ok;
+ Err ->
+ Err
+ end.
+
+maybe_chunked_encode(Data, false) ->
+ Data;
+maybe_chunked_encode(Data, true) ->
+ [?dec2hex(iolist_size(Data)), "\r\n", Data, "\r\n"].
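+%% For example, maybe_chunked_encode(<<"hello">>, true) yields the iolist
+%% ["5", "\r\n", <<"hello">>, "\r\n"] -- a single HTTP chunk. The
+%% terminating "0\r\n\r\n" chunk is sent by do_send_body1/4 when the body
+%% fun returns eof.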
+
+do_close(#state{socket = undefined}) -> ok;
+do_close(#state{socket = Sock,
+ is_ssl = true,
+ use_proxy = true,
+ proxy_tunnel_setup = Pts
+ }) when Pts /= done -> catch gen_tcp:close(Sock);
+do_close(#state{socket = Sock, is_ssl = true}) -> catch ssl:close(Sock);
+do_close(#state{socket = Sock, is_ssl = false}) -> catch gen_tcp:close(Sock).
+
+active_once(#state{cur_req = #request{caller_controls_socket = true}}) ->
+ ok;
+active_once(#state{socket = Socket} = State) ->
+ do_setopts(Socket, [{active, once}], State).
+
+do_setopts(_Sock, [], _) -> ok;
+do_setopts(Sock, Opts, #state{is_ssl = true,
+ use_proxy = true,
+ proxy_tunnel_setup = Pts}
+ ) when Pts /= done -> inet:setopts(Sock, Opts);
+do_setopts(Sock, Opts, #state{is_ssl = true}) -> ssl:setopts(Sock, Opts);
+do_setopts(Sock, Opts, _) -> inet:setopts(Sock, Opts).
+
+check_ssl_options(Options, State) ->
+ case get_value(is_ssl, Options, false) of
+ false ->
+ State;
+ true ->
+ State#state{is_ssl=true, ssl_options=get_value(ssl_options, Options)}
+ end.
+
+send_req_1(From,
+ #url{host = Host,
+ port = Port} = Url,
+ Headers, Method, Body, Options, Timeout,
+ #state{socket = undefined} = State) ->
+ {Host_1, Port_1, State_1} =
+ case get_value(proxy_host, Options, false) of
+ false ->
+ {Host, Port, State};
+ PHost ->
+ ProxyUser = get_value(proxy_user, Options, []),
+ ProxyPassword = get_value(proxy_password, Options, []),
+ Digest = http_auth_digest(ProxyUser, ProxyPassword),
+ {PHost, get_value(proxy_port, Options, 80),
+ State#state{use_proxy = true,
+ proxy_auth_digest = Digest}}
+ end,
+ State_2 = check_ssl_options(Options, State_1),
+ do_trace("Connecting...~n", []),
+ Conn_timeout = get_value(connect_timeout, Options, Timeout),
+ case do_connect(Host_1, Port_1, Options, State_2, Conn_timeout) of
+ {ok, Sock} ->
+ do_trace("Connected! Socket: ~1000.p~n", [Sock]),
+ State_3 = State_2#state{socket = Sock,
+ connect_timeout = Conn_timeout},
+ send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State_3);
+ Err ->
+ shutting_down(State_2),
+ do_trace("Error connecting. Reason: ~1000.p~n", [Err]),
+ gen_server:reply(From, {error, {conn_failed, Err}}),
+ {stop, normal, State_2}
+ end;
+
+%% Send a CONNECT request.
+%% Wait for 200 OK
+%% Upgrade to SSL connection
+%% Then send request
+
+send_req_1(From,
+ #url{
+ host = Server_host,
+ port = Server_port
+ } = Url,
+ Headers, Method, Body, Options, Timeout,
+ #state{
+ proxy_tunnel_setup = false,
+ use_proxy = true,
+ is_ssl = true} = State) ->
+ NewReq = #request{
+ method = connect,
+ preserve_chunked_encoding = get_value(preserve_chunked_encoding, Options, false),
+ options = Options
+ },
+ State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
+ Pxy_auth_headers = maybe_modify_headers(Url, Method, Options, [], State_1),
+ Path = [Server_host, $:, integer_to_list(Server_port)],
+ {Req, Body_1} = make_request(connect, Pxy_auth_headers,
+ Path, Path,
+ [], Options, State_1, undefined),
+ TE = is_chunked_encoding_specified(Options),
+ trace_request(Req),
+ case do_send(Req, State) of
+ ok ->
+ case do_send_body(Body_1, State_1, TE) of
+ ok ->
+ trace_request_body(Body_1),
+ active_once(State_1),
+ Ref = case Timeout of
+ infinity ->
+ undefined;
+ _ ->
+ erlang:send_after(Timeout, self(), {req_timedout, From})
+ end,
+ State_2 = State_1#state{status = get_header,
+ cur_req = NewReq,
+ send_timer = Ref,
+ proxy_tunnel_setup = in_progress,
+ tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout}]},
+ State_3 = set_inac_timer(State_2),
+ {noreply, State_3};
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, {send_failed, Err}}),
+ {stop, normal, State_1}
+ end;
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, {send_failed, Err}}),
+ {stop, normal, State_1}
+ end;
+
+send_req_1(From, Url, Headers, Method, Body, Options, Timeout,
+ #state{proxy_tunnel_setup = in_progress,
+ tunnel_setup_queue = Q} = State) ->
+ do_trace("Queued SSL request awaiting tunnel setup: ~n"
+ "URL : ~s~n"
+ "Method : ~p~n"
+ "Headers : ~p~n", [Url, Method, Headers]),
+ {noreply, State#state{tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout} | Q]}};
+
+send_req_1(From,
+ #url{abspath = AbsPath,
+ path = RelPath} = Url,
+ Headers, Method, Body, Options, Timeout,
+ #state{status = Status,
+ socket = Socket} = State) ->
+ cancel_timer(State#state.inactivity_timer_ref, {eat_message, timeout}),
+ ReqId = make_req_id(),
+ Resp_format = get_value(response_format, Options, list),
+ Caller_socket_options = get_value(socket_options, Options, []),
+ {StreamTo, Caller_controls_socket} =
+ case get_value(stream_to, Options, undefined) of
+ {Caller, once} when is_pid(Caller) or
+ is_atom(Caller) ->
+ Async_pid_rec = {{req_id_pid, ReqId}, self()},
+ true = ets:insert(ibrowse_stream, Async_pid_rec),
+ {Caller, true};
+ undefined ->
+ {undefined, false};
+ Caller when is_pid(Caller) or
+ is_atom(Caller) ->
+ {Caller, false};
+ Stream_to_inv ->
+ exit({invalid_option, {stream_to, Stream_to_inv}})
+ end,
+ SaveResponseToFile = get_value(save_response_to_file, Options, false),
+ NewReq = #request{url = Url,
+ method = Method,
+ stream_to = StreamTo,
+ caller_controls_socket = Caller_controls_socket,
+ caller_socket_options = Caller_socket_options,
+ options = Options,
+ req_id = ReqId,
+ save_response_to_file = SaveResponseToFile,
+ stream_chunk_size = get_stream_chunk_size(Options),
+ response_format = Resp_format,
+ from = From,
+ preserve_chunked_encoding = get_value(preserve_chunked_encoding, Options, false)
+ },
+ State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
+ Headers_1 = maybe_modify_headers(Url, Method, Options, Headers, State_1),
+ {Req, Body_1} = make_request(Method,
+ Headers_1,
+ AbsPath, RelPath, Body, Options, State_1,
+ ReqId),
+ trace_request(Req),
+ do_setopts(Socket, Caller_socket_options, State_1),
+ TE = is_chunked_encoding_specified(Options),
+ case do_send(Req, State_1) of
+ ok ->
+ case do_send_body(Body_1, State_1, TE) of
+ ok ->
+ trace_request_body(Body_1),
+ State_2 = inc_pipeline_counter(State_1),
+ active_once(State_2),
+ Ref = case Timeout of
+ infinity ->
+ undefined;
+ _ ->
+ erlang:send_after(Timeout, self(), {req_timedout, From})
+ end,
+ State_3 = case Status of
+ idle ->
+ State_2#state{status = get_header,
+ cur_req = NewReq,
+ send_timer = Ref};
+ _ ->
+ State_2#state{send_timer = Ref}
+ end,
+ case StreamTo of
+ undefined ->
+ ok;
+ _ ->
+ gen_server:reply(From, {ibrowse_req_id, ReqId})
+ end,
+ State_4 = set_inac_timer(State_3),
+ {noreply, State_4};
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, {send_failed, Err}}),
+ {stop, normal, State_1}
+ end;
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, {send_failed, Err}}),
+ {stop, normal, State_1}
+ end.
+
+maybe_modify_headers(#url{}, connect, _, Headers, State) ->
+ add_proxy_auth_headers(State, Headers);
+maybe_modify_headers(#url{host = Host, port = Port} = Url,
+ _Method,
+ Options, Headers, State) ->
+ case get_value(headers_as_is, Options, false) of
+ false ->
+ Headers_1 = add_auth_headers(Url, Options, Headers, State),
+ HostHeaderValue = case lists:keysearch(host_header, 1, Options) of
+ false ->
+ case Port of
+ 80 -> Host;
+ 443 -> Host;
+ _ -> [Host, ":", integer_to_list(Port)]
+ end;
+ {value, {_, Host_h_val}} ->
+ Host_h_val
+ end,
+ [{"Host", HostHeaderValue} | Headers_1];
+ true ->
+ Headers
+ end.
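+%% e.g. for a request to http://example.com:8080/ the clause above
+%% yields a {"Host", ["example.com", ":", "8080"]} entry (an iolist),
+%% while the default ports 80 and 443 yield {"Host", "example.com"}.
+%% (Host name here is illustrative.)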
+
+add_auth_headers(#url{username = User,
+ password = UPw},
+ Options,
+ Headers,
+ State) ->
+ Headers_1 = case User of
+ undefined ->
+ case get_value(basic_auth, Options, undefined) of
+ undefined ->
+ Headers;
+ {U,P} ->
+ [{"Authorization", ["Basic ", http_auth_digest(U, P)]} | Headers]
+ end;
+ _ ->
+ [{"Authorization", ["Basic ", http_auth_digest(User, UPw)]} | Headers]
+ end,
+ add_proxy_auth_headers(State, Headers_1).
+
+add_proxy_auth_headers(#state{use_proxy = false}, Headers) ->
+ Headers;
+add_proxy_auth_headers(#state{proxy_auth_digest = []}, Headers) ->
+ Headers;
+add_proxy_auth_headers(#state{proxy_auth_digest = Auth_digest}, Headers) ->
+ [{"Proxy-Authorization", ["Basic ", Auth_digest]} | Headers].
+
+http_auth_digest([], []) ->
+ [];
+http_auth_digest(Username, Password) ->
+ ibrowse_lib:encode_base64(Username ++ [$: | Password]).
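+%% e.g. http_auth_digest("user", "pass") -> "dXNlcjpwYXNz", which the
+%% callers above send as {"Authorization", ["Basic ", "dXNlcjpwYXNz"]}.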
+
+make_request(Method, Headers, AbsPath, RelPath, Body, Options,
+ #state{use_proxy = UseProxy, is_ssl = Is_ssl}, ReqId) ->
+ HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})),
+ Fun1 = fun({X, Y}) when is_atom(X) ->
+ {to_lower(atom_to_list(X)), X, Y};
+ ({X, Y}) when is_list(X) ->
+ {to_lower(X), X, Y}
+ end,
+ Headers_0 = [Fun1(X) || X <- Headers],
+ Headers_1 =
+ case lists:keysearch("content-length", 1, Headers_0) of
+ false when (Body =:= [] orelse Body =:= <<>>) andalso
+ (Method =:= post orelse Method =:= put) ->
+ [{"content-length", "Content-Length", "0"} | Headers_0];
+ false when is_binary(Body) orelse is_list(Body) ->
+ [{"content-length", "Content-Length", integer_to_list(iolist_size(Body))} | Headers_0];
+ _ ->
+ %% Content-Length is already specified or Body is a
+ %% function or function/state pair
+ Headers_0
+ end,
+ {Headers_2, Body_1} =
+ case is_chunked_encoding_specified(Options) of
+ false ->
+ {[{Y, Z} || {_, Y, Z} <- Headers_1], Body};
+ true ->
+ Chunk_size_1 = case get_value(transfer_encoding, Options) of
+ chunked ->
+ 5120;
+ {chunked, Chunk_size} ->
+ Chunk_size
+ end,
+ {[{Y, Z} || {X, Y, Z} <- Headers_1,
+ X /= "content-length"] ++
+ [{"Transfer-Encoding", "chunked"}],
+ chunk_request_body(Body, Chunk_size_1)}
+ end,
+ Headers_3 = case lists:member({include_ibrowse_req_id, true}, Options) of
+ true ->
+ [{"x-ibrowse-request-id", io_lib:format("~1000.p",[ReqId])} | Headers_2];
+ false ->
+ Headers_2
+ end,
+ Headers_4 = cons_headers(Headers_3),
+ Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of
+ true ->
+ case Is_ssl of
+ true ->
+ RelPath;
+ false ->
+ AbsPath
+ end;
+ false ->
+ RelPath
+ end,
+ {[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_4, crnl()], Body_1}.
+
+is_chunked_encoding_specified(Options) ->
+ case get_value(transfer_encoding, Options, false) of
+ false ->
+ false;
+ {chunked, _} ->
+ true;
+ chunked ->
+ true
+ end.
+
+http_vsn_string({0,9}) -> "HTTP/0.9";
+http_vsn_string({1,0}) -> "HTTP/1.0";
+http_vsn_string({1,1}) -> "HTTP/1.1".
+
+cons_headers(Headers) ->
+ cons_headers(Headers, []).
+cons_headers([], Acc) ->
+ encode_headers(Acc);
+cons_headers([{basic_auth, {U,P}} | T], Acc) ->
+ cons_headers(T, [{"Authorization",
+ ["Basic ", ibrowse_lib:encode_base64(U++":"++P)]} | Acc]);
+cons_headers([{cookie, Cookie} | T], Acc) ->
+ cons_headers(T, [{"Cookie", Cookie} | Acc]);
+cons_headers([{content_length, L} | T], Acc) ->
+ cons_headers(T, [{"Content-Length", L} | Acc]);
+cons_headers([{content_type, L} | T], Acc) ->
+ cons_headers(T, [{"Content-Type", L} | Acc]);
+cons_headers([H | T], Acc) ->
+ cons_headers(T, [H | Acc]).
+
+encode_headers(L) ->
+ encode_headers(L, []).
+encode_headers([{http_vsn, _Val} | T], Acc) ->
+ encode_headers(T, Acc);
+encode_headers([{Name,Val} | T], Acc) when is_list(Name) ->
+ encode_headers(T, [[Name, ": ", fmt_val(Val), crnl()] | Acc]);
+encode_headers([{Name,Val} | T], Acc) when is_atom(Name) ->
+ encode_headers(T, [[atom_to_list(Name), ": ", fmt_val(Val), crnl()] | Acc]);
+encode_headers([], Acc) ->
+ lists:reverse(Acc).
+
+chunk_request_body(Body, _ChunkSize) when is_tuple(Body) orelse
+ is_function(Body) ->
+ Body;
+chunk_request_body(Body, ChunkSize) ->
+ chunk_request_body(Body, ChunkSize, []).
+
+chunk_request_body(Body, _ChunkSize, Acc) when Body == <<>>; Body == [] ->
+ LastChunk = "0\r\n",
+ lists:reverse(["\r\n", LastChunk | Acc]);
+chunk_request_body(Body, ChunkSize, Acc) when is_binary(Body),
+ size(Body) >= ChunkSize ->
+ <<ChunkBody:ChunkSize/binary, Rest/binary>> = Body,
+ Chunk = [?dec2hex(ChunkSize),"\r\n",
+ ChunkBody, "\r\n"],
+ chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
+chunk_request_body(Body, _ChunkSize, Acc) when is_binary(Body) ->
+ BodySize = size(Body),
+ Chunk = [?dec2hex(BodySize),"\r\n",
+ Body, "\r\n"],
+ LastChunk = "0\r\n",
+ lists:reverse(["\r\n", LastChunk, Chunk | Acc]);
+chunk_request_body(Body, ChunkSize, Acc) when length(Body) >= ChunkSize ->
+ {ChunkBody, Rest} = split_list_at(Body, ChunkSize),
+ Chunk = [?dec2hex(ChunkSize),"\r\n",
+ ChunkBody, "\r\n"],
+ chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
+chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
+ BodySize = length(Body),
+ Chunk = [?dec2hex(BodySize),"\r\n",
+ Body, "\r\n"],
+ LastChunk = "0\r\n",
+ lists:reverse(["\r\n", LastChunk, Chunk | Acc]).
+
+
+parse_response(_Data, #state{cur_req = undefined}=State) ->
+ State#state{status = idle};
+parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
+ cur_req = CurReq} = State) ->
+ #request{from=From, stream_to=StreamTo, req_id=ReqId,
+ method=Method, response_format = Resp_format,
+ options = Options
+ } = CurReq,
+ MaxHeaderSize = ibrowse:get_config_value(max_headers_size, infinity),
+ case scan_header(Acc, Data) of
+ {yes, Headers, Data_1} ->
+ do_trace("Recvd Header Data -> ~s~n----~n", [Headers]),
+ do_trace("Recvd headers~n--- Headers Begin ---~n~s~n--- Headers End ---~n~n", [Headers]),
+ {HttpVsn, StatCode, Headers_1, Status_line, Raw_headers} = parse_headers(Headers),
+ do_trace("HttpVsn: ~p StatusCode: ~p Headers_1 -> ~1000.p~n", [HttpVsn, StatCode, Headers_1]),
+ LCHeaders = [{to_lower(X), Y} || {X,Y} <- Headers_1],
+ ConnClose = to_lower(get_value("connection", LCHeaders, "false")),
+ IsClosing = is_connection_closing(HttpVsn, ConnClose),
+ case IsClosing of
+ true ->
+ shutting_down(State);
+ false ->
+ ok
+ end,
+ Give_raw_headers = get_value(give_raw_headers, Options, false),
+ State_1 = case Give_raw_headers of
+ true ->
+ State#state{recvd_headers=Headers_1, status=get_body,
+ reply_buffer = <<>>,
+ status_line = Status_line,
+ raw_headers = Raw_headers,
+ http_status_code=StatCode, is_closing=IsClosing};
+ false ->
+ State#state{recvd_headers=Headers_1, status=get_body,
+ reply_buffer = <<>>,
+ http_status_code=StatCode, is_closing=IsClosing}
+ end,
+ put(conn_close, ConnClose),
+ TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
+ case get_value("content-length", LCHeaders, undefined) of
+ _ when Method == connect,
+ hd(StatCode) == $2 ->
+ cancel_timer(State#state.send_timer),
+ {_, Reqs_1} = queue:out(Reqs),
+ upgrade_to_ssl(set_cur_request(State#state{reqs = Reqs_1,
+ recvd_headers = [],
+ status = idle
+ }));
+ _ when Method == connect ->
+ {_, Reqs_1} = queue:out(Reqs),
+ do_error_reply(State#state{reqs = Reqs_1},
+ {error, proxy_tunnel_failed}),
+ {error, proxy_tunnel_failed};
+ _ when Method == head ->
+ {_, Reqs_1} = queue:out(Reqs),
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
+ {ok, StatCode, Headers_1, []}),
+ cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+ State_2 = reset_state(State_1_1),
+ State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
+ parse_response(Data_1, State_3);
+ _ when hd(StatCode) =:= $1 ->
+ %% No message body is expected. Server may send
+ %% one or more 1XX responses before a proper
+ %% response.
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ do_trace("Recvd a status code of ~p. Ignoring and waiting for a proper response~n", [StatCode]),
+ parse_response(Data_1, State_1#state{recvd_headers = [],
+ status = get_header});
+ _ when StatCode =:= "204";
+ StatCode =:= "304" ->
+ %% No message body is expected for these Status Codes.
+ %% RFC2616 - Sec 4.4
+ {_, Reqs_1} = queue:out(Reqs),
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
+ {ok, StatCode, Headers_1, []}),
+ cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+ State_2 = reset_state(State_1_1),
+ State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
+ parse_response(Data_1, State_3);
+ _ when TransferEncoding =:= "chunked" ->
+ do_trace("Chunked encoding detected...~n",[]),
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ case parse_11_response(Data_1, State_1#state{transfer_encoding=chunked,
+ chunk_size=chunk_start,
+ reply_buffer = <<>>}) of
+ {error, Reason} ->
+ fail_pipelined_requests(State_1,
+ {error, {Reason,
+ {stat_code, StatCode}, Headers_1}}),
+ {error, Reason};
+ State_2 ->
+ State_2
+ end;
+ undefined when HttpVsn =:= "HTTP/1.0";
+ ConnClose =:= "close" ->
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ State_1#state{reply_buffer = Data_1};
+ undefined ->
+ fail_pipelined_requests(State_1,
+ {error, {content_length_undefined,
+ {stat_code, StatCode}, Headers}}),
+ {error, content_length_undefined};
+ V ->
+ case catch list_to_integer(V) of
+ V_1 when is_integer(V_1), V_1 >= 0 ->
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ do_trace("Recvd Content-Length of ~p~n", [V_1]),
+ State_2 = State_1#state{rep_buf_size=0,
+ reply_buffer = <<>>,
+ content_length=V_1},
+ case parse_11_response(Data_1, State_2) of
+ {error, Reason} ->
+ fail_pipelined_requests(State_1,
+ {error, {Reason,
+ {stat_code, StatCode}, Headers_1}}),
+ {error, Reason};
+ State_3 ->
+ State_3
+ end;
+ _ ->
+ fail_pipelined_requests(State_1,
+ {error, {content_length_undefined,
+ {stat_code, StatCode}, Headers}}),
+ {error, content_length_undefined}
+ end
+ end;
+ {no, Acc_1} when MaxHeaderSize == infinity ->
+ State#state{reply_buffer = Acc_1};
+ {no, Acc_1} when size(Acc_1) < MaxHeaderSize ->
+ State#state{reply_buffer = Acc_1};
+ {no, _Acc_1} ->
+ fail_pipelined_requests(State, {error, max_headers_size_exceeded}),
+ {error, max_headers_size_exceeded}
+ end.
+
+upgrade_to_ssl(#state{socket = Socket,
+ connect_timeout = Conn_timeout,
+ ssl_options = Ssl_options,
+ tunnel_setup_queue = Q} = State) ->
+ case ssl:connect(Socket, Ssl_options, Conn_timeout) of
+ {ok, Ssl_socket} ->
+ do_trace("Upgraded to SSL socket!!~n", []),
+ State_1 = State#state{socket = Ssl_socket,
+ proxy_tunnel_setup = done},
+ send_queued_requests(lists:reverse(Q), State_1);
+ Err ->
+ do_trace("Upgrade to SSL socket failed. Reson: ~p~n", [Err]),
+ do_error_reply(State, {error, {send_failed, Err}}),
+ {error, send_failed}
+ end.
+
+send_queued_requests([], State) ->
+ do_trace("Sent all queued requests via SSL connection~n", []),
+ State#state{tunnel_setup_queue = []};
+send_queued_requests([{From, Url, Headers, Method, Body, Options, Timeout} | Q],
+ State) ->
+ case send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State) of
+ {noreply, State_1} ->
+ send_queued_requests(Q, State_1);
+ Err ->
+ do_trace("Error sending queued SSL request: ~n"
+ "URL : ~s~n"
+ "Method : ~p~n"
+ "Headers : ~p~n", [Url, Method, Headers]),
+ do_error_reply(State, {error, {send_failed, Err}}),
+ {error, send_failed}
+ end.
+
+is_connection_closing("HTTP/0.9", _) -> true;
+is_connection_closing(_, "close") -> true;
+is_connection_closing("HTTP/1.0", "false") -> true;
+is_connection_closing(_, _) -> false.
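+%% Note: "false" above is the default returned by get_value/3 when no
+%% Connection header is present, not a header value. So, for example,
+%% an HTTP/1.0 response without a Connection header is treated as
+%% closing the connection.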
+
+%% This clause determines the chunk size when given data from the beginning of the chunk
+parse_11_response(DataRecvd,
+ #state{transfer_encoding = chunked,
+ chunk_size = chunk_start,
+ chunk_size_buffer = Chunk_sz_buf
+ } = State) ->
+ case scan_crlf(Chunk_sz_buf, DataRecvd) of
+ {yes, ChunkHeader, Data_1} ->
+ State_1 = maybe_accumulate_ce_data(State, <<ChunkHeader/binary, $\r, $\n>>),
+ ChunkSize = parse_chunk_header(ChunkHeader),
+ %%
+ %% Do we have to preserve the chunk encoding when
+ %% streaming? No. This should be transparent to the client
+ %% process (unless the preserve_chunked_encoding option is
+ %% set). Chunked encoding was only introduced to make
+ %% transfers efficient for the server.
+ %%
+ RemLen = size(Data_1),
+ do_trace("Determined chunk size: ~p. Already recvd: ~p~n",
+ [ChunkSize, RemLen]),
+ parse_11_response(Data_1, State_1#state{chunk_size_buffer = <<>>,
+ deleted_crlf = true,
+ recvd_chunk_size = 0,
+ chunk_size = ChunkSize});
+ {no, Data_1} ->
+ State#state{chunk_size_buffer = Data_1}
+ end;
+
+%% This clause is to remove the CRLF between two chunks
+%%
+parse_11_response(DataRecvd,
+ #state{transfer_encoding = chunked,
+ chunk_size = tbd,
+ chunk_size_buffer = Buf
+ } = State) ->
+ case scan_crlf(Buf, DataRecvd) of
+ {yes, _, NextChunk} ->
+ State_1 = maybe_accumulate_ce_data(State, <<$\r, $\n>>),
+ State_2 = State_1#state{chunk_size = chunk_start,
+ chunk_size_buffer = <<>>,
+ deleted_crlf = true},
+ parse_11_response(NextChunk, State_2);
+ {no, Data_1} ->
+ State#state{chunk_size_buffer = Data_1}
+ end;
+
+%% This clause deals with the end of a chunked transfer. ibrowse does
+%% not support Trailers in the Chunked Transfer encoding. Any trailer
+%% received is silently discarded.
+parse_11_response(DataRecvd,
+ #state{transfer_encoding = chunked, chunk_size = 0,
+ cur_req = CurReq,
+ deleted_crlf = DelCrlf,
+ chunk_size_buffer = Trailer,
+ reqs = Reqs} = State) ->
+ do_trace("Detected end of chunked transfer...~n", []),
+ DataRecvd_1 = case DelCrlf of
+ false ->
+ DataRecvd;
+ true ->
+ <<$\r, $\n, DataRecvd/binary>>
+ end,
+ case scan_header(Trailer, DataRecvd_1) of
+ {yes, TEHeaders, Rem} ->
+ {_, Reqs_1} = queue:out(Reqs),
+ State_1 = maybe_accumulate_ce_data(State, <<TEHeaders/binary, $\r, $\n>>),
+ State_2 = handle_response(CurReq,
+ State_1#state{reqs = Reqs_1}),
+ parse_response(Rem, reset_state(State_2));
+ {no, Rem} ->
+ accumulate_response(<<>>, State#state{chunk_size_buffer = Rem, deleted_crlf = false})
+ end;
+
+%% This clause extracts a chunk, given the size.
+parse_11_response(DataRecvd,
+ #state{transfer_encoding = chunked,
+ chunk_size = CSz,
+ recvd_chunk_size = Recvd_csz,
+ rep_buf_size = RepBufSz} = State) ->
+ NeedBytes = CSz - Recvd_csz,
+ DataLen = size(DataRecvd),
+ do_trace("Recvd more data: size: ~p. NeedBytes: ~p~n", [DataLen, NeedBytes]),
+ case DataLen >= NeedBytes of
+ true ->
+ {RemChunk, RemData} = split_binary(DataRecvd, NeedBytes),
+ do_trace("Recvd another chunk...~p~n", [RemChunk]),
+ do_trace("RemData -> ~p~n", [RemData]),
+ case accumulate_response(RemChunk, State) of
+ {error, Reason} ->
+ do_trace("Error accumulating response --> ~p~n", [Reason]),
+ {error, Reason};
+ #state{} = State_1 ->
+ State_2 = State_1#state{chunk_size=tbd},
+ parse_11_response(RemData, State_2)
+ end;
+ false ->
+ accumulate_response(DataRecvd,
+ State#state{rep_buf_size = RepBufSz + DataLen,
+ recvd_chunk_size = Recvd_csz + DataLen})
+ end;
+
+%% This clause to extract the body when Content-Length is specified
+parse_11_response(DataRecvd,
+ #state{content_length=CL, rep_buf_size=RepBufSz,
+ reqs=Reqs}=State) ->
+ NeedBytes = CL - RepBufSz,
+ DataLen = size(DataRecvd),
+ case DataLen >= NeedBytes of
+ true ->
+ {RemBody, Rem} = split_binary(DataRecvd, NeedBytes),
+ {_, Reqs_1} = queue:out(Reqs),
+ State_1 = accumulate_response(RemBody, State),
+ State_2 = handle_response(State_1#state.cur_req, State_1#state{reqs=Reqs_1}),
+ State_3 = reset_state(State_2),
+ parse_response(Rem, State_3);
+ false ->
+ accumulate_response(DataRecvd, State#state{rep_buf_size = (RepBufSz+DataLen)})
+ end.
+
+maybe_accumulate_ce_data(#state{cur_req = #request{preserve_chunked_encoding = false}} = State, _) ->
+ State;
+maybe_accumulate_ce_data(State, Data) ->
+ accumulate_response(Data, State).
+
+handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
+ response_format = Resp_format,
+ save_response_to_file = SaveResponseToFile,
+ tmp_file_name = TmpFilename,
+ tmp_file_fd = Fd,
+ options = Options
+ },
+ #state{http_status_code = SCode,
+ status_line = Status_line,
+ raw_headers = Raw_headers,
+ send_timer = ReqTimer,
+ reply_buffer = RepBuf,
+ recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
+ Body = RepBuf,
+ case Fd of
+ undefined ->
+ ok;
+ _ ->
+ ok = file:close(Fd)
+ end,
+ ResponseBody = case TmpFilename of
+ undefined ->
+ Body;
+ _ ->
+ {file, TmpFilename}
+ end,
+ {Resp_headers_1, Raw_headers_1} = maybe_add_custom_headers(RespHeaders, Raw_headers, Options),
+ Reply = case get_value(give_raw_headers, Options, false) of
+ true ->
+ {ok, Status_line, Raw_headers_1, ResponseBody};
+ false ->
+ {ok, SCode, Resp_headers_1, ResponseBody}
+ end,
+ State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+ cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
+ set_cur_request(State_1);
+handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
+ response_format = Resp_format,
+ options = Options},
+ #state{http_status_code = SCode,
+ status_line = Status_line,
+ raw_headers = Raw_headers,
+ recvd_headers = Resp_headers,
+ reply_buffer = RepBuf,
+ send_timer = ReqTimer} = State) ->
+ Body = RepBuf,
+ {Resp_headers_1, Raw_headers_1} = maybe_add_custom_headers(Resp_headers, Raw_headers, Options),
+ Reply = case get_value(give_raw_headers, Options, false) of
+ true ->
+ {ok, Status_line, Raw_headers_1, Body};
+ false ->
+ {ok, SCode, Resp_headers_1, Body}
+ end,
+ State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+ cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
+ set_cur_request(State_1).
+
+reset_state(State) ->
+ State#state{status = get_header,
+ rep_buf_size = 0,
+ streamed_size = 0,
+ content_length = undefined,
+ reply_buffer = <<>>,
+ chunk_size_buffer = <<>>,
+ recvd_headers = [],
+ status_line = undefined,
+ raw_headers = undefined,
+ deleted_crlf = false,
+ http_status_code = undefined,
+ chunk_size = undefined,
+ transfer_encoding = undefined
+ }.
+
+set_cur_request(#state{reqs = Reqs, socket = Socket} = State) ->
+ case queue:to_list(Reqs) of
+ [] ->
+ State#state{cur_req = undefined};
+ [#request{caller_controls_socket = Ccs} = NextReq | _] ->
+ case Ccs of
+ true ->
+ do_setopts(Socket, [{active, once}], State);
+ _ ->
+ ok
+ end,
+ State#state{cur_req = NextReq}
+ end.
+
+parse_headers(Headers) ->
+ case scan_crlf(Headers) of
+ {yes, StatusLine, T} ->
+ parse_headers(StatusLine, T);
+ {no, StatusLine} ->
+ parse_headers(StatusLine, <<>>)
+ end.
+
+parse_headers(StatusLine, Headers) ->
+ Headers_1 = parse_headers_1(Headers),
+ case parse_status_line(StatusLine) of
+ {ok, HttpVsn, StatCode, _Msg} ->
+ put(http_prot_vsn, HttpVsn),
+ {HttpVsn, StatCode, Headers_1, StatusLine, Headers};
+ _ -> %% An HTTP/0.9 response?
+ put(http_prot_vsn, "HTTP/0.9"),
+ {"HTTP/0.9", undefined, Headers, StatusLine, Headers}
+ end.
+
+% From RFC 2616
+%
+% HTTP/1.1 header field values can be folded onto multiple lines if
+% the continuation line begins with a space or horizontal tab. All
+% linear white space, including folding, has the same semantics as
+% SP. A recipient MAY replace any linear white space with a single
+% SP before interpreting the field value or forwarding the message
+% downstream.
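+%
+% For example, the folded header
+%     "X-Long-Header: part one\r\n\t part two\r\n"
+% is parsed by parse_headers_1/1 below as the single header
+%     {"X-Long-Header", "part one part two"}.
+% (Header name here is illustrative.)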
+parse_headers_1(B) when is_binary(B) ->
+ parse_headers_1(binary_to_list(B));
+parse_headers_1(String) ->
+ parse_headers_1(String, [], []).
+
+parse_headers_1([$\n, H |T], [$\r | L], Acc) when H =:= 32;
+ H =:= $\t ->
+ parse_headers_1(lists:dropwhile(fun(X) ->
+ is_whitespace(X)
+ end, T), [32 | L], Acc);
+parse_headers_1([$\n|T], [$\r | L], Acc) ->
+ case parse_header(lists:reverse(L)) of
+ invalid ->
+ parse_headers_1(T, [], Acc);
+ NewHeader ->
+ parse_headers_1(T, [], [NewHeader | Acc])
+ end;
+parse_headers_1([H|T], L, Acc) ->
+ parse_headers_1(T, [H|L], Acc);
+parse_headers_1([], [], Acc) ->
+ lists:reverse(Acc);
+parse_headers_1([], L, Acc) ->
+ Acc_1 = case parse_header(lists:reverse(L)) of
+ invalid ->
+ Acc;
+ NewHeader ->
+ [NewHeader | Acc]
+ end,
+ lists:reverse(Acc_1).
+
+parse_status_line(Line) when is_binary(Line) ->
+ parse_status_line(binary_to_list(Line));
+parse_status_line(Line) ->
+ parse_status_line(Line, get_prot_vsn, [], []).
+parse_status_line([32 | T], get_prot_vsn, ProtVsn, StatCode) ->
+ parse_status_line(T, get_status_code, ProtVsn, StatCode);
+parse_status_line([32 | T], get_status_code, ProtVsn, StatCode) ->
+ {ok, lists:reverse(ProtVsn), lists:reverse(StatCode), T};
+parse_status_line([], get_status_code, ProtVsn, StatCode) ->
+ {ok, lists:reverse(ProtVsn), lists:reverse(StatCode), []};
+parse_status_line([H | T], get_prot_vsn, ProtVsn, StatCode) ->
+ parse_status_line(T, get_prot_vsn, [H|ProtVsn], StatCode);
+parse_status_line([H | T], get_status_code, ProtVsn, StatCode) ->
+ parse_status_line(T, get_status_code, ProtVsn, [H | StatCode]);
+parse_status_line([], _, _, _) ->
+ http_09.
+
+parse_header(L) ->
+ parse_header(L, []).
+
+parse_header([$: | V], Acc) ->
+ {lists:reverse(Acc), string:strip(V)};
+parse_header([H | T], Acc) ->
+ parse_header(T, [H | Acc]);
+parse_header([], _) ->
+ invalid.
+
+scan_header(Bin) ->
+ case get_crlf_crlf_pos(Bin, 0) of
+ {yes, Pos} ->
+ {Headers, <<_:4/binary, Body/binary>>} = split_binary(Bin, Pos),
+ {yes, Headers, Body};
+ no ->
+ {no, Bin}
+ end.
+
+scan_header(Bin1, Bin2) when size(Bin1) < 4 ->
+ scan_header(<<Bin1/binary, Bin2/binary>>);
+scan_header(Bin1, <<>>) ->
+ scan_header(Bin1);
+scan_header(Bin1, Bin2) ->
+ Bin1_already_scanned_size = size(Bin1) - 4,
+ <<Headers_prefix:Bin1_already_scanned_size/binary, Rest/binary>> = Bin1,
+ Bin_to_scan = <<Rest/binary, Bin2/binary>>,
+ case get_crlf_crlf_pos(Bin_to_scan, 0) of
+ {yes, Pos} ->
+ {Headers_suffix, <<_:4/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
+ {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
+ no ->
+ {no, <<Bin1/binary, Bin2/binary>>}
+ end.
+
+get_crlf_crlf_pos(<<$\r, $\n, $\r, $\n, _/binary>>, Pos) -> {yes, Pos};
+get_crlf_crlf_pos(<<_, Rest/binary>>, Pos) -> get_crlf_crlf_pos(Rest, Pos + 1);
+get_crlf_crlf_pos(<<>>, _) -> no.
+
+scan_crlf(Bin) ->
+ case get_crlf_pos(Bin) of
+ {yes, Pos} ->
+ {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin, Pos),
+ {yes, Prefix, Suffix};
+ no ->
+ {no, Bin}
+ end.
+
+scan_crlf(<<>>, Bin2) ->
+ scan_crlf(Bin2);
+scan_crlf(Bin1, Bin2) when size(Bin1) < 2 ->
+ scan_crlf(<<Bin1/binary, Bin2/binary>>);
+scan_crlf(Bin1, Bin2) ->
+ scan_crlf_1(size(Bin1) - 2, Bin1, Bin2).
+
+scan_crlf_1(Bin1_head_size, Bin1, Bin2) ->
+ <<Bin1_head:Bin1_head_size/binary, Bin1_tail/binary>> = Bin1,
+ Bin3 = <<Bin1_tail/binary, Bin2/binary>>,
+ case get_crlf_pos(Bin3) of
+ {yes, Pos} ->
+ {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin3, Pos),
+ {yes, list_to_binary([Bin1_head, Prefix]), Suffix};
+ no ->
+ {no, list_to_binary([Bin1, Bin2])}
+ end.
+
+get_crlf_pos(Bin) ->
+ get_crlf_pos(Bin, 0).
+
+get_crlf_pos(<<$\r, $\n, _/binary>>, Pos) -> {yes, Pos};
+get_crlf_pos(<<_, Rest/binary>>, Pos) -> get_crlf_pos(Rest, Pos + 1);
+get_crlf_pos(<<>>, _) -> no.
+
+fmt_val(L) when is_list(L) -> L;
+fmt_val(I) when is_integer(I) -> integer_to_list(I);
+fmt_val(A) when is_atom(A) -> atom_to_list(A);
+fmt_val(Term) -> io_lib:format("~p", [Term]).
+
+crnl() -> "\r\n".
+
+method(get) -> "GET";
+method(post) -> "POST";
+method(head) -> "HEAD";
+method(options) -> "OPTIONS";
+method(put) -> "PUT";
+method(delete) -> "DELETE";
+method(trace) -> "TRACE";
+method(mkcol) -> "MKCOL";
+method(propfind) -> "PROPFIND";
+method(proppatch) -> "PROPPATCH";
+method(lock) -> "LOCK";
+method(unlock) -> "UNLOCK";
+method(move) -> "MOVE";
+method(copy) -> "COPY";
+method(connect) -> "CONNECT".
+
+%% From RFC 2616
+%%
+% The chunked encoding modifies the body of a message in order to
+% transfer it as a series of chunks, each with its own size indicator,
+% followed by an OPTIONAL trailer containing entity-header
+% fields. This allows dynamically produced content to be transferred
+% along with the information necessary for the recipient to verify
+% that it has received the full message.
+% Chunked-Body   = *chunk
+%                  last-chunk
+%                  trailer
+%                  CRLF
+% chunk          = chunk-size [ chunk-extension ] CRLF
+%                  chunk-data CRLF
+% chunk-size     = 1*HEX
+% last-chunk     = 1*("0") [ chunk-extension ] CRLF
+% chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
+% chunk-ext-name = token
+% chunk-ext-val  = token | quoted-string
+% chunk-data     = chunk-size(OCTET)
+% trailer        = *(entity-header CRLF)
+% The chunk-size field is a string of hex digits indicating the size
+% of the chunk. The chunked encoding is ended by any chunk whose size
+% is zero, followed by the trailer, which is terminated by an empty
+% line.
+%%
+%% The parsing implemented here discards all chunk extensions. It also
+%% strips trailing spaces from the chunk size fields as Apache 1.3.27 was
+%% sending them.
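+%% e.g. parse_chunk_header(<<"1a; name=val">>) and
+%% parse_chunk_header(<<"1a \r">>) both return 26.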
+parse_chunk_header(ChunkHeader) ->
+ parse_chunk_header(ChunkHeader, []).
+
+parse_chunk_header(<<$;, _/binary>>, Acc) ->
+ hexlist_to_integer(lists:reverse(Acc));
+parse_chunk_header(<<H, T/binary>>, Acc) ->
+ case is_whitespace(H) of
+ true ->
+ parse_chunk_header(T, Acc);
+ false ->
+ parse_chunk_header(T, [H | Acc])
+ end;
+parse_chunk_header(<<>>, Acc) ->
+ hexlist_to_integer(lists:reverse(Acc)).
+
+is_whitespace($\s) -> true;
+is_whitespace($\r) -> true;
+is_whitespace($\n) -> true;
+is_whitespace($\t) -> true;
+is_whitespace(_) -> false.
+
+send_async_headers(_ReqId, undefined, _, _State) ->
+ ok;
+send_async_headers(ReqId, StreamTo, Give_raw_headers,
+ #state{status_line = Status_line, raw_headers = Raw_headers,
+ recvd_headers = Headers, http_status_code = StatCode,
+ cur_req = #request{options = Opts}
+ }) ->
+ {Headers_1, Raw_headers_1} = maybe_add_custom_headers(Headers, Raw_headers, Opts),
+ case Give_raw_headers of
+ false ->
+ catch StreamTo ! {ibrowse_async_headers, ReqId, StatCode, Headers_1};
+ true ->
+ catch StreamTo ! {ibrowse_async_headers, ReqId, Status_line, Raw_headers_1}
+ end.
+
+maybe_add_custom_headers(Headers, Raw_headers, Opts) ->
+ Custom_headers = get_value(add_custom_headers, Opts, []),
+ Headers_1 = Headers ++ Custom_headers,
+ Raw_headers_1 = case Custom_headers of
+ [_ | _] when is_binary(Raw_headers) ->
+ Custom_headers_bin = list_to_binary(string:join([[X, $:, Y] || {X, Y} <- Custom_headers], "\r\n")),
+ <<Raw_headers/binary, "\r\n", Custom_headers_bin/binary>>;
+ _ ->
+ Raw_headers
+ end,
+ {Headers_1, Raw_headers_1}.
+
+format_response_data(Resp_format, Body) ->
+ case Resp_format of
+ list when is_list(Body) ->
+ flatten(Body);
+ list when is_binary(Body) ->
+ binary_to_list(Body);
+ binary when is_list(Body) ->
+ list_to_binary(Body);
+ _ ->
+ %% This is to cater for sending messages such as
+ %% {chunk_start, _}, chunk_end etc
+ Body
+ end.
+
+do_reply(State, From, undefined, _, Resp_format, {ok, St_code, Headers, Body}) ->
+ Msg_1 = {ok, St_code, Headers, format_response_data(Resp_format, Body)},
+ gen_server:reply(From, Msg_1),
+ dec_pipeline_counter(State);
+do_reply(State, From, undefined, _, _, Msg) ->
+ gen_server:reply(From, Msg),
+ dec_pipeline_counter(State);
+do_reply(#state{prev_req_id = Prev_req_id} = State,
+ _From, StreamTo, ReqId, Resp_format, {ok, _, _, Body}) ->
+ State_1 = dec_pipeline_counter(State),
+ case Body of
+ [] ->
+ ok;
+ _ ->
+ Body_1 = format_response_data(Resp_format, Body),
+ catch StreamTo ! {ibrowse_async_response, ReqId, Body_1}
+ end,
+ catch StreamTo ! {ibrowse_async_response_end, ReqId},
+ %% We don't want to delete the Req-id to Pid mapping straightaway
+ %% as the client may send a stream_next message just while we are
+ %% sending back this ibrowse_async_response_end message. If we
+ %% deleted this mapping straightaway, the caller will see an
+ %% {error, unknown_req_id} when it calls ibrowse:stream_next/1. To
+ %% get around this, we store the req id, and clear it after the
+ %% next request. If there are weird combinations of stream,
+ %% stream_once and sync requests on the same connection, it will
+ %% take a while for the req_id-pid mapping to get cleared, but it
+ %% should do no harm.
+ ets:delete(ibrowse_stream, {req_id_pid, Prev_req_id}),
+ State_1#state{prev_req_id = ReqId};
+do_reply(State, _From, StreamTo, ReqId, Resp_format, Msg) ->
+ State_1 = dec_pipeline_counter(State),
+ Msg_1 = format_response_data(Resp_format, Msg),
+ catch StreamTo ! {ibrowse_async_response, ReqId, Msg_1},
+ State_1.
+
+do_interim_reply(undefined, _, _ReqId, _Msg) ->
+ ok;
+do_interim_reply(StreamTo, Response_format, ReqId, Msg) ->
+ Msg_1 = format_response_data(Response_format, Msg),
+ catch StreamTo ! {ibrowse_async_response, ReqId, Msg_1}.
+
+do_error_reply(#state{reqs = Reqs, tunnel_setup_queue = Tun_q} = State, Err) ->
+ ReqList = queue:to_list(Reqs),
+ lists:foreach(fun(#request{from=From, stream_to=StreamTo, req_id=ReqId,
+ response_format = Resp_format}) ->
+ ets:delete(ibrowse_stream, {req_id_pid, ReqId}),
+ do_reply(State, From, StreamTo, ReqId, Resp_format, {error, Err})
+ end, ReqList),
+ lists:foreach(
+ fun({From, _Url, _Headers, _Method, _Body, _Options, _Timeout}) ->
+ do_reply(State, From, undefined, undefined, undefined, Err)
+ end, Tun_q).
+
+fail_pipelined_requests(#state{reqs = Reqs, cur_req = CurReq} = State, Reply) ->
+ {_, Reqs_1} = queue:out(Reqs),
+ #request{from=From, stream_to=StreamTo, req_id=ReqId,
+ response_format = Resp_format} = CurReq,
+ State_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+ do_error_reply(State_1#state{reqs = Reqs_1}, previous_request_failed).
+
+split_list_at(List, N) ->
+ split_list_at(List, N, []).
+
+split_list_at([], _, Acc) ->
+ {lists:reverse(Acc), []};
+split_list_at(List2, 0, List1) ->
+ {lists:reverse(List1), List2};
+split_list_at([H | List2], N, List1) ->
+ split_list_at(List2, N-1, [H | List1]).
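+%% e.g. split_list_at("abcdef", 2) -> {"ab", "cdef"}.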
+
+hexlist_to_integer(List) ->
+ hexlist_to_integer(lists:reverse(List), 1, 0).
+
+hexlist_to_integer([H | T], Multiplier, Acc) ->
+ hexlist_to_integer(T, Multiplier*16, Multiplier*to_ascii(H) + Acc);
+hexlist_to_integer([], _, Acc) ->
+ Acc.
+
+to_ascii($A) -> 10;
+to_ascii($a) -> 10;
+to_ascii($B) -> 11;
+to_ascii($b) -> 11;
+to_ascii($C) -> 12;
+to_ascii($c) -> 12;
+to_ascii($D) -> 13;
+to_ascii($d) -> 13;
+to_ascii($E) -> 14;
+to_ascii($e) -> 14;
+to_ascii($F) -> 15;
+to_ascii($f) -> 15;
+to_ascii($1) -> 1;
+to_ascii($2) -> 2;
+to_ascii($3) -> 3;
+to_ascii($4) -> 4;
+to_ascii($5) -> 5;
+to_ascii($6) -> 6;
+to_ascii($7) -> 7;
+to_ascii($8) -> 8;
+to_ascii($9) -> 9;
+to_ascii($0) -> 0.
+
+cancel_timer(undefined) -> ok;
+cancel_timer(Ref) -> _ = erlang:cancel_timer(Ref),
+ ok.
+
+cancel_timer(Ref, {eat_message, Msg}) ->
+ cancel_timer(Ref),
+ receive
+ Msg ->
+ ok
+ after 0 ->
+ ok
+ end.
+
+make_req_id() ->
+ now().
+
+to_lower(Str) ->
+ to_lower(Str, []).
+to_lower([H|T], Acc) when H >= $A, H =< $Z ->
+ to_lower(T, [H+32|Acc]);
+to_lower([H|T], Acc) ->
+ to_lower(T, [H|Acc]);
+to_lower([], Acc) ->
+ lists:reverse(Acc).
+
+shutting_down(#state{lb_ets_tid = undefined}) ->
+ ok;
+shutting_down(#state{lb_ets_tid = Tid,
+ cur_pipeline_size = Sz}) ->
+ catch ets:delete(Tid, {Sz, self()}).
+
+inc_pipeline_counter(#state{is_closing = true} = State) ->
+ State;
+inc_pipeline_counter(#state{cur_pipeline_size = Pipe_sz} = State) ->
+ State#state{cur_pipeline_size = Pipe_sz + 1}.
+
+dec_pipeline_counter(#state{is_closing = true} = State) ->
+ State;
+dec_pipeline_counter(#state{lb_ets_tid = undefined} = State) ->
+ State;
+dec_pipeline_counter(#state{cur_pipeline_size = Pipe_sz,
+ lb_ets_tid = Tid} = State) ->
+ ets:delete(Tid, {Pipe_sz, self()}),
+ ets:insert(Tid, {{Pipe_sz - 1, self()}, []}),
+ State#state{cur_pipeline_size = Pipe_sz - 1}.
+
+flatten([H | _] = L) when is_integer(H) ->
+ L;
+flatten([H | _] = L) when is_list(H) ->
+ lists:flatten(L);
+flatten([]) ->
+ [].
+
+get_stream_chunk_size(Options) ->
+ case lists:keysearch(stream_chunk_size, 1, Options) of
+ {value, {_, V}} when V > 0 ->
+ V;
+ _ ->
+ ?DEFAULT_STREAM_CHUNK_SIZE
+ end.
+
+set_inac_timer(State) ->
+ cancel_timer(State#state.inactivity_timer_ref),
+ set_inac_timer(State#state{inactivity_timer_ref = undefined},
+ get_inac_timeout(State)).
+
+set_inac_timer(State, Timeout) when is_integer(Timeout) ->
+ Ref = erlang:send_after(Timeout, self(), timeout),
+ State#state{inactivity_timer_ref = Ref};
+set_inac_timer(State, _) ->
+ State.
+
+get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->
+ get_value(inactivity_timeout, Opts, infinity);
+get_inac_timeout(#state{cur_req = undefined}) ->
+ case ibrowse:get_config_value(inactivity_timeout, undefined) of
+ Val when is_integer(Val) ->
+ Val;
+ _ ->
+ case application:get_env(ibrowse, inactivity_timeout) of
+ {ok, Val} when is_integer(Val), Val > 0 ->
+ Val;
+ _ ->
+ 10000
+ end
+ end.
+
+trace_request(Req) ->
+ case get(my_trace_flag) of
+ true ->
+ %% Avoid the binary operations if trace is not on...
+ NReq = to_binary(Req),
+ do_trace("Sending request: ~n"
+ "--- Request Begin ---~n~s~n"
+ "--- Request End ---~n", [NReq]);
+ _ -> ok
+ end.
+
+trace_request_body(Body) ->
+ case get(my_trace_flag) of
+ true ->
+ %% Avoid the binary operations if trace is not on...
+ NBody = to_binary(Body),
+ case size(NBody) > 1024 of
+ true ->
+ ok;
+ false ->
+ do_trace("Sending request body: ~n"
+ "--- Request Body Begin ---~n~s~n"
+ "--- Request Body End ---~n", [NBody])
+ end;
+ false ->
+ ok
+ end.
+
+to_binary(X) when is_list(X) -> list_to_binary(X);
+to_binary(X) when is_binary(X) -> X.
diff --git a/deps/ibrowse/src/ibrowse_lb.erl b/deps/ibrowse/src/ibrowse_lb.erl
new file mode 100644
index 00000000..0e001d48
--- /dev/null
+++ b/deps/ibrowse/src/ibrowse_lb.erl
@@ -0,0 +1,235 @@
+%%%-------------------------------------------------------------------
+%%% File : ibrowse_lb.erl
+%%% Author : chandru <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description :
+%%%
+%%% Created : 6 Mar 2008 by chandru <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_lb).
+-author(chandru).
+-behaviour(gen_server).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+-export([
+ start_link/1,
+ spawn_connection/5,
+ stop/1
+ ]).
+
+%% gen_server callbacks
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+ ]).
+
+-record(state, {parent_pid,
+ ets_tid,
+ host,
+ port,
+ max_sessions,
+ max_pipeline_size,
+ num_cur_sessions = 0}).
+
+-include("ibrowse.hrl").
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link/0
+%% Description: Starts the server
+%%--------------------------------------------------------------------
+start_link(Args) ->
+ gen_server:start_link(?MODULE, Args, []).
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init/1
+%% Description: Initiates the server
+%% Returns: {ok, State} |
+%% {ok, State, Timeout} |
+%% ignore |
+%% {stop, Reason}
+%%--------------------------------------------------------------------
+init([Host, Port]) ->
+ process_flag(trap_exit, true),
+ Max_sessions = ibrowse:get_config_value({max_sessions, Host, Port}, 10),
+ Max_pipe_sz = ibrowse:get_config_value({max_pipeline_size, Host, Port}, 10),
+ put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
+ put(ibrowse_trace_token, ["LB: ", Host, $:, integer_to_list(Port)]),
+ Tid = ets:new(ibrowse_lb, [public, ordered_set]),
+ {ok, #state{parent_pid = whereis(ibrowse),
+ host = Host,
+ port = Port,
+ ets_tid = Tid,
+ max_pipeline_size = Max_pipe_sz,
+ max_sessions = Max_sessions}}.
+
+spawn_connection(Lb_pid, Url,
+ Max_sessions,
+ Max_pipeline_size,
+ SSL_options)
+ when is_pid(Lb_pid),
+ is_record(Url, url),
+ is_integer(Max_pipeline_size),
+ is_integer(Max_sessions) ->
+ gen_server:call(Lb_pid,
+ {spawn_connection, Url, Max_sessions, Max_pipeline_size, SSL_options}).
+
+stop(Lb_pid) ->
+ case catch gen_server:call(Lb_pid, stop) of
+ {'EXIT', {timeout, _}} ->
+ exit(Lb_pid, kill);
+ ok ->
+ ok
+ end.
+%%--------------------------------------------------------------------
+%% Function: handle_call/3
+%% Description: Handling call messages
+%% Returns: {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} | (terminate/2 is called)
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+% handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
+% #state{max_sessions = Max_sess,
+% ets_tid = Tid,
+% max_pipeline_size = Max_pipe_sz,
+% num_cur_sessions = Num} = State)
+% when Num >= Max ->
+% Reply = find_best_connection(Tid),
+% {reply, sorry_dude_reuse, State};
+
+%% Update max_sessions in #state with supplied value
+handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
+ #state{num_cur_sessions = Num} = State)
+ when Num >= Max_sess ->
+ State_1 = maybe_create_ets(State),
+ Reply = find_best_connection(State_1#state.ets_tid, Max_pipe),
+ {reply, Reply, State_1#state{max_sessions = Max_sess}};
+
+handle_call({spawn_connection, Url, _Max_sess, _Max_pipe, SSL_options}, _From,
+ #state{num_cur_sessions = Cur} = State) ->
+ State_1 = maybe_create_ets(State),
+ Tid = State_1#state.ets_tid,
+ {ok, Pid} = ibrowse_http_client:start_link({Tid, Url, SSL_options}),
+ ets:insert(Tid, {{1, Pid}, []}),
+ {reply, {ok, Pid}, State_1#state{num_cur_sessions = Cur + 1}};
+
+handle_call(stop, _From, #state{ets_tid = undefined} = State) ->
+ gen_server:reply(_From, ok),
+ {stop, normal, State};
+
+handle_call(stop, _From, #state{ets_tid = Tid} = State) ->
+ ets:foldl(fun({{_, Pid}, _}, Acc) ->
+ ibrowse_http_client:stop(Pid),
+ Acc
+ end, [], Tid),
+ gen_server:reply(_From, ok),
+ {stop, normal, State};
+
+handle_call(Request, _From, State) ->
+ Reply = {unknown_request, Request},
+ {reply, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast/2
+%% Description: Handling cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info/2
+%% Description: Handling all non call/cast messages
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%--------------------------------------------------------------------
+handle_info({'EXIT', Parent, _Reason}, #state{parent_pid = Parent} = State) ->
+ {stop, normal, State};
+
+handle_info({'EXIT', _Pid, _Reason}, #state{ets_tid = undefined} = State) ->
+ {noreply, State};
+
+handle_info({'EXIT', Pid, _Reason},
+ #state{num_cur_sessions = Cur,
+ ets_tid = Tid} = State) ->
+ ets:match_delete(Tid, {{'_', Pid}, '_'}),
+ Cur_1 = Cur - 1,
+ State_1 = case Cur_1 of
+ 0 ->
+ ets:delete(Tid),
+ State#state{ets_tid = undefined};
+ _ ->
+ State
+ end,
+ {noreply, State_1#state{num_cur_sessions = Cur_1}};
+
+handle_info({trace, Bool}, #state{ets_tid = undefined} = State) ->
+ put(my_trace_flag, Bool),
+ {noreply, State};
+
+handle_info({trace, Bool}, #state{ets_tid = Tid} = State) ->
+ ets:foldl(fun({{_, Pid}, _}, Acc) when is_pid(Pid) ->
+ catch Pid ! {trace, Bool},
+ Acc;
+ (_, Acc) ->
+ Acc
+ end, undefined, Tid),
+ put(my_trace_flag, Bool),
+ {noreply, State};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+%% Function: terminate/2
+%% Description: Shutdown the server
+%% Returns: any (ignored by gen_server)
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Convert process state when code is changed
+%% Returns: {ok, NewState}
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+find_best_connection(Tid, Max_pipe) ->
+ case ets:first(Tid) of
+ {Cur_sz, Pid} when Cur_sz < Max_pipe ->
+ ets:delete(Tid, {Cur_sz, Pid}),
+ ets:insert(Tid, {{Cur_sz + 1, Pid}, []}),
+ {ok, Pid};
+ _ ->
+ {error, retry_later}
+ end.
+
+maybe_create_ets(#state{ets_tid = undefined} = State) ->
+ Tid = ets:new(ibrowse_lb, [public, ordered_set]),
+ State#state{ets_tid = Tid};
+maybe_create_ets(State) ->
+ State.
diff --git a/deps/ibrowse/src/ibrowse_lib.erl b/deps/ibrowse/src/ibrowse_lib.erl
new file mode 100644
index 00000000..3cbe3ace
--- /dev/null
+++ b/deps/ibrowse/src/ibrowse_lib.erl
@@ -0,0 +1,391 @@
+%%% File : ibrowse_lib.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description :
+%%% Created : 27 Feb 2004 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%% @doc Module with a few useful functions
+
+-module(ibrowse_lib).
+-author('chandru').
+-ifdef(debug).
+-compile(export_all).
+-endif.
+
+-include("ibrowse.hrl").
+
+-export([
+ get_trace_status/2,
+ do_trace/2,
+ do_trace/3,
+ url_encode/1,
+ decode_rfc822_date/1,
+ status_code/1,
+ encode_base64/1,
+ decode_base64/1,
+ get_value/2,
+ get_value/3,
+ parse_url/1,
+ printable_date/0
+ ]).
+
+get_trace_status(Host, Port) ->
+ ibrowse:get_config_value({trace, Host, Port}, false).
+
+%% @doc URL-encodes a string based on RFC 1738. Returns a flat list.
+%% @spec url_encode(Str) -> UrlEncodedStr
+%% Str = string()
+%% UrlEncodedStr = string()
+url_encode(Str) when is_list(Str) ->
+ url_encode_char(lists:reverse(Str), []).
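+%% e.g. url_encode("a b&c") -> "a+b%26c".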
+
+url_encode_char([X | T], Acc) when X >= $0, X =< $9 ->
+ url_encode_char(T, [X | Acc]);
+url_encode_char([X | T], Acc) when X >= $a, X =< $z ->
+ url_encode_char(T, [X | Acc]);
+url_encode_char([X | T], Acc) when X >= $A, X =< $Z ->
+ url_encode_char(T, [X | Acc]);
+url_encode_char([X | T], Acc) when X == $-; X == $_; X == $. ->
+ url_encode_char(T, [X | Acc]);
+url_encode_char([32 | T], Acc) ->
+ url_encode_char(T, [$+ | Acc]);
+url_encode_char([X | T], Acc) ->
+ url_encode_char(T, [$%, d2h(X bsr 4), d2h(X band 16#0f) | Acc]);
+url_encode_char([], Acc) ->
+ Acc.
+
+d2h(N) when N<10 -> N+$0;
+d2h(N) -> N+$a-10.
+
+decode_rfc822_date(String) when is_list(String) ->
+ case catch decode_rfc822_date_1(string:tokens(String, ", \t\r\n")) of
+ {'EXIT', _} ->
+ {error, invalid_date};
+ Res ->
+ Res
+ end.
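+%% e.g. decode_rfc822_date("Fri, 17 Jan 2014 08:48:11 GMT")
+%% -> {{2014,1,17},{8,48,11}}.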
+
+% TODO: Have to handle the Zone
+decode_rfc822_date_1([_,DayInt,Month,Year, Time,Zone]) ->
+ decode_rfc822_date_1([DayInt,Month,Year, Time,Zone]);
+decode_rfc822_date_1([Day,Month,Year, Time,_Zone]) ->
+ DayI = list_to_integer(Day),
+ MonthI = month_int(Month),
+ YearI = list_to_integer(Year),
+ TimeTup = case string:tokens(Time, ":") of
+ [H,M] ->
+ {list_to_integer(H),
+ list_to_integer(M),
+ 0};
+ [H,M,S] ->
+ {list_to_integer(H),
+ list_to_integer(M),
+ list_to_integer(S)}
+ end,
+ {{YearI,MonthI,DayI}, TimeTup}.
+
+month_int("Jan") -> 1;
+month_int("Feb") -> 2;
+month_int("Mar") -> 3;
+month_int("Apr") -> 4;
+month_int("May") -> 5;
+month_int("Jun") -> 6;
+month_int("Jul") -> 7;
+month_int("Aug") -> 8;
+month_int("Sep") -> 9;
+month_int("Oct") -> 10;
+month_int("Nov") -> 11;
+month_int("Dec") -> 12.
+
+%% @doc Given a status code, returns an atom describing the status code.
+%% @spec status_code(StatusCode::status_code()) -> StatusDescription
+%% status_code() = string() | integer()
+%% StatusDescription = atom()
+status_code(100) -> continue;
+status_code(101) -> switching_protocols;
+status_code(102) -> processing;
+status_code(200) -> ok;
+status_code(201) -> created;
+status_code(202) -> accepted;
+status_code(203) -> non_authoritative_information;
+status_code(204) -> no_content;
+status_code(205) -> reset_content;
+status_code(206) -> partial_content;
+status_code(207) -> multi_status;
+status_code(300) -> multiple_choices;
+status_code(301) -> moved_permanently;
+status_code(302) -> found;
+status_code(303) -> see_other;
+status_code(304) -> not_modified;
+status_code(305) -> use_proxy;
+status_code(306) -> unused;
+status_code(307) -> temporary_redirect;
+status_code(400) -> bad_request;
+status_code(401) -> unauthorized;
+status_code(402) -> payment_required;
+status_code(403) -> forbidden;
+status_code(404) -> not_found;
+status_code(405) -> method_not_allowed;
+status_code(406) -> not_acceptable;
+status_code(407) -> proxy_authentication_required;
+status_code(408) -> request_timeout;
+status_code(409) -> conflict;
+status_code(410) -> gone;
+status_code(411) -> length_required;
+status_code(412) -> precondition_failed;
+status_code(413) -> request_entity_too_large;
+status_code(414) -> request_uri_too_long;
+status_code(415) -> unsupported_media_type;
+status_code(416) -> requested_range_not_satisfiable;
+status_code(417) -> expectation_failed;
+status_code(422) -> unprocessable_entity;
+status_code(423) -> locked;
+status_code(424) -> failed_dependency;
+status_code(500) -> internal_server_error;
+status_code(501) -> not_implemented;
+status_code(502) -> bad_gateway;
+status_code(503) -> service_unavailable;
+status_code(504) -> gateway_timeout;
+status_code(505) -> http_version_not_supported;
+status_code(507) -> insufficient_storage;
+status_code(X) when is_list(X) -> status_code(list_to_integer(X));
+status_code(_) -> unknown_status_code.
+
+%% @doc Implements the base64 encoding algorithm. The output data type matches the input data type.
+%% @spec encode_base64(In) -> Out
+%% In = string() | binary()
+%% Out = string() | binary()
+encode_base64(List) when is_list(List) ->
+ binary_to_list(base64:encode(List));
+encode_base64(Bin) when is_binary(Bin) ->
+ base64:encode(Bin).
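+%% e.g. encode_base64("Hello") -> "SGVsbG8=" and
+%% encode_base64(<<"Hello">>) -> <<"SGVsbG8=">>.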
+
+%% @doc Implements the base64 decoding algorithm. The output data type matches the input data type.
+%% @spec decode_base64(In) -> Out | exit({error, invalid_input})
+%% In = string() | binary()
+%% Out = string() | binary()
+decode_base64(List) when is_list(List) ->
+ binary_to_list(base64:decode(List));
+decode_base64(Bin) when is_binary(Bin) ->
+ base64:decode(Bin).
+
+get_value(Tag, TVL, DefVal) ->
+ case lists:keysearch(Tag, 1, TVL) of
+ false ->
+ DefVal;
+ {value, {_, Val}} ->
+ Val
+ end.
+
+get_value(Tag, TVL) ->
+ {value, {_, V}} = lists:keysearch(Tag,1,TVL),
+ V.
+
+parse_url(Url) ->
+ case parse_url(Url, get_protocol, #url{abspath=Url}, []) of
+ #url{host_type = undefined, host = Host} = UrlRec ->
+ case inet_parse:address(Host) of
+ {ok, {_, _, _, _, _, _, _, _}} ->
+ UrlRec#url{host_type = ipv6_address};
+ {ok, {_, _, _, _}} ->
+ UrlRec#url{host_type = ipv4_address};
+ _ ->
+ UrlRec#url{host_type = hostname}
+ end;
+ Else ->
+ Else
+ end.
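+%% e.g. (host name illustrative)
+%% parse_url("http://user:pass@example.com:8080/p?q=1") returns
+%% #url{protocol = http, username = "user", password = "pass",
+%%      host = "example.com", port = 8080, path = "/p?q=1",
+%%      host_type = hostname,
+%%      abspath = "http://user:pass@example.com:8080/p?q=1"}.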
+
+parse_url([$:, $/, $/ | _], get_protocol, Url, []) ->
+ {invalid_uri_1, Url};
+parse_url([$:, $/, $/ | T], get_protocol, Url, TmpAcc) ->
+ Prot = list_to_existing_atom(lists:reverse(TmpAcc)),
+ parse_url(T, get_username,
+ Url#url{protocol = Prot},
+ []);
+parse_url([H | T], get_username, Url, TmpAcc) when H == $/;
+ H == $? ->
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
+ %% No username/password. No port number
+ Url#url{host = lists:reverse(TmpAcc),
+ port = default_port(Url#url.protocol),
+ path = Path};
+parse_url([$: | T], get_username, Url, TmpAcc) ->
+ %% It is possible that no username/password has been specified,
+ %% but we continue on the assumption that there is one. If we
+ %% encounter a '@' later on, there was indeed a username/password.
+ %% If we encounter a '/', what we collected so far was actually
+ %% the hostname.
+ parse_url(T, get_password,
+ Url#url{username = lists:reverse(TmpAcc)},
+ []);
+parse_url([$@ | T], get_username, Url, TmpAcc) ->
+ parse_url(T, get_host,
+ Url#url{username = lists:reverse(TmpAcc),
+ password = ""},
+ []);
+parse_url([$[ | T], get_username, Url, []) ->
+ % IPv6 address literals are enclosed by square brackets:
+ % http://www.ietf.org/rfc/rfc2732.txt
+ parse_url(T, get_ipv6_address, Url#url{host_type = ipv6_address}, []);
+parse_url([$[ | T], get_username, _Url, TmpAcc) ->
+ {error, {invalid_username_or_host, lists:reverse(TmpAcc) ++ "[" ++ T}};
+parse_url([$[ | _], get_password, _Url, []) ->
+ {error, missing_password};
+parse_url([$[ | T], get_password, Url, TmpAcc) ->
+ % IPv6 address literals are enclosed by square brackets:
+ % http://www.ietf.org/rfc/rfc2732.txt
+ parse_url(T, get_ipv6_address,
+ Url#url{host_type = ipv6_address,
+ password = lists:reverse(TmpAcc)},
+ []);
+parse_url([$@ | T], get_password, Url, TmpAcc) ->
+ parse_url(T, get_host,
+ Url#url{password = lists:reverse(TmpAcc)},
+ []);
+parse_url([H | T], get_password, Url, TmpAcc) when H == $/;
+ H == $? ->
+ %% Ok, what we thought was the username/password was actually the
+ %% hostname and port number
+ #url{username=User} = Url,
+ Port = list_to_integer(lists:reverse(TmpAcc)),
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
+ Url#url{host = User,
+ port = Port,
+ username = undefined,
+ password = undefined,
+ path = Path};
+parse_url([$] | T], get_ipv6_address, #url{protocol = Prot} = Url, TmpAcc) ->
+ Addr = lists:reverse(TmpAcc),
+ case inet_parse:address(Addr) of
+ {ok, {_, _, _, _, _, _, _, _}} ->
+ Url2 = Url#url{host = Addr, port = default_port(Prot)},
+ case T of
+ [$: | T2] ->
+ parse_url(T2, get_port, Url2, []);
+ [$/ | T2] ->
+ Url2#url{path = [$/ | T2]};
+ [$? | T2] ->
+ Url2#url{path = [$/, $? | T2]};
+ [] ->
+ Url2#url{path = "/"};
+ _ ->
+ {error, {invalid_host, "[" ++ Addr ++ "]" ++ T}}
+ end;
+ _ ->
+ {error, {invalid_ipv6_address, Addr}}
+ end;
+parse_url([$[ | T], get_host, #url{} = Url, []) ->
+ parse_url(T, get_ipv6_address, Url#url{host_type = ipv6_address}, []);
+parse_url([$: | T], get_host, #url{} = Url, TmpAcc) ->
+ parse_url(T, get_port,
+ Url#url{host = lists:reverse(TmpAcc)},
+ []);
+parse_url([H | T], get_host, #url{protocol=Prot} = Url, TmpAcc) when H == $/;
+ H == $? ->
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
+ Url#url{host = lists:reverse(TmpAcc),
+ port = default_port(Prot),
+ path = Path};
+parse_url([H | T], get_port, #url{protocol=Prot} = Url, TmpAcc) when H == $/;
+ H == $? ->
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
+ Port = case TmpAcc of
+ [] ->
+ default_port(Prot);
+ _ ->
+ list_to_integer(lists:reverse(TmpAcc))
+ end,
+ Url#url{port = Port, path = Path};
+parse_url([H | T], State, Url, TmpAcc) ->
+ parse_url(T, State, Url, [H | TmpAcc]);
+parse_url([], get_host, Url, TmpAcc) when TmpAcc /= [] ->
+ Url#url{host = lists:reverse(TmpAcc),
+ port = default_port(Url#url.protocol),
+ path = "/"};
+parse_url([], get_username, Url, TmpAcc) when TmpAcc /= [] ->
+ Url#url{host = lists:reverse(TmpAcc),
+ port = default_port(Url#url.protocol),
+ path = "/"};
+parse_url([], get_port, #url{protocol=Prot} = Url, TmpAcc) ->
+ Port = case TmpAcc of
+ [] ->
+ default_port(Prot);
+ _ ->
+ list_to_integer(lists:reverse(TmpAcc))
+ end,
+ Url#url{port = Port,
+ path = "/"};
+parse_url([], get_password, Url, TmpAcc) ->
+ %% Ok, what we thought was the username/password was actually the
+ %% hostname and port number
+ #url{username=User} = Url,
+ Port = case TmpAcc of
+ [] ->
+ default_port(Url#url.protocol);
+ _ ->
+ list_to_integer(lists:reverse(TmpAcc))
+ end,
+ Url#url{host = User,
+ port = Port,
+ username = undefined,
+ password = undefined,
+ path = "/"};
+parse_url([], State, Url, TmpAcc) ->
+ {invalid_uri_2, State, Url, TmpAcc}.
+
+default_port(http) -> 80;
+default_port(https) -> 443;
+default_port(ftp) -> 21.
+
+printable_date() ->
+ {{Y,Mo,D},{H, M, S}} = calendar:local_time(),
+ {_,_,MicroSecs} = now(),
+ [integer_to_list(Y),
+ $-,
+ integer_to_list(Mo),
+ $-,
+ integer_to_list(D),
+ $_,
+ integer_to_list(H),
+ $:,
+ integer_to_list(M),
+ $:,
+ integer_to_list(S),
+ $:,
+ integer_to_list(MicroSecs div 1000)].
+
+do_trace(Fmt, Args) ->
+ do_trace(get(my_trace_flag), Fmt, Args).
+
+-ifdef(DEBUG).
+do_trace(_, Fmt, Args) ->
+ io:format("~s -- (~s) - "++Fmt,
+ [printable_date(),
+ get(ibrowse_trace_token) | Args]).
+-else.
+do_trace(true, Fmt, Args) ->
+ io:format("~s -- (~s) - "++Fmt,
+ [printable_date(),
+ get(ibrowse_trace_token) | Args]);
+do_trace(_, _, _) ->
+ ok.
+-endif.
diff --git a/deps/ibrowse/src/ibrowse_sup.erl b/deps/ibrowse/src/ibrowse_sup.erl
new file mode 100644
index 00000000..ace33d16
--- /dev/null
+++ b/deps/ibrowse/src/ibrowse_sup.erl
@@ -0,0 +1,63 @@
+%%%-------------------------------------------------------------------
+%%% File : ibrowse_sup.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description :
+%%%
+%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%%-------------------------------------------------------------------
+-module(ibrowse_sup).
+-behaviour(supervisor).
+%%--------------------------------------------------------------------
+%% Include files
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% External exports
+%%--------------------------------------------------------------------
+-export([
+ start_link/0
+ ]).
+
+%%--------------------------------------------------------------------
+%% Internal exports
+%%--------------------------------------------------------------------
+-export([
+ init/1
+ ]).
+
+%%--------------------------------------------------------------------
+%% Macros
+%%--------------------------------------------------------------------
+-define(SERVER, ?MODULE).
+
+%%--------------------------------------------------------------------
+%% Records
+%%--------------------------------------------------------------------
+
+%%====================================================================
+%% External functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: start_link/0
+%% Description: Starts the supervisor
+%%--------------------------------------------------------------------
+start_link() ->
+ supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+%%====================================================================
+%% Server functions
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Func: init/1
+%% Returns: {ok, {SupFlags, [ChildSpec]}} |
+%% ignore |
+%% {error, Reason}
+%%--------------------------------------------------------------------
+init([]) ->
+ AChild = {ibrowse,{ibrowse,start_link,[]},
+ permanent,2000,worker,[ibrowse, ibrowse_http_client]},
+ {ok,{{one_for_all,10,1}, [AChild]}}.
+
+%%====================================================================
+%% Internal functions
+%%====================================================================
diff --git a/deps/ibrowse/src/ibrowse_test.erl b/deps/ibrowse/src/ibrowse_test.erl
new file mode 100644
index 00000000..502806b7
--- /dev/null
+++ b/deps/ibrowse/src/ibrowse_test.erl
@@ -0,0 +1,513 @@
+%%% File : ibrowse_test.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description : Test ibrowse
+%%% Created : 14 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+
+-module(ibrowse_test).
+-export([
+ test_load/3,
+ send_reqs_1/3,
+ do_send_req/2,
+ unit_tests/0,
+ unit_tests/1,
+ unit_tests_1/2,
+ ue_test/0,
+ ue_test/1,
+ verify_chunked_streaming/0,
+ verify_chunked_streaming/1,
+ test_chunked_streaming_once/0,
+ i_do_async_req_list/4,
+ test_stream_once/3,
+ test_stream_once/4,
+ test_20122010/0,
+ test_20122010/1
+ ]).
+
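+%% Example of the {stream_to, {self(), once}} flow exercised below
+%% (URL illustrative):
+%%   ibrowse_test:test_stream_once("http://www.example.com/", get, [], 10000).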
+test_stream_once(Url, Method, Options) ->
+ test_stream_once(Url, Method, Options, 5000).
+
+test_stream_once(Url, Method, Options, Timeout) ->
+ case ibrowse:send_req(Url, [], Method, [], [{stream_to, {self(), once}} | Options], Timeout) of
+ {ibrowse_req_id, Req_id} ->
+ case ibrowse:stream_next(Req_id) of
+ ok ->
+ test_stream_once(Req_id);
+ Err ->
+ Err
+ end;
+ Err ->
+ Err
+ end.
+
+test_stream_once(Req_id) ->
+ receive
+ {ibrowse_async_headers, Req_id, StatCode, Headers} ->
+ io:format("Recvd headers~n~p~n", [{ibrowse_async_headers, Req_id, StatCode, Headers}]),
+ case ibrowse:stream_next(Req_id) of
+ ok ->
+ test_stream_once(Req_id);
+ Err ->
+ Err
+ end;
+ {ibrowse_async_response, Req_id, {error, Err}} ->
+ io:format("Recvd error: ~p~n", [Err]);
+ {ibrowse_async_response, Req_id, Body_1} ->
+ io:format("Recvd body part: ~n~p~n", [{ibrowse_async_response, Req_id, Body_1}]),
+ case ibrowse:stream_next(Req_id) of
+ ok ->
+ test_stream_once(Req_id);
+ Err ->
+ Err
+ end;
+ {ibrowse_async_response_end, Req_id} ->
+ ok
+ end.
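+
+%% A quick sketch (not part of the original source) of driving the
+%% {stream_to, {self(), once}} flow above from a shell; the URL is
+%% illustrative and must be reachable:
+%%
+%%   ibrowse:start(),
+%%   ibrowse_test:test_stream_once("http://www.google.com", get, []).
+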
+%% Use ibrowse:set_max_sessions/3 and ibrowse:set_max_pipeline_size/3 to
+%% tweak settings before running the load test. The defaults are 10 and 10.
+test_load(Url, NumWorkers, NumReqsPerWorker) when is_list(Url),
+ is_integer(NumWorkers),
+ is_integer(NumReqsPerWorker),
+ NumWorkers > 0,
+ NumReqsPerWorker > 0 ->
+ proc_lib:spawn(?MODULE, send_reqs_1, [Url, NumWorkers, NumReqsPerWorker]).
+
+send_reqs_1(Url, NumWorkers, NumReqsPerWorker) ->
+ Start_time = now(),
+ ets:new(pid_table, [named_table, public]),
+ ets:new(ibrowse_test_results, [named_table, public]),
+ ets:new(ibrowse_errors, [named_table, public, ordered_set]),
+ init_results(),
+ process_flag(trap_exit, true),
+ log_msg("Starting spawning of workers...~n", []),
+ spawn_workers(Url, NumWorkers, NumReqsPerWorker),
+ log_msg("Finished spawning workers...~n", []),
+ do_wait(),
+ End_time = now(),
+ log_msg("All workers are done...~n", []),
+ log_msg("ibrowse_test_results table: ~n~p~n", [ets:tab2list(ibrowse_test_results)]),
+ log_msg("Start time: ~1000.p~n", [calendar:now_to_local_time(Start_time)]),
+ log_msg("End time : ~1000.p~n", [calendar:now_to_local_time(End_time)]),
+ Elapsed_time_secs = trunc(timer:now_diff(End_time, Start_time) / 1000000),
+ log_msg("Elapsed : ~p~n", [Elapsed_time_secs]),
+ log_msg("Reqs/sec : ~p~n", [round(trunc((NumWorkers*NumReqsPerWorker) / Elapsed_time_secs))]),
+ dump_errors().
+
+init_results() ->
+ ets:insert(ibrowse_test_results, {crash, 0}),
+ ets:insert(ibrowse_test_results, {send_failed, 0}),
+ ets:insert(ibrowse_test_results, {other_error, 0}),
+ ets:insert(ibrowse_test_results, {success, 0}),
+ ets:insert(ibrowse_test_results, {retry_later, 0}),
+ ets:insert(ibrowse_test_results, {trid_mismatch, 0}),
+ ets:insert(ibrowse_test_results, {success_no_trid, 0}),
+ ets:insert(ibrowse_test_results, {failed, 0}),
+ ets:insert(ibrowse_test_results, {timeout, 0}),
+ ets:insert(ibrowse_test_results, {req_id, 0}).
+
+spawn_workers(_Url, 0, _) ->
+ ok;
+spawn_workers(Url, NumWorkers, NumReqsPerWorker) ->
+ Pid = proc_lib:spawn_link(?MODULE, do_send_req, [Url, NumReqsPerWorker]),
+ ets:insert(pid_table, {Pid, []}),
+ spawn_workers(Url, NumWorkers - 1, NumReqsPerWorker).
+
+do_wait() ->
+ receive
+ {'EXIT', _, normal} ->
+ do_wait();
+ {'EXIT', Pid, Reason} ->
+ ets:delete(pid_table, Pid),
+ ets:insert(ibrowse_errors, {Pid, Reason}),
+ ets:update_counter(ibrowse_test_results, crash, 1),
+ do_wait();
+ Msg ->
+ io:format("Recvd unknown message...~p~n", [Msg]),
+ do_wait()
+ after 1000 ->
+ case ets:info(pid_table, size) of
+ 0 ->
+ done;
+ _ ->
+ do_wait()
+ end
+ end.
+
+do_send_req(Url, NumReqs) ->
+ do_send_req_1(Url, NumReqs).
+
+do_send_req_1(_Url, 0) ->
+ ets:delete(pid_table, self());
+do_send_req_1(Url, NumReqs) ->
+ Counter = integer_to_list(ets:update_counter(ibrowse_test_results, req_id, 1)),
+ case ibrowse:send_req(Url, [{"ib_req_id", Counter}], get, [], [], 10000) of
+ {ok, _Status, Headers, _Body} ->
+ case lists:keysearch("ib_req_id", 1, Headers) of
+ {value, {_, Counter}} ->
+ ets:update_counter(ibrowse_test_results, success, 1);
+ {value, _} ->
+ ets:update_counter(ibrowse_test_results, trid_mismatch, 1);
+ false ->
+ ets:update_counter(ibrowse_test_results, success_no_trid, 1)
+ end;
+ {error, req_timedout} ->
+ ets:update_counter(ibrowse_test_results, timeout, 1);
+ {error, send_failed} ->
+ ets:update_counter(ibrowse_test_results, send_failed, 1);
+ {error, retry_later} ->
+ ets:update_counter(ibrowse_test_results, retry_later, 1);
+ Err ->
+ ets:insert(ibrowse_errors, {now(), Err}),
+ ets:update_counter(ibrowse_test_results, other_error, 1),
+ ok
+ end,
+ do_send_req_1(Url, NumReqs-1).
+
+dump_errors() ->
+ case ets:info(ibrowse_errors, size) of
+ 0 ->
+ ok;
+ _ ->
+ {A, B, C} = now(),
+ Filename = lists:flatten(
+ io_lib:format("ibrowse_errors_~p_~p_~p.txt" , [A, B, C])),
+ case file:open(Filename, [write, delayed_write, raw]) of
+ {ok, Iod} ->
+ dump_errors(ets:first(ibrowse_errors), Iod);
+ Err ->
+ io:format("failed to create file ~s. Reason: ~p~n", [Filename, Err]),
+ ok
+ end
+ end.
+
+dump_errors('$end_of_table', Iod) ->
+ file:close(Iod);
+dump_errors(Key, Iod) ->
+ [{_, Term}] = ets:lookup(ibrowse_errors, Key),
+ file:write(Iod, io_lib:format("~p~n", [Term])),
+ dump_errors(ets:next(ibrowse_errors, Key), Iod).
+
+%%------------------------------------------------------------------------------
+%% Unit Tests
+%%------------------------------------------------------------------------------
+-define(TEST_LIST, [{"http://intranet/messenger", get},
+ {"http://www.google.co.uk", get},
+ {"http://www.google.com", get},
+ {"http://www.google.com", options},
+ {"https://mail.google.com", get},
+ {"http://www.sun.com", get},
+ {"http://www.oracle.com", get},
+ {"http://www.bbc.co.uk", get},
+ {"http://www.bbc.co.uk", trace},
+ {"http://www.bbc.co.uk", options},
+ {"http://yaws.hyber.org", get},
+ {"http://jigsaw.w3.org/HTTP/ChunkedScript", get},
+ {"http://jigsaw.w3.org/HTTP/TE/foo.txt", get},
+ {"http://jigsaw.w3.org/HTTP/TE/bar.txt", get},
+ {"http://jigsaw.w3.org/HTTP/connection.html", get},
+ {"http://jigsaw.w3.org/HTTP/cc.html", get},
+ {"http://jigsaw.w3.org/HTTP/cc-private.html", get},
+ {"http://jigsaw.w3.org/HTTP/cc-proxy-revalidate.html", get},
+ {"http://jigsaw.w3.org/HTTP/cc-nocache.html", get},
+ {"http://jigsaw.w3.org/HTTP/h-content-md5.html", get},
+ {"http://jigsaw.w3.org/HTTP/h-retry-after.html", get},
+ {"http://jigsaw.w3.org/HTTP/h-retry-after-date.html", get},
+ {"http://jigsaw.w3.org/HTTP/neg", get},
+ {"http://jigsaw.w3.org/HTTP/negbad", get},
+ {"http://jigsaw.w3.org/HTTP/400/toolong/", get},
+ {"http://jigsaw.w3.org/HTTP/300/", get},
+ {"http://jigsaw.w3.org/HTTP/Basic/", get, [{basic_auth, {"guest", "guest"}}]},
+ {"http://jigsaw.w3.org/HTTP/CL/", get},
+ {"http://www.httpwatch.com/httpgallery/chunked/", get},
+ {"https://github.com", get, [{ssl_options, [{depth, 2}]}]},
+ {local_test_fun, test_20122010, []}
+ ]).
+
+unit_tests() ->
+ unit_tests([]).
+
+unit_tests(Options) ->
+ application:start(crypto),
+ application:start(public_key),
+ application:start(ssl),
+ (catch ibrowse_test_server:start_server(8181, tcp)),
+ ibrowse:start(),
+ Options_1 = Options ++ [{connect_timeout, 5000}],
+ {Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]),
+ receive
+ {done, Pid} ->
+ ok;
+ {'DOWN', Ref, _, _, Info} ->
+ io:format("Test process crashed: ~p~n", [Info])
+ after 60000 ->
+ exit(Pid, kill),
+ io:format("Timed out waiting for tests to complete~n", [])
+ end.
+
+unit_tests_1(Parent, Options) ->
+ lists:foreach(fun({local_test_fun, Fun_name, Args}) ->
+ execute_req(local_test_fun, Fun_name, Args);
+ ({Url, Method}) ->
+ execute_req(Url, Method, Options);
+ ({Url, Method, X_Opts}) ->
+ execute_req(Url, Method, X_Opts ++ Options)
+ end, ?TEST_LIST),
+ Parent ! {done, self()}.
+
+verify_chunked_streaming() ->
+ verify_chunked_streaming([]).
+
+verify_chunked_streaming(Options) ->
+ io:format("~nVerifying that chunked streaming is working...~n", []),
+ Url = "http://www.httpwatch.com/httpgallery/chunked/",
+ io:format(" URL: ~s~n", [Url]),
+ io:format(" Fetching data without streaming...~n", []),
+ Result_without_streaming = ibrowse:send_req(
+ Url, [], get, [],
+ [{response_format, binary} | Options]),
+ io:format(" Fetching data with streaming as list...~n", []),
+ Async_response_list = do_async_req_list(
+ Url, get, [{response_format, list} | Options]),
+ io:format(" Fetching data with streaming as binary...~n", []),
+ Async_response_bin = do_async_req_list(
+ Url, get, [{response_format, binary} | Options]),
+ io:format(" Fetching data with streaming as binary, {active, once}...~n", []),
+ Async_response_bin_once = do_async_req_list(
+ Url, get, [once, {response_format, binary} | Options]),
+ Res1 = compare_responses(Result_without_streaming, Async_response_list, Async_response_bin),
+ Res2 = compare_responses(Result_without_streaming, Async_response_list, Async_response_bin_once),
+ case {Res1, Res2} of
+ {success, success} ->
+ io:format(" Chunked streaming working~n", []);
+ _ ->
+ ok
+ end.
+
+test_chunked_streaming_once() ->
+ test_chunked_streaming_once([]).
+
+test_chunked_streaming_once(Options) ->
+ io:format("~nTesting chunked streaming with the {stream_to, {Pid, once}} option...~n", []),
+ Url = "http://www.httpwatch.com/httpgallery/chunked/",
+ io:format(" URL: ~s~n", [Url]),
+ io:format(" Fetching data with streaming as binary, {active, once}...~n", []),
+ case do_async_req_list(Url, get, [once, {response_format, binary} | Options]) of
+ {ok, _, _, _} ->
+ io:format(" Success!~n", []);
+ Err ->
+ io:format(" Fail: ~p~n", [Err])
+ end.
+
+compare_responses({ok, St_code, _, Body}, {ok, St_code, _, Body}, {ok, St_code, _, Body}) ->
+ success;
+compare_responses({ok, St_code, _, Body_1}, {ok, St_code, _, Body_2}, {ok, St_code, _, Body_3}) ->
+ case Body_1 of
+ Body_2 ->
+ io:format("Body_1 and Body_2 match~n", []);
+ Body_3 ->
+ io:format("Body_1 and Body_3 match~n", []);
+ _ when Body_2 == Body_3 ->
+ io:format("Body_2 and Body_3 match~n", []);
+ _ ->
+ io:format("All three bodies are different!~n", [])
+ end,
+ io:format("Body_1 -> ~p~n", [Body_1]),
+ io:format("Body_2 -> ~p~n", [Body_2]),
+ io:format("Body_3 -> ~p~n", [Body_3]),
+ fail_bodies_mismatch;
+compare_responses(R1, R2, R3) ->
+ io:format("R1 -> ~p~n", [R1]),
+ io:format("R2 -> ~p~n", [R2]),
+ io:format("R3 -> ~p~n", [R3]),
+ fail.
+
+%% do_async_req_list(Url) ->
+%% do_async_req_list(Url, get).
+
+%% do_async_req_list(Url, Method) ->
+%% do_async_req_list(Url, Method, [{stream_to, self()},
+%% {stream_chunk_size, 1000}]).
+
+do_async_req_list(Url, Method, Options) ->
+ {Pid,_} = erlang:spawn_monitor(?MODULE, i_do_async_req_list,
+ [self(), Url, Method,
+ Options ++ [{stream_chunk_size, 1000}]]),
+%% io:format("Spawned process ~p~n", [Pid]),
+ wait_for_resp(Pid).
+
+wait_for_resp(Pid) ->
+ receive
+ {async_result, Pid, Res} ->
+ Res;
+ {async_result, Other_pid, _} ->
+ io:format("~p: Waiting for result from ~p: got from ~p~n", [self(), Pid, Other_pid]),
+ wait_for_resp(Pid);
+ {'DOWN', _, _, Pid, Reason} ->
+ {'EXIT', Reason};
+ {'DOWN', _, _, _, _} ->
+ wait_for_resp(Pid);
+ Msg ->
+ io:format("Recvd unknown message: ~p~n", [Msg]),
+ wait_for_resp(Pid)
+ after 100000 ->
+ {error, timeout}
+ end.
+
+i_do_async_req_list(Parent, Url, Method, Options) ->
+ Options_1 = case lists:member(once, Options) of
+ true ->
+ [{stream_to, {self(), once}} | (Options -- [once])];
+ false ->
+ [{stream_to, self()} | Options]
+ end,
+ Res = ibrowse:send_req(Url, [], Method, [], Options_1),
+ case Res of
+ {ibrowse_req_id, Req_id} ->
+ Result = wait_for_async_resp(Req_id, Options, undefined, undefined, []),
+ Parent ! {async_result, self(), Result};
+ Err ->
+ Parent ! {async_result, self(), Err}
+ end.
+
+wait_for_async_resp(Req_id, Options, Acc_Stat_code, Acc_Headers, Body) ->
+ receive
+ {ibrowse_async_headers, Req_id, StatCode, Headers} ->
+ %% io:format("Recvd headers...~n", []),
+ maybe_stream_next(Req_id, Options),
+ wait_for_async_resp(Req_id, Options, StatCode, Headers, Body);
+ {ibrowse_async_response_end, Req_id} ->
+ %% io:format("Recvd end of response.~n", []),
+ Body_1 = list_to_binary(lists:reverse(Body)),
+ {ok, Acc_Stat_code, Acc_Headers, Body_1};
+ {ibrowse_async_response, Req_id, Data} ->
+ maybe_stream_next(Req_id, Options),
+ %% io:format("Recvd data...~n", []),
+ wait_for_async_resp(Req_id, Options, Acc_Stat_code, Acc_Headers, [Data | Body]);
+ {ibrowse_async_response, Req_id, {error, _} = Err} ->
+ {ok, Acc_Stat_code, Acc_Headers, Err};
+ Err ->
+ {ok, Acc_Stat_code, Acc_Headers, Err}
+ after 10000 ->
+ {timeout, Acc_Stat_code, Acc_Headers, Body}
+ end.
+
+maybe_stream_next(Req_id, Options) ->
+ case lists:member(once, Options) of
+ true ->
+ ibrowse:stream_next(Req_id);
+ false ->
+ ok
+ end.
+
+execute_req(local_test_fun, Method, Args) ->
+ io:format(" ~-54.54w: ", [Method]),
+ Result = (catch apply(?MODULE, Method, Args)),
+ io:format("~p~n", [Result]);
+execute_req(Url, Method, Options) ->
+ io:format("~7.7w, ~50.50s: ", [Method, Url]),
+ Result = (catch ibrowse:send_req(Url, [], Method, [], Options)),
+ case Result of
+ {ok, SCode, _H, _B} ->
+ io:format("Status code: ~p~n", [SCode]);
+ Err ->
+ io:format("~p~n", [Err])
+ end.
+
+ue_test() ->
+ ue_test(lists:duplicate(1024, $?)).
+ue_test(Data) ->
+ {Time, Res} = timer:tc(ibrowse_lib, url_encode, [Data]),
+ io:format("Time -> ~p~n", [Time]),
+ io:format("Data Length -> ~p~n", [length(Data)]),
+ io:format("Res Length -> ~p~n", [length(Res)]).
+% io:format("Result -> ~s~n", [Res]).
+
+log_msg(Fmt, Args) ->
+ io:format("~s -- " ++ Fmt,
+ [ibrowse_lib:printable_date() | Args]).
+
+%%------------------------------------------------------------------------------
+%%
+%%------------------------------------------------------------------------------
+
+test_20122010() ->
+ test_20122010("http://localhost:8181").
+
+test_20122010(Url) ->
+ {ok, Pid} = ibrowse:spawn_worker_process(Url),
+ Expected_resp = <<"1-2-3-4-5-6-7-8-9-10-11-12-13-14-15-16-17-18-19-20-21-22-23-24-25-26-27-28-29-30-31-32-33-34-35-36-37-38-39-40-41-42-43-44-45-46-47-48-49-50-51-52-53-54-55-56-57-58-59-60-61-62-63-64-65-66-67-68-69-70-71-72-73-74-75-76-77-78-79-80-81-82-83-84-85-86-87-88-89-90-91-92-93-94-95-96-97-98-99-100">>,
+ Test_parent = self(),
+ Fun = fun() ->
+ do_test_20122010(Url, Pid, Expected_resp, Test_parent)
+ end,
+ Pids = [erlang:spawn_monitor(Fun) || _ <- lists:seq(1,10)],
+ wait_for_workers(Pids).
+
+wait_for_workers([{Pid, _Ref} | Pids]) ->
+ receive
+ {Pid, success} ->
+ wait_for_workers(Pids)
+ after 60000 ->
+ test_failed
+ end;
+wait_for_workers([]) ->
+ success.
+
+do_test_20122010(Url, Pid, Expected_resp, Test_parent) ->
+ do_test_20122010(10, Url, Pid, Expected_resp, Test_parent).
+
+do_test_20122010(0, _Url, _Pid, _Expected_resp, Test_parent) ->
+ Test_parent ! {self(), success};
+do_test_20122010(Rem_count, Url, Pid, Expected_resp, Test_parent) ->
+ {ibrowse_req_id, Req_id} = ibrowse:send_req_direct(
+ Pid,
+ Url ++ "/ibrowse_stream_once_chunk_pipeline_test",
+ [], get, [],
+ [{stream_to, {self(), once}},
+ {inactivity_timeout, 10000},
+ {include_ibrowse_req_id, true}]),
+ do_trace("~p -- sent request ~1000.p~n", [self(), Req_id]),
+ Req_id_str = lists:flatten(io_lib:format("~1000.p",[Req_id])),
+ receive
+ {ibrowse_async_headers, Req_id, "200", Headers} ->
+ case lists:keysearch("x-ibrowse-request-id", 1, Headers) of
+ {value, {_, Req_id_str}} ->
+ ok;
+ {value, {_, Req_id_1}} ->
+ do_trace("~p -- Sent req-id: ~1000.p. Recvd: ~1000.p~n",
+ [self(), Req_id, Req_id_1]),
+ exit(req_id_mismatch)
+ end
+ after 5000 ->
+ do_trace("~p -- response headers not received~n", [self()]),
+ exit({timeout, test_failed})
+ end,
+ do_trace("~p -- response headers received~n", [self()]),
+ ok = ibrowse:stream_next(Req_id),
+ case do_test_20122010_1(Expected_resp, Req_id, []) of
+ true ->
+ do_test_20122010(Rem_count - 1, Url, Pid, Expected_resp, Test_parent);
+ false ->
+ Test_parent ! {self(), failed}
+ end.
+
+do_test_20122010_1(Expected_resp, Req_id, Acc) ->
+ receive
+ {ibrowse_async_response, Req_id, Body_part} ->
+ ok = ibrowse:stream_next(Req_id),
+ do_test_20122010_1(Expected_resp, Req_id, [Body_part | Acc]);
+ {ibrowse_async_response_end, Req_id} ->
+ Acc_1 = list_to_binary(lists:reverse(Acc)),
+ Result = Acc_1 == Expected_resp,
+ do_trace("~p -- End of response. Result: ~p~n", [self(), Result]),
+ Result
+ after 1000 ->
+ exit({timeout, test_failed})
+ end.
+
+do_trace(Fmt, Args) ->
+ do_trace(get(my_trace_flag), Fmt, Args).
+
+do_trace(true, Fmt, Args) ->
+ io:format("~s -- " ++ Fmt, [ibrowse_lib:printable_date() | Args]);
+do_trace(_, _, _) ->
+ ok.
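
Taken together, the module above has three useful entry points. A minimal sketch of driving them from an Erlang shell (URL, worker and request counts are illustrative; some targets need network access):

```erlang
%% Run the bundled unit tests; this also starts the local test server
%% on port 8181 and exercises the URLs in ?TEST_LIST.
ibrowse_test:unit_tests(),
%% Check that chunked transfer decoding agrees across streaming modes.
ibrowse_test:verify_chunked_streaming(),
%% Load test: 10 workers x 100 requests; results land in ETS tables
%% and are logged when all workers are done.
ibrowse_test:test_load("http://localhost:8181/", 10, 100).
```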
diff --git a/deps/ibrowse/test/Makefile b/deps/ibrowse/test/Makefile
new file mode 100644
index 00000000..2851ed2e
--- /dev/null
+++ b/deps/ibrowse/test/Makefile
@@ -0,0 +1,19 @@
+ERL_FILES = ibrowse_test_server.erl
+
+
+INCLUDE_DIRS = -I./
+
+ERLC ?= erlc
+ERLC_EMULATOR ?= erl -boot start_clean
+COMPILER_OPTIONS = -W +warn_unused_vars +nowarn_shadow_vars +warn_unused_import
+
+.SUFFIXES: .erl .beam $(SUFFIXES)
+
+all: $(ERL_FILES:%.erl=%.beam)
+
+%.beam: %.erl
+ ${ERLC} $(COMPILER_OPTIONS) $(INCLUDE_DIRS) -o ./ $<
+
+clean:
+ rm -f *.beam
+
diff --git a/deps/ibrowse/test/ibrowse_lib_tests.erl b/deps/ibrowse/test/ibrowse_lib_tests.erl
new file mode 100644
index 00000000..6f613e96
--- /dev/null
+++ b/deps/ibrowse/test/ibrowse_lib_tests.erl
@@ -0,0 +1,135 @@
+%%% File : ibrowse_lib_tests.erl
+%%% Authors : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>,
+%%% Filipe David Manana <fdmanana@apache.org>
+%%% Description : Tests for the module ibrowse_lib.erl
+%%% Created : 12 April 2011 by Filipe David Manana <fdmanana@apache.org>
+
+-module(ibrowse_lib_tests).
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("ibrowse/include/ibrowse.hrl").
+
+
+parse_urls_test_() ->
+ {timeout, 60, [fun parse_urls/0]}.
+
+
+parse_urls() ->
+ ?assertMatch(#url{
+ abspath = "http://localhost",
+ host = "localhost",
+ host_type = hostname,
+ port = 80,
+ path = "/",
+ username = undefined,
+ password = undefined,
+ protocol = http
+ },
+ ibrowse_lib:parse_url("http://localhost")),
+ ?assertMatch(#url{
+ abspath = "http://localhost:80/",
+ host = "localhost",
+ host_type = hostname,
+ port = 80,
+ path = "/",
+ username = undefined,
+ password = undefined,
+ protocol = http
+ },
+ ibrowse_lib:parse_url("http://localhost:80/")),
+ ?assertMatch(#url{
+ abspath = "http://127.0.0.1:8000/",
+ host = "127.0.0.1",
+ host_type = ipv4_address,
+ port = 8000,
+ path = "/",
+ username = undefined,
+ password = undefined,
+ protocol = http
+ },
+ ibrowse_lib:parse_url("http://127.0.0.1:8000/")),
+ ?assertMatch(#url{
+ abspath = "https://foo:bar@127.0.0.1:8000/test",
+ host = "127.0.0.1",
+ host_type = ipv4_address,
+ port = 8000,
+ path = "/test",
+ username = "foo",
+ password = "bar",
+ protocol = https
+ },
+ ibrowse_lib:parse_url("https://foo:bar@127.0.0.1:8000/test")),
+ ?assertMatch(#url{
+ abspath = "https://[::1]",
+ host = "::1",
+ host_type = ipv6_address,
+ port = 443,
+ path = "/",
+ username = undefined,
+ password = undefined,
+ protocol = https
+ },
+ ibrowse_lib:parse_url("https://[::1]")),
+ ?assertMatch(#url{
+ abspath = "http://[::1]:8080",
+ host = "::1",
+ host_type = ipv6_address,
+ port = 8080,
+ path = "/",
+ username = undefined,
+ password = undefined,
+ protocol = http
+ },
+ ibrowse_lib:parse_url("http://[::1]:8080")),
+ ?assertMatch(#url{
+ abspath = "http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:8081/index.html",
+ host = "FEDC:BA98:7654:3210:FEDC:BA98:7654:3210",
+ host_type = ipv6_address,
+ port = 8081,
+ path = "/index.html",
+ username = undefined,
+ password = undefined,
+ protocol = http
+ },
+ ibrowse_lib:parse_url("http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:8081/index.html")),
+ ?assertMatch(#url{
+ abspath = "http://[1080:0:0:0:8:800:200C:417A]/foo/bar",
+ host = "1080:0:0:0:8:800:200C:417A",
+ host_type = ipv6_address,
+ port = 80,
+ path = "/foo/bar",
+ username = undefined,
+ password = undefined,
+ protocol = http
+ },
+ ibrowse_lib:parse_url("http://[1080:0:0:0:8:800:200C:417A]/foo/bar")),
+ ?assertMatch(#url{
+ abspath = "http://[1080:0:0:0:8:800:200C:417A]:8080/foo/bar",
+ host = "1080:0:0:0:8:800:200C:417A",
+ host_type = ipv6_address,
+ port = 8080,
+ path = "/foo/bar",
+ username = undefined,
+ password = undefined,
+ protocol = http
+ },
+ ibrowse_lib:parse_url("http://[1080:0:0:0:8:800:200C:417A]:8080/foo/bar")),
+ ?assertMatch(#url{
+ abspath = "http://[::192.9.5.5]:6000/foo?q=bar",
+ host = "::192.9.5.5",
+ host_type = ipv6_address,
+ port = 6000,
+ path = "/foo?q=bar",
+ username = undefined,
+ password = undefined,
+ protocol = http
+ },
+ ibrowse_lib:parse_url("http://[::192.9.5.5]:6000/foo?q=bar")),
+ ?assertMatch({error, {invalid_ipv6_address, ":1080:0:0:0:8:800:200C:417A:"}},
+ ibrowse_lib:parse_url("http://[:1080:0:0:0:8:800:200C:417A:]:6000/foo?q=bar")),
+ ?assertMatch({error, {invalid_ipv6_address, "12::z"}},
+ ibrowse_lib:parse_url("http://[12::z]")),
+ ?assertMatch({error, {invalid_username_or_host, _}},
+ ibrowse_lib:parse_url("http://foo[1080:0:0:0:8:800:200C:417A]:6000")),
+ ?assertMatch({error, missing_password},
+ ibrowse_lib:parse_url("http://foo:[1080:0:0:0:8:800:200C:417A]:6000")),
+ ok.
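
A minimal sketch distilled from the assertions above (the module name `parse_url_demo` is invented; it assumes ibrowse is on the code path so the `include_lib` resolves):

```erlang
-module(parse_url_demo).
-include_lib("ibrowse/include/ibrowse.hrl").
-export([check/0]).

check() ->
    %% Missing URL components are defaulted: port 80 for http, path "/".
    #url{port = 80, path = "/"} = ibrowse_lib:parse_url("http://localhost"),
    %% IPv6 literals are bracketed in the URL but stored unbracketed.
    #url{host = "::1", host_type = ipv6_address, port = 8080} =
        ibrowse_lib:parse_url("http://[::1]:8080"),
    %% Malformed input yields an error tuple rather than a #url{} record.
    {error, _} = ibrowse_lib:parse_url("http://[12::z]"),
    ok.
```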
diff --git a/deps/ibrowse/test/ibrowse_test_server.erl b/deps/ibrowse/test/ibrowse_test_server.erl
new file mode 100644
index 00000000..45c69587
--- /dev/null
+++ b/deps/ibrowse/test/ibrowse_test_server.erl
@@ -0,0 +1,195 @@
+%%% File : ibrowse_test_server.erl
+%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+%%% Description : A server to simulate various test scenarios
+%%% Created : 17 Oct 2010 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
+
+-module(ibrowse_test_server).
+-export([
+ start_server/2,
+ stop_server/1
+ ]).
+
+-record(request, {method, uri, version, headers = [], body = []}).
+
+-define(dec2hex(X), erlang:integer_to_list(X, 16)).
+
+start_server(Port, Sock_type) ->
+ Fun = fun() ->
+ register(server_proc_name(Port), self()),
+ case do_listen(Sock_type, Port, [{active, false},
+ {reuseaddr, true},
+ {nodelay, true},
+ {packet, http}]) of
+ {ok, Sock} ->
+ do_trace("Server listening on port: ~p~n", [Port]),
+ accept_loop(Sock, Sock_type);
+ Err ->
+ erlang:error(
+ lists:flatten(
+ io_lib:format(
+ "Failed to start server on port ~p. ~p~n",
+ [Port, Err]))),
+ exit({listen_error, Err})
+ end
+ end,
+ spawn_link(Fun).
+
+stop_server(Port) ->
+ exit(whereis(server_proc_name(Port)), kill).
+
+server_proc_name(Port) ->
+ list_to_atom("ibrowse_test_server_"++integer_to_list(Port)).
+
+do_listen(tcp, Port, Opts) ->
+ gen_tcp:listen(Port, Opts);
+do_listen(ssl, Port, Opts) ->
+ application:start(crypto),
+ application:start(ssl),
+ ssl:listen(Port, Opts).
+
+do_accept(tcp, Listen_sock) ->
+ gen_tcp:accept(Listen_sock);
+do_accept(ssl, Listen_sock) ->
+ ssl:ssl_accept(Listen_sock).
+
+accept_loop(Sock, Sock_type) ->
+ case do_accept(Sock_type, Sock) of
+ {ok, Conn} ->
+ Pid = spawn_link(
+ fun() ->
+ server_loop(Conn, Sock_type, #request{})
+ end),
+ set_controlling_process(Conn, Sock_type, Pid),
+ Pid ! {setopts, [{active, true}]},
+ accept_loop(Sock, Sock_type);
+ Err ->
+ Err
+ end.
+
+set_controlling_process(Sock, tcp, Pid) ->
+ gen_tcp:controlling_process(Sock, Pid);
+set_controlling_process(Sock, ssl, Pid) ->
+ ssl:controlling_process(Sock, Pid).
+
+setopts(Sock, tcp, Opts) ->
+ inet:setopts(Sock, Opts);
+setopts(Sock, ssl, Opts) ->
+ ssl:setopts(Sock, Opts).
+
+server_loop(Sock, Sock_type, #request{headers = Headers} = Req) ->
+ receive
+ {http, Sock, {http_request, HttpMethod, HttpUri, HttpVersion}} ->
+ server_loop(Sock, Sock_type, Req#request{method = HttpMethod,
+ uri = HttpUri,
+ version = HttpVersion});
+ {http, Sock, {http_header, _, _, _, _} = H} ->
+ server_loop(Sock, Sock_type, Req#request{headers = [H | Headers]});
+ {http, Sock, http_eoh} ->
+ process_request(Sock, Sock_type, Req),
+ server_loop(Sock, Sock_type, #request{});
+ {http, Sock, {http_error, Err}} ->
+ do_trace("Error parsing HTTP request:~n"
+ "Req so far : ~p~n"
+                     "Err        : ~p~n", [Req, Err]),
+ exit({http_error, Err});
+ {setopts, Opts} ->
+ setopts(Sock, Sock_type, Opts),
+ server_loop(Sock, Sock_type, Req);
+ {tcp_closed, Sock} ->
+ do_trace("Client closed connection~n", []),
+ ok;
+ Other ->
+ do_trace("Recvd unknown msg: ~p~n", [Other]),
+ exit({unknown_msg, Other})
+ after 5000 ->
+ do_trace("Timing out client connection~n", []),
+ ok
+ end.
+
+do_trace(Fmt, Args) ->
+ do_trace(get(my_trace_flag), Fmt, Args).
+
+do_trace(true, Fmt, Args) ->
+ io:format("~s -- " ++ Fmt, [ibrowse_lib:printable_date() | Args]);
+do_trace(_, _, _) ->
+ ok.
+
+process_request(Sock, Sock_type,
+ #request{method='GET',
+ headers = Headers,
+ uri = {abs_path, "/ibrowse_stream_once_chunk_pipeline_test"}} = Req) ->
+ Req_id = case lists:keysearch("X-Ibrowse-Request-Id", 3, Headers) of
+ false ->
+ "";
+ {value, {http_header, _, _, _, Req_id_1}} ->
+ Req_id_1
+ end,
+ Req_id_header = ["x-ibrowse-request-id: ", Req_id, "\r\n"],
+ do_trace("Recvd req: ~p~n", [Req]),
+ Body = string:join([integer_to_list(X) || X <- lists:seq(1,100)], "-"),
+ Chunked_body = chunk_request_body(Body, 50),
+ Resp_1 = [<<"HTTP/1.1 200 OK\r\n">>,
+ Req_id_header,
+ <<"Transfer-Encoding: chunked\r\n\r\n">>],
+ Resp_2 = Chunked_body,
+ do_send(Sock, Sock_type, Resp_1),
+ timer:sleep(100),
+ do_send(Sock, Sock_type, Resp_2);
+process_request(Sock, Sock_type, Req) ->
+ do_trace("Recvd req: ~p~n", [Req]),
+ Resp = <<"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n">>,
+ do_send(Sock, Sock_type, Resp).
+
+do_send(Sock, tcp, Resp) ->
+ ok = gen_tcp:send(Sock, Resp);
+do_send(Sock, ssl, Resp) ->
+ ok = ssl:send(Sock, Resp).
+
+
+%%------------------------------------------------------------------------------
+%% Utility functions
+%%------------------------------------------------------------------------------
+
+chunk_request_body(Body, _ChunkSize) when is_tuple(Body) orelse
+ is_function(Body) ->
+ Body;
+chunk_request_body(Body, ChunkSize) ->
+ chunk_request_body(Body, ChunkSize, []).
+
+chunk_request_body(Body, _ChunkSize, Acc) when Body == <<>>; Body == [] ->
+ LastChunk = "0\r\n",
+ lists:reverse(["\r\n", LastChunk | Acc]);
+chunk_request_body(Body, ChunkSize, Acc) when is_binary(Body),
+ size(Body) >= ChunkSize ->
+ <<ChunkBody:ChunkSize/binary, Rest/binary>> = Body,
+ Chunk = [?dec2hex(ChunkSize),"\r\n",
+ ChunkBody, "\r\n"],
+ chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
+chunk_request_body(Body, _ChunkSize, Acc) when is_binary(Body) ->
+ BodySize = size(Body),
+ Chunk = [?dec2hex(BodySize),"\r\n",
+ Body, "\r\n"],
+ LastChunk = "0\r\n",
+ lists:reverse(["\r\n", LastChunk, Chunk | Acc]);
+chunk_request_body(Body, ChunkSize, Acc) when length(Body) >= ChunkSize ->
+ {ChunkBody, Rest} = split_list_at(Body, ChunkSize),
+ Chunk = [?dec2hex(ChunkSize),"\r\n",
+ ChunkBody, "\r\n"],
+ chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
+chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
+ BodySize = length(Body),
+ Chunk = [?dec2hex(BodySize),"\r\n",
+ Body, "\r\n"],
+ LastChunk = "0\r\n",
+ lists:reverse(["\r\n", LastChunk, Chunk | Acc]).
+
+split_list_at(List, N) ->
+ split_list_at(List, N, []).
+
+split_list_at([], _, Acc) ->
+ {lists:reverse(Acc), []};
+split_list_at(List2, 0, List1) ->
+ {lists:reverse(List1), List2};
+split_list_at([H | List2], N, List1) ->
+ split_list_at(List2, N-1, [H | List1]).
+
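A short sketch of using the test server above end to end (port 8181 matches what ibrowse_test expects and must be free):

```erlang
%% Start the server, exercise it with ibrowse, then stop it.
ibrowse_test_server:start_server(8181, tcp),
ibrowse:start(),
%% The catch-all handler replies 200 with an empty body; requests to
%% /ibrowse_stream_once_chunk_pipeline_test get a chunked response,
%% framed by chunk_request_body/2 as <size-in-hex>\r\n<data>\r\n ...
%% and terminated by 0\r\n\r\n.
{ok, "200", _Headers, _Body} =
    ibrowse:send_req("http://localhost:8181/", [], get),
ibrowse_test_server:stop_server(8181).
```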
diff --git a/deps/meck/.scripts/tag_with_changelog.sh b/deps/meck/.scripts/tag_with_changelog.sh
new file mode 100755
index 00000000..e6a4d3d1
--- /dev/null
+++ b/deps/meck/.scripts/tag_with_changelog.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+# Install for alias 'tag-cl' with:
+# git config alias.tag-cl '!.scripts/tag_with_changelog.sh'
+
+set -e # Abort on first failure, so we don't mess something up
+
+if [ -z "$1" ]; then
+ # Missing tag name
+ echo "usage: git tag-cl <tag>" >&2
+ exit 129
+fi
+if [ ! -f CHANGELOG ]; then
+ # No changelog to be used
+ echo "fatal: CHANGELOG missing" >&2
+ exit 128
+fi
+if [ ! -z "$(git status --short)" ]; then
+ # Sanity check
+ echo "fatal: dirty repository" >&2
+ exit 128
+fi
+
+CHANGELOG=$(cat CHANGELOG)
+
+# Clean up changelog
+echo "" > CHANGELOG
+git add CHANGELOG
+
+# Update version in .app file
+sed -i "" -e "s/{vsn, .*}/{vsn, \"$1\"}/g" src/meck.app.src
+sed -i "" -e "s/@version .*/@version \"$1\"/g" doc/overview.edoc
+git add src/meck.app.src
+git add doc/overview.edoc
+
+# Commit, tag and push
+git commit -m "Version $1"
+git tag -s $1 -m "Version $1
+
+$CHANGELOG"
+git push && git push --tags
diff --git a/deps/meck/.travis.yml b/deps/meck/.travis.yml
new file mode 100644
index 00000000..6e520dbe
--- /dev/null
+++ b/deps/meck/.travis.yml
@@ -0,0 +1,8 @@
+language: erlang
+notifications:
+ disabled: true
+otp_release:
+ - R15B
+ - R14B04
+ - R14B03
+ - R14B02
diff --git a/deps/meck/CHANGELOG b/deps/meck/CHANGELOG
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/deps/meck/CHANGELOG
@@ -0,0 +1 @@
+
diff --git a/deps/meck/LICENSE b/deps/meck/LICENSE
new file mode 100644
index 00000000..e454a525
--- /dev/null
+++ b/deps/meck/LICENSE
@@ -0,0 +1,178 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
diff --git a/deps/meck/Makefile b/deps/meck/Makefile
new file mode 100644
index 00000000..5ac14ee2
--- /dev/null
+++ b/deps/meck/Makefile
@@ -0,0 +1,14 @@
+REBAR=`which rebar || ./rebar`
+
+all: compile
+
+compile:
+ @$(REBAR) compile
+
+test: force
+ @$(REBAR) eunit
+
+clean:
+ @$(REBAR) clean
+
+force: ;
diff --git a/deps/meck/NOTICE b/deps/meck/NOTICE
new file mode 100644
index 00000000..18ce5a73
--- /dev/null
+++ b/deps/meck/NOTICE
@@ -0,0 +1,5 @@
+Copyright 2011 Adam Lindberg
+
+Copyright 2011 Erlang Solutions
+This product contains code developed at Erlang Solutions.
+(http://www.erlang-solutions.com/)
diff --git a/deps/meck/README.md b/deps/meck/README.md
new file mode 100644
index 00000000..6613a088
--- /dev/null
+++ b/deps/meck/README.md
@@ -0,0 +1,210 @@
+[![Build Status](https://secure.travis-ci.org/eproxus/meck.png)](http://travis-ci.org/eproxus/meck)
+
+ * [Introduction](#introduction)
+ * [Features](#features)
+ * [Examples](#examples)
+ * [Build](#build)
+ * [Install](#install)
+ * [Contribute](#contribute)
+
+meck
+====
+A mocking library for Erlang.
+
+
+<a name='introduction'>
+
+Introduction
+------------
+
+With meck you can easily mock modules in Erlang. You can also perform
+some basic validations on the mocked modules, such as making sure no
+unexpected exceptions occurred or looking at the call history.
+
+
+<a name='features'>
+
+Features
+--------
+
+ * Automatic renaming and restoration of original modules
+ * Automatic backup and restore of cover data
+ * Changing return values using sequences and loops of static values
+ * Pass through: use functions from the original module
+ * Mock is linked to the creating process (disable with `no_link`)
+ * Complete call history showing calls, results and exceptions
+ * Mocking of sticky modules (using the option `unstick`)
+ * Throwing of expected exceptions that keeps the module valid
+
+
+<a name='examples'>
+
+Examples
+--------
+Here's an example of using meck in the Erlang shell:
+
+```erl
+Eshell V5.8.4 (abort with ^G)
+1> meck:new(dog).
+ok
+2> meck:expect(dog, bark, fun() -> "Woof!" end).
+ok
+3> dog:bark().
+"Woof!"
+4> meck:validate(dog).
+true
+5> meck:unload(dog).
+ok
+6> dog:bark().
+** exception error: undefined function dog:bark/0
+```
+
+Exceptions can be anticipated by meck (resulting in validation still
+passing). This is intended to be used to test that code which can and
+should handle certain exceptions really does take care of them:
+
+```erl
+5> meck:expect(dog, meow, fun() -> meck:exception(error, not_a_cat) end).
+ok
+6> catch dog:meow().
+{'EXIT',{not_a_cat,[{meck,exception,2},
+ {meck,exec,4},
+ {dog,meow,[]},
+ {erl_eval,do_apply,5},
+ {erl_eval,expr,5},
+ {shell,exprs,6},
+ {shell,eval_exprs,6},
+ {shell,eval_loop,3}]}}
+7> meck:validate(dog).
+true
+```
+
+Normal Erlang exceptions result in a failed validation. The following
+example is just to demonstrate the behavior, in real test code the
+exception would normally come from the code under test (which should,
+if not expected, invalidate the mocked module):
+
+```erl
+8> meck:expect(dog, jump, fun(Height) when Height > 3 ->
+ erlang:error(too_high);
+ (Height) ->
+ ok
+ end).
+ok
+9> dog:jump(2).
+ok
+10> catch dog:jump(5).
+{'EXIT',{too_high,[{meck,exec,4},
+ {dog,jump,[5]},
+ {erl_eval,do_apply,5},
+ {erl_eval,expr,5},
+ {shell,exprs,6},
+ {shell,eval_exprs,6},
+ {shell,eval_loop,3}]}}
+11> meck:validate(dog).
+false
+```
+
+Here's an example of using meck inside an EUnit test case:
+
+```erlang
+my_test() ->
+ meck:new(my_library_module),
+ meck:expect(my_library_module, fib, fun(8) -> 21 end),
+ ?assertEqual(21, code_under_test:run(fib, 8)), % Uses my_library_module
+ ?assert(meck:validate(my_library_module)),
+ meck:unload(my_library_module).
+```
+
+Pass-through is used when the original functionality of a module
+should be kept. When the option `passthrough` is used when calling
+`new/2` all functions in the original module will be kept in the
+mock. These can later be overridden by calling `expect/3` or
+`expect/4`.
+
+```erl
+Eshell V5.8.4 (abort with ^G)
+1> meck:new(string, [unstick, passthrough]).
+ok
+2> string:strip(" test ").
+"test"
+```
+
+It's also possible to pass calls to the original function, allowing us
+to override only a certain behavior of a function (this usage is
+compatible with the `passthrough` option). `passthrough/1` always calls
+the original function with the same name as the expect it is defined
+in:
+
+```erl
+Eshell V5.8.4 (abort with ^G)
+1> meck:new(string, [unstick]).
+ok
+2> meck:expect(string, strip, fun(String) -> meck:passthrough([String]) end).
+ok
+3> string:strip(" test ").
+"test"
+4> meck:unload(string).
+ok
+5> string:strip(" test ").
+"test"
+```
+
+<a name='build'>
+
+Build
+-----
+
+meck requires [rebar][1] to build. To build meck, go to the meck
+directory and simply type:
+
+```sh
+rebar compile
+```
+
+To make sure meck works on your platform, run the tests:
+
+```sh
+rebar eunit
+```
+
+Two things might seem alarming when running the tests:
+
+ 1. Warnings emitted by cover
+ 2. An exception printed by SASL
+
+Both are expected due to the way Erlang currently prints errors. The
+important line to look for is `All XX tests passed`; if it appears,
+everything is correct.
+
+
+<a name='install'>
+
+Install
+-------
+
+To install meck permanently, use of [Agner][2] is recommended:
+
+```sh
+agner install meck
+```
+
+If you want to install your own built version of meck add the ebin
+directory to your Erlang code path or move the meck folder into your
+release folder and make sure that folder is in your `ERL_LIBS`
+environment variable.
+
+
+<a name='contribute'>
+
+Contribute
+----------
+
+Patches are greatly appreciated!
+
+Should you find yourself using meck and have issues, comments or
+feedback, please [create an issue here on GitHub][3].
+
+ [1]: https://github.com/basho/rebar "Rebar - A build tool for Erlang"
+ [2]: http://erlagner.org/ "Agner - Erlang Package Index & Package Manager"
+ [3]: http://github.com/eproxus/meck/issues "meck issues"
diff --git a/deps/meck/doc/overview.edoc b/deps/meck/doc/overview.edoc
new file mode 100644
index 00000000..b04686e6
--- /dev/null
+++ b/deps/meck/doc/overview.edoc
@@ -0,0 +1,25 @@
+%%==============================================================================
+%% Copyright 2010 Erlang Solutions Ltd.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%==============================================================================
+
+@author Adam Lindberg <eproxus@gmail.com>
+@copyright 2011, Adam Lindberg & Erlang Solutions Ltd
+@version "0.7.2"
+@title meck, a Mocking Library for Erlang
+
+@doc
+
+== About ==
+meck is a mocking library for Erlang.
diff --git a/deps/meck/rebar.config b/deps/meck/rebar.config
new file mode 100644
index 00000000..c589f4c7
--- /dev/null
+++ b/deps/meck/rebar.config
@@ -0,0 +1,6 @@
+{erl_opts, [warnings_as_errors, debug_info]}.
+{xref_checks, [undefined_function_calls]}.
+{dialyzer_opts, [{warnings, [unmatched_returns]}]}.
+
+{cover_enabled, true}.
+{clean_files, [".eunit", "ebin/*.beam", "test/*.beam"]}.
diff --git a/deps/meck/src/meck.app.src b/deps/meck/src/meck.app.src
new file mode 100644
index 00000000..d9f57979
--- /dev/null
+++ b/deps/meck/src/meck.app.src
@@ -0,0 +1,9 @@
+%% -*- mode: erlang; -*-
+{application, meck,
+ [{description, "A mocking framework for Erlang"},
+ {vsn, "0.7.2"},
+ {modules, []},
+ {registered, []},
+ {applications, [kernel, stdlib]},
+ {build_dependencies, []},
+ {env, []}]}.
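
Before the implementation that follows, a small sketch of the verification API it provides (the mocked module `db` and its functions are invented for illustration):

```erlang
meck:new(db),
meck:expect(db, fetch, 1, {ok, row}),      %% arity/value form of expect/4
{ok, row} = db:fetch(key),
true = meck:called(db, fetch, [key]),      %% did anyone call db:fetch(key)?
1    = meck:num_calls(db, fetch, ['_']),   %% '_' wildcards any argument
meck:unload(db).
```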
diff --git a/deps/meck/src/meck.erl b/deps/meck/src/meck.erl
new file mode 100644
index 00000000..1669c32f
--- /dev/null
+++ b/deps/meck/src/meck.erl
@@ -0,0 +1,813 @@
+%%==============================================================================
+%% Copyright 2011 Adam Lindberg & Erlang Solutions Ltd.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%==============================================================================
+
+%% @author Adam Lindberg <eproxus@gmail.com>
+%% @copyright 2011, Adam Lindberg & Erlang Solutions Ltd
+%% @doc Module mocking library for Erlang.
+
+-module(meck).
+-behaviour(gen_server).
+
+%% Interface exports
+-export([new/1]).
+-export([new/2]).
+-export([expect/3]).
+-export([expect/4]).
+-export([sequence/4]).
+-export([loop/4]).
+-export([delete/3]).
+-export([exception/2]).
+-export([passthrough/1]).
+-export([history/1]).
+-export([history/2]).
+-export([validate/1]).
+-export([unload/0]).
+-export([unload/1]).
+-export([called/3]).
+-export([called/4]).
+-export([num_calls/3]).
+-export([num_calls/4]).
+
+%% Callback exports
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+-export([exec/5]).
+
+%% Types
+%% @type meck_mfa() = {Mod::atom(), Func::atom(), Args::list(term())}.
+%% Module, function and arguments that the mock module got called with.
+-type meck_mfa() :: {Mod::atom(), Func::atom(), Args::[term()]}.
+
+%% @type history() = [{pid(), meck_mfa(), Result::term()}
+%% | {pid(), meck_mfa(), Class:: exit | error | throw,
+%% Reason::term(), Stacktrace::list(mfa())}].
+%% History is a list of either successful function calls with a returned
+%% result or function calls that resulted in an exception with a type,
+%% reason and a stack trace. Each tuple begins with the pid of the process
+%% that made the call to the function.
+-type history() :: [{pid(), meck_mfa(), Result::term()}
+ | {pid(), meck_mfa(), Class:: exit | error | throw,
+ Reason::term(), Stacktrace::[mfa()]}].
+
+%% Records
+-record(state, {mod :: atom(),
+ expects :: dict(),
+ valid = true :: boolean(),
+ history = [] :: history(),
+ original :: term(),
+ was_sticky :: boolean()}).
+
+%% Includes
+-include("meck_abstract.hrl").
+
+%%==============================================================================
+%% Interface exports
+%%==============================================================================
+
+%% @spec new(Mod:: atom() | list(atom())) -> ok
+%% @equiv new(Mod, [])
+-spec new(Mod:: atom() | [atom()]) -> ok.
+new(Mod) when is_atom(Mod) -> new(Mod, []);
+new(Mod) when is_list(Mod) -> lists:foreach(fun new/1, Mod), ok.
+
+%% @spec new(Mod:: atom() | list(atom()), Options::list(term())) -> ok
+%% @doc Creates new mocked module(s).
+%%
+%% This replaces the current version (if any) of the modules in `Mod'
+%% with an empty module.
+%%
+%% Since this library is intended to be used from test code, this
+%% function links a process for each mock to the calling process.
+%%
+%% The valid options are:
+%% <dl>
+%% <dt>`passthrough'</dt><dd>Retains the original functions, if not
+%% mocked by meck.</dd>
+%% <dt>`no_link'</dt> <dd>Does not link the meck process to the caller
+%% process (needed for using meck in rpc calls).
+%% </dd>
+%% <dt>`unstick'</dt> <dd>Unstick the module to be mocked (e.g. needed
+%% for using meck with kernel and stdlib modules).
+%% </dd>
+%% <dt>`no_passthrough_cover'</dt><dd>If cover is enabled on the module to be
+%% mocked then meck will continue to
+%% capture coverage on passthrough calls.
+%% This option allows you to disable that
+%% feature if it causes problems.
+%% </dd>
+%% </dl>
+-spec new(Mod:: atom() | [atom()], Options::[term()]) -> ok.
+new(Mod, Options) when is_atom(Mod), is_list(Options) ->
+ case start(Mod, Options) of
+ {ok, _Pid} -> ok;
+ {error, Reason} -> erlang:error(Reason, [Mod, Options])
+ end;
+new(Mod, Options) when is_list(Mod) ->
+ lists:foreach(fun(M) -> new(M, Options) end, Mod),
+ ok.
+
+%% @spec expect(Mod:: atom() | list(atom()), Func::atom(), Expect::fun()) -> ok
+%% @doc Add expectation for a function `Func' to the mocked modules `Mod'.
+%%
+%% An expectation is a fun that is executed whenever the function
+%% `Func' is called.
+%%
+%% It affects the validation status of the mocked module(s). If an
+%% expectation is called with the wrong number of arguments or invalid
+%% arguments the mock module(s) is invalidated. It is also invalidated if
+%% an unexpected exception occurs.
+-spec expect(Mod:: atom() | [atom()], Func::atom(), Expect::fun()) -> ok.
+expect(Mod, Func, Expect)
+ when is_atom(Mod), is_atom(Func), is_function(Expect) ->
+ call(Mod, {expect, Func, Expect});
+expect(Mod, Func, Expect) when is_list(Mod) ->
+ lists:foreach(fun(M) -> expect(M, Func, Expect) end, Mod),
+ ok.
+
+%% @spec expect(Mod:: atom() | list(atom()), Func::atom(),
+%% Arity::pos_integer(), Result::term()) -> ok
+%% @doc Adds an expectation with the supplied arity and return value.
+%%
+%% This creates an expectation which takes `Arity' number of arguments
+%% and always returns `Result'.
+%%
+%% @see expect/3.
+-spec expect(Mod:: atom() | [atom()], Func::atom(),
+ Arity::pos_integer(), Result::term()) -> ok.
+expect(Mod, Func, Arity, Result)
+ when is_atom(Mod), is_atom(Func), is_integer(Arity), Arity >= 0 ->
+ valid_expect(Mod, Func, Arity),
+ call(Mod, {expect, Func, Arity, Result});
+expect(Mod, Func, Arity, Result) when is_list(Mod) ->
+ lists:foreach(fun(M) -> expect(M, Func, Arity, Result) end, Mod),
+ ok.
+
+%% @spec sequence(Mod:: atom() | list(atom()), Func::atom(),
+%% Arity::pos_integer(), Sequence::[term()]) -> ok
+%% @doc Adds an expectation which returns a value from `Sequence'
+%% until exhausted.
+%%
+%% This creates an expectation which takes `Arity' number of arguments
+%% and returns one element from `Sequence' at a time. Thus, calls to
+%% this expect will exhaust the list of return values in order until
+%% the last value is reached. That value is then returned for all
+%% subsequent calls.
+-spec sequence(Mod:: atom() | [atom()], Func::atom(),
+ Arity::pos_integer(), Sequence::[term()]) -> ok.
+sequence(Mod, Func, Arity, Sequence)
+ when is_atom(Mod), is_atom(Func), is_integer(Arity), Arity >= 0 ->
+ call(Mod, {sequence, Func, Arity, Sequence});
+sequence(Mod, Func, Arity, Sequence) when is_list(Mod) ->
+ lists:foreach(fun(M) -> sequence(M, Func, Arity, Sequence) end, Mod),
+ ok.
+
+%% @spec loop(Mod:: atom() | list(atom()), Func::atom(),
+%% Arity::pos_integer(), Loop::[term()]) -> ok
+%% @doc Adds an expectation which returns a value from `Loop'
+%% infinitely.
+%%
+%% This creates an expectation which takes `Arity' number of arguments
+%% and returns one element from `Loop' at a time. Thus, calls to this
+%% expect will return one element at a time from the list and will
+%% restart at the first element when the end is reached.
+-spec loop(Mod:: atom() | [atom()], Func::atom(),
+ Arity::pos_integer(), Loop::[term()]) -> ok.
+loop(Mod, Func, Arity, Loop)
+ when is_atom(Mod), is_atom(Func), is_integer(Arity), Arity >= 0 ->
+ call(Mod, {loop, Func, Arity, Loop});
+loop(Mod, Func, Arity, Loop) when is_list(Mod) ->
+ lists:foreach(fun(M) -> loop(M, Func, Arity, Loop) end, Mod),
+ ok.
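+
+%% A quick sketch (not part of the original source) contrasting the two
+%% expectations above, for a hypothetical mocked module `m':
+%%
+%%   meck:sequence(m, s, 0, [1, 2]),  %% m:s() -> 1, 2, 2, 2, ...
+%%   meck:loop(m, l, 0, [1, 2]),      %% m:l() -> 1, 2, 1, 2, ...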
+
+%% @spec delete(Mod:: atom() | list(atom()), Func::atom(),
+%% Arity::pos_integer()) -> ok
+%% @doc Deletes an expectation.
+%%
+%% Deletes the expectation for the function `Func' with the matching
+%% arity `Arity'.
+-spec delete(Mod:: atom() | [atom()], Func::atom(), Arity::pos_integer()) ->
+ ok.
+delete(Mod, Func, Arity)
+ when is_atom(Mod), is_atom(Func), Arity >= 0 ->
+ call(Mod, {delete, Func, Arity});
+delete(Mod, Func, Arity) when is_list(Mod) ->
+ lists:foreach(fun(M) -> delete(M, Func, Arity) end, Mod),
+ ok.
+
+%% @spec exception(Class:: throw | error | exit, Reason::term()) -> no_return()
+%% @doc Throws an expected exception inside an expect fun.
+%%
+%% This exception will get thrown without invalidating the mocked
+%% module. That is, the code using the mocked module is expected to
+%% handle this exception.
+%%
+%% <em>Note: this code should only be used inside an expect fun.</em>
+-spec exception(Class:: throw | error | exit, Reason::term()) -> no_return().
+exception(Class, Reason) when Class == throw; Class == error; Class == exit ->
+ throw(mock_exception_fun(Class, Reason)).
+
+%% @spec passthrough(Args::list(term())) -> no_return()
+%% @doc Calls the original function (if existing) inside an expectation fun.
+%%
+%% This call does not return, thus everything after this call inside
+%% an expectation fun will be ignored.
+%%
+%% <em>Note: this code should only be used inside an expect fun.</em>
+-spec passthrough(Args::[term()]) -> no_return().
+passthrough(Args) -> throw(passthrough_fun(Args)).
+
+%% @spec validate(Mod:: atom() | list(atom())) -> boolean()
+%% @doc Validate the state of the mock module(s).
+%%
+%% The function returns `true' if the mocked module(s) has been used
+%% according to its expectations. It returns `false' if a call has
+%% failed in some way. Reasons for failure are wrong number of
+%% arguments or non-existing function (undef), wrong arguments
+%% (function clause) or unexpected exceptions.
+%%
+%% Use the {@link history/1} or {@link history/2} function to analyze errors.
+-spec validate(Mod:: atom() | [atom()]) -> boolean().
+validate(Mod) when is_atom(Mod) ->
+ call(Mod, validate);
+validate(Mod) when is_list(Mod) ->
+ not lists:member(false, [validate(M) || M <- Mod]).
+
+%% @spec history(Mod::atom()) -> history()
+%% @doc Return the call history of the mocked module for all processes.
+%%
+%% @equiv history(Mod, '_')
+-spec history(Mod::atom()) -> history().
+history(Mod) when is_atom(Mod) -> call(Mod, history).
+
+%% @spec history(Mod::atom(), Pid::pid()) -> history()
+%% @doc Return the call history of the mocked module for the specified process.
+%%
+%% Returns a list of calls to the mocked module and their results for
+%% the specified `Pid'. Results can be either normal Erlang terms or
+%% exceptions that occurred.
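+%%
+%% A sketch of the returned entries, with a hypothetical `mymod':
+%% ```
+%% meck:expect(mymod, f, 1, ok),
+%% mymod:f(a),
+%% [{_, {mymod, f, [a]}, ok}] = meck:history(mymod, self())
+%% '''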
+%%
+%% @see history/1
+%% @see called/3
+%% @see called/4
+%% @see num_calls/3
+%% @see num_calls/4
+-spec history(Mod::atom(), Pid:: pid() | '_') -> history().
+history(Mod, Pid) when is_atom(Mod), is_pid(Pid) orelse Pid == '_' ->
+ match_history(match_mfa('_', Pid), call(Mod, history)).
+
+%% @spec unload() -> list(atom())
+%% @doc Unloads all mocked modules from memory.
+%%
+%% The function returns the list of mocked modules that were unloaded
+%% in the process.
+-spec unload() -> [atom()].
+unload() -> lists:foldl(fun unload_if_mocked/2, [], registered()).
+
+%% @spec unload(Mod:: atom() | list(atom())) -> ok
+%% @doc Unload a mocked module or a list of mocked modules.
+%%
+%% This will purge and delete the module(s) from the Erlang virtual
+%% machine. If the mocked module(s) replaced an existing module, this
+%% module will still be in the Erlang load path and can be loaded
+%% manually or when called.
+-spec unload(Mods:: atom() | [atom()]) -> ok.
+unload(Mod) when is_atom(Mod) -> call(Mod, stop), wait_for_exit(Mod);
+unload(Mods) when is_list(Mods) -> lists:foreach(fun unload/1, Mods), ok.
+
+%% @spec called(Mod:: atom(), Fun:: atom(), Args:: list(term())) -> boolean()
+%% @doc Returns whether `Mod:Func' has been called with `Args'.
+%%
+%% @equiv called(Mod, Fun, Args, '_')
+called(Mod, Fun, Args) ->
+ has_call({Mod, Fun, Args}, meck:history(Mod)).
+
+%% @spec called(Mod:: atom(), Fun:: atom(), Args:: list(term()),
+%%              Pid:: pid() | '_') -> boolean()
+%% @doc Returns whether `Pid' has called `Mod:Func' with `Args'.
+%%
+%% This checks the history for the module `Mod' to determine whether
+%% process `Pid' called the function `Fun' with arguments `Args'. If
+%% so, this function returns `true', otherwise `false'.
+%%
+%% Wildcards can be used, at any level in any term, by using the underscore
+%% atom: ``'_' ''
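+%%
+%% For example, with a hypothetical mocked module `mymod':
+%% ```
+%% meck:expect(mymod, f, 2, ok),
+%% mymod:f(1, 2),
+%% true = meck:called(mymod, f, [1, '_'], self())
+%% '''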
+%%
+%% @see called/3
+-spec called(Mod::atom(), Fun::atom(), Args::list(), Pid:: pid() | '_') ->
+    boolean().
+called(Mod, Fun, Args, Pid) ->
+ has_call({Mod, Fun, Args}, meck:history(Mod, Pid)).
+
+%% @spec num_calls(Mod:: atom(), Fun:: atom(), Args:: list(term()))
+%% -> non_neg_integer()
+%% @doc Returns the number of times `Mod:Func' has been called with `Args'.
+%%
+%% @equiv num_calls(Mod, Fun, Args, '_')
+num_calls(Mod, Fun, Args) ->
+ num_calls({Mod, Fun, Args}, meck:history(Mod)).
+
+%% @spec num_calls(Mod:: atom(), Fun:: atom(), Args:: list(term()),
+%%                 Pid:: pid() | '_') -> non_neg_integer()
+%% @doc Returns the number of times process `Pid' has called `Mod:Func'
+%% with `Args'.
+%%
+%% This checks the history for the module `Mod' to determine how many
+%% times process `Pid' has called the function `Fun' with arguments
+%% `Args', and returns that count.
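+%%
+%% For example, with a hypothetical mocked module `mymod':
+%% ```
+%% meck:expect(mymod, f, 1, ok),
+%% mymod:f(a),
+%% mymod:f(a),
+%% 2 = meck:num_calls(mymod, f, [a], self())
+%% '''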
+%%
+%% @see num_calls/3
+-spec num_calls(Mod::atom(), Fun::atom(), Args::list(), Pid:: pid() | '_') ->
+    non_neg_integer().
+num_calls(Mod, Fun, Args, Pid) ->
+ num_calls({Mod, Fun, Args}, meck:history(Mod, Pid)).
+
+%%==============================================================================
+%% Callback functions
+%%==============================================================================
+
+%% @hidden
+init([Mod, Options]) ->
+ WasSticky = case proplists:is_defined(unstick, Options) of
+ true -> {module, Mod} = code:ensure_loaded(Mod),
+ unstick_original(Mod);
+ _ -> false
+ end,
+ NoPassCover = proplists:get_bool(no_passthrough_cover, Options),
+ Original = backup_original(Mod, NoPassCover),
+ process_flag(trap_exit, true),
+ Expects = init_expects(Mod, Options),
+ try
+ _Bin = meck_mod:compile_and_load_forms(to_forms(Mod, Expects)),
+ {ok, #state{mod = Mod, expects = Expects, original = Original,
+ was_sticky = WasSticky}}
+ catch
+ exit:{error_loading_module, Mod, sticky_directory} ->
+ {stop, module_is_sticky}
+ end.
+
+%% @hidden
+handle_call({get_expect, Func, Arity}, _From, S) ->
+ {Expect, NewExpects} = get_expect(S#state.expects, Func, Arity),
+ {reply, Expect, S#state{expects = NewExpects}};
+handle_call({expect, Func, Expect}, _From, S) ->
+ NewExpects = store_expect(S#state.mod, Func, Expect, S#state.expects),
+ {reply, ok, S#state{expects = NewExpects}};
+handle_call({expect, Func, Arity, Result}, _From, S) ->
+ NewExpects = store_expect(S#state.mod, Func, {anon, Arity, Result},
+ S#state.expects),
+ {reply, ok, S#state{expects = NewExpects}};
+handle_call({sequence, Func, Arity, Sequence}, _From, S) ->
+ NewExpects = store_expect(S#state.mod, Func, {sequence, Arity, Sequence},
+ S#state.expects),
+ {reply, ok, S#state{expects = NewExpects}};
+handle_call({loop, Func, Arity, Loop}, _From, S) ->
+ NewExpects = store_expect(S#state.mod, Func, {loop, Arity, Loop, Loop},
+ S#state.expects),
+ {reply, ok, S#state{expects = NewExpects}};
+handle_call({delete, Func, Arity}, _From, S) ->
+ NewExpects = delete_expect(S#state.mod, Func, Arity, S#state.expects),
+ {reply, ok, S#state{expects = NewExpects}};
+handle_call(history, _From, S) ->
+ {reply, lists:reverse(S#state.history), S};
+handle_call(invalidate, _From, S) ->
+ {reply, ok, S#state{valid = false}};
+handle_call(validate, _From, S) ->
+ {reply, S#state.valid, S};
+handle_call(stop, _From, S) ->
+ {stop, normal, ok, S}.
+
+%% @hidden
+handle_cast({add_history, Item}, S) ->
+ {noreply, S#state{history = [Item| S#state.history]}};
+handle_cast(_Msg, S) ->
+ {noreply, S}.
+
+%% @hidden
+handle_info(_Info, S) -> {noreply, S}.
+
+%% @hidden
+terminate(_Reason, #state{mod = Mod, original = OriginalState,
+ was_sticky = WasSticky}) ->
+ export_original_cover(Mod, OriginalState),
+ cleanup(Mod),
+ restore_original(Mod, OriginalState, WasSticky),
+ ok.
+
+%% @hidden
+code_change(_OldVsn, S, _Extra) -> {ok, S}.
+
+%% @hidden
+exec(Pid, Mod, Func, Arity, Args) ->
+ Expect = call(Mod, {get_expect, Func, Arity}),
+ try Result = call_expect(Mod, Func, Expect, Args),
+ add_history(Pid, Mod, Func, Args, Result),
+ Result
+ catch
+ throw:Fun when is_function(Fun) ->
+ case is_mock_exception(Fun) of
+ true -> handle_mock_exception(Pid, Mod, Func, Fun, Args);
+ false -> invalidate_and_raise(Pid, Mod, Func, Args, throw, Fun)
+ end;
+ Class:Reason ->
+ invalidate_and_raise(Pid, Mod, Func, Args, Class, Reason)
+ end.
+
+%%==============================================================================
+%% Internal functions
+%%==============================================================================
+
+%% --- Process functions -------------------------------------------------------
+
+start(Mod, Options) ->
+ case proplists:is_defined(no_link, Options) of
+ true -> start(start, Mod, Options);
+ false -> start(start_link, Mod, Options)
+ end.
+
+start(Func, Mod, Options) ->
+ gen_server:Func({local, proc_name(Mod)}, ?MODULE, [Mod, Options], []).
+
+cast(Mod, Msg) -> gen_server(cast, Mod, Msg).
+call(Mod, Msg) -> gen_server(call, Mod, Msg).
+
+gen_server(Func, Mod, Msg) ->
+ Name = proc_name(Mod),
+ try gen_server:Func(Name, Msg)
+ catch exit:_Reason -> erlang:error({not_mocked, Mod}) end.
+
+proc_name(Name) -> list_to_atom(atom_to_list(Name) ++ "_meck").
+
+original_name(Name) -> list_to_atom(atom_to_list(Name) ++ "_meck_original").
+
+wait_for_exit(Mod) ->
+ MonitorRef = erlang:monitor(process, proc_name(Mod)),
+ receive {'DOWN', MonitorRef, _Type, _Object, _Info} -> ok end.
+
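+%% Given a registered process name, unload the corresponding mocked
+%% module if the name carries the "_meck" suffix added by proc_name/1.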
+unload_if_mocked(P, L) when is_atom(P) ->
+ unload_if_mocked(atom_to_list(P), L);
+unload_if_mocked(P, L) when length(P) > 5 ->
+ case lists:split(length(P) - 5, P) of
+ {Name, "_meck"} ->
+ Mocked = list_to_existing_atom(Name),
+ try
+ unload(Mocked)
+ catch error:{not_mocked, Mocked} ->
+ ok
+ end,
+ [Mocked|L];
+ _Else ->
+ L
+ end;
+unload_if_mocked(_P, L) ->
+ L.
+
+%% --- Mock handling -----------------------------------------------------------
+
+valid_expect(M, F, A) ->
+ case expect_type(M, F, A) of
+ autogenerated -> erlang:error({cannot_mock_autogenerated, {M, F, A}});
+ builtin -> erlang:error({cannot_mock_builtin, {M, F, A}});
+ normal -> ok
+ end.
+
+init_expects(Mod, Options) ->
+ case proplists:get_value(passthrough, Options, false) andalso exists(Mod) of
+ true -> dict:from_list([{FA, passthrough} || FA <- exports(Mod)]);
+ _ -> dict:new()
+ end.
+
+
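+%% Fetch the expectation for Func/Arity; sequence and loop expectations
+%% are advanced one element per call, all others pass through unchanged.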
+get_expect(Expects, Func, Arity) ->
+ case e_fetch(Expects, Func, Arity) of
+ {sequence, Arity, [Result]} ->
+ {{sequence, Arity, Result}, Expects};
+ {sequence, Arity, [Result|Rest]} ->
+ {{sequence, Arity, Result},
+ e_store(Expects, Func, {sequence, Arity, Rest})};
+ {loop, Arity, [Result], Loop} ->
+ {{loop, Arity, Result},
+ e_store(Expects, Func, {loop, Arity, Loop, Loop})};
+ {loop, Arity, [Result|Rest], Loop} ->
+ {{loop, Arity, Result},
+ e_store(Expects, Func, {loop, Arity, Rest, Loop})};
+ Other ->
+ {Other, Expects}
+ end.
+
+store_expect(Mod, Func, Expect, Expects) ->
+ change_expects(fun e_store/3, Mod, Func, Expect, Expects).
+
+delete_expect(Mod, Func, Arity, Expects) ->
+ change_expects(fun e_delete/3, Mod, Func, Arity, Expects).
+
+change_expects(Op, Mod, Func, Value, Expects) ->
+ NewExpects = Op(Expects, Func, Value),
+ _Bin = meck_mod:compile_and_load_forms(to_forms(Mod, NewExpects)),
+ NewExpects.
+
+e_store(Expects, Func, Expect) ->
+ dict:store({Func, arity(Expect)}, Expect, Expects).
+
+e_fetch(Expects, Func, Arity) ->
+ dict:fetch({Func, Arity}, Expects).
+
+e_delete(Expects, Func, Arity) ->
+ dict:erase({Func, Arity}, Expects).
+
+%% --- Code generation ---------------------------------------------------------
+
+func(Mod, {Func, Arity}, {anon, Arity, Result}) ->
+ case contains_opaque(Result) of
+ true ->
+ func_exec(Mod, Func, Arity);
+ false ->
+ func_native(Mod, Func, Arity, Result)
+ end;
+func(Mod, {Func, Arity}, _Expect) ->
+ func_exec(Mod, Func, Arity).
+
+func_exec(Mod, Func, Arity) ->
+ Args = args(Arity),
+ ?function(Func, Arity,
+ [?clause(Args,
+ [?call(?MODULE, exec,
+ [?call(erlang, self, []),
+ ?atom(Mod),
+ ?atom(Func),
+ ?integer(Arity),
+ list(Args)])])]).
+
+func_native(Mod, Func, Arity, Result) ->
+ Args = args(Arity),
+ AbsResult = erl_parse:abstract(Result),
+ ?function(
+ Func, Arity,
+ [?clause(
+ Args,
+ [?call(gen_server, cast,
+ [?atom(proc_name(Mod)),
+ ?tuple([?atom(add_history),
+ ?tuple([?call(erlang, self, []),
+ ?tuple([?atom(Mod), ?atom(Func),
+ list(Args)]),
+ AbsResult])])]),
+ AbsResult])]).
+
+contains_opaque(Term) when is_pid(Term); is_port(Term); is_function(Term) ->
+ true;
+contains_opaque(Term) when is_list(Term) ->
+ lists:any(fun contains_opaque/1, Term);
+contains_opaque(Term) when is_tuple(Term) ->
+ lists:any(fun contains_opaque/1, tuple_to_list(Term));
+contains_opaque(_Term) ->
+ false.
+
+
+to_forms(Mod, Expects) ->
+ {Exports, Functions} = functions(Mod, Expects),
+ [?attribute(module, Mod)] ++ Exports ++ Functions.
+
+functions(Mod, Expects) ->
+ dict:fold(fun(Export, Expect, {Exports, Functions}) ->
+ {[?attribute(export, [Export])|Exports],
+ [func(Mod, Export, Expect)|Functions]}
+ end, {[], []}, Expects).
+
+args(0) -> [];
+args(Arity) -> [?var(var_name(N)) || N <- lists:seq(1, Arity)].
+
+list([]) -> {nil, ?LINE};
+list([H|T]) -> {cons, ?LINE, H, list(T)}.
+
+var_name(A) -> list_to_atom("A"++integer_to_list(A)).
+
+arity({anon, Arity, _Result}) ->
+ Arity;
+arity({sequence, Arity, _Sequence}) ->
+ Arity;
+arity({loop, Arity, _Current, _Loop}) ->
+ Arity;
+arity(Fun) when is_function(Fun) ->
+ {arity, Arity} = erlang:fun_info(Fun, arity),
+ Arity.
+
+%% --- Execution utilities -----------------------------------------------------
+
+is_local_function(Fun) ->
+ {module, Module} = erlang:fun_info(Fun, module),
+ ?MODULE == Module.
+
+handle_mock_exception(Pid, Mod, Func, Fun, Args) ->
+ case Fun() of
+ {exception, Class, Reason} ->
+            % exception created with the meck:exception function,
+ % do not invalidate Mod
+ raise(Pid, Mod, Func, Args, Class, Reason);
+ {passthrough, PassthroughArgs} ->
+            % meck:passthrough(Args) called from a mock function
+ Result = apply(original_name(Mod), Func, PassthroughArgs),
+ add_history(Pid, Mod, Func, PassthroughArgs, Result),
+ Result
+ end.
+
+-spec invalidate_and_raise(_, _, _, _, _, _) -> no_return().
+invalidate_and_raise(Pid, Mod, Func, Args, Class, Reason) ->
+ call(Mod, invalidate),
+ raise(Pid, Mod, Func, Args, Class, Reason).
+
+raise(Pid, Mod, Func, Args, Class, Reason) ->
+ Stacktrace = inject(Mod, Func, Args, erlang:get_stacktrace()),
+ add_history(Pid, Mod, Func, Args, Class, Reason, Stacktrace),
+ erlang:raise(Class, Reason, Stacktrace).
+
+mock_exception_fun(Class, Reason) -> fun() -> {exception, Class, Reason} end.
+
+passthrough_fun(Args) -> fun() -> {passthrough, Args} end.
+
+call_expect(_Mod, _Func, {_Type, Arity, Return}, VarList)
+ when Arity == length(VarList) ->
+ Return;
+call_expect(Mod, Func, passthrough, VarList) ->
+ apply(original_name(Mod), Func, VarList);
+call_expect(_Mod, _Func, Fun, VarList) when is_function(Fun) ->
+ apply(Fun, VarList).
+
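+%% Rewrite a stacktrace so that the mocked call appears as
+%% Mod:Func(Args) directly below the meck:exec frame.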
+inject(_Mod, _Func, _Args, []) ->
+ [];
+inject(Mod, Func, Args, [{meck, exec, _Arity} = Meck|Stack]) ->
+ [Meck, {Mod, Func, Args}|Stack];
+inject(Mod, Func, Args, [{meck, exec, _Arity, _Location} = Meck|Stack]) ->
+ [Meck, {Mod, Func, Args}|Stack];
+inject(Mod, Func, Args, [H|Stack]) ->
+ [H|inject(Mod, Func, Args, Stack)].
+
+is_mock_exception(Fun) -> is_local_function(Fun).
+
+%% --- Original module handling ------------------------------------------------
+
+backup_original(Module, NoPassCover) ->
+ Cover = get_cover_state(Module),
+ try
+ Forms = meck_mod:abstract_code(meck_mod:beam_file(Module)),
+ NewName = original_name(Module),
+ CompileOpts = meck_mod:compile_options(meck_mod:beam_file(Module)),
+ Renamed = meck_mod:rename_module(Forms, NewName),
+ Binary = meck_mod:compile_and_load_forms(Renamed, CompileOpts),
+
+ %% At this point we care about `Binary' if and only if we want
+ %% to recompile it to enable cover on the original module code
+ %% so that we can still collect cover stats on functions that
+ %% have not been mocked. Below are the different values
+ %% passed back along with `Cover'.
+ %%
+ %% `no_passthrough_cover' - there is no coverage on the
+ %% original module OR passthrough coverage has been disabled
+ %% via the `no_passthrough_cover' option
+ %%
+ %% `no_binary' - something went wrong while trying to compile
+ %% the original module in `backup_original'
+ %%
+ %% Binary - a `binary()' of the compiled code for the original
+ %% module that is being mocked, this needs to be passed around
+ %% so that it can be passed to Cover later. There is no way
+ %% to use the code server to access this binary without first
+ %% saving it to disk. Instead, it's passed around as state.
+ if (Cover == false) orelse NoPassCover ->
+ Binary2 = no_passthrough_cover;
+ true ->
+ Binary2 = Binary,
+ meck_cover:compile_beam(NewName, Binary2)
+ end,
+ {Cover, Binary2}
+ catch
+ throw:{object_code_not_found, _Module} ->
+ {Cover, no_binary}; % TODO: What to do here?
+ throw:no_abstract_code ->
+ {Cover, no_binary} % TODO: What to do here?
+ end.
+
+restore_original(Mod, {false, _}, WasSticky) ->
+ restick_original(Mod, WasSticky),
+ ok;
+restore_original(Mod, OriginalState={{File, Data, Options},_}, WasSticky) ->
+ case filename:extension(File) of
+ ".erl" ->
+ {ok, Mod} = cover:compile_module(File, Options);
+ ".beam" ->
+ cover:compile_beam(File)
+ end,
+ restick_original(Mod, WasSticky),
+ import_original_cover(Mod, OriginalState),
+ ok = cover:import(Data),
+ ok = file:delete(Data),
+ ok.
+
+%% @doc Import the cover data for `<name>_meck_original'; since it was
+%% modified by `export_original_cover', it will count towards
+%% `<name>'.
+import_original_cover(Mod, {_,Bin}) when is_binary(Bin) ->
+ OriginalData = atom_to_list(original_name(Mod)) ++ ".coverdata",
+ ok = cover:import(OriginalData),
+ ok = file:delete(OriginalData);
+import_original_cover(_, _) ->
+ ok.
+
+%% @doc Export the cover data for `<name>_meck_original' and modify
+%% the data so it can be imported under `<name>'.
+export_original_cover(Mod, {_, Bin}) when is_binary(Bin) ->
+ OriginalMod = original_name(Mod),
+ File = atom_to_list(OriginalMod) ++ ".coverdata",
+ ok = cover:export(File, OriginalMod),
+ ok = meck_cover:rename_module(File, Mod);
+export_original_cover(_, _) ->
+ ok.
+
+
+unstick_original(Module) -> unstick_original(Module, code:is_sticky(Module)).
+
+unstick_original(Module, true) -> code:unstick_mod(Module);
+unstick_original(_,_) -> false.
+
+restick_original(Module, true) ->
+ code:stick_mod(Module),
+ {module, Module} = code:ensure_loaded(Module),
+ ok;
+restick_original(_,_) -> ok.
+
+get_cover_state(Module) -> get_cover_state(Module, cover:is_compiled(Module)).
+
+get_cover_state(Module, {file, File}) ->
+ Data = atom_to_list(Module) ++ ".coverdata",
+ ok = cover:export(Data, Module),
+ CompileOptions =
+ try
+ meck_mod:compile_options(meck_mod:beam_file(Module))
+ catch
+ throw:{object_code_not_found, _Module} -> []
+ end,
+ {File, Data, CompileOptions};
+get_cover_state(_Module, _IsCompiled) ->
+ false.
+
+exists(Module) ->
+ code:which(Module) /= non_existing.
+
+exports(M) ->
+ [ FA || FA = {F, A} <- M:module_info(exports),
+ normal == expect_type(M, F, A)].
+
+%% Functions we should not create expects for (auto-generated and BIFs)
+expect_type(_, module_info, 0) -> autogenerated;
+expect_type(_, module_info, 1) -> autogenerated;
+expect_type(M, F, A) -> expect_type(erlang:is_builtin(M, F, A)).
+
+expect_type(true) -> builtin;
+expect_type(false) -> normal.
+
+cleanup(Mod) ->
+ code:purge(Mod),
+ code:delete(Mod),
+ code:purge(original_name(Mod)),
+ code:delete(original_name(Mod)).
+
+%% --- History utilities -------------------------------------------------------
+
+add_history(Pid, Mod, Func, Args, Result) ->
+ add_history(Mod, {Pid, {Mod, Func, Args}, Result}).
+add_history(Pid, Mod, Func, Args, Class, Reason, Stacktrace) ->
+ add_history(Mod, {Pid, {Mod, Func, Args}, Class, Reason, Stacktrace}).
+
+add_history(Mod, Item) ->
+ cast(Mod, {add_history, Item}).
+
+has_call(MFA, History) ->
+ [] =/= match_history(match_mfa(MFA), History).
+
+num_calls(MFA, History) ->
+ length(match_history(match_mfa(MFA), History)).
+
+match_history(MatchSpec, History) ->
+ MS = ets:match_spec_compile(MatchSpec),
+ ets:match_spec_run(History, MS).
+
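+%% Match specs selecting both normal history entries
+%% ({Pid, MFA, Result}) and exception entries
+%% ({Pid, MFA, Class, Reason, Stacktrace}).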
+match_mfa(MFA) -> match_mfa(MFA, '_').
+
+match_mfa(MFA, Pid) ->
+ [{{Pid, MFA, '_'}, [], ['$_']},
+ {{Pid, MFA, '_', '_', '_'}, [], ['$_']}].
diff --git a/deps/meck/src/meck_abstract.hrl b/deps/meck/src/meck_abstract.hrl
new file mode 100644
index 00000000..8f3b9829
--- /dev/null
+++ b/deps/meck/src/meck_abstract.hrl
@@ -0,0 +1,19 @@
+-define(call(Module, Function, Arguments),
+ {call, ?LINE,
+ {remote, ?LINE, ?atom(Module), ?atom(Function)},
+ Arguments}).
+
+-define(atom(Atom), {atom, ?LINE, Atom}).
+
+-define(integer(Integer), {integer, ?LINE, Integer}).
+
+-define(var(Name), {var, ?LINE, Name}).
+
+-define(attribute(Attribute, Args), {attribute, ?LINE, Attribute, Args}).
+
+-define(function(Name, Arity, Clauses),
+ {function, ?LINE, Name, Arity, Clauses}).
+
+-define(clause(Arguments, Body), {clause, ?LINE, Arguments, [], Body}).
+
+-define(tuple(Elements), {tuple, ?LINE, Elements}).
diff --git a/deps/meck/src/meck_cover.erl b/deps/meck/src/meck_cover.erl
new file mode 100644
index 00000000..da8888c8
--- /dev/null
+++ b/deps/meck/src/meck_cover.erl
@@ -0,0 +1,110 @@
+%%==============================================================================
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%==============================================================================
+
+%% @doc Module containing functions needed by meck to integrate with cover.
+
+-module(meck_cover).
+
+%% Interface exports
+-export([compile_beam/2]).
+-export([rename_module/2]).
+
+%%==============================================================================
+%% Interface exports
+%%==============================================================================
+
+%% @doc Enable cover on `<name>_meck_original'.
+compile_beam(OriginalMod, Bin) ->
+ alter_cover(),
+ {ok, _} = cover:compile_beam(OriginalMod, Bin).
+
+%% @doc Given a cover file `File' exported by `cover:export', overwrite
+%% the module name with `Name'.
+rename_module(File, Name) ->
+ NewTerms = change_cover_mod_name(read_cover_file(File), Name),
+ write_terms(File, NewTerms),
+ ok.
+
+%%==============================================================================
+%% Internal functions
+%%==============================================================================
+
+%% @private
+%%
+%% @doc Alter the cover BEAM module to export some of its private
+%% functions. This is done for two reasons:
+%%
+%% 1. Meck needs to alter the exported analysis data on disk and
+%% therefore needs to understand this format. This is why `get_term'
+%% and `write' are exposed.
+%%
+%% 2. In order to avoid creating temporary files, meck needs direct
+%% access to `compile_beam/2', which allows passing a binary.
+alter_cover() ->
+ case lists:member({compile_beam,2}, cover:module_info(exports)) of
+ true ->
+ ok;
+ false ->
+ Beam = meck_mod:beam_file(cover),
+ AbsCode = meck_mod:abstract_code(Beam),
+ Exports = [{compile_beam, 2}, {get_term, 1}, {write, 2}],
+ AbsCode2 = meck_mod:add_exports(Exports, AbsCode),
+ _Bin = meck_mod:compile_and_load_forms(AbsCode2),
+ ok
+ end.
+
+change_cover_mod_name(CoverTerms, Name) ->
+ {_, Terms} = lists:foldl(fun change_name_in_term/2, {Name,[]}, CoverTerms),
+ Terms.
+
+change_name_in_term({file, Mod, File}, {Name, Terms}) ->
+ Term2 = {file, Name, replace_string(File, Mod, Name)},
+ {Name, [Term2|Terms]};
+change_name_in_term({Bump={bump,_,_,_,_,_},_}=Term, {Name, Terms}) ->
+ Bump2 = setelement(2, Bump, Name),
+ Term2 = setelement(1, Term, Bump2),
+ {Name, [Term2|Terms]};
+change_name_in_term({_Mod,Clauses}, {Name, Terms}) ->
+    {_, Clauses2} = lists:foldl(fun change_name_in_clause/2, {Name,[]}, Clauses),
+    Term2 = {Name, Clauses2},
+ {Name, [Term2|Terms]}.
+
+change_name_in_clause(Clause, {Name, NewClauses}) ->
+ {Name, [setelement(1, Clause, Name)|NewClauses]}.
+
+replace_string(File, Old, New) ->
+ Old2 = atom_to_list(Old),
+ New2 = atom_to_list(New),
+ re:replace(File, Old2, New2, [{return, list}]).
+
+read_cover_file(File) ->
+ {ok, Fd} = file:open(File, [read, binary, raw]),
+ Terms = get_terms(Fd, []),
+ ok = file:close(Fd),
+ Terms.
+
+get_terms(Fd, Terms) ->
+ case cover:get_term(Fd) of
+ eof -> Terms;
+ Term -> get_terms(Fd, [Term|Terms])
+ end.
+
+write_terms(File, Terms) ->
+ {ok, Fd} = file:open(File, [write, binary, raw]),
+ lists:foreach(write_term(Fd), Terms),
+ ok.
+
+write_term(Fd) ->
+ fun(Term) -> cover:write(Term, Fd) end.
+
diff --git a/deps/meck/src/meck_mod.erl b/deps/meck/src/meck_mod.erl
new file mode 100644
index 00000000..22a237d8
--- /dev/null
+++ b/deps/meck/src/meck_mod.erl
@@ -0,0 +1,118 @@
+%%==============================================================================
+%% Copyright 2011 Adam Lindberg & Erlang Solutions Ltd.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%==============================================================================
+
+%% @hidden
+%% @author Adam Lindberg <eproxus@gmail.com>
+%% @copyright 2011, Adam Lindberg & Erlang Solutions Ltd
+%% @doc Module wrangling helper functions.
+
+-module(meck_mod).
+
+%% Interface exports
+-export([abstract_code/1]).
+-export([add_exports/2]).
+-export([beam_file/1]).
+-export([compile_and_load_forms/1]).
+-export([compile_and_load_forms/2]).
+-export([compile_options/1]).
+-export([rename_module/2]).
+
+%% Types
+-type erlang_form() :: term().
+-type compile_options() :: [term()].
+-type export() :: {atom(), byte()}.
+
+%%==============================================================================
+%% Interface exports
+%%==============================================================================
+
+-spec abstract_code(binary()) -> erlang_form().
+abstract_code(BeamFile) ->
+ case beam_lib:chunks(BeamFile, [abstract_code]) of
+ {ok, {_, [{abstract_code, {raw_abstract_v1, Forms}}]}} ->
+ Forms;
+ {ok, {_, [{abstract_code, no_abstract_code}]}} ->
+ throw(no_abstract_code)
+ end.
+
+-spec add_exports([export()], erlang_form()) -> erlang_form().
+add_exports(Exports, AbsCode) ->
+ {attribute, Line, export, OrigExports} = lists:keyfind(export, 3, AbsCode),
+ Attr = {attribute, Line, export, OrigExports ++ Exports},
+ lists:keyreplace(export, 3, AbsCode, Attr).
+
+-spec beam_file(module()) -> binary().
+beam_file(Module) ->
+ % code:which/1 cannot be used for cover_compiled modules
+ case code:get_object_code(Module) of
+ {_, Binary, _Filename} -> Binary;
+ error -> throw({object_code_not_found, Module})
+ end.
+
+-spec compile_and_load_forms(erlang_form()) -> binary().
+compile_and_load_forms(AbsCode) -> compile_and_load_forms(AbsCode, []).
+
+-spec compile_and_load_forms(erlang_form(), compile_options()) -> binary().
+compile_and_load_forms(AbsCode, Opts) ->
+ case compile:forms(AbsCode, [return_errors|Opts]) of
+ {ok, ModName, Binary} ->
+ load_binary(ModName, Binary),
+ Binary;
+ {ok, ModName, Binary, _Warnings} ->
+ load_binary(ModName, Binary),
+ Binary;
+ Error ->
+ exit({compile_forms, Error})
+ end.
+
+-spec compile_options(binary() | module()) -> compile_options().
+compile_options(BeamFile) when is_binary(BeamFile) ->
+ case beam_lib:chunks(BeamFile, [compile_info]) of
+ {ok, {_, [{compile_info, Info}]}} ->
+ filter_options(proplists:get_value(options, Info));
+ _ ->
+ []
+ end;
+compile_options(Module) ->
+ filter_options(proplists:get_value(options, Module:module_info(compile))).
+
+-spec rename_module(erlang_form(), module()) -> erlang_form().
+rename_module([{attribute, Line, module, OldAttribute}|T], NewName) ->
+ case OldAttribute of
+ {_OldName, Variables} ->
+ [{attribute, Line, module, {NewName, Variables}}|T];
+ _OldName ->
+ [{attribute, Line, module, NewName}|T]
+ end;
+rename_module([H|T], NewName) ->
+ [H|rename_module(T, NewName)].
+
+%%==============================================================================
+%% Internal functions
+%%==============================================================================
+
+load_binary(Name, Binary) ->
+ case code:load_binary(Name, "", Binary) of
+ {module, Name} -> ok;
+ {error, Reason} -> exit({error_loading_module, Name, Reason})
+ end.
+
+% Parse transforms have already been applied to the abstract code in the
+% module, and they are often not available when compiling the forms, so
+% filter them out of the options.
+filter_options(Options) ->
+ lists:filter(fun({parse_transform,_}) -> false; (_) -> true end, Options).
+
diff --git a/deps/meck/test/cover_test_module.dontcompile b/deps/meck/test/cover_test_module.dontcompile
new file mode 100644
index 00000000..06e88967
--- /dev/null
+++ b/deps/meck/test/cover_test_module.dontcompile
@@ -0,0 +1,21 @@
+%% -*- mode: erlang -*-
+
+%% This file must not have the extension .erl, since otherwise rebar
+%% will try to compile it, which won't work since it requires special
+%% compilation options. See meck_tests:cover_test_.
+
+-module(cover_test_module).
+-export([a/0, b/0, c/2]).
+
+%% a/0 is defined in include/cover_test.hrl. We don't put the full
+%% path here, since it should be provided as a compiler option.
+-include("cover_test.hrl").
+
+%% Also, make sure that this module was compiled with -DTEST.
+-ifdef(TEST).
+b() ->
+ b.
+-endif.
+
+c(A, B) ->
+ {A, B}.
diff --git a/deps/meck/test/include/cover_test.hrl b/deps/meck/test/include/cover_test.hrl
new file mode 100644
index 00000000..0e770862
--- /dev/null
+++ b/deps/meck/test/include/cover_test.hrl
@@ -0,0 +1 @@
+a() -> a.
diff --git a/deps/meck/test/meck_performance_test.erl b/deps/meck/test/meck_performance_test.erl
new file mode 100644
index 00000000..71af107d
--- /dev/null
+++ b/deps/meck/test/meck_performance_test.erl
@@ -0,0 +1,65 @@
+%% @doc Simple performance measurements of meck operations.
+-module(meck_performance_test).
+
+%% Interface exports
+-export([run/1]).
+
+%%==============================================================================
+%% Interface exports
+%%==============================================================================
+
+run(N) ->
+ meck:new(test),
+ io:format("\t\tMin\tMax\tMed\tAvg~n"),
+ io:format("expect/3\t~p\t~p\t~p\t~p~n",
+ test_avg(meck, expect, [test, normal, fun() -> ok end], N)),
+ io:format("expect/3+args\t~p\t~p\t~p\t~p~n",
+ test_avg(meck, expect, [test, normal_args,
+ fun(_, _) -> ok end], N)),
+ io:format("expect/4\t~p\t~p\t~p\t~p~n",
+ test_avg(meck, expect, [test, shortcut, 0, ok], N)),
+ io:format("expect/4+args\t~p\t~p\t~p\t~p~n",
+ test_avg(meck, expect, [test, shortcut_args, 2, ok], N)),
+
+ meck:expect(test, shortcut_opaque, 0, self()),
+
+ io:format("~n\t\tMin\tMax\tMed\tAvg~n"),
+ io:format("normal\t\t~p\t~p\t~p\t~p~n",
+ test_avg(test, normal, [], N)),
+ io:format("normal_args\t~p\t~p\t~p\t~p~n",
+ test_avg(test, normal_args, [a, b], N)),
+ io:format("shortcut\t~p\t~p\t~p\t~p~n",
+ test_avg(test, shortcut, [], N)),
+ io:format("shortcut_args\t~p\t~p\t~p\t~p~n",
+ test_avg(test, shortcut_args, [a, b], N)),
+ io:format("shortcut_opaque\t~p\t~p\t~p\t~p~n",
+ test_avg(test, shortcut_opaque, [], N)),
+ meck:unload(test),
+
+ meck:new(test),
+ meck:expect(test, func, 1, ok),
+ [test:func(I) || I <- lists:seq(1, 100)],
+ io:format("~n\t\tMin\tMax\tMed\tAvg~n"),
+ io:format("called\t\t~p\t~p\t~p\t~p~n",
+ test_avg(meck, called, [test, func, 50], N)),
+ meck:unload(test),
+ ok.
+
+%%==============================================================================
+%% Internal functions
+%%==============================================================================
+
+test_avg(M, F, A, N) when N > 0 ->
+ L = test_loop(M, F, A, N, []),
+ Length = length(L),
+ Min = lists:min(L),
+ Max = lists:max(L),
+ Med = lists:nth(round((Length / 2)), lists:sort(L)),
+ Avg = round(lists:foldl(fun(X, Sum) -> X + Sum end, 0, L) / Length),
+ [Min, Max, Med, Avg].
+
+test_loop(_M, _F, _A, 0, List) ->
+ List;
+test_loop(M, F, A, N, List) ->
+ {T, _Result} = timer:tc(M, F, A),
+ test_loop(M, F, A, N - 1, [T|List]).
diff --git a/deps/meck/test/meck_test_module.erl b/deps/meck/test/meck_test_module.erl
new file mode 100644
index 00000000..6dee52be
--- /dev/null
+++ b/deps/meck/test/meck_test_module.erl
@@ -0,0 +1,8 @@
+-module(meck_test_module).
+-export([a/0, b/0, c/2]).
+
+a() -> a.
+b() -> b.
+
+c(A, B) ->
+ {A, B}.
diff --git a/deps/meck/test/meck_test_parametrized_module.erl b/deps/meck/test/meck_test_parametrized_module.erl
new file mode 100644
index 00000000..19126797
--- /dev/null
+++ b/deps/meck/test/meck_test_parametrized_module.erl
@@ -0,0 +1,7 @@
+-module(meck_test_parametrized_module, [Var1, Var2]).
+-export([which/0, var1/0, var2/0]).
+
+which() -> original.
+
+var1() -> {original, Var1}.
+var2() -> {original, Var2}.
diff --git a/deps/meck/test/meck_tests.erl b/deps/meck/test/meck_tests.erl
new file mode 100644
index 00000000..9e019521
--- /dev/null
+++ b/deps/meck/test/meck_tests.erl
@@ -0,0 +1,890 @@
+%%==============================================================================
+%% Copyright 2010 Erlang Solutions Ltd.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%==============================================================================
+
+-module(meck_tests).
+
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+
+meck_test_() ->
+ {foreach, fun setup/0, fun teardown/1,
+ [{with, [T]} || T <- [fun ?MODULE:new_/1,
+ fun ?MODULE:unload_/1,
+ fun ?MODULE:double_new_/1,
+ fun ?MODULE:validate_/1,
+ fun ?MODULE:expect_/1,
+ fun ?MODULE:exports_/1,
+ fun ?MODULE:call_return_value_/1,
+ fun ?MODULE:call_argument_/1,
+ fun ?MODULE:call_undef_/1,
+ fun ?MODULE:call_function_clause_/1,
+ fun ?MODULE:validate_unexpected_error_/1,
+ fun ?MODULE:validate_expected_error_/1,
+ fun ?MODULE:validate_chained_/1,
+ fun ?MODULE:stacktrace_/1,
+ fun ?MODULE:stacktrace_function_clause_/1,
+ fun ?MODULE:change_func_/1,
+ fun ?MODULE:caller_does_not_crash_on_reload_/1,
+ fun ?MODULE:call_original_undef_/1,
+ fun ?MODULE:history_empty_/1,
+ fun ?MODULE:history_call_/1,
+ fun ?MODULE:history_throw_/1,
+ fun ?MODULE:history_throw_fun_/1,
+ fun ?MODULE:history_exit_/1,
+ fun ?MODULE:history_error_/1,
+ fun ?MODULE:history_error_args_/1,
+ fun ?MODULE:history_meck_throw_/1,
+ fun ?MODULE:history_meck_throw_fun_/1,
+ fun ?MODULE:history_meck_exit_/1,
+ fun ?MODULE:history_meck_error_/1,
+ fun ?MODULE:history_by_pid_/1,
+ fun ?MODULE:shortcut_expect_/1,
+ fun ?MODULE:shortcut_expect_negative_arity_/1,
+ fun ?MODULE:shortcut_call_return_value_/1,
+ fun ?MODULE:shortcut_call_argument_/1,
+ fun ?MODULE:shortcut_re_add_/1,
+ fun ?MODULE:shortcut_opaque_/1,
+ fun ?MODULE:delete_/1,
+ fun ?MODULE:called_false_no_args_/1,
+ fun ?MODULE:called_true_no_args_/1,
+ fun ?MODULE:called_true_two_functions_/1,
+ fun ?MODULE:called_false_one_arg_/1,
+ fun ?MODULE:called_true_one_arg_/1,
+ fun ?MODULE:called_false_few_args_/1,
+ fun ?MODULE:called_true_few_args_/1,
+ fun ?MODULE:called_false_error_/1,
+ fun ?MODULE:called_true_error_/1,
+ fun ?MODULE:called_with_pid_no_args_/1,
+ fun ?MODULE:num_calls_/1,
+ fun ?MODULE:num_calls_error_/1,
+ fun ?MODULE:num_calls_with_pid_no_args_/1,
+ fun ?MODULE:called_wildcard_/1,
+ fun ?MODULE:sequence_/1,
+ fun ?MODULE:sequence_multi_/1,
+ fun ?MODULE:loop_/1,
+ fun ?MODULE:loop_multi_/1
+ ]]}.
+
+setup() ->
+ % Uncomment to run tests with dbg:
+ % dbg:tracer(),
+ % dbg:p(all, call),
+ % dbg:tpl(meck, []),
+ ok = meck:new(mymod),
+ mymod.
+
+teardown(Module) ->
+ catch meck:unload(Module).
+
+%% --- Tests using setup and teardown ------------------------------------------
+
+new_(Mod) ->
+ Info = Mod:module_info(),
+ ?assert(is_list(Info)).
+
+unload_(Mod) ->
+ ok = meck:unload(Mod),
+ ?assertEqual(false, code:is_loaded(Mod)).
+
+double_new_(Mod) ->
+ ?assertError({already_started, _}, meck:new(Mod)).
+
+validate_(Mod) ->
+ ?assertEqual(true, meck:validate(Mod)).
+
+expect_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> ok end),
+ ?assertEqual(true, meck:validate(Mod)).
+
+exports_(Mod) ->
+ ok = meck:expect(Mod, test1, fun() -> ok end),
+ ok = meck:expect(Mod, test2, fun(_) -> ok end),
+ ok = meck:expect(Mod, test3, fun(_, _) -> ok end),
+ ?assertEqual(0, proplists:get_value(test1, Mod:module_info(exports))),
+ ?assertEqual(1, proplists:get_value(test2, Mod:module_info(exports))),
+ ?assertEqual(2, proplists:get_value(test3, Mod:module_info(exports))),
+ ?assertEqual(true, meck:validate(Mod)).
+
+call_return_value_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> apa end),
+ ?assertEqual(apa, Mod:test()),
+ ?assertEqual(true, meck:validate(Mod)).
+
+call_argument_(Mod) ->
+ ok = meck:expect(Mod, test, fun(hest, 1) -> apa end),
+ ?assertEqual(apa, Mod:test(hest, 1)),
+ ?assertEqual(true, meck:validate(Mod)).
+
+call_function_clause_(Mod) ->
+ ok = meck:expect(Mod, test, fun(hest, 1) -> apa end),
+ ?assertError(function_clause, Mod:test(hest, 2)),
+ ?assertEqual(false, meck:validate(Mod)).
+
+validate_unexpected_error_(Mod) ->
+ ok = meck:expect(Mod, test, fun(hest, 1) -> erlang:error(timeout) end),
+ ?assertError(timeout, Mod:test(hest, 1)),
+ ?assertEqual(false, meck:validate(Mod)).
+
+validate_expected_error_(Mod) ->
+ ok = meck:expect(Mod, test, fun(hest, 1) ->
+ meck:exception(error, timeout)
+ end),
+ ?assertError(timeout, Mod:test(hest, 1)),
+ ?assertEqual(true, meck:validate(Mod)).
+
+validate_chained_(Mod) ->
+ ok = meck:new(mymod2),
+ ok = meck:expect(mymod2, test, fun() ->
+ meck:exception(error, test_error)
+ end),
+ ok = meck:expect(Mod, test, fun() ->
+ mymod2:test()
+ end),
+ ?assertError(test_error, Mod:test()),
+ ?assertEqual(false, meck:validate(Mod)),
+ ?assertEqual(true, meck:validate(mymod2)),
+ ok = meck:unload(mymod2).
+
+stacktrace_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> erlang:error(test_error) end),
+ try
+ Mod:test(),
+ throw(failed)
+ catch
+ error:test_error ->
+ ?assert(lists:any(fun({M, test, []}) when M == Mod -> true;
+ ({M, test, [],[]}) when M == Mod -> true;
+ (_) -> false
+ end, erlang:get_stacktrace()))
+ end.
+
+stacktrace_function_clause_(Mod) ->
+ ok = meck:expect(Mod, test, fun(1) -> ok end),
+ try
+ Mod:test(error),
+ throw(failed)
+ catch
+ error:function_clause ->
+ Stacktrace = erlang:get_stacktrace(),
+ ?assert(lists:any(fun({M, test, [error]}) when M == Mod -> true;
+ ({M, test, [error], []}) when M == Mod -> true;
+ (_) -> false
+ end, Stacktrace))
+ end.
+
+
+call_undef_(Mod) ->
+ ok = meck:expect(Mod, test, fun(hest, 1) -> apa end),
+ ?assertError(undef, Mod:test(hest)).
+
+caller_does_not_crash_on_reload_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> timer:sleep(infinity) end),
+ Pid = spawn(fun() -> Mod:test() end),
+ ok = meck:expect(Mod, new1, fun() -> ok end),
+ ok = meck:expect(Mod, new2, fun() -> ok end),
+ ok = meck:expect(Mod, new3, fun() -> ok end),
+ ?assertEqual(true, is_process_alive(Pid)).
+
+change_func_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> 1 end),
+ ?assertEqual(1, Mod:test()),
+ ok = meck:expect(Mod, test, fun() -> 2 end),
+ ?assertEqual(2, Mod:test()).
+
+call_original_undef_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> meck:passthrough([]) end),
+ ?assertError(undef, Mod:test()).
+
+history_empty_(Mod) ->
+ ?assertEqual([], meck:history(Mod)).
+
+history_call_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> ok end),
+ ok = meck:expect(Mod, test2, fun(_, _) -> result end),
+ ok = meck:expect(Mod, test3, 0, 3),
+ Mod:test(),
+ Mod:test2(a, b),
+ Mod:test3(),
+ ?assertEqual([{self(), {Mod, test, []}, ok},
+ {self(), {Mod, test2, [a, b]}, result},
+ {self(), {Mod, test3, []}, 3}], meck:history(Mod)).
+
+history_throw_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> throw(test_exception) end),
+ catch Mod:test(),
+ ?assertMatch([{_Pid, {Mod, test, []}, throw, test_exception, _Stacktrace}],
+ meck:history(Mod)).
+
+history_throw_fun_(Mod) ->
+ Fun = fun() -> exception_fun end,
+ ok = meck:expect(Mod, test, fun() -> throw(Fun) end),
+ catch Mod:test(),
+ ?assertMatch([{_Pid, {Mod, test, []}, throw, Fun, _Stacktrace}],
+ meck:history(Mod)).
+
+history_exit_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> exit(test_exit) end),
+ catch Mod:test(),
+ ?assertMatch([{_Pid, {Mod, test, []}, exit, test_exit, _Stacktrace}],
+ meck:history(Mod)).
+
+history_error_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> erlang:error(test_error) end),
+ catch Mod:test(),
+ ?assertMatch([{_Pid, {Mod, test, []}, error, test_error, _Stacktrace}],
+ meck:history(Mod)).
+
+history_error_args_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> erlang:error(test_error, [fake_args]) end),
+ catch Mod:test(),
+ History = meck:history(Mod),
+ ?assertMatch([{_Pid, {Mod, test, []}, error, test_error, _Stacktrace}],
+ meck:history(Mod)),
+ [{_Pid, _MFA, error, test_error, Stacktrace}] = History,
+ ?assert(lists:any(fun({_M, _F, [fake_args]}) -> true;
+ ({_M, _F, [fake_args], [{file,_},{line,_}]}) -> true;
+ (_) -> false end, Stacktrace)).
+
+history_meck_throw_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> meck:exception(throw, test_exception) end),
+ catch Mod:test(),
+ ?assertMatch([{_Pid, {Mod, test, []}, throw, test_exception, _Stacktrace}],
+ meck:history(Mod)).
+
+history_meck_throw_fun_(Mod) ->
+ Fun = fun() -> exception_fun end,
+ ok = meck:expect(Mod, test, fun() -> meck:exception(throw, Fun) end),
+ catch Mod:test(),
+ ?assertMatch([{_Pid, {Mod, test, []}, throw, Fun, _Stacktrace}],
+ meck:history(Mod)).
+
+history_meck_exit_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> meck:exception(exit, test_exit) end),
+ catch Mod:test(),
+ ?assertMatch([{_Pid, {Mod, test, []}, exit, test_exit, _Stacktrace}],
+ meck:history(Mod)).
+
+history_meck_error_(Mod) ->
+ ok = meck:expect(Mod, test, fun() -> meck:exception(error, test_error) end),
+ catch Mod:test(),
+ ?assertMatch([{_Pid, {Mod, test, []}, error, test_error, _Stacktrace}],
+ meck:history(Mod)).
+
+history_by_pid_(Mod) ->
+ ok = meck:expect(Mod, test1, fun() -> ok end),
+ ok = meck:expect(Mod, test2, fun() -> ok end),
+
+ TestPid = self(),
+ Fun = fun() ->
+ Mod:test1(),
+ TestPid ! {self(), done}
+ end,
+ Pid = spawn(Fun),
+ Mod:test1(),
+ Mod:test2(),
+ receive {Pid, done} -> ok end,
+ ?assertEqual([{Pid, {Mod, test1, []}, ok}], meck:history(Mod, Pid)),
+ ?assertEqual([{TestPid, {Mod, test1, []}, ok},
+ {TestPid, {Mod, test2, []}, ok}], meck:history(Mod, TestPid)),
+ ?assertEqual(meck:history(Mod), meck:history(Mod, '_')).
+
+shortcut_expect_(Mod) ->
+ ok = meck:expect(Mod, test, 0, ok),
+ ?assertEqual(true, meck:validate(Mod)).
+
+shortcut_expect_negative_arity_(Mod) ->
+ ?assertError(function_clause, meck:expect(Mod, test, -1, ok)).
+
+shortcut_call_return_value_(Mod) ->
+ ok = meck:expect(Mod, test, 0, apa),
+ ?assertEqual(apa, Mod:test()),
+ ?assertEqual(true, meck:validate(Mod)).
+
+shortcut_call_argument_(Mod) ->
+ ok = meck:expect(Mod, test, 2, apa),
+ ?assertEqual(apa, Mod:test(hest, 1)),
+ ?assertEqual(true, meck:validate(Mod)).
+
+shortcut_re_add_(Mod) ->
+ ok = meck:expect(Mod, test, 2, apa),
+ ?assertEqual(apa, Mod:test(hest, 1)),
+ ok = meck:expect(Mod, test, 2, new),
+ ?assertEqual(new, Mod:test(hest, 1)),
+ ?assertEqual(true, meck:validate(Mod)).
+
+shortcut_opaque_(Mod) ->
+ ok = meck:expect(Mod, test, 0, {test, [a, self()]}),
+ ?assertMatch({test, [a, P]} when P == self(), Mod:test()).
+
+delete_(Mod) ->
+ ok = meck:expect(Mod, test, 2, ok),
+ ?assertEqual(ok, meck:delete(Mod, test, 2)),
+ ?assertError(undef, Mod:test(a, b)),
+ ?assert(meck:validate(Mod)).
+
+called_false_no_args_(Mod) ->
+ Args = [],
+ ok = meck:expect(Mod, test, length(Args), ok),
+ assert_called(Mod, test, Args, false),
+ ok.
+
+called_true_no_args_(Mod) ->
+ Args = [],
+ ok = meck:expect(Mod, test, length(Args), ok),
+ ok = apply(Mod, test, Args),
+ assert_called(Mod, test, Args, true),
+ ok.
+
+called_true_two_functions_(Mod) ->
+ Args = [],
+ ok = meck:expect(Mod, test1, length(Args), ok),
+ ok = meck:expect(Mod, test2, length(Args), ok),
+ ok = apply(Mod, test1, Args),
+ ok = apply(Mod, test2, Args),
+ assert_called(Mod, test2, Args, true),
+ ok.
+
+called_false_one_arg_(Mod) ->
+ Args = ["hello"],
+ ok = meck:expect(Mod, test, length(Args), ok),
+ assert_called(Mod, test, Args, false),
+ ok.
+
+called_true_one_arg_(Mod) ->
+ Args = ["hello"],
+ ok = meck:expect(Mod, test, length(Args), ok),
+ ok = apply(Mod, test, Args),
+ assert_called(Mod, test, Args, true),
+ ok.
+
+called_false_few_args_(Mod) ->
+ Args = [one, 2, {three, 3}, "four"],
+ ok = meck:expect(Mod, test, length(Args), ok),
+ assert_called(Mod, test, Args, false),
+ ok.
+
+called_true_few_args_(Mod) ->
+ Args = [one, 2, {three, 3}, "four"],
+ ok = meck:expect(Mod, test, length(Args), ok),
+ ok = apply(Mod, test, Args),
+ assert_called(Mod, test, Args, true),
+ ok.
+
+called_false_error_(Mod) ->
+ Args = [one, "two", {3, 3}],
+ TestFun = fun (_, _, _) -> meck:exception(error, my_error) end,
+ ok = meck:expect(Mod, test, TestFun),
+ assert_called(Mod, test, Args, false),
+ ok.
+
+called_true_error_(Mod) ->
+ Args = [one, "two", {3, 3}],
+ expect_catch_apply(Mod, test, Args),
+ assert_called(Mod, test, Args, true),
+ ok.
+
+called_with_pid_no_args_(Mod) ->
+ Args = [],
+ ok = meck:expect(Mod, test, length(Args), ok),
+ Pid = spawn_caller_and_sync(Mod, test, Args),
+ assert_called(Mod, test, Args, self(), false),
+ assert_called(Mod, test, Args, Pid, true),
+ ok = apply(Mod, test, Args),
+ assert_called(Mod, test, Args, self(), true),
+ ?assertEqual(meck:called(Mod, test, Args, '_'),
+ meck:called(Mod, test, Args)).
+
+spawn_caller_and_sync(Mod, Func, Args) ->
+ TestPid = self(),
+ Fun = fun() ->
+ catch apply(Mod, Func, Args),
+ TestPid ! {self(), done}
+ end,
+ Pid = spawn(Fun),
+ receive {Pid, done} -> ok end, % sync with the spawned process
+ Pid.
+
+num_calls_(Mod) ->
+ Args = [],
+ IncorrectArgs = [foo],
+ ok = meck:expect(Mod, test1, length(Args), ok),
+ ?assertEqual(0, meck:num_calls(Mod, test1, Args)),
+ ok = apply(Mod, test1, Args),
+ ?assertEqual(1, meck:num_calls(Mod, test1, Args)),
+ ?assertEqual(0, meck:num_calls(Mod, test1, IncorrectArgs)).
+
+num_calls_error_(Mod) ->
+ Args = [one, "two", {3, 3}],
+ expect_catch_apply(Mod, test, Args),
+ ?assertEqual(1, meck:num_calls(Mod, test, Args)).
+
+num_calls_with_pid_no_args_(Mod) ->
+ Args = [],
+ ok = meck:expect(Mod, test, length(Args), ok),
+ Pid = spawn_caller_and_sync(Mod, test, Args),
+ ?assertEqual(0, meck:num_calls(Mod, test, Args, self())),
+ ?assertEqual(1, meck:num_calls(Mod, test, Args, Pid)),
+ ok = apply(Mod, test, Args),
+ ?assertEqual(1, meck:num_calls(Mod, test, Args, self())),
+ ?assertEqual(meck:num_calls(Mod, test, Args, '_'),
+ meck:num_calls(Mod, test, Args)).
+
+expect_apply(Mod, Func, Args) ->
+ ok = meck:expect(Mod, Func, length(Args), ok),
+ ok = apply(Mod, Func, Args).
+
+expect_catch_apply(Mod, Func, Args) ->
+ TestFun = fun (_, _, _) -> meck:exception(error, my_error) end,
+ ok = meck:expect(Mod, Func, TestFun),
+ catch apply(Mod, Func, Args).
+
+called_wildcard_(Mod) ->
+ Args = [one, 2, {three, 3}, "four"],
+ ok = meck:expect(Mod, test, length(Args), ok),
+ ok = apply(Mod, test, Args),
+ assert_called(Mod, test, [one, '_', {three, '_'}, "four"], true),
+ ok.
+
+sequence_(Mod) ->
+ Sequence = [a, b, c, d, e],
+ ?assertEqual(ok, meck:sequence(Mod, s, 2, Sequence)),
+ ?assertEqual(Sequence,
+ [Mod:s(a, b) || _ <- lists:seq(1, length(Sequence))]),
+ ?assertEqual([e, e, e, e, e],
+ [Mod:s(a, b) || _ <- lists:seq(1, 5)]),
+ ?assert(meck:validate(Mod)).
+
+sequence_multi_(Mod) ->
+ meck:new(mymod2),
+ Mods = [Mod, mymod2],
+ Sequence = [a, b, c, d, e],
+ ?assertEqual(ok, meck:sequence(Mods, s, 2, Sequence)),
+ ?assertEqual(Sequence,
+ [Mod:s(a, b) || _ <- lists:seq(1, length(Sequence))]),
+ ?assertEqual([e, e, e, e, e],
+ [Mod:s(a, b) || _ <- lists:seq(1, 5)]),
+ ?assertEqual(Sequence,
+ [mymod2:s(a, b) || _ <- lists:seq(1, length(Sequence))]),
+ ?assertEqual([e, e, e, e, e],
+ [mymod2:s(a, b) || _ <- lists:seq(1, 5)]),
+ ?assert(meck:validate(Mods)).
+
+loop_(Mod) ->
+ Loop = [a, b, c, d, e],
+ ?assertEqual(ok, meck:loop(Mod, l, 2, Loop)),
+ [?assertEqual(V, Mod:l(a, b)) || _ <- lists:seq(1, length(Loop)), V <- Loop],
+ ?assert(meck:validate(Mod)).
+
+loop_multi_(Mod) ->
+ meck:new(mymod2),
+ Mods = [Mod, mymod2],
+ Loop = [a, b, c, d, e],
+ ?assertEqual(ok, meck:loop(Mods, l, 2, Loop)),
+ [[?assertEqual(V, M:l(a, b)) || _ <- lists:seq(1, length(Loop)), V <- Loop]
+ || M <- Mods],
+ ?assert(meck:validate(Mods)).
+
+%% --- Tests with own setup ----------------------------------------------------
+
+call_original_test() ->
+ false = code:purge(meck_test_module),
+ ?assertEqual({module, meck_test_module}, code:load_file(meck_test_module)),
+ ok = meck:new(meck_test_module, [no_passthrough_cover]),
+ ?assertEqual({file, ""}, code:is_loaded(meck_test_module_meck_original)),
+ ok = meck:expect(meck_test_module, a, fun() -> c end),
+ ok = meck:expect(meck_test_module, b, fun() -> meck:passthrough([]) end),
+ ?assertEqual(c, meck_test_module:a()),
+ ?assertEqual(b, meck_test_module:b()),
+ ok = meck:unload(meck_test_module).
+
+unload_renamed_original_test() ->
+ ok = meck:new(meck_test_module),
+ ok = meck:unload(meck_test_module),
+ ?assertEqual(false, code:is_loaded(meck_test_module_meck_original)).
+
+unload_all_test() ->
+ Mods = [test_a, test_b, test_c, test_d, test_e],
+ ok = meck:new(Mods),
+ ?assertEqual(lists:sort(Mods), lists:sort(meck:unload())),
+ [?assertEqual(false, code:is_loaded(M)) || M <- Mods].
+
+original_no_file_test() ->
+ {ok, Mod, Beam} = compile:forms([{attribute, 1, module, meck_not_on_disk}]),
+ {module, Mod} = code:load_binary(Mod, "", Beam),
+ ?assertEqual(ok, meck:new(meck_not_on_disk)),
+ ok = meck:unload(meck_not_on_disk).
+
+original_has_no_object_code_test() ->
+ {ok, Mod, Beam} = compile:forms([{attribute, 1, module, meck_on_disk}]),
+ ok = file:write_file("meck_on_disk.beam", Beam),
+ {module, Mod} = code:load_binary(Mod, "meck_on_disk.beam", Beam),
+ ?assertEqual(ok, meck:new(meck_on_disk)),
+ ok = file:delete("meck_on_disk.beam"),
+ ok = meck:unload(meck_on_disk).
+
+passthrough_nonexisting_module_test() ->
+ ok = meck:new(mymod, [passthrough]),
+ ok = meck:expect(mymod, test, fun() -> ok end),
+ ?assertEqual(ok, mymod:test()),
+ ok = meck:unload(mymod).
+
+passthrough_test() ->
+ passthrough_test([]).
+
+passthrough_test(Opts) ->
+ ok = meck:new(meck_test_module, [passthrough|Opts]),
+ ok = meck:expect(meck_test_module, a, fun() -> c end),
+ ?assertEqual(c, meck_test_module:a()),
+ ?assertEqual(b, meck_test_module:b()),
+ ?assertEqual({1, 2}, meck_test_module:c(1, 2)),
+ ok = meck:unload(meck_test_module).
+
+passthrough_different_arg_test() ->
+ ok = meck:new(meck_test_module),
+ ok = meck:expect(meck_test_module, c,
+ fun(_, _) -> meck:passthrough([x, y]) end),
+ ?assertEqual({x, y}, meck_test_module:c(1, 2)),
+ ok = meck:unload(meck_test_module).
+
+passthrough_bif_test() ->
+ ?assertEqual(ok, meck:new(file, [unstick, passthrough])),
+ ?assertEqual(ok, meck:unload(file)).
+
+cover_test() ->
+ {ok, _} = cover:compile("../test/meck_test_module.erl"),
+ a = meck_test_module:a(),
+ b = meck_test_module:b(),
+ {1, 2} = meck_test_module:c(1, 2),
+ {ok, {meck_test_module, {3,0}}} = cover:analyze(meck_test_module, module),
+ run_mock_no_cover_file(meck_test_module),
+ {ok, {meck_test_module, {3,0}}} = cover:analyze(meck_test_module, module).
+
+cover_options_test_() ->
+ {foreach, fun compile_options_setup/0, fun compile_options_teardown/1,
+ [{with, [T]} || T <- [fun ?MODULE:cover_options_/1,
+ fun ?MODULE:cover_options_fail_/1
+ ]]}.
+
+compile_options_setup() ->
+ Module = cover_test_module,
+ % Our test module won't compile without compiler options that
+ % rebar won't give it, thus the rename dance.
+ Src = join("../test/", Module, ".erl"),
+ ok = file:rename(join("../test/", Module, ".dontcompile"), Src),
+ OldPath = code:get_path(),
+ code:add_path("../test"),
+ {OldPath, Src, Module}.
+
+compile_options_teardown({OldPath, Src, Module}) ->
+ file:rename(Src, join("../test/", Module, ".dontcompile")),
+ code:purge(Module),
+ code:delete(Module),
+ code:set_path(OldPath).
+
+cover_options_({_OldPath, Src, Module}) ->
+ % Test that compilation options (include paths and preprocessor
+ % definitions) are used when un-mecking previously cover compiled
+ % modules.
+ CompilerOptions = [{i, "../test/include"}, {d, 'TEST', true}],
+    % The compile-options recovery depends on having the BEAM file
+    % available.
+ {ok, _} = compile:file(Src, [{outdir, "../test"}|CompilerOptions]),
+ {ok, _} = cover:compile(Src, CompilerOptions),
+ a = Module:a(),
+ b = Module:b(),
+ {1, 2} = Module:c(1, 2),
+    % We get 2 instead of the expected 3, possibly because cover
+    % doesn't count lines in include files.
+ ?assertEqual({ok, {Module, {2,0}}}, cover:analyze(Module, module)),
+ run_mock_no_cover_file(Module),
+ % 2 instead of 3, as above
+ ?assertEqual({ok, {Module, {2,0}}}, cover:analyze(Module, module)).
+
+cover_options_fail_({_OldPath, Src, Module}) ->
+ %% This may look like the test above but there is a subtle
+ %% difference. When `cover:compile_beam' is called it squashes
+ %% compile options. This test verifies that function `b/0', which
+ %% relies on the `TEST' directive being set can still be called
+ %% after the module is meck'ed.
+ CompilerOptions = [{i, "../test/include"}, {d, 'TEST', true},
+ {outdir, "../test"}, debug_info],
+ {ok, _} = compile:file(Src, CompilerOptions),
+ ?assertEqual(CompilerOptions, meck_mod:compile_options(Module)),
+ {ok, _} = cover:compile_beam(Module),
+ ?assertEqual([], meck_mod:compile_options(Module)),
+ a = Module:a(),
+ b = Module:b(),
+ {1, 2} = Module:c(1, 2),
+ ?assertEqual({ok, {Module, {2,0}}}, cover:analyze(Module, module)),
+ ok = meck:new(Module, [passthrough]),
+ ok = meck:expect(Module, a, fun () -> c end),
+ ?assertEqual(c, Module:a()),
+ ?assertEqual(b, Module:b()),
+ ?assertEqual({1, 2}, Module:c(1, 2)),
+ ok = meck:unload(Module),
+    %% Verify that passthrough calls went to cover
+ ?assertEqual({ok, {Module, 4}}, cover:analyze(Module, calls, module)).
+
+join(Path, Module, Ext) -> filename:join(Path, atom_to_list(Module) ++ Ext).
+
+run_mock_no_cover_file(Module) ->
+ ok = meck:new(Module),
+ ok = meck:expect(Module, a, fun () -> c end),
+ ?assertEqual(c, Module:a()),
+ ok = meck:unload(Module),
+ ?assert(not filelib:is_file(atom_to_list(Module) ++ ".coverdata")).
+
+%% @doc Verify that passthrough calls _don't_ appear in cover
+%% analysis.
+no_cover_passthrough_test() ->
+ {ok, _} = cover:compile("../test/meck_test_module.erl"),
+ {ok, {meck_test_module, {0,3}}} = cover:analyze(meck_test_module, module),
+ passthrough_test([no_passthrough_cover]),
+ {ok, {meck_test_module, {0,3}}} = cover:analyze(meck_test_module, module).
+
+%% @doc Verify that passthrough calls appear in cover analysis.
+cover_passthrough_test() ->
+ {ok, _} = cover:compile("../test/meck_test_module.erl"),
+ ?assertEqual({ok, {meck_test_module, {0,3}}},
+ cover:analyze(meck_test_module, module)),
+ passthrough_test([]),
+ ?assertEqual({ok, {meck_test_module, {2,1}}},
+ cover:analyze(meck_test_module, module)).
+
+%% @doc The mocked module is unloaded if the meck process crashes.
+unload_when_crashed_test() ->
+ ok = meck:new(mymod),
+ ?assertMatch({file, _}, code:is_loaded(mymod)),
+ SaltedName = mymod_meck,
+ Pid = whereis(SaltedName),
+ ?assertEqual(true, is_pid(Pid)),
+ unlink(Pid),
+ exit(Pid, expected_test_exit),
+ timer:sleep(100),
+ ?assertEqual(undefined, whereis(SaltedName)),
+ ?assertEqual(false, code:is_loaded(mymod)).
+
+%% @doc With `no_link', the meck process is not linked to its creator.
+unlink_test() ->
+ ok = meck:new(mymod, [no_link]),
+ SaltedName = mymod_meck,
+ {links, Links} = process_info(whereis(SaltedName), links),
+ ?assert(not lists:member(self(), Links)),
+ ok = meck:unload(mymod).
+
+%% @doc Exception is thrown when you run expect on a non-existing (and not yet mocked) module.
+expect_without_new_test() ->
+ ?assertError({not_mocked, othermod},
+ meck:expect(othermod, test, fun() -> ok end)).
+
+history_passthrough_test() ->
+ ok = meck:new(meck_test_module, [passthrough]),
+ ok = meck:expect(meck_test_module, a, fun() -> c end),
+ c = meck_test_module:a(),
+ b = meck_test_module:b(),
+ ?assertEqual([{self(), {meck_test_module, a, []}, c},
+ {self(), {meck_test_module, b, []}, b}],
+ meck:history(meck_test_module)),
+ ok = meck:unload(meck_test_module).
+
+multi_test() ->
+ Mods = [mod1, mod2, mod3],
+ ok = meck:new(Mods),
+ ok = meck:expect(Mods, test, fun() -> ok end),
+ ok = meck:expect(Mods, test2, 0, ok),
+ [?assertEqual(ok, M:test()) || M <- Mods],
+ ?assert(meck:validate(Mods)),
+ ok = meck:unload(Mods).
+
+multi_invalid_test() ->
+ Mods = [mod1, mod2, mod3],
+ ok = meck:new(Mods),
+ ok = meck:expect(Mods, test, fun(1) -> ok end),
+ ?assertError(function_clause, mod2:test(2)),
+ ?assert(not meck:validate(Mods)),
+ ok = meck:unload(Mods).
+
+multi_option_test() ->
+ Mods = [mod1, mod2, mod3],
+ ok = meck:new(Mods, [passthrough]),
+ ok = meck:expect(Mods, test, fun() -> ok end),
+ [?assertEqual(ok, M:test()) || M <- Mods],
+ ?assert(meck:validate(Mods)),
+ ok = meck:unload(Mods).
+
+multi_shortcut_test() ->
+ Mods = [mod1, mod2, mod3],
+ ok = meck:new(Mods),
+ ok = meck:expect(Mods, test, 0, ok),
+ [?assertEqual(ok, M:test()) || M <- Mods],
+ ?assert(meck:validate(Mods)),
+ ok = meck:unload(Mods).
+
+multi_delete_test() ->
+ Mods = [mod1, mod2, mod3],
+ ok = meck:new(Mods),
+ ok = meck:expect(Mods, test, 0, ok),
+ ?assertEqual(ok, meck:delete(Mods, test, 0)),
+ [?assertError(undef, M:test()) || M <- Mods],
+ ?assert(meck:validate(Mods)),
+ ok = meck:unload(Mods).
+
+handle_cast_unmodified_state_test() ->
+ S = dummy_state,
+ ?assertEqual({noreply, S}, meck:handle_cast(dummy_msg, S)).
+
+code_change_unmodified_state_test() ->
+ S = dummy_state,
+ ?assertEqual({ok, S}, meck:code_change(old_version, S, [])).
+
+remote_meck_test_() ->
+ {foreach, fun remote_setup/0, fun remote_teardown/1,
+ [{with, [T]} || T <- [fun remote_meck_/1,
+ fun remote_meck_cover_/1]]}.
+
+remote_setup() ->
+ [] = os:cmd("epmd -daemon"),
+ Hostname = "localhost",
+ Myself = list_to_atom("meck_eunit_test@" ++ Hostname),
+ net_kernel:start([Myself, shortnames]),
+ {ok, Node} = slave:start_link(list_to_atom(Hostname), meck_remote_test,
+ "-pa test"),
+ {Mod, Bin, File} = code:get_object_code(meck),
+ {module, Mod} = rpc:call(Node, code, load_binary, [Mod, File, Bin]),
+ {module, meck_test_module} =
+ rpc:call(Node, code, load_file, [meck_test_module]),
+ {Node, meck_test_module}.
+
+remote_teardown({Node, _Mod}) ->
+ ok = slave:stop(Node).
+
+remote_meck_({Node, Mod}) ->
+ ?assertEqual(ok, rpc:call(Node, meck, new, [Mod, [no_link]])),
+ ?assertEqual(ok, rpc:call(Node, meck, expect, [Mod, test, 0, true])),
+ ?assertEqual(true, rpc:call(Node, Mod, test, [])).
+
+remote_meck_cover_({Node, Mod}) ->
+ {ok, Mod} = cover:compile(Mod),
+ {ok, _Nodes} = cover:start([Node]),
+ ?assertEqual(ok, rpc:call(Node, meck, new, [Mod])).
+
+can_mock_sticky_modules_test() ->
+ code:stick_mod(meck_test_module),
+ meck:new(meck_test_module, [unstick]),
+ ?assertNot(code:is_sticky(meck_test_module)),
+ meck:unload(meck_test_module),
+ ?assert(code:is_sticky(meck_test_module)),
+ code:unstick_mod(meck_test_module).
+
+
+sticky_directory_test_() ->
+ {foreach, fun sticky_setup/0, fun sticky_teardown/1,
+ [{with, [T]}
+ || T <- [fun ?MODULE:can_mock_sticky_module_not_yet_loaded_/1,
+ fun ?MODULE:cannot_mock_sticky_module_without_unstick_/1]]}.
+
+sticky_setup() ->
+ % Find out where the beam file is (purge because it is cover compiled)
+ Module = meck_test_module,
+ false = code:purge(Module),
+ {module, Module} = code:load_file(Module),
+ Beam = code:which(Module),
+
+ % Unload module so it's not loaded when running meck
+ false = code:purge(Module),
+ true = code:delete(Module),
+
+ % Create new sticky dir and copy beam file
+ Dir = "sticky_test",
+ ok = filelib:ensure_dir(filename:join(Dir, "dummy")),
+ Dest = filename:join(Dir, filename:basename(Beam)),
+ {ok, _BytesCopied} = file:copy(Beam, Dest),
+ true = code:add_patha(Dir),
+ ok = code:stick_dir(Dir),
+ code:load_file(Module),
+
+ {Module, {Dir, Dest}}.
+
+sticky_teardown({Module, {Dir, Dest}}) ->
+ % Clean up
+ ok = code:unstick_dir(Dir),
+ false = code:purge(Module),
+ true = code:del_path(Dir),
+ ok = file:delete(Dest),
+ ok = file:del_dir(Dir).
+
+can_mock_sticky_module_not_yet_loaded_({Mod, _}) ->
+ ?assertEqual(ok, meck:new(Mod, [unstick])),
+ ?assertNot(code:is_sticky(Mod)),
+ ?assertEqual(ok, meck:unload(Mod)),
+ ?assert(code:is_sticky(Mod)).
+
+cannot_mock_sticky_module_without_unstick_({Mod, _}) ->
+ ?assertError(module_is_sticky, meck:new(Mod, [no_link])).
+
+can_mock_non_sticky_module_test() ->
+ ?assertNot(code:is_sticky(meck_test_module)),
+ ?assertEqual(ok, meck:new(meck_test_module, [unstick])),
+ ?assertNot(code:is_sticky(meck_test_module)),
+ ?assertEqual(ok, meck:unload(meck_test_module)),
+ ?assertNot(code:is_sticky(meck_test_module)).
+
+cannot_expect_bif_or_autogenerated_test() ->
+ ?assertEqual(ok, meck:new(unicode, [unstick, passthrough])),
+ ?assertError({cannot_mock_builtin, {unicode, characters_to_binary, 2}},
+ meck:expect(unicode, characters_to_binary, 2, doh)),
+ ?assertError({cannot_mock_autogenerated, {unicode, module_info, 0}},
+ meck:expect(unicode, module_info, 0, doh)),
+ ?assertEqual(ok, meck:unload(unicode)).
+
+meck_parametrized_module_test() ->
+ ?assertEqual(ok, meck:new(meck_test_parametrized_module)),
+ ?assertEqual(ok, meck:expect(meck_test_parametrized_module, new,
+ fun(V1, V2) ->
+ {meck_test_parametrized_module, V1, V2}
+ end)),
+ ?assertEqual(ok, meck:expect(meck_test_parametrized_module, which, 1, mecked)),
+ Object = meck_test_parametrized_module:new(var1, var2),
+ ?assertEqual(mecked, Object:which()),
+ ?assertEqual(ok, meck:unload(meck_test_parametrized_module)).
+
+meck_parametrized_module_passthrough_test() ->
+ ?assertEqual(ok, meck:new(meck_test_parametrized_module, [passthrough])),
+ ?assertEqual(ok, meck:expect(meck_test_parametrized_module, new,
+ fun(V1, V2) ->
+ {meck_test_parametrized_module, V1, V2}
+ end)),
+ ?assertEqual(ok, meck:expect(meck_test_parametrized_module, var2,
+ fun({_, _Var1, Var2} = _This) ->
+ {mecked, Var2}
+ end)),
+ Object = meck_test_parametrized_module:new(var1, var2),
+ ?assertEqual({original, var1}, Object:var1()),
+ ?assertEqual({mecked, var2}, Object:var2()),
+ ?assertEqual(ok, meck:unload(meck_test_parametrized_module)).
+
+%%==============================================================================
+%% Internal Functions
+%%==============================================================================
+
+assert_called(Mod, Function, Args, WasCalled) ->
+ ?assertEqual(WasCalled, meck:called(Mod, Function, Args)),
+ ?assert(meck:validate(Mod)).
+
+assert_called(Mod, Function, Args, Pid, WasCalled) ->
+ ?assertEqual(WasCalled, meck:called(Mod, Function, Args, Pid)),
+ ?assert(meck:validate(Mod)).
diff --git a/deps/mem3/README.md b/deps/mem3/README.md
new file mode 100644
index 00000000..7f6998bb
--- /dev/null
+++ b/deps/mem3/README.md
@@ -0,0 +1,33 @@
+## mem3
+
+Mem3 is the node membership application for clustered [CouchDB][1]. It is used in [BigCouch][2] and tracks two very important things for the cluster:
+
+ 1. member nodes
+ 2. node/partition mappings for each database
+
+Both the nodes and partitions are tracked in node-local couch databases. Partitions are heavily used, so an ETS cache is also maintained for low-latency lookups. The nodes and partitions are synchronized via continuous CouchDB replication, which serves as 'gossip' in Dynamo parlance. The partitions ETS cache is kept in sync based on membership and database event listeners.
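+
+A quick sketch of the two, from an Erlang shell on a running node (the database name is illustrative):
+
+ %% 1. the current cluster members
+ mem3:nodes().
+ %% 2. the node/partition mapping for a database, served from the
+ %% partitions ets cache once it is populated
+ mem3:shards(<<"mydb">>).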
+
+A very important point to make here is that BigCouch does not necessarily divide up each database into equal partitions across the nodes of a cluster. For instance, in a 20-node cluster, you may need to create a small database with very few documents. For efficiency reasons, you may create it with Q=4 and keep the default of N=3. This means you only have 12 partitions total, so 8 nodes will hold none of the data for this database. Given this feature, we even out partition use across the cluster by altering the 'start' node for each database's partitions.
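+
+For illustration, a minimal sketch of how such a layout is chosen (`choose_shards/2` is the internal function that picks the partition map; the `n` and `q` option names follow its source):
+
+ %% a 4-partition, 3-copy layout for a small database
+ Shards = mem3:choose_shards(<<"small_db">>, [{n, 3}, {q, 4}]),
+ length(Shards). %% 12 shard copies in total (Q x N), as described above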
+
+Splitting and merging partitions is an immature feature of the system and will require attention in the near term. We believe we can implement both functions and perform them while the database remains online.
+
+### Getting Started
+
+Mem3 requires R13B03 or higher and can be built with [rebar][6], which comes bundled in the repository. Rebar needs to be able to find the `couch_db.hrl` header file; one way to accomplish this is to set `ERL_LIBS` to point to the `apps` subdirectory of a bigcouch checkout, e.g.
+
+ ERL_LIBS="/usr/local/src/bigcouch/apps" ./rebar compile
+
+### License
+[Apache 2.0][3]
+
+### Contact
+ * [http://cloudant.com][4]
+ * [info@cloudant.com][5]
+
+[1]: http://couchdb.apache.org
+[2]: http://github.com/cloudant/bigcouch
+[3]: http://www.apache.org/licenses/LICENSE-2.0.html
+[4]: http://cloudant.com
+[5]: mailto:info@cloudant.com
+[6]: http://github.com/basho/rebar
diff --git a/deps/mem3/include/mem3.hrl b/deps/mem3/include/mem3.hrl
new file mode 100644
index 00000000..04658bb3
--- /dev/null
+++ b/deps/mem3/include/mem3.hrl
@@ -0,0 +1,44 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% type specification hacked to suppress dialyzer warning re: match spec
+-record(shard, {
+ name :: binary() | '_',
+ node :: node() | '_',
+ dbname :: binary(),
+ range :: [non_neg_integer() | '$1' | '$2'],
+ ref :: reference() | 'undefined' | '_'
+}).
+
+%% types
+-type join_type() :: init | join | replace | leave.
+-type join_order() :: non_neg_integer().
+-type options() :: list().
+-type mem_node() :: {join_order(), node(), options()}.
+-type mem_node_list() :: [mem_node()].
+-type arg_options() :: {test, boolean()}.
+-type args() :: [] | [arg_options()].
+-type test() :: undefined | node().
+-type epoch() :: float().
+-type clock() :: {node(), epoch()}.
+-type vector_clock() :: [clock()].
+-type ping_node() :: node() | nil.
+-type gossip_fun() :: call | cast.
+
+-type part() :: #shard{}.
+-type fullmap() :: [part()].
+-type ref_part_map() :: {reference(), part()}.
+-type tref() :: reference().
+-type np() :: {node(), part()}.
+-type beg_acc() :: [integer()].
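+
+% A sketch of why the '_' and '$1'/'$2' alternatives appear above: the
+% #shard{} record doubles as an ets match-spec head (see mem3:shards/2
+% later in this tree), e.g.
+%
+%   Head = #shard{name = '_', node = '_', dbname = DbName,
+%                 range = ['$1', '$2'], ref = '_'},
+%   ets:select(partitions, [{Head, [{'=<', '$1', HashKey},
+%                                   {'=<', HashKey, '$2'}], ['$_']}]).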
diff --git a/deps/mem3/rebar b/deps/mem3/rebar
new file mode 100755
index 00000000..30c43ba5
--- /dev/null
+++ b/deps/mem3/rebar
Binary files differ
diff --git a/deps/mem3/rebar.config b/deps/mem3/rebar.config
new file mode 100644
index 00000000..4af6b852
--- /dev/null
+++ b/deps/mem3/rebar.config
@@ -0,0 +1,17 @@
+% Copyright 2011 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{deps, [
+ {twig, ".*", {git, "https://github.com/cloudant/twig.git", {tag, "0.2.1"}}}
+]}.
diff --git a/deps/mem3/src/mem3.app.src b/deps/mem3/src/mem3.app.src
new file mode 100644
index 00000000..88447783
--- /dev/null
+++ b/deps/mem3/src/mem3.app.src
@@ -0,0 +1,13 @@
+{application, mem3, [
+ {description, "CouchDB Cluster Membership"},
+ {vsn, git},
+ {mod, {mem3_app, []}},
+ {registered, [
+ mem3_cache,
+ mem3_events,
+ mem3_nodes,
+ mem3_sync,
+ mem3_sup
+ ]},
+ {applications, [kernel, stdlib, sasl, crypto, mochiweb, couch, twig]}
+]}.
diff --git a/deps/mem3/src/mem3.erl b/deps/mem3/src/mem3.erl
new file mode 100644
index 00000000..c7979642
--- /dev/null
+++ b/deps/mem3/src/mem3.erl
@@ -0,0 +1,238 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3).
+
+-export([start/0, stop/0, restart/0, nodes/0, node_info/2, shards/1, shards/2,
+ choose_shards/2, n/1, dbname/1, ushards/1]).
+-export([compare_nodelists/0, compare_shards/1]).
+-export([quorum/1]).
+
+-include("mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+start() ->
+ application:start(mem3).
+
+stop() ->
+ application:stop(mem3).
+
+restart() ->
+ stop(),
+ start().
+
+%% @doc Detailed report of cluster-wide membership state. Queries the state
+%% on all member nodes and builds a dictionary with unique states as the
+%% key and the nodes holding that state as the value. Also reports member
+%% nodes which fail to respond and nodes which are connected but are not
+%% cluster members. Useful for debugging.
+-spec compare_nodelists() -> [{{cluster_nodes, [node()]} | bad_nodes
+ | non_member_nodes, [node()]}].
+compare_nodelists() ->
+ Nodes = mem3:nodes(),
+ AllNodes = erlang:nodes([this, visible]),
+ {Replies, BadNodes} = gen_server:multi_call(Nodes, mem3_nodes, get_nodelist),
+ Dict = lists:foldl(fun({Node, Nodelist}, D) ->
+ orddict:append({cluster_nodes, Nodelist}, Node, D)
+ end, orddict:new(), Replies),
+ [{non_member_nodes, AllNodes -- Nodes}, {bad_nodes, BadNodes} | Dict].
+
+-spec compare_shards(DbName::iodata()) -> [{bad_nodes | [#shard{}], [node()]}].
+compare_shards(DbName) when is_list(DbName) ->
+ compare_shards(list_to_binary(DbName));
+compare_shards(DbName) ->
+ Nodes = mem3:nodes(),
+ {Replies, BadNodes} = rpc:multicall(mem3, shards, [DbName]),
+ GoodNodes = [N || N <- Nodes, not lists:member(N, BadNodes)],
+ Dict = lists:foldl(fun({Shards, Node}, D) ->
+ orddict:append(Shards, Node, D)
+ end, orddict:new(), lists:zip(Replies, GoodNodes)),
+ [{bad_nodes, BadNodes} | Dict].
+
+-spec n(DbName::iodata()) -> integer().
+n(DbName) ->
+ length(mem3:shards(DbName, <<"foo">>)).
+
+-spec nodes() -> [node()].
+nodes() ->
+ mem3_nodes:get_nodelist().
+
+node_info(Node, Key) ->
+ mem3_nodes:get_node_info(Node, Key).
+
+-spec shards(DbName::iodata()) -> [#shard{}].
+shards(DbName) when is_list(DbName) ->
+ shards(list_to_binary(DbName));
+shards(DbName) ->
+ ShardDbName =
+ list_to_binary(couch_config:get("mem3", "shard_db", "dbs")),
+ case DbName of
+ ShardDbName ->
+ %% shard_db is treated as a single sharded db to support calls to db_info
+ %% and view_all_docs
+ [#shard{
+ node = node(),
+ name = ShardDbName,
+ dbname = ShardDbName,
+ range = [0, 2 bsl 31]}];
+ _ ->
+ try ets:lookup(partitions, DbName) of
+ [] ->
+ mem3_util:load_shards_from_disk(DbName);
+ Else ->
+ Else
+ catch error:badarg ->
+ mem3_util:load_shards_from_disk(DbName)
+ end
+ end.
+
+-spec shards(DbName::iodata(), DocId::binary()) -> [#shard{}].
+shards(DbName, DocId) when is_list(DbName) ->
+ shards(list_to_binary(DbName), DocId);
+shards(DbName, DocId) when is_list(DocId) ->
+ shards(DbName, list_to_binary(DocId));
+shards(DbName, DocId) ->
+ HashKey = mem3_util:hash(DocId),
+ Head = #shard{
+ name = '_',
+ node = '_',
+ dbname = DbName,
+ range = ['$1','$2'],
+ ref = '_'
+ },
+ Conditions = [{'=<', '$1', HashKey}, {'=<', HashKey, '$2'}],
+ try ets:select(partitions, [{Head, Conditions, ['$_']}]) of
+ [] ->
+ mem3_util:load_shards_from_disk(DbName, DocId);
+ Shards ->
+ Shards
+ catch error:badarg ->
+ mem3_util:load_shards_from_disk(DbName, DocId)
+ end.
+
+ushards(DbName) ->
+ Shards = mem3:shards(DbName),
+ Nodes = rotate_nodes(DbName, live_nodes()),
+ Buckets = bucket_by_range(Shards),
+ choose_ushards(Buckets, Nodes).
+
+rotate_nodes(DbName, Nodes) ->
+ {H, T} = lists:split(erlang:crc32(DbName) rem length(Nodes), Nodes),
+ T ++ H.
+
+live_nodes() ->
+ lists:sort([node()|erlang:nodes()]).
+
+bucket_by_range(Shards) ->
+ Buckets0 = lists:foldl(fun(#shard{range=Range}=Shard, Dict) ->
+ orddict:append(Range, Shard, Dict) end, orddict:new(), Shards),
+ {_, Buckets} = lists:unzip(Buckets0),
+ Buckets.
+
+choose_ushards(Buckets, Nodes) ->
+ choose_ushards(Buckets, Nodes, []).
+
+choose_ushards([], _, Acc) ->
+ lists:reverse(Acc);
+choose_ushards([Bucket|RestBuckets], Nodes, Acc) ->
+ #shard{node=Node} = Shard = first_match(Bucket, Bucket, Nodes),
+ choose_ushards(RestBuckets, lists:delete(Node, Nodes) ++ [Node],
+ [Shard | Acc]).
+
+first_match([], [#shard{range=Range}|_], []) ->
+ throw({range_not_available, Range});
+first_match([#shard{node=Node}=Shard|_], _, [Node|_]) ->
+ Shard;
+first_match([], Shards, [_|RestNodes]) ->
+ first_match(Shards, Shards, RestNodes);
+first_match([_|RestShards], Shards, Nodes) ->
+ first_match(RestShards, Shards, Nodes).
+
+-spec choose_shards(DbName::iodata(), Options::list()) -> [#shard{}].
+choose_shards(DbName, Options) when is_list(DbName) ->
+ choose_shards(list_to_binary(DbName), Options);
+choose_shards(DbName, Options) ->
+ try shards(DbName)
+ catch error:E when E==database_does_not_exist; E==badarg ->
+ Nodes = mem3:nodes(),
+ NodeCount = length(Nodes),
+ Zones = zones(Nodes),
+ ZoneCount = length(Zones),
+ N = mem3_util:n_val(couch_util:get_value(n, Options), NodeCount),
+ Q = mem3_util:to_integer(couch_util:get_value(q, Options,
+ couch_config:get("cluster", "q", "8"))),
+ Z = mem3_util:z_val(couch_util:get_value(z, Options), NodeCount, ZoneCount),
+ Suffix = couch_util:get_value(shard_suffix, Options, ""),
+ ChosenZones = lists:sublist(shuffle(Zones), Z),
+ lists:flatmap(
+ fun({Zone, N1}) ->
+ Nodes1 = nodes_in_zone(Nodes, Zone),
+ {A, B} = lists:split(crypto:rand_uniform(1,length(Nodes1)+1), Nodes1),
+ RotatedNodes = B ++ A,
+ mem3_util:create_partition_map(DbName, erlang:min(N1,length(Nodes1)),
+ Q, RotatedNodes, Suffix)
+ end,
+ lists:zip(ChosenZones, apportion(N, Z)))
+ end.
+
+-spec dbname(#shard{} | iodata()) -> binary().
+dbname(#shard{dbname = DbName}) ->
+ DbName;
+dbname(<<"shards/", _:8/binary, "-", _:8/binary, "/", DbName/binary>>) ->
+ list_to_binary(filename:rootname(binary_to_list(DbName)));
+dbname(DbName) when is_list(DbName) ->
+ dbname(list_to_binary(DbName));
+dbname(DbName) when is_binary(DbName) ->
+ DbName;
+dbname(_) ->
+ erlang:error(badarg).
+
+
+zones(Nodes) ->
+ lists:usort([mem3:node_info(Node, <<"zone">>) || Node <- Nodes]).
+
+nodes_in_zone(Nodes, Zone) ->
+ [Node || Node <- Nodes, Zone == mem3:node_info(Node, <<"zone">>)].
+
+shuffle(List) ->
+ %% Determine the log n portion then randomize the list.
+ randomize(round(math:log(length(List)) + 0.5), List).
+
+randomize(1, List) ->
+ randomize(List);
+randomize(T, List) ->
+ lists:foldl(fun(_E, Acc) -> randomize(Acc) end,
+ randomize(List), lists:seq(1, (T - 1))).
+
+randomize(List) ->
+ D = lists:map(fun(A) -> {random:uniform(), A} end, List),
+ {_, D1} = lists:unzip(lists:keysort(1, D)),
+ D1.
+
+apportion(Shares, Ways) ->
+ apportion(Shares, lists:duplicate(Ways, 0), Shares).
+
+apportion(_Shares, Acc, 0) ->
+ Acc;
+apportion(Shares, Acc, Remaining) ->
+ N = Remaining rem length(Acc),
+ [H|T] = lists:nthtail(N, Acc),
+ apportion(Shares, lists:sublist(Acc, N) ++ [H+1|T], Remaining - 1).
+
+% quorum functions
+
+quorum(#db{name=DbName}) ->
+ quorum(DbName);
+quorum(DbName) ->
+ n(DbName) div 2 + 1.
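+
+% A worked example: with the default of N=3 copies, quorum/1 returns
+% 3 div 2 + 1 =:= 2, i.e. two of the three copies must respond.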
diff --git a/deps/mem3/src/mem3_app.erl b/deps/mem3/src/mem3_app.erl
new file mode 100644
index 00000000..bb27171f
--- /dev/null
+++ b/deps/mem3/src/mem3_app.erl
@@ -0,0 +1,23 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_app).
+-behaviour(application).
+-export([start/2, stop/1]).
+
+start(_Type, []) ->
+ mem3_sup:start_link().
+
+stop([]) ->
+ ok.
diff --git a/deps/mem3/src/mem3_cache.erl b/deps/mem3/src/mem3_cache.erl
new file mode 100644
index 00000000..84686b91
--- /dev/null
+++ b/deps/mem3/src/mem3_cache.erl
@@ -0,0 +1,118 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_cache).
+-behaviour(gen_server).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([start_link/0]).
+
+-record(state, {changes_pid}).
+
+-include("mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+init([]) ->
+ ets:new(partitions, [bag, public, named_table, {keypos,#shard.dbname}]),
+ {Pid, _} = spawn_monitor(fun() -> listen_for_changes(0) end),
+ {ok, #state{changes_pid = Pid}}.
+
+handle_call(_Call, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info({'DOWN', _, _, Pid, {badarg, [{ets,delete,[partitions,_]}|_]}},
+ #state{changes_pid=Pid} = State) ->
+ % fatal error, somebody deleted our ets table
+ {stop, ets_table_error, State};
+handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
+ twig:log(notice, "~p changes listener died ~p", [?MODULE, Reason]),
+ Seq = case Reason of {seq, EndSeq} -> EndSeq; _ -> 0 end,
+ erlang:send_after(5000, self(), {start_listener, Seq}),
+ {noreply, State};
+handle_info({start_listener, Seq}, State) ->
+ {NewPid, _} = spawn_monitor(fun() -> listen_for_changes(Seq) end),
+ {noreply, State#state{changes_pid=NewPid}};
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+terminate(_Reason, #state{changes_pid=Pid}) ->
+ exit(Pid, kill),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% internal functions
+
+listen_for_changes(Since) ->
+ DbName = couch_config:get("mem3", "shard_db", "dbs"),
+ {ok, Db} = mem3_util:ensure_exists(DbName),
+ Args = #changes_args{
+ feed = "continuous",
+ since = Since,
+ heartbeat = true,
+ include_docs = true
+ },
+ ChangesFun = couch_changes:handle_changes(Args, nil, Db),
+ ChangesFun(fun changes_callback/2).
+
+changes_callback(start, _) ->
+ {ok, nil};
+changes_callback({stop, EndSeq}, _) ->
+ exit({seq, EndSeq});
+changes_callback({change, {Change}, _}, _) ->
+ DbName = couch_util:get_value(<<"id">>, Change),
+ case DbName of <<"_design/", _/binary>> -> ok; _Else ->
+ case couch_util:get_value(<<"deleted">>, Change, false) of
+ true ->
+ ets:delete(partitions, DbName);
+ false ->
+ case couch_util:get_value(doc, Change) of
+ {error, Reason} ->
+ twig:log(error, "missing partition table for ~s: ~p", [DbName, Reason]);
+ {Doc} ->
+ ets:delete(partitions, DbName),
+ Shards = mem3_util:build_shards(DbName, Doc),
+ ets:insert(partitions, Shards),
+ [create_if_missing(Name) || #shard{name=Name, node=Node}
+ <- Shards, Node =:= node()]
+ end
+ end
+ end,
+ {ok, couch_util:get_value(<<"seq">>, Change)};
+changes_callback(timeout, _) ->
+ {ok, nil}.
+
+create_if_missing(Name) ->
+ DbDir = couch_config:get("couchdb", "database_dir"),
+ Filename = filename:join(DbDir, ?b2l(Name) ++ ".couch"),
+ case filelib:is_regular(Filename) of
+ true ->
+ ok;
+ false ->
+ Options = [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}],
+ case couch_server:create(Name, Options) of
+ {ok, Db} ->
+ couch_db:close(Db);
+ Error ->
+ twig:log(error, "~p tried to create ~s, got ~p", [?MODULE, Name, Error])
+ end
+ end.
diff --git a/deps/mem3/src/mem3_httpd.erl b/deps/mem3/src/mem3_httpd.erl
new file mode 100644
index 00000000..716080f8
--- /dev/null
+++ b/deps/mem3/src/mem3_httpd.erl
@@ -0,0 +1,53 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_httpd).
+
+-export([handle_membership_req/1]).
+
+%% includes
+-include("mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+handle_membership_req(#httpd{method='GET',
+ path_parts=[<<"_membership">>]} = Req) ->
+ ClusterNodes = try mem3:nodes()
+ catch _:_ -> [] end,
+ couch_httpd:send_json(Req, {[
+ {all_nodes, lists:sort([node()|nodes()])},
+ {cluster_nodes, lists:sort(ClusterNodes)}
+ ]});
+handle_membership_req(#httpd{method='GET',
+ path_parts=[<<"_membership">>, <<"parts">>, DbName]} = Req) ->
+ ClusterNodes = try mem3:nodes()
+ catch _:_ -> [] end,
+ Shards = mem3:shards(DbName),
+ JsonShards = json_shards(Shards, dict:new()),
+ couch_httpd:send_json(Req, {[
+ {all_nodes, lists:sort([node()|nodes()])},
+ {cluster_nodes, lists:sort(ClusterNodes)},
+ {partitions, JsonShards}
+ ]}).
+
+%%
+%% internal
+%%
+
+json_shards([], AccIn) ->
+ List = dict:to_list(AccIn),
+ {lists:sort(List)};
+json_shards([#shard{node=Node, range=[B,_E]} | Rest], AccIn) ->
+ HexBeg = couch_util:to_hex(<<B:32/integer>>),
+ json_shards(Rest, dict:append(HexBeg, Node, AccIn)).
diff --git a/deps/mem3/src/mem3_nodes.erl b/deps/mem3/src/mem3_nodes.erl
new file mode 100644
index 00000000..0be66462
--- /dev/null
+++ b/deps/mem3/src/mem3_nodes.erl
@@ -0,0 +1,136 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_nodes).
+-behaviour(gen_server).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([start_link/0, get_nodelist/0, get_node_info/2]).
+
+-include("mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-record(state, {changes_pid, update_seq, nodes}).
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+get_nodelist() ->
+ gen_server:call(?MODULE, get_nodelist).
+
+get_node_info(Node, Key) ->
+ gen_server:call(?MODULE, {get_node_info, Node, Key}).
+
+init([]) ->
+ {Nodes, UpdateSeq} = initialize_nodelist(),
+ {Pid, _} = spawn_monitor(fun() -> listen_for_changes(UpdateSeq) end),
+ {ok, #state{changes_pid = Pid, update_seq = UpdateSeq, nodes = Nodes}}.
+
+handle_call(get_nodelist, _From, State) ->
+ {reply, lists:sort(dict:fetch_keys(State#state.nodes)), State};
+handle_call({get_node_info, Node, Key}, _From, State) ->
+ case dict:find(Node, State#state.nodes) of
+ {ok, NodeInfo} ->
+ {reply, couch_util:get_value(Key, NodeInfo), State};
+ error ->
+ {reply, error, State}
+ end;
+handle_call({add_node, Node, NodeInfo}, _From, #state{nodes=Nodes} = State) ->
+ gen_event:notify(mem3_events, {add_node, Node}),
+ {reply, ok, State#state{nodes = dict:store(Node, NodeInfo, Nodes)}};
+handle_call({remove_node, Node}, _From, #state{nodes=Nodes} = State) ->
+ gen_event:notify(mem3_events, {remove_node, Node}),
+ {reply, ok, State#state{nodes = dict:erase(Node, Nodes)}};
+handle_call(_Call, _From, State) ->
+ {noreply, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
+ twig:log(notice, "~p changes listener died ~p", [?MODULE, Reason]),
+ StartSeq = State#state.update_seq,
+ Seq = case Reason of {seq, EndSeq} -> EndSeq; _ -> StartSeq end,
+ erlang:send_after(5000, self(), start_listener),
+ {noreply, State#state{update_seq = Seq}};
+handle_info(start_listener, #state{update_seq = Seq} = State) ->
+ {NewPid, _} = spawn_monitor(fun() -> listen_for_changes(Seq) end),
+ {noreply, State#state{changes_pid=NewPid}};
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%% internal functions
+
+initialize_nodelist() ->
+ DbName = couch_config:get("mem3", "node_db", "nodes"),
+ {ok, Db} = mem3_util:ensure_exists(DbName),
+ {ok, _, {_, Nodes0}} = couch_btree:fold(Db#db.id_tree, fun first_fold/3,
+ {Db, dict:new()}, []),
+ % add self if not already present
+ case dict:find(node(), Nodes0) of
+ {ok, _} ->
+ Nodes = Nodes0;
+ error ->
+ Doc = #doc{id = couch_util:to_binary(node())},
+ {ok, _} = couch_db:update_doc(Db, Doc, []),
+ Nodes = dict:store(node(), [], Nodes0)
+ end,
+ couch_db:close(Db),
+ {Nodes, Db#db.update_seq}.
+
+first_fold(#full_doc_info{id = <<"_design/", _/binary>>}, _, Acc) ->
+ {ok, Acc};
+first_fold(#full_doc_info{deleted=true}, _, Acc) ->
+ {ok, Acc};
+first_fold(#full_doc_info{id=Id}=DocInfo, _, {Db, Dict}) ->
+ {ok, #doc{body={Props}}} = couch_db:open_doc(Db, DocInfo),
+ {ok, {Db, dict:store(mem3_util:to_atom(Id), Props, Dict)}}.
+
+listen_for_changes(Since) ->
+ DbName = couch_config:get("mem3", "node_db", "nodes"),
+ {ok, Db} = mem3_util:ensure_exists(DbName),
+ Args = #changes_args{
+ feed = "continuous",
+ since = Since,
+ heartbeat = true,
+ include_docs = true
+ },
+ ChangesFun = couch_changes:handle_changes(Args, nil, Db),
+ ChangesFun(fun changes_callback/2).
+
+changes_callback(start, _) ->
+ {ok, nil};
+changes_callback({stop, EndSeq}, _) ->
+ exit({seq, EndSeq});
+changes_callback({change, {Change}, _}, _) ->
+ Node = couch_util:get_value(<<"id">>, Change),
+ case Node of <<"_design/", _/binary>> -> ok; _ ->
+ case couch_util:get_value(<<"deleted">>, Change, false) of
+ false ->
+ {Props} = couch_util:get_value(doc, Change),
+ gen_server:call(?MODULE, {add_node, mem3_util:to_atom(Node), Props});
+ true ->
+ gen_server:call(?MODULE, {remove_node, mem3_util:to_atom(Node)})
+ end
+ end,
+ {ok, couch_util:get_value(<<"seq">>, Change)};
+changes_callback(timeout, _) ->
+ {ok, nil}.
diff --git a/deps/mem3/src/mem3_rep.erl b/deps/mem3/src/mem3_rep.erl
new file mode 100644
index 00000000..b68973c5
--- /dev/null
+++ b/deps/mem3/src/mem3_rep.erl
@@ -0,0 +1,144 @@
+-module(mem3_rep).
+
+-export([go/2, changes_enumerator/3, make_local_id/2]).
+
+-include("mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(CTX, #user_ctx{roles = [<<"_admin">>]}).
+
+-record(acc, {revcount = 0, infos = [], seq, localid, source, target}).
+
+go(DbName, Node) when is_binary(DbName), is_atom(Node) ->
+ go(#shard{name=DbName, node=node()}, #shard{name=DbName, node=Node});
+
+go(#shard{} = Source, #shard{} = Target) ->
+ LocalId = make_local_id(Source, Target),
+ case couch_db:open(Source#shard.name, [{user_ctx,?CTX}]) of
+ {ok, Db} ->
+ try
+ go(Db, Target, LocalId)
+ catch error:{not_found, no_db_file} ->
+ {error, missing_target}
+ after
+ couch_db:close(Db)
+ end;
+ {not_found, no_db_file} ->
+ {error, missing_source}
+ end.
+
+go(#db{name = DbName, seq_tree = Bt} = Db, #shard{} = Target, LocalId) ->
+ erlang:put(io_priority, {internal_repl, DbName}),
+ Seq = calculate_start_seq(Db, Target, LocalId),
+ Acc0 = #acc{source=Db, target=Target, seq=Seq, localid=LocalId},
+ Fun = fun ?MODULE:changes_enumerator/3,
+ {ok, _, AccOut} = couch_btree:fold(Bt, Fun, Acc0, [{start_key, Seq + 1}]),
+ {ok, #acc{seq = LastSeq}} = replicate_batch(AccOut),
+ case couch_db:count_changes_since(Db, LastSeq) of
+ 0 ->
+ ok;
+ N ->
+ exit({pending_changes, N})
+ end.
+
+make_local_id(#shard{node=SourceNode}, #shard{node=TargetNode}) ->
+ S = couch_util:encodeBase64Url(couch_util:md5(term_to_binary(SourceNode))),
+ T = couch_util:encodeBase64Url(couch_util:md5(term_to_binary(TargetNode))),
+ <<"_local/shard-sync-", S/binary, "-", T/binary>>.
+
+changes_enumerator(FullDocInfo, _, #acc{revcount = C} = Acc) when C >= 99 ->
+ #doc_info{high_seq = Seq} = couch_doc:to_doc_info(FullDocInfo),
+ {stop, Acc#acc{seq = Seq, infos = [FullDocInfo | Acc#acc.infos]}};
+
+changes_enumerator(FullDocInfo, _, #acc{revcount = C, infos = Infos} = Acc) ->
+ #doc_info{high_seq = Seq, revs = Revs} = couch_doc:to_doc_info(FullDocInfo),
+ Count = C + length(Revs),
+ {ok, Acc#acc{seq = Seq, revcount = Count, infos = [FullDocInfo | Infos]}}.
+
+replicate_batch(#acc{target = #shard{node=Node, name=Name}} = Acc) ->
+ case find_missing_revs(Acc) of
+ [] ->
+ ok;
+ Missing ->
+ ok = save_on_target(Node, Name, open_docs(Acc, Missing))
+ end,
+ update_locals(Acc),
+ {ok, Acc#acc{revcount=0, infos=[]}}.
+
+find_missing_revs(Acc) ->
+ #acc{target = #shard{node=Node, name=Name}, infos = Infos} = Acc,
+ IdsRevs = lists:map(fun(FDI) ->
+ #doc_info{id=Id, revs=RevInfos} = couch_doc:to_doc_info(FDI),
+ {Id, [R || #rev_info{rev=R} <- RevInfos]}
+ end, Infos),
+ Options = [{io_priority, {internal_repl, Name}}, {user_ctx, ?CTX}],
+ rexi_call(Node, {fabric_rpc, get_missing_revs, [Name, IdsRevs, Options]}).
+
+open_docs(#acc{source=Source, infos=Infos}, Missing) ->
+ lists:flatmap(fun({Id, Revs, _}) ->
+ FDI = lists:keyfind(Id, #full_doc_info.id, Infos),
+ open_doc_revs(Source, FDI, Revs)
+ end, Missing).
+
+save_on_target(Node, Name, Docs) ->
+ Options = [replicated_changes, full_commit, {user_ctx, ?CTX},
+ {io_priority, {internal_repl, Name}}],
+ rexi_call(Node, {fabric_rpc, update_docs, [Name, Docs, Options]}),
+ ok.
+
+update_locals(Acc) ->
+ #acc{seq=Seq, source=Db, target=Target, localid=Id} = Acc,
+ #shard{name=Name, node=Node} = Target,
+ Doc = #doc{id = Id, body = {[
+ {<<"seq">>, Seq},
+ {<<"node">>, list_to_binary(atom_to_list(Node))},
+ {<<"timestamp">>, list_to_binary(iso8601_timestamp())}
+ ]}},
+ {ok, _} = couch_db:update_doc(Db, Doc, []),
+ Options = [{user_ctx, ?CTX}, {io_priority, {internal_repl, Name}}],
+ rexi_call(Node, {fabric_rpc, update_docs, [Name, [Doc], Options]}).
+
+rexi_call(Node, MFA) ->
+ Mon = rexi_monitor:start([{rexi_server, Node}]),
+ Ref = rexi:cast(Node, MFA),
+ try
+ receive {Ref, {ok, Reply}} ->
+ Reply;
+ {Ref, Error} ->
+ erlang:error(Error);
+ {rexi_DOWN, Mon, _, Reason} ->
+ erlang:error({rexi_DOWN, Reason})
+ after 600000 ->
+ erlang:error(timeout)
+ end
+ after
+ rexi_monitor:stop(Mon)
+ end.
+
+calculate_start_seq(Db, #shard{node=Node, name=Name}, LocalId) ->
+ case couch_db:open_doc(Db, LocalId, []) of
+ {ok, #doc{body = {SProps}}} ->
+ Opts = [{user_ctx, ?CTX}, {io_priority, {internal_repl, Name}}],
+ try rexi_call(Node, {fabric_rpc, open_doc, [Name, LocalId, Opts]}) of
+ #doc{body = {TProps}} ->
+ SourceSeq = couch_util:get_value(<<"seq">>, SProps, 0),
+ TargetSeq = couch_util:get_value(<<"seq">>, TProps, 0),
+ erlang:min(SourceSeq, TargetSeq)
+ catch error:{not_found, _} ->
+ 0
+ end;
+ {not_found, _} ->
+ 0
+ end.
+
+open_doc_revs(Db, #full_doc_info{id=Id, rev_tree=RevTree}, Revs) ->
+ {FoundRevs, _} = couch_key_tree:get_key_leafs(RevTree, Revs),
+ lists:map(fun({#leaf{deleted=IsDel, ptr=SummaryPtr}, FoundRevPath}) ->
+ couch_db:make_doc(Db, Id, IsDel, SummaryPtr, FoundRevPath)
+ end, FoundRevs).
+
+iso8601_timestamp() ->
+ {_,_,Micro} = Now = os:timestamp(),
+ {{Year,Month,Date},{Hour,Minute,Second}} = calendar:now_to_datetime(Now),
+ Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0B.~6.10.0BZ",
+ io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second, Micro]).
diff --git a/deps/mem3/src/mem3_rep_manager.erl b/deps/mem3/src/mem3_rep_manager.erl
new file mode 100644
index 00000000..7b98701d
--- /dev/null
+++ b/deps/mem3/src/mem3_rep_manager.erl
@@ -0,0 +1,627 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+-module(mem3_rep_manager).
+-behaviour(gen_server).
+
+% public API
+-export([start_link/0, config_change/3]).
+-export([replication_started/1, replication_completed/1, replication_error/2]).
+
+% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-include("mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_js_functions.hrl").
+
+-define(DOC_TO_REP, mem3_rep_doc_id_to_rep_id).
+-define(REP_TO_STATE, mem3_rep_id_to_rep_state).
+-define(DB_TO_SEQ, mem3_db_to_seq).
+-define(INITIAL_WAIT, 2.5). % seconds
+-define(MAX_WAIT, 600). % seconds
+-define(CTX, {user_ctx, #user_ctx{roles=[<<"_admin">>, <<"_replicator">>]}}).
+
+-record(state, {
+ db_notifier = nil,
+ max_retries,
+ scan_pid = nil,
+ rep_start_pids = []
+}).
+
+-record(rep_state, {
+ dbname,
+ doc_id,
+ user_ctx,
+ doc,
+ starting,
+ retries_left,
+ max_retries,
+ wait = ?INITIAL_WAIT
+}).
+
+-import(couch_util, [
+ get_value/2,
+ get_value/3,
+ to_binary/1
+]).
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+replication_started({BaseId, _} = RepId) ->
+ case rep_state(RepId) of
+ nil ->
+ ok;
+ #rep_state{dbname = DbName, doc_id = DocId} ->
+ update_rep_doc(DbName, DocId, [
+ {<<"_replication_state">>, <<"triggered">>},
+ {<<"_replication_id">>, ?l2b(BaseId)}]),
+ ok = gen_server:call(?MODULE, {rep_started, RepId}, infinity),
+ twig:log(notice, "Document `~s` triggered replication `~s`",
+ [DocId, pp_rep_id(RepId)])
+ end.
+
+
+replication_completed(RepId) ->
+ case rep_state(RepId) of
+ nil ->
+ ok;
+ #rep_state{dbname = DbName, doc_id = DocId} ->
+ update_rep_doc(DbName, DocId, [{<<"_replication_state">>, <<"completed">>}]),
+ ok = gen_server:call(?MODULE, {rep_complete, RepId}, infinity),
+ twig:log(notice, "Replication `~s` finished (triggered by document `~s`)",
+ [pp_rep_id(RepId), DocId])
+ end.
+
+
+replication_error({BaseId, _} = RepId, Error) ->
+ case rep_state(RepId) of
+ nil ->
+ ok;
+ #rep_state{dbname = DbName, doc_id = DocId} ->
+ % TODO: maybe add error reason to replication document
+ update_rep_doc(DbName, DocId, [
+ {<<"_replication_state">>, <<"error">>},
+ {<<"_replication_id">>, ?l2b(BaseId)}]),
+ ok = gen_server:call(?MODULE, {rep_error, RepId, Error}, infinity)
+ end.
+
+init(_) ->
+ process_flag(trap_exit, true),
+ net_kernel:monitor_nodes(true),
+ ?DOC_TO_REP = ets:new(?DOC_TO_REP, [named_table, set, protected]),
+ ?REP_TO_STATE = ets:new(?REP_TO_STATE, [named_table, set, protected]),
+ ?DB_TO_SEQ = ets:new(?DB_TO_SEQ, [named_table, set, protected]),
+ Server = self(),
+ ok = couch_config:register(fun ?MODULE:config_change/3, Server),
+ NotifierPid = db_update_notifier(),
+ ScanPid = spawn_link(fun() -> scan_all_dbs(Server) end),
+ {ok, #state{
+ db_notifier = NotifierPid,
+ scan_pid = ScanPid,
+ max_retries = retries_value(
+ couch_config:get("replicator", "max_replication_retry_count", "10"))
+ }}.
+
+config_change("replicator", "max_replication_retry_count", V) ->
+ ok = gen_server:cast(?MODULE, {set_max_retries, retries_value(V)}).
+
+handle_call({rep_db_update, DbName, {ChangeProps} = Change}, _From, State) ->
+ NewState = try
+ process_update(State, DbName, Change)
+ catch
+ _Tag:Error ->
+ {RepProps} = get_value(doc, ChangeProps),
+ DocId = get_value(<<"_id">>, RepProps),
+ rep_db_update_error(Error, DbName, DocId),
+ State
+ end,
+ {reply, ok, NewState};
+
+handle_call({rep_started, RepId}, _From, State) ->
+ case rep_state(RepId) of
+ nil ->
+ ok;
+ RepState ->
+ NewRepState = RepState#rep_state{
+ starting = false,
+ retries_left = State#state.max_retries,
+ max_retries = State#state.max_retries,
+ wait = ?INITIAL_WAIT
+ },
+ true = ets:insert(?REP_TO_STATE, {RepId, NewRepState})
+ end,
+ {reply, ok, State};
+
+handle_call({rep_complete, RepId}, _From, State) ->
+ true = ets:delete(?REP_TO_STATE, RepId),
+ {reply, ok, State};
+
+handle_call({rep_error, RepId, Error}, _From, State) ->
+ {reply, ok, replication_error(State, RepId, Error)};
+
+handle_call({resume_scan, DbName}, _From, State) ->
+ Since = case ets:lookup(?DB_TO_SEQ, DbName) of
+ [] -> 0;
+ [{DbName, EndSeq}] -> EndSeq
+ end,
+ Pid = changes_feed_loop(DbName, Since),
+ twig:log(debug, "Scanning ~s from update_seq ~p", [DbName, Since]),
+ {reply, ok, State#state{rep_start_pids = [Pid | State#state.rep_start_pids]}};
+
+handle_call({rep_db_checkpoint, DbName, EndSeq}, _From, State) ->
+ true = ets:insert(?DB_TO_SEQ, {DbName, EndSeq}),
+ {reply, ok, State};
+
+handle_call(Msg, From, State) ->
+ twig:log(error, "Replication manager received unexpected call ~p from ~p",
+ [Msg, From]),
+ {stop, {error, {unexpected_call, Msg}}, State}.
+
+
+handle_cast({set_max_retries, MaxRetries}, State) ->
+ {noreply, State#state{max_retries = MaxRetries}};
+
+handle_cast(Msg, State) ->
+ twig:log(error, "Replication manager received unexpected cast ~p", [Msg]),
+ {stop, {error, {unexpected_cast, Msg}}, State}.
+
+handle_info({nodeup, _Node}, State) ->
+ {noreply, rescan(State)};
+
+handle_info({nodedown, _Node}, State) ->
+ {noreply, rescan(State)};
+
+handle_info({'EXIT', From, normal}, #state{scan_pid = From} = State) ->
+ twig:log(debug, "Background scan has completed.", []),
+ {noreply, State#state{scan_pid=nil}};
+
+handle_info({'EXIT', From, Reason}, #state{scan_pid = From} = State) ->
+ twig:log(error, "Background scanner died. Reason: ~p", [Reason]),
+ {stop, {scanner_died, Reason}, State};
+
+handle_info({'EXIT', From, Reason}, #state{db_notifier = From} = State) ->
+ twig:log(error, "Database update notifier died. Reason: ~p", [Reason]),
+ {stop, {db_update_notifier_died, Reason}, State};
+
+handle_info({'EXIT', From, normal}, #state{rep_start_pids = Pids} = State) ->
+ % one of the replication start processes terminated successfully
+ {noreply, State#state{rep_start_pids = Pids -- [From]}};
+
+handle_info({'DOWN', _Ref, _, _, _}, State) ->
+ % From a db monitor created by a replication process. Ignore.
+ {noreply, State};
+
+handle_info(Msg, State) ->
+ twig:log(error, "Replication manager received unexpected message ~p", [Msg]),
+ {stop, {unexpected_msg, Msg}, State}.
+
+terminate(_Reason, State) ->
+ #state{
+ scan_pid = ScanPid,
+ rep_start_pids = StartPids,
+ db_notifier = DbNotifier
+ } = State,
+ stop_all_replications(),
+ lists:foreach(
+ fun(Pid) ->
+ catch unlink(Pid),
+ catch exit(Pid, stop)
+ end,
+ [ScanPid | StartPids]),
+ true = ets:delete(?REP_TO_STATE),
+ true = ets:delete(?DOC_TO_REP),
+ true = ets:delete(?DB_TO_SEQ),
+ couch_db_update_notifier:stop(DbNotifier).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+changes_feed_loop(DbName, Since) ->
+ Server = self(),
+ Pid = spawn_link(
+ fun() ->
+ fabric:changes(DbName, fun
+ ({change, Change}, Acc) ->
+ case has_valid_rep_id(Change) of
+ true ->
+ ok = gen_server:call(
+ Server, {rep_db_update, DbName, Change}, infinity);
+ false ->
+ ok
+ end,
+ {ok, Acc};
+ ({stop, EndSeq}, Acc) ->
+ ok = gen_server:call(Server, {rep_db_checkpoint, DbName, EndSeq}),
+ {ok, Acc};
+ (_, Acc) ->
+ {ok, Acc}
+ end,
+ nil,
+ #changes_args{
+ include_docs = true,
+ feed = "normal",
+ since = Since,
+ filter = main_only,
+ timeout = infinity,
+ db_open_options = [sys_db]
+ }
+ )
+ end),
+ Pid.
+
+has_valid_rep_id({Change}) ->
+ has_valid_rep_id(get_value(<<"id">>, Change));
+has_valid_rep_id(<<?DESIGN_DOC_PREFIX, _Rest/binary>>) ->
+ false;
+has_valid_rep_id(_Else) ->
+ true.
+
+
+db_update_notifier() ->
+ Server = self(),
+ IsReplicatorDbFun = is_replicator_db_fun(),
+ {ok, Notifier} = couch_db_update_notifier:start_link(
+ fun({_, DbName}) ->
+ case IsReplicatorDbFun(DbName) of
+ true ->
+ ok = gen_server:call(Server, {resume_scan, mem3:dbname(DbName)});
+ _ ->
+ ok
+ end
+ end
+ ),
+ Notifier.
+
+rescan(#state{scan_pid = nil} = State) ->
+ true = ets:delete_all_objects(?DB_TO_SEQ),
+ Server = self(),
+ NewScanPid = spawn_link(fun() -> scan_all_dbs(Server) end),
+ State#state{scan_pid = NewScanPid};
+rescan(#state{scan_pid = ScanPid} = State) ->
+ unlink(ScanPid),
+ exit(ScanPid, exit),
+ rescan(State#state{scan_pid = nil}).
+
+process_update(State, DbName, {Change}) ->
+ {RepProps} = JsonRepDoc = get_value(doc, Change),
+ DocId = get_value(<<"_id">>, RepProps),
+ case {owner(DbName, DocId), get_value(deleted, Change, false)} of
+ {false, _} ->
+ replication_complete(DocId),
+ State;
+ {true, true} ->
+ rep_doc_deleted(DocId),
+ State;
+ {true, false} ->
+ case get_value(<<"_replication_state">>, RepProps) of
+ undefined ->
+ maybe_start_replication(State, DbName, DocId, JsonRepDoc);
+ <<"triggered">> ->
+ maybe_start_replication(State, DbName, DocId, JsonRepDoc);
+ <<"completed">> ->
+ replication_complete(DocId),
+ State;
+ <<"error">> ->
+ case ets:lookup(?DOC_TO_REP, DocId) of
+ [] ->
+ maybe_start_replication(State, DbName, DocId, JsonRepDoc);
+ _ ->
+ State
+ end
+ end
+ end.
+
+
+rep_db_update_error(Error, DbName, DocId) ->
+ case Error of
+ {bad_rep_doc, Reason} ->
+ ok;
+ _ ->
+ Reason = to_binary(Error)
+ end,
+ twig:log(error, "Replication manager, error processing document `~s`: ~s",
+ [DocId, Reason]),
+ update_rep_doc(DbName, DocId, [{<<"_replication_state">>, <<"error">>}]).
+
+
+rep_user_ctx({RepDoc}) ->
+ case get_value(<<"user_ctx">>, RepDoc) of
+ undefined ->
+ #user_ctx{};
+ {UserCtx} ->
+ #user_ctx{
+ name = get_value(<<"name">>, UserCtx, null),
+ roles = get_value(<<"roles">>, UserCtx, [])
+ }
+ end.
+
+
+maybe_start_replication(State, DbName, DocId, RepDoc) ->
+ UserCtx = rep_user_ctx(RepDoc),
+ {BaseId, _} = RepId = make_rep_id(RepDoc, UserCtx),
+ case rep_state(RepId) of
+ nil ->
+ RepState = #rep_state{
+ dbname = DbName,
+ doc_id = DocId,
+ user_ctx = UserCtx,
+ doc = RepDoc,
+ starting = true,
+ retries_left = State#state.max_retries,
+ max_retries = State#state.max_retries
+ },
+ true = ets:insert(?REP_TO_STATE, {RepId, RepState}),
+ true = ets:insert(?DOC_TO_REP, {DocId, RepId}),
+ twig:log(notice, "Attempting to start replication `~s` (document `~s`).",
+ [pp_rep_id(RepId), DocId]),
+ Server = self(),
+ Pid = spawn_link(fun() ->
+ start_replication(Server, RepDoc, RepId, UserCtx, 0)
+ end),
+ State#state{rep_start_pids = [Pid | State#state.rep_start_pids]};
+ #rep_state{doc_id = DocId} ->
+ State;
+ #rep_state{starting = false, dbname = DbName, doc_id = OtherDocId} ->
+ twig:log(notice, "The replication specified by the document `~s` was already"
+ " triggered by the document `~s`", [DocId, OtherDocId]),
+ maybe_tag_rep_doc(DbName, DocId, RepDoc, ?l2b(BaseId)),
+ State;
+ #rep_state{starting = true, dbname = DbName, doc_id = OtherDocId} ->
+ twig:log(notice, "The replication specified by the document `~s` is already"
+ " being triggered by the document `~s`", [DocId, OtherDocId]),
+ maybe_tag_rep_doc(DbName, DocId, RepDoc, ?l2b(BaseId)),
+ State
+ end.
+
+
+make_rep_id(RepDoc, UserCtx) ->
+ try
+ couch_rep:make_replication_id(RepDoc, UserCtx)
+ catch
+ throw:{error, Reason} ->
+ throw({bad_rep_doc, Reason});
+ Tag:Err ->
+ throw({bad_rep_doc, to_binary({Tag, Err})})
+ end.
+
+
+maybe_tag_rep_doc(DbName, DocId, {RepProps}, RepId) ->
+ case get_value(<<"_replication_id">>, RepProps) of
+ RepId ->
+ ok;
+ _ ->
+ update_rep_doc(DbName, DocId, [{<<"_replication_id">>, RepId}])
+ end.
+
+
+start_replication(Server, RepDoc, RepId, UserCtx, Wait) ->
+ ok = timer:sleep(Wait * 1000),
+ case (catch couch_rep:start_replication(RepDoc, RepId, UserCtx, ?MODULE)) of
+ Pid when is_pid(Pid) ->
+ ok = gen_server:call(Server, {rep_started, RepId}, infinity),
+ couch_rep:get_result(Pid, RepId, RepDoc, UserCtx);
+ Error ->
+ replication_error(RepId, Error)
+ end.
+
+
+replication_complete(DocId) ->
+ case ets:lookup(?DOC_TO_REP, DocId) of
+ [{DocId, RepId}] ->
+ case rep_state(RepId) of
+ nil ->
+ couch_rep:end_replication(RepId);
+ #rep_state{} ->
+ ok
+ end,
+ true = ets:delete(?DOC_TO_REP, DocId);
+ _ ->
+ ok
+ end.
+
+
+rep_doc_deleted(DocId) ->
+ case ets:lookup(?DOC_TO_REP, DocId) of
+ [{DocId, RepId}] ->
+ couch_rep:end_replication(RepId),
+ true = ets:delete(?REP_TO_STATE, RepId),
+ true = ets:delete(?DOC_TO_REP, DocId),
+ twig:log(notice, "Stopped replication `~s` because replication document `~s`"
+ " was deleted", [pp_rep_id(RepId), DocId]);
+ [] ->
+ ok
+ end.
+
+
+replication_error(State, RepId, Error) ->
+ case rep_state(RepId) of
+ nil ->
+ State;
+ RepState ->
+ maybe_retry_replication(RepId, RepState, Error, State)
+ end.
+
+maybe_retry_replication(RepId, #rep_state{retries_left = 0} = RepState, Error, State) ->
+ #rep_state{
+ doc_id = DocId,
+ max_retries = MaxRetries
+ } = RepState,
+ couch_rep:end_replication(RepId),
+ true = ets:delete(?REP_TO_STATE, RepId),
+ true = ets:delete(?DOC_TO_REP, DocId),
+ twig:log(error, "Error in replication `~s` (triggered by document `~s`): ~s"
+ "~nReached maximum retry attempts (~p).",
+ [pp_rep_id(RepId), DocId, to_binary(error_reason(Error)), MaxRetries]),
+ State;
+
+maybe_retry_replication(RepId, RepState, Error, State) ->
+ #rep_state{
+ doc_id = DocId,
+ user_ctx = UserCtx,
+ doc = RepDoc
+ } = RepState,
+ #rep_state{wait = Wait} = NewRepState = state_after_error(RepState),
+ true = ets:insert(?REP_TO_STATE, {RepId, NewRepState}),
+ twig:log(error, "Error in replication `~s` (triggered by document `~s`): ~s"
+ "~nRestarting replication in ~p seconds.",
+ [pp_rep_id(RepId), DocId, to_binary(error_reason(Error)), Wait]),
+ Server = self(),
+ Pid = spawn_link(fun() ->
+ start_replication(Server, RepDoc, RepId, UserCtx, Wait)
+ end),
+ State#state{rep_start_pids = [Pid | State#state.rep_start_pids]}.
+
+stop_all_replications() ->
+ twig:log(notice, "Stopping all ongoing replications.", []),
+ ets:foldl(
+ fun({_, RepId}, _) ->
+ couch_rep:end_replication(RepId)
+ end,
+ ok, ?DOC_TO_REP),
+ true = ets:delete_all_objects(?REP_TO_STATE),
+ true = ets:delete_all_objects(?DOC_TO_REP),
+ true = ets:delete_all_objects(?DB_TO_SEQ).
+
+update_rep_doc(RepDbName, RepDocId, KVs) when is_binary(RepDocId) ->
+ spawn_link(fun() ->
+ try
+ case fabric:open_doc(mem3:dbname(RepDbName), RepDocId, []) of
+ {ok, LatestRepDoc} ->
+ update_rep_doc(RepDbName, LatestRepDoc, KVs);
+ _ ->
+ ok
+ end
+ catch throw:conflict ->
+ % Shouldn't happen, as by default only the role _replicator can
+ % update replication documents.
+ twig:log(error, "Conflict error when updating replication document `~s`."
+ " Retrying.", [RepDocId]),
+ ok = timer:sleep(5),
+ update_rep_doc(RepDbName, RepDocId, KVs)
+ end
+ end);
+
+update_rep_doc(RepDbName, #doc{body = {RepDocBody}} = RepDoc, KVs) ->
+ NewRepDocBody = lists:foldl(
+ fun({<<"_replication_state">> = K, State} = KV, Body) ->
+ case get_value(K, Body) of
+ State ->
+ Body;
+ _ ->
+ Body1 = lists:keystore(K, 1, Body, KV),
+ lists:keystore(
+ <<"_replication_state_time">>, 1, Body1,
+ {<<"_replication_state_time">>, timestamp()})
+ end;
+ ({K, _V} = KV, Body) ->
+ lists:keystore(K, 1, Body, KV)
+ end,
+ RepDocBody, KVs),
+ case NewRepDocBody of
+ RepDocBody ->
+ ok;
+ _ ->
+ % Might not succeed - when the replication doc is deleted right
+ % before this update (not an error, ignore).
+ spawn_link(fun() ->
+ fabric:update_doc(RepDbName, RepDoc#doc{body = {NewRepDocBody}}, [?CTX])
+ end)
+ end.
+
+
+% RFC3339 timestamps.
+% Note: doesn't include the time seconds fraction (RFC3339 says it's optional).
+timestamp() ->
+ {{Year, Month, Day}, {Hour, Min, Sec}} = calendar:now_to_local_time(now()),
+ UTime = erlang:universaltime(),
+ LocalTime = calendar:universal_time_to_local_time(UTime),
+ DiffSecs = calendar:datetime_to_gregorian_seconds(LocalTime) -
+ calendar:datetime_to_gregorian_seconds(UTime),
+ iolist_to_binary(
+ io_lib:format("~4..0w-~2..0w-~2..0wT~2..0w:~2..0w:~2..0w~s",
+ [Year, Month, Day, Hour, Min, Sec,
+ zone(DiffSecs div 3600, (DiffSecs rem 3600) div 60)])).
+
+zone(Hr, Min) when Hr >= 0, Min >= 0 ->
+ io_lib:format("+~2..0w:~2..0w", [Hr, Min]);
+zone(Hr, Min) ->
+ io_lib:format("-~2..0w:~2..0w", [abs(Hr), abs(Min)]).
+
+% pretty-print replication id
+pp_rep_id({Base, Extension}) ->
+ Base ++ Extension.
+
+
+rep_state(RepId) ->
+ case ets:lookup(?REP_TO_STATE, RepId) of
+ [{RepId, RepState}] ->
+ RepState;
+ [] ->
+ nil
+ end.
+
+
+error_reason({error, Reason}) ->
+ Reason;
+error_reason(Reason) ->
+ Reason.
+
+retries_value("infinity") ->
+ infinity;
+retries_value(Value) ->
+ list_to_integer(Value).
+
+state_after_error(#rep_state{retries_left = Left, wait = Wait} = State) ->
+ Wait2 = erlang:min(trunc(Wait * 2), ?MAX_WAIT),
+ case Left of
+ infinity ->
+ State#rep_state{wait = Wait2};
+ _ ->
+ State#rep_state{retries_left = Left - 1, wait = Wait2}
+ end.
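+
+% A worked example of the backoff above: the wait doubles from
+% ?INITIAL_WAIT (2.5s) on each error, so successive retries sleep
+% 5, 10, 20, ... seconds, capped at ?MAX_WAIT (600 seconds).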
+
+scan_all_dbs(Server) when is_pid(Server) ->
+ {ok, Db} = mem3_util:ensure_exists(
+ couch_config:get("mem3", "shard_db", "dbs")),
+ ChangesFun = couch_changes:handle_changes(#changes_args{}, nil, Db),
+ IsReplicatorDbFun = is_replicator_db_fun(),
+ ChangesFun(fun({change, {Change}, _}, _) ->
+ DbName = couch_util:get_value(<<"id">>, Change),
+ case DbName of <<"_design/", _/binary>> -> ok; _Else ->
+ case couch_util:get_value(<<"deleted">>, Change, false) of
+ true ->
+ ok;
+ false ->
+ IsReplicatorDbFun(DbName) andalso
+ gen_server:call(Server, {resume_scan, DbName})
+ end
+ end;
+ (_, _) -> ok
+ end),
+ couch_db:close(Db).
+
+is_replicator_db_fun() ->
+ {ok, RegExp} = re:compile("^([a-z][a-z0-9\\_\\$()\\+\\-\\/]*/)?_replicator$"),
+ fun(DbName) ->
+ match =:= re:run(mem3:dbname(DbName), RegExp, [{capture,none}])
+ end.
+
+owner(DbName, DocId) ->
+ Shards = mem3:shards(DbName, DocId),
+ Nodes = [node()|nodes()],
+ LiveShards = [S || #shard{node=Node} = S <- Shards, lists:member(Node, Nodes)],
+ [#shard{node=Node}] = lists:usort(fun(#shard{name=A}, #shard{name=B}) ->
+ A =< B end, LiveShards),
+ node() =:= Node.
diff --git a/deps/mem3/src/mem3_sup.erl b/deps/mem3/src/mem3_sup.erl
new file mode 100644
index 00000000..07b9498b
--- /dev/null
+++ b/deps/mem3/src/mem3_sup.erl
@@ -0,0 +1,36 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_sup).
+-behaviour(supervisor).
+-export([start_link/0, init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init(_Args) ->
+ Children = [
+ child(mem3_events),
+ child(mem3_cache),
+ child(mem3_nodes),
+ child(mem3_sync),
+ child(mem3_rep_manager)
+ ],
+ {ok, {{one_for_one,10,1}, Children}}.
+
+child(mem3_events) ->
+ MFA = {gen_event, start_link, [{local, mem3_events}]},
+ {mem3_events, MFA, permanent, 1000, worker, dynamic};
+child(Child) ->
+ {Child, {Child, start_link, []}, permanent, 1000, worker, [Child]}.
diff --git a/deps/mem3/src/mem3_sync.erl b/deps/mem3/src/mem3_sync.erl
new file mode 100644
index 00000000..191a98c6
--- /dev/null
+++ b/deps/mem3/src/mem3_sync.erl
@@ -0,0 +1,267 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_sync).
+-behaviour(gen_server).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([start_link/0, get_active/0, get_queue/0, push/1, push/2,
+ remove_node/1, initial_sync/1]).
+
+-include("mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-record(state, {
+ active = [],
+ count = 0,
+ limit,
+ dict = dict:new(),
+ waiting = [],
+ update_notifier
+}).
+
+-record(job, {name, node, count=nil, pid=nil}).
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+get_active() ->
+ gen_server:call(?MODULE, get_active).
+
+get_queue() ->
+ gen_server:call(?MODULE, get_queue).
+
+push(#shard{name = Name}, Target) ->
+ push(Name, Target);
+push(Name, #shard{node=Node}) ->
+ push(Name, Node);
+push(Name, Node) ->
+ push(#job{name = Name, node = Node}).
+
+push(#job{node = Node} = Job) when Node =/= node() ->
+ gen_server:cast(?MODULE, {push, Job});
+push(_) ->
+ ok.
+
+remove_node(Node) ->
+ gen_server:cast(?MODULE, {remove_node, Node}).
+
+init([]) ->
+ process_flag(trap_exit, true),
+ Concurrency = couch_config:get("mem3", "sync_concurrency", "10"),
+ gen_event:add_handler(mem3_events, mem3_sync_event, []),
+ {ok, Pid} = start_update_notifier(),
+ spawn(fun initial_sync/0),
+ {ok, #state{limit = list_to_integer(Concurrency), update_notifier=Pid}}.
+
+handle_call(get_active, _From, State) ->
+ {reply, State#state.active, State};
+
+handle_call(get_queue, _From, State) ->
+ {reply, State#state.waiting, State};
+
+handle_call(get_backlog, _From, #state{active=A, waiting=W} = State) ->
+ CA = lists:sum([C || #job{count=C} <- A, is_integer(C)]),
+ CW = lists:sum([C || #job{count=C} <- W, is_integer(C)]),
+ {reply, CA+CW, State}.
+
+handle_cast({push, DbName, Node}, State) ->
+ handle_cast({push, #job{name = DbName, node = Node}}, State);
+
+handle_cast({push, Job}, #state{count=Count, limit=Limit} = State)
+ when Count >= Limit ->
+ {noreply, add_to_queue(State, Job)};
+
+handle_cast({push, Job}, State) ->
+ #state{active = L, count = C} = State,
+ #job{name = DbName, node = Node} = Job,
+ case is_running(DbName, Node, L) of
+ true ->
+ {noreply, add_to_queue(State, Job)};
+ false ->
+ Pid = start_push_replication(Job),
+ {noreply, State#state{active=[Job#job{pid=Pid}|L], count=C+1}}
+ end;
+
+handle_cast({remove_node, Node}, #state{waiting = W0} = State) ->
+ {Alive, Dead} = lists:partition(fun(#job{node=N}) -> N =/= Node end, W0),
+ Dict = remove_entries(State#state.dict, Dead),
+ [exit(Pid, die_now) || #job{node=N, pid=Pid} <- State#state.active,
+ N =:= Node],
+ {noreply, State#state{dict = Dict, waiting = Alive}};
+
+handle_cast({remove_shard, Shard}, #state{waiting = W0} = State) ->
+ {Alive, Dead} = lists:partition(fun(#job{name=S}) -> S =/= Shard end, W0),
+ Dict = remove_entries(State#state.dict, Dead),
+ [exit(Pid, die_now) || #job{name=S, pid=Pid} <- State#state.active,
+ S =:= Shard],
+ {noreply, State#state{dict = Dict, waiting = Alive}}.
+
+handle_info({'EXIT', Pid, _}, #state{update_notifier=Pid} = State) ->
+ {ok, NewPid} = start_update_notifier(),
+ {noreply, State#state{update_notifier=NewPid}};
+
+handle_info({'EXIT', Active, normal}, State) ->
+ handle_replication_exit(State, Active);
+
+handle_info({'EXIT', Active, die_now}, State) ->
+ % we forced this one ourselves, do not retry
+ handle_replication_exit(State, Active);
+
+handle_info({'EXIT', Active, {{not_found, no_db_file}, _Stack}}, State) ->
+ % target doesn't exist, do not retry
+ handle_replication_exit(State, Active);
+
+handle_info({'EXIT', Active, Reason}, State) ->
+ NewState = case lists:keyfind(Active, #job.pid, State#state.active) of
+ #job{name=OldDbName, node=OldNode} = Job ->
+ twig:log(warn, "~p ~s -> ~p ~p", [?MODULE, OldDbName, OldNode,
+ Reason]),
+ case Reason of {pending_changes, Count} ->
+ add_to_queue(State, Job#job{pid = nil, count = Count});
+ _ ->
+ timer:apply_after(5000, ?MODULE, push, [Job#job{pid=nil}]),
+ State
+ end;
+ false -> State end,
+ handle_replication_exit(NewState, Active);
+
+handle_info(Msg, State) ->
+ twig:log(notice, "unexpected msg at replication manager ~p", [Msg]),
+ {noreply, State}.
+
+terminate(_Reason, State) ->
+ [exit(Pid, shutdown) || #job{pid=Pid} <- State#state.active],
+ ok.
+
+code_change(_, #state{waiting = [{_,_}|_] = W, active=A} = State, _) ->
+ Waiting = [#job{name=Name, node=Node} || {Name,Node} <- W],
+ Active = [#job{name=Name, node=Node, pid=Pid} || {Name,Node,Pid} <- A],
+ {ok, State#state{active = Active, waiting = Waiting}};
+
+code_change(_, State, _) ->
+ {ok, State}.
+
+handle_replication_exit(#state{waiting=[]} = State, Pid) ->
+ NewActive = lists:keydelete(Pid, #job.pid, State#state.active),
+ {noreply, State#state{active=NewActive, count=length(NewActive)}};
+handle_replication_exit(State, Pid) ->
+ #state{active=Active, limit=Limit, dict=D, waiting=Waiting} = State,
+ Active1 = lists:keydelete(Pid, #job.pid, Active),
+ Count = length(Active1),
+ NewState = if Count < Limit ->
+ case next_replication(Active1, Waiting) of
+ nil -> % all waiting replications are also active
+ State#state{active = Active1, count = Count};
+ {#job{name=DbName, node=Node} = Job, StillWaiting} ->
+ NewPid = start_push_replication(Job),
+ State#state{
+ active = [Job#job{pid = NewPid} | Active1],
+ count = Count+1,
+ dict = dict:erase({DbName,Node}, D),
+ waiting = StillWaiting
+ }
+ end;
+ true ->
+ State#state{active = Active1, count=Count}
+ end,
+ {noreply, NewState}.
+
+start_push_replication(#job{name=Name, node=Node}) ->
+ spawn_link(mem3_rep, go, [Name, Node]).
+
+add_to_queue(State, #job{name=DbName, node=Node} = Job) ->
+ #state{dict=D, waiting=Waiting} = State,
+ case dict:is_key({DbName, Node}, D) of
+ true ->
+ State;
+ false ->
+ twig:log(debug, "adding ~s -> ~p to mem3_sync queue", [DbName, Node]),
+ State#state{
+ dict = dict:store({DbName,Node}, ok, D),
+ waiting = Waiting ++ [Job]
+ }
+ end.
+
+sync_nodes_and_dbs() ->
+ Db1 = couch_config:get("mem3", "node_db", "nodes"),
+ Db2 = couch_config:get("mem3", "shard_db", "dbs"),
+ Db3 = couch_config:get("couch_httpd_auth", "authentication_db", "_users"),
+ Dbs = [Db1, Db2, Db3],
+ Nodes = mem3:nodes(),
+ Live = nodes(),
+ [[push(?l2b(Db), N) || Db <- Dbs] || N <- Nodes, lists:member(N, Live)].
+
+initial_sync() ->
+ [net_kernel:connect_node(Node) || Node <- mem3:nodes()],
+ sync_nodes_and_dbs(),
+ initial_sync(nodes()).
+
+initial_sync(Live) ->
+ Self = node(),
+ {ok, AllDbs} = fabric:all_dbs(),
+ lists:foreach(fun(Db) ->
+ LocalShards = [S || #shard{node=N} = S <- mem3:shards(Db), N =:= Self],
+ lists:foreach(fun(#shard{name=ShardName}) ->
+ Targets = [S || #shard{node=N, name=Name} = S <- mem3:shards(Db),
+ N =/= Self, Name =:= ShardName],
+ [?MODULE:push(ShardName, N) || #shard{node=N} <- Targets,
+ lists:member(N, Live)]
+ end, LocalShards)
+ end, AllDbs).
+
+start_update_notifier() ->
+ Db1 = ?l2b(couch_config:get("mem3", "node_db", "nodes")),
+ Db2 = ?l2b(couch_config:get("mem3", "shard_db", "dbs")),
+ Db3 = ?l2b(couch_config:get("couch_httpd_auth", "authentication_db",
+ "_users")),
+ couch_db_update_notifier:start_link(fun
+ ({updated, Db}) when Db == Db1; Db == Db2; Db == Db3 ->
+ Nodes = mem3:nodes(),
+ Live = nodes(),
+ [?MODULE:push(Db, N) || N <- Nodes, lists:member(N, Live)];
+ ({updated, <<"shards/", _/binary>> = ShardName}) ->
+ % TODO deal with split/merged partitions by comparing keyranges
+ try mem3:shards(mem3:dbname(ShardName)) of
+ Shards ->
+ Targets = [S || #shard{node=N, name=Name} = S <- Shards,
+ N =/= node(), Name =:= ShardName],
+ Live = nodes(),
+ [?MODULE:push(ShardName,N) || #shard{node=N} <- Targets,
+ lists:member(N, Live)]
+ catch error:database_does_not_exist ->
+ ok
+ end;
+ ({deleted, <<"shards/", _:18/binary, _/binary>> = ShardName}) ->
+ gen_server:cast(?MODULE, {remove_shard, ShardName});
+ (_) -> ok end).
+
+%% @doc Finds the next {DbName,Node} pair in the list of waiting replications
+%% which does not correspond to an already running replication
+-spec next_replication([#job{}], [#job{}]) -> {#job{}, [#job{}]} | nil.
+next_replication(Active, Waiting) ->
+ Fun = fun(#job{name=S, node=N}) -> is_running(S,N,Active) end,
+ case lists:splitwith(Fun, Waiting) of
+ {_, []} ->
+ nil;
+ {Running, [Job|Rest]} ->
+ {Job, Running ++ Rest}
+ end.
+
+is_running(DbName, Node, ActiveList) ->
+ [] =/= [true || #job{name=S, node=N} <- ActiveList, S=:=DbName, N=:=Node].
+
+remove_entries(Dict, Entries) ->
+ lists:foldl(fun(Entry, D) -> dict:erase(Entry, D) end, Dict, Entries).
diff --git a/deps/mem3/src/mem3_sync_event.erl b/deps/mem3/src/mem3_sync_event.erl
new file mode 100644
index 00000000..be5ab450
--- /dev/null
+++ b/deps/mem3/src/mem3_sync_event.erl
@@ -0,0 +1,68 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_sync_event).
+-behaviour(gen_event).
+
+-export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+init(_) ->
+ net_kernel:monitor_nodes(true),
+ {ok, nil}.
+
+handle_event({add_node, Node}, State) when Node =/= node() ->
+ Db1 = list_to_binary(couch_config:get("mem3", "node_db", "nodes")),
+ Db2 = list_to_binary(couch_config:get("mem3", "shard_db", "dbs")),
+ Db3 = list_to_binary(couch_config:get("couch_httpd_auth",
+ "authentication_db", "_users")),
+ [mem3_sync:push(Db, Node) || Db <- [Db1, Db2, Db3]],
+ {ok, State};
+
+handle_event({remove_node, Node}, State) ->
+ mem3_sync:remove_node(Node),
+ {ok, State};
+
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_call(_Request, State) ->
+ {ok, ok, State}.
+
+handle_info({nodeup, Node}, State) ->
+ case lists:member(Node, mem3:nodes()) of
+ true ->
+ Db1 = list_to_binary(couch_config:get("mem3", "node_db", "nodes")),
+ Db2 = list_to_binary(couch_config:get("mem3", "shard_db", "dbs")),
+ Db3 = list_to_binary(couch_config:get("couch_httpd_auth",
+ "authentication_db", "_users")),
+ [mem3_sync:push(Db, Node) || Db <- [Db1, Db2, Db3]],
+ mem3_sync:initial_sync([Node]);
+ false ->
+ ok
+ end,
+ {ok, State};
+
+handle_info({nodedown, Node}, State) ->
+ mem3_sync:remove_node(Node),
+ {ok, State};
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/mem3/src/mem3_util.erl b/deps/mem3/src/mem3_util.erl
new file mode 100644
index 00000000..c1b965bb
--- /dev/null
+++ b/deps/mem3/src/mem3_util.erl
@@ -0,0 +1,211 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_util).
+
+-export([hash/1, name_shard/2, create_partition_map/5, build_shards/2,
+ n_val/2, z_val/3, to_atom/1, to_integer/1, write_db_doc/1, delete_db_doc/1,
+ load_shards_from_disk/1, load_shards_from_disk/2, shard_info/1,
+ ensure_exists/1, open_db_doc/1]).
+
+-export([create_partition_map/4, name_shard/1]).
+-deprecated({create_partition_map, 4, eventually}).
+-deprecated({name_shard, 1, eventually}).
+
+-define(RINGTOP, 2 bsl 31). % CRC32 space
+
+-include("mem3.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+hash(Item) when is_binary(Item) ->
+ erlang:crc32(Item);
+hash(Item) ->
+ erlang:crc32(term_to_binary(Item)).
+
+name_shard(Shard) ->
+ name_shard(Shard, "").
+
+name_shard(#shard{dbname = DbName, range=[B,E]} = Shard, Suffix) ->
+ Name = ["shards/", couch_util:to_hex(<<B:32/integer>>), "-",
+ couch_util:to_hex(<<E:32/integer>>), "/", DbName, Suffix],
+ Shard#shard{name = ?l2b(Name)}.
+
+create_partition_map(DbName, N, Q, Nodes) ->
+ create_partition_map(DbName, N, Q, Nodes, "").
+
+create_partition_map(DbName, N, Q, Nodes, Suffix) ->
+ UniqueShards = make_key_ranges((?RINGTOP) div Q, 0, []),
+ Shards0 = lists:flatten([lists:duplicate(N, S) || S <- UniqueShards]),
+ Shards1 = attach_nodes(Shards0, [], Nodes, []),
+ [name_shard(S#shard{dbname=DbName}, Suffix) || S <- Shards1].
+
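+% Splits the CRC32 ring into Q contiguous ranges; the last range absorbs any
+% remainder so coverage runs exactly to ?RINGTOP - 1.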
+make_key_ranges(_, CurrentPos, Acc) when CurrentPos >= ?RINGTOP ->
+ Acc;
+make_key_ranges(Increment, Start, Acc) ->
+ case Start + 2*Increment of
+ X when X > ?RINGTOP ->
+ End = ?RINGTOP - 1;
+ _ ->
+ End = Start + Increment - 1
+ end,
+ make_key_ranges(Increment, End+1, [#shard{range=[Start, End]} | Acc]).
+
+attach_nodes([], Acc, _, _) ->
+ lists:reverse(Acc);
+attach_nodes(Shards, Acc, [], UsedNodes) ->
+ attach_nodes(Shards, Acc, lists:reverse(UsedNodes), []);
+attach_nodes([S | Rest], Acc, [Node | Nodes], UsedNodes) ->
+ attach_nodes(Rest, [S#shard{node=Node} | Acc], Nodes, [Node | UsedNodes]).
+
+open_db_doc(DocId) ->
+ DbName = ?l2b(couch_config:get("mem3", "shard_db", "dbs")),
+ {ok, Db} = couch_db:open(DbName, []),
+ try couch_db:open_doc(Db, DocId, []) after couch_db:close(Db) end.
+
+write_db_doc(Doc) ->
+ DbName = ?l2b(couch_config:get("mem3", "shard_db", "dbs")),
+ write_db_doc(DbName, Doc, true).
+
+write_db_doc(DbName, #doc{id=Id, body=Body} = Doc, ShouldMutate) ->
+ {ok, Db} = couch_db:open(DbName, []),
+ try couch_db:open_doc(Db, Id, []) of
+ {ok, #doc{body = Body}} ->
+ % the doc is already in the desired state, we're done here
+ ok;
+ {not_found, _} when ShouldMutate ->
+ try couch_db:update_doc(Db, Doc, []) of
+ {ok, _} ->
+ ok
+ catch conflict ->
+ % check to see if this was a replication race or a different edit
+ write_db_doc(DbName, Doc, false)
+ end;
+ _ ->
+ % the doc already exists in a different state
+ conflict
+ after
+ couch_db:close(Db)
+ end.
+
+delete_db_doc(DocId) ->
+ DbName = ?l2b(couch_config:get("mem3", "shard_db", "dbs")),
+ delete_db_doc(DbName, DocId, true).
+
+delete_db_doc(DbName, DocId, ShouldMutate) ->
+ {ok, Db} = couch_db:open(DbName, []),
+ {ok, Revs} = couch_db:open_doc_revs(Db, DocId, all, []),
+ try [Doc#doc{deleted=true} || {ok, #doc{deleted=false}=Doc} <- Revs] of
+ [] ->
+ not_found;
+ Docs when ShouldMutate ->
+ try couch_db:update_docs(Db, Docs, []) of
+ {ok, _} ->
+ ok
+ catch conflict ->
+ % check to see if this was a replication race or if leaves survived
+ delete_db_doc(DbName, DocId, false)
+ end;
+ _ ->
+ % we have live leaves that we aren't allowed to delete; let's bail
+ conflict
+ after
+ couch_db:close(Db)
+ end.
+
+build_shards(DbName, DocProps) ->
+ {ByNode} = couch_util:get_value(<<"by_node">>, DocProps, {[]}),
+ Suffix = couch_util:get_value(<<"shard_suffix">>, DocProps, ""),
+ lists:flatmap(fun({Node, Ranges}) ->
+ lists:map(fun(Range) ->
+ [B,E] = string:tokens(?b2l(Range), "-"),
+ Beg = httpd_util:hexlist_to_integer(B),
+ End = httpd_util:hexlist_to_integer(E),
+ name_shard(#shard{
+ dbname = DbName,
+ node = to_atom(Node),
+ range = [Beg, End]
+ }, Suffix)
+ end, Ranges)
+ end, ByNode).
+
+to_atom(Node) when is_binary(Node) ->
+ list_to_atom(binary_to_list(Node));
+to_atom(Node) when is_atom(Node) ->
+ Node.
+
+to_integer(N) when is_integer(N) ->
+ N;
+to_integer(N) when is_binary(N) ->
+ list_to_integer(binary_to_list(N));
+to_integer(N) when is_list(N) ->
+ list_to_integer(N).
+
+n_val(undefined, NodeCount) ->
+ n_val(couch_config:get("cluster", "n", "3"), NodeCount);
+n_val(N, NodeCount) when is_list(N) ->
+ n_val(list_to_integer(N), NodeCount);
+n_val(N, NodeCount) when is_integer(NodeCount), N > NodeCount ->
+ twig:log(error, "Request to create N=~p DB but only ~p node(s)", [N, NodeCount]),
+ NodeCount;
+n_val(N, _) when N < 1 ->
+ 1;
+n_val(N, _) ->
+ N.
+
+z_val(undefined, NodeCount, ZoneCount) ->
+ z_val(couch_config:get("cluster", "z", "3"), NodeCount, ZoneCount);
+z_val(N, NodeCount, ZoneCount) when is_list(N) ->
+ z_val(list_to_integer(N), NodeCount, ZoneCount);
+z_val(N, NodeCount, ZoneCount) when N > NodeCount orelse N > ZoneCount ->
+ twig:log(error, "Request to create Z=~p DB but only ~p nodes(s) and ~p zone(s)",
+ [N, NodeCount, ZoneCount]),
+ erlang:min(NodeCount, ZoneCount);
+z_val(N, _, _) when N < 1 ->
+ 1;
+z_val(N, _, _) ->
+ N.
+
+load_shards_from_disk(DbName) when is_binary(DbName) ->
+ X = ?l2b(couch_config:get("mem3", "shard_db", "dbs")),
+ {ok, Db} = couch_db:open(X, []),
+ try load_shards_from_db(Db, DbName) after couch_db:close(Db) end.
+
+load_shards_from_db(#db{} = ShardDb, DbName) ->
+ case couch_db:open_doc(ShardDb, DbName, []) of
+ {ok, #doc{body = {Props}}} ->
+ twig:log(notice, "dbs cache miss for ~s", [DbName]),
+ build_shards(DbName, Props);
+ {not_found, _} ->
+ erlang:error(database_does_not_exist, ?b2l(DbName))
+ end.
+
+load_shards_from_disk(DbName, DocId) ->
+ Shards = load_shards_from_disk(DbName),
+ HashKey = hash(DocId),
+ [S || #shard{range = [B,E]} = S <- Shards, B < HashKey, HashKey =< E].
+
+shard_info(DbName) ->
+ [{n, mem3:n(DbName)},
+ {q, length(mem3:shards(DbName)) div mem3:n(DbName)}].
+
+ensure_exists(DbName) when is_list(DbName) ->
+ ensure_exists(list_to_binary(DbName));
+ensure_exists(DbName) ->
+ Options = [{user_ctx, #user_ctx{roles=[<<"_admin">>]}}],
+ case couch_db:open(DbName, Options) of
+ {ok, Db} ->
+ {ok, Db};
+ _ ->
+ couch_server:create(DbName, Options)
+ end.
diff --git a/deps/mem3/test/01-config-default.ini b/deps/mem3/test/01-config-default.ini
new file mode 100644
index 00000000..757f7830
--- /dev/null
+++ b/deps/mem3/test/01-config-default.ini
@@ -0,0 +1,2 @@
+[cluster]
+n=3
diff --git a/deps/mem3/test/mem3_util_test.erl b/deps/mem3/test/mem3_util_test.erl
new file mode 100644
index 00000000..89c23ca6
--- /dev/null
+++ b/deps/mem3/test/mem3_util_test.erl
@@ -0,0 +1,154 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(mem3_util_test).
+
+-include("mem3.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+hash_test() ->
+ ?assertEqual(1624516141,mem3_util:hash(0)),
+ ?assertEqual(3816901808,mem3_util:hash("0")),
+ ?assertEqual(3523407757,mem3_util:hash(<<0>>)),
+ ?assertEqual(4108050209,mem3_util:hash(<<"0">>)),
+ ?assertEqual(3094724072,mem3_util:hash(zero)),
+ ok.
+
+name_shard_test() ->
+ Shard1 = #shard{},
+ ?assertError(function_clause, mem3_util:name_shard(Shard1, ".1234")),
+
+ Shard2 = #shard{dbname = <<"testdb">>, range = [0,100]},
+ #shard{name=Name2} = mem3_util:name_shard(Shard2, ".1234"),
+ ?assertEqual(<<"shards/00000000-00000064/testdb.1234">>, Name2),
+
+ ok.
+
+create_partition_map_test() ->
+ {DbName1, N1, Q1, Nodes1} = {<<"testdb1">>, 3, 4, [a,b,c,d]},
+ Map1 = mem3_util:create_partition_map(DbName1, N1, Q1, Nodes1),
+ ?assertEqual(12, length(Map1)),
+
+ {DbName2, N2, Q2, Nodes2} = {<<"testdb2">>, 1, 1, [a,b,c,d]},
+ [#shard{name=Name2,node=Node2}] = Map2 =
+ mem3_util:create_partition_map(DbName2, N2, Q2, Nodes2, ".1234"),
+ ?assertEqual(1, length(Map2)),
+ ?assertEqual(<<"shards/00000000-ffffffff/testdb2.1234">>, Name2),
+ ?assertEqual(a, Node2),
+ ok.
+
+build_shards_test() ->
+ DocProps1 =
+ [{<<"changelog">>,
+ [[<<"add">>,<<"00000000-1fffffff">>,
+ <<"bigcouch@node.local">>],
+ [<<"add">>,<<"20000000-3fffffff">>,
+ <<"bigcouch@node.local">>],
+ [<<"add">>,<<"40000000-5fffffff">>,
+ <<"bigcouch@node.local">>],
+ [<<"add">>,<<"60000000-7fffffff">>,
+ <<"bigcouch@node.local">>],
+ [<<"add">>,<<"80000000-9fffffff">>,
+ <<"bigcouch@node.local">>],
+ [<<"add">>,<<"a0000000-bfffffff">>,
+ <<"bigcouch@node.local">>],
+ [<<"add">>,<<"c0000000-dfffffff">>,
+ <<"bigcouch@node.local">>],
+ [<<"add">>,<<"e0000000-ffffffff">>,
+ <<"bigcouch@node.local">>]]},
+ {<<"by_node">>,
+ {[{<<"bigcouch@node.local">>,
+ [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>,
+ <<"40000000-5fffffff">>,<<"60000000-7fffffff">>,
+ <<"80000000-9fffffff">>,<<"a0000000-bfffffff">>,
+ <<"c0000000-dfffffff">>,<<"e0000000-ffffffff">>]}]}},
+ {<<"by_range">>,
+ {[{<<"00000000-1fffffff">>,[<<"bigcouch@node.local">>]},
+ {<<"20000000-3fffffff">>,[<<"bigcouch@node.local">>]},
+ {<<"40000000-5fffffff">>,[<<"bigcouch@node.local">>]},
+ {<<"60000000-7fffffff">>,[<<"bigcouch@node.local">>]},
+ {<<"80000000-9fffffff">>,[<<"bigcouch@node.local">>]},
+ {<<"a0000000-bfffffff">>,[<<"bigcouch@node.local">>]},
+ {<<"c0000000-dfffffff">>,[<<"bigcouch@node.local">>]},
+ {<<"e0000000-ffffffff">>,[<<"bigcouch@node.local">>]}]}}],
+ Shards1 = mem3_util:build_shards(<<"testdb1">>, DocProps1),
+ ExpectedShards1 =
+ [{shard,<<"shards/00000000-1fffffff/testdb1">>,
+ 'bigcouch@node.local',<<"testdb1">>,
+ [0,536870911],
+ undefined},
+ {shard,<<"shards/20000000-3fffffff/testdb1">>,
+ 'bigcouch@node.local',<<"testdb1">>,
+ [536870912,1073741823],
+ undefined},
+ {shard,<<"shards/40000000-5fffffff/testdb1">>,
+ 'bigcouch@node.local',<<"testdb1">>,
+ [1073741824,1610612735],
+ undefined},
+ {shard,<<"shards/60000000-7fffffff/testdb1">>,
+ 'bigcouch@node.local',<<"testdb1">>,
+ [1610612736,2147483647],
+ undefined},
+ {shard,<<"shards/80000000-9fffffff/testdb1">>,
+ 'bigcouch@node.local',<<"testdb1">>,
+ [2147483648,2684354559],
+ undefined},
+ {shard,<<"shards/a0000000-bfffffff/testdb1">>,
+ 'bigcouch@node.local',<<"testdb1">>,
+ [2684354560,3221225471],
+ undefined},
+ {shard,<<"shards/c0000000-dfffffff/testdb1">>,
+ 'bigcouch@node.local',<<"testdb1">>,
+ [3221225472,3758096383],
+ undefined},
+ {shard,<<"shards/e0000000-ffffffff/testdb1">>,
+ 'bigcouch@node.local',<<"testdb1">>,
+ [3758096384,4294967295],
+ undefined}],
+ ?assertEqual(ExpectedShards1, Shards1),
+ ok.
+
+
+%% n_val tests
+
+nval_test() ->
+ ?assertEqual(2, mem3_util:n_val(2,4)),
+ ?assertEqual(1, mem3_util:n_val(-1,4)),
+ ?assertEqual(4, mem3_util:n_val(6,4)),
+ ok.
+
+config_01_setup() ->
+ Ini = filename:join([code:lib_dir(mem3, test), "01-config-default.ini"]),
+ {ok, Pid} = couch_config:start_link([Ini]),
+ Pid.
+
+config_teardown(_Pid) ->
+ couch_config:stop().
+
+n_val_test_() ->
+ {"n_val tests",
+ [
+ {setup,
+ fun config_01_setup/0,
+ fun config_teardown/1,
+ fun(Pid) ->
+ {with, Pid, [
+ fun n_val_1/1
+ ]}
+ end}
+ ]
+ }.
+
+n_val_1(_Pid) ->
+ ?assertEqual(3, mem3_util:n_val(undefined, 4)).
diff --git a/deps/mochiweb/LICENSE b/deps/mochiweb/LICENSE
new file mode 100644
index 00000000..c85b65a4
--- /dev/null
+++ b/deps/mochiweb/LICENSE
@@ -0,0 +1,9 @@
+This is the MIT license.
+
+Copyright (c) 2007 Mochi Media, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/deps/mochiweb/Makefile b/deps/mochiweb/Makefile
new file mode 100644
index 00000000..dfd85a0e
--- /dev/null
+++ b/deps/mochiweb/Makefile
@@ -0,0 +1,20 @@
+all: ebin/
+ (cd src;$(MAKE) all)
+
+edoc:
+ (cd src;$(MAKE) edoc)
+
+test: ebin/
+ (cd src;$(MAKE) test)
+
+clean:
+ rm -rf ebin
+
+clean_plt:
+ (cd src;$(MAKE) clean_plt)
+
+dialyzer:
+ (cd src;$(MAKE) dialyzer)
+
+ebin/:
+ @mkdir -p ebin
diff --git a/deps/mochiweb/README b/deps/mochiweb/README
new file mode 100644
index 00000000..f5d2c0de
--- /dev/null
+++ b/deps/mochiweb/README
@@ -0,0 +1 @@
+MochiWeb is an Erlang library for building lightweight HTTP servers.
diff --git a/deps/mochiweb/examples/https/https_store.erl b/deps/mochiweb/examples/https/https_store.erl
new file mode 100644
index 00000000..959cc00c
--- /dev/null
+++ b/deps/mochiweb/examples/https/https_store.erl
@@ -0,0 +1,146 @@
+
+%% Trivial web storage app. It's available over both HTTP (port 8442)
+%% and HTTPS (port 8443). You use a PUT to store items, a GET to
+%% retrieve them and DELETE to delete them. The HTTP POST method is
+%% invalid for this application. Example (using HTTPS transport):
+%%
+%% $ curl -k --verbose https://localhost:8443/flintstones
+%% ...
+%% 404 Not Found
+%% ...
+%% $ echo -e "Fred\nWilma\nBarney" |
+%% curl -k --verbose https://localhost:8443/flintstones \
+%% -X PUT -H "Content-Type: text/plain" --data-binary @-
+%% ...
+%% 201 Created
+%% ...
+%% $ curl -k --verbose https://localhost:8443/flintstones
+%% ...
+%% Fred
+%% Wilma
+%% Barney
+%% ...
+%% $ curl -k --verbose https://localhost:8443/flintstones -X DELETE
+%% ...
+%% 200 OK
+%% ...
+%% $ curl -k --verbose https://localhost:8443/flintstones
+%% ...
+%% 404 Not Found
+%% ...
+%%
+%% All submitted data is stored in memory (in an ets table). Could be
+%% useful for ad-hoc testing.
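+%%
+%% Illustrative Erlang-shell equivalent of the session above (assumes both
+%% listeners start cleanly on 8442/8443):
+%%
+%%   1> https_store:start().
+%%   ok
+%%   ... drive it with the curl commands shown above ...
+%%   2> https_store:stop().
+%%   ok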
+
+-module(https_store).
+
+-export([start/0,
+ stop/0,
+ dispatch/1,
+ loop/1
+ ]).
+
+-define(HTTP_OPTS, [
+ {loop, {?MODULE, dispatch}},
+ {port, 8442},
+ {name, http_8442}
+ ]).
+
+-define(HTTPS_OPTS, [
+ {loop, {?MODULE, dispatch}},
+ {port, 8443},
+ {name, https_8443},
+ {ssl, true},
+ {ssl_opts, [
+ {certfile, "server_cert.pem"},
+ {keyfile, "server_key.pem"}]}
+ ]).
+
+-record(sd, {http, https}).
+-record(resource, {type, data}).
+
+start() ->
+ {ok, Http} = mochiweb_http:start(?HTTP_OPTS),
+ {ok, Https} = mochiweb_http:start(?HTTPS_OPTS),
+ SD = #sd{http=Http, https=Https},
+ Pid = spawn_link(fun() ->
+ ets:new(?MODULE, [named_table]),
+ loop(SD)
+ end),
+ register(http_store, Pid),
+ ok.
+
+stop() ->
+ http_store ! stop,
+ ok.
+
+dispatch(Req) ->
+ case Req:get(method) of
+ 'GET' ->
+ get_resource(Req);
+ 'PUT' ->
+ put_resource(Req);
+ 'DELETE' ->
+ delete_resource(Req);
+ _ ->
+ Headers = [{"Allow", "GET,PUT,DELETE"}],
+ Req:respond({405, Headers, "405 Method Not Allowed\r\n"})
+ end.
+
+get_resource(Req) ->
+ Path = Req:get(path),
+ case ets:lookup(?MODULE, Path) of
+ [{Path, #resource{type=Type, data=Data}}] ->
+ Req:ok({Type, Data});
+ [] ->
+ Req:respond({404, [], "404 Not Found\r\n"})
+ end.
+
+put_resource(Req) ->
+ ContentType = case Req:get_header_value("Content-Type") of
+ undefined ->
+ "application/octet-stream";
+ S ->
+ S
+ end,
+ Resource = #resource{type=ContentType, data=Req:recv_body()},
+ http_store ! {self(), {put, Req:get(path), Resource}},
+ Pid = whereis(http_store),
+ receive
+ {Pid, created} ->
+ Req:respond({201, [], "201 Created\r\n"});
+ {Pid, updated} ->
+ Req:respond({200, [], "200 OK\r\n"})
+ end.
+
+delete_resource(Req) ->
+ http_store ! {self(), {delete, Req:get(path)}},
+ Pid = whereis(http_store),
+ receive
+ {Pid, ok} ->
+ Req:respond({200, [], "200 OK\r\n"})
+ end.
+
+loop(#sd{http=Http, https=Https} = SD) ->
+ receive
+ stop ->
+ ok = mochiweb_http:stop(Http),
+ ok = mochiweb_http:stop(Https),
+ exit(normal);
+ {From, {put, Key, Val}} ->
+ Exists = ets:member(?MODULE, Key),
+ ets:insert(?MODULE, {Key, Val}),
+ case Exists of
+ true ->
+ From ! {self(), updated};
+ false ->
+ From ! {self(), created}
+ end;
+ {From, {delete, Key}} ->
+ ets:delete(?MODULE, Key),
+ From ! {self(), ok};
+ _ ->
+ ignore
+ end,
+ ?MODULE:loop(SD).
+
diff --git a/deps/mochiweb/examples/https/server_cert.pem b/deps/mochiweb/examples/https/server_cert.pem
new file mode 100644
index 00000000..f84ccca7
--- /dev/null
+++ b/deps/mochiweb/examples/https/server_cert.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAgigAwIBAgIJAJLkNZzERPIUMA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
+BAMTCWxvY2FsaG9zdDAeFw0xMDAzMTgxOTM5MThaFw0yMDAzMTUxOTM5MThaMBQx
+EjAQBgNVBAMTCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAJeUCOZxbmtngF4S5lXckjSDLc+8C+XjMBYBPyy5eKdJY20AQ1s9/hhp3ulI
+8pAvl+xVo4wQ+iBSvOzcy248Q+Xi6+zjceF7UNRgoYPgtJjKhdwcHV3mvFFrS/fp
+9ggoAChaJQWDO1OCfUgTWXImhkw+vcDR11OVMAJ/h73dqzJPI9mfq44PTTHfYtgr
+v4LAQAOlhXIAa2B+a6PlF6sqDqJaW5jLTcERjsBwnRhUGi7JevQzkejujX/vdA+N
+jRBjKH/KLU5h3Q7wUchvIez0PXWVTCnZjpA9aR4m7YV05nKQfxtGd71czYDYk+j8
+hd005jetT4ir7JkAWValBybJVksCAwEAAaN1MHMwHQYDVR0OBBYEFJl9s51SnjJt
+V/wgKWqV5Q6jnv1ZMEQGA1UdIwQ9MDuAFJl9s51SnjJtV/wgKWqV5Q6jnv1ZoRik
+FjAUMRIwEAYDVQQDEwlsb2NhbGhvc3SCCQCS5DWcxETyFDAMBgNVHRMEBTADAQH/
+MA0GCSqGSIb3DQEBBQUAA4IBAQB2ldLeLCc+lxK5i0EZquLamMBJwDIjGpT0JMP9
+b4XQOK2JABIu54BQIZhwcjk3FDJz/uOW5vm8k1kYni8FCjNZAaRZzCUfiUYTbTKL
+Rq9LuIAODyP2dnTqyKaQOOJHvrx9MRZ3XVecXPS0Tib4aO57vCaAbIkmhtYpTWmw
+e3t8CAIDVtgvjR6Se0a1JA4LktR7hBu22tDImvCSJn1nVAaHpani6iPBPPdMuMsP
+TBoeQfj8VpqBUjCStqJGa8ytjDFX73YaxV2mgrtGwPNme1x3YNRR11yTu7tksyMO
+GrmgxNriqYRchBhNEf72AKF0LR1ByKwfbDB9rIsV00HtCgOp
+-----END CERTIFICATE-----
diff --git a/deps/mochiweb/examples/https/server_key.pem b/deps/mochiweb/examples/https/server_key.pem
new file mode 100644
index 00000000..69bbf823
--- /dev/null
+++ b/deps/mochiweb/examples/https/server_key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAl5QI5nFua2eAXhLmVdySNIMtz7wL5eMwFgE/LLl4p0ljbQBD
+Wz3+GGne6UjykC+X7FWjjBD6IFK87NzLbjxD5eLr7ONx4XtQ1GChg+C0mMqF3Bwd
+Xea8UWtL9+n2CCgAKFolBYM7U4J9SBNZciaGTD69wNHXU5UwAn+Hvd2rMk8j2Z+r
+jg9NMd9i2Cu/gsBAA6WFcgBrYH5ro+UXqyoOolpbmMtNwRGOwHCdGFQaLsl69DOR
+6O6Nf+90D42NEGMof8otTmHdDvBRyG8h7PQ9dZVMKdmOkD1pHibthXTmcpB/G0Z3
+vVzNgNiT6PyF3TTmN61PiKvsmQBZVqUHJslWSwIDAQABAoIBACI8Ky5xHDFh9RpK
+Rn/KC7OUlTpADKflgizWJ0Cgu2F9L9mkn5HyFHvLHa+u7CootbWJOiEejH/UcBtH
+WyMQtX0snYCpdkUpJv5wvMoebGu+AjHOn8tfm9T/2O6rhwgckLyMb6QpGbMo28b1
+p9QiY17BJPZx7qJQJcHKsAvwDwSThlb7MFmWf42LYWlzybpeYQvwpd+UY4I0WXLu
+/dqJIS9Npq+5Y5vbo2kAEAssb2hSCvhCfHmwFdKmBzlvgOn4qxgZ1iHQgfKI6Z3Y
+J0573ZgOVTuacn+lewtdg5AaHFcl/zIYEr9SNqRoPNGbPliuv6k6N2EYcufWL5lR
+sCmmmHECgYEAxm+7OpepGr++K3+O1e1MUhD7vSPkKJrCzNtUxbOi2NWj3FFUSPRU
+adWhuxvUnZgTcgM1+KuQ0fB2VmxXe9IDcrSFS7PKFGtd2kMs/5mBw4UgDZkOQh+q
+kDiBEV3HYYJWRq0w3NQ/9Iy1jxxdENHtGmG9aqamHxNtuO608wGW2S8CgYEAw4yG
+ZyAic0Q/U9V2OHI0MLxLCzuQz17C2wRT1+hBywNZuil5YeTuIt2I46jro6mJmWI2
+fH4S/geSZzg2RNOIZ28+aK79ab2jWBmMnvFCvaru+odAuser4N9pfAlHZvY0pT+S
+1zYX3f44ygiio+oosabLC5nWI0zB2gG8pwaJlaUCgYEAgr7poRB+ZlaCCY0RYtjo
+mYYBKD02vp5BzdKSB3V1zeLuBWM84pjB6b3Nw0fyDig+X7fH3uHEGN+USRs3hSj6
+BqD01s1OT6fyfbYXNw5A1r+nP+5h26Wbr0zblcKxdQj4qbbBZC8hOJNhqTqqA0Qe
+MmzF7jiBaiZV/Cyj4x1f9BcCgYEAhjL6SeuTuOctTqs/5pz5lDikh6DpUGcH8qaV
+o6aRAHHcMhYkZzpk8yh1uUdD7516APmVyvn6rrsjjhLVq4ZAJjwB6HWvE9JBN0TR
+bILF+sREHUqU8Zn2Ku0nxyfXCKIOnxlx/J/y4TaGYqBqfXNFWiXNUrjQbIlQv/xR
+K48g/MECgYBZdQlYbMSDmfPCC5cxkdjrkmAl0EgV051PWAi4wR+hLxIMRjHBvAk7
+IweobkFvT4TICulgroLkYcSa5eOZGxB/DHqcQCbWj3reFV0VpzmTDoFKG54sqBRl
+vVntGt0pfA40fF17VoS7riAdHF53ippTtsovHEsg5tq5NrBl5uKm2g==
+-----END RSA PRIVATE KEY-----
diff --git a/deps/mochiweb/examples/keepalive/keepalive.erl b/deps/mochiweb/examples/keepalive/keepalive.erl
new file mode 100644
index 00000000..965a17eb
--- /dev/null
+++ b/deps/mochiweb/examples/keepalive/keepalive.erl
@@ -0,0 +1,81 @@
+-module(keepalive).
+
+%% your web app can push data to clients using a technique called comet long
+%% polling. browsers make a request and your server waits to send a
+%% response until data is available. see wikipedia for a better explanation:
+%% http://en.wikipedia.org/wiki/Comet_(programming)#Ajax_with_long_polling
+%%
+%% since the majority of your http handlers will be idle at any given moment,
+%% you might consider making them hibernate while they wait for more data from
+%% another process. however, since the execution stack is discarded when a
+%% process hibernates, the handler would usually terminate after your response
+%% code runs. this means http keep-alives wouldn't work; the handler process
+%% would terminate after each response and close its socket rather than
+%% returning to the big @mochiweb_http@ loop and processing another request.
+%%
+%% however, if mochiweb exposes a continuation that encapsulates the return to
+%% the top of the big loop in @mochiweb_http@, we can call that after the
+%% response. if you do that then control flow returns to the proper place,
+%% and keep-alives work like they would if you hadn't hibernated.
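+%%
+%% illustrative usage sketch (the port is arbitrary): start a listener and
+%% hit the long-poll path; the reply arrives roughly 2 seconds later, when
+%% the hibernated handler is woken by the message it sent itself:
+%%
+%%   1> keepalive:start([{port, 8080}]).
+%%   $ curl -v http://localhost:8080/longpoll
+%%   wake up message: "honk honk"
+%%   rest of path: []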
+
+-export([ start/1, loop/1
+ ]).
+
+%% internal export (so hibernate can reach it)
+-export([ resume/3
+ ]).
+
+-define(LOOP, {?MODULE, loop}).
+
+start(Options = [{port, _Port}]) ->
+ mochiweb_http:start([{name, ?MODULE}, {loop, ?LOOP} | Options]).
+
+loop(Req) ->
+ Path = Req:get(path),
+ case string:tokens(Path, "/") of
+ ["longpoll" | RestOfPath] ->
+ %% the "reentry" is a continuation -- what @mochiweb_http@
+ %% needs to do to start its loop back at the top
+ Reentry = mochiweb_http:reentry(?LOOP),
+
+ %% here we could send a message to some other process and hope
+ %% to get an interesting message back after a while. for
+ %% simplicity let's just send ourselves a message after a few
+ %% seconds
+ erlang:send_after(2000, self(), "honk honk"),
+
+ %% since we expect to wait for a long time before getting a
+ %% reply, let's hibernate. memory usage will be minimized, so
+ %% we won't be wasting memory just sitting in a @receive@
+ proc_lib:hibernate(?MODULE, resume, [Req, RestOfPath, Reentry]),
+
+ %% we'll never reach this point, and this function @loop/1@
+ %% won't ever return control to @mochiweb_http@. luckily
+ %% @resume/3@ will take care of that.
+ io:format("not gonna happen~n", []);
+
+ _ ->
+ ok(Req, io_lib:format("some other page: ~p", [Path]))
+ end,
+
+ io:format("restarting loop normally in ~p~n", [Path]),
+ ok.
+
+%% this is the function that's called when a message arrives.
+resume(Req, RestOfPath, Reentry) ->
+ receive
+ Msg ->
+ Text = io_lib:format("wake up message: ~p~nrest of path: ~p", [Msg, RestOfPath]),
+ ok(Req, Text)
+ end,
+
+ %% if we didn't call @Reentry@ here then the function would finish and the
+ %% process would exit. calling @Reentry@ takes care of returning control
+ %% to @mochiweb_http@
+ io:format("reentering loop via continuation in ~p~n", [Req:get(path)]),
+ Reentry(Req).
+
+ok(Req, Response) ->
+ Req:ok({_ContentType = "text/plain",
+ _Headers = [],
+ Response}).
diff --git a/deps/mochiweb/priv/skel/Makefile b/deps/mochiweb/priv/skel/Makefile
new file mode 100644
index 00000000..615fa90a
--- /dev/null
+++ b/deps/mochiweb/priv/skel/Makefile
@@ -0,0 +1,20 @@
+all: ebin/
+ (cd src;$(MAKE) all)
+
+edoc:
+ (cd src;$(MAKE) edoc)
+
+test:
+ (cd src;$(MAKE) test)
+
+clean:
+ (cd src;$(MAKE) clean)
+
+clean_plt:
+ (cd src;$(MAKE) clean_plt)
+
+dialyzer:
+ (cd src;$(MAKE) dialyzer)
+
+ebin/:
+ @mkdir -p ebin
diff --git a/deps/mochiweb/priv/skel/priv/www/index.html b/deps/mochiweb/priv/skel/priv/www/index.html
new file mode 100644
index 00000000..8e7a2c62
--- /dev/null
+++ b/deps/mochiweb/priv/skel/priv/www/index.html
@@ -0,0 +1,8 @@
+<html>
+<head>
+<title>It Worked</title>
+</head>
+<body>
+MochiWeb running.
+</body>
+</html>
diff --git a/deps/mochiweb/priv/skel/src/Makefile b/deps/mochiweb/priv/skel/src/Makefile
new file mode 100644
index 00000000..97fec1a1
--- /dev/null
+++ b/deps/mochiweb/priv/skel/src/Makefile
@@ -0,0 +1,33 @@
+include ../support/include.mk
+
+APPLICATION=skel
+DOC_OPTS={dir,\"../doc\"}
+TEST_PLT=$(TEST_DIR)/dialyzer_plt
+
+all: $(EBIN_FILES)
+
+debug:
+ $(MAKE) DEBUG=-DDEBUG
+
+clean:
+ rm -rf $(EBIN_FILES)
+
+edoc:
+ $(ERL) -noshell -pa ../ebin \
+ -eval "edoc:application($(APPLICATION), \".\", [$(DOC_OPTS)])" \
+ -s init stop
+
+test: $(EBIN_FILES)
+ mkdir -p $(TEST_DIR);
+ @../support/run_tests.escript $(EBIN_DIR) | tee $(TEST_DIR)/test.log
+
+$(TEST_PLT):
+ mkdir -p $(TEST_DIR)
+ cp $(DIALYZER_PLT) $(TEST_PLT)
+ dialyzer --plt $(TEST_PLT) --add_to_plt -r ../deps/*/ebin
+
+clean_plt:
+ rm $(TEST_PLT)
+
+dialyzer: $(TEST_PLT)
+ dialyzer --src --plt $(TEST_PLT) -DNOTEST -DDIALYZER -c ../src | tee $(TEST_DIR)/dialyzer.log
\ No newline at end of file
diff --git a/deps/mochiweb/priv/skel/src/skel.app b/deps/mochiweb/priv/skel/src/skel.app
new file mode 100644
index 00000000..fc16aae0
--- /dev/null
+++ b/deps/mochiweb/priv/skel/src/skel.app
@@ -0,0 +1,14 @@
+{application, skel,
+ [{description, "skel"},
+ {vsn, "0.01"},
+ {modules, [
+ skel,
+ skel_app,
+ skel_sup,
+ skel_web,
+ skel_deps
+ ]},
+ {registered, []},
+ {mod, {skel_app, []}},
+ {env, []},
+ {applications, [kernel, stdlib, crypto]}]}.
diff --git a/deps/mochiweb/priv/skel/src/skel.erl b/deps/mochiweb/priv/skel/src/skel.erl
new file mode 100644
index 00000000..7ac4e2bc
--- /dev/null
+++ b/deps/mochiweb/priv/skel/src/skel.erl
@@ -0,0 +1,30 @@
+%% @author author <author@example.com>
+%% @copyright YYYY author.
+
+%% @doc TEMPLATE.
+
+-module(skel).
+-author('author <author@example.com>').
+-export([start/0, stop/0]).
+
+ensure_started(App) ->
+ case application:start(App) of
+ ok ->
+ ok;
+ {error, {already_started, App}} ->
+ ok
+ end.
+
+%% @spec start() -> ok
+%% @doc Start the skel server.
+start() ->
+ skel_deps:ensure(),
+ ensure_started(crypto),
+ application:start(skel).
+
+%% @spec stop() -> ok
+%% @doc Stop the skel server.
+stop() ->
+ Res = application:stop(skel),
+ application:stop(crypto),
+ Res.
diff --git a/deps/mochiweb/priv/skel/src/skel.hrl b/deps/mochiweb/priv/skel/src/skel.hrl
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/deps/mochiweb/priv/skel/src/skel.hrl
@@ -0,0 +1 @@
+
diff --git a/deps/mochiweb/priv/skel/src/skel_app.erl b/deps/mochiweb/priv/skel/src/skel_app.erl
new file mode 100644
index 00000000..7ee8b50e
--- /dev/null
+++ b/deps/mochiweb/priv/skel/src/skel_app.erl
@@ -0,0 +1,30 @@
+%% @author author <author@example.com>
+%% @copyright YYYY author.
+
+%% @doc Callbacks for the skel application.
+
+-module(skel_app).
+-author('author <author@example.com>').
+
+-behaviour(application).
+-export([start/2, stop/1]).
+
+
+%% @spec start(_Type, _StartArgs) -> ServerRet
+%% @doc application start callback for skel.
+start(_Type, _StartArgs) ->
+ skel_deps:ensure(),
+ skel_sup:start_link().
+
+%% @spec stop(_State) -> ServerRet
+%% @doc application stop callback for skel.
+stop(_State) ->
+ ok.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/priv/skel/src/skel_deps.erl b/deps/mochiweb/priv/skel/src/skel_deps.erl
new file mode 100644
index 00000000..cba796b1
--- /dev/null
+++ b/deps/mochiweb/priv/skel/src/skel_deps.erl
@@ -0,0 +1,92 @@
+%% @author author <author@example.com>
+%% @copyright YYYY author.
+
+%% @doc Ensure that the relatively-installed dependencies are on the code
+%% loading path, and locate resources relative
+%% to this application's path.
+
+-module(skel_deps).
+-author('author <author@example.com>').
+
+-export([ensure/0, ensure/1]).
+-export([get_base_dir/0, get_base_dir/1]).
+-export([local_path/1, local_path/2]).
+-export([deps_on_path/0, new_siblings/1]).
+
+%% @spec deps_on_path() -> [ProjNameAndVers]
+%% @doc List of project dependencies on the path.
+deps_on_path() ->
+ F = fun (X, Acc) ->
+ ProjDir = filename:dirname(X),
+ case {filename:basename(X),
+ filename:basename(filename:dirname(ProjDir))} of
+ {"ebin", "deps"} ->
+ [filename:basename(ProjDir) | Acc];
+ _ ->
+ Acc
+ end
+ end,
+ ordsets:from_list(lists:foldl(F, [], code:get_path())).
+
+%% @spec new_siblings(Module) -> [Dir]
+%% @doc Find new siblings paths relative to Module that aren't already on the
+%% code path.
+new_siblings(Module) ->
+ Existing = deps_on_path(),
+ SiblingEbin = filelib:wildcard(local_path(["deps", "*", "ebin"], Module)),
+ Siblings = [filename:dirname(X) || X <- SiblingEbin,
+ ordsets:is_element(
+ filename:basename(filename:dirname(X)),
+ Existing) =:= false],
+ lists:filter(fun filelib:is_dir/1,
+ lists:append([[filename:join([X, "ebin"]),
+ filename:join([X, "include"])] ||
+ X <- Siblings])).
+
+
+%% @spec ensure(Module) -> ok
+%% @doc Ensure that all ebin and include paths for dependencies
+%% of the application for Module are on the code path.
+ensure(Module) ->
+ code:add_paths(new_siblings(Module)),
+ code:clash(),
+ ok.
+
+%% @spec ensure() -> ok
+%% @doc Ensure that the ebin and include paths for dependencies of
+%% this application are on the code path. Equivalent to
+%% ensure(?MODULE).
+ensure() ->
+ ensure(?MODULE).
+
+%% @spec get_base_dir(Module) -> string()
+%% @doc Return the application directory for Module. It assumes Module is in
+%% a standard OTP layout application in the ebin or src directory.
+get_base_dir(Module) ->
+ {file, Here} = code:is_loaded(Module),
+ filename:dirname(filename:dirname(Here)).
+
+%% @spec get_base_dir() -> string()
+%% @doc Return the application directory for this application. Equivalent to
+%% get_base_dir(?MODULE).
+get_base_dir() ->
+ get_base_dir(?MODULE).
+
+%% @spec local_path([string()], Module) -> string()
+%% @doc Return an application-relative directory from Module's application.
+local_path(Components, Module) ->
+ filename:join([get_base_dir(Module) | Components]).
+
+%% @spec local_path(Components) -> string()
+%% @doc Return an application-relative directory for this application.
+%% Equivalent to local_path(Components, ?MODULE).
+local_path(Components) ->
+ local_path(Components, ?MODULE).
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/priv/skel/src/skel_sup.erl b/deps/mochiweb/priv/skel/src/skel_sup.erl
new file mode 100644
index 00000000..1add1903
--- /dev/null
+++ b/deps/mochiweb/priv/skel/src/skel_sup.erl
@@ -0,0 +1,62 @@
+%% @author author <author@example.com>
+%% @copyright YYYY author.
+
+%% @doc Supervisor for the skel application.
+
+-module(skel_sup).
+-author('author <author@example.com>').
+
+-behaviour(supervisor).
+
+%% External exports
+-export([start_link/0, upgrade/0]).
+
+%% supervisor callbacks
+-export([init/1]).
+
+%% @spec start_link() -> ServerRet
+%% @doc API for starting the supervisor.
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% @spec upgrade() -> ok
+%% @doc Reconcile running children with the current specs: terminate any
+%% that were removed and start any that are new.
+upgrade() ->
+ {ok, {_, Specs}} = init([]),
+
+ Old = sets:from_list(
+ [Name || {Name, _, _, _} <- supervisor:which_children(?MODULE)]),
+ New = sets:from_list([Name || {Name, _, _, _, _, _} <- Specs]),
+ Kill = sets:subtract(Old, New),
+
+ sets:fold(fun (Id, ok) ->
+ supervisor:terminate_child(?MODULE, Id),
+ supervisor:delete_child(?MODULE, Id),
+ ok
+ end, ok, Kill),
+
+ [supervisor:start_child(?MODULE, Spec) || Spec <- Specs],
+ ok.
+
+%% @spec init([]) -> SupervisorTree
+%% @doc supervisor callback.
+init([]) ->
+ Ip = case os:getenv("MOCHIWEB_IP") of false -> "0.0.0.0"; Any -> Any end,
+ WebConfig = [
+ {ip, Ip},
+ {port, 8000},
+ {docroot, skel_deps:local_path(["priv", "www"])}],
+ Web = {skel_web,
+ {skel_web, start, [WebConfig]},
+ permanent, 5000, worker, dynamic},
+
+ Processes = [Web],
+ {ok, {{one_for_one, 10, 10}, Processes}}.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/priv/skel/src/skel_web.erl b/deps/mochiweb/priv/skel/src/skel_web.erl
new file mode 100644
index 00000000..67064cc5
--- /dev/null
+++ b/deps/mochiweb/priv/skel/src/skel_web.erl
@@ -0,0 +1,51 @@
+%% @author author <author@example.com>
+%% @copyright YYYY author.
+
+%% @doc Web server for skel.
+
+-module(skel_web).
+-author('author <author@example.com>').
+
+-export([start/1, stop/0, loop/2]).
+
+%% External API
+
+start(Options) ->
+ {DocRoot, Options1} = get_option(docroot, Options),
+ Loop = fun (Req) ->
+ ?MODULE:loop(Req, DocRoot)
+ end,
+ mochiweb_http:start([{name, ?MODULE}, {loop, Loop} | Options1]).
+
+stop() ->
+ mochiweb_http:stop(?MODULE).
+
+loop(Req, DocRoot) ->
+ "/" ++ Path = Req:get(path),
+ case Req:get(method) of
+ Method when Method =:= 'GET'; Method =:= 'HEAD' ->
+ case Path of
+ _ ->
+ Req:serve_file(Path, DocRoot)
+ end;
+ 'POST' ->
+ case Path of
+ _ ->
+ Req:not_found()
+ end;
+ _ ->
+ Req:respond({501, [], []})
+ end.
+
+%% Internal API
+
+get_option(Option, Options) ->
+ {proplists:get_value(Option, Options), proplists:delete(Option, Options)}.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/priv/skel/start-dev.sh b/deps/mochiweb/priv/skel/start-dev.sh
new file mode 100755
index 00000000..f684cb07
--- /dev/null
+++ b/deps/mochiweb/priv/skel/start-dev.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+cd `dirname $0`
+
+MAKE=make
+case `uname` in
+*BSD)
+ MAKE=gmake
+ ;;
+esac
+
+"${MAKE}"
+exec erl -pa $PWD/ebin $PWD/deps/*/ebin -boot start_sasl -s reloader -s skel
diff --git a/deps/mochiweb/priv/skel/start.sh b/deps/mochiweb/priv/skel/start.sh
new file mode 100755
index 00000000..599f8e67
--- /dev/null
+++ b/deps/mochiweb/priv/skel/start.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+cd `dirname $0`
+exec erl -pa $PWD/ebin $PWD/deps/*/ebin -boot start_sasl -s skel
diff --git a/deps/mochiweb/priv/skel/support/include.mk b/deps/mochiweb/priv/skel/support/include.mk
new file mode 100644
index 00000000..bcba2142
--- /dev/null
+++ b/deps/mochiweb/priv/skel/support/include.mk
@@ -0,0 +1,40 @@
+## -*- makefile -*-
+
+######################################################################
+## Erlang
+
+ERL := erl
+ERLC := $(ERL)c
+
+INCLUDE_DIRS := ../include $(wildcard ../deps/*/include)
+EBIN_DIRS := $(wildcard ../deps/*/ebin)
+ERLC_FLAGS := -W $(INCLUDE_DIRS:../%=-I ../%) $(EBIN_DIRS:%=-pa %)
+
+ifndef no_debug_info
+ ERLC_FLAGS += +debug_info
+endif
+
+ifdef debug
+ ERLC_FLAGS += -Ddebug
+endif
+
+EBIN_DIR := ../ebin
+TEST_DIR := ../_test
+EMULATOR := beam
+
+ERL_SOURCES := $(wildcard *.erl)
+ERL_HEADERS := $(wildcard *.hrl) $(wildcard ../include/*.hrl)
+ERL_OBJECTS := $(ERL_SOURCES:%.erl=$(EBIN_DIR)/%.$(EMULATOR))
+ERL_OBJECTS_LOCAL := $(ERL_SOURCES:%.erl=./%.$(EMULATOR))
+APP_FILES := $(wildcard *.app)
+EBIN_FILES = $(ERL_OBJECTS) $(APP_FILES:%.app=../ebin/%.app)
+MODULES = $(ERL_SOURCES:%.erl=%)
+
+../ebin/%.app: %.app
+ cp $< $@
+
+$(EBIN_DIR)/%.$(EMULATOR): %.erl
+ $(ERLC) $(ERLC_FLAGS) -o $(EBIN_DIR) $<
+
+./%.$(EMULATOR): %.erl
+ $(ERLC) $(ERLC_FLAGS) -o . $<
diff --git a/deps/mochiweb/priv/skel/support/run_tests.escript b/deps/mochiweb/priv/skel/support/run_tests.escript
new file mode 100755
index 00000000..ff49c064
--- /dev/null
+++ b/deps/mochiweb/priv/skel/support/run_tests.escript
@@ -0,0 +1,94 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+%%! -name mochiweb__test@127.0.0.1
+main([Ebin]) ->
+ code:add_path(Ebin),
+ code:add_paths(filelib:wildcard("../deps/*/ebin", Ebin)),
+ code:add_paths(filelib:wildcard("../deps/*/deps/*/ebin", Ebin)),
+
+ ModuleNames = [hd(string:tokens(M, "."))
+ || "../src/" ++ M <- filelib:wildcard("../src/*.erl")],
+
+ {ok, NonTestRe} = re:compile("_tests$"),
+ Modules = [list_to_atom(M) ||
+ M <- lists:filter(
+ fun(M) ->
+ nomatch == re:run(M, NonTestRe)
+ end,
+ ModuleNames)],
+
+
+ crypto:start(),
+ start_cover(Modules),
+ eunit:test(Modules, [verbose,{report,{eunit_surefire,[{dir,"../_test"}]}}]),
+ analyze_cover(Modules);
+main(_) ->
+ io:format("usage: run_tests.escript EBIN_DIR~n"),
+ halt(1).
+
+start_cover(Modules) ->
+ {ok, _Cover} = cover:start(),
+ io:format("Cover compiling...~n"),
+ Compiled = [ M || {ok, M} <- [ cover:compile(
+ M,
+ [{i, "include"}
+ ])
+ || M <- Modules ] ],
+ case length(Modules) == length(Compiled) of
+ true -> ok;
+ false ->
+ io:format("Warning: the following modules were not"
+ " cover-compiled:~n ~p~n", [Compiled])
+ end.
+
+analyze_cover(Modules) ->
+ io:format("Analyzing cover...~n"),
+ CoverBase = filename:join(["..", "_test", "cover"]),
+ ok = filelib:ensure_dir(filename:join([CoverBase, "fake"])),
+ Coverages = lists:foldl(
+ fun(M, Acc) ->
+ [analyze_module(CoverBase, M)|Acc]
+ end,
+ [], Modules),
+ IndexFilename = filename:join([CoverBase, "index.html"]),
+ {ok, Index} = file:open(IndexFilename, [write]),
+ {LineTotal, CoverTotal} =
+ lists:foldl(fun({_,_,Lines,Covered}, {LineAcc, CovAcc}) ->
+ {LineAcc+Lines, CovAcc+Covered}
+ end, {0,0}, Coverages),
+ file:write(Index,
+ "<html><head><title>Coverage</title></head>\n"
+ "<body><h1>Coverage</h1><ul>\n"),
+ file:write(Index,
+ io_lib:format("<h2>Total: ~.2f%</h2>\n",
+ [percentage(CoverTotal, LineTotal)])),
+ [ file:write(Index,
+ io_lib:format(
+ "<li><a href=\"~s\">~p</a>: ~.2f%</li>~n",
+ [Filename, Module, percentage(Covered, Lines)]))
+ || {Filename, Module, Lines, Covered} <- Coverages ],
+ file:write(Index,"</ul></body></html>"),
+ file:close(Index),
+ io:format("Cover analysis in ~s~n", [IndexFilename]).
+
+analyze_module(CoverBase, Module) ->
+ {ok, Filename} =
+ cover:analyze_to_file(
+ Module,
+ filename:join(CoverBase, atom_to_list(Module)++".COVER.html"),
+ [html]),
+ Lines = count_lines(Filename, "[[:digit:]]\.\.|"),
+ Covered = count_lines(Filename, "[[:space:]]0\.\.|"),
+ {filename:basename(Filename), Module, Lines, Lines-Covered}.
+
+count_lines(Filename, Pattern) ->
+ {ok, [Lines],_} = io_lib:fread(
+ "~d",
+ os:cmd(io_lib:format("grep -e \"~s\" ~s | wc -l",
+ [Pattern, Filename]))),
+ Lines.
+
+percentage(_, 0) -> 1000.0;
+percentage(Part, Total) ->
+ (Part/Total)*100.
+
diff --git a/deps/mochiweb/scripts/new_mochiweb.erl b/deps/mochiweb/scripts/new_mochiweb.erl
new file mode 100755
index 00000000..ed1147a7
--- /dev/null
+++ b/deps/mochiweb/scripts/new_mochiweb.erl
@@ -0,0 +1,37 @@
+#!/usr/bin/env escript
+%% -*- mode: erlang -*-
+-export([main/1]).
+
+%% External API
+
+main([Name]) ->
+ main([Name, "."]);
+main([Name, Dest]) ->
+ ensure(),
+ DestDir = filename:absname(Dest),
+ case code:which(mochiweb_skel) of
+ non_existing ->
+ io:format("mochiweb not compiled, running make~n"),
+ os:cmd("(cd \"" ++ filename:dirname(escript:script_name())
+ ++ "/..\"; make)"),
+ ensure(),
+ code:rehash();
+ _ ->
+ ok
+ end,
+ ok = mochiweb_skel:skelcopy(DestDir, Name);
+main(_) ->
+ usage().
+
+%% Internal API
+
+ensure() ->
+ code:add_patha(filename:join(filename:dirname(escript:script_name()),
+ "../ebin")).
+
+usage() ->
+ io:format("usage: ~s name [destdir]~n",
+ [filename:basename(escript:script_name())]),
+ halt(1).
+
+
diff --git a/deps/mochiweb/src/Makefile b/deps/mochiweb/src/Makefile
new file mode 100644
index 00000000..12d9885b
--- /dev/null
+++ b/deps/mochiweb/src/Makefile
@@ -0,0 +1,33 @@
+include ../support/include.mk
+
+APPLICATION=mochiweb
+DOC_OPTS={dir,\"../doc\"}
+TEST_PLT=$(TEST_DIR)/dialyzer_plt
+
+all: $(EBIN_FILES)
+
+debug:
+ $(MAKE) DEBUG=-DDEBUG
+
+clean:
+ rm -rf $(EBIN_FILES)
+
+edoc:
+ $(ERL) -noshell -pa ../ebin \
+ -eval "edoc:application($(APPLICATION), \".\", [$(DOC_OPTS)])" \
+ -s init stop
+
+test: $(EBIN_FILES)
+ mkdir -p $(TEST_DIR);
+ @../support/run_tests.escript $(EBIN_DIR) | tee $(TEST_DIR)/test.log
+
+$(TEST_PLT):
+ mkdir -p $(TEST_DIR)
+ cp $(DIALYZER_PLT) $(TEST_PLT)
+ dialyzer --plt $(TEST_PLT) --add_to_plt
+
+clean_plt:
+ rm $(TEST_PLT)
+
+dialyzer: $(TEST_PLT)
+ dialyzer --src --plt $(TEST_PLT) -DNOTEST -DDIALYZER -c ../src | tee $(TEST_DIR)/dialyzer.log
\ No newline at end of file
diff --git a/deps/mochiweb/src/internal.hrl b/deps/mochiweb/src/internal.hrl
new file mode 100644
index 00000000..6db899a0
--- /dev/null
+++ b/deps/mochiweb/src/internal.hrl
@@ -0,0 +1,3 @@
+
+-define(RECBUF_SIZE, 8192).
+
diff --git a/deps/mochiweb/src/mochifmt.erl b/deps/mochiweb/src/mochifmt.erl
new file mode 100644
index 00000000..5bc6b9c4
--- /dev/null
+++ b/deps/mochiweb/src/mochifmt.erl
@@ -0,0 +1,425 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2008 Mochi Media, Inc.
+
+%% @doc String Formatting for Erlang, inspired by Python 2.6
+%% (<a href="http://www.python.org/dev/peps/pep-3101/">PEP 3101</a>).
+%%
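+%% A couple of illustrative calls (tuple arguments are indexed from 0):
+%%
+%%   mochifmt:f("{0} + {1} = {2}", {1, 2, 3})     %% "1 + 2 = 3"
+%%   mochifmt:bformat("hello {0}", {"world"})     %% <<"hello world">>
+%%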
+-module(mochifmt).
+-author('bob@mochimedia.com').
+-export([format/2, format_field/2, convert_field/2, get_value/2, get_field/2]).
+-export([tokenize/1, format/3, get_field/3, format_field/3]).
+-export([bformat/2, bformat/3]).
+-export([f/2, f/3]).
+
+-record(conversion, {length, precision, ctype, align, fill_char, sign}).
+
+%% @spec tokenize(S::string()) -> tokens()
+%% @doc Tokenize a format string into mochifmt's internal format.
+tokenize(S) ->
+ {?MODULE, tokenize(S, "", [])}.
+
+%% @spec convert_field(Arg, Conversion::conversion()) -> term()
+%% @doc Process Arg according to the given explicit conversion specifier.
+convert_field(Arg, "") ->
+ Arg;
+convert_field(Arg, "r") ->
+ repr(Arg);
+convert_field(Arg, "s") ->
+ str(Arg).
+
+%% @spec get_value(Key::string(), Args::args()) -> term()
+%% @doc Get the Key from Args. If Args is a tuple then convert Key to
+%% an integer and get element(1 + Key, Args). If Args is a list and Key
+%% can be parsed as an integer then use lists:nth(1 + Key, Args),
+%% otherwise try to look up Key in Args as a proplist, converting
+%% Key to an atom or binary if necessary.
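+%% Illustrative examples: get_value("1", {a, b, c}) returns b, and
+%% get_value("0", [x, y]) returns x.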
+get_value(Key, Args) when is_tuple(Args) ->
+ element(1 + list_to_integer(Key), Args);
+get_value(Key, Args) when is_list(Args) ->
+ try lists:nth(1 + list_to_integer(Key), Args)
+ catch error:_ ->
+ {_K, V} = proplist_lookup(Key, Args),
+ V
+ end.
+
+%% @spec get_field(Key::string(), Args) -> term()
+%% @doc Consecutively call get_value/2 on parts of Key delimited by ".",
+%% replacing Args with the result of the previous get_value. This
+%% is used to implement formats such as {0.0}.
+get_field(Key, Args) ->
+ get_field(Key, Args, ?MODULE).
+
+%% @spec get_field(Key::string(), Args, Module) -> term()
+%% @doc Consecutively call Module:get_value/2 on parts of Key delimited by ".",
+%% replacing Args with the result of the previous get_value. This
+%% is used to implement formats such as {0.0}.
+get_field(Key, Args, Module) ->
+ {Name, Next} = lists:splitwith(fun (C) -> C =/= $. end, Key),
+ Res = try Module:get_value(Name, Args)
+ catch error:undef -> get_value(Name, Args) end,
+ case Next of
+ "" ->
+ Res;
+ "." ++ S1 ->
+ get_field(S1, Res, Module)
+ end.
+
+%% @spec format(Format::string(), Args) -> iolist()
+%% @doc Format Args with Format.
+format(Format, Args) ->
+ format(Format, Args, ?MODULE).
+
+%% @spec format(Format::string(), Args, Module) -> iolist()
+%% @doc Format Args with Format using Module.
+format({?MODULE, Parts}, Args, Module) ->
+ format2(Parts, Args, Module, []);
+format(S, Args, Module) ->
+ format(tokenize(S), Args, Module).
+
+%% @spec format_field(Arg, Format) -> iolist()
+%% @doc Format Arg with Format.
+format_field(Arg, Format) ->
+ format_field(Arg, Format, ?MODULE).
+
+%% @spec format_field(Arg, Format, _Module) -> iolist()
+%% @doc Format Arg with Format.
+format_field(Arg, Format, _Module) ->
+ F = default_ctype(Arg, parse_std_conversion(Format)),
+ fix_padding(fix_sign(convert2(Arg, F), F), F).
+
+%% @spec f(Format::string(), Args) -> string()
+%% @doc Format Args with Format and return a string().
+f(Format, Args) ->
+ f(Format, Args, ?MODULE).
+
+%% @spec f(Format::string(), Args, Module) -> string()
+%% @doc Format Args with Format using Module and return a string().
+f(Format, Args, Module) ->
+ case lists:member(${, Format) of
+ true ->
+ binary_to_list(bformat(Format, Args, Module));
+ false ->
+ Format
+ end.
+
+%% @spec bformat(Format::string(), Args) -> binary()
+%% @doc Format Args with Format and return a binary().
+bformat(Format, Args) ->
+ iolist_to_binary(format(Format, Args)).
+
+%% @spec bformat(Format::string(), Args, Module) -> binary()
+%% @doc Format Args with Format using Module and return a binary().
+bformat(Format, Args, Module) ->
+ iolist_to_binary(format(Format, Args, Module)).
+
+%% Internal API
+
+add_raw("", Acc) ->
+ Acc;
+add_raw(S, Acc) ->
+ [{raw, lists:reverse(S)} | Acc].
+
+tokenize([], S, Acc) ->
+ lists:reverse(add_raw(S, Acc));
+tokenize("{{" ++ Rest, S, Acc) ->
+ tokenize(Rest, "{" ++ S, Acc);
+tokenize("{" ++ Rest, S, Acc) ->
+ {Format, Rest1} = tokenize_format(Rest),
+ tokenize(Rest1, "", [{format, make_format(Format)} | add_raw(S, Acc)]);
+tokenize("}}" ++ Rest, S, Acc) ->
+ tokenize(Rest, "}" ++ S, Acc);
+tokenize([C | Rest], S, Acc) ->
+ tokenize(Rest, [C | S], Acc).
+
+tokenize_format(S) ->
+ tokenize_format(S, 1, []).
+
+tokenize_format("}" ++ Rest, 1, Acc) ->
+ {lists:reverse(Acc), Rest};
+tokenize_format("}" ++ Rest, N, Acc) ->
+ tokenize_format(Rest, N - 1, "}" ++ Acc);
+tokenize_format("{" ++ Rest, N, Acc) ->
+ tokenize_format(Rest, 1 + N, "{" ++ Acc);
+tokenize_format([C | Rest], N, Acc) ->
+ tokenize_format(Rest, N, [C | Acc]).
+
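+%% Split a format field into {Name, Transform, Spec}, where Transform
+%% follows "!" and Spec follows ":", e.g. (illustrative)
+%% make_format("0!r:>10") -> {"0", "r", ">10"}.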
+make_format(S) ->
+ {Name0, Spec} = case lists:splitwith(fun (C) -> C =/= $: end, S) of
+ {_, ""} ->
+ {S, ""};
+ {SN, ":" ++ SS} ->
+ {SN, SS}
+ end,
+ {Name, Transform} = case lists:splitwith(fun (C) -> C =/= $! end, Name0) of
+ {_, ""} ->
+ {Name0, ""};
+ {TN, "!" ++ TT} ->
+ {TN, TT}
+ end,
+ {Name, Transform, Spec}.
+
+proplist_lookup(S, P) ->
+ A = try list_to_existing_atom(S)
+ catch error:_ -> make_ref() end,
+ B = try list_to_binary(S)
+ catch error:_ -> make_ref() end,
+ proplist_lookup2({S, A, B}, P).
+
+proplist_lookup2({KS, KA, KB}, [{K, V} | _])
+ when KS =:= K orelse KA =:= K orelse KB =:= K ->
+ {K, V};
+proplist_lookup2(Keys, [_ | Rest]) ->
+ proplist_lookup2(Keys, Rest).
+
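+%% Render the tokenized parts. A custom Module may override get_field,
+%% convert_field and format_field; each call falls back to the defaults
+%% in this module when the callback is undefined.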
+format2([], _Args, _Module, Acc) ->
+ lists:reverse(Acc);
+format2([{raw, S} | Rest], Args, Module, Acc) ->
+ format2(Rest, Args, Module, [S | Acc]);
+format2([{format, {Key, Convert, Format0}} | Rest], Args, Module, Acc) ->
+ Format = f(Format0, Args, Module),
+ V = case Module of
+ ?MODULE ->
+ V0 = get_field(Key, Args),
+ V1 = convert_field(V0, Convert),
+ format_field(V1, Format);
+ _ ->
+ V0 = try Module:get_field(Key, Args)
+ catch error:undef -> get_field(Key, Args, Module) end,
+ V1 = try Module:convert_field(V0, Convert)
+ catch error:undef -> convert_field(V0, Convert) end,
+ try Module:format_field(V1, Format)
+ catch error:undef -> format_field(V1, Format, Module) end
+ end,
+ format2(Rest, Args, Module, [V | Acc]).
+
+default_ctype(_Arg, C=#conversion{ctype=N}) when N =/= undefined ->
+ C;
+default_ctype(Arg, C) when is_integer(Arg) ->
+ C#conversion{ctype=decimal};
+default_ctype(Arg, C) when is_float(Arg) ->
+ C#conversion{ctype=general};
+default_ctype(_Arg, C) ->
+ C#conversion{ctype=string}.
+
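+%% Pad Arg out to the requested field width. The sign_right alignment
+%% (selected by "=" or a "0"-prefixed width) keeps a leading sign in
+%% front of the fill characters.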
+fix_padding(Arg, #conversion{length=undefined}) ->
+ Arg;
+fix_padding(Arg, F=#conversion{length=Length, fill_char=Fill0, align=Align0,
+ ctype=Type}) ->
+ Padding = Length - iolist_size(Arg),
+ Fill = case Fill0 of
+ undefined ->
+ $\s;
+ _ ->
+ Fill0
+ end,
+ Align = case Align0 of
+ undefined ->
+ case Type of
+ string ->
+ left;
+ _ ->
+ right
+ end;
+ _ ->
+ Align0
+ end,
+ case Padding > 0 of
+ true ->
+ do_padding(Arg, Padding, Fill, Align, F);
+ false ->
+ Arg
+ end.
+
+do_padding(Arg, Padding, Fill, right, _F) ->
+ [lists:duplicate(Padding, Fill), Arg];
+do_padding(Arg, Padding, Fill, center, _F) ->
+ LPadding = lists:duplicate(Padding div 2, Fill),
+ RPadding = case Padding band 1 of
+ 1 ->
+ [Fill | LPadding];
+ _ ->
+ LPadding
+ end,
+ [LPadding, Arg, RPadding];
+do_padding([$- | Arg], Padding, Fill, sign_right, _F) ->
+ [[$- | lists:duplicate(Padding, Fill)], Arg];
+do_padding(Arg, Padding, Fill, sign_right, #conversion{sign=$-}) ->
+ [lists:duplicate(Padding, Fill), Arg];
+do_padding([S | Arg], Padding, Fill, sign_right, #conversion{sign=S}) ->
+ [[S | lists:duplicate(Padding, Fill)], Arg];
+do_padding(Arg, Padding, Fill, sign_right, #conversion{sign=undefined}) ->
+ [lists:duplicate(Padding, Fill), Arg];
+do_padding(Arg, Padding, Fill, left, _F) ->
+ [Arg | lists:duplicate(Padding, Fill)].
+
+fix_sign(Arg, #conversion{sign=$+}) when Arg >= 0 ->
+ [$+, Arg];
+fix_sign(Arg, #conversion{sign=$\s}) when Arg >= 0 ->
+ [$\s, Arg];
+fix_sign(Arg, _F) ->
+ Arg.
+
+ctype($\%) -> percent;
+ctype($s) -> string;
+ctype($b) -> bin;
+ctype($o) -> oct;
+ctype($X) -> upper_hex;
+ctype($x) -> hex;
+ctype($c) -> char;
+ctype($d) -> decimal;
+ctype($g) -> general;
+ctype($f) -> fixed;
+ctype($e) -> exp.
+
+align($<) -> left;
+align($>) -> right;
+align($^) -> center;
+align($=) -> sign_right.
+
+convert2(Arg, F=#conversion{ctype=percent}) ->
+ [convert2(100.0 * Arg, F#conversion{ctype=fixed}), $\%];
+convert2(Arg, #conversion{ctype=string}) ->
+ str(Arg);
+convert2(Arg, #conversion{ctype=bin}) ->
+ erlang:integer_to_list(Arg, 2);
+convert2(Arg, #conversion{ctype=oct}) ->
+ erlang:integer_to_list(Arg, 8);
+convert2(Arg, #conversion{ctype=upper_hex}) ->
+ erlang:integer_to_list(Arg, 16);
+convert2(Arg, #conversion{ctype=hex}) ->
+ string:to_lower(erlang:integer_to_list(Arg, 16));
+convert2(Arg, #conversion{ctype=char}) when Arg < 16#80 ->
+ [Arg];
+convert2(Arg, #conversion{ctype=char}) ->
+ xmerl_ucs:to_utf8(Arg);
+convert2(Arg, #conversion{ctype=decimal}) ->
+ integer_to_list(Arg);
+convert2(Arg, #conversion{ctype=general, precision=undefined}) ->
+ try mochinum:digits(Arg)
+ catch error:undef -> io_lib:format("~g", [Arg]) end;
+convert2(Arg, #conversion{ctype=fixed, precision=undefined}) ->
+ io_lib:format("~f", [Arg]);
+convert2(Arg, #conversion{ctype=exp, precision=undefined}) ->
+ io_lib:format("~e", [Arg]);
+convert2(Arg, #conversion{ctype=general, precision=P}) ->
+ io_lib:format("~." ++ integer_to_list(P) ++ "g", [Arg]);
+convert2(Arg, #conversion{ctype=fixed, precision=P}) ->
+ io_lib:format("~." ++ integer_to_list(P) ++ "f", [Arg]);
+convert2(Arg, #conversion{ctype=exp, precision=P}) ->
+ io_lib:format("~." ++ integer_to_list(P) ++ "e", [Arg]).
+
+str(A) when is_atom(A) ->
+ atom_to_list(A);
+str(I) when is_integer(I) ->
+ integer_to_list(I);
+str(F) when is_float(F) ->
+ try mochinum:digits(F)
+ catch error:undef -> io_lib:format("~g", [F]) end;
+str(L) when is_list(L) ->
+ L;
+str(B) when is_binary(B) ->
+ B;
+str(P) ->
+ repr(P).
+
+repr(P) when is_float(P) ->
+ try mochinum:digits(P)
+ catch error:undef -> float_to_list(P) end;
+repr(P) ->
+ io_lib:format("~p", [P]).
+
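+%% Parse a Python-style conversion spec of the form
+%% [[fill]align][sign]["0"][width]["." precision][type],
+%% e.g. (illustrative) "0>4x" yields fill $0, align right, length 4
+%% and ctype hex.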
+parse_std_conversion(S) ->
+ parse_std_conversion(S, #conversion{}).
+
+parse_std_conversion("", Acc) ->
+ Acc;
+parse_std_conversion([Fill, Align | Spec], Acc)
+ when Align =:= $< orelse Align =:= $> orelse Align =:= $= orelse Align =:= $^ ->
+ parse_std_conversion(Spec, Acc#conversion{fill_char=Fill,
+ align=align(Align)});
+parse_std_conversion([Align | Spec], Acc)
+ when Align =:= $< orelse Align =:= $> orelse Align =:= $= orelse Align =:= $^ ->
+ parse_std_conversion(Spec, Acc#conversion{align=align(Align)});
+parse_std_conversion([Sign | Spec], Acc)
+ when Sign =:= $+ orelse Sign =:= $- orelse Sign =:= $\s ->
+ parse_std_conversion(Spec, Acc#conversion{sign=Sign});
+parse_std_conversion("0" ++ Spec, Acc) ->
+ Align = case Acc#conversion.align of
+ undefined ->
+ sign_right;
+ A ->
+ A
+ end,
+ parse_std_conversion(Spec, Acc#conversion{fill_char=$0, align=Align});
+parse_std_conversion(Spec=[D|_], Acc) when D >= $0 andalso D =< $9 ->
+ {W, Spec1} = lists:splitwith(fun (C) -> C >= $0 andalso C =< $9 end, Spec),
+ parse_std_conversion(Spec1, Acc#conversion{length=list_to_integer(W)});
+parse_std_conversion([$. | Spec], Acc) ->
+ case lists:splitwith(fun (C) -> C >= $0 andalso C =< $9 end, Spec) of
+ {"", Spec1} ->
+ parse_std_conversion(Spec1, Acc);
+ {P, Spec1} ->
+ parse_std_conversion(Spec1,
+ Acc#conversion{precision=list_to_integer(P)})
+ end;
+parse_std_conversion([Type], Acc) ->
+ parse_std_conversion("", Acc#conversion{ctype=ctype(Type)}).
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+tokenize_test() ->
+ {?MODULE, [{raw, "ABC"}]} = tokenize("ABC"),
+ {?MODULE, [{format, {"0", "", ""}}]} = tokenize("{0}"),
+ {?MODULE, [{raw, "ABC"}, {format, {"1", "", ""}}, {raw, "DEF"}]} =
+ tokenize("ABC{1}DEF"),
+ ok.
+
+format_test() ->
+ <<" -4">> = bformat("{0:4}", [-4]),
+ <<" 4">> = bformat("{0:4}", [4]),
+ <<" 4">> = bformat("{0:{0}}", [4]),
+ <<"4 ">> = bformat("{0:4}", ["4"]),
+ <<"4 ">> = bformat("{0:{0}}", ["4"]),
+ <<"1.2yoDEF">> = bformat("{2}{0}{1}{3}", {yo, "DE", 1.2, <<"F">>}),
+ <<"cafebabe">> = bformat("{0:x}", {16#cafebabe}),
+ <<"CAFEBABE">> = bformat("{0:X}", {16#cafebabe}),
+ <<"CAFEBABE">> = bformat("{0:X}", {16#cafebabe}),
+ <<"755">> = bformat("{0:o}", {8#755}),
+ <<"a">> = bformat("{0:c}", {97}),
+ %% Horizontal ellipsis
+ <<226, 128, 166>> = bformat("{0:c}", {16#2026}),
+ <<"11">> = bformat("{0:b}", {3}),
+ <<"11">> = bformat("{0:b}", [3]),
+ <<"11">> = bformat("{three:b}", [{three, 3}]),
+ <<"11">> = bformat("{three:b}", [{"three", 3}]),
+ <<"11">> = bformat("{three:b}", [{<<"three">>, 3}]),
+ <<"\"foo\"">> = bformat("{0!r}", {"foo"}),
+ <<"2008-5-4">> = bformat("{0.0}-{0.1}-{0.2}", {{2008,5,4}}),
+ <<"2008-05-04">> = bformat("{0.0:04}-{0.1:02}-{0.2:02}", {{2008,5,4}}),
+ <<"foo6bar-6">> = bformat("foo{1}{0}-{1}", {bar, 6}),
+ <<"-'atom test'-">> = bformat("-{arg!r}-", [{arg, 'atom test'}]),
+ <<"2008-05-04">> = bformat("{0.0:0{1.0}}-{0.1:0{1.1}}-{0.2:0{1.2}}",
+ {{2008,5,4}, {4, 2, 2}}),
+ ok.
+
+std_test() ->
+ M = mochifmt_std:new(),
+ <<"01">> = bformat("{0}{1}", [0, 1], M),
+ ok.
+
+records_test() ->
+ M = mochifmt_records:new([{conversion, record_info(fields, conversion)}]),
+ R = #conversion{length=long, precision=hard, sign=peace},
+ long = M:get_value("length", R),
+ hard = M:get_value("precision", R),
+ peace = M:get_value("sign", R),
+ <<"long hard">> = bformat("{length} {precision}", R, M),
+ <<"long hard">> = bformat("{0.length} {0.precision}", [R], M),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochifmt_records.erl b/deps/mochiweb/src/mochifmt_records.erl
new file mode 100644
index 00000000..2326d1dd
--- /dev/null
+++ b/deps/mochiweb/src/mochifmt_records.erl
@@ -0,0 +1,38 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2008 Mochi Media, Inc.
+
+%% @doc Formatter that understands records.
+%%
+%% Usage:
+%%
+%% 1> M = mochifmt_records:new([{rec, record_info(fields, rec)}]),
+%% M:format("{0.bar}", [#rec{bar=foo}]).
+%% foo
+
+-module(mochifmt_records, [Recs]).
+-author('bob@mochimedia.com').
+-export([get_value/2]).
+
+get_value(Key, Rec) when is_tuple(Rec) and is_atom(element(1, Rec)) ->
+ try begin
+ Atom = list_to_existing_atom(Key),
+ {_, Fields} = proplists:lookup(element(1, Rec), Recs),
+ element(get_rec_index(Atom, Fields, 2), Rec)
+ end
+ catch error:_ -> mochifmt:get_value(Key, Rec)
+ end;
+get_value(Key, Args) ->
+ mochifmt:get_value(Key, Args).
+
+get_rec_index(Atom, [Atom | _], Index) ->
+ Index;
+get_rec_index(Atom, [_ | Rest], Index) ->
+ get_rec_index(Atom, Rest, 1 + Index).
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/src/mochifmt_std.erl b/deps/mochiweb/src/mochifmt_std.erl
new file mode 100644
index 00000000..d4d74f6f
--- /dev/null
+++ b/deps/mochiweb/src/mochifmt_std.erl
@@ -0,0 +1,30 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2008 Mochi Media, Inc.
+
+%% @doc Template module for a mochifmt formatter.
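+%%
+%% Usage (illustrative):
+%%
+%% 1> M = mochifmt_std:new(),
+%% mochifmt:bformat("{0}{1}", [0, 1], M).
+%% <<"01">>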
+
+-module(mochifmt_std, []).
+-author('bob@mochimedia.com').
+-export([format/2, get_value/2, format_field/2, get_field/2, convert_field/2]).
+
+format(Format, Args) ->
+ mochifmt:format(Format, Args, THIS).
+
+get_field(Key, Args) ->
+ mochifmt:get_field(Key, Args, THIS).
+
+convert_field(Key, Args) ->
+ mochifmt:convert_field(Key, Args).
+
+get_value(Key, Args) ->
+ mochifmt:get_value(Key, Args).
+
+format_field(Arg, Format) ->
+ mochifmt:format_field(Arg, Format, THIS).
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/src/mochiglobal.erl b/deps/mochiweb/src/mochiglobal.erl
new file mode 100644
index 00000000..c740b878
--- /dev/null
+++ b/deps/mochiweb/src/mochiglobal.erl
@@ -0,0 +1,107 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+%% @doc Abuse module constant pools as a "read-only shared heap" (since erts 5.6)
+%% <a href="http://www.erlang.org/pipermail/erlang-questions/2009-March/042503.html">[1]</a>.
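+%%
+%% Example (illustrative; key and value are arbitrary):
+%%
+%% 1> mochiglobal:put(config, [{port, 8080}]),
+%% mochiglobal:get(config).
+%% [{port,8080}]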
+-module(mochiglobal).
+-author("Bob Ippolito <bob@mochimedia.com>").
+-export([get/1, get/2, put/2, delete/1]).
+
+-spec get(atom()) -> any() | undefined.
+%% @equiv get(K, undefined)
+get(K) ->
+ get(K, undefined).
+
+-spec get(atom(), T) -> any() | T.
+%% @doc Get the term for K or return Default.
+get(K, Default) ->
+ get(K, Default, key_to_module(K)).
+
+get(_K, Default, Mod) ->
+ try Mod:term()
+ catch error:undef ->
+ Default
+ end.
+
+-spec put(atom(), any()) -> ok.
+%% @doc Store term V at K, replacing an existing term if present.
+put(K, V) ->
+ put(K, V, key_to_module(K)).
+
+put(_K, V, Mod) ->
+ Bin = compile(Mod, V),
+ code:purge(Mod),
+ code:load_binary(Mod, atom_to_list(Mod) ++ ".erl", Bin),
+ ok.
+
+-spec delete(atom()) -> boolean().
+%% @doc Delete term stored at K, no-op if non-existent.
+delete(K) ->
+ delete(K, key_to_module(K)).
+
+delete(_K, Mod) ->
+ code:purge(Mod),
+ code:delete(Mod).
+
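+%% Each key is backed by its own generated module, e.g. (illustrative)
+%% key_to_module(foo) -> 'mochiglobal:foo'.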
+-spec key_to_module(atom()) -> atom().
+key_to_module(K) ->
+ list_to_atom("mochiglobal:" ++ atom_to_list(K)).
+
+-spec compile(atom(), any()) -> binary().
+compile(Module, T) ->
+ {ok, Module, Bin} = compile:forms(forms(Module, T),
+ [verbose, report_errors]),
+ Bin.
+
+-spec forms(atom(), any()) -> [erl_syntax:syntaxTree()].
+forms(Module, T) ->
+ [erl_syntax:revert(X) || X <- term_to_abstract(Module, term, T)].
+
+-spec term_to_abstract(atom(), atom(), any()) -> [erl_syntax:syntaxTree()].
+term_to_abstract(Module, Getter, T) ->
+ [%% -module(Module).
+ erl_syntax:attribute(
+ erl_syntax:atom(module),
+ [erl_syntax:atom(Module)]),
+ %% -export([Getter/0]).
+ erl_syntax:attribute(
+ erl_syntax:atom(export),
+ [erl_syntax:list(
+ [erl_syntax:arity_qualifier(
+ erl_syntax:atom(Getter),
+ erl_syntax:integer(0))])]),
+ %% Getter() -> T.
+ erl_syntax:function(
+ erl_syntax:atom(Getter),
+ [erl_syntax:clause([], none, [erl_syntax:abstract(T)])])].
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+get_put_delete_test() ->
+ K = '$$test$$mochiglobal',
+ delete(K),
+ ?assertEqual(
+ bar,
+ get(K, bar)),
+ try
+ ?MODULE:put(K, baz),
+ ?assertEqual(
+ baz,
+ get(K, bar)),
+ ?MODULE:put(K, wibble),
+ ?assertEqual(
+ wibble,
+ ?MODULE:get(K))
+ after
+ delete(K)
+ end,
+ ?assertEqual(
+ bar,
+ get(K, bar)),
+ ?assertEqual(
+ undefined,
+ ?MODULE:get(K)),
+ ok.
+-endif.
diff --git a/deps/mochiweb/src/mochihex.erl b/deps/mochiweb/src/mochihex.erl
new file mode 100644
index 00000000..44a2aa7f
--- /dev/null
+++ b/deps/mochiweb/src/mochihex.erl
@@ -0,0 +1,91 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2006 Mochi Media, Inc.
+
+%% @doc Utilities for working with hexadecimal strings.
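+%%
+%% Example (illustrative):
+%%
+%% 1> mochihex:to_hex(16#deadbeef).
+%% "deadbeef"
+%% 2> mochihex:to_bin("ff00").
+%% <<255,0>>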
+
+-module(mochihex).
+-author('bob@mochimedia.com').
+
+-export([to_hex/1, to_bin/1, to_int/1, dehex/1, hexdigit/1]).
+
+%% @type iolist() = [char() | binary() | iolist()]
+%% @type iodata() = iolist() | binary()
+
+%% @spec to_hex(integer() | iolist()) -> string()
+%% @doc Convert an integer or iolist to a hexadecimal string.
+to_hex(0) ->
+ "0";
+to_hex(I) when is_integer(I), I > 0 ->
+ to_hex_int(I, []);
+to_hex(B) ->
+ to_hex(iolist_to_binary(B), []).
+
+%% @spec to_bin(string()) -> binary()
+%% @doc Convert a hexadecimal string to a binary.
+to_bin(L) ->
+ to_bin(L, []).
+
+%% @spec to_int(string()) -> integer()
+%% @doc Convert a hexadecimal string to an integer.
+to_int(L) ->
+ erlang:list_to_integer(L, 16).
+
+%% @spec dehex(char()) -> integer()
+%% @doc Convert a hex digit to its integer value.
+dehex(C) when C >= $0, C =< $9 ->
+ C - $0;
+dehex(C) when C >= $a, C =< $f ->
+ C - $a + 10;
+dehex(C) when C >= $A, C =< $F ->
+ C - $A + 10.
+
+%% @spec hexdigit(integer()) -> char()
+%% @doc Convert an integer less than 16 to a hex digit.
+hexdigit(C) when C >= 0, C =< 9 ->
+ C + $0;
+hexdigit(C) when C =< 15 ->
+ C + $a - 10.
+
+%% Internal API
+
+to_hex(<<>>, Acc) ->
+ lists:reverse(Acc);
+to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
+ to_hex(Rest, [hexdigit(C2), hexdigit(C1) | Acc]).
+
+to_hex_int(0, Acc) ->
+ Acc;
+to_hex_int(I, Acc) ->
+ to_hex_int(I bsr 4, [hexdigit(I band 15) | Acc]).
+
+to_bin([], Acc) ->
+ iolist_to_binary(lists:reverse(Acc));
+to_bin([C1, C2 | Rest], Acc) ->
+ to_bin(Rest, [(dehex(C1) bsl 4) bor dehex(C2) | Acc]).
+
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+to_hex_test() ->
+ "ff000ff1" = to_hex([255, 0, 15, 241]),
+ "ff000ff1" = to_hex(16#ff000ff1),
+ "0" = to_hex(16#0),
+ ok.
+
+to_bin_test() ->
+ <<255, 0, 15, 241>> = to_bin("ff000ff1"),
+ <<255, 0, 10, 161>> = to_bin("Ff000aA1"),
+ ok.
+
+to_int_test() ->
+ 16#ff000ff1 = to_int("ff000ff1"),
+ 16#ff000aa1 = to_int("FF000Aa1"),
+ 16#0 = to_int("0"),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochijson.erl b/deps/mochiweb/src/mochijson.erl
new file mode 100644
index 00000000..2e3d1452
--- /dev/null
+++ b/deps/mochiweb/src/mochijson.erl
@@ -0,0 +1,531 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2006 Mochi Media, Inc.
+
+%% @doc Yet another JSON (RFC 4627) library for Erlang.
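+%%
+%% Example (illustrative):
+%%
+%% 1> mochijson:decode("{\"foo\": [1, 2]}").
+%% {struct,[{"foo",{array,[1,2]}}]}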
+-module(mochijson).
+-author('bob@mochimedia.com').
+-export([encoder/1, encode/1]).
+-export([decoder/1, decode/1]).
+-export([binary_encoder/1, binary_encode/1]).
+-export([binary_decoder/1, binary_decode/1]).
+
+% This is a macro to placate syntax highlighters.
+-define(Q, $\").
+-define(ADV_COL(S, N), S#decoder{column=N+S#decoder.column}).
+-define(INC_COL(S), S#decoder{column=1+S#decoder.column}).
+-define(INC_LINE(S), S#decoder{column=1, line=1+S#decoder.line}).
+
+%% @type iolist() = [char() | binary() | iolist()]
+%% @type iodata() = iolist() | binary()
+%% @type json_string() = atom | string() | binary()
+%% @type json_number() = integer() | float()
+%% @type json_array() = {array, [json_term()]}
+%% @type json_object() = {struct, [{json_string(), json_term()}]}
+%% @type json_term() = json_string() | json_number() | json_array() |
+%% json_object()
+%% @type encoding() = utf8 | unicode
+%% @type encoder_option() = {input_encoding, encoding()} |
+%% {handler, function()}
+%% @type decoder_option() = {input_encoding, encoding()} |
+%% {object_hook, function()}
+%% @type bjson_string() = binary()
+%% @type bjson_number() = integer() | float()
+%% @type bjson_array() = [bjson_term()]
+%% @type bjson_object() = {struct, [{bjson_string(), bjson_term()}]}
+%% @type bjson_term() = bjson_string() | bjson_number() | bjson_array() |
+%% bjson_object()
+%% @type binary_encoder_option() = {handler, function()}
+%% @type binary_decoder_option() = {object_hook, function()}
+
+-record(encoder, {input_encoding=unicode,
+ handler=null}).
+
+-record(decoder, {input_encoding=utf8,
+ object_hook=null,
+ line=1,
+ column=1,
+ state=null}).
+
+%% @spec encoder([encoder_option()]) -> function()
+%% @doc Create an encoder/1 with the given options.
+encoder(Options) ->
+ State = parse_encoder_options(Options, #encoder{}),
+ fun (O) -> json_encode(O, State) end.
+
+%% @spec encode(json_term()) -> iolist()
+%% @doc Encode the given term as JSON to an iolist.
+encode(Any) ->
+ json_encode(Any, #encoder{}).
+
+%% @spec decoder([decoder_option()]) -> function()
+%% @doc Create a decoder/1 with the given options.
+decoder(Options) ->
+ State = parse_decoder_options(Options, #decoder{}),
+ fun (O) -> json_decode(O, State) end.
+
+%% @spec decode(iolist()) -> json_term()
+%% @doc Decode the given iolist to Erlang terms.
+decode(S) ->
+ json_decode(S, #decoder{}).
+
+%% @spec binary_decoder([binary_decoder_option()]) -> function()
+%% @doc Create a binary_decoder/1 with the given options.
+binary_decoder(Options) ->
+ mochijson2:decoder(Options).
+
+%% @spec binary_encoder([binary_encoder_option()]) -> function()
+%% @doc Create a binary_encoder/1 with the given options.
+binary_encoder(Options) ->
+ mochijson2:encoder(Options).
+
+%% @spec binary_encode(bjson_term()) -> iolist()
+%% @doc Encode the given term as JSON to an iolist, using lists for arrays and
+%% binaries for strings.
+binary_encode(Any) ->
+ mochijson2:encode(Any).
+
+%% @spec binary_decode(iolist()) -> bjson_term()
+%% @doc Decode the given iolist to Erlang terms, using lists for arrays and
+%% binaries for strings.
+binary_decode(S) ->
+ mochijson2:decode(S).
+
+%% Internal API
+
+parse_encoder_options([], State) ->
+ State;
+parse_encoder_options([{input_encoding, Encoding} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{input_encoding=Encoding});
+parse_encoder_options([{handler, Handler} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{handler=Handler}).
+
+parse_decoder_options([], State) ->
+ State;
+parse_decoder_options([{input_encoding, Encoding} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{input_encoding=Encoding});
+parse_decoder_options([{object_hook, Hook} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{object_hook=Hook}).
+
+json_encode(true, _State) ->
+ "true";
+json_encode(false, _State) ->
+ "false";
+json_encode(null, _State) ->
+ "null";
+json_encode(I, _State) when is_integer(I) ->
+ integer_to_list(I);
+json_encode(F, _State) when is_float(F) ->
+ mochinum:digits(F);
+json_encode(L, State) when is_list(L); is_binary(L); is_atom(L) ->
+ json_encode_string(L, State);
+json_encode({array, Props}, State) when is_list(Props) ->
+ json_encode_array(Props, State);
+json_encode({struct, Props}, State) when is_list(Props) ->
+ json_encode_proplist(Props, State);
+json_encode(Bad, #encoder{handler=null}) ->
+ exit({json_encode, {bad_term, Bad}});
+json_encode(Bad, State=#encoder{handler=Handler}) ->
+ json_encode(Handler(Bad), State).
+
+json_encode_array([], _State) ->
+ "[]";
+json_encode_array(L, State) ->
+ F = fun (O, Acc) ->
+ [$,, json_encode(O, State) | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "[", L),
+ lists:reverse([$\] | Acc1]).
+
+json_encode_proplist([], _State) ->
+ "{}";
+json_encode_proplist(Props, State) ->
+ F = fun ({K, V}, Acc) ->
+ KS = case K of
+ K when is_atom(K) ->
+ json_encode_string_utf8(atom_to_list(K));
+ K when is_integer(K) ->
+ json_encode_string(integer_to_list(K), State);
+ K when is_list(K); is_binary(K) ->
+ json_encode_string(K, State)
+ end,
+ VS = json_encode(V, State),
+ [$,, VS, $:, KS | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "{", Props),
+ lists:reverse([$\} | Acc1]).
+
+json_encode_string(A, _State) when is_atom(A) ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(atom_to_list(A)));
+json_encode_string(B, _State) when is_binary(B) ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(B));
+json_encode_string(S, #encoder{input_encoding=utf8}) ->
+ json_encode_string_utf8(S);
+json_encode_string(S, #encoder{input_encoding=unicode}) ->
+ json_encode_string_unicode(S).
+
+json_encode_string_utf8(S) ->
+ [?Q | json_encode_string_utf8_1(S)].
+
+json_encode_string_utf8_1([C | Cs]) when C >= 0, C =< 16#7f ->
+ NewC = case C of
+ $\\ -> "\\\\";
+ ?Q -> "\\\"";
+ _ when C >= $\s, C < 16#7f -> C;
+ $\t -> "\\t";
+ $\n -> "\\n";
+ $\r -> "\\r";
+ $\f -> "\\f";
+ $\b -> "\\b";
+ _ when C >= 0, C =< 16#7f -> unihex(C);
+ _ -> exit({json_encode, {bad_char, C}})
+ end,
+ [NewC | json_encode_string_utf8_1(Cs)];
+json_encode_string_utf8_1(All=[C | _]) when C >= 16#80, C =< 16#10FFFF ->
+ [?Q | Rest] = json_encode_string_unicode(xmerl_ucs:from_utf8(All)),
+ Rest;
+json_encode_string_utf8_1([]) ->
+ "\"".
+
+json_encode_string_unicode(S) ->
+ [?Q | json_encode_string_unicode_1(S)].
+
+json_encode_string_unicode_1([C | Cs]) ->
+ NewC = case C of
+ $\\ -> "\\\\";
+ ?Q -> "\\\"";
+ _ when C >= $\s, C < 16#7f -> C;
+ $\t -> "\\t";
+ $\n -> "\\n";
+ $\r -> "\\r";
+ $\f -> "\\f";
+ $\b -> "\\b";
+ _ when C >= 0, C =< 16#10FFFF -> unihex(C);
+ _ -> exit({json_encode, {bad_char, C}})
+ end,
+ [NewC | json_encode_string_unicode_1(Cs)];
+json_encode_string_unicode_1([]) ->
+ "\"".
+
+dehex(C) when C >= $0, C =< $9 ->
+ C - $0;
+dehex(C) when C >= $a, C =< $f ->
+ C - $a + 10;
+dehex(C) when C >= $A, C =< $F ->
+ C - $A + 10.
+
+hexdigit(C) when C >= 0, C =< 9 ->
+ C + $0;
+hexdigit(C) when C =< 15 ->
+ C + $a - 10.
+
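+%% Emit a JSON \uXXXX escape; code points above 16#FFFF become a UTF-16
+%% surrogate pair, e.g. (illustrative)
+%% lists:flatten(unihex(16#1D120)) -> "\\ud834\\udd20".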
+unihex(C) when C < 16#10000 ->
+ <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
+ Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
+ [$\\, $u | Digits];
+unihex(C) when C =< 16#10FFFF ->
+ N = C - 16#10000,
+ S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
+ S2 = 16#dc00 bor (N band 16#3ff),
+ [unihex(S1), unihex(S2)].
+
+json_decode(B, S) when is_binary(B) ->
+ json_decode(binary_to_list(B), S);
+json_decode(L, S) ->
+ {Res, L1, S1} = decode1(L, S),
+ {eof, [], _} = tokenize(L1, S1#decoder{state=trim}),
+ Res.
+
+decode1(L, S=#decoder{state=null}) ->
+ case tokenize(L, S#decoder{state=any}) of
+ {{const, C}, L1, S1} ->
+ {C, L1, S1};
+ {start_array, L1, S1} ->
+ decode_array(L1, S1#decoder{state=any}, []);
+ {start_object, L1, S1} ->
+ decode_object(L1, S1#decoder{state=key}, [])
+ end.
+
+make_object(V, #decoder{object_hook=null}) ->
+ V;
+make_object(V, #decoder{object_hook=Hook}) ->
+ Hook(V).
+
+decode_object(L, S=#decoder{state=key}, Acc) ->
+ case tokenize(L, S) of
+ {end_object, Rest, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, Rest, S1#decoder{state=null}};
+ {{const, K}, Rest, S1} when is_list(K) ->
+ {colon, L2, S2} = tokenize(Rest, S1),
+ {V, L3, S3} = decode1(L2, S2#decoder{state=null}),
+ decode_object(L3, S3#decoder{state=comma}, [{K, V} | Acc])
+ end;
+decode_object(L, S=#decoder{state=comma}, Acc) ->
+ case tokenize(L, S) of
+ {end_object, Rest, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, Rest, S1#decoder{state=null}};
+ {comma, Rest, S1} ->
+ decode_object(Rest, S1#decoder{state=key}, Acc)
+ end.
+
+decode_array(L, S=#decoder{state=any}, Acc) ->
+ case tokenize(L, S) of
+ {end_array, Rest, S1} ->
+ {{array, lists:reverse(Acc)}, Rest, S1#decoder{state=null}};
+ {start_array, Rest, S1} ->
+ {Array, Rest1, S2} = decode_array(Rest, S1#decoder{state=any}, []),
+ decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
+ {start_object, Rest, S1} ->
+ {Array, Rest1, S2} = decode_object(Rest, S1#decoder{state=key}, []),
+ decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
+ {{const, Const}, Rest, S1} ->
+ decode_array(Rest, S1#decoder{state=comma}, [Const | Acc])
+ end;
+decode_array(L, S=#decoder{state=comma}, Acc) ->
+ case tokenize(L, S) of
+ {end_array, Rest, S1} ->
+ {{array, lists:reverse(Acc)}, Rest, S1#decoder{state=null}};
+ {comma, Rest, S1} ->
+ decode_array(Rest, S1#decoder{state=any}, Acc)
+ end.
+
+tokenize_string(IoList=[C | _], S=#decoder{input_encoding=utf8}, Acc)
+ when is_list(C); is_binary(C); C >= 16#7f ->
+ List = xmerl_ucs:from_utf8(iolist_to_binary(IoList)),
+ tokenize_string(List, S#decoder{input_encoding=unicode}, Acc);
+tokenize_string("\"" ++ Rest, S, Acc) ->
+ {lists:reverse(Acc), Rest, ?INC_COL(S)};
+tokenize_string("\\\"" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\" | Acc]);
+tokenize_string("\\\\" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\\ | Acc]);
+tokenize_string("\\/" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$/ | Acc]);
+tokenize_string("\\b" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\b | Acc]);
+tokenize_string("\\f" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\f | Acc]);
+tokenize_string("\\n" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\n | Acc]);
+tokenize_string("\\r" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\r | Acc]);
+tokenize_string("\\t" ++ Rest, S, Acc) ->
+ tokenize_string(Rest, ?ADV_COL(S, 2), [$\t | Acc]);
+tokenize_string([$\\, $u, C3, C2, C1, C0 | Rest], S, Acc) ->
+ % coalesce UTF-16 surrogate pair?
+ C = dehex(C0) bor
+ (dehex(C1) bsl 4) bor
+ (dehex(C2) bsl 8) bor
+ (dehex(C3) bsl 12),
+ tokenize_string(Rest, ?ADV_COL(S, 6), [C | Acc]);
+tokenize_string([C | Rest], S, Acc) when C >= $\s; C < 16#10FFFF ->
+ tokenize_string(Rest, ?ADV_COL(S, 1), [C | Acc]).
+
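+%% Tokenize a JSON number with a small state machine
+%% (sign -> int -> int1 -> frac -> frac1 -> esign -> eint -> eint1),
+%% returning {{int, Digits}, Rest, S} or {{float, Digits}, Rest, S}.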
+tokenize_number(IoList=[C | _], Mode, S=#decoder{input_encoding=utf8}, Acc)
+ when is_list(C); is_binary(C); C >= 16#7f ->
+ List = xmerl_ucs:from_utf8(iolist_to_binary(IoList)),
+ tokenize_number(List, Mode, S#decoder{input_encoding=unicode}, Acc);
+tokenize_number([$- | Rest], sign, S, []) ->
+ tokenize_number(Rest, int, ?INC_COL(S), [$-]);
+tokenize_number(Rest, sign, S, []) ->
+ tokenize_number(Rest, int, S, []);
+tokenize_number([$0 | Rest], int, S, Acc) ->
+ tokenize_number(Rest, frac, ?INC_COL(S), [$0 | Acc]);
+tokenize_number([C | Rest], int, S, Acc) when C >= $1, C =< $9 ->
+ tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
+tokenize_number([C | Rest], int1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, int1, S, Acc) ->
+ tokenize_number(Rest, frac, S, Acc);
+tokenize_number([$., C | Rest], frac, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
+tokenize_number([E | Rest], frac, S, Acc) when E == $e; E == $E ->
+ tokenize_number(Rest, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
+tokenize_number(Rest, frac, S, Acc) ->
+ {{int, lists:reverse(Acc)}, Rest, S};
+tokenize_number([C | Rest], frac1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, frac1, ?INC_COL(S), [C | Acc]);
+tokenize_number([E | Rest], frac1, S, Acc) when E == $e; E == $E ->
+ tokenize_number(Rest, esign, ?INC_COL(S), [$e | Acc]);
+tokenize_number(Rest, frac1, S, Acc) ->
+ {{float, lists:reverse(Acc)}, Rest, S};
+tokenize_number([C | Rest], esign, S, Acc) when C == $-; C == $+ ->
+ tokenize_number(Rest, eint, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, esign, S, Acc) ->
+ tokenize_number(Rest, eint, S, Acc);
+tokenize_number([C | Rest], eint, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
+tokenize_number([C | Rest], eint1, S, Acc) when C >= $0, C =< $9 ->
+ tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, eint1, S, Acc) ->
+ {{float, lists:reverse(Acc)}, Rest, S}.
+
+tokenize([], S=#decoder{state=trim}) ->
+ {eof, [], S};
+tokenize([L | Rest], S) when is_list(L) ->
+ tokenize(L ++ Rest, S);
+tokenize([B | Rest], S) when is_binary(B) ->
+ tokenize(xmerl_ucs:from_utf8(B) ++ Rest, S);
+tokenize("\r\n" ++ Rest, S) ->
+ tokenize(Rest, ?INC_LINE(S));
+tokenize("\n" ++ Rest, S) ->
+ tokenize(Rest, ?INC_LINE(S));
+tokenize([C | Rest], S) when C == $\s; C == $\t ->
+ tokenize(Rest, ?INC_COL(S));
+tokenize("{" ++ Rest, S) ->
+ {start_object, Rest, ?INC_COL(S)};
+tokenize("}" ++ Rest, S) ->
+ {end_object, Rest, ?INC_COL(S)};
+tokenize("[" ++ Rest, S) ->
+ {start_array, Rest, ?INC_COL(S)};
+tokenize("]" ++ Rest, S) ->
+ {end_array, Rest, ?INC_COL(S)};
+tokenize("," ++ Rest, S) ->
+ {comma, Rest, ?INC_COL(S)};
+tokenize(":" ++ Rest, S) ->
+ {colon, Rest, ?INC_COL(S)};
+tokenize("null" ++ Rest, S) ->
+ {{const, null}, Rest, ?ADV_COL(S, 4)};
+tokenize("true" ++ Rest, S) ->
+ {{const, true}, Rest, ?ADV_COL(S, 4)};
+tokenize("false" ++ Rest, S) ->
+ {{const, false}, Rest, ?ADV_COL(S, 5)};
+tokenize("\"" ++ Rest, S) ->
+ {String, Rest1, S1} = tokenize_string(Rest, ?INC_COL(S), []),
+ {{const, String}, Rest1, S1};
+tokenize(L=[C | _], S) when C >= $0, C =< $9; C == $- ->
+ case tokenize_number(L, sign, S, []) of
+ {{int, Int}, Rest, S1} ->
+ {{const, list_to_integer(Int)}, Rest, S1};
+ {{float, Float}, Rest, S1} ->
+ {{const, list_to_float(Float)}, Rest, S1}
+ end.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+%% testing constructs borrowed from the Yaws JSON implementation.
+
+%% Create an object from a list of Key/Value pairs.
+
+obj_new() ->
+ {struct, []}.
+
+is_obj({struct, Props}) ->
+ F = fun ({K, _}) when is_list(K) ->
+ true;
+ (_) ->
+ false
+ end,
+ lists:all(F, Props).
+
+obj_from_list(Props) ->
+ Obj = {struct, Props},
+ case is_obj(Obj) of
+ true -> Obj;
+ false -> exit(json_bad_object)
+ end.
+
+%% Test for equivalence of Erlang terms.
+%% Due to arbitrary order of construction, equivalent objects might
+%% compare unequal as erlang terms, so we need to carefully recurse
+%% through aggregates (tuples and objects).
+
+equiv({struct, Props1}, {struct, Props2}) ->
+ equiv_object(Props1, Props2);
+equiv({array, L1}, {array, L2}) ->
+ equiv_list(L1, L2);
+equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
+equiv(S1, S2) when is_list(S1), is_list(S2) -> S1 == S2;
+equiv(true, true) -> true;
+equiv(false, false) -> true;
+equiv(null, null) -> true.
+
+%% Object representation and traversal order is unknown.
+%% Use the sledgehammer and sort property lists.
+
+equiv_object(Props1, Props2) ->
+ L1 = lists:keysort(1, Props1),
+ L2 = lists:keysort(1, Props2),
+ Pairs = lists:zip(L1, L2),
+ true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
+ equiv(K1, K2) and equiv(V1, V2)
+ end, Pairs).
+
+%% Recursively compare tuple elements for equivalence.
+
+equiv_list([], []) ->
+ true;
+equiv_list([V1 | L1], [V2 | L2]) ->
+ equiv(V1, V2) andalso equiv_list(L1, L2).
+
+e2j_vec_test() ->
+ test_one(e2j_test_vec(utf8), 1).
+
+issue33_test() ->
+ %% http://code.google.com/p/mochiweb/issues/detail?id=33
+ Js = {struct, [{"key", [194, 163]}]},
+ Encoder = encoder([{input_encoding, utf8}]),
+ "{\"key\":\"\\u00a3\"}" = lists:flatten(Encoder(Js)).
+
+test_one([], _N) ->
+ %% io:format("~p tests passed~n", [N-1]),
+ ok;
+test_one([{E, J} | Rest], N) ->
+ %% io:format("[~p] ~p ~p~n", [N, E, J]),
+ true = equiv(E, decode(J)),
+ true = equiv(E, decode(encode(E))),
+ test_one(Rest, 1+N).
+
+e2j_test_vec(utf8) ->
+ [
+ {1, "1"},
+ {3.1416, "3.14160"}, % text representation may truncate, trail zeroes
+ {-1, "-1"},
+ {-3.1416, "-3.14160"},
+ {12.0e10, "1.20000e+11"},
+ {1.234E+10, "1.23400e+10"},
+ {-1.234E-10, "-1.23400e-10"},
+ {10.0, "1.0e+01"},
+ {123.456, "1.23456E+2"},
+ {10.0, "1e1"},
+ {"foo", "\"foo\""},
+ {"foo" ++ [5] ++ "bar", "\"foo\\u0005bar\""},
+ {"", "\"\""},
+ {"\"", "\"\\\"\""},
+ {"\n\n\n", "\"\\n\\n\\n\""},
+ {"\\", "\"\\\\\""},
+ {"\" \b\f\r\n\t\"", "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
+ {obj_new(), "{}"},
+ {obj_from_list([{"foo", "bar"}]), "{\"foo\":\"bar\"}"},
+ {obj_from_list([{"foo", "bar"}, {"baz", 123}]),
+ "{\"foo\":\"bar\",\"baz\":123}"},
+ {{array, []}, "[]"},
+ {{array, [{array, []}]}, "[[]]"},
+ {{array, [1, "foo"]}, "[1,\"foo\"]"},
+
+ % json array in a json object
+ {obj_from_list([{"foo", {array, [123]}}]),
+ "{\"foo\":[123]}"},
+
+ % json object in a json object
+ {obj_from_list([{"foo", obj_from_list([{"bar", true}])}]),
+ "{\"foo\":{\"bar\":true}}"},
+
+ % fold evaluation order
+ {obj_from_list([{"foo", {array, []}},
+ {"bar", obj_from_list([{"baz", true}])},
+ {"alice", "bob"}]),
+ "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
+
+ % json object in a json array
+ {{array, [-123, "foo", obj_from_list([{"bar", {array, []}}]), null]},
+ "[-123,\"foo\",{\"bar\":[]},null]"}
+ ].
+
+-endif.
diff --git a/deps/mochiweb/src/mochijson2.erl b/deps/mochiweb/src/mochijson2.erl
new file mode 100644
index 00000000..4c2cd25b
--- /dev/null
+++ b/deps/mochiweb/src/mochijson2.erl
@@ -0,0 +1,802 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Yet another JSON (RFC 4627) library for Erlang. mochijson2 works
+%% with binaries as strings, arrays as lists (without an {array, _}
+%% wrapper) and it only knows how to decode UTF-8 (and ASCII).
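+%%
+%% Example (illustrative):
+%%
+%% 1> mochijson2:decode(<<"{\"foo\": [1, 2]}">>).
+%% {struct,[{<<"foo">>,[1,2]}]}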
+
+-module(mochijson2).
+-author('bob@mochimedia.com').
+-export([encoder/1, encode/1]).
+-export([decoder/1, decode/1]).
+
+% This is a macro to placate syntax highlighters.
+-define(Q, $\").
+-define(ADV_COL(S, N), S#decoder{offset=N+S#decoder.offset,
+ column=N+S#decoder.column}).
+-define(INC_COL(S), S#decoder{offset=1+S#decoder.offset,
+ column=1+S#decoder.column}).
+-define(INC_LINE(S), S#decoder{offset=1+S#decoder.offset,
+ column=1,
+ line=1+S#decoder.line}).
+-define(INC_CHAR(S, C),
+ case C of
+ $\n ->
+ S#decoder{column=1,
+ line=1+S#decoder.line,
+ offset=1+S#decoder.offset};
+ _ ->
+ S#decoder{column=1+S#decoder.column,
+ offset=1+S#decoder.offset}
+ end).
+-define(IS_WHITESPACE(C),
+ (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+
+%% @type iolist() = [char() | binary() | iolist()]
+%% @type iodata() = iolist() | binary()
+%% @type json_string() = atom | binary()
+%% @type json_number() = integer() | float()
+%% @type json_array() = [json_term()]
+%% @type json_object() = {struct, [{json_string(), json_term()}]}
+%% @type json_iolist() = {json, iolist()}
+%% @type json_term() = json_string() | json_number() | json_array() |
+%% json_object() | json_iolist()
+
+-record(encoder, {handler=null,
+ utf8=false}).
+
+-record(decoder, {object_hook=null,
+ offset=0,
+ line=1,
+ column=1,
+ state=null}).
+
+%% @spec encoder([encoder_option()]) -> function()
+%% @doc Create an encoder/1 with the given options.
+%% @type encoder_option() = handler_option() | utf8_option()
+%% @type utf8_option() = boolean(). Emit unicode as utf8 (default - false)
+encoder(Options) ->
+ State = parse_encoder_options(Options, #encoder{}),
+ fun (O) -> json_encode(O, State) end.
+
+%% @spec encode(json_term()) -> iolist()
+%% @doc Encode the given term as JSON to an iolist.
+encode(Any) ->
+ json_encode(Any, #encoder{}).
+
+%% @spec decoder([decoder_option()]) -> function()
+%% @doc Create a decoder/1 with the given options.
+decoder(Options) ->
+ State = parse_decoder_options(Options, #decoder{}),
+ fun (O) -> json_decode(O, State) end.
+
+%% @spec decode(iolist()) -> json_term()
+%% @doc Decode the given iolist to Erlang terms.
+decode(S) ->
+ json_decode(S, #decoder{}).
+
+%% Internal API
+
+parse_encoder_options([], State) ->
+ State;
+parse_encoder_options([{handler, Handler} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{handler=Handler});
+parse_encoder_options([{utf8, Switch} | Rest], State) ->
+ parse_encoder_options(Rest, State#encoder{utf8=Switch}).
+
+parse_decoder_options([], State) ->
+ State;
+parse_decoder_options([{object_hook, Hook} | Rest], State) ->
+ parse_decoder_options(Rest, State#decoder{object_hook=Hook}).
+
+json_encode(true, _State) ->
+ <<"true">>;
+json_encode(false, _State) ->
+ <<"false">>;
+json_encode(null, _State) ->
+ <<"null">>;
+json_encode(I, _State) when is_integer(I) ->
+ integer_to_list(I);
+json_encode(F, _State) when is_float(F) ->
+ mochinum:digits(F);
+json_encode(S, State) when is_binary(S); is_atom(S) ->
+ json_encode_string(S, State);
+json_encode(Array, State) when is_list(Array) ->
+ json_encode_array(Array, State);
+json_encode({struct, Props}, State) when is_list(Props) ->
+ json_encode_proplist(Props, State);
+json_encode({json, IoList}, _State) ->
+ IoList;
+json_encode(Bad, #encoder{handler=null}) ->
+ exit({json_encode, {bad_term, Bad}});
+json_encode(Bad, State=#encoder{handler=Handler}) ->
+ json_encode(Handler(Bad), State).
+
+json_encode_array([], _State) ->
+ <<"[]">>;
+json_encode_array(L, State) ->
+ F = fun (O, Acc) ->
+ [$,, json_encode(O, State) | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "[", L),
+ lists:reverse([$\] | Acc1]).
+
+json_encode_proplist([], _State) ->
+ <<"{}">>;
+json_encode_proplist(Props, State) ->
+ F = fun ({K, V}, Acc) ->
+ KS = json_encode_string(K, State),
+ VS = json_encode(V, State),
+ [$,, VS, $:, KS | Acc]
+ end,
+ [$, | Acc1] = lists:foldl(F, "{", Props),
+ lists:reverse([$\} | Acc1]).
+
+json_encode_string(A, State) when is_atom(A) ->
+ L = atom_to_list(A),
+ case json_string_is_safe(L) of
+ true ->
+ [?Q, L, ?Q];
+ false ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(L), State, [?Q])
+ end;
+json_encode_string(B, State) when is_binary(B) ->
+ case json_bin_is_safe(B) of
+ true ->
+ [?Q, B, ?Q];
+ false ->
+ json_encode_string_unicode(xmerl_ucs:from_utf8(B), State, [?Q])
+ end;
+json_encode_string(I, _State) when is_integer(I) ->
+ [?Q, integer_to_list(I), ?Q];
+json_encode_string(L, State) when is_list(L) ->
+ case json_string_is_safe(L) of
+ true ->
+ [?Q, L, ?Q];
+ false ->
+ json_encode_string_unicode(L, State, [?Q])
+ end.
+
+json_string_is_safe([]) ->
+ true;
+json_string_is_safe([C | Rest]) ->
+ case C of
+ ?Q ->
+ false;
+ $\\ ->
+ false;
+ $\b ->
+ false;
+ $\f ->
+ false;
+ $\n ->
+ false;
+ $\r ->
+ false;
+ $\t ->
+ false;
+ C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
+ false;
+ C when C < 16#7f ->
+ json_string_is_safe(Rest);
+ _ ->
+ false
+ end.
+
+json_bin_is_safe(<<>>) ->
+ true;
+json_bin_is_safe(<<C, Rest/binary>>) ->
+ case C of
+ ?Q ->
+ false;
+ $\\ ->
+ false;
+ $\b ->
+ false;
+ $\f ->
+ false;
+ $\n ->
+ false;
+ $\r ->
+ false;
+ $\t ->
+ false;
+ C when C >= 0, C < $\s; C >= 16#7f ->
+ false;
+ C when C < 16#7f ->
+ json_bin_is_safe(Rest)
+ end.
+
+json_encode_string_unicode([], _State, Acc) ->
+ lists:reverse([$\" | Acc]);
+json_encode_string_unicode([C | Cs], State, Acc) ->
+ Acc1 = case C of
+ ?Q ->
+ [?Q, $\\ | Acc];
+ %% Escaping solidus is only useful when trying to protect
+ %% against "</script>" injection attacks which are only
+ %% possible when JSON is inserted into a HTML document
+ %% in-line. mochijson2 does not protect you from this, so
+ %% if you do insert directly into HTML then you need to
+ %% uncomment the following case or escape the output of encode.
+ %%
+ %% $/ ->
+ %% [$/, $\\ | Acc];
+ %%
+ $\\ ->
+ [$\\, $\\ | Acc];
+ $\b ->
+ [$b, $\\ | Acc];
+ $\f ->
+ [$f, $\\ | Acc];
+ $\n ->
+ [$n, $\\ | Acc];
+ $\r ->
+ [$r, $\\ | Acc];
+ $\t ->
+ [$t, $\\ | Acc];
+ C when C >= 0, C < $\s ->
+ [unihex(C) | Acc];
+ C when C >= 16#7f, C =< 16#10FFFF, State#encoder.utf8 ->
+ [xmerl_ucs:to_utf8(C) | Acc];
+ C when C >= 16#7f, C =< 16#10FFFF, not State#encoder.utf8 ->
+ [unihex(C) | Acc];
+ C when C < 16#7f ->
+ [C | Acc];
+ _ ->
+ exit({json_encode, {bad_char, C}})
+ end,
+ json_encode_string_unicode(Cs, State, Acc1).
+
+hexdigit(C) when C >= 0, C =< 9 ->
+ C + $0;
+hexdigit(C) when C =< 15 ->
+ C + $a - 10.
+
+unihex(C) when C < 16#10000 ->
+ <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
+ Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
+ [$\\, $u | Digits];
+unihex(C) when C =< 16#10FFFF ->
+ N = C - 16#10000,
+ S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
+ S2 = 16#dc00 bor (N band 16#3ff),
+ [unihex(S1), unihex(S2)].
+
+json_decode(L, S) when is_list(L) ->
+ json_decode(iolist_to_binary(L), S);
+json_decode(B, S) ->
+ {Res, S1} = decode1(B, S),
+ {eof, _} = tokenize(B, S1#decoder{state=trim}),
+ Res.
+
+decode1(B, S=#decoder{state=null}) ->
+ case tokenize(B, S#decoder{state=any}) of
+ {{const, C}, S1} ->
+ {C, S1};
+ {start_array, S1} ->
+ decode_array(B, S1);
+ {start_object, S1} ->
+ decode_object(B, S1)
+ end.
+
+make_object(V, #decoder{object_hook=null}) ->
+ V;
+make_object(V, #decoder{object_hook=Hook}) ->
+ Hook(V).
+
+decode_object(B, S) ->
+ decode_object(B, S#decoder{state=key}, []).
+
+decode_object(B, S=#decoder{state=key}, Acc) ->
+ case tokenize(B, S) of
+ {end_object, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, S1#decoder{state=null}};
+ {{const, K}, S1} ->
+ {colon, S2} = tokenize(B, S1),
+ {V, S3} = decode1(B, S2#decoder{state=null}),
+ decode_object(B, S3#decoder{state=comma}, [{K, V} | Acc])
+ end;
+decode_object(B, S=#decoder{state=comma}, Acc) ->
+ case tokenize(B, S) of
+ {end_object, S1} ->
+ V = make_object({struct, lists:reverse(Acc)}, S1),
+ {V, S1#decoder{state=null}};
+ {comma, S1} ->
+ decode_object(B, S1#decoder{state=key}, Acc)
+ end.
+
+decode_array(B, S) ->
+ decode_array(B, S#decoder{state=any}, []).
+
+decode_array(B, S=#decoder{state=any}, Acc) ->
+ case tokenize(B, S) of
+ {end_array, S1} ->
+ {lists:reverse(Acc), S1#decoder{state=null}};
+ {start_array, S1} ->
+ {Array, S2} = decode_array(B, S1),
+ decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
+ {start_object, S1} ->
+ {Array, S2} = decode_object(B, S1),
+ decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
+ {{const, Const}, S1} ->
+ decode_array(B, S1#decoder{state=comma}, [Const | Acc])
+ end;
+decode_array(B, S=#decoder{state=comma}, Acc) ->
+ case tokenize(B, S) of
+ {end_array, S1} ->
+ {lists:reverse(Acc), S1#decoder{state=null}};
+ {comma, S1} ->
+ decode_array(B, S1#decoder{state=any}, Acc)
+ end.
+
+tokenize_string(B, S=#decoder{offset=O}) ->
+ case tokenize_string_fast(B, O) of
+ {escape, O1} ->
+ Length = O1 - O,
+ S1 = ?ADV_COL(S, Length),
+ <<_:O/binary, Head:Length/binary, _/binary>> = B,
+ tokenize_string(B, S1, lists:reverse(binary_to_list(Head)));
+ O1 ->
+ Length = O1 - O,
+ <<_:O/binary, String:Length/binary, ?Q, _/binary>> = B,
+ {{const, String}, ?ADV_COL(S, Length + 1)}
+ end.
+
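+%% Scan forward over valid UTF-8 looking for the closing quote; returns
+%% the quote's offset, or {escape, Offset} at the first backslash so the
+%% caller can fall back to the slower escape-aware tokenizer below.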
+tokenize_string_fast(B, O) ->
+ case B of
+ <<_:O/binary, ?Q, _/binary>> ->
+ O;
+ <<_:O/binary, $\\, _/binary>> ->
+ {escape, O};
+ <<_:O/binary, C1, _/binary>> when C1 < 128 ->
+ tokenize_string_fast(B, 1 + O);
+ <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
+ C2 >= 128, C2 =< 191 ->
+ tokenize_string_fast(B, 2 + O);
+ <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191 ->
+ tokenize_string_fast(B, 3 + O);
+ <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191,
+ C4 >= 128, C4 =< 191 ->
+ tokenize_string_fast(B, 4 + O);
+ _ ->
+ throw(invalid_utf8)
+ end.
+
+tokenize_string(B, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, ?Q, _/binary>> ->
+ {{const, iolist_to_binary(lists:reverse(Acc))}, ?INC_COL(S)};
+ <<_:O/binary, "\\\"", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\" | Acc]);
+ <<_:O/binary, "\\\\", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\\ | Acc]);
+ <<_:O/binary, "\\/", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$/ | Acc]);
+ <<_:O/binary, "\\b", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\b | Acc]);
+ <<_:O/binary, "\\f", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\f | Acc]);
+ <<_:O/binary, "\\n", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\n | Acc]);
+ <<_:O/binary, "\\r", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\r | Acc]);
+ <<_:O/binary, "\\t", _/binary>> ->
+ tokenize_string(B, ?ADV_COL(S, 2), [$\t | Acc]);
+ <<_:O/binary, "\\u", C3, C2, C1, C0, Rest/binary>> ->
+ C = erlang:list_to_integer([C3, C2, C1, C0], 16),
+ if C > 16#D7FF, C < 16#DC00 ->
+ %% coalesce UTF-16 surrogate pair
+ <<"\\u", D3, D2, D1, D0, _/binary>> = Rest,
+ D = erlang:list_to_integer([D3,D2,D1,D0], 16),
+ [CodePoint] = xmerl_ucs:from_utf16be(<<C:16/big-unsigned-integer,
+ D:16/big-unsigned-integer>>),
+ Acc1 = lists:reverse(xmerl_ucs:to_utf8(CodePoint), Acc),
+ tokenize_string(B, ?ADV_COL(S, 12), Acc1);
+ true ->
+ Acc1 = lists:reverse(xmerl_ucs:to_utf8(C), Acc),
+ tokenize_string(B, ?ADV_COL(S, 6), Acc1)
+ end;
+ <<_:O/binary, C1, _/binary>> when C1 < 128 ->
+ tokenize_string(B, ?INC_CHAR(S, C1), [C1 | Acc]);
+ <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
+ C2 >= 128, C2 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 2), [C2, C1 | Acc]);
+ <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 3), [C3, C2, C1 | Acc]);
+ <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
+ C2 >= 128, C2 =< 191,
+ C3 >= 128, C3 =< 191,
+ C4 >= 128, C4 =< 191 ->
+ tokenize_string(B, ?ADV_COL(S, 4), [C4, C3, C2, C1 | Acc]);
+ _ ->
+ throw(invalid_utf8)
+ end.
+
+tokenize_number(B, S) ->
+ case tokenize_number(B, sign, S, []) of
+ {{int, Int}, S1} ->
+ {{const, list_to_integer(Int)}, S1};
+ {{float, Float}, S1} ->
+ {{const, list_to_float(Float)}, S1}
+ end.
+
+tokenize_number(B, sign, S=#decoder{offset=O}, []) ->
+ case B of
+ <<_:O/binary, $-, _/binary>> ->
+ tokenize_number(B, int, ?INC_COL(S), [$-]);
+ _ ->
+ tokenize_number(B, int, S, [])
+ end;
+tokenize_number(B, int, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, $0, _/binary>> ->
+ tokenize_number(B, frac, ?INC_COL(S), [$0 | Acc]);
+ <<_:O/binary, C, _/binary>> when C >= $1 andalso C =< $9 ->
+ tokenize_number(B, int1, ?INC_COL(S), [C | Acc])
+ end;
+tokenize_number(B, int1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, int1, ?INC_COL(S), [C | Acc]);
+ _ ->
+ tokenize_number(B, frac, S, Acc)
+ end;
+tokenize_number(B, frac, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, $., C, _/binary>> when C >= $0, C =< $9 ->
+ tokenize_number(B, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
+ <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
+ tokenize_number(B, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
+ _ ->
+ {{int, lists:reverse(Acc)}, S}
+ end;
+tokenize_number(B, frac1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, frac1, ?INC_COL(S), [C | Acc]);
+ <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
+ tokenize_number(B, esign, ?INC_COL(S), [$e | Acc]);
+ _ ->
+ {{float, lists:reverse(Acc)}, S}
+ end;
+tokenize_number(B, esign, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C =:= $- orelse C=:= $+ ->
+ tokenize_number(B, eint, ?INC_COL(S), [C | Acc]);
+ _ ->
+ tokenize_number(B, eint, S, Acc)
+ end;
+tokenize_number(B, eint, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, eint1, ?INC_COL(S), [C | Acc])
+ end;
+tokenize_number(B, eint1, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
+ tokenize_number(B, eint1, ?INC_COL(S), [C | Acc]);
+ _ ->
+ {{float, lists:reverse(Acc)}, S}
+ end.
+
+tokenize(B, S=#decoder{offset=O}) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ tokenize(B, ?INC_CHAR(S, C));
+ <<_:O/binary, "{", _/binary>> ->
+ {start_object, ?INC_COL(S)};
+ <<_:O/binary, "}", _/binary>> ->
+ {end_object, ?INC_COL(S)};
+ <<_:O/binary, "[", _/binary>> ->
+ {start_array, ?INC_COL(S)};
+ <<_:O/binary, "]", _/binary>> ->
+ {end_array, ?INC_COL(S)};
+ <<_:O/binary, ",", _/binary>> ->
+ {comma, ?INC_COL(S)};
+ <<_:O/binary, ":", _/binary>> ->
+ {colon, ?INC_COL(S)};
+ <<_:O/binary, "null", _/binary>> ->
+ {{const, null}, ?ADV_COL(S, 4)};
+ <<_:O/binary, "true", _/binary>> ->
+ {{const, true}, ?ADV_COL(S, 4)};
+ <<_:O/binary, "false", _/binary>> ->
+ {{const, false}, ?ADV_COL(S, 5)};
+ <<_:O/binary, "\"", _/binary>> ->
+ tokenize_string(B, ?INC_COL(S));
+ <<_:O/binary, C, _/binary>> when (C >= $0 andalso C =< $9)
+ orelse C =:= $- ->
+ tokenize_number(B, S);
+ <<_:O/binary>> ->
+ trim = S#decoder.state,
+ {eof, S}
+ end.
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+
+%% testing constructs borrowed from the Yaws JSON implementation.
+
+%% Create an object from a list of Key/Value pairs.
+
+obj_new() ->
+ {struct, []}.
+
+is_obj({struct, Props}) ->
+ F = fun ({K, _}) when is_binary(K) -> true end,
+ lists:all(F, Props).
+
+obj_from_list(Props) ->
+ Obj = {struct, Props},
+ ?assert(is_obj(Obj)),
+ Obj.
+
+%% Test for equivalence of Erlang terms.
+%% Due to arbitrary order of construction, equivalent objects might
+%% compare unequal as erlang terms, so we need to carefully recurse
+%% through aggregates (tuples and objects).
+
+equiv({struct, Props1}, {struct, Props2}) ->
+ equiv_object(Props1, Props2);
+equiv(L1, L2) when is_list(L1), is_list(L2) ->
+ equiv_list(L1, L2);
+equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
+equiv(B1, B2) when is_binary(B1), is_binary(B2) -> B1 == B2;
+equiv(A, A) when A =:= true orelse A =:= false orelse A =:= null -> true.
+
+%% Object representation and traversal order is unknown.
+%% Use the sledgehammer and sort property lists.
+
+equiv_object(Props1, Props2) ->
+ L1 = lists:keysort(1, Props1),
+ L2 = lists:keysort(1, Props2),
+ Pairs = lists:zip(L1, L2),
+ true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
+ equiv(K1, K2) and equiv(V1, V2)
+ end, Pairs).
+
+%% Recursively compare tuple elements for equivalence.
+
+equiv_list([], []) ->
+ true;
+equiv_list([V1 | L1], [V2 | L2]) ->
+ equiv(V1, V2) andalso equiv_list(L1, L2).
+
+decode_test() ->
+ [1199344435545.0, 1] = decode(<<"[1199344435545.0,1]">>),
+ <<16#F0,16#9D,16#9C,16#95>> = decode([34,"\\ud835","\\udf15",34]).
+
+e2j_vec_test() ->
+ test_one(e2j_test_vec(utf8), 1).
+
+test_one([], _N) ->
+ %% io:format("~p tests passed~n", [N-1]),
+ ok;
+test_one([{E, J} | Rest], N) ->
+ %% io:format("[~p] ~p ~p~n", [N, E, J]),
+ true = equiv(E, decode(J)),
+ true = equiv(E, decode(encode(E))),
+ test_one(Rest, 1+N).
+
+e2j_test_vec(utf8) ->
+ [
+ {1, "1"},
+ {3.1416, "3.14160"}, %% text representation may truncate, trail zeroes
+ {-1, "-1"},
+ {-3.1416, "-3.14160"},
+ {12.0e10, "1.20000e+11"},
+ {1.234E+10, "1.23400e+10"},
+ {-1.234E-10, "-1.23400e-10"},
+ {10.0, "1.0e+01"},
+ {123.456, "1.23456E+2"},
+ {10.0, "1e1"},
+ {<<"foo">>, "\"foo\""},
+ {<<"foo", 5, "bar">>, "\"foo\\u0005bar\""},
+ {<<"">>, "\"\""},
+ {<<"\n\n\n">>, "\"\\n\\n\\n\""},
+ {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
+ {obj_new(), "{}"},
+ {obj_from_list([{<<"foo">>, <<"bar">>}]), "{\"foo\":\"bar\"}"},
+ {obj_from_list([{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]),
+ "{\"foo\":\"bar\",\"baz\":123}"},
+ {[], "[]"},
+ {[[]], "[[]]"},
+ {[1, <<"foo">>], "[1,\"foo\"]"},
+
+ %% json array in a json object
+ {obj_from_list([{<<"foo">>, [123]}]),
+ "{\"foo\":[123]}"},
+
+ %% json object in a json object
+ {obj_from_list([{<<"foo">>, obj_from_list([{<<"bar">>, true}])}]),
+ "{\"foo\":{\"bar\":true}}"},
+
+ %% fold evaluation order
+ {obj_from_list([{<<"foo">>, []},
+ {<<"bar">>, obj_from_list([{<<"baz">>, true}])},
+ {<<"alice">>, <<"bob">>}]),
+ "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
+
+ %% json object in a json array
+ {[-123, <<"foo">>, obj_from_list([{<<"bar">>, []}]), null],
+ "[-123,\"foo\",{\"bar\":[]},null]"}
+ ].
+
+%% test utf8 encoding
+encoder_utf8_test() ->
+ %% safe conversion case (default)
+ [34,"\\u0001","\\u0442","\\u0435","\\u0441","\\u0442",34] =
+ encode(<<1,"\321\202\320\265\321\201\321\202">>),
+
+ %% raw utf8 output (optional)
+ Enc = mochijson2:encoder([{utf8, true}]),
+ [34,"\\u0001",[209,130],[208,181],[209,129],[209,130],34] =
+ Enc(<<1,"\321\202\320\265\321\201\321\202">>).
+
+input_validation_test() ->
+ Good = [
+ {16#00A3, <<?Q, 16#C2, 16#A3, ?Q>>}, %% pound
+ {16#20AC, <<?Q, 16#E2, 16#82, 16#AC, ?Q>>}, %% euro
+ {16#10196, <<?Q, 16#F0, 16#90, 16#86, 16#96, ?Q>>} %% denarius
+ ],
+ lists:foreach(fun({CodePoint, UTF8}) ->
+ Expect = list_to_binary(xmerl_ucs:to_utf8(CodePoint)),
+ Expect = decode(UTF8)
+ end, Good),
+
+ Bad = [
+ %% 2nd, 3rd, or 4th byte of a multi-byte sequence w/o leading byte
+ <<?Q, 16#80, ?Q>>,
+ %% missing continuations, last byte in each should be 80-BF
+ <<?Q, 16#C2, 16#7F, ?Q>>,
+ <<?Q, 16#E0, 16#80,16#7F, ?Q>>,
+ <<?Q, 16#F0, 16#80, 16#80, 16#7F, ?Q>>,
+ %% we don't support code points > 10FFFF per RFC 3629
+ <<?Q, 16#F5, 16#80, 16#80, 16#80, ?Q>>,
+ %% escape characters trigger a different code path
+ <<?Q, $\\, $\n, 16#80, ?Q>>
+ ],
+ lists:foreach(
+ fun(X) ->
+ ok = try decode(X) catch invalid_utf8 -> ok end,
+ %% could be {ucs,{bad_utf8_character_code}} or
+ %% {json_encode,{bad_char,_}}
+ {'EXIT', _} = (catch encode(X))
+ end, Bad).
+
+inline_json_test() ->
+ ?assertEqual(<<"\"iodata iodata\"">>,
+ iolist_to_binary(
+ encode({json, [<<"\"iodata">>, " iodata\""]}))),
+ ?assertEqual({struct, [{<<"key">>, <<"iodata iodata">>}]},
+ decode(
+ encode({struct,
+ [{key, {json, [<<"\"iodata">>, " iodata\""]}}]}))),
+ ok.
+
+big_unicode_test() ->
+ UTF8Seq = list_to_binary(xmerl_ucs:to_utf8(16#0001d120)),
+ ?assertEqual(
+ <<"\"\\ud834\\udd20\"">>,
+ iolist_to_binary(encode(UTF8Seq))),
+ ?assertEqual(
+ UTF8Seq,
+ decode(iolist_to_binary(encode(UTF8Seq)))),
+ ok.
+
+custom_decoder_test() ->
+ ?assertEqual(
+ {struct, [{<<"key">>, <<"value">>}]},
+ (decoder([]))("{\"key\": \"value\"}")),
+ F = fun ({struct, [{<<"key">>, <<"value">>}]}) -> win end,
+ ?assertEqual(
+ win,
+ (decoder([{object_hook, F}]))("{\"key\": \"value\"}")),
+ ok.
+
+atom_test() ->
+ %% JSON native atoms
+ [begin
+ ?assertEqual(A, decode(atom_to_list(A))),
+ ?assertEqual(iolist_to_binary(atom_to_list(A)),
+ iolist_to_binary(encode(A)))
+ end || A <- [true, false, null]],
+ %% Atom to string
+ ?assertEqual(
+ <<"\"foo\"">>,
+ iolist_to_binary(encode(foo))),
+ ?assertEqual(
+ <<"\"\\ud834\\udd20\"">>,
+ iolist_to_binary(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))),
+ ok.
+
+key_encode_test() ->
+ %% Some forms are accepted as keys that would not be strings in other
+ %% cases
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{foo, 1}]}))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{<<"foo">>, 1}]}))),
+ ?assertEqual(
+ <<"{\"foo\":1}">>,
+ iolist_to_binary(encode({struct, [{"foo", 1}]}))),
+ ?assertEqual(
+ <<"{\"\\ud834\\udd20\":1}">>,
+ iolist_to_binary(
+ encode({struct, [{[16#0001d120], 1}]}))),
+ ?assertEqual(
+ <<"{\"1\":1}">>,
+ iolist_to_binary(encode({struct, [{1, 1}]}))),
+ ok.
+
+unsafe_chars_test() ->
+ Chars = "\"\\\b\f\n\r\t",
+ [begin
+ ?assertEqual(false, json_string_is_safe([C])),
+ ?assertEqual(false, json_bin_is_safe(<<C>>)),
+ ?assertEqual(<<C>>, decode(encode(<<C>>)))
+ end || C <- Chars],
+ ?assertEqual(
+ false,
+ json_string_is_safe([16#0001d120])),
+ ?assertEqual(
+ false,
+ json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8(16#0001d120)))),
+ ?assertEqual(
+ [16#0001d120],
+ xmerl_ucs:from_utf8(
+ binary_to_list(
+ decode(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))))),
+ ?assertEqual(
+ false,
+ json_string_is_safe([16#110000])),
+ ?assertEqual(
+ false,
+ json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8([16#110000])))),
+ %% solidus can be escaped but isn't unsafe by default
+ ?assertEqual(
+ <<"/">>,
+ decode(<<"\"\\/\"">>)),
+ ok.
+
+int_test() ->
+ ?assertEqual(0, decode("0")),
+ ?assertEqual(1, decode("1")),
+ ?assertEqual(11, decode("11")),
+ ok.
+
+large_int_test() ->
+ ?assertEqual(<<"-2147483649214748364921474836492147483649">>,
+ iolist_to_binary(encode(-2147483649214748364921474836492147483649))),
+ ?assertEqual(<<"2147483649214748364921474836492147483649">>,
+ iolist_to_binary(encode(2147483649214748364921474836492147483649))),
+ ok.
+
+float_test() ->
+ ?assertEqual(<<"-2147483649.0">>, iolist_to_binary(encode(-2147483649.0))),
+ ?assertEqual(<<"2147483648.0">>, iolist_to_binary(encode(2147483648.0))),
+ ok.
+
+handler_test() ->
+ ?assertEqual(
+ {'EXIT',{json_encode,{bad_term,{}}}},
+ catch encode({})),
+ F = fun ({}) -> [] end,
+ ?assertEqual(
+ <<"[]">>,
+ iolist_to_binary((encoder([{handler, F}]))({}))),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochilists.erl b/deps/mochiweb/src/mochilists.erl
new file mode 100644
index 00000000..8981e7b6
--- /dev/null
+++ b/deps/mochiweb/src/mochilists.erl
@@ -0,0 +1,104 @@
+%% @copyright Copyright (c) 2010 Mochi Media, Inc.
+%% @author David Reid <dreid@mochimedia.com>
+
+%% @doc Utility functions for dealing with proplists.
+
+-module(mochilists).
+-author("David Reid <dreid@mochimedia.com>").
+-export([get_value/2, get_value/3, is_defined/2, set_default/2, set_defaults/2]).
+
+%% @spec set_default({Key::term(), Value::term()}, Proplist::list()) -> list()
+%%
+%% @doc Return new Proplist with {Key, Value} set if not is_defined(Key, Proplist).
+set_default({Key, Value}, Proplist) ->
+ case is_defined(Key, Proplist) of
+ true ->
+ Proplist;
+ false ->
+ [{Key, Value} | Proplist]
+ end.
+
+%% @spec set_defaults([{Key::term(), Value::term()}], Proplist::list()) -> list()
+%%
+%% @doc Return new Proplist with each {Key, Value} pair from DefaultProps set if not is_defined(Key, Proplist).
+set_defaults(DefaultProps, Proplist) ->
+ lists:foldl(fun set_default/2, Proplist, DefaultProps).
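+
+%% A hypothetical shell sketch (values mirror the tests below); note that
+%% keys already present in Proplist win over the supplied defaults:
+%%   1> mochilists:set_default({k, v}, []).
+%%   [{k,v}]
+%%   2> mochilists:set_defaults([{k, vee}, {kay, vee}], [{k, v}]).
+%%   [{kay,vee},{k,v}]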
+
+
+%% @spec is_defined(Key::term(), Proplist::list()) -> bool()
+%%
+%% @doc Returns true if Proplist contains at least one entry associated
+%% with Key, otherwise false.
+is_defined(Key, Proplist) ->
+ lists:keyfind(Key, 1, Proplist) =/= false.
+
+
+%% @spec get_value(Key::term(), Proplist::list()) -> term() | undefined
+%%
+%% @doc Return the value of <code>Key</code> or undefined
+get_value(Key, Proplist) ->
+ get_value(Key, Proplist, undefined).
+
+%% @spec get_value(Key::term(), Proplist::list(), Default::term()) -> term()
+%%
+%% @doc Return the value of <code>Key</code> or <code>Default</code>
+get_value(_Key, [], Default) ->
+ Default;
+get_value(Key, Proplist, Default) ->
+ case lists:keyfind(Key, 1, Proplist) of
+ false ->
+ Default;
+ {Key, Value} ->
+ Value
+ end.
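+
+%% A hypothetical shell sketch (values mirror the tests below):
+%%   1> mochilists:get_value(foo, [{foo, bar}]).
+%%   bar
+%%   2> mochilists:get_value(foo, [{bar, baz}], default).
+%%   default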
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+set_defaults_test() ->
+ ?assertEqual(
+ [{k, v}],
+ set_defaults([{k, v}], [])),
+ ?assertEqual(
+ [{k, v}],
+ set_defaults([{k, vee}], [{k, v}])),
+ ?assertEqual(
+ lists:sort([{kay, vee}, {k, v}]),
+ lists:sort(set_defaults([{k, vee}, {kay, vee}], [{k, v}]))),
+ ok.
+
+set_default_test() ->
+ ?assertEqual(
+ [{k, v}],
+ set_default({k, v}, [])),
+ ?assertEqual(
+ [{k, v}],
+ set_default({k, vee}, [{k, v}])),
+ ok.
+
+get_value_test() ->
+ ?assertEqual(
+ undefined,
+ get_value(foo, [])),
+ ?assertEqual(
+ undefined,
+ get_value(foo, [{bar, baz}])),
+ ?assertEqual(
+ bar,
+ get_value(foo, [{foo, bar}])),
+ ?assertEqual(
+ default,
+ get_value(foo, [], default)),
+ ?assertEqual(
+ default,
+ get_value(foo, [{bar, baz}], default)),
+ ?assertEqual(
+ bar,
+ get_value(foo, [{foo, bar}], default)),
+ ok.
+
+-endif.
+
diff --git a/deps/mochiweb/src/mochilogfile2.erl b/deps/mochiweb/src/mochilogfile2.erl
new file mode 100644
index 00000000..c34ee73a
--- /dev/null
+++ b/deps/mochiweb/src/mochilogfile2.erl
@@ -0,0 +1,140 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc Write newline-delimited log files, ensuring that if a truncated
+%% entry is found on log open then it is fixed before writing. Uses
+%% delayed writes and raw files for performance.
+-module(mochilogfile2).
+-author('bob@mochimedia.com').
+
+-export([open/1, write/2, close/1, name/1]).
+
+%% @spec open(Name) -> Handle
+%% @doc Open the log file Name, creating or appending as necessary. Any
+%% trailing data after the last newline is truncated, ensuring that every
+%% record in the file is complete.
+open(Name) ->
+ {ok, FD} = file:open(Name, [raw, read, write, delayed_write, binary]),
+ fix_log(FD),
+ {?MODULE, Name, FD}.
+
+%% @spec name(Handle) -> string()
+%% @doc Return the path of the log file.
+name({?MODULE, Name, _FD}) ->
+ Name.
+
+%% @spec write(Handle, IoData) -> ok
+%% @doc Write IoData to the log file referenced by Handle.
+write({?MODULE, _Name, FD}, IoData) ->
+ ok = file:write(FD, [IoData, $\n]),
+ ok.
+
+%% @spec close(Handle) -> ok
+%% @doc Close the log file referenced by Handle.
+close({?MODULE, _Name, FD}) ->
+ ok = file:sync(FD),
+ ok = file:close(FD),
+ ok.
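+
+%% A minimal usage sketch (the path is hypothetical): open/1 repairs any
+%% truncated final entry, write/2 appends one newline-terminated record,
+%% and close/1 syncs before closing.
+%%   H = mochilogfile2:open("/tmp/example.log"),
+%%   ok = mochilogfile2:write(H, "hello"),
+%%   ok = mochilogfile2:close(H).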
+
+fix_log(FD) ->
+ {ok, Location} = file:position(FD, eof),
+ Seek = find_last_newline(FD, Location),
+ {ok, Seek} = file:position(FD, Seek),
+ ok = file:truncate(FD),
+ ok.
+
+%% Seek backwards to the last valid log entry
+find_last_newline(_FD, N) when N =< 1 ->
+ 0;
+find_last_newline(FD, Location) ->
+ case file:pread(FD, Location - 1, 1) of
+ {ok, <<$\n>>} ->
+ Location;
+ {ok, _} ->
+ find_last_newline(FD, Location - 1)
+ end.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+name_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "open_close_test.log"),
+ H = open(FileName),
+ ?assertEqual(
+ FileName,
+ name(H)),
+ close(H),
+ file:delete(FileName),
+ file:del_dir(D),
+ ok.
+
+open_close_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "open_close_test.log"),
+ OpenClose = fun () ->
+ H = open(FileName),
+ ?assertEqual(
+ true,
+ filelib:is_file(FileName)),
+ ok = close(H),
+ ?assertEqual(
+ {ok, <<>>},
+ file:read_file(FileName)),
+ ok
+ end,
+ OpenClose(),
+ OpenClose(),
+ file:delete(FileName),
+ file:del_dir(D),
+ ok.
+
+write_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "write_test.log"),
+ F = fun () ->
+ H = open(FileName),
+ write(H, "test line"),
+ close(H),
+ ok
+ end,
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\n">>},
+ file:read_file(FileName)),
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\ntest line\n">>},
+ file:read_file(FileName)),
+ file:delete(FileName),
+ file:del_dir(D),
+ ok.
+
+fix_log_test() ->
+ D = mochitemp:mkdtemp(),
+ FileName = filename:join(D, "write_test.log"),
+ file:write_file(FileName, <<"first line good\nsecond line bad">>),
+ F = fun () ->
+ H = open(FileName),
+ write(H, "test line"),
+ close(H),
+ ok
+ end,
+ F(),
+ ?assertEqual(
+ {ok, <<"first line good\ntest line\n">>},
+ file:read_file(FileName)),
+ file:write_file(FileName, <<"first line bad">>),
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\n">>},
+ file:read_file(FileName)),
+ F(),
+ ?assertEqual(
+ {ok, <<"test line\ntest line\n">>},
+ file:read_file(FileName)),
+    file:delete(FileName),
+    file:del_dir(D),
+    ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochinum.erl b/deps/mochiweb/src/mochinum.erl
new file mode 100644
index 00000000..a7e2bfbc
--- /dev/null
+++ b/deps/mochiweb/src/mochinum.erl
@@ -0,0 +1,331 @@
+%% @copyright 2007 Mochi Media, Inc.
+%% @author Bob Ippolito <bob@mochimedia.com>
+
+%% @doc Useful numeric algorithms for floats that cover some deficiencies
+%% in the math module. More interesting is digits/1, which implements
+%% the algorithm from:
+%% http://www.cs.indiana.edu/~burger/fp/index.html
+%% See also "Printing Floating-Point Numbers Quickly and Accurately"
+%% in Proceedings of the SIGPLAN '96 Conference on Programming Language
+%% Design and Implementation.
+
+-module(mochinum).
+-author("Bob Ippolito <bob@mochimedia.com>").
+-export([digits/1, frexp/1, int_pow/2, int_ceil/1]).
+
+%% IEEE 754 Float exponent bias
+-define(FLOAT_BIAS, 1022).
+-define(MIN_EXP, -1074).
+-define(BIG_POW, 4503599627370496).
+
+%% External API
+
+%% @spec digits(number()) -> string()
+%% @doc Returns a string that accurately represents the given integer or float
+%% using a conservative amount of digits. Great for generating
+%% human-readable output, or compact ASCII serializations for floats.
+digits(N) when is_integer(N) ->
+ integer_to_list(N);
+digits(0.0) ->
+ "0.0";
+digits(Float) ->
+ {Frac, Exp} = frexp(Float),
+ Exp1 = Exp - 53,
+ Frac1 = trunc(abs(Frac) * (1 bsl 53)),
+ [Place | Digits] = digits1(Float, Exp1, Frac1),
+ R = insert_decimal(Place, [$0 + D || D <- Digits]),
+ case Float < 0 of
+ true ->
+ [$- | R];
+ _ ->
+ R
+ end.
+
+%% @spec frexp(F::float()) -> {Frac::float(), Exp::float()}
+%% @doc Return the fractional and exponent part of an IEEE 754 double,
+%% equivalent to the libc function of the same name.
+%% F = Frac * pow(2, Exp).
+frexp(F) ->
+ frexp1(unpack(F)).
+
+%% @spec int_pow(X::integer(), N::integer()) -> Y::integer()
+%% @doc Moderately efficient way to exponentiate integers.
+%% int_pow(10, 2) = 100.
+int_pow(_X, 0) ->
+ 1;
+int_pow(X, N) when N > 0 ->
+ int_pow(X, N, 1).
+
+%% @spec int_ceil(F::float()) -> integer()
+%% @doc Return the ceiling of F as an integer. The ceiling is defined as
+%% F when F == trunc(F);
+%% trunc(F) when F &lt; 0;
+%% trunc(F) + 1 when F &gt; 0.
+int_ceil(X) ->
+ T = trunc(X),
+ case (X - T) of
+ Neg when Neg < 0 -> T;
+ Pos when Pos > 0 -> T + 1;
+ _ -> T
+ end.
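+
+%% A shell sketch of the exported API (values mirror the tests below):
+%%   1> mochinum:digits(0.1).
+%%   "0.1"
+%%   2> mochinum:frexp(1.0).
+%%   {0.5,1}
+%%   3> mochinum:int_pow(10, 3).
+%%   1000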
+
+
+%% Internal API
+
+int_pow(X, N, R) when N < 2 ->
+ R * X;
+int_pow(X, N, R) ->
+ int_pow(X * X, N bsr 1, case N band 1 of 1 -> R * X; 0 -> R end).
+
+insert_decimal(0, S) ->
+ "0." ++ S;
+insert_decimal(Place, S) when Place > 0 ->
+ L = length(S),
+ case Place - L of
+ 0 ->
+ S ++ ".0";
+ N when N < 0 ->
+ {S0, S1} = lists:split(L + N, S),
+ S0 ++ "." ++ S1;
+ N when N < 6 ->
+ %% More places than digits
+ S ++ lists:duplicate(N, $0) ++ ".0";
+ _ ->
+ insert_decimal_exp(Place, S)
+ end;
+insert_decimal(Place, S) when Place > -6 ->
+ "0." ++ lists:duplicate(abs(Place), $0) ++ S;
+insert_decimal(Place, S) ->
+ insert_decimal_exp(Place, S).
+
+insert_decimal_exp(Place, S) ->
+ [C | S0] = S,
+ S1 = case S0 of
+ [] ->
+ "0";
+ _ ->
+ S0
+ end,
+ Exp = case Place < 0 of
+ true ->
+ "e-";
+ false ->
+ "e+"
+ end,
+ [C] ++ "." ++ S1 ++ Exp ++ integer_to_list(abs(Place - 1)).
+
+
+digits1(Float, Exp, Frac) ->
+ Round = ((Frac band 1) =:= 0),
+ case Exp >= 0 of
+ true ->
+ BExp = 1 bsl Exp,
+ case (Frac =/= ?BIG_POW) of
+ true ->
+ scale((Frac * BExp * 2), 2, BExp, BExp,
+ Round, Round, Float);
+ false ->
+ scale((Frac * BExp * 4), 4, (BExp * 2), BExp,
+ Round, Round, Float)
+ end;
+ false ->
+ case (Exp =:= ?MIN_EXP) orelse (Frac =/= ?BIG_POW) of
+ true ->
+ scale((Frac * 2), 1 bsl (1 - Exp), 1, 1,
+ Round, Round, Float);
+ false ->
+ scale((Frac * 4), 1 bsl (2 - Exp), 2, 1,
+ Round, Round, Float)
+ end
+ end.
+
+scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
+ Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
+    %% Note that the Scheme implementation uses a 326-element look-up table
+    %% for int_pow(10, N); we do not.
+ case Est >= 0 of
+ true ->
+ fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
+ LowOk, HighOk);
+ false ->
+ Scale = int_pow(10, -Est),
+ fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
+ LowOk, HighOk)
+ end.
+
+fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
+ TooLow = case HighOk of
+ true ->
+ (R + MPlus) >= S;
+ false ->
+ (R + MPlus) > S
+ end,
+ case TooLow of
+ true ->
+ [(K + 1) | generate(R, S, MPlus, MMinus, LowOk, HighOk)];
+ false ->
+ [K | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)]
+ end.
+
+generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
+ D = R0 div S,
+ R = R0 rem S,
+ TC1 = case LowOk of
+ true ->
+ R =< MMinus;
+ false ->
+ R < MMinus
+ end,
+ TC2 = case HighOk of
+ true ->
+ (R + MPlus) >= S;
+ false ->
+ (R + MPlus) > S
+ end,
+ case TC1 of
+ false ->
+ case TC2 of
+ false ->
+ [D | generate(R * 10, S, MPlus * 10, MMinus * 10,
+ LowOk, HighOk)];
+ true ->
+ [D + 1]
+ end;
+ true ->
+ case TC2 of
+ false ->
+ [D];
+ true ->
+ case R * 2 < S of
+ true ->
+ [D];
+ false ->
+ [D + 1]
+ end
+ end
+ end.
+
+unpack(Float) ->
+ <<Sign:1, Exp:11, Frac:52>> = <<Float:64/float>>,
+ {Sign, Exp, Frac}.
+
+frexp1({_Sign, 0, 0}) ->
+ {0.0, 0};
+frexp1({Sign, 0, Frac}) ->
+ Exp = log2floor(Frac),
+ <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, (Frac-1):52>>,
+ {Frac1, -(?FLOAT_BIAS) - 52 + Exp};
+frexp1({Sign, Exp, Frac}) ->
+ <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, Frac:52>>,
+ {Frac1, Exp - ?FLOAT_BIAS}.
+
+log2floor(Int) ->
+ log2floor(Int, 0).
+
+log2floor(0, N) ->
+ N;
+log2floor(Int, N) ->
+ log2floor(Int bsr 1, 1 + N).
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+int_ceil_test() ->
+ 1 = int_ceil(0.0001),
+ 0 = int_ceil(0.0),
+ 1 = int_ceil(0.99),
+ 1 = int_ceil(1.0),
+ -1 = int_ceil(-1.5),
+ -2 = int_ceil(-2.0),
+ ok.
+
+int_pow_test() ->
+ 1 = int_pow(1, 1),
+ 1 = int_pow(1, 0),
+ 1 = int_pow(10, 0),
+ 10 = int_pow(10, 1),
+ 100 = int_pow(10, 2),
+ 1000 = int_pow(10, 3),
+ ok.
+
+digits_test() ->
+ ?assertEqual("0",
+ digits(0)),
+ ?assertEqual("0.0",
+ digits(0.0)),
+ ?assertEqual("1.0",
+ digits(1.0)),
+ ?assertEqual("-1.0",
+ digits(-1.0)),
+ ?assertEqual("0.1",
+ digits(0.1)),
+ ?assertEqual("0.01",
+ digits(0.01)),
+ ?assertEqual("0.001",
+ digits(0.001)),
+ ?assertEqual("1.0e+6",
+ digits(1000000.0)),
+ ?assertEqual("0.5",
+ digits(0.5)),
+ ?assertEqual("4503599627370496.0",
+ digits(4503599627370496.0)),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ ?assertEqual("4.9406564584124654e-324",
+ digits(SmallDenorm)),
+ ?assertEqual(SmallDenorm,
+ list_to_float(digits(SmallDenorm))),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ ?assertEqual("2.225073858507201e-308",
+ digits(BigDenorm)),
+ ?assertEqual(BigDenorm,
+ list_to_float(digits(BigDenorm))),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ ?assertEqual("2.2250738585072014e-308",
+ digits(SmallNorm)),
+ ?assertEqual(SmallNorm,
+ list_to_float(digits(SmallNorm))),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ ?assertEqual("1.7976931348623157e+308",
+ digits(LargeNorm)),
+ ?assertEqual(LargeNorm,
+ list_to_float(digits(LargeNorm))),
+ ok.
+
+frexp_test() ->
+ %% zero
+ {0.0, 0} = frexp(0.0),
+ %% one
+ {0.5, 1} = frexp(1.0),
+ %% negative one
+ {-0.5, 1} = frexp(-1.0),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ {0.5, -1073} = frexp(SmallDenorm),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ {0.99999999999999978, -1022} = frexp(BigDenorm),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ {0.5, -1021} = frexp(SmallNorm),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ {0.99999999999999989, 1024} = frexp(LargeNorm),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochitemp.erl b/deps/mochiweb/src/mochitemp.erl
new file mode 100644
index 00000000..bb23d2a6
--- /dev/null
+++ b/deps/mochiweb/src/mochitemp.erl
@@ -0,0 +1,310 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc Create temporary files and directories. Requires crypto to be started.
+
+-module(mochitemp).
+-export([gettempdir/0]).
+-export([mkdtemp/0, mkdtemp/3]).
+-export([rmtempdir/1]).
+%% -export([mkstemp/4]).
+-define(SAFE_CHARS, {$a, $b, $c, $d, $e, $f, $g, $h, $i, $j, $k, $l, $m,
+ $n, $o, $p, $q, $r, $s, $t, $u, $v, $w, $x, $y, $z,
+ $A, $B, $C, $D, $E, $F, $G, $H, $I, $J, $K, $L, $M,
+ $N, $O, $P, $Q, $R, $S, $T, $U, $V, $W, $X, $Y, $Z,
+ $0, $1, $2, $3, $4, $5, $6, $7, $8, $9, $_}).
+-define(TMP_MAX, 10000).
+
+-include_lib("kernel/include/file.hrl").
+
+%% TODO: An ugly wrapper over the mktemp tool with open_port and sadness?
+%% We can't implement this race-free in Erlang without the ability
+%% to issue O_CREAT|O_EXCL. I suppose we could hack something with
+%% mkdtemp, del_dir, open.
+%% mkstemp(Suffix, Prefix, Dir, Options) ->
+%% ok.
+
+rmtempdir(Dir) ->
+ case file:del_dir(Dir) of
+ {error, eexist} ->
+ ok = rmtempdirfiles(Dir),
+ ok = file:del_dir(Dir);
+ ok ->
+ ok
+ end.
+
+rmtempdirfiles(Dir) ->
+ {ok, Files} = file:list_dir(Dir),
+ ok = rmtempdirfiles(Dir, Files).
+
+rmtempdirfiles(_Dir, []) ->
+ ok;
+rmtempdirfiles(Dir, [Basename | Rest]) ->
+ Path = filename:join([Dir, Basename]),
+ case filelib:is_dir(Path) of
+ true ->
+ ok = rmtempdir(Path);
+ false ->
+ ok = file:delete(Path)
+ end,
+ rmtempdirfiles(Dir, Rest).
+
+mkdtemp() ->
+ mkdtemp("", "tmp", gettempdir()).
+
+mkdtemp(Suffix, Prefix, Dir) ->
+ mkdtemp_n(rngpath_fun(Suffix, Prefix, Dir), ?TMP_MAX).
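+
+%% A minimal usage sketch; crypto must be started first, and the six
+%% random characters in the directory name vary per call:
+%%   crypto:start(),
+%%   Dir = mochitemp:mkdtemp(),
+%%   %% ... create temporary files under Dir ...
+%%   ok = mochitemp:rmtempdir(Dir).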
+
+mkdtemp_n(RngPath, 1) ->
+ make_dir(RngPath());
+mkdtemp_n(RngPath, N) ->
+ try make_dir(RngPath())
+ catch throw:{error, eexist} ->
+ mkdtemp_n(RngPath, N - 1)
+ end.
+
+make_dir(Path) ->
+ case file:make_dir(Path) of
+ ok ->
+ ok;
+ E={error, eexist} ->
+ throw(E)
+ end,
+    %% Small window for a race condition here because the dir is initially
+    %% created with mode 777 before being restricted to 0700 below.
+ ok = file:write_file_info(Path, #file_info{mode=8#0700}),
+ Path.
+
+%% Callers pass Suffix first (see mkdtemp/3), so bind it first here; the
+%% generated name is Prefix ++ six random chars ++ Suffix.
+rngpath_fun(Suffix, Prefix, Dir) ->
+ fun () ->
+ filename:join([Dir, Prefix ++ rngchars(6) ++ Suffix])
+ end.
+
+rngchars(0) ->
+ "";
+rngchars(N) ->
+ [rngchar() | rngchars(N - 1)].
+
+rngchar() ->
+ rngchar(crypto:rand_uniform(0, tuple_size(?SAFE_CHARS))).
+
+rngchar(C) ->
+ element(1 + C, ?SAFE_CHARS).
+
+%% @spec gettempdir() -> string()
+%% @doc Get a usable temporary directory using the first of these that is a directory:
+%% $TMPDIR, $TMP, $TEMP, "/tmp", "/var/tmp", "/usr/tmp", ".".
+gettempdir() ->
+ gettempdir(gettempdir_checks(), fun normalize_dir/1).
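+
+%% A shell sketch; the result depends on the environment, e.g. with no
+%% TMPDIR/TMP/TEMP set on a typical Unix system:
+%%   1> mochitemp:gettempdir().
+%%   "/tmp"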
+
+gettempdir_checks() ->
+ [{fun os:getenv/1, ["TMPDIR", "TMP", "TEMP"]},
+ {fun gettempdir_identity/1, ["/tmp", "/var/tmp", "/usr/tmp"]},
+ {fun gettempdir_cwd/1, [cwd]}].
+
+gettempdir_identity(L) ->
+ L.
+
+gettempdir_cwd(cwd) ->
+ {ok, L} = file:get_cwd(),
+ L.
+
+gettempdir([{_F, []} | RestF], Normalize) ->
+ gettempdir(RestF, Normalize);
+gettempdir([{F, [L | RestL]} | RestF], Normalize) ->
+ case Normalize(F(L)) of
+ false ->
+ gettempdir([{F, RestL} | RestF], Normalize);
+ Dir ->
+ Dir
+ end.
+
+normalize_dir(False) when False =:= false orelse False =:= "" ->
+    %% os:getenv/1 returns false for unset variables; treat "" the same way.
+ false;
+normalize_dir(L) ->
+ Dir = filename:absname(L),
+ case filelib:is_dir(Dir) of
+ false ->
+ false;
+ true ->
+ Dir
+ end.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+pushenv(L) ->
+ [{K, os:getenv(K)} || K <- L].
+popenv(L) ->
+ F = fun ({K, false}) ->
+                %% Erlang has no unsetenv; approximate by setting the empty string.
+ os:putenv(K, "");
+ ({K, V}) ->
+ os:putenv(K, V)
+ end,
+ lists:foreach(F, L).
+
+gettempdir_fallback_test() ->
+ ?assertEqual(
+ "/",
+ gettempdir([{fun gettempdir_identity/1, ["/--not-here--/"]},
+ {fun gettempdir_identity/1, ["/"]}],
+ fun normalize_dir/1)),
+ ?assertEqual(
+ "/",
+ %% simulate a true os:getenv unset env
+ gettempdir([{fun gettempdir_identity/1, [false]},
+ {fun gettempdir_identity/1, ["/"]}],
+ fun normalize_dir/1)),
+ ok.
+
+gettempdir_identity_test() ->
+ ?assertEqual(
+ "/",
+ gettempdir([{fun gettempdir_identity/1, ["/"]}], fun normalize_dir/1)),
+ ok.
+
+gettempdir_cwd_test() ->
+ {ok, Cwd} = file:get_cwd(),
+ ?assertEqual(
+ normalize_dir(Cwd),
+ gettempdir([{fun gettempdir_cwd/1, [cwd]}], fun normalize_dir/1)),
+ ok.
+
+rngchars_test() ->
+ crypto:start(),
+ ?assertEqual(
+ "",
+ rngchars(0)),
+ ?assertEqual(
+ 10,
+ length(rngchars(10))),
+ ok.
+
+rngchar_test() ->
+ ?assertEqual(
+ $a,
+ rngchar(0)),
+ ?assertEqual(
+ $A,
+ rngchar(26)),
+ ?assertEqual(
+ $_,
+ rngchar(62)),
+ ok.
+
+mkdtemp_n_failonce_test() ->
+ crypto:start(),
+ D = mkdtemp(),
+ Path = filename:join([D, "testdir"]),
+ %% Toggle the existence of a dir so that it fails
+ %% the first time and succeeds the second.
+ F = fun () ->
+ case filelib:is_dir(Path) of
+ true ->
+ file:del_dir(Path);
+ false ->
+ file:make_dir(Path)
+ end,
+ Path
+ end,
+ try
+ %% Fails the first time
+ ?assertThrow(
+ {error, eexist},
+ mkdtemp_n(F, 1)),
+ %% Reset state
+ file:del_dir(Path),
+ %% Succeeds the second time
+ ?assertEqual(
+ Path,
+ mkdtemp_n(F, 2))
+ after rmtempdir(D)
+ end,
+ ok.
+
+mkdtemp_n_fail_test() ->
+ {ok, Cwd} = file:get_cwd(),
+ ?assertThrow(
+ {error, eexist},
+ mkdtemp_n(fun () -> Cwd end, 1)),
+ ?assertThrow(
+ {error, eexist},
+ mkdtemp_n(fun () -> Cwd end, 2)),
+ ok.
+
+make_dir_fail_test() ->
+ {ok, Cwd} = file:get_cwd(),
+ ?assertThrow(
+ {error, eexist},
+ make_dir(Cwd)),
+ ok.
+
+mkdtemp_test() ->
+ crypto:start(),
+ D = mkdtemp(),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D)),
+ ?assertEqual(
+ ok,
+ file:del_dir(D)),
+ ok.
+
+rmtempdir_test() ->
+ crypto:start(),
+ D1 = mkdtemp(),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D1)),
+ ?assertEqual(
+ ok,
+ rmtempdir(D1)),
+ D2 = mkdtemp(),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D2)),
+ ok = file:write_file(filename:join([D2, "foo"]), <<"bytes">>),
+ D3 = mkdtemp("suffix", "prefix", D2),
+ ?assertEqual(
+ true,
+ filelib:is_dir(D3)),
+ ok = file:write_file(filename:join([D3, "foo"]), <<"bytes">>),
+ ?assertEqual(
+ ok,
+ rmtempdir(D2)),
+ ?assertEqual(
+ {error, enoent},
+ file:consult(D3)),
+ ?assertEqual(
+ {error, enoent},
+ file:consult(D2)),
+ ok.
+
+gettempdir_env_test() ->
+ Env = pushenv(["TMPDIR", "TEMP", "TMP"]),
+ FalseEnv = [{"TMPDIR", false}, {"TEMP", false}, {"TMP", false}],
+ try
+ popenv(FalseEnv),
+ popenv([{"TMPDIR", "/"}]),
+ ?assertEqual(
+ "/",
+ os:getenv("TMPDIR")),
+ ?assertEqual(
+ "/",
+ gettempdir()),
+ {ok, Cwd} = file:get_cwd(),
+ popenv(FalseEnv),
+ popenv([{"TMP", Cwd}]),
+ ?assertEqual(
+ normalize_dir(Cwd),
+ gettempdir())
+ after popenv(Env)
+ end,
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochiutf8.erl b/deps/mochiweb/src/mochiutf8.erl
new file mode 100644
index 00000000..206e1186
--- /dev/null
+++ b/deps/mochiweb/src/mochiutf8.erl
@@ -0,0 +1,316 @@
+%% @copyright 2010 Mochi Media, Inc.
+%% @author Bob Ippolito <bob@mochimedia.com>
+
+%% @doc Algorithm to convert any binary to a valid UTF-8 sequence by ignoring
+%% invalid bytes.
+
+-module(mochiutf8).
+-export([valid_utf8_bytes/1, codepoint_to_bytes/1, bytes_to_codepoints/1]).
+-export([bytes_foldl/3, codepoint_foldl/3, read_codepoint/1, len/1]).
+
+%% External API
+
+-type unichar_low() :: 0..16#d7ff.
+-type unichar_high() :: 16#e000..16#10ffff.
+-type unichar() :: unichar_low() | unichar_high().
+
+-spec codepoint_to_bytes(unichar()) -> binary().
+%% @doc Convert a unicode codepoint to UTF-8 bytes.
+codepoint_to_bytes(C) when (C >= 16#00 andalso C =< 16#7f) ->
+ %% U+0000 - U+007F - 7 bits
+ <<C>>;
+codepoint_to_bytes(C) when (C >= 16#080 andalso C =< 16#07FF) ->
+ %% U+0080 - U+07FF - 11 bits
+ <<0:5, B1:5, B0:6>> = <<C:16>>,
+ <<2#110:3, B1:5,
+ 2#10:2, B0:6>>;
+codepoint_to_bytes(C) when (C >= 16#0800 andalso C =< 16#FFFF) andalso
+ (C < 16#D800 orelse C > 16#DFFF) ->
+    %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
+ <<B2:4, B1:6, B0:6>> = <<C:16>>,
+ <<2#1110:4, B2:4,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6>>;
+codepoint_to_bytes(C) when (C >= 16#010000 andalso C =< 16#10FFFF) ->
+ %% U+10000 - U+10FFFF - 21 bits
+ <<0:3, B3:3, B2:6, B1:6, B0:6>> = <<C:24>>,
+ <<2#11110:5, B3:3,
+ 2#10:2, B2:6,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6>>.
+
+-spec codepoints_to_bytes([unichar()]) -> binary().
+%% @doc Convert a list of codepoints to a UTF-8 binary.
+codepoints_to_bytes(L) ->
+ <<<<(codepoint_to_bytes(C))/binary>> || C <- L>>.
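+
+%% A shell sketch; U+2603 (snowman) encodes to the same three bytes used
+%% in the tests below:
+%%   1> mochiutf8:codepoint_to_bytes(16#2603).
+%%   <<226,152,131>>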
+
+-spec read_codepoint(binary()) -> {unichar(), binary(), binary()}.
+read_codepoint(Bin = <<2#0:1, C:7, Rest/binary>>) ->
+ %% U+0000 - U+007F - 7 bits
+ <<B:1/binary, _/binary>> = Bin,
+ {C, B, Rest};
+read_codepoint(Bin = <<2#110:3, B1:5,
+ 2#10:2, B0:6,
+ Rest/binary>>) ->
+ %% U+0080 - U+07FF - 11 bits
+ case <<B1:5, B0:6>> of
+ <<C:11>> when C >= 16#80 ->
+ <<B:2/binary, _/binary>> = Bin,
+ {C, B, Rest}
+ end;
+read_codepoint(Bin = <<2#1110:4, B2:4,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6,
+ Rest/binary>>) ->
+    %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
+ case <<B2:4, B1:6, B0:6>> of
+ <<C:16>> when (C >= 16#0800 andalso C =< 16#FFFF) andalso
+ (C < 16#D800 orelse C > 16#DFFF) ->
+ <<B:3/binary, _/binary>> = Bin,
+ {C, B, Rest}
+ end;
+read_codepoint(Bin = <<2#11110:5, B3:3,
+ 2#10:2, B2:6,
+ 2#10:2, B1:6,
+ 2#10:2, B0:6,
+ Rest/binary>>) ->
+ %% U+10000 - U+10FFFF - 21 bits
+ case <<B3:3, B2:6, B1:6, B0:6>> of
+ <<C:21>> when (C >= 16#010000 andalso C =< 16#10FFFF) ->
+ <<B:4/binary, _/binary>> = Bin,
+ {C, B, Rest}
+ end.
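+
+%% A shell sketch: read_codepoint/1 returns the decoded codepoint, the
+%% bytes that encoded it, and the remaining binary (16#2603 =:= 9731):
+%%   1> mochiutf8:read_codepoint(<<226,152,131,"!">>).
+%%   {9731,<<226,152,131>>,<<"!">>}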
+
+-spec codepoint_foldl(fun((unichar(), _) -> _), _, binary()) -> _.
+codepoint_foldl(F, Acc, <<>>) when is_function(F, 2) ->
+ Acc;
+codepoint_foldl(F, Acc, Bin) ->
+ {C, _, Rest} = read_codepoint(Bin),
+ codepoint_foldl(F, F(C, Acc), Rest).
+
+-spec bytes_foldl(fun((binary(), _) -> _), _, binary()) -> _.
+bytes_foldl(F, Acc, <<>>) when is_function(F, 2) ->
+ Acc;
+bytes_foldl(F, Acc, Bin) ->
+ {_, B, Rest} = read_codepoint(Bin),
+ bytes_foldl(F, F(B, Acc), Rest).
+
+-spec bytes_to_codepoints(binary()) -> [unichar()].
+bytes_to_codepoints(B) ->
+ lists:reverse(codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [], B)).
+
+-spec len(binary()) -> non_neg_integer().
+len(<<>>) ->
+ 0;
+len(B) ->
+ {_, _, Rest} = read_codepoint(B),
+ 1 + len(Rest).
+
+-spec valid_utf8_bytes(B::binary()) -> binary().
+%% @doc Return only the bytes in B that represent valid UTF-8. Uses
+%% the following recursive algorithm: skip one byte if B does not
+%% follow UTF-8 syntax (a 1-4 byte encoding of some number),
+%% or skip a sequence of 2-4 bytes if it represents an overlong encoding
+%% or a bad code point (a surrogate U+D800 - U+DFFF, or above U+10FFFF).
+valid_utf8_bytes(B) when is_binary(B) ->
+ binary_skip_bytes(B, invalid_utf8_indexes(B)).
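+
+%% A shell sketch (mirrors valid_utf8_bytes_test below); the overlong
+%% 2-byte encoding of $! is dropped:
+%%   1> mochiutf8:valid_utf8_bytes(<<"bad: ", 16#C0, 16#A1>>).
+%%   <<"bad: ">>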
+
+%% Internal API
+
+-spec binary_skip_bytes(binary(), [non_neg_integer()]) -> binary().
+%% @doc Return B, but skipping the 0-based indexes in L.
+binary_skip_bytes(B, []) ->
+ B;
+binary_skip_bytes(B, L) ->
+ binary_skip_bytes(B, L, 0, []).
+
+%% @private
+-spec binary_skip_bytes(binary(), [non_neg_integer()], non_neg_integer(), iolist()) -> binary().
+binary_skip_bytes(B, [], _N, Acc) ->
+ iolist_to_binary(lists:reverse([B | Acc]));
+binary_skip_bytes(<<_, RestB/binary>>, [N | RestL], N, Acc) ->
+ binary_skip_bytes(RestB, RestL, 1 + N, Acc);
+binary_skip_bytes(<<C, RestB/binary>>, L, N, Acc) ->
+ binary_skip_bytes(RestB, L, 1 + N, [C | Acc]).
+
+-spec invalid_utf8_indexes(binary()) -> [non_neg_integer()].
+%% @doc Return the 0-based indexes in B that are not valid UTF-8.
+invalid_utf8_indexes(B) ->
+ invalid_utf8_indexes(B, 0, []).
+
+%% @private
+-spec invalid_utf8_indexes(binary(), non_neg_integer(), [non_neg_integer()]) -> [non_neg_integer()].
+invalid_utf8_indexes(<<C, Rest/binary>>, N, Acc) when C < 16#80 ->
+ %% U+0000 - U+007F - 7 bits
+ invalid_utf8_indexes(Rest, 1 + N, Acc);
+invalid_utf8_indexes(<<C1, C2, Rest/binary>>, N, Acc)
+ when C1 band 16#E0 =:= 16#C0,
+ C2 band 16#C0 =:= 16#80 ->
+ %% U+0080 - U+07FF - 11 bits
+ case ((C1 band 16#1F) bsl 6) bor (C2 band 16#3F) of
+ C when C < 16#80 ->
+ %% Overlong encoding.
+ invalid_utf8_indexes(Rest, 2 + N, [1 + N, N | Acc]);
+ _ ->
+ %% Upper bound U+07FF does not need to be checked
+ invalid_utf8_indexes(Rest, 2 + N, Acc)
+ end;
+invalid_utf8_indexes(<<C1, C2, C3, Rest/binary>>, N, Acc)
+ when C1 band 16#F0 =:= 16#E0,
+ C2 band 16#C0 =:= 16#80,
+ C3 band 16#C0 =:= 16#80 ->
+ %% U+0800 - U+FFFF - 16 bits
+ case ((((C1 band 16#0F) bsl 6) bor (C2 band 16#3F)) bsl 6) bor
+ (C3 band 16#3F) of
+ C when (C < 16#800) orelse (C >= 16#D800 andalso C =< 16#DFFF) ->
+ %% Overlong encoding or surrogate.
+ invalid_utf8_indexes(Rest, 3 + N, [2 + N, 1 + N, N | Acc]);
+ _ ->
+ %% Upper bound U+FFFF does not need to be checked
+ invalid_utf8_indexes(Rest, 3 + N, Acc)
+ end;
+invalid_utf8_indexes(<<C1, C2, C3, C4, Rest/binary>>, N, Acc)
+ when C1 band 16#F8 =:= 16#F0,
+ C2 band 16#C0 =:= 16#80,
+ C3 band 16#C0 =:= 16#80,
+ C4 band 16#C0 =:= 16#80 ->
+ %% U+10000 - U+10FFFF - 21 bits
+ case ((((((C1 band 16#0F) bsl 6) bor (C2 band 16#3F)) bsl 6) bor
+ (C3 band 16#3F)) bsl 6) bor (C4 band 16#3F) of
+ C when (C < 16#10000) orelse (C > 16#10FFFF) ->
+ %% Overlong encoding or invalid code point.
+ invalid_utf8_indexes(Rest, 4 + N, [3 + N, 2 + N, 1 + N, N | Acc]);
+ _ ->
+ invalid_utf8_indexes(Rest, 4 + N, Acc)
+ end;
+invalid_utf8_indexes(<<_, Rest/binary>>, N, Acc) ->
+ %% Invalid char
+ invalid_utf8_indexes(Rest, 1 + N, [N | Acc]);
+invalid_utf8_indexes(<<>>, _N, Acc) ->
+ lists:reverse(Acc).
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+binary_skip_bytes_test() ->
+ ?assertEqual(<<"foo">>,
+ binary_skip_bytes(<<"foo">>, [])),
+ ?assertEqual(<<"foobar">>,
+ binary_skip_bytes(<<"foo bar">>, [3])),
+ ?assertEqual(<<"foo">>,
+ binary_skip_bytes(<<"foo bar">>, [3, 4, 5, 6])),
+ ?assertEqual(<<"oo bar">>,
+ binary_skip_bytes(<<"foo bar">>, [0])),
+ ok.
+
+invalid_utf8_indexes_test() ->
+ ?assertEqual(
+ [],
+ invalid_utf8_indexes(<<"unicode snowman for you: ", 226, 152, 131>>)),
+ ?assertEqual(
+ [0],
+ invalid_utf8_indexes(<<128>>)),
+ ?assertEqual(
+ [57,59,60,64,66,67],
+ invalid_utf8_indexes(<<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (",
+ 167, 65, 170, 186, 73, 83, 80, 166, 87, 186, 217, 41, 41>>)),
+ ok.
+
+codepoint_to_bytes_test() ->
+ %% U+0000 - U+007F - 7 bits
+ %% U+0080 - U+07FF - 11 bits
+    %% U+0800 - U+FFFF - 16 bits (excluding UTF-16 surrogate code points)
+ %% U+10000 - U+10FFFF - 21 bits
+ ?assertEqual(
+ <<"a">>,
+ codepoint_to_bytes($a)),
+ ?assertEqual(
+ <<16#c2, 16#80>>,
+ codepoint_to_bytes(16#80)),
+ ?assertEqual(
+ <<16#df, 16#bf>>,
+ codepoint_to_bytes(16#07ff)),
+ ?assertEqual(
+ <<16#ef, 16#bf, 16#bf>>,
+ codepoint_to_bytes(16#ffff)),
+ ?assertEqual(
+ <<16#f4, 16#8f, 16#bf, 16#bf>>,
+ codepoint_to_bytes(16#10ffff)),
+ ok.
+
+bytes_foldl_test() ->
+ ?assertEqual(
+ <<"abc">>,
+ bytes_foldl(fun (B, Acc) -> <<Acc/binary, B/binary>> end, <<>>, <<"abc">>)),
+ ?assertEqual(
+ <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>,
+ bytes_foldl(fun (B, Acc) -> <<Acc/binary, B/binary>> end, <<>>,
+ <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
+ ok.
+
+bytes_to_codepoints_test() ->
+ ?assertEqual(
+ "abc" ++ [16#2603, 16#4e2d, 16#85, 16#10ffff],
+ bytes_to_codepoints(<<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
+ ok.
+
+codepoint_foldl_test() ->
+ ?assertEqual(
+ "cba",
+ codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [], <<"abc">>)),
+ ?assertEqual(
+ [16#10ffff, 16#85, 16#4e2d, 16#2603 | "cba"],
+ codepoint_foldl(fun (C, Acc) -> [C | Acc] end, [],
+ <<"abc", 226, 152, 131, 228, 184, 173, 194, 133, 244,143,191,191>>)),
+ ok.
+
+len_test() ->
+ ?assertEqual(
+ 29,
+ len(<<"unicode snowman for you: ", 226, 152, 131, 228, 184, 173, 194, 133, 244, 143, 191, 191>>)),
+ ok.
+
+codepoints_to_bytes_test() ->
+ ?assertEqual(
+ iolist_to_binary(lists:map(fun codepoint_to_bytes/1, lists:seq(1, 1000))),
+ codepoints_to_bytes(lists:seq(1, 1000))),
+ ok.
+
+valid_utf8_bytes_test() ->
+ ?assertEqual(
+ <<"invalid U+11ffff: ">>,
+ valid_utf8_bytes(<<"invalid U+11ffff: ", 244, 159, 191, 191>>)),
+ ?assertEqual(
+ <<"U+10ffff: ", 244, 143, 191, 191>>,
+ valid_utf8_bytes(<<"U+10ffff: ", 244, 143, 191, 191>>)),
+ ?assertEqual(
+ <<"overlong 2-byte encoding (a): ">>,
+ valid_utf8_bytes(<<"overlong 2-byte encoding (a): ", 2#11000001, 2#10100001>>)),
+ ?assertEqual(
+ <<"overlong 2-byte encoding (!): ">>,
+ valid_utf8_bytes(<<"overlong 2-byte encoding (!): ", 2#11000000, 2#10100001>>)),
+ ?assertEqual(
+ <<"mu: ", 194, 181>>,
+ valid_utf8_bytes(<<"mu: ", 194, 181>>)),
+ ?assertEqual(
+ <<"bad coding bytes: ">>,
+ valid_utf8_bytes(<<"bad coding bytes: ", 2#10011111, 2#10111111, 2#11111111>>)),
+ ?assertEqual(
+ <<"low surrogate (unpaired): ">>,
+ valid_utf8_bytes(<<"low surrogate (unpaired): ", 237, 176, 128>>)),
+ ?assertEqual(
+ <<"high surrogate (unpaired): ">>,
+ valid_utf8_bytes(<<"high surrogate (unpaired): ", 237, 191, 191>>)),
+ ?assertEqual(
+ <<"unicode snowman for you: ", 226, 152, 131>>,
+ valid_utf8_bytes(<<"unicode snowman for you: ", 226, 152, 131>>)),
+ ?assertEqual(
+ <<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (AISPW))">>,
+ valid_utf8_bytes(<<"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; (",
+ 167, 65, 170, 186, 73, 83, 80, 166, 87, 186, 217, 41, 41>>)),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb.app.src b/deps/mochiweb/src/mochiweb.app.src
new file mode 100644
index 00000000..1df31175
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb.app.src
@@ -0,0 +1,9 @@
+%% This is generated from src/mochiweb.app.src
+{application, mochiweb,
+ [{description, "MochiMedia Web Server"},
+ {vsn, git},
+ {modules, []},
+ {registered, []},
+ {mod, {mochiweb_app, []}},
+ {env, []},
+ {applications, [kernel, stdlib, crypto, inets]}]}.
diff --git a/deps/mochiweb/src/mochiweb.erl b/deps/mochiweb/src/mochiweb.erl
new file mode 100644
index 00000000..3118028b
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb.erl
@@ -0,0 +1,289 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Start and stop the MochiWeb server.
+
+-module(mochiweb).
+-author('bob@mochimedia.com').
+
+-export([start/0, stop/0]).
+-export([new_request/1, new_response/1]).
+-export([all_loaded/0, all_loaded/1, reload/0]).
+
+%% @spec start() -> ok
+%% @doc Start the MochiWeb server.
+start() ->
+ ensure_started(crypto),
+ application:start(mochiweb).
+
+%% @spec stop() -> ok
+%% @doc Stop the MochiWeb server.
+stop() ->
+ Res = application:stop(mochiweb),
+ application:stop(crypto),
+ Res.
+
+reload() ->
+ [c:l(Module) || Module <- all_loaded()].
+
+all_loaded() ->
+ all_loaded(filename:dirname(code:which(?MODULE))).
+
+all_loaded(Base) when is_atom(Base) ->
+ [];
+all_loaded(Base) ->
+ FullBase = Base ++ "/",
+ F = fun ({_Module, Loaded}, Acc) when is_atom(Loaded) ->
+ Acc;
+ ({Module, Loaded}, Acc) ->
+ case lists:prefix(FullBase, Loaded) of
+ true ->
+ [Module | Acc];
+ false ->
+ Acc
+ end
+ end,
+ lists:foldl(F, [], code:all_loaded()).
+
+
+%% @spec new_request({Socket, Request, Headers}) -> MochiWebRequest
+%% @doc Return a mochiweb_request data structure.
+new_request({Socket, {Method, {abs_path, Uri}, Version}, Headers}) ->
+ mochiweb_request:new(Socket,
+ Method,
+ Uri,
+ Version,
+ mochiweb_headers:make(Headers));
+%% This case probably does not occur in practice.
+new_request({Socket, {Method, {absoluteURI, _Protocol, _Host, _Port, Uri},
+ Version}, Headers}) ->
+ mochiweb_request:new(Socket,
+ Method,
+ Uri,
+ Version,
+ mochiweb_headers:make(Headers));
+%% Request-URI is "*"
+%% From http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
+new_request({Socket, {Method, '*'=Uri, Version}, Headers}) ->
+ mochiweb_request:new(Socket,
+ Method,
+ Uri,
+ Version,
+ mochiweb_headers:make(Headers)).
+
+%% @spec new_response({Request, integer(), Headers}) -> MochiWebResponse
+%% @doc Return a mochiweb_response data structure.
+new_response({Request, Code, Headers}) ->
+ mochiweb_response:new(Request,
+ Code,
+ mochiweb_headers:make(Headers)).
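+
+%% A hypothetical sketch, assuming Socket is an accepted socket; both
+%% constructors normalize the header list via mochiweb_headers:make/1:
+%%   Req = mochiweb:new_request({Socket,
+%%                               {'GET', {abs_path, "/"}, {1, 1}},
+%%                               [{"Host", "localhost"}]}),
+%%   Resp = mochiweb:new_response({Req, 200, [{"Content-Type", "text/plain"}]}).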
+
+%% Internal API
+
+ensure_started(App) ->
+ case application:start(App) of
+ ok ->
+ ok;
+ {error, {already_started, App}} ->
+ ok
+ end.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+-record(treq, {path, body= <<>>, xreply= <<>>}).
+
+ssl_cert_opts() ->
+ EbinDir = filename:dirname(code:which(?MODULE)),
+ CertDir = filename:join([EbinDir, "..", "support", "test-materials"]),
+ CertFile = filename:join(CertDir, "test_ssl_cert.pem"),
+ KeyFile = filename:join(CertDir, "test_ssl_key.pem"),
+ [{certfile, CertFile}, {keyfile, KeyFile}].
+
+with_server(Transport, ServerFun, ClientFun) ->
+ ServerOpts0 = [{ip, "127.0.0.1"}, {port, 0}, {loop, ServerFun}],
+ ServerOpts = case Transport of
+ plain ->
+ ServerOpts0;
+ ssl ->
+ ServerOpts0 ++ [{ssl, true}, {ssl_opts, ssl_cert_opts()}]
+ end,
+ {ok, Server} = mochiweb_http:start(ServerOpts),
+ Port = mochiweb_socket_server:get(Server, port),
+ Res = (catch ClientFun(Transport, Port)),
+ mochiweb_http:stop(Server),
+ Res.
+
+request_test() ->
+ R = mochiweb_request:new(z, z, "/foo/bar/baz%20wibble+quux?qs=2", z, []),
+ "/foo/bar/baz wibble quux" = R:get(path),
+ ok.
+
+single_http_GET_test() ->
+ do_GET(plain, 1).
+
+single_https_GET_test() ->
+ do_GET(ssl, 1).
+
+multiple_http_GET_test() ->
+ do_GET(plain, 3).
+
+multiple_https_GET_test() ->
+ do_GET(ssl, 3).
+
+hundred_http_GET_test() ->
+ do_GET(plain, 100).
+
+hundred_https_GET_test() ->
+ do_GET(ssl, 100).
+
+single_128_http_POST_test() ->
+ do_POST(plain, 128, 1).
+
+single_128_https_POST_test() ->
+ do_POST(ssl, 128, 1).
+
+single_2k_http_POST_test() ->
+ do_POST(plain, 2048, 1).
+
+single_2k_https_POST_test() ->
+ do_POST(ssl, 2048, 1).
+
+single_100k_http_POST_test() ->
+ do_POST(plain, 102400, 1).
+
+single_100k_https_POST_test() ->
+ do_POST(ssl, 102400, 1).
+
+multiple_100k_http_POST_test() ->
+ do_POST(plain, 102400, 3).
+
+multiple_100K_https_POST_test() ->
+ do_POST(ssl, 102400, 3).
+
+hundred_128_http_POST_test() ->
+ do_POST(plain, 128, 100).
+
+hundred_128_https_POST_test() ->
+ do_POST(ssl, 128, 100).
+
+do_GET(Transport, Times) ->
+ PathPrefix = "/whatever/",
+ ReplyPrefix = "You requested: ",
+ ServerFun = fun (Req) ->
+ Reply = ReplyPrefix ++ Req:get(path),
+ Req:ok({"text/plain", Reply})
+ end,
+ TestReqs = [begin
+ Path = PathPrefix ++ integer_to_list(N),
+ ExpectedReply = list_to_binary(ReplyPrefix ++ Path),
+ #treq{path=Path, xreply=ExpectedReply}
+ end || N <- lists:seq(1, Times)],
+ ClientFun = new_client_fun('GET', TestReqs),
+ ok = with_server(Transport, ServerFun, ClientFun),
+ ok.
+
+do_POST(Transport, Size, Times) ->
+ ServerFun = fun (Req) ->
+ Body = Req:recv_body(),
+ Headers = [{"Content-Type", "application/octet-stream"}],
+ Req:respond({201, Headers, Body})
+ end,
+ TestReqs = [begin
+ Path = "/stuff/" ++ integer_to_list(N),
+ Body = crypto:rand_bytes(Size),
+ #treq{path=Path, body=Body, xreply=Body}
+ end || N <- lists:seq(1, Times)],
+ ClientFun = new_client_fun('POST', TestReqs),
+ ok = with_server(Transport, ServerFun, ClientFun),
+ ok.
+
+new_client_fun(Method, TestReqs) ->
+ fun (Transport, Port) ->
+ client_request(Transport, Port, Method, TestReqs)
+ end.
+
+client_request(Transport, Port, Method, TestReqs) ->
+ Opts = [binary, {active, false}, {packet, http}],
+ SockFun = case Transport of
+ plain ->
+ {ok, Socket} = gen_tcp:connect("127.0.0.1", Port, Opts),
+ fun (recv) ->
+ gen_tcp:recv(Socket, 0);
+ ({recv, Length}) ->
+ gen_tcp:recv(Socket, Length);
+ ({send, Data}) ->
+ gen_tcp:send(Socket, Data);
+ ({setopts, L}) ->
+ inet:setopts(Socket, L)
+ end;
+ ssl ->
+ {ok, Socket} = ssl:connect("127.0.0.1", Port, [{ssl_imp, new} | Opts]),
+ fun (recv) ->
+ ssl:recv(Socket, 0);
+ ({recv, Length}) ->
+ ssl:recv(Socket, Length);
+ ({send, Data}) ->
+ ssl:send(Socket, Data);
+ ({setopts, L}) ->
+ ssl:setopts(Socket, L)
+ end
+ end,
+ client_request(SockFun, Method, TestReqs).
+
+client_request(SockFun, _Method, []) ->
+ {the_end, {error, closed}} = {the_end, SockFun(recv)},
+ ok;
+client_request(SockFun, Method,
+ [#treq{path=Path, body=Body, xreply=ExReply} | Rest]) ->
+ Request = [atom_to_list(Method), " ", Path, " HTTP/1.1\r\n",
+ client_headers(Body, Rest =:= []),
+ "\r\n",
+ Body],
+ ok = SockFun({send, Request}),
+ case Method of
+ 'GET' ->
+ {ok, {http_response, {1,1}, 200, "OK"}} = SockFun(recv);
+ 'POST' ->
+ {ok, {http_response, {1,1}, 201, "Created"}} = SockFun(recv)
+ end,
+ ok = SockFun({setopts, [{packet, httph}]}),
+ {ok, {http_header, _, 'Server', _, "MochiWeb" ++ _}} = SockFun(recv),
+ {ok, {http_header, _, 'Date', _, _}} = SockFun(recv),
+ {ok, {http_header, _, 'Content-Type', _, _}} = SockFun(recv),
+ {ok, {http_header, _, 'Content-Length', _, ConLenStr}} = SockFun(recv),
+ ContentLength = list_to_integer(ConLenStr),
+ {ok, http_eoh} = SockFun(recv),
+ ok = SockFun({setopts, [{packet, raw}]}),
+ {payload, ExReply} = {payload, drain_reply(SockFun, ContentLength, <<>>)},
+ ok = SockFun({setopts, [{packet, http}]}),
+ client_request(SockFun, Method, Rest).
+
+client_headers(Body, IsLastRequest) ->
+ ["Host: localhost\r\n",
+ case Body of
+ <<>> ->
+ "";
+ _ ->
+ ["Content-Type: application/octet-stream\r\n",
+ "Content-Length: ", integer_to_list(byte_size(Body)), "\r\n"]
+ end,
+ case IsLastRequest of
+ true ->
+ "Connection: close\r\n";
+ false ->
+ ""
+ end].
+
+drain_reply(_SockFun, 0, Acc) ->
+ Acc;
+drain_reply(SockFun, Length, Acc) ->
+ Sz = erlang:min(Length, 1024),
+ {ok, B} = SockFun({recv, Sz}),
+ drain_reply(SockFun, Length - Sz, <<Acc/bytes, B/bytes>>).
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_acceptor.erl b/deps/mochiweb/src/mochiweb_acceptor.erl
new file mode 100644
index 00000000..79d172c3
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_acceptor.erl
@@ -0,0 +1,48 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc MochiWeb acceptor.
+
+-module(mochiweb_acceptor).
+-author('bob@mochimedia.com').
+
+-include("internal.hrl").
+
+-export([start_link/3, init/3]).
+
+start_link(Server, Listen, Loop) ->
+ proc_lib:spawn_link(?MODULE, init, [Server, Listen, Loop]).
+
+init(Server, Listen, Loop) ->
+ T1 = now(),
+ case catch mochiweb_socket:accept(Listen) of
+ {ok, Socket} ->
+ gen_server:cast(Server, {accepted, self(), timer:now_diff(now(), T1)}),
+ call_loop(Loop, Socket);
+ {error, closed} ->
+ exit(normal);
+ {error, timeout} ->
+ exit(normal);
+ {error, esslaccept} ->
+ exit(normal);
+ Other ->
+ error_logger:error_report(
+ [{application, mochiweb},
+ "Accept failed error",
+ lists:flatten(io_lib:format("~p", [Other]))]),
+ exit({error, accept_failed})
+ end.
+
+call_loop({M, F}, Socket) ->
+ M:F(Socket);
+call_loop({M, F, A}, Socket) ->
+ erlang:apply(M, F, [Socket | A]);
+call_loop(Loop, Socket) ->
+ Loop(Socket).
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_app.erl b/deps/mochiweb/src/mochiweb_app.erl
new file mode 100644
index 00000000..5d67787b
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_app.erl
@@ -0,0 +1,27 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Callbacks for the mochiweb application.
+
+-module(mochiweb_app).
+-author('bob@mochimedia.com').
+
+-behaviour(application).
+-export([start/2,stop/1]).
+
+%% @spec start(_Type, _StartArgs) -> ServerRet
+%% @doc application start callback for mochiweb.
+start(_Type, _StartArgs) ->
+ mochiweb_sup:start_link().
+
+%% @spec stop(_State) -> ServerRet
+%% @doc application stop callback for mochiweb.
+stop(_State) ->
+ ok.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_charref.erl b/deps/mochiweb/src/mochiweb_charref.erl
new file mode 100644
index 00000000..99cd5502
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_charref.erl
@@ -0,0 +1,308 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Converts HTML 4 charrefs and entities to codepoints.
+-module(mochiweb_charref).
+-export([charref/1]).
+
+%% External API.
+
+%% @spec charref(S) -> integer() | undefined
+%% @doc Convert a decimal charref, hex charref, or html entity to a unicode
+%% codepoint, or return undefined on failure.
+%% The input should not include an ampersand or semicolon.
+%% charref("#38") = 38, charref("#x26") = 38, charref("amp") = 38.
+charref(B) when is_binary(B) ->
+ charref(binary_to_list(B));
+charref([$#, C | L]) when C =:= $x orelse C =:= $X ->
+ try erlang:list_to_integer(L, 16)
+ catch
+ error:badarg -> undefined
+ end;
+charref([$# | L]) ->
+ try list_to_integer(L)
+ catch
+ error:badarg -> undefined
+ end;
+charref(L) ->
+ entity(L).
+
+%% Internal API.
+
+entity("nbsp") -> 160;
+entity("iexcl") -> 161;
+entity("cent") -> 162;
+entity("pound") -> 163;
+entity("curren") -> 164;
+entity("yen") -> 165;
+entity("brvbar") -> 166;
+entity("sect") -> 167;
+entity("uml") -> 168;
+entity("copy") -> 169;
+entity("ordf") -> 170;
+entity("laquo") -> 171;
+entity("not") -> 172;
+entity("shy") -> 173;
+entity("reg") -> 174;
+entity("macr") -> 175;
+entity("deg") -> 176;
+entity("plusmn") -> 177;
+entity("sup2") -> 178;
+entity("sup3") -> 179;
+entity("acute") -> 180;
+entity("micro") -> 181;
+entity("para") -> 182;
+entity("middot") -> 183;
+entity("cedil") -> 184;
+entity("sup1") -> 185;
+entity("ordm") -> 186;
+entity("raquo") -> 187;
+entity("frac14") -> 188;
+entity("frac12") -> 189;
+entity("frac34") -> 190;
+entity("iquest") -> 191;
+entity("Agrave") -> 192;
+entity("Aacute") -> 193;
+entity("Acirc") -> 194;
+entity("Atilde") -> 195;
+entity("Auml") -> 196;
+entity("Aring") -> 197;
+entity("AElig") -> 198;
+entity("Ccedil") -> 199;
+entity("Egrave") -> 200;
+entity("Eacute") -> 201;
+entity("Ecirc") -> 202;
+entity("Euml") -> 203;
+entity("Igrave") -> 204;
+entity("Iacute") -> 205;
+entity("Icirc") -> 206;
+entity("Iuml") -> 207;
+entity("ETH") -> 208;
+entity("Ntilde") -> 209;
+entity("Ograve") -> 210;
+entity("Oacute") -> 211;
+entity("Ocirc") -> 212;
+entity("Otilde") -> 213;
+entity("Ouml") -> 214;
+entity("times") -> 215;
+entity("Oslash") -> 216;
+entity("Ugrave") -> 217;
+entity("Uacute") -> 218;
+entity("Ucirc") -> 219;
+entity("Uuml") -> 220;
+entity("Yacute") -> 221;
+entity("THORN") -> 222;
+entity("szlig") -> 223;
+entity("agrave") -> 224;
+entity("aacute") -> 225;
+entity("acirc") -> 226;
+entity("atilde") -> 227;
+entity("auml") -> 228;
+entity("aring") -> 229;
+entity("aelig") -> 230;
+entity("ccedil") -> 231;
+entity("egrave") -> 232;
+entity("eacute") -> 233;
+entity("ecirc") -> 234;
+entity("euml") -> 235;
+entity("igrave") -> 236;
+entity("iacute") -> 237;
+entity("icirc") -> 238;
+entity("iuml") -> 239;
+entity("eth") -> 240;
+entity("ntilde") -> 241;
+entity("ograve") -> 242;
+entity("oacute") -> 243;
+entity("ocirc") -> 244;
+entity("otilde") -> 245;
+entity("ouml") -> 246;
+entity("divide") -> 247;
+entity("oslash") -> 248;
+entity("ugrave") -> 249;
+entity("uacute") -> 250;
+entity("ucirc") -> 251;
+entity("uuml") -> 252;
+entity("yacute") -> 253;
+entity("thorn") -> 254;
+entity("yuml") -> 255;
+entity("fnof") -> 402;
+entity("Alpha") -> 913;
+entity("Beta") -> 914;
+entity("Gamma") -> 915;
+entity("Delta") -> 916;
+entity("Epsilon") -> 917;
+entity("Zeta") -> 918;
+entity("Eta") -> 919;
+entity("Theta") -> 920;
+entity("Iota") -> 921;
+entity("Kappa") -> 922;
+entity("Lambda") -> 923;
+entity("Mu") -> 924;
+entity("Nu") -> 925;
+entity("Xi") -> 926;
+entity("Omicron") -> 927;
+entity("Pi") -> 928;
+entity("Rho") -> 929;
+entity("Sigma") -> 931;
+entity("Tau") -> 932;
+entity("Upsilon") -> 933;
+entity("Phi") -> 934;
+entity("Chi") -> 935;
+entity("Psi") -> 936;
+entity("Omega") -> 937;
+entity("alpha") -> 945;
+entity("beta") -> 946;
+entity("gamma") -> 947;
+entity("delta") -> 948;
+entity("epsilon") -> 949;
+entity("zeta") -> 950;
+entity("eta") -> 951;
+entity("theta") -> 952;
+entity("iota") -> 953;
+entity("kappa") -> 954;
+entity("lambda") -> 955;
+entity("mu") -> 956;
+entity("nu") -> 957;
+entity("xi") -> 958;
+entity("omicron") -> 959;
+entity("pi") -> 960;
+entity("rho") -> 961;
+entity("sigmaf") -> 962;
+entity("sigma") -> 963;
+entity("tau") -> 964;
+entity("upsilon") -> 965;
+entity("phi") -> 966;
+entity("chi") -> 967;
+entity("psi") -> 968;
+entity("omega") -> 969;
+entity("thetasym") -> 977;
+entity("upsih") -> 978;
+entity("piv") -> 982;
+entity("bull") -> 8226;
+entity("hellip") -> 8230;
+entity("prime") -> 8242;
+entity("Prime") -> 8243;
+entity("oline") -> 8254;
+entity("frasl") -> 8260;
+entity("weierp") -> 8472;
+entity("image") -> 8465;
+entity("real") -> 8476;
+entity("trade") -> 8482;
+entity("alefsym") -> 8501;
+entity("larr") -> 8592;
+entity("uarr") -> 8593;
+entity("rarr") -> 8594;
+entity("darr") -> 8595;
+entity("harr") -> 8596;
+entity("crarr") -> 8629;
+entity("lArr") -> 8656;
+entity("uArr") -> 8657;
+entity("rArr") -> 8658;
+entity("dArr") -> 8659;
+entity("hArr") -> 8660;
+entity("forall") -> 8704;
+entity("part") -> 8706;
+entity("exist") -> 8707;
+entity("empty") -> 8709;
+entity("nabla") -> 8711;
+entity("isin") -> 8712;
+entity("notin") -> 8713;
+entity("ni") -> 8715;
+entity("prod") -> 8719;
+entity("sum") -> 8721;
+entity("minus") -> 8722;
+entity("lowast") -> 8727;
+entity("radic") -> 8730;
+entity("prop") -> 8733;
+entity("infin") -> 8734;
+entity("ang") -> 8736;
+entity("and") -> 8743;
+entity("or") -> 8744;
+entity("cap") -> 8745;
+entity("cup") -> 8746;
+entity("int") -> 8747;
+entity("there4") -> 8756;
+entity("sim") -> 8764;
+entity("cong") -> 8773;
+entity("asymp") -> 8776;
+entity("ne") -> 8800;
+entity("equiv") -> 8801;
+entity("le") -> 8804;
+entity("ge") -> 8805;
+entity("sub") -> 8834;
+entity("sup") -> 8835;
+entity("nsub") -> 8836;
+entity("sube") -> 8838;
+entity("supe") -> 8839;
+entity("oplus") -> 8853;
+entity("otimes") -> 8855;
+entity("perp") -> 8869;
+entity("sdot") -> 8901;
+entity("lceil") -> 8968;
+entity("rceil") -> 8969;
+entity("lfloor") -> 8970;
+entity("rfloor") -> 8971;
+entity("lang") -> 9001;
+entity("rang") -> 9002;
+entity("loz") -> 9674;
+entity("spades") -> 9824;
+entity("clubs") -> 9827;
+entity("hearts") -> 9829;
+entity("diams") -> 9830;
+entity("quot") -> 34;
+entity("amp") -> 38;
+entity("lt") -> 60;
+entity("gt") -> 62;
+entity("OElig") -> 338;
+entity("oelig") -> 339;
+entity("Scaron") -> 352;
+entity("scaron") -> 353;
+entity("Yuml") -> 376;
+entity("circ") -> 710;
+entity("tilde") -> 732;
+entity("ensp") -> 8194;
+entity("emsp") -> 8195;
+entity("thinsp") -> 8201;
+entity("zwnj") -> 8204;
+entity("zwj") -> 8205;
+entity("lrm") -> 8206;
+entity("rlm") -> 8207;
+entity("ndash") -> 8211;
+entity("mdash") -> 8212;
+entity("lsquo") -> 8216;
+entity("rsquo") -> 8217;
+entity("sbquo") -> 8218;
+entity("ldquo") -> 8220;
+entity("rdquo") -> 8221;
+entity("bdquo") -> 8222;
+entity("dagger") -> 8224;
+entity("Dagger") -> 8225;
+entity("permil") -> 8240;
+entity("lsaquo") -> 8249;
+entity("rsaquo") -> 8250;
+entity("euro") -> 8364;
+entity(_) -> undefined.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+exhaustive_entity_test() ->
+ T = mochiweb_cover:clause_lookup_table(?MODULE, entity),
+ [?assertEqual(V, entity(K)) || {K, V} <- T].
+
+charref_test() ->
+ 1234 = charref("#1234"),
+ 255 = charref("#xfF"),
+ 255 = charref(<<"#XFf">>),
+ 38 = charref("amp"),
+ 38 = charref(<<"amp">>),
+ undefined = charref("not_an_entity"),
+ undefined = charref("#not_an_entity"),
+ undefined = charref("#xnot_an_entity"),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_cookies.erl b/deps/mochiweb/src/mochiweb_cookies.erl
new file mode 100644
index 00000000..c090b714
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_cookies.erl
@@ -0,0 +1,309 @@
+%% @author Emad El-Haraty <emad@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc HTTP Cookie parsing and generating (RFC 2109, RFC 2965).
+
+-module(mochiweb_cookies).
+-export([parse_cookie/1, cookie/3, cookie/2]).
+
+-define(QUOTE, $\").
+
+-define(IS_WHITESPACE(C),
+ (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+
+%% RFC 2616 separators (called tspecials in RFC 2068)
+-define(IS_SEPARATOR(C),
+ (C < 32 orelse
+ C =:= $\s orelse C =:= $\t orelse
+ C =:= $( orelse C =:= $) orelse C =:= $< orelse C =:= $> orelse
+ C =:= $@ orelse C =:= $, orelse C =:= $; orelse C =:= $: orelse
+ C =:= $\\ orelse C =:= $\" orelse C =:= $/ orelse
+ C =:= $[ orelse C =:= $] orelse C =:= $? orelse C =:= $= orelse
+ C =:= ${ orelse C =:= $})).
+
+%% @type proplist() = [{Key::string(), Value::string()}].
+%% @type header() = {Name::string(), Value::string()}.
+
+%% @spec cookie(Key::string(), Value::string()) -> header()
+%% @doc Short-hand for <code>cookie(Key, Value, [])</code>.
+cookie(Key, Value) ->
+ cookie(Key, Value, []).
+
+%% @spec cookie(Key::string(), Value::string(), Options::[Option]) -> header()
+%% where Option = {max_age, integer()} | {local_time, {date(), time()}}
+%% | {domain, string()} | {path, string()}
+%% | {secure, true | false} | {http_only, true | false}
+%%
+%% @doc Generate a Set-Cookie header field tuple.
+cookie(Key, Value, Options) ->
+ Cookie = [any_to_list(Key), "=", quote(Value), "; Version=1"],
+ %% Set-Cookie:
+ %% Comment, Domain, Max-Age, Path, Secure, Version
+ %% Set-Cookie2:
+ %% Comment, CommentURL, Discard, Domain, Max-Age, Path, Port, Secure,
+ %% Version
+ ExpiresPart =
+ case proplists:get_value(max_age, Options) of
+ undefined ->
+ "";
+ RawAge ->
+ When = case proplists:get_value(local_time, Options) of
+ undefined ->
+ calendar:local_time();
+ LocalTime ->
+ LocalTime
+ end,
+ Age = case RawAge < 0 of
+ true ->
+ 0;
+ false ->
+ RawAge
+ end,
+ ["; Expires=", age_to_cookie_date(Age, When),
+ "; Max-Age=", quote(Age)]
+ end,
+ SecurePart =
+ case proplists:get_value(secure, Options) of
+ true ->
+ "; Secure";
+ _ ->
+ ""
+ end,
+ DomainPart =
+ case proplists:get_value(domain, Options) of
+ undefined ->
+ "";
+ Domain ->
+ ["; Domain=", quote(Domain)]
+ end,
+ PathPart =
+ case proplists:get_value(path, Options) of
+ undefined ->
+ "";
+ Path ->
+ ["; Path=", quote(Path)]
+ end,
+ HttpOnlyPart =
+ case proplists:get_value(http_only, Options) of
+ true ->
+ "; HttpOnly";
+ _ ->
+ ""
+ end,
+ CookieParts = [Cookie, ExpiresPart, SecurePart, DomainPart, PathPart, HttpOnlyPart],
+ {"Set-Cookie", lists:flatten(CookieParts)}.
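+
+%% A shell sketch (mirrors cookie_test below):
+%%   1> mochiweb_cookies:cookie("Customer", "WILE_E_COYOTE", [{path, "/acme"}]).
+%%   {"Set-Cookie","Customer=WILE_E_COYOTE; Version=1; Path=/acme"}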
+
+
+%% Every major browser mishandles quoted strings in its own, mutually
+%% incompatible way. Instead of wasting time
+%% writing redundant code for each browser, we restrict cookies to
+%% only contain characters that browsers handle compatibly.
+%%
+%% By replacing the definition of quote with this, we generate
+%% RFC-compliant cookies:
+%%
+%% quote(V) ->
+%% Fun = fun(?QUOTE, Acc) -> [$\\, ?QUOTE | Acc];
+%% (Ch, Acc) -> [Ch | Acc]
+%% end,
+%% [?QUOTE | lists:foldr(Fun, [?QUOTE], V)].
+
+%% Convert to a string and raise an error if quoting is required.
+quote(V0) ->
+ V = any_to_list(V0),
+ lists:all(fun(Ch) -> Ch =:= $/ orelse not ?IS_SEPARATOR(Ch) end, V)
+ orelse erlang:error({cookie_quoting_required, V}),
+ V.
+
+add_seconds(Secs, LocalTime) ->
+ Greg = calendar:datetime_to_gregorian_seconds(LocalTime),
+ calendar:gregorian_seconds_to_datetime(Greg + Secs).
+
+age_to_cookie_date(Age, LocalTime) ->
+ httpd_util:rfc1123_date(add_seconds(Age, LocalTime)).
+
+%% @spec parse_cookie(string()) -> [{K::string(), V::string()}]
+%% @doc Parse the contents of a Cookie header field, ignoring cookie
+%% attributes, and return a simple property list.
+parse_cookie("") ->
+ [];
+parse_cookie(Cookie) ->
+ parse_cookie(Cookie, []).
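+
+%% A shell sketch (mirrors parse_cookie_test below); "$"-prefixed
+%% attributes are dropped:
+%%   1> mochiweb_cookies:parse_cookie("foo=bar , baz=wibble ").
+%%   [{"foo","bar"},{"baz","wibble"}]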
+
+%% Internal API
+
+parse_cookie([], Acc) ->
+ lists:reverse(Acc);
+parse_cookie(String, Acc) ->
+ {{Token, Value}, Rest} = read_pair(String),
+ Acc1 = case Token of
+ "" ->
+ Acc;
+ "$" ++ _ ->
+ Acc;
+ _ ->
+ [{Token, Value} | Acc]
+ end,
+ parse_cookie(Rest, Acc1).
+
+read_pair(String) ->
+ {Token, Rest} = read_token(skip_whitespace(String)),
+ {Value, Rest1} = read_value(skip_whitespace(Rest)),
+ {{Token, Value}, skip_past_separator(Rest1)}.
+
+read_value([$= | Value]) ->
+ Value1 = skip_whitespace(Value),
+ case Value1 of
+ [?QUOTE | _] ->
+ read_quoted(Value1);
+ _ ->
+ read_token(Value1)
+ end;
+read_value(String) ->
+ {"", String}.
+
+read_quoted([?QUOTE | String]) ->
+ read_quoted(String, []).
+
+read_quoted([], Acc) ->
+ {lists:reverse(Acc), []};
+read_quoted([?QUOTE | Rest], Acc) ->
+ {lists:reverse(Acc), Rest};
+read_quoted([$\\, Any | Rest], Acc) ->
+ read_quoted(Rest, [Any | Acc]);
+read_quoted([C | Rest], Acc) ->
+ read_quoted(Rest, [C | Acc]).
+
+skip_whitespace(String) ->
+ F = fun (C) -> ?IS_WHITESPACE(C) end,
+ lists:dropwhile(F, String).
+
+read_token(String) ->
+ F = fun (C) -> not ?IS_SEPARATOR(C) end,
+ lists:splitwith(F, String).
+
+skip_past_separator([]) ->
+ [];
+skip_past_separator([$; | Rest]) ->
+ Rest;
+skip_past_separator([$, | Rest]) ->
+ Rest;
+skip_past_separator([_ | Rest]) ->
+ skip_past_separator(Rest).
+
+any_to_list(V) when is_list(V) ->
+ V;
+any_to_list(V) when is_atom(V) ->
+ atom_to_list(V);
+any_to_list(V) when is_binary(V) ->
+ binary_to_list(V);
+any_to_list(V) when is_integer(V) ->
+ integer_to_list(V).
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+quote_test() ->
+    %% The ?assertError eunit macro is not compatible with the coverage module
+ try quote(":wq")
+ catch error:{cookie_quoting_required, ":wq"} -> ok
+ end,
+ ?assertEqual(
+ "foo",
+ quote(foo)),
+ ok.
+
+parse_cookie_test() ->
+ %% RFC example
+ C1 = "$Version=\"1\"; Customer=\"WILE_E_COYOTE\"; $Path=\"/acme\";
+ Part_Number=\"Rocket_Launcher_0001\"; $Path=\"/acme\";
+ Shipping=\"FedEx\"; $Path=\"/acme\"",
+ ?assertEqual(
+ [{"Customer","WILE_E_COYOTE"},
+ {"Part_Number","Rocket_Launcher_0001"},
+ {"Shipping","FedEx"}],
+ parse_cookie(C1)),
+ %% Potential edge cases
+ ?assertEqual(
+ [{"foo", "x"}],
+ parse_cookie("foo=\"\\x\"")),
+ ?assertEqual(
+ [],
+ parse_cookie("=")),
+ ?assertEqual(
+ [{"foo", ""}, {"bar", ""}],
+ parse_cookie(" foo ; bar ")),
+ ?assertEqual(
+ [{"foo", ""}, {"bar", ""}],
+ parse_cookie("foo=;bar=")),
+ ?assertEqual(
+ [{"foo", "\";"}, {"bar", ""}],
+ parse_cookie("foo = \"\\\";\";bar ")),
+ ?assertEqual(
+ [{"foo", "\";bar"}],
+ parse_cookie("foo=\"\\\";bar")),
+ ?assertEqual(
+ [],
+ parse_cookie([])),
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble"}],
+ parse_cookie("foo=bar , baz=wibble ")),
+ ok.
+
+domain_test() ->
+ ?assertEqual(
+ {"Set-Cookie",
+ "Customer=WILE_E_COYOTE; "
+ "Version=1; "
+ "Domain=acme.com; "
+ "HttpOnly"},
+ cookie("Customer", "WILE_E_COYOTE",
+ [{http_only, true}, {domain, "acme.com"}])),
+ ok.
+
+local_time_test() ->
+ {"Set-Cookie", S} = cookie("Customer", "WILE_E_COYOTE",
+ [{max_age, 111}, {secure, true}]),
+ ?assertMatch(
+ ["Customer=WILE_E_COYOTE",
+ " Version=1",
+ " Expires=" ++ _,
+ " Max-Age=111",
+ " Secure"],
+ string:tokens(S, ";")),
+ ok.
+
+cookie_test() ->
+ C1 = {"Set-Cookie",
+ "Customer=WILE_E_COYOTE; "
+ "Version=1; "
+ "Path=/acme"},
+ C1 = cookie("Customer", "WILE_E_COYOTE", [{path, "/acme"}]),
+ C1 = cookie("Customer", "WILE_E_COYOTE",
+ [{path, "/acme"}, {badoption, "negatory"}]),
+ C1 = cookie('Customer', 'WILE_E_COYOTE', [{path, '/acme'}]),
+ C1 = cookie(<<"Customer">>, <<"WILE_E_COYOTE">>, [{path, <<"/acme">>}]),
+
+ {"Set-Cookie","=NoKey; Version=1"} = cookie("", "NoKey", []),
+ {"Set-Cookie","=NoKey; Version=1"} = cookie("", "NoKey"),
+ LocalTime = calendar:universal_time_to_local_time({{2007, 5, 15}, {13, 45, 33}}),
+ C2 = {"Set-Cookie",
+ "Customer=WILE_E_COYOTE; "
+ "Version=1; "
+ "Expires=Tue, 15 May 2007 13:45:33 GMT; "
+ "Max-Age=0"},
+ C2 = cookie("Customer", "WILE_E_COYOTE",
+ [{max_age, -111}, {local_time, LocalTime}]),
+ C3 = {"Set-Cookie",
+ "Customer=WILE_E_COYOTE; "
+ "Version=1; "
+ "Expires=Wed, 16 May 2007 13:45:50 GMT; "
+ "Max-Age=86417"},
+ C3 = cookie("Customer", "WILE_E_COYOTE",
+ [{max_age, 86417}, {local_time, LocalTime}]),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_cover.erl b/deps/mochiweb/src/mochiweb_cover.erl
new file mode 100644
index 00000000..6a14ef51
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_cover.erl
@@ -0,0 +1,75 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc Workarounds for various cover deficiencies.
+-module(mochiweb_cover).
+-export([get_beam/1, get_abstract_code/1,
+ get_clauses/2, clause_lookup_table/1]).
+-export([clause_lookup_table/2]).
+
+%% Internal
+
+get_beam(Module) ->
+ {Module, Beam, _Path} = code:get_object_code(Module),
+ Beam.
+
+get_abstract_code(Beam) ->
+ {ok, {_Module,
+ [{abstract_code,
+ {raw_abstract_v1, L}}]}} = beam_lib:chunks(Beam, [abstract_code]),
+ L.
+
+get_clauses(Function, Code) ->
+ [L] = [Clauses || {function, _, FName, _, Clauses}
+ <- Code, FName =:= Function],
+ L.
+
+clause_lookup_table(Module, Function) ->
+ clause_lookup_table(
+ get_clauses(Function,
+ get_abstract_code(get_beam(Module)))).
+
+clause_lookup_table(Clauses) ->
+ lists:foldr(fun clause_fold/2, [], Clauses).
+
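+%% For example, given the foo_table/1 clauses in the test section below,
+%% clause_lookup_table(?MODULE, foo_table) yields
+%%   [{a, b}, {"a", <<"b">>}, {123, {4, 3, 2}}, ...]
+%% Clauses with guards, multi-expression bodies, or variable heads are
+%% skipped by clause_fold/2.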
+clause_fold({clause, _,
+ [InTerm],
+ _Guards=[],
+ [OutTerm]},
+ Acc) ->
+ try [{erl_parse:normalise(InTerm), erl_parse:normalise(OutTerm)} | Acc]
+ catch error:_ -> Acc
+ end;
+clause_fold(_, Acc) ->
+ Acc.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+foo_table(a) -> b;
+foo_table("a") -> <<"b">>;
+foo_table(123) -> {4, 3, 2};
+foo_table([list]) -> [];
+foo_table([list1, list2]) -> [list1, list2, list3];
+foo_table(ignored) -> some, code, ignored;
+foo_table(Var) -> Var.
+
+foo_table_test() ->
+ T = clause_lookup_table(?MODULE, foo_table),
+ [?assertEqual(V, foo_table(K)) || {K, V} <- T].
+
+clause_lookup_table_test() ->
+ ?assertEqual(b, foo_table(a)),
+ ?assertEqual(ignored, foo_table(ignored)),
+ ?assertEqual('Var', foo_table('Var')),
+ ?assertEqual(
+ [{a, b},
+ {"a", <<"b">>},
+ {123, {4, 3, 2}},
+ {[list], []},
+ {[list1, list2], [list1, list2, list3]}],
+ clause_lookup_table(?MODULE, foo_table)).
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_echo.erl b/deps/mochiweb/src/mochiweb_echo.erl
new file mode 100644
index 00000000..6f7872b9
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_echo.erl
@@ -0,0 +1,38 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Simple and stupid echo server to demo mochiweb_socket_server.
+
+-module(mochiweb_echo).
+-author('bob@mochimedia.com').
+-export([start/0, stop/0, loop/1]).
+
+stop() ->
+ mochiweb_socket_server:stop(?MODULE).
+
+start() ->
+ mochiweb_socket_server:start([{name, ?MODULE},
+ {port, 6789},
+ {ip, "127.0.0.1"},
+ {max, 1},
+ {loop, {?MODULE, loop}}]).
+
+loop(Socket) ->
+ case mochiweb_socket:recv(Socket, 0, 30000) of
+ {ok, Data} ->
+ case mochiweb_socket:send(Socket, Data) of
+ ok ->
+ loop(Socket);
+ _ ->
+ exit(normal)
+ end;
+ _Other ->
+ exit(normal)
+ end.
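+%% Usage sketch: call mochiweb_echo:start(), then connect with e.g.
+%% `telnet 127.0.0.1 6789`; input is echoed back until the connection
+%% has been idle for 30 seconds (the recv timeout above).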
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_headers.erl b/deps/mochiweb/src/mochiweb_headers.erl
new file mode 100644
index 00000000..4fce9838
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_headers.erl
@@ -0,0 +1,299 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Case-preserving (but case-insensitive) HTTP header dictionary.
+
+-module(mochiweb_headers).
+-author('bob@mochimedia.com').
+-export([empty/0, from_list/1, insert/3, enter/3, get_value/2, lookup/2]).
+-export([delete_any/2, get_primary_value/2]).
+-export([default/3, enter_from_list/2, default_from_list/2]).
+-export([to_list/1, make/1]).
+-export([from_binary/1]).
+
+%% @type headers().
+%% @type key() = atom() | binary() | string().
+%% @type value() = atom() | binary() | string() | integer().
+
+%% @spec empty() -> headers()
+%% @doc Create an empty headers structure.
+empty() ->
+ gb_trees:empty().
+
+%% @spec make(headers() | [{key(), value()}]) -> headers()
+%% @doc Construct a headers() from the given list.
+make(L) when is_list(L) ->
+ from_list(L);
+%% Assume a tuple is already a mochiweb_headers structure.
+make(T) when is_tuple(T) ->
+ T.
+
+%% @spec from_binary(iolist()) -> headers()
+%% @doc Transforms a raw HTTP header into a mochiweb headers structure.
+%%
+%% The given raw HTTP header can be one of the following:
+%%
+%% 1) A string or a binary representing a full HTTP header ending with
+%% double CRLF.
+%% Examples:
+%% ```
+%% "Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n"
+%% <<"Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n">>'''
+%%
+%% 2) A list of binaries or strings where each element represents a raw
+%% HTTP header line ending with a single CRLF.
+%% Examples:
+%% ```
+%% [<<"Content-Length: 47\r\n">>, <<"Content-Type: text/plain\r\n">>]
+%% ["Content-Length: 47\r\n", "Content-Type: text/plain\r\n"]
+%% ["Content-Length: 47\r\n", <<"Content-Type: text/plain\r\n">>]'''
+%%
+from_binary(RawHttpHeader) when is_binary(RawHttpHeader) ->
+ from_binary(RawHttpHeader, []);
+from_binary(RawHttpHeaderList) ->
+ from_binary(list_to_binary([RawHttpHeaderList, "\r\n"])).
+
+from_binary(RawHttpHeader, Acc) ->
+ case erlang:decode_packet(httph, RawHttpHeader, []) of
+ {ok, {http_header, _, H, _, V}, Rest} ->
+ from_binary(Rest, [{H, V} | Acc]);
+ _ ->
+ make(Acc)
+ end.
+
+%% @spec from_list([{key(), value()}]) -> headers()
+%% @doc Construct a headers() from the given list.
+from_list(List) ->
+ lists:foldl(fun ({K, V}, T) -> insert(K, V, T) end, empty(), List).
+
+%% @spec enter_from_list([{key(), value()}], headers()) -> headers()
+%% @doc Insert pairs into the headers, replace any values for existing keys.
+enter_from_list(List, T) ->
+ lists:foldl(fun ({K, V}, T1) -> enter(K, V, T1) end, T, List).
+
+%% @spec default_from_list([{key(), value()}], headers()) -> headers()
+%% @doc Insert pairs into the headers for keys that do not already exist.
+default_from_list(List, T) ->
+ lists:foldl(fun ({K, V}, T1) -> default(K, V, T1) end, T, List).
+
+%% @spec to_list(headers()) -> [{key(), string()}]
+%% @doc Return the contents of the headers. Each key is the exact key
+%%      that was first inserted (i.e. it may be an atom or binary, and
+%%      its case is preserved).
+to_list(T) ->
+ F = fun ({K, {array, L}}, Acc) ->
+ L1 = lists:reverse(L),
+ lists:foldl(fun (V, Acc1) -> [{K, V} | Acc1] end, Acc, L1);
+ (Pair, Acc) ->
+ [Pair | Acc]
+ end,
+ lists:reverse(lists:foldl(F, [], gb_trees:values(T))).
+
+%% @spec get_value(key(), headers()) -> string() | undefined
+%% @doc Return the value of the given header using a case insensitive search.
+%% undefined will be returned for keys that are not present.
+get_value(K, T) ->
+ case lookup(K, T) of
+ {value, {_, V}} ->
+ expand(V);
+ none ->
+ undefined
+ end.
+
+%% @spec get_primary_value(key(), headers()) -> string() | undefined
+%% @doc Return the value of the given header up to the first semicolon using
+%% a case insensitive search. undefined will be returned for keys
+%% that are not present.
+get_primary_value(K, T) ->
+ case get_value(K, T) of
+ undefined ->
+ undefined;
+ V ->
+ lists:takewhile(fun (C) -> C =/= $; end, V)
+ end.
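+%% Example: for a header stored as
+%% "application/x-www-form-urlencoded; charset=utf8", this returns
+%% "application/x-www-form-urlencoded" (see headers_test/0 below).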
+
+%% @spec lookup(key(), headers()) -> {value, {key(), string()}} | none
+%% @doc Return the case preserved key and value for the given header using
+%% a case insensitive search. none will be returned for keys that are
+%% not present.
+lookup(K, T) ->
+ case gb_trees:lookup(normalize(K), T) of
+ {value, {K0, V}} ->
+ {value, {K0, expand(V)}};
+ none ->
+ none
+ end.
+
+%% @spec default(key(), value(), headers()) -> headers()
+%% @doc Insert the pair into the headers if it does not already exist.
+default(K, V, T) ->
+ K1 = normalize(K),
+ V1 = any_to_list(V),
+ try gb_trees:insert(K1, {K, V1}, T)
+ catch
+ error:{key_exists, _} ->
+ T
+ end.
+
+%% @spec enter(key(), value(), headers()) -> headers()
+%% @doc Insert the pair into the headers, replacing any pre-existing key.
+enter(K, V, T) ->
+ K1 = normalize(K),
+ V1 = any_to_list(V),
+ gb_trees:enter(K1, {K, V1}, T).
+
+%% @spec insert(key(), value(), headers()) -> headers()
+%% @doc Insert the pair into the headers, merging with any pre-existing key.
+%% A merge is done with Value = V0 ++ ", " ++ V1.
+insert(K, V, T) ->
+ K1 = normalize(K),
+ V1 = any_to_list(V),
+ try gb_trees:insert(K1, {K, V1}, T)
+ catch
+ error:{key_exists, _} ->
+ {K0, V0} = gb_trees:get(K1, T),
+ V2 = merge(K1, V1, V0),
+ gb_trees:update(K1, {K0, V2}, T)
+ end.
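+%% Example (see headers_test/0 below): repeated "Set-Cookie" keys are
+%% kept as separate headers by to_list/1, while any other repeated key
+%% is folded into one comma-separated value; after inserting "foo" and
+%% then "bar" under "Set-Cookie", get_value("set-cookie", H) returns
+%% "foo, bar".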
+
+%% @spec delete_any(key(), headers()) -> headers()
+%% @doc Delete the header corresponding to key if it is present.
+delete_any(K, T) ->
+ K1 = normalize(K),
+ gb_trees:delete_any(K1, T).
+
+%% Internal API
+
+expand({array, L}) ->
+ mochiweb_util:join(lists:reverse(L), ", ");
+expand(V) ->
+ V.
+
+merge("set-cookie", V1, {array, L}) ->
+ {array, [V1 | L]};
+merge("set-cookie", V1, V0) ->
+ {array, [V1, V0]};
+merge(_, V1, V0) ->
+ V0 ++ ", " ++ V1.
+
+normalize(K) when is_list(K) ->
+ string:to_lower(K);
+normalize(K) when is_atom(K) ->
+ normalize(atom_to_list(K));
+normalize(K) when is_binary(K) ->
+ normalize(binary_to_list(K)).
+
+any_to_list(V) when is_list(V) ->
+ V;
+any_to_list(V) when is_atom(V) ->
+ atom_to_list(V);
+any_to_list(V) when is_binary(V) ->
+ binary_to_list(V);
+any_to_list(V) when is_integer(V) ->
+ integer_to_list(V).
+
+%%
+%% Tests.
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+make_test() ->
+ Identity = make([{hdr, foo}]),
+ ?assertEqual(
+ Identity,
+ make(Identity)).
+
+enter_from_list_test() ->
+ H = make([{hdr, foo}]),
+ ?assertEqual(
+ [{baz, "wibble"}, {hdr, "foo"}],
+ to_list(enter_from_list([{baz, wibble}], H))),
+ ?assertEqual(
+ [{hdr, "bar"}],
+ to_list(enter_from_list([{hdr, bar}], H))),
+ ok.
+
+default_from_list_test() ->
+ H = make([{hdr, foo}]),
+ ?assertEqual(
+ [{baz, "wibble"}, {hdr, "foo"}],
+ to_list(default_from_list([{baz, wibble}], H))),
+ ?assertEqual(
+ [{hdr, "foo"}],
+ to_list(default_from_list([{hdr, bar}], H))),
+ ok.
+
+get_primary_value_test() ->
+ H = make([{hdr, foo}, {baz, <<"wibble;taco">>}]),
+ ?assertEqual(
+ "foo",
+ get_primary_value(hdr, H)),
+ ?assertEqual(
+ undefined,
+ get_primary_value(bar, H)),
+ ?assertEqual(
+ "wibble",
+ get_primary_value(<<"baz">>, H)),
+ ok.
+
+set_cookie_test() ->
+ H = make([{"set-cookie", foo}, {"set-cookie", bar}, {"set-cookie", baz}]),
+ ?assertEqual(
+ [{"set-cookie", "foo"}, {"set-cookie", "bar"}, {"set-cookie", "baz"}],
+ to_list(H)),
+ ok.
+
+headers_test() ->
+ H = ?MODULE:make([{hdr, foo}, {"Hdr", "bar"}, {'Hdr', 2}]),
+ [{hdr, "foo, bar, 2"}] = ?MODULE:to_list(H),
+ H1 = ?MODULE:insert(taco, grande, H),
+ [{hdr, "foo, bar, 2"}, {taco, "grande"}] = ?MODULE:to_list(H1),
+ H2 = ?MODULE:make([{"Set-Cookie", "foo"}]),
+ [{"Set-Cookie", "foo"}] = ?MODULE:to_list(H2),
+ H3 = ?MODULE:insert("Set-Cookie", "bar", H2),
+ [{"Set-Cookie", "foo"}, {"Set-Cookie", "bar"}] = ?MODULE:to_list(H3),
+ "foo, bar" = ?MODULE:get_value("set-cookie", H3),
+ {value, {"Set-Cookie", "foo, bar"}} = ?MODULE:lookup("set-cookie", H3),
+ undefined = ?MODULE:get_value("shibby", H3),
+ none = ?MODULE:lookup("shibby", H3),
+ H4 = ?MODULE:insert("content-type",
+ "application/x-www-form-urlencoded; charset=utf8",
+ H3),
+ "application/x-www-form-urlencoded" = ?MODULE:get_primary_value(
+ "content-type", H4),
+ H4 = ?MODULE:delete_any("nonexistent-header", H4),
+ H3 = ?MODULE:delete_any("content-type", H4),
+ HB = <<"Content-Length: 47\r\nContent-Type: text/plain\r\n\r\n">>,
+ H_HB = ?MODULE:from_binary(HB),
+ H_HB = ?MODULE:from_binary(binary_to_list(HB)),
+ "47" = ?MODULE:get_value("Content-Length", H_HB),
+ "text/plain" = ?MODULE:get_value("Content-Type", H_HB),
+ L_H_HB = ?MODULE:to_list(H_HB),
+ 2 = length(L_H_HB),
+ true = lists:member({'Content-Length', "47"}, L_H_HB),
+ true = lists:member({'Content-Type', "text/plain"}, L_H_HB),
+ HL = [ <<"Content-Length: 47\r\n">>, <<"Content-Type: text/plain\r\n">> ],
+ HL2 = [ "Content-Length: 47\r\n", <<"Content-Type: text/plain\r\n">> ],
+ HL3 = [ <<"Content-Length: 47\r\n">>, "Content-Type: text/plain\r\n" ],
+ H_HL = ?MODULE:from_binary(HL),
+ H_HL = ?MODULE:from_binary(HL2),
+ H_HL = ?MODULE:from_binary(HL3),
+ "47" = ?MODULE:get_value("Content-Length", H_HL),
+ "text/plain" = ?MODULE:get_value("Content-Type", H_HL),
+ L_H_HL = ?MODULE:to_list(H_HL),
+ 2 = length(L_H_HL),
+ true = lists:member({'Content-Length', "47"}, L_H_HL),
+ true = lists:member({'Content-Type', "text/plain"}, L_H_HL),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<>>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<"">>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<"\r\n">>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary(<<"\r\n\r\n">>)),
+ [] = ?MODULE:to_list(?MODULE:from_binary("")),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<>>])),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<"">>])),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<"\r\n">>])),
+ [] = ?MODULE:to_list(?MODULE:from_binary([<<"\r\n\r\n">>])),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_html.erl b/deps/mochiweb/src/mochiweb_html.erl
new file mode 100644
index 00000000..a15c359c
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_html.erl
@@ -0,0 +1,1061 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Loosely tokenizes and generates parse trees for HTML 4.
+-module(mochiweb_html).
+-export([tokens/1, parse/1, parse_tokens/1, to_tokens/1, escape/1,
+ escape_attr/1, to_html/1]).
+
+%% This is a macro to placate syntax highlighters.
+-define(QUOTE, $\").
+-define(SQUOTE, $\').
+-define(ADV_COL(S, N),
+ S#decoder{column=N+S#decoder.column,
+ offset=N+S#decoder.offset}).
+-define(INC_COL(S),
+ S#decoder{column=1+S#decoder.column,
+ offset=1+S#decoder.offset}).
+-define(INC_LINE(S),
+ S#decoder{column=1,
+ line=1+S#decoder.line,
+ offset=1+S#decoder.offset}).
+-define(INC_CHAR(S, C),
+ case C of
+ $\n ->
+ S#decoder{column=1,
+ line=1+S#decoder.line,
+ offset=1+S#decoder.offset};
+ _ ->
+ S#decoder{column=1+S#decoder.column,
+ offset=1+S#decoder.offset}
+ end).
+
+-define(IS_WHITESPACE(C),
+ (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
+-define(IS_LITERAL_SAFE(C),
+ ((C >= $A andalso C =< $Z) orelse (C >= $a andalso C =< $z)
+ orelse (C >= $0 andalso C =< $9))).
+-define(PROBABLE_CLOSE(C),
+ (C =:= $> orelse ?IS_WHITESPACE(C))).
+
+-record(decoder, {line=1,
+ column=1,
+ offset=0}).
+
+%% @type html_node() = {string(), [html_attr()], [html_node() | string()]}
+%% @type html_attr() = {string(), string()}
+%% @type html_token() = html_data() | start_tag() | end_tag() | inline_html() | html_comment() | html_doctype()
+%% @type html_data() = {data, string(), Whitespace::boolean()}
+%% @type start_tag() = {start_tag, Name, [html_attr()], Singleton::boolean()}
+%% @type end_tag() = {end_tag, Name}
+%% @type html_comment() = {comment, Comment}
+%% @type html_doctype() = {doctype, [Doctype]}
+%% @type inline_html() = {'=', iolist()}
+
+%% External API.
+
+%% @spec parse(string() | binary()) -> html_node()
+%% @doc Tokenize and then transform the token stream into an HTML tree.
+parse(Input) ->
+ parse_tokens(tokens(Input)).
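+%% Example (taken from parse_test/0 below):
+%%   parse(<<"<html><dd>foo<dt>bar</html>">>)
+%%   => {<<"html">>, [],
+%%       [{<<"dd">>, [], [<<"foo">>]},
+%%        {<<"dt">>, [], [<<"bar">>]}]}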
+
+%% @spec parse_tokens([html_token()]) -> html_node()
+%% @doc Transform the output of tokens(Doc) into an HTML tree.
+parse_tokens(Tokens) when is_list(Tokens) ->
+    %% Skip over the doctype and any processing instructions
+ F = fun (X) ->
+ case X of
+ {start_tag, _, _, false} ->
+ false;
+ _ ->
+ true
+ end
+ end,
+ [{start_tag, Tag, Attrs, false} | Rest] = lists:dropwhile(F, Tokens),
+ {Tree, _} = tree(Rest, [norm({Tag, Attrs})]),
+ Tree.
+
+%% @spec tokens(StringOrBinary) -> [html_token()]
+%% @doc Transform the input UTF-8 HTML into a token stream.
+tokens(Input) ->
+ tokens(iolist_to_binary(Input), #decoder{}, []).
+
+%% @spec to_tokens(html_node()) -> [html_token()]
+%% @doc Convert a html_node() tree to a list of tokens.
+to_tokens({Tag0}) ->
+ to_tokens({Tag0, [], []});
+to_tokens(T={'=', _}) ->
+ [T];
+to_tokens(T={doctype, _}) ->
+ [T];
+to_tokens(T={comment, _}) ->
+ [T];
+to_tokens({Tag0, Acc}) ->
+ %% This is only allowed in sub-tags: {p, [{"class", "foo"}]}
+ to_tokens({Tag0, [], Acc});
+to_tokens({Tag0, Attrs, Acc}) ->
+ Tag = to_tag(Tag0),
+ to_tokens([{Tag, Acc}], [{start_tag, Tag, Attrs, is_singleton(Tag)}]).
+
+%% @spec to_html([html_token()] | html_node()) -> iolist()
+%% @doc Convert a list of html_token() to an HTML document.
+to_html(Node) when is_tuple(Node) ->
+ to_html(to_tokens(Node));
+to_html(Tokens) when is_list(Tokens) ->
+ to_html(Tokens, []).
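+%% Example: iolist_to_binary(to_html({p, [{class, foo}], [<<"hi">>]}))
+%% yields <<"<p class=\"foo\">hi</p>">>; atoms are accepted for tags,
+%% attribute names, and attribute values.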
+
+%% @spec escape(string() | atom() | binary()) -> binary()
+%% @doc Escape a string such that it's safe for HTML (amp; lt; gt;).
+escape(B) when is_binary(B) ->
+ escape(binary_to_list(B), []);
+escape(A) when is_atom(A) ->
+ escape(atom_to_list(A), []);
+escape(S) when is_list(S) ->
+ escape(S, []).
+
+%% @spec escape_attr(string() | binary() | atom() | integer() | float()) -> binary()
+%% @doc Escape a string such that it's safe for HTML attrs
+%% (amp; lt; gt; quot;).
+escape_attr(B) when is_binary(B) ->
+ escape_attr(binary_to_list(B), []);
+escape_attr(A) when is_atom(A) ->
+ escape_attr(atom_to_list(A), []);
+escape_attr(S) when is_list(S) ->
+ escape_attr(S, []);
+escape_attr(I) when is_integer(I) ->
+ escape_attr(integer_to_list(I), []);
+escape_attr(F) when is_float(F) ->
+ escape_attr(mochinum:digits(F), []).
+
+to_html([], Acc) ->
+ lists:reverse(Acc);
+to_html([{'=', Content} | Rest], Acc) ->
+ to_html(Rest, [Content | Acc]);
+to_html([{pi, Tag, Attrs} | Rest], Acc) ->
+ Open = [<<"<?">>,
+ Tag,
+ attrs_to_html(Attrs, []),
+ <<"?>">>],
+ to_html(Rest, [Open | Acc]);
+to_html([{comment, Comment} | Rest], Acc) ->
+ to_html(Rest, [[<<"<!--">>, Comment, <<"-->">>] | Acc]);
+to_html([{doctype, Parts} | Rest], Acc) ->
+ Inside = doctype_to_html(Parts, Acc),
+ to_html(Rest, [[<<"<!DOCTYPE">>, Inside, <<">">>] | Acc]);
+to_html([{data, Data, _Whitespace} | Rest], Acc) ->
+ to_html(Rest, [escape(Data) | Acc]);
+to_html([{start_tag, Tag, Attrs, Singleton} | Rest], Acc) ->
+ Open = [<<"<">>,
+ Tag,
+ attrs_to_html(Attrs, []),
+ case Singleton of
+ true -> <<" />">>;
+ false -> <<">">>
+ end],
+ to_html(Rest, [Open | Acc]);
+to_html([{end_tag, Tag} | Rest], Acc) ->
+ to_html(Rest, [[<<"</">>, Tag, <<">">>] | Acc]).
+
+doctype_to_html([], Acc) ->
+ lists:reverse(Acc);
+doctype_to_html([Word | Rest], Acc) ->
+ case lists:all(fun (C) -> ?IS_LITERAL_SAFE(C) end,
+ binary_to_list(iolist_to_binary(Word))) of
+ true ->
+ doctype_to_html(Rest, [[<<" ">>, Word] | Acc]);
+ false ->
+ doctype_to_html(Rest, [[<<" \"">>, escape_attr(Word), ?QUOTE] | Acc])
+ end.
+
+attrs_to_html([], Acc) ->
+ lists:reverse(Acc);
+attrs_to_html([{K, V} | Rest], Acc) ->
+ attrs_to_html(Rest,
+ [[<<" ">>, escape(K), <<"=\"">>,
+ escape_attr(V), <<"\"">>] | Acc]).
+
+escape([], Acc) ->
+ list_to_binary(lists:reverse(Acc));
+escape("<" ++ Rest, Acc) ->
+ escape(Rest, lists:reverse("&lt;", Acc));
+escape(">" ++ Rest, Acc) ->
+ escape(Rest, lists:reverse("&gt;", Acc));
+escape("&" ++ Rest, Acc) ->
+ escape(Rest, lists:reverse("&amp;", Acc));
+escape([C | Rest], Acc) ->
+ escape(Rest, [C | Acc]).
+
+escape_attr([], Acc) ->
+ list_to_binary(lists:reverse(Acc));
+escape_attr("<" ++ Rest, Acc) ->
+ escape_attr(Rest, lists:reverse("&lt;", Acc));
+escape_attr(">" ++ Rest, Acc) ->
+ escape_attr(Rest, lists:reverse("&gt;", Acc));
+escape_attr("&" ++ Rest, Acc) ->
+ escape_attr(Rest, lists:reverse("&amp;", Acc));
+escape_attr([?QUOTE | Rest], Acc) ->
+ escape_attr(Rest, lists:reverse("&quot;", Acc));
+escape_attr([C | Rest], Acc) ->
+ escape_attr(Rest, [C | Acc]).
+
+to_tag(A) when is_atom(A) ->
+ norm(atom_to_list(A));
+to_tag(L) ->
+ norm(L).
+
+to_tokens([], Acc) ->
+ lists:reverse(Acc);
+to_tokens([{Tag, []} | Rest], Acc) ->
+ to_tokens(Rest, [{end_tag, to_tag(Tag)} | Acc]);
+to_tokens([{Tag0, [{T0} | R1]} | Rest], Acc) ->
+ %% Allow {br}
+ to_tokens([{Tag0, [{T0, [], []} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [T0={'=', _C0} | R1]} | Rest], Acc) ->
+ %% Allow {'=', iolist()}
+ to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
+to_tokens([{Tag0, [T0={comment, _C0} | R1]} | Rest], Acc) ->
+ %% Allow {comment, iolist()}
+ to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
+to_tokens([{Tag0, [T0={pi, _S0, _A0} | R1]} | Rest], Acc) ->
+ %% Allow {pi, binary(), list()}
+ to_tokens([{Tag0, R1} | Rest], [T0 | Acc]);
+to_tokens([{Tag0, [{T0, A0=[{_, _} | _]} | R1]} | Rest], Acc) ->
+ %% Allow {p, [{"class", "foo"}]}
+ to_tokens([{Tag0, [{T0, A0, []} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, C0} | R1]} | Rest], Acc) ->
+ %% Allow {p, "content"} and {p, <<"content">>}
+ to_tokens([{Tag0, [{T0, [], C0} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, A1, C0} | R1]} | Rest], Acc) when is_binary(C0) ->
+ %% Allow {"p", [{"class", "foo"}], <<"content">>}
+ to_tokens([{Tag0, [{T0, A1, binary_to_list(C0)} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, A1, C0=[C | _]} | R1]} | Rest], Acc)
+ when is_integer(C) ->
+ %% Allow {"p", [{"class", "foo"}], "content"}
+ to_tokens([{Tag0, [{T0, A1, [C0]} | R1]} | Rest], Acc);
+to_tokens([{Tag0, [{T0, A1, C1} | R1]} | Rest], Acc) ->
+ %% Native {"p", [{"class", "foo"}], ["content"]}
+ Tag = to_tag(Tag0),
+ T1 = to_tag(T0),
+ case is_singleton(norm(T1)) of
+ true ->
+ to_tokens([{Tag, R1} | Rest], [{start_tag, T1, A1, true} | Acc]);
+ false ->
+ to_tokens([{T1, C1}, {Tag, R1} | Rest],
+ [{start_tag, T1, A1, false} | Acc])
+ end;
+to_tokens([{Tag0, [L | R1]} | Rest], Acc) when is_list(L) ->
+ %% List text
+ Tag = to_tag(Tag0),
+ to_tokens([{Tag, R1} | Rest], [{data, iolist_to_binary(L), false} | Acc]);
+to_tokens([{Tag0, [B | R1]} | Rest], Acc) when is_binary(B) ->
+ %% Binary text
+ Tag = to_tag(Tag0),
+ to_tokens([{Tag, R1} | Rest], [{data, B, false} | Acc]).
+
+tokens(B, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary>> ->
+ lists:reverse(Acc);
+ _ ->
+ {Tag, S1} = tokenize(B, S),
+ case parse_flag(Tag) of
+ script ->
+ {Tag2, S2} = tokenize_script(B, S1),
+ tokens(B, S2, [Tag2, Tag | Acc]);
+ textarea ->
+ {Tag2, S2} = tokenize_textarea(B, S1),
+ tokens(B, S2, [Tag2, Tag | Acc]);
+ none ->
+ tokens(B, S1, [Tag | Acc])
+ end
+ end.
+
+parse_flag({start_tag, B, _, false}) ->
+ case string:to_lower(binary_to_list(B)) of
+ "script" ->
+ script;
+ "textarea" ->
+ textarea;
+ _ ->
+ none
+ end;
+parse_flag(_) ->
+ none.
+
+tokenize(B, S=#decoder{offset=O}) ->
+ case B of
+ <<_:O/binary, "<!--", _/binary>> ->
+ tokenize_comment(B, ?ADV_COL(S, 4));
+ <<_:O/binary, "<!DOCTYPE", _/binary>> ->
+ tokenize_doctype(B, ?ADV_COL(S, 10));
+ <<_:O/binary, "<![CDATA[", _/binary>> ->
+ tokenize_cdata(B, ?ADV_COL(S, 9));
+ <<_:O/binary, "<?", _/binary>> ->
+ {Tag, S1} = tokenize_literal(B, ?ADV_COL(S, 2)),
+ {Attrs, S2} = tokenize_attributes(B, S1),
+ S3 = find_qgt(B, S2),
+ {{pi, Tag, Attrs}, S3};
+ <<_:O/binary, "&", _/binary>> ->
+ tokenize_charref(B, ?INC_COL(S));
+ <<_:O/binary, "</", _/binary>> ->
+ {Tag, S1} = tokenize_literal(B, ?ADV_COL(S, 2)),
+ {S2, _} = find_gt(B, S1),
+ {{end_tag, Tag}, S2};
+ <<_:O/binary, "<", C, _/binary>> when ?IS_WHITESPACE(C) ->
+ %% This isn't really strict HTML
+ {{data, Data, _Whitespace}, S1} = tokenize_data(B, ?INC_COL(S)),
+ {{data, <<$<, Data/binary>>, false}, S1};
+ <<_:O/binary, "<", _/binary>> ->
+ {Tag, S1} = tokenize_literal(B, ?INC_COL(S)),
+ {Attrs, S2} = tokenize_attributes(B, S1),
+ {S3, HasSlash} = find_gt(B, S2),
+ Singleton = HasSlash orelse is_singleton(norm(binary_to_list(Tag))),
+ {{start_tag, Tag, Attrs, Singleton}, S3};
+ _ ->
+ tokenize_data(B, S)
+ end.
+
+tree_data([{data, Data, Whitespace} | Rest], AllWhitespace, Acc) ->
+ tree_data(Rest, (Whitespace andalso AllWhitespace), [Data | Acc]);
+tree_data(Rest, AllWhitespace, Acc) ->
+ {iolist_to_binary(lists:reverse(Acc)), AllWhitespace, Rest}.
+
+tree([], Stack) ->
+ {destack(Stack), []};
+tree([{end_tag, Tag} | Rest], Stack) ->
+ case destack(norm(Tag), Stack) of
+ S when is_list(S) ->
+ tree(Rest, S);
+ Result ->
+ {Result, []}
+ end;
+tree([{start_tag, Tag, Attrs, true} | Rest], S) ->
+ tree(Rest, append_stack_child(norm({Tag, Attrs}), S));
+tree([{start_tag, Tag, Attrs, false} | Rest], S) ->
+ tree(Rest, stack(norm({Tag, Attrs}), S));
+tree([T={pi, _Tag, _Attrs} | Rest], S) ->
+ tree(Rest, append_stack_child(T, S));
+tree([T={comment, _Comment} | Rest], S) ->
+ tree(Rest, append_stack_child(T, S));
+tree(L=[{data, _Data, _Whitespace} | _], S) ->
+ case tree_data(L, true, []) of
+ {_, true, Rest} ->
+ tree(Rest, S);
+ {Data, false, Rest} ->
+ tree(Rest, append_stack_child(Data, S))
+ end;
+tree([{doctype, _} | Rest], Stack) ->
+ tree(Rest, Stack).
+
+norm({Tag, Attrs}) ->
+ {norm(Tag), [{norm(K), iolist_to_binary(V)} || {K, V} <- Attrs], []};
+norm(Tag) when is_binary(Tag) ->
+ Tag;
+norm(Tag) ->
+ list_to_binary(string:to_lower(Tag)).
+
+stack(T1={TN, _, _}, Stack=[{TN, _, _} | _Rest])
+ when TN =:= <<"li">> orelse TN =:= <<"option">> ->
+ [T1 | destack(TN, Stack)];
+stack(T1={TN0, _, _}, Stack=[{TN1, _, _} | _Rest])
+ when (TN0 =:= <<"dd">> orelse TN0 =:= <<"dt">>) andalso
+ (TN1 =:= <<"dd">> orelse TN1 =:= <<"dt">>) ->
+ [T1 | destack(TN1, Stack)];
+stack(T1, Stack) ->
+ [T1 | Stack].
+
+append_stack_child(StartTag, [{Name, Attrs, Acc} | Stack]) ->
+ [{Name, Attrs, [StartTag | Acc]} | Stack].
+
+destack(TagName, Stack) when is_list(Stack) ->
+ F = fun (X) ->
+ case X of
+ {TagName, _, _} ->
+ false;
+ _ ->
+ true
+ end
+ end,
+ case lists:splitwith(F, Stack) of
+ {_, []} ->
+            %% If we're parsing something like XML, we might find a
+            %% <link>tag</link> pair for a tag that is normally a
+            %% singleton in HTML but isn't being used as one here.
+ case {is_singleton(TagName), Stack} of
+ {true, [{T0, A0, Acc0} | Post0]} ->
+ case lists:splitwith(F, Acc0) of
+ {_, []} ->
+ %% Actually was a singleton
+ Stack;
+ {Pre, [{T1, A1, []} | Post1]} ->
+ [{T0, A0, [{T1, A1, lists:reverse(Pre)} | Post1]}
+ | Post0]
+ end;
+ _ ->
+ %% No match, no state change
+ Stack
+ end;
+ {_Pre, [_T]} ->
+ %% Unfurl the whole stack, we're done
+ destack(Stack);
+ {Pre, [T, {T0, A0, Acc0} | Post]} ->
+ %% Unfurl up to the tag, then accumulate it
+ [{T0, A0, [destack(Pre ++ [T]) | Acc0]} | Post]
+ end.
+
+destack([{Tag, Attrs, Acc}]) ->
+ {Tag, Attrs, lists:reverse(Acc)};
+destack([{T1, A1, Acc1}, {T0, A0, Acc0} | Rest]) ->
+ destack([{T0, A0, [{T1, A1, lists:reverse(Acc1)} | Acc0]} | Rest]).
+
+is_singleton(<<"br">>) -> true;
+is_singleton(<<"hr">>) -> true;
+is_singleton(<<"img">>) -> true;
+is_singleton(<<"input">>) -> true;
+is_singleton(<<"base">>) -> true;
+is_singleton(<<"meta">>) -> true;
+is_singleton(<<"link">>) -> true;
+is_singleton(<<"area">>) -> true;
+is_singleton(<<"param">>) -> true;
+is_singleton(<<"col">>) -> true;
+is_singleton(_) -> false.
+
+tokenize_data(B, S=#decoder{offset=O}) ->
+ tokenize_data(B, S, O, true).
+
+tokenize_data(B, S=#decoder{offset=O}, Start, Whitespace) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when (C =/= $< andalso C =/= $&) ->
+ tokenize_data(B, ?INC_CHAR(S, C), Start,
+ (Whitespace andalso ?IS_WHITESPACE(C)));
+ _ ->
+ Len = O - Start,
+ <<_:Start/binary, Data:Len/binary, _/binary>> = B,
+ {{data, Data, Whitespace}, S}
+ end.
+
+tokenize_attributes(B, S) ->
+ tokenize_attributes(B, S, []).
+
+tokenize_attributes(B, S=#decoder{offset=O}, Acc) ->
+ case B of
+ <<_:O/binary>> ->
+ {lists:reverse(Acc), S};
+ <<_:O/binary, C, _/binary>> when (C =:= $> orelse C =:= $/) ->
+ {lists:reverse(Acc), S};
+ <<_:O/binary, "?>", _/binary>> ->
+ {lists:reverse(Acc), S};
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ tokenize_attributes(B, ?INC_CHAR(S, C), Acc);
+ _ ->
+ {Attr, S1} = tokenize_literal(B, S),
+ {Value, S2} = tokenize_attr_value(Attr, B, S1),
+ tokenize_attributes(B, S2, [{Attr, Value} | Acc])
+ end.
+
+tokenize_attr_value(Attr, B, S) ->
+ S1 = skip_whitespace(B, S),
+ O = S1#decoder.offset,
+ case B of
+ <<_:O/binary, "=", _/binary>> ->
+ S2 = skip_whitespace(B, ?INC_COL(S1)),
+ tokenize_word_or_literal(B, S2);
+ _ ->
+ {Attr, S1}
+ end.
+
+skip_whitespace(B, S=#decoder{offset=O}) ->
+ case B of
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ skip_whitespace(B, ?INC_CHAR(S, C));
+ _ ->
+ S
+ end.
+
+tokenize_literal(Bin, S) ->
+ tokenize_literal(Bin, S, []).
+
+tokenize_literal(Bin, S=#decoder{offset=O}, Acc) ->
+ case Bin of
+ <<_:O/binary, $&, _/binary>> ->
+ {{data, Data, false}, S1} = tokenize_charref(Bin, ?INC_COL(S)),
+ tokenize_literal(Bin, S1, [Data | Acc]);
+ <<_:O/binary, C, _/binary>> when not (?IS_WHITESPACE(C)
+ orelse C =:= $>
+ orelse C =:= $/
+ orelse C =:= $=) ->
+ tokenize_literal(Bin, ?INC_COL(S), [C | Acc]);
+ _ ->
+ {iolist_to_binary(lists:reverse(Acc)), S}
+ end.
+
+find_qgt(Bin, S=#decoder{offset=O}) ->
+ case Bin of
+ <<_:O/binary, "?>", _/binary>> ->
+ ?ADV_COL(S, 2);
+ %% tokenize_attributes takes care of this state:
+ %% <<_:O/binary, C, _/binary>> ->
+ %% find_qgt(Bin, ?INC_CHAR(S, C));
+ <<_:O/binary>> ->
+ S
+ end.
+
+find_gt(Bin, S) ->
+ find_gt(Bin, S, false).
+
+find_gt(Bin, S=#decoder{offset=O}, HasSlash) ->
+ case Bin of
+ <<_:O/binary, $/, _/binary>> ->
+ find_gt(Bin, ?INC_COL(S), true);
+ <<_:O/binary, $>, _/binary>> ->
+ {?INC_COL(S), HasSlash};
+ <<_:O/binary, C, _/binary>> ->
+ find_gt(Bin, ?INC_CHAR(S, C), HasSlash);
+ _ ->
+ {S, HasSlash}
+ end.
+
+tokenize_charref(Bin, S=#decoder{offset=O}) ->
+ tokenize_charref(Bin, S, O).
+
+tokenize_charref(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ <<_:O/binary>> ->
+ <<_:Start/binary, Raw/binary>> = Bin,
+ {{data, Raw, false}, S};
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C)
+ orelse C =:= ?SQUOTE
+ orelse C =:= ?QUOTE
+ orelse C =:= $/
+ orelse C =:= $> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{data, Raw, false}, S};
+ <<_:O/binary, $;, _/binary>> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ Data = case mochiweb_charref:charref(Raw) of
+ undefined ->
+ Start1 = Start - 1,
+ Len1 = Len + 2,
+ <<_:Start1/binary, R:Len1/binary, _/binary>> = Bin,
+ R;
+ Unichar ->
+ mochiutf8:codepoint_to_bytes(Unichar)
+ end,
+ {{data, Data, false}, ?INC_COL(S)};
+ _ ->
+ tokenize_charref(Bin, ?INC_COL(S), Start)
+ end.
+
+tokenize_doctype(Bin, S) ->
+ tokenize_doctype(Bin, S, []).
+
+tokenize_doctype(Bin, S=#decoder{offset=O}, Acc) ->
+ case Bin of
+ <<_:O/binary>> ->
+ {{doctype, lists:reverse(Acc)}, S};
+ <<_:O/binary, $>, _/binary>> ->
+ {{doctype, lists:reverse(Acc)}, ?INC_COL(S)};
+ <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
+ tokenize_doctype(Bin, ?INC_CHAR(S, C), Acc);
+ _ ->
+ {Word, S1} = tokenize_word_or_literal(Bin, S),
+ tokenize_doctype(Bin, S1, [Word | Acc])
+ end.
+
+tokenize_word_or_literal(Bin, S=#decoder{offset=O}) ->
+ case Bin of
+ <<_:O/binary, C, _/binary>> when C =:= ?QUOTE orelse C =:= ?SQUOTE ->
+ tokenize_word(Bin, ?INC_COL(S), C);
+ <<_:O/binary, C, _/binary>> when not ?IS_WHITESPACE(C) ->
+ %% Sanity check for whitespace
+ tokenize_literal(Bin, S, [])
+ end.
+
+tokenize_word(Bin, S, Quote) ->
+ tokenize_word(Bin, S, Quote, []).
+
+tokenize_word(Bin, S=#decoder{offset=O}, Quote, Acc) ->
+ case Bin of
+ <<_:O/binary>> ->
+ {iolist_to_binary(lists:reverse(Acc)), S};
+ <<_:O/binary, Quote, _/binary>> ->
+ {iolist_to_binary(lists:reverse(Acc)), ?INC_COL(S)};
+ <<_:O/binary, $&, _/binary>> ->
+ {{data, Data, false}, S1} = tokenize_charref(Bin, ?INC_COL(S)),
+ tokenize_word(Bin, S1, Quote, [Data | Acc]);
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_word(Bin, ?INC_CHAR(S, C), Quote, [C | Acc])
+ end.
+
+tokenize_cdata(Bin, S=#decoder{offset=O}) ->
+ tokenize_cdata(Bin, S, O).
+
+tokenize_cdata(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ <<_:O/binary, "]]>", _/binary>> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{data, Raw, false}, ?ADV_COL(S, 3)};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_cdata(Bin, ?INC_CHAR(S, C), Start);
+ _ ->
+ <<_:O/binary, Raw/binary>> = Bin,
+ {{data, Raw, false}, S}
+ end.
+
+tokenize_comment(Bin, S=#decoder{offset=O}) ->
+ tokenize_comment(Bin, S, O).
+
+tokenize_comment(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+ <<_:O/binary, "-->", _/binary>> ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{comment, Raw}, ?ADV_COL(S, 3)};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_comment(Bin, ?INC_CHAR(S, C), Start);
+ <<_:Start/binary, Raw/binary>> ->
+ {{comment, Raw}, S}
+ end.
+
+tokenize_script(Bin, S=#decoder{offset=O}) ->
+ tokenize_script(Bin, S, O).
+
+tokenize_script(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+        %% Just a look-ahead; we want the end_tag separately
+ <<_:O/binary, $<, $/, SS, CC, RR, II, PP, TT, ZZ, _/binary>>
+ when (SS =:= $s orelse SS =:= $S) andalso
+ (CC =:= $c orelse CC =:= $C) andalso
+ (RR =:= $r orelse RR =:= $R) andalso
+ (II =:= $i orelse II =:= $I) andalso
+ (PP =:= $p orelse PP =:= $P) andalso
+ (TT=:= $t orelse TT =:= $T) andalso
+ ?PROBABLE_CLOSE(ZZ) ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{data, Raw, false}, S};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_script(Bin, ?INC_CHAR(S, C), Start);
+ <<_:Start/binary, Raw/binary>> ->
+ {{data, Raw, false}, S}
+ end.
+
+tokenize_textarea(Bin, S=#decoder{offset=O}) ->
+ tokenize_textarea(Bin, S, O).
+
+tokenize_textarea(Bin, S=#decoder{offset=O}, Start) ->
+ case Bin of
+        %% Just a look-ahead; we want the end_tag separately
+ <<_:O/binary, $<, $/, TT, EE, XX, TT2, AA, RR, EE2, AA2, ZZ, _/binary>>
+ when (TT =:= $t orelse TT =:= $T) andalso
+ (EE =:= $e orelse EE =:= $E) andalso
+ (XX =:= $x orelse XX =:= $X) andalso
+ (TT2 =:= $t orelse TT2 =:= $T) andalso
+ (AA =:= $a orelse AA =:= $A) andalso
+ (RR =:= $r orelse RR =:= $R) andalso
+ (EE2 =:= $e orelse EE2 =:= $E) andalso
+ (AA2 =:= $a orelse AA2 =:= $A) andalso
+ ?PROBABLE_CLOSE(ZZ) ->
+ Len = O - Start,
+ <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin,
+ {{data, Raw, false}, S};
+ <<_:O/binary, C, _/binary>> ->
+ tokenize_textarea(Bin, ?INC_CHAR(S, C), Start);
+ <<_:Start/binary, Raw/binary>> ->
+ {{data, Raw, false}, S}
+ end.
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+to_html_test() ->
+ ?assertEqual(
+ <<"<html><head><title>hey!</title></head><body><p class=\"foo\">what's up<br /></p><div>sucka</div>RAW!<!-- comment! --></body></html>">>,
+ iolist_to_binary(
+ to_html({html, [],
+ [{<<"head">>, [],
+ [{title, <<"hey!">>}]},
+ {body, [],
+ [{p, [{class, foo}], [<<"what's">>, <<" up">>, {br}]},
+ {'div', <<"sucka">>},
+ {'=', <<"RAW!">>},
+ {comment, <<" comment! ">>}]}]}))),
+ ?assertEqual(
+ <<"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">">>,
+ iolist_to_binary(
+ to_html({doctype,
+ [<<"html">>, <<"PUBLIC">>,
+ <<"-//W3C//DTD XHTML 1.0 Transitional//EN">>,
+ <<"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">>]}))),
+ ?assertEqual(
+ <<"<html><?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?></html>">>,
+ iolist_to_binary(
+ to_html({<<"html">>,[],
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}]}))),
+ ok.
+
+escape_test() ->
+ ?assertEqual(
+ <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape(<<"&quot;\"word ><<up!&quot;">>)),
+ ?assertEqual(
+ <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape("&quot;\"word ><<up!&quot;")),
+ ?assertEqual(
+ <<"&amp;quot;\"word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape('&quot;\"word ><<up!&quot;')),
+ ok.
+
+escape_attr_test() ->
+ ?assertEqual(
+ <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape_attr(<<"&quot;\"word ><<up!&quot;">>)),
+ ?assertEqual(
+ <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape_attr("&quot;\"word ><<up!&quot;")),
+ ?assertEqual(
+ <<"&amp;quot;&quot;word &gt;&lt;&lt;up!&amp;quot;">>,
+ escape_attr('&quot;\"word ><<up!&quot;')),
+ ?assertEqual(
+ <<"12345">>,
+ escape_attr(12345)),
+ ?assertEqual(
+ <<"1.5">>,
+ escape_attr(1.5)),
+ ok.
+
+tokens_test() ->
+ ?assertEqual(
+ [{start_tag, <<"foo">>, [{<<"bar">>, <<"baz">>},
+ {<<"wibble">>, <<"wibble">>},
+ {<<"alice">>, <<"bob">>}], true}],
+ tokens(<<"<foo bar=baz wibble='wibble' alice=\"bob\"/>">>)),
+ ?assertEqual(
+ [{start_tag, <<"foo">>, [{<<"bar">>, <<"baz">>},
+ {<<"wibble">>, <<"wibble">>},
+ {<<"alice">>, <<"bob">>}], true}],
+ tokens(<<"<foo bar=baz wibble='wibble' alice=bob/>">>)),
+ ?assertEqual(
+ [{comment, <<"[if lt IE 7]>\n<style type=\"text/css\">\n.no_ie { display: none; }\n</style>\n<![endif]">>}],
+ tokens(<<"<!--[if lt IE 7]>\n<style type=\"text/css\">\n.no_ie { display: none; }\n</style>\n<![endif]-->">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ tokens(<<"<script type=\"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ tokens(<<"<script type =\"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ tokens(<<"<script type = \"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"script">>, [{<<"type">>, <<"text/javascript">>}], false},
+ {data, <<" A= B <= C ">>, false},
+ {end_tag, <<"script">>}],
+ tokens(<<"<script type= \"text/javascript\"> A= B <= C </script>">>)),
+ ?assertEqual(
+ [{start_tag, <<"textarea">>, [], false},
+ {data, <<"<html></body>">>, false},
+ {end_tag, <<"textarea">>}],
+ tokens(<<"<textarea><html></body></textarea>">>)),
+ ?assertEqual(
+ [{start_tag, <<"textarea">>, [], false},
+ {data, <<"<html></body></textareaz>">>, false}],
+ tokens(<<"<textarea ><html></body></textareaz>">>)),
+ ?assertEqual(
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
+ tokens(<<"<?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?>">>)),
+ ?assertEqual(
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
+ tokens(<<"<?xml:namespace prefix=o ns=urn:schemas-microsoft-com:office:office \n?>">>)),
+ ?assertEqual(
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}],
+ tokens(<<"<?xml:namespace prefix=o ns=urn:schemas-microsoft-com:office:office">>)),
+ ?assertEqual(
+ [{data, <<"<">>, false}],
+ tokens(<<"&lt;">>)),
+ ?assertEqual(
+ [{data, <<"not html ">>, false},
+ {data, <<"< at all">>, false}],
+ tokens(<<"not html < at all">>)),
+ ok.
+
+parse_test() ->
+ D0 = <<"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/html4/strict.dtd\">
+<html>
+ <head>
+ <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">
+ <title>Foo</title>
+ <link rel=\"stylesheet\" type=\"text/css\" href=\"/static/rel/dojo/resources/dojo.css\" media=\"screen\">
+ <link rel=\"stylesheet\" type=\"text/css\" href=\"/static/foo.css\" media=\"screen\">
+ <!--[if lt IE 7]>
+ <style type=\"text/css\">
+ .no_ie { display: none; }
+ </style>
+ <![endif]-->
+ <link rel=\"icon\" href=\"/static/images/favicon.ico\" type=\"image/x-icon\">
+ <link rel=\"shortcut icon\" href=\"/static/images/favicon.ico\" type=\"image/x-icon\">
+ </head>
+ <body id=\"home\" class=\"tundra\"><![CDATA[&lt;<this<!-- is -->CDATA>&gt;]]></body>
+</html>">>,
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"head">>, [],
+ [{<<"meta">>,
+ [{<<"http-equiv">>,<<"Content-Type">>},
+ {<<"content">>,<<"text/html; charset=UTF-8">>}],
+ []},
+ {<<"title">>,[],[<<"Foo">>]},
+ {<<"link">>,
+ [{<<"rel">>,<<"stylesheet">>},
+ {<<"type">>,<<"text/css">>},
+ {<<"href">>,<<"/static/rel/dojo/resources/dojo.css">>},
+ {<<"media">>,<<"screen">>}],
+ []},
+ {<<"link">>,
+ [{<<"rel">>,<<"stylesheet">>},
+ {<<"type">>,<<"text/css">>},
+ {<<"href">>,<<"/static/foo.css">>},
+ {<<"media">>,<<"screen">>}],
+ []},
+ {comment,<<"[if lt IE 7]>\n <style type=\"text/css\">\n .no_ie { display: none; }\n </style>\n <![endif]">>},
+ {<<"link">>,
+ [{<<"rel">>,<<"icon">>},
+ {<<"href">>,<<"/static/images/favicon.ico">>},
+ {<<"type">>,<<"image/x-icon">>}],
+ []},
+ {<<"link">>,
+ [{<<"rel">>,<<"shortcut icon">>},
+ {<<"href">>,<<"/static/images/favicon.ico">>},
+ {<<"type">>,<<"image/x-icon">>}],
+ []}]},
+ {<<"body">>,
+ [{<<"id">>,<<"home">>},
+ {<<"class">>,<<"tundra">>}],
+ [<<"&lt;<this<!-- is -->CDATA>&gt;">>]}]},
+ parse(D0)),
+ ?assertEqual(
+ {<<"html">>,[],
+ [{pi, <<"xml:namespace">>,
+ [{<<"prefix">>,<<"o">>},
+ {<<"ns">>,<<"urn:schemas-microsoft-com:office:office">>}]}]},
+ parse(
+ <<"<html><?xml:namespace prefix=\"o\" ns=\"urn:schemas-microsoft-com:office:office\"?></html>">>)),
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"dd">>, [], [<<"foo">>]},
+ {<<"dt">>, [], [<<"bar">>]}]},
+ parse(<<"<html><dd>foo<dt>bar</html>">>)),
+ %% Singleton sadness
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"link">>, [], []},
+ <<"foo">>,
+ {<<"br">>, [], []},
+ <<"bar">>]},
+ parse(<<"<html><link>foo<br>bar</html>">>)),
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"link">>, [], [<<"foo">>,
+ {<<"br">>, [], []},
+ <<"bar">>]}]},
+ parse(<<"<html><link>foo<br>bar</link></html>">>)),
+ ok.
+
+exhaustive_is_singleton_test() ->
+ T = mochiweb_cover:clause_lookup_table(?MODULE, is_singleton),
+ [?assertEqual(V, is_singleton(K)) || {K, V} <- T].
+
+tokenize_attributes_test() ->
+ ?assertEqual(
+ {<<"foo">>,
+ [{<<"bar">>, <<"b\"az">>},
+ {<<"wibble">>, <<"wibble">>},
+ {<<"taco", 16#c2, 16#a9>>, <<"bell">>},
+ {<<"quux">>, <<"quux">>}],
+ []},
+ parse(<<"<foo bar=\"b&quot;az\" wibble taco&copy;=bell quux">>)),
+ ok.
+
+tokens2_test() ->
+ D0 = <<"<channel><title>from __future__ import *</title><link>http://bob.pythonmac.org</link><description>Bob's Rants</description></channel>">>,
+ ?assertEqual(
+ [{start_tag,<<"channel">>,[],false},
+ {start_tag,<<"title">>,[],false},
+ {data,<<"from __future__ import *">>,false},
+ {end_tag,<<"title">>},
+ {start_tag,<<"link">>,[],true},
+ {data,<<"http://bob.pythonmac.org">>,false},
+ {end_tag,<<"link">>},
+ {start_tag,<<"description">>,[],false},
+ {data,<<"Bob's Rants">>,false},
+ {end_tag,<<"description">>},
+ {end_tag,<<"channel">>}],
+ tokens(D0)),
+ ok.
+
+to_tokens_test() ->
+ ?assertEqual(
+ [{start_tag, <<"p">>, [{class, 1}], false},
+ {end_tag, <<"p">>}],
+ to_tokens({p, [{class, 1}], []})),
+ ?assertEqual(
+ [{start_tag, <<"p">>, [], false},
+ {end_tag, <<"p">>}],
+ to_tokens({p})),
+ ?assertEqual(
+ [{'=', <<"data">>}],
+ to_tokens({'=', <<"data">>})),
+ ?assertEqual(
+ [{comment, <<"comment">>}],
+ to_tokens({comment, <<"comment">>})),
+    %% {p, [{"class", "foo"}]} is only allowed as shorthand inside
+    %% sub-tags, where it is treated as {p, [{"class", "foo"}], []}.
+    %% At the top level it is always treated as content, i.e. as
+    %% {p, [], [{"class", "foo"}]}.
+ ?assertEqual(
+ [{start_tag, <<"html">>, [], false},
+ {start_tag, <<"p">>, [{class, 1}], false},
+ {end_tag, <<"p">>},
+ {end_tag, <<"html">>}],
+ to_tokens({html, [{p, [{class, 1}]}]})),
+ ok.
+
+parse2_test() ->
+ D0 = <<"<channel><title>from __future__ import *</title><link>http://bob.pythonmac.org<br>foo</link><description>Bob's Rants</description></channel>">>,
+ ?assertEqual(
+ {<<"channel">>,[],
+ [{<<"title">>,[],[<<"from __future__ import *">>]},
+ {<<"link">>,[],[
+ <<"http://bob.pythonmac.org">>,
+ {<<"br">>,[],[]},
+ <<"foo">>]},
+ {<<"description">>,[],[<<"Bob's Rants">>]}]},
+ parse(D0)),
+ ok.
+
+parse_tokens_test() ->
+ D0 = [{doctype,[<<"HTML">>,<<"PUBLIC">>,<<"-//W3C//DTD HTML 4.01 Transitional//EN">>]},
+ {data,<<"\n">>,true},
+ {start_tag,<<"html">>,[],false}],
+ ?assertEqual(
+ {<<"html">>, [], []},
+ parse_tokens(D0)),
+ D1 = D0 ++ [{end_tag, <<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [], []},
+ parse_tokens(D1)),
+ D2 = D0 ++ [{start_tag, <<"body">>, [], false}],
+ ?assertEqual(
+ {<<"html">>, [], [{<<"body">>, [], []}]},
+ parse_tokens(D2)),
+ D3 = D0 ++ [{start_tag, <<"head">>, [], false},
+ {end_tag, <<"head">>},
+ {start_tag, <<"body">>, [], false}],
+ ?assertEqual(
+ {<<"html">>, [], [{<<"head">>, [], []}, {<<"body">>, [], []}]},
+ parse_tokens(D3)),
+ D4 = D3 ++ [{data,<<"\n">>,true},
+ {start_tag,<<"div">>,[{<<"class">>,<<"a">>}],false},
+ {start_tag,<<"a">>,[{<<"name">>,<<"#anchor">>}],false},
+ {end_tag,<<"a">>},
+ {end_tag,<<"div">>},
+ {start_tag,<<"div">>,[{<<"class">>,<<"b">>}],false},
+ {start_tag,<<"div">>,[{<<"class">>,<<"c">>}],false},
+ {end_tag,<<"div">>},
+ {end_tag,<<"div">>}],
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"head">>, [], []},
+ {<<"body">>, [],
+ [{<<"div">>, [{<<"class">>, <<"a">>}], [{<<"a">>, [{<<"name">>, <<"#anchor">>}], []}]},
+ {<<"div">>, [{<<"class">>, <<"b">>}], [{<<"div">>, [{<<"class">>, <<"c">>}], []}]}
+ ]}]},
+ parse_tokens(D4)),
+ D5 = [{start_tag,<<"html">>,[],false},
+ {data,<<"\n">>,true},
+ {data,<<"boo">>,false},
+ {data,<<"hoo">>,false},
+ {data,<<"\n">>,true},
+ {end_tag,<<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [], [<<"\nboohoo\n">>]},
+ parse_tokens(D5)),
+ D6 = [{start_tag,<<"html">>,[],false},
+ {data,<<"\n">>,true},
+ {data,<<"\n">>,true},
+ {end_tag,<<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [], []},
+ parse_tokens(D6)),
+ D7 = [{start_tag,<<"html">>,[],false},
+ {start_tag,<<"ul">>,[],false},
+ {start_tag,<<"li">>,[],false},
+ {data,<<"word">>,false},
+ {start_tag,<<"li">>,[],false},
+ {data,<<"up">>,false},
+ {end_tag,<<"li">>},
+ {start_tag,<<"li">>,[],false},
+ {data,<<"fdsa">>,false},
+ {start_tag,<<"br">>,[],true},
+ {data,<<"asdf">>,false},
+ {end_tag,<<"ul">>},
+ {end_tag,<<"html">>}],
+ ?assertEqual(
+ {<<"html">>, [],
+ [{<<"ul">>, [],
+ [{<<"li">>, [], [<<"word">>]},
+ {<<"li">>, [], [<<"up">>]},
+ {<<"li">>, [], [<<"fdsa">>,{<<"br">>, [], []}, <<"asdf">>]}]}]},
+ parse_tokens(D7)),
+ ok.
+
+destack_test() ->
+ {<<"a">>, [], []} =
+ destack([{<<"a">>, [], []}]),
+ {<<"a">>, [], [{<<"b">>, [], []}]} =
+ destack([{<<"b">>, [], []}, {<<"a">>, [], []}]),
+ {<<"a">>, [], [{<<"b">>, [], [{<<"c">>, [], []}]}]} =
+ destack([{<<"c">>, [], []}, {<<"b">>, [], []}, {<<"a">>, [], []}]),
+ [{<<"a">>, [], [{<<"b">>, [], [{<<"c">>, [], []}]}]}] =
+ destack(<<"b">>,
+ [{<<"c">>, [], []}, {<<"b">>, [], []}, {<<"a">>, [], []}]),
+ [{<<"b">>, [], [{<<"c">>, [], []}]}, {<<"a">>, [], []}] =
+ destack(<<"c">>,
+ [{<<"c">>, [], []}, {<<"b">>, [], []},{<<"a">>, [], []}]),
+ ok.
+
+doctype_test() ->
+ ?assertEqual(
+ {<<"html">>,[],[{<<"head">>,[],[]}]},
+ mochiweb_html:parse("<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">"
+ "<html><head></head></body></html>")),
+ %% http://code.google.com/p/mochiweb/issues/detail?id=52
+ ?assertEqual(
+ {<<"html">>,[],[{<<"head">>,[],[]}]},
+ mochiweb_html:parse("<html>"
+ "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\">"
+ "<head></head></body></html>")),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_http.erl b/deps/mochiweb/src/mochiweb_http.erl
new file mode 100644
index 00000000..7607b304
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_http.erl
@@ -0,0 +1,273 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc HTTP server.
+
+-module(mochiweb_http).
+-author('bob@mochimedia.com').
+-export([start/0, start/1, stop/0, stop/1]).
+-export([loop/2, default_body/1]).
+-export([after_response/2, reentry/1]).
+-export([parse_range_request/1, range_skip_length/2]).
+
+-define(REQUEST_RECV_TIMEOUT, 300000). % timeout waiting for request line
+-define(HEADERS_RECV_TIMEOUT, 30000). % timeout waiting for headers
+
+-define(MAX_HEADERS, 1000).
+-define(DEFAULTS, [{name, ?MODULE},
+ {port, 8888}]).
+
+parse_options(Options) ->
+ {loop, HttpLoop} = proplists:lookup(loop, Options),
+ Loop = fun (S) ->
+ ?MODULE:loop(S, HttpLoop)
+ end,
+ Options1 = [{loop, Loop} | proplists:delete(loop, Options)],
+ mochilists:set_defaults(?DEFAULTS, Options1).
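+%% For example (HandlerFun stands in for your own request handler):
+%%   mochiweb_http:start([{port, 8080}, {loop, HandlerFun}])
+%% wraps HandlerFun in ?MODULE:loop/2 and fills in any missing
+%% defaults ({name, ?MODULE}, and {port, 8888} when no port is given).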
+
+stop() ->
+ mochiweb_socket_server:stop(?MODULE).
+
+stop(Name) ->
+ mochiweb_socket_server:stop(Name).
+
+start() ->
+ start([{ip, "127.0.0.1"},
+ {loop, {?MODULE, default_body}}]).
+
+start(Options) ->
+ mochiweb_socket_server:start(parse_options(Options)).
+
+frm(Body) ->
+ ["<html><head></head><body>"
+ "<form method=\"POST\">"
+ "<input type=\"hidden\" value=\"message\" name=\"hidden\"/>"
+ "<input type=\"submit\" value=\"regular POST\">"
+ "</form>"
+ "<br />"
+ "<form method=\"POST\" enctype=\"multipart/form-data\""
+ " action=\"/multipart\">"
+ "<input type=\"hidden\" value=\"multipart message\" name=\"hidden\"/>"
+ "<input type=\"file\" name=\"file\"/>"
+ "<input type=\"submit\" value=\"multipart POST\" />"
+ "</form>"
+ "<pre>", Body, "</pre>"
+ "</body></html>"].
+
+default_body(Req, M, "/chunked") when M =:= 'GET'; M =:= 'HEAD' ->
+ Res = Req:ok({"text/plain", [], chunked}),
+ Res:write_chunk("First chunk\r\n"),
+ timer:sleep(5000),
+ Res:write_chunk("Last chunk\r\n"),
+ Res:write_chunk("");
+default_body(Req, M, _Path) when M =:= 'GET'; M =:= 'HEAD' ->
+ Body = io_lib:format("~p~n", [[{parse_qs, Req:parse_qs()},
+ {parse_cookie, Req:parse_cookie()},
+ Req:dump()]]),
+ Req:ok({"text/html",
+ [mochiweb_cookies:cookie("mochiweb_http", "test_cookie")],
+ frm(Body)});
+default_body(Req, 'POST', "/multipart") ->
+ Body = io_lib:format("~p~n", [[{parse_qs, Req:parse_qs()},
+ {parse_cookie, Req:parse_cookie()},
+ {body, Req:recv_body()},
+ Req:dump()]]),
+ Req:ok({"text/html", [], frm(Body)});
+default_body(Req, 'POST', _Path) ->
+ Body = io_lib:format("~p~n", [[{parse_qs, Req:parse_qs()},
+ {parse_cookie, Req:parse_cookie()},
+ {parse_post, Req:parse_post()},
+ Req:dump()]]),
+ Req:ok({"text/html", [], frm(Body)});
+default_body(Req, _Method, _Path) ->
+ Req:respond({501, [], []}).
+
+default_body(Req) ->
+ default_body(Req, Req:get(method), Req:get(path)).
+
+loop(Socket, Body) ->
+ mochiweb_socket:setopts(Socket, [{packet, http}]),
+ request(Socket, Body).
+
+request(Socket, Body) ->
+ case mochiweb_socket:recv(Socket, 0, ?REQUEST_RECV_TIMEOUT) of
+ {ok, {http_request, Method, Path, Version}} ->
+ mochiweb_socket:setopts(Socket, [{packet, httph}]),
+ headers(Socket, {Method, Path, Version}, [], Body, 0);
+ {error, {http_error, "\r\n"}} ->
+ request(Socket, Body);
+ {error, {http_error, "\n"}} ->
+ request(Socket, Body);
+ {error, closed} ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ {error, timeout} ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ _Other ->
+ handle_invalid_request(Socket)
+ end.
+
+reentry(Body) ->
+ fun (Req) ->
+ ?MODULE:after_response(Body, Req)
+ end.
+
+headers(Socket, Request, Headers, _Body, ?MAX_HEADERS) ->
+ %% Too many headers sent, bad request.
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ handle_invalid_request(Socket, Request, Headers);
+headers(Socket, Request, Headers, Body, HeaderCount) ->
+ case mochiweb_socket:recv(Socket, 0, ?HEADERS_RECV_TIMEOUT) of
+ {ok, http_eoh} ->
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ Req = mochiweb:new_request({Socket, Request,
+ lists:reverse(Headers)}),
+ call_body(Body, Req),
+ ?MODULE:after_response(Body, Req);
+ {ok, {http_header, _, Name, _, Value}} ->
+ headers(Socket, Request, [{Name, Value} | Headers], Body,
+ 1 + HeaderCount);
+ {error, closed} ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ _Other ->
+ handle_invalid_request(Socket, Request, Headers)
+ end.
+
+call_body({M, F}, Req) ->
+ M:F(Req);
+call_body(Body, Req) ->
+ Body(Req).
+
+handle_invalid_request(Socket) ->
+ handle_invalid_request(Socket, {'GET', {abs_path, "/"}, {0,9}}, []).
+
+handle_invalid_request(Socket, Request, RevHeaders) ->
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ Req = mochiweb:new_request({Socket, Request,
+ lists:reverse(RevHeaders)}),
+ Req:respond({400, [], []}),
+ mochiweb_socket:close(Socket),
+ exit(normal).
+
+after_response(Body, Req) ->
+ Socket = Req:get(socket),
+ case Req:should_close() of
+ true ->
+ mochiweb_socket:close(Socket),
+ exit(normal);
+ false ->
+ Req:cleanup(),
+ ?MODULE:loop(Socket, Body)
+ end.
+
+parse_range_request(RawRange) when is_list(RawRange) ->
+ try
+ "bytes=" ++ RangeString = RawRange,
+ Ranges = string:tokens(RangeString, ","),
+ lists:map(fun ("-" ++ V) ->
+ {none, list_to_integer(V)};
+ (R) ->
+ case string:tokens(R, "-") of
+ [S1, S2] ->
+ {list_to_integer(S1), list_to_integer(S2)};
+ [S] ->
+ {list_to_integer(S), none}
+ end
+ end,
+ Ranges)
+ catch
+ _:_ ->
+ fail
+ end.
+
+range_skip_length(Spec, Size) ->
+ case Spec of
+ {none, R} when R =< Size, R >= 0 ->
+ {Size - R, R};
+ {none, _OutOfRange} ->
+ {0, Size};
+ {R, none} when R >= 0, R < Size ->
+ {R, Size - R};
+ {_OutOfRange, none} ->
+ invalid_range;
+ {Start, End} when 0 =< Start, Start =< End, End < Size ->
+ {Start, End - Start + 1};
+ {_OutOfRange, _End} ->
+ invalid_range
+ end.
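+%% Examples (mirroring range_test/0 and range_skip_length_test/0 below):
+%%   parse_range_request("bytes=20-30,50-100,110-200")
+%%     => [{20,30}, {50,100}, {110,200}]
+%%   range_skip_length({none, 20}, 60) => {40, 20}   (suffix "-20")
+%%   range_skip_length({30, none}, 60) => {30, 30}   (open-ended "30-")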
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+range_test() ->
+ %% valid, single ranges
+ ?assertEqual([{20, 30}], parse_range_request("bytes=20-30")),
+ ?assertEqual([{20, none}], parse_range_request("bytes=20-")),
+ ?assertEqual([{none, 20}], parse_range_request("bytes=-20")),
+
+ %% trivial single range
+ ?assertEqual([{0, none}], parse_range_request("bytes=0-")),
+
+ %% invalid, single ranges
+ ?assertEqual(fail, parse_range_request("")),
+ ?assertEqual(fail, parse_range_request("garbage")),
+ ?assertEqual(fail, parse_range_request("bytes=-20-30")),
+
+ %% valid, multiple range
+ ?assertEqual(
+ [{20, 30}, {50, 100}, {110, 200}],
+ parse_range_request("bytes=20-30,50-100,110-200")),
+ ?assertEqual(
+ [{20, none}, {50, 100}, {none, 200}],
+ parse_range_request("bytes=20-,50-100,-200")),
+
+ %% no ranges
+ ?assertEqual([], parse_range_request("bytes=")),
+ ok.
+
+range_skip_length_test() ->
+ Body = <<"012345678901234567890123456789012345678901234567890123456789">>,
+ BodySize = byte_size(Body), %% 60
+ BodySize = 60,
+
+ %% these values assume BodySize =:= 60
+ ?assertEqual({1,9}, range_skip_length({1,9}, BodySize)), %% 1-9
+ ?assertEqual({10,10}, range_skip_length({10,19}, BodySize)), %% 10-19
+ ?assertEqual({40, 20}, range_skip_length({none, 20}, BodySize)), %% -20
+ ?assertEqual({30, 30}, range_skip_length({30, none}, BodySize)), %% 30-
+
+ %% valid edge cases for range_skip_length
+ ?assertEqual({BodySize, 0}, range_skip_length({none, 0}, BodySize)),
+ ?assertEqual({0, BodySize}, range_skip_length({none, BodySize}, BodySize)),
+ ?assertEqual({0, BodySize}, range_skip_length({0, none}, BodySize)),
+ BodySizeLess1 = BodySize - 1,
+ ?assertEqual({BodySizeLess1, 1},
+ range_skip_length({BodySize - 1, none}, BodySize)),
+
+ %% out of range, return whole thing
+ ?assertEqual({0, BodySize},
+ range_skip_length({none, BodySize + 1}, BodySize)),
+ ?assertEqual({0, BodySize},
+ range_skip_length({none, -1}, BodySize)),
+
+ %% invalid ranges
+ ?assertEqual(invalid_range,
+ range_skip_length({-1, 30}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({0, BodySize + 1}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({-1, BodySize + 1}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({BodySize, 40}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({-1, none}, BodySize)),
+ ?assertEqual(invalid_range,
+ range_skip_length({BodySize, none}, BodySize)),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_io.erl b/deps/mochiweb/src/mochiweb_io.erl
new file mode 100644
index 00000000..6ce57ec8
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_io.erl
@@ -0,0 +1,46 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Utilities for dealing with IO devices (open files).
+
+-module(mochiweb_io).
+-author('bob@mochimedia.com').
+
+-export([iodevice_stream/3, iodevice_stream/2]).
+-export([iodevice_foldl/4, iodevice_foldl/3]).
+-export([iodevice_size/1]).
+-define(READ_SIZE, 8192).
+
+iodevice_foldl(F, Acc, IoDevice) ->
+ iodevice_foldl(F, Acc, IoDevice, ?READ_SIZE).
+
+iodevice_foldl(F, Acc, IoDevice, BufferSize) ->
+ case file:read(IoDevice, BufferSize) of
+ eof ->
+ Acc;
+ {ok, Data} ->
+ iodevice_foldl(F, F(Data, Acc), IoDevice, BufferSize)
+ end.
+
+iodevice_stream(Callback, IoDevice) ->
+ iodevice_stream(Callback, IoDevice, ?READ_SIZE).
+
+iodevice_stream(Callback, IoDevice, BufferSize) ->
+ F = fun (Data, ok) -> Callback(Data) end,
+ ok = iodevice_foldl(F, ok, IoDevice, BufferSize).
+
+iodevice_size(IoDevice) ->
+ {ok, Size} = file:position(IoDevice, eof),
+ {ok, 0} = file:position(IoDevice, bof),
+ Size.
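+
+%% A usage sketch (the path "priv/example.txt" is an assumed example):
+%%
+%%   {ok, Dev} = file:open("priv/example.txt", [read, raw, binary]),
+%%   Size = iodevice_size(Dev),
+%%   ok = iodevice_stream(fun (Data) -> io:put_chars(Data) end, Dev),
+%%   ok = file:close(Dev).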
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_mime.erl b/deps/mochiweb/src/mochiweb_mime.erl
new file mode 100644
index 00000000..5344aee7
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_mime.erl
@@ -0,0 +1,94 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Gives a good MIME type guess based on file extension.
+
+-module(mochiweb_mime).
+-author('bob@mochimedia.com').
+-export([from_extension/1]).
+
+%% @spec from_extension(S::string()) -> string() | undefined
+%% @doc Given a filename extension (e.g. ".html") return a guess for the MIME
+%% type such as "text/html". Will return the atom undefined if no good
+%% guess is available.
+from_extension(".html") ->
+ "text/html";
+from_extension(".xhtml") ->
+ "application/xhtml+xml";
+from_extension(".xml") ->
+ "application/xml";
+from_extension(".css") ->
+ "text/css";
+from_extension(".js") ->
+ "application/x-javascript";
+from_extension(".jpg") ->
+ "image/jpeg";
+from_extension(".gif") ->
+ "image/gif";
+from_extension(".png") ->
+ "image/png";
+from_extension(".swf") ->
+ "application/x-shockwave-flash";
+from_extension(".zip") ->
+ "application/zip";
+from_extension(".bz2") ->
+ "application/x-bzip2";
+from_extension(".gz") ->
+ "application/x-gzip";
+from_extension(".tar") ->
+ "application/x-tar";
+from_extension(".tgz") ->
+ "application/x-gzip";
+from_extension(".txt") ->
+ "text/plain";
+from_extension(".doc") ->
+ "application/msword";
+from_extension(".pdf") ->
+ "application/pdf";
+from_extension(".xls") ->
+ "application/vnd.ms-excel";
+from_extension(".rtf") ->
+ "application/rtf";
+from_extension(".mov") ->
+ "video/quicktime";
+from_extension(".mp3") ->
+ "audio/mpeg";
+from_extension(".z") ->
+ "application/x-compress";
+from_extension(".wav") ->
+ "audio/x-wav";
+from_extension(".ico") ->
+ "image/x-icon";
+from_extension(".bmp") ->
+ "image/bmp";
+from_extension(".m4a") ->
+ "audio/mpeg";
+from_extension(".m3u") ->
+ "audio/x-mpegurl";
+from_extension(".exe") ->
+ "application/octet-stream";
+from_extension(".csv") ->
+ "text/csv";
+from_extension(_) ->
+ undefined.
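+
+%% Typical use pairs this with filename:extension/1, e.g.
+%% from_extension(filename:extension("logo.png")) yields "image/png".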
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+exhaustive_from_extension_test() ->
+ T = mochiweb_cover:clause_lookup_table(?MODULE, from_extension),
+ [?assertEqual(V, from_extension(K)) || {K, V} <- T].
+
+from_extension_test() ->
+ ?assertEqual("text/html",
+ from_extension(".html")),
+ ?assertEqual(undefined,
+ from_extension("")),
+ ?assertEqual(undefined,
+ from_extension(".wtf")),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_multipart.erl b/deps/mochiweb/src/mochiweb_multipart.erl
new file mode 100644
index 00000000..3069cf4d
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_multipart.erl
@@ -0,0 +1,824 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Utilities for parsing multipart/form-data.
+
+-module(mochiweb_multipart).
+-author('bob@mochimedia.com').
+
+-export([parse_form/1, parse_form/2]).
+-export([parse_multipart_request/2]).
+-export([parts_to_body/3, parts_to_multipart_body/4]).
+-export([default_file_handler/2]).
+
+-define(CHUNKSIZE, 4096).
+
+-record(mp, {state, boundary, length, buffer, callback, req}).
+
+%% TODO: DOCUMENT THIS MODULE.
+%% @type key() = atom() | string() | binary().
+%% @type value() = atom() | iolist() | integer().
+%% @type header() = {key(), value()}.
+%% @type bodypart() = {Start::integer(), End::integer(), Body::iolist()}.
+%% @type formfile() = {Name::string(), ContentType::string(), Content::binary()}.
+%% @type request().
+%% @type file_handler() = (Filename::string(), ContentType::string()) -> file_handler_callback().
+%% @type file_handler_callback() = (binary() | eof) -> file_handler_callback() | term().
+
+%% @spec parts_to_body([bodypart()], ContentType::string(),
+%% Size::integer()) -> {[header()], iolist()}
+%% @doc Return {[header()], iolist()} representing the body for the given
+%% parts; the result may be a single part or a multipart body.
+parts_to_body([{Start, End, Body}], ContentType, Size) ->
+ HeaderList = [{"Content-Type", ContentType},
+ {"Content-Range",
+ ["bytes ",
+ mochiweb_util:make_io(Start), "-", mochiweb_util:make_io(End),
+ "/", mochiweb_util:make_io(Size)]}],
+ {HeaderList, Body};
+parts_to_body(BodyList, ContentType, Size) when is_list(BodyList) ->
+ parts_to_multipart_body(BodyList, ContentType, Size,
+ mochihex:to_hex(crypto:rand_bytes(8))).
+
+%% @spec parts_to_multipart_body([bodypart()], ContentType::string(),
+%% Size::integer(), Boundary::string()) ->
+%% {[header()], iolist()}
+%% @doc Return {[header()], iolist()} representing the body for the given
+%% parts; the result is always a multipart response.
+parts_to_multipart_body(BodyList, ContentType, Size, Boundary) ->
+ HeaderList = [{"Content-Type",
+ ["multipart/byteranges; ",
+ "boundary=", Boundary]}],
+ MultiPartBody = multipart_body(BodyList, ContentType, Boundary, Size),
+
+ {HeaderList, MultiPartBody}.
+
+%% @spec multipart_body([bodypart()], ContentType::string(),
+%% Boundary::string(), Size::integer()) -> iolist()
+%% @doc Return the representation of a multipart body for the given [bodypart()].
+multipart_body([], _ContentType, Boundary, _Size) ->
+ ["--", Boundary, "--\r\n"];
+multipart_body([{Start, End, Body} | BodyList], ContentType, Boundary, Size) ->
+ ["--", Boundary, "\r\n",
+ "Content-Type: ", ContentType, "\r\n",
+ "Content-Range: ",
+ "bytes ", mochiweb_util:make_io(Start), "-", mochiweb_util:make_io(End),
+ "/", mochiweb_util:make_io(Size), "\r\n\r\n",
+ Body, "\r\n"
+ | multipart_body(BodyList, ContentType, Boundary, Size)].
+
+%% @spec parse_form(request()) -> [{string(), string() | formfile()}]
+%% @doc Parse a multipart form from the given request using the in-memory
+%% default_file_handler/2.
+parse_form(Req) ->
+ parse_form(Req, fun default_file_handler/2).
+
+%% @spec parse_form(request(), F::file_handler()) -> [{string(), string() | term()}]
+%% @doc Parse a multipart form from the given request using the given file_handler().
+parse_form(Req, FileHandler) ->
+ Callback = fun (Next) -> parse_form_outer(Next, FileHandler, []) end,
+ {_, _, Res} = parse_multipart_request(Req, Callback),
+ Res.
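+
+%% A call-site sketch (field names assumed): for a form with one text field
+%% and one uploaded file handled by default_file_handler/2, parse_form(Req)
+%% returns [{"field", "value"}, {"file", {Filename, ContentType, Binary}}].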
+
+parse_form_outer(eof, _, Acc) ->
+ lists:reverse(Acc);
+parse_form_outer({headers, H}, FileHandler, State) ->
+ {"form-data", H1} = proplists:get_value("content-disposition", H),
+ Name = proplists:get_value("name", H1),
+ Filename = proplists:get_value("filename", H1),
+ case Filename of
+ undefined ->
+ fun (Next) ->
+ parse_form_value(Next, {Name, []}, FileHandler, State)
+ end;
+ _ ->
+ ContentType = proplists:get_value("content-type", H),
+ Handler = FileHandler(Filename, ContentType),
+ fun (Next) ->
+ parse_form_file(Next, {Name, Handler}, FileHandler, State)
+ end
+ end.
+
+parse_form_value(body_end, {Name, Acc}, FileHandler, State) ->
+ Value = binary_to_list(iolist_to_binary(lists:reverse(Acc))),
+ State1 = [{Name, Value} | State],
+ fun (Next) -> parse_form_outer(Next, FileHandler, State1) end;
+parse_form_value({body, Data}, {Name, Acc}, FileHandler, State) ->
+ Acc1 = [Data | Acc],
+ fun (Next) -> parse_form_value(Next, {Name, Acc1}, FileHandler, State) end.
+
+parse_form_file(body_end, {Name, Handler}, FileHandler, State) ->
+ Value = Handler(eof),
+ State1 = [{Name, Value} | State],
+ fun (Next) -> parse_form_outer(Next, FileHandler, State1) end;
+parse_form_file({body, Data}, {Name, Handler}, FileHandler, State) ->
+ H1 = Handler(Data),
+ fun (Next) -> parse_form_file(Next, {Name, H1}, FileHandler, State) end.
+
+default_file_handler(Filename, ContentType) ->
+ default_file_handler_1(Filename, ContentType, []).
+
+default_file_handler_1(Filename, ContentType, Acc) ->
+ fun(eof) ->
+ Value = iolist_to_binary(lists:reverse(Acc)),
+ {Filename, ContentType, Value};
+ (Next) ->
+ default_file_handler_1(Filename, ContentType, [Next | Acc])
+ end.
+
+parse_multipart_request(Req, Callback) ->
+ %% TODO: Support chunked?
+ Length = list_to_integer(Req:get_header_value("content-length")),
+ Boundary = iolist_to_binary(
+ get_boundary(Req:get_header_value("content-type"))),
+ Prefix = <<"\r\n--", Boundary/binary>>,
+ BS = byte_size(Boundary),
+ Chunk = read_chunk(Req, Length),
+ Length1 = Length - byte_size(Chunk),
+ <<"--", Boundary:BS/binary, "\r\n", Rest/binary>> = Chunk,
+ feed_mp(headers, flash_multipart_hack(#mp{boundary=Prefix,
+ length=Length1,
+ buffer=Rest,
+ callback=Callback,
+ req=Req})).
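+
+%% The supplied callback is invoked once per parse event and must return the
+%% next callback fun: {headers, Headers}, then zero or more {body, Binary}
+%% chunks, then body_end for each part, and finally eof, whose return value
+%% becomes the third element of the parse_multipart_request/2 result.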
+
+parse_headers(<<>>) ->
+ [];
+parse_headers(Binary) ->
+ parse_headers(Binary, []).
+
+parse_headers(Binary, Acc) ->
+ case find_in_binary(<<"\r\n">>, Binary) of
+ {exact, N} ->
+ <<Line:N/binary, "\r\n", Rest/binary>> = Binary,
+ parse_headers(Rest, [split_header(Line) | Acc]);
+ not_found ->
+ lists:reverse([split_header(Binary) | Acc])
+ end.
+
+split_header(Line) ->
+ {Name, [$: | Value]} = lists:splitwith(fun (C) -> C =/= $: end,
+ binary_to_list(Line)),
+ {string:to_lower(string:strip(Name)),
+ mochiweb_util:parse_header(Value)}.
+
+read_chunk(Req, Length) when Length > 0 ->
+ case Length of
+ Length when Length < ?CHUNKSIZE ->
+ Req:recv(Length);
+ _ ->
+ Req:recv(?CHUNKSIZE)
+ end.
+
+read_more(State=#mp{length=Length, buffer=Buffer, req=Req}) ->
+ Data = read_chunk(Req, Length),
+ Buffer1 = <<Buffer/binary, Data/binary>>,
+ flash_multipart_hack(State#mp{length=Length - byte_size(Data),
+ buffer=Buffer1}).
+
+flash_multipart_hack(State=#mp{length=0, buffer=Buffer, boundary=Prefix}) ->
+ %% http://code.google.com/p/mochiweb/issues/detail?id=22
+ %% Flash doesn't terminate multipart with \r\n properly so we fix it up here
+ PrefixSize = size(Prefix),
+ case size(Buffer) - (2 + PrefixSize) of
+ Seek when Seek >= 0 ->
+ case Buffer of
+ <<_:Seek/binary, Prefix:PrefixSize/binary, "--">> ->
+ Buffer1 = <<Buffer/binary, "\r\n">>,
+ State#mp{buffer=Buffer1};
+ _ ->
+ State
+ end;
+ _ ->
+ State
+ end;
+flash_multipart_hack(State) ->
+ State.
+
+feed_mp(headers, State=#mp{buffer=Buffer, callback=Callback}) ->
+ {State1, P} = case find_in_binary(<<"\r\n\r\n">>, Buffer) of
+ {exact, N} ->
+ {State, N};
+ _ ->
+ S1 = read_more(State),
+ %% Assume headers must be less than ?CHUNKSIZE
+ {exact, N} = find_in_binary(<<"\r\n\r\n">>,
+ S1#mp.buffer),
+ {S1, N}
+ end,
+ <<Headers:P/binary, "\r\n\r\n", Rest/binary>> = State1#mp.buffer,
+ NextCallback = Callback({headers, parse_headers(Headers)}),
+ feed_mp(body, State1#mp{buffer=Rest,
+ callback=NextCallback});
+feed_mp(body, State=#mp{boundary=Prefix, buffer=Buffer, callback=Callback}) ->
+ Boundary = find_boundary(Prefix, Buffer),
+ case Boundary of
+ {end_boundary, Start, Skip} ->
+ <<Data:Start/binary, _:Skip/binary, Rest/binary>> = Buffer,
+ C1 = Callback({body, Data}),
+ C2 = C1(body_end),
+ {State#mp.length, Rest, C2(eof)};
+ {next_boundary, Start, Skip} ->
+ <<Data:Start/binary, _:Skip/binary, Rest/binary>> = Buffer,
+ C1 = Callback({body, Data}),
+ feed_mp(headers, State#mp{callback=C1(body_end),
+ buffer=Rest});
+ {maybe, Start} ->
+ <<Data:Start/binary, Rest/binary>> = Buffer,
+ feed_mp(body, read_more(State#mp{callback=Callback({body, Data}),
+ buffer=Rest}));
+ not_found ->
+ {Data, Rest} = {Buffer, <<>>},
+ feed_mp(body, read_more(State#mp{callback=Callback({body, Data}),
+ buffer=Rest}))
+ end.
+
+get_boundary(ContentType) ->
+ {"multipart/form-data", Opts} = mochiweb_util:parse_header(ContentType),
+ case proplists:get_value("boundary", Opts) of
+ S when is_list(S) ->
+ S
+ end.
+
+find_in_binary(B, Data) when size(B) > 0 ->
+ case size(Data) - size(B) of
+ Last when Last < 0 ->
+ partial_find(B, Data, 0, size(Data));
+ Last ->
+ find_in_binary(B, size(B), Data, 0, Last)
+ end.
+
+find_in_binary(B, BS, D, N, Last) when N =< Last ->
+ case D of
+ <<_:N/binary, B:BS/binary, _/binary>> ->
+ {exact, N};
+ _ ->
+ find_in_binary(B, BS, D, 1 + N, Last)
+ end;
+find_in_binary(B, BS, D, N, Last) when N =:= 1 + Last ->
+ partial_find(B, D, N, BS - 1).
+
+partial_find(_B, _D, _N, 0) ->
+ not_found;
+partial_find(B, D, N, K) ->
+ <<B1:K/binary, _/binary>> = B,
+ case D of
+ <<_Skip:N/binary, B1:K/binary>> ->
+ {partial, N, K};
+ _ ->
+ partial_find(B, D, 1 + N, K - 1)
+ end.
+
+find_boundary(Prefix, Data) ->
+ case find_in_binary(Prefix, Data) of
+ {exact, Skip} ->
+ PrefixSkip = Skip + size(Prefix),
+ case Data of
+ <<_:PrefixSkip/binary, "\r\n", _/binary>> ->
+ {next_boundary, Skip, size(Prefix) + 2};
+ <<_:PrefixSkip/binary, "--\r\n", _/binary>> ->
+ {end_boundary, Skip, size(Prefix) + 4};
+ _ when size(Data) < PrefixSkip + 4 ->
+ %% Underflow
+ {maybe, Skip};
+ _ ->
+ %% False positive
+ not_found
+ end;
+ {partial, Skip, Length} when (Skip + Length) =:= size(Data) ->
+ %% Underflow
+ {maybe, Skip};
+ _ ->
+ not_found
+ end.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+ssl_cert_opts() ->
+ EbinDir = filename:dirname(code:which(?MODULE)),
+ CertDir = filename:join([EbinDir, "..", "support", "test-materials"]),
+ CertFile = filename:join(CertDir, "test_ssl_cert.pem"),
+ KeyFile = filename:join(CertDir, "test_ssl_key.pem"),
+ [{certfile, CertFile}, {keyfile, KeyFile}].
+
+with_socket_server(Transport, ServerFun, ClientFun) ->
+ ServerOpts0 = [{ip, "127.0.0.1"}, {port, 0}, {loop, ServerFun}],
+ ServerOpts = case Transport of
+ plain ->
+ ServerOpts0;
+ ssl ->
+ ServerOpts0 ++ [{ssl, true}, {ssl_opts, ssl_cert_opts()}]
+ end,
+ {ok, Server} = mochiweb_socket_server:start(ServerOpts),
+ Port = mochiweb_socket_server:get(Server, port),
+ ClientOpts = [binary, {active, false}],
+ {ok, Client} = case Transport of
+ plain ->
+ gen_tcp:connect("127.0.0.1", Port, ClientOpts);
+ ssl ->
+ ClientOpts1 = [{ssl_imp, new} | ClientOpts],
+ {ok, SslSocket} = ssl:connect("127.0.0.1", Port, ClientOpts1),
+ {ok, {ssl, SslSocket}}
+ end,
+ Res = (catch ClientFun(Client)),
+ mochiweb_socket_server:stop(Server),
+ Res.
+
+fake_request(Socket, ContentType, Length) ->
+ mochiweb_request:new(Socket,
+ 'POST',
+ "/multipart",
+ {1,1},
+ mochiweb_headers:make(
+ [{"content-type", ContentType},
+ {"content-length", Length}])).
+
+test_callback({body, <<>>}, Rest=[body_end | _]) ->
+ %% When expecting the body_end we might get an empty binary
+ fun (Next) -> test_callback(Next, Rest) end;
+test_callback({body, Got}, [{body, Expect} | Rest]) when Got =/= Expect ->
+ %% Partial response
+ GotSize = size(Got),
+ <<Got:GotSize/binary, Expect1/binary>> = Expect,
+ fun (Next) -> test_callback(Next, [{body, Expect1} | Rest]) end;
+test_callback(Got, [Expect | Rest]) ->
+ ?assertEqual(Got, Expect),
+ case Rest of
+ [] ->
+ ok;
+ _ ->
+ fun (Next) -> test_callback(Next, Rest) end
+ end.
+
+parse3_http_test() ->
+ parse3(plain).
+
+parse3_https_test() ->
+ parse3(ssl).
+
+parse3(Transport) ->
+ ContentType = "multipart/form-data; boundary=---------------------------7386909285754635891697677882",
+ BinContent = <<"-----------------------------7386909285754635891697677882\r\nContent-Disposition: form-data; name=\"hidden\"\r\n\r\nmultipart message\r\n-----------------------------7386909285754635891697677882\r\nContent-Disposition: form-data; name=\"file\"; filename=\"test_file.txt\"\r\nContent-Type: text/plain\r\n\r\nWoo multiline text file\n\nLa la la\r\n-----------------------------7386909285754635891697677882--\r\n">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "hidden"}]}}]},
+ {body, <<"multipart message">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", "test_file.txt"}]}},
+ {"content-type", {"text/plain", []}}]},
+ {body, <<"Woo multiline text file\n\nLa la la">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse2_http_test() ->
+ parse2(plain).
+
+parse2_https_test() ->
+ parse2(ssl).
+
+parse2(Transport) ->
+ ContentType = "multipart/form-data; boundary=---------------------------6072231407570234361599764024",
+ BinContent = <<"-----------------------------6072231407570234361599764024\r\nContent-Disposition: form-data; name=\"hidden\"\r\n\r\nmultipart message\r\n-----------------------------6072231407570234361599764024\r\nContent-Disposition: form-data; name=\"file\"; filename=\"\"\r\nContent-Type: application/octet-stream\r\n\r\n\r\n-----------------------------6072231407570234361599764024--\r\n">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "hidden"}]}}]},
+ {body, <<"multipart message">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", ""}]}},
+ {"content-type", {"application/octet-stream", []}}]},
+ {body, <<>>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse_form_http_test() ->
+ do_parse_form(plain).
+
+parse_form_https_test() ->
+ do_parse_form(ssl).
+
+do_parse_form(Transport) ->
+ ContentType = "multipart/form-data; boundary=AaB03x",
+ "AaB03x" = get_boundary(ContentType),
+ Content = mochiweb_util:join(
+ ["--AaB03x",
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--AaB03x",
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "",
+ "... contents of file1.txt ...",
+ "--AaB03x--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_form(Req),
+ [{"submit-name", "Larry"},
+ {"files", {"file1.txt", {"text/plain",[]},
+ <<"... contents of file1.txt ...">>}
+ }] = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse_http_test() ->
+ do_parse(plain).
+
+parse_https_test() ->
+ do_parse(ssl).
+
+do_parse(Transport) ->
+ ContentType = "multipart/form-data; boundary=AaB03x",
+ "AaB03x" = get_boundary(ContentType),
+ Content = mochiweb_util:join(
+ ["--AaB03x",
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--AaB03x",
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "",
+ "... contents of file1.txt ...",
+ "--AaB03x--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "submit-name"}]}}]},
+ {body, <<"Larry">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
+ {"content-type", {"text/plain", []}}]},
+ {body, <<"... contents of file1.txt ...">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse_partial_body_boundary_http_test() ->
+ parse_partial_body_boundary(plain).
+
+parse_partial_body_boundary_https_test() ->
+ parse_partial_body_boundary(ssl).
+
+parse_partial_body_boundary(Transport) ->
+ Boundary = string:copies("$", 2048),
+ ContentType = "multipart/form-data; boundary=" ++ Boundary,
+ ?assertEqual(Boundary, get_boundary(ContentType)),
+ Content = mochiweb_util:join(
+ ["--" ++ Boundary,
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--" ++ Boundary,
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "",
+ "... contents of file1.txt ...",
+ "--" ++ Boundary ++ "--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "submit-name"}]}}]},
+ {body, <<"Larry">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
+ {"content-type", {"text/plain", []}}
+ ]},
+ {body, <<"... contents of file1.txt ...">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse_large_header_http_test() ->
+ parse_large_header(plain).
+
+parse_large_header_https_test() ->
+ parse_large_header(ssl).
+
+parse_large_header(Transport) ->
+ ContentType = "multipart/form-data; boundary=AaB03x",
+ "AaB03x" = get_boundary(ContentType),
+ Content = mochiweb_util:join(
+ ["--AaB03x",
+ "Content-Disposition: form-data; name=\"submit-name\"",
+ "",
+ "Larry",
+ "--AaB03x",
+ "Content-Disposition: form-data; name=\"files\";"
+ ++ "filename=\"file1.txt\"",
+ "Content-Type: text/plain",
+ "x-large-header: " ++ string:copies("%", 4096),
+ "",
+ "... contents of file1.txt ...",
+ "--AaB03x--",
+ ""], "\r\n"),
+ BinContent = iolist_to_binary(Content),
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "submit-name"}]}}]},
+ {body, <<"Larry">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "files"}, {"filename", "file1.txt"}]}},
+ {"content-type", {"text/plain", []}},
+ {"x-large-header", {string:copies("%", 4096), []}}
+ ]},
+ {body, <<"... contents of file1.txt ...">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+find_boundary_test() ->
+ B = <<"\r\n--X">>,
+ {next_boundary, 0, 7} = find_boundary(B, <<"\r\n--X\r\nRest">>),
+ {next_boundary, 1, 7} = find_boundary(B, <<"!\r\n--X\r\nRest">>),
+ {end_boundary, 0, 9} = find_boundary(B, <<"\r\n--X--\r\nRest">>),
+ {end_boundary, 1, 9} = find_boundary(B, <<"!\r\n--X--\r\nRest">>),
+ not_found = find_boundary(B, <<"--X\r\nRest">>),
+ {maybe, 0} = find_boundary(B, <<"\r\n--X\r">>),
+ {maybe, 1} = find_boundary(B, <<"!\r\n--X\r">>),
+ P = <<"\r\n-----------------------------16037454351082272548568224146">>,
+ B0 = <<55,212,131,77,206,23,216,198,35,87,252,118,252,8,25,211,132,229,
+ 182,42,29,188,62,175,247,243,4,4,0,59, 13,10,45,45,45,45,45,45,45,
+ 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,
+ 49,54,48,51,55,52,53,52,51,53,49>>,
+ {maybe, 30} = find_boundary(P, B0),
+ not_found = find_boundary(B, <<"\r\n--XJOPKE">>),
+ ok.
+
+find_in_binary_test() ->
+ {exact, 0} = find_in_binary(<<"foo">>, <<"foobarbaz">>),
+ {exact, 1} = find_in_binary(<<"oo">>, <<"foobarbaz">>),
+ {exact, 8} = find_in_binary(<<"z">>, <<"foobarbaz">>),
+ not_found = find_in_binary(<<"q">>, <<"foobarbaz">>),
+ {partial, 7, 2} = find_in_binary(<<"azul">>, <<"foobarbaz">>),
+ {exact, 0} = find_in_binary(<<"foobarbaz">>, <<"foobarbaz">>),
+ {partial, 0, 3} = find_in_binary(<<"foobar">>, <<"foo">>),
+ {partial, 1, 3} = find_in_binary(<<"foobar">>, <<"afoo">>),
+ ok.
+
+flash_parse_http_test() ->
+ flash_parse(plain).
+
+flash_parse_https_test() ->
+ flash_parse(ssl).
+
+flash_parse(Transport) ->
+ ContentType = "multipart/form-data; boundary=----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5",
+ "----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5" = get_boundary(ContentType),
+ BinContent = <<"------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Filename\"\r\n\r\nhello.txt\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"success_action_status\"\r\n\r\n201\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"file\"; filename=\"hello.txt\"\r\nContent-Type: application/octet-stream\r\n\r\nhello\n\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Upload\"\r\n\r\nSubmit Query\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5--">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Filename"}]}}]},
+ {body, <<"hello.txt">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "success_action_status"}]}}]},
+ {body, <<"201">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", "hello.txt"}]}},
+ {"content-type", {"application/octet-stream", []}}]},
+ {body, <<"hello\n">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Upload"}]}}]},
+ {body, <<"Submit Query">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+flash_parse2_http_test() ->
+ flash_parse2(plain).
+
+flash_parse2_https_test() ->
+ flash_parse2(ssl).
+
+flash_parse2(Transport) ->
+ ContentType = "multipart/form-data; boundary=----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5",
+ "----------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5" = get_boundary(ContentType),
+ Chunk = iolist_to_binary(string:copies("%", 4096)),
+ BinContent = <<"------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Filename\"\r\n\r\nhello.txt\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"success_action_status\"\r\n\r\n201\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"file\"; filename=\"hello.txt\"\r\nContent-Type: application/octet-stream\r\n\r\n", Chunk/binary, "\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5\r\nContent-Disposition: form-data; name=\"Upload\"\r\n\r\nSubmit Query\r\n------------ei4GI3GI3Ij5Ef1ae0KM7Ij5ei4Ij5--">>,
+ Expect = [{headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Filename"}]}}]},
+ {body, <<"hello.txt">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "success_action_status"}]}}]},
+ {body, <<"201">>},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "file"}, {"filename", "hello.txt"}]}},
+ {"content-type", {"application/octet-stream", []}}]},
+ {body, Chunk},
+ body_end,
+ {headers,
+ [{"content-disposition",
+ {"form-data", [{"name", "Upload"}]}}]},
+ {body, <<"Submit Query">>},
+ body_end,
+ eof],
+ TestCallback = fun (Next) -> test_callback(Next, Expect) end,
+ ServerFun = fun (Socket) ->
+ ok = mochiweb_socket:send(Socket, BinContent),
+ exit(normal)
+ end,
+ ClientFun = fun (Socket) ->
+ Req = fake_request(Socket, ContentType,
+ byte_size(BinContent)),
+ Res = parse_multipart_request(Req, TestCallback),
+ {0, <<>>, ok} = Res,
+ ok
+ end,
+ ok = with_socket_server(Transport, ServerFun, ClientFun),
+ ok.
+
+parse_headers_test() ->
+ ?assertEqual([], parse_headers(<<>>)).
+
+flash_multipart_hack_test() ->
+ Buffer = <<"prefix-">>,
+ Prefix = <<"prefix">>,
+ State = #mp{length=0, buffer=Buffer, boundary=Prefix},
+ ?assertEqual(State,
+ flash_multipart_hack(State)).
+
+parts_to_body_single_test() ->
+ {HL, B} = parts_to_body([{0, 5, <<"01234">>}],
+ "text/plain",
+ 10),
+ [{"Content-Range", Range},
+ {"Content-Type", Type}] = lists:sort(HL),
+ ?assertEqual(
+ <<"bytes 0-5/10">>,
+ iolist_to_binary(Range)),
+ ?assertEqual(
+ <<"text/plain">>,
+ iolist_to_binary(Type)),
+ ?assertEqual(
+ <<"01234">>,
+ iolist_to_binary(B)),
+ ok.
+
+parts_to_body_multi_test() ->
+ {[{"Content-Type", Type}],
+ _B} = parts_to_body([{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ 10),
+ ?assertMatch(
+ <<"multipart/byteranges; boundary=", _/binary>>,
+ iolist_to_binary(Type)),
+ ok.
+
+parts_to_multipart_body_test() ->
+ {[{"Content-Type", V}], B} = parts_to_multipart_body(
+ [{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ 10,
+ "BOUNDARY"),
+ MB = multipart_body(
+ [{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ "BOUNDARY",
+ 10),
+ ?assertEqual(
+ <<"multipart/byteranges; boundary=BOUNDARY">>,
+ iolist_to_binary(V)),
+ ?assertEqual(
+ iolist_to_binary(MB),
+ iolist_to_binary(B)),
+ ok.
+
+multipart_body_test() ->
+ ?assertEqual(
+ <<"--BOUNDARY--\r\n">>,
+ iolist_to_binary(multipart_body([], "text/plain", "BOUNDARY", 0))),
+ ?assertEqual(
+ <<"--BOUNDARY\r\n"
+ "Content-Type: text/plain\r\n"
+ "Content-Range: bytes 0-5/10\r\n\r\n"
+ "01234\r\n"
+ "--BOUNDARY\r\n"
+ "Content-Type: text/plain\r\n"
+ "Content-Range: bytes 5-10/10\r\n\r\n"
+ "56789\r\n"
+ "--BOUNDARY--\r\n">>,
+ iolist_to_binary(multipart_body([{0, 5, <<"01234">>}, {5, 10, <<"56789">>}],
+ "text/plain",
+ "BOUNDARY",
+ 10))),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_request.erl b/deps/mochiweb/src/mochiweb_request.erl
new file mode 100644
index 00000000..34bcba58
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_request.erl
@@ -0,0 +1,768 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc MochiWeb HTTP Request abstraction.
+
+-module(mochiweb_request, [Socket, Method, RawPath, Version, Headers]).
+-author('bob@mochimedia.com').
+
+-include_lib("kernel/include/file.hrl").
+-include("internal.hrl").
+
+-define(QUIP, "Any of you quaids got a smint?").
+
+-export([get_header_value/1, get_primary_header_value/1, get/1, dump/0]).
+-export([send/1, recv/1, recv/2, recv_body/0, recv_body/1, stream_body/3]).
+-export([start_response/1, start_response_length/1, start_raw_response/1]).
+-export([respond/1, ok/1]).
+-export([not_found/0, not_found/1]).
+-export([parse_post/0, parse_qs/0]).
+-export([should_close/0, cleanup/0]).
+-export([parse_cookie/0, get_cookie_value/1]).
+-export([serve_file/2, serve_file/3]).
+-export([accepted_encodings/1]).
+-export([accepts_content_type/1]).
+
+-define(SAVE_QS, mochiweb_request_qs).
+-define(SAVE_PATH, mochiweb_request_path).
+-define(SAVE_RECV, mochiweb_request_recv).
+-define(SAVE_BODY, mochiweb_request_body).
+-define(SAVE_BODY_LENGTH, mochiweb_request_body_length).
+-define(SAVE_POST, mochiweb_request_post).
+-define(SAVE_COOKIE, mochiweb_request_cookie).
+-define(SAVE_FORCE_CLOSE, mochiweb_request_force_close).
+
+%% @type iolist() = [iolist() | binary() | char()].
+%% @type iodata() = binary() | iolist().
+%% @type key() = atom() | string() | binary()
+%% @type value() = atom() | string() | binary() | integer()
+%% @type headers(). A mochiweb_headers structure.
+%% @type response(). A mochiweb_response parameterized module instance.
+%% @type ioheaders() = headers() | [{key(), value()}].
+
+% 5 minute default idle timeout
+-define(IDLE_TIMEOUT, 300000).
+
+% Maximum recv_body() length of 1MB
+-define(MAX_RECV_BODY, (1024*1024)).
+
+%% @spec get_header_value(K) -> undefined | Value
+%% @doc Get the value of a given request header.
+get_header_value(K) ->
+ mochiweb_headers:get_value(K, Headers).
+
+get_primary_header_value(K) ->
+ mochiweb_headers:get_primary_value(K, Headers).
+
+%% @type field() = socket | scheme | method | raw_path | version | headers | peer | path | body_length | range
+
+%% @spec get(field()) -> term()
+%% @doc Return the internal representation of the given field. If
+%% <code>socket</code> is requested on a HTTPS connection, then
+%% an ssl socket will be returned as <code>{ssl, SslSocket}</code>.
+%% You can use <code>SslSocket</code> with the <code>ssl</code>
+%% application, eg: <code>ssl:peercert(SslSocket)</code>.
+get(socket) ->
+ Socket;
+get(scheme) ->
+ case mochiweb_socket:type(Socket) of
+ plain ->
+ http;
+ ssl ->
+ https
+ end;
+get(method) ->
+ Method;
+get(raw_path) ->
+ RawPath;
+get(version) ->
+ Version;
+get(headers) ->
+ Headers;
+get(peer) ->
+ case mochiweb_socket:peername(Socket) of
+ {ok, {Addr={10, _, _, _}, _Port}} ->
+ case get_header_value("x-forwarded-for") of
+ undefined ->
+ inet_parse:ntoa(Addr);
+ Hosts ->
+ string:strip(lists:last(string:tokens(Hosts, ",")))
+ end;
+ {ok, {{127, 0, 0, 1}, _Port}} ->
+ case get_header_value("x-forwarded-for") of
+ undefined ->
+ "127.0.0.1";
+ Hosts ->
+ string:strip(lists:last(string:tokens(Hosts, ",")))
+ end;
+ {ok, {Addr, _Port}} ->
+ inet_parse:ntoa(Addr);
+ {error, enotconn} ->
+ exit(normal)
+ end;
+get(path) ->
+ case erlang:get(?SAVE_PATH) of
+ undefined ->
+ {Path0, _, _} = mochiweb_util:urlsplit_path(RawPath),
+ Path = mochiweb_util:unquote(Path0),
+ put(?SAVE_PATH, Path),
+ Path;
+ Cached ->
+ Cached
+ end;
+get(body_length) ->
+ case erlang:get(?SAVE_BODY_LENGTH) of
+ undefined ->
+ BodyLength = body_length(),
+ put(?SAVE_BODY_LENGTH, {cached, BodyLength}),
+ BodyLength;
+ {cached, Cached} ->
+ Cached
+ end;
+get(range) ->
+ case get_header_value(range) of
+ undefined ->
+ undefined;
+ RawRange ->
+ mochiweb_http:parse_range_request(RawRange)
+ end.
+
+%% @spec dump() -> {mochiweb_request, [{atom(), term()}]}
+%% @doc Dump the internal representation to a "human readable" set of terms
+%% for debugging/inspection purposes.
+dump() ->
+ {?MODULE, [{method, Method},
+ {version, Version},
+ {raw_path, RawPath},
+ {headers, mochiweb_headers:to_list(Headers)}]}.
+
+%% @spec send(iodata()) -> ok
+%% @doc Send data over the socket.
+send(Data) ->
+ case mochiweb_socket:send(Socket, Data) of
+ ok ->
+ ok;
+ _ ->
+ exit(normal)
+ end.
+
+%% @spec recv(integer()) -> binary()
+%% @doc Receive Length bytes from the client as a binary, with the default
+%% idle timeout.
+recv(Length) ->
+ recv(Length, ?IDLE_TIMEOUT).
+
+%% @spec recv(integer(), integer()) -> binary()
+%% @doc Receive Length bytes from the client as a binary, with the given
+%% Timeout in msec.
+recv(Length, Timeout) ->
+ case mochiweb_socket:recv(Socket, Length, Timeout) of
+ {ok, Data} ->
+ put(?SAVE_RECV, true),
+ Data;
+ _ ->
+ exit(normal)
+ end.
+
+%% @spec body_length() -> undefined | chunked | unknown_transfer_encoding | integer()
+%% @doc Infer body length from transfer-encoding and content-length headers.
+body_length() ->
+ case get_header_value("transfer-encoding") of
+ undefined ->
+ case get_header_value("content-length") of
+ undefined ->
+ undefined;
+ Length ->
+ list_to_integer(Length)
+ end;
+ "chunked" ->
+ chunked;
+ Unknown ->
+ {unknown_transfer_encoding, Unknown}
+ end.
+
+
+%% @spec recv_body() -> binary()
+%% @doc Receive the body of the HTTP request (defined by Content-Length).
+%% Will only receive up to the default max-body length of 1MB.
+recv_body() ->
+ recv_body(?MAX_RECV_BODY).
+
+%% @spec recv_body(integer()) -> binary()
+%% @doc Receive the body of the HTTP request (defined by Content-Length).
+%% Will receive up to MaxBody bytes.
+recv_body(MaxBody) ->
+ case erlang:get(?SAVE_BODY) of
+ undefined ->
+ % we could use a sane constant for max chunk size
+ Body = stream_body(?MAX_RECV_BODY, fun
+ ({0, _ChunkedFooter}, {_LengthAcc, BinAcc}) ->
+ iolist_to_binary(lists:reverse(BinAcc));
+ ({Length, Bin}, {LengthAcc, BinAcc}) ->
+ NewLength = Length + LengthAcc,
+ if NewLength > MaxBody ->
+ exit({body_too_large, chunked});
+ true ->
+ {NewLength, [Bin | BinAcc]}
+ end
+ end, {0, []}, MaxBody),
+ put(?SAVE_BODY, Body),
+ Body;
+ Cached -> Cached
+ end.
+
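+%% stream_body/3,4 feeds the request body to ChunkFun as {Length, Binary}
+%% tuples, threading FunState through. When a body is streamed, the final
+%% call is {0, Footers} (the chunked trailers, or <<>> for a plain body) and
+%% its return value becomes the result of stream_body.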
+stream_body(MaxChunkSize, ChunkFun, FunState) ->
+ stream_body(MaxChunkSize, ChunkFun, FunState, undefined).
+
+stream_body(MaxChunkSize, ChunkFun, FunState, MaxBodyLength) ->
+ Expect = case get_header_value("expect") of
+ undefined ->
+ undefined;
+ Value when is_list(Value) ->
+ string:to_lower(Value)
+ end,
+ case Expect of
+ "100-continue" ->
+ start_raw_response({100, gb_trees:empty()});
+ _Else ->
+ ok
+ end,
+ case body_length() of
+ undefined ->
+ undefined;
+ {unknown_transfer_encoding, Unknown} ->
+ exit({unknown_transfer_encoding, Unknown});
+ chunked ->
+ % In this case the MaxBody is actually used to
+ % determine the maximum allowed size of a single
+ % chunk.
+ stream_chunked_body(MaxChunkSize, ChunkFun, FunState);
+ 0 ->
+ <<>>;
+ Length when is_integer(Length) ->
+ case MaxBodyLength of
+ MaxBodyLength when is_integer(MaxBodyLength), MaxBodyLength < Length ->
+ exit({body_too_large, content_length});
+ _ ->
+ stream_unchunked_body(Length, ChunkFun, FunState)
+ end;
+ Length ->
+ exit({length_not_integer, Length})
+ end.
+
+
+%% @spec start_response({integer(), ioheaders()}) -> response()
+%% @doc Start the HTTP response by sending the Code HTTP response and
+%% ResponseHeaders. The server will set header defaults such as Server
+%% and Date if not present in ResponseHeaders.
+start_response({Code, ResponseHeaders}) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ HResponse1 = mochiweb_headers:default_from_list(server_headers(),
+ HResponse),
+ start_raw_response({Code, HResponse1}).
+
+%% @spec start_raw_response({integer(), headers()}) -> response()
+%% @doc Start the HTTP response by sending the Code HTTP response and
+%% ResponseHeaders.
+start_raw_response({Code, ResponseHeaders}) ->
+ F = fun ({K, V}, Acc) ->
+ [mochiweb_util:make_io(K), <<": ">>, V, <<"\r\n">> | Acc]
+ end,
+ End = lists:foldl(F, [<<"\r\n">>],
+ mochiweb_headers:to_list(ResponseHeaders)),
+ send([make_version(Version), make_code(Code), <<"\r\n">> | End]),
+ mochiweb:new_response({THIS, Code, ResponseHeaders}).
+
+
+%% @spec start_response_length({integer(), ioheaders(), integer()}) -> response()
+%% @doc Start the HTTP response by sending the Code HTTP response and
+%% ResponseHeaders including a Content-Length of Length. The server
+%% will set header defaults such as Server
+%% and Date if not present in ResponseHeaders.
+start_response_length({Code, ResponseHeaders, Length}) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ HResponse1 = mochiweb_headers:enter("Content-Length", Length, HResponse),
+ start_response({Code, HResponse1}).
+
+%% @spec respond({integer(), ioheaders(), iodata() | chunked | {file, IoDevice}}) -> response()
+%% @doc Start the HTTP response with start_response, and send Body to the
+%% client (if the get(method) /= 'HEAD'). The Content-Length header
+%% will be set by the Body length, and the server will insert header
+%% defaults.
+respond({Code, ResponseHeaders, {file, IoDevice}}) ->
+ Length = mochiweb_io:iodevice_size(IoDevice),
+ Response = start_response_length({Code, ResponseHeaders, Length}),
+ case Method of
+ 'HEAD' ->
+ ok;
+ _ ->
+ mochiweb_io:iodevice_stream(fun send/1, IoDevice)
+ end,
+ Response;
+respond({Code, ResponseHeaders, chunked}) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ HResponse1 = case Method of
+ 'HEAD' ->
+ %% This is what Google does, http://www.google.com/
+ %% is chunked but HEAD gets Content-Length: 0.
+ %% The RFC is ambiguous so emulating Google is smart.
+ mochiweb_headers:enter("Content-Length", "0",
+ HResponse);
+ _ when Version >= {1, 1} ->
+ %% Only use chunked encoding for HTTP/1.1
+ mochiweb_headers:enter("Transfer-Encoding", "chunked",
+ HResponse);
+ _ ->
+ %% For pre-1.1 clients we send the data as-is
+ %% without a Content-Length header and without
+ %% chunk delimiters. Since the end of the document
+ %% is now ambiguous we must force a close.
+ put(?SAVE_FORCE_CLOSE, true),
+ HResponse
+ end,
+ start_response({Code, HResponse1});
+respond({Code, ResponseHeaders, Body}) ->
+ Response = start_response_length({Code, ResponseHeaders, iolist_size(Body)}),
+ case Method of
+ 'HEAD' ->
+ ok;
+ _ ->
+ send(Body)
+ end,
+ Response.
+
+%% @spec not_found() -> response()
+%% @doc Alias for <code>not_found([])</code>.
+not_found() ->
+ not_found([]).
+
+%% @spec not_found(ExtraHeaders) -> response()
+%% @doc Alias for <code>respond({404, [{"Content-Type", "text/plain"}
+%% | ExtraHeaders], &lt;&lt;"Not found."&gt;&gt;})</code>.
+not_found(ExtraHeaders) ->
+ respond({404, [{"Content-Type", "text/plain"} | ExtraHeaders],
+ <<"Not found.">>}).
+
+%% @spec ok({value(), iodata()} | {value(), ioheaders(), iodata() | {file, IoDevice}}) ->
+%% response()
+%% @doc respond({200, [{"Content-Type", ContentType} | Headers], Body}).
+ok({ContentType, Body}) ->
+ ok({ContentType, [], Body});
+ok({ContentType, ResponseHeaders, Body}) ->
+ HResponse = mochiweb_headers:make(ResponseHeaders),
+ case THIS:get(range) of
+ X when (X =:= undefined orelse X =:= fail) orelse Body =:= chunked ->
+ %% http://code.google.com/p/mochiweb/issues/detail?id=54
+ %% Range header not supported when chunked, return 200 and provide
+ %% full response.
+ HResponse1 = mochiweb_headers:enter("Content-Type", ContentType,
+ HResponse),
+ respond({200, HResponse1, Body});
+ Ranges ->
+ {PartList, Size} = range_parts(Body, Ranges),
+ case PartList of
+ [] -> %% no valid ranges
+ HResponse1 = mochiweb_headers:enter("Content-Type",
+ ContentType,
+ HResponse),
+ %% could be 416, for now we'll just return 200
+ respond({200, HResponse1, Body});
+ PartList ->
+ {RangeHeaders, RangeBody} =
+ mochiweb_multipart:parts_to_body(PartList, ContentType, Size),
+ HResponse1 = mochiweb_headers:enter_from_list(
+ [{"Accept-Ranges", "bytes"} |
+ RangeHeaders],
+ HResponse),
+ respond({206, HResponse1, RangeBody})
+ end
+ end.
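+
+%% e.g. Req:ok({"text/plain", [], <<"hello">>}) sends a 200 with the given
+%% Content-Type, switching to a 206 partial response when the request
+%% carried a satisfiable Range header and the body is not chunked.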
+
+%% @spec should_close() -> bool()
+%% @doc Return true if the connection must be closed. If false, using
+%% Keep-Alive should be safe.
+should_close() ->
+ ForceClose = erlang:get(mochiweb_request_force_close) =/= undefined,
+ DidNotRecv = erlang:get(mochiweb_request_recv) =:= undefined,
+ ForceClose orelse Version < {1, 0}
+ %% Connection: close
+ orelse get_header_value("connection") =:= "close"
+ %% HTTP 1.0 requires Connection: Keep-Alive
+ orelse (Version =:= {1, 0}
+ andalso get_header_value("connection") =/= "Keep-Alive")
+ %% unread data left on the socket, can't safely continue
+ orelse (DidNotRecv
+ andalso get_header_value("content-length") =/= undefined
+ andalso list_to_integer(get_header_value("content-length")) > 0)
+ orelse (DidNotRecv
+ andalso get_header_value("transfer-encoding") =:= "chunked").
+
+%% @spec cleanup() -> ok
+%% @doc Clean up any junk in the process dictionary, required before continuing
+%% a Keep-Alive request.
+cleanup() ->
+ [erase(K) || K <- [?SAVE_QS,
+ ?SAVE_PATH,
+ ?SAVE_RECV,
+ ?SAVE_BODY,
+ ?SAVE_POST,
+ ?SAVE_COOKIE,
+ ?SAVE_FORCE_CLOSE]],
+ ok.
+
+%% @spec parse_qs() -> [{Key::string(), Value::string()}]
+%% @doc Parse the query string of the URL.
+parse_qs() ->
+ case erlang:get(?SAVE_QS) of
+ undefined ->
+ {_, QueryString, _} = mochiweb_util:urlsplit_path(RawPath),
+ Parsed = mochiweb_util:parse_qs(QueryString),
+ put(?SAVE_QS, Parsed),
+ Parsed;
+ Cached ->
+ Cached
+ end.
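+
+%% e.g. for a RawPath of "/foo?x=1&x=2", parse_qs() returns
+%% [{"x", "1"}, {"x", "2"}]; repeated keys are preserved in order.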
+
+%% @spec get_cookie_value(Key::string) -> string() | undefined
+%% @doc Get the value of the given cookie.
+get_cookie_value(Key) ->
+ proplists:get_value(Key, parse_cookie()).
+
+%% @spec parse_cookie() -> [{Key::string(), Value::string()}]
+%% @doc Parse the cookie header.
+parse_cookie() ->
+ case erlang:get(?SAVE_COOKIE) of
+ undefined ->
+ Cookies = case get_header_value("cookie") of
+ undefined ->
+ [];
+ Value ->
+ mochiweb_cookies:parse_cookie(Value)
+ end,
+ put(?SAVE_COOKIE, Cookies),
+ Cookies;
+ Cached ->
+ Cached
+ end.
+
+%% @spec parse_post() -> [{Key::string(), Value::string()}]
+%% @doc Parse an application/x-www-form-urlencoded form POST. This
+%% has the side-effect of calling recv_body().
+parse_post() ->
+ case erlang:get(?SAVE_POST) of
+ undefined ->
+ Parsed = case recv_body() of
+ undefined ->
+ [];
+ Binary ->
+ case get_primary_header_value("content-type") of
+ "application/x-www-form-urlencoded" ++ _ ->
+ mochiweb_util:parse_qs(Binary);
+ _ ->
+ []
+ end
+ end,
+ put(?SAVE_POST, Parsed),
+ Parsed;
+ Cached ->
+ Cached
+ end.
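+
+%% e.g. for a POST body of "a=1&b=2" sent as
+%% application/x-www-form-urlencoded, parse_post() returns
+%% [{"a", "1"}, {"b", "2"}].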
+
+%% @spec stream_chunked_body(integer(), fun(), term()) -> term()
+%% @doc The function is called for each chunk.
+%% Used internally by stream_body.
+stream_chunked_body(MaxChunkSize, Fun, FunState) ->
+ case read_chunk_length() of
+ 0 ->
+ Fun({0, read_chunk(0)}, FunState);
+ Length when Length > MaxChunkSize ->
+ NewState = read_sub_chunks(Length, MaxChunkSize, Fun, FunState),
+ stream_chunked_body(MaxChunkSize, Fun, NewState);
+ Length ->
+ NewState = Fun({Length, read_chunk(Length)}, FunState),
+ stream_chunked_body(MaxChunkSize, Fun, NewState)
+ end.
+
+stream_unchunked_body(0, Fun, FunState) ->
+ Fun({0, <<>>}, FunState);
+stream_unchunked_body(Length, Fun, FunState) when Length > 0 ->
+ PktSize = case Length > ?RECBUF_SIZE of
+ true ->
+ ?RECBUF_SIZE;
+ false ->
+ Length
+ end,
+ Bin = recv(PktSize),
+ NewState = Fun({PktSize, Bin}, FunState),
+ stream_unchunked_body(Length - PktSize, Fun, NewState).
+
+%% @spec read_chunk_length() -> integer()
+%% @doc Read the length of the next HTTP chunk.
+read_chunk_length() ->
+ mochiweb_socket:setopts(Socket, [{packet, line}]),
+ case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
+ {ok, Header} ->
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ Splitter = fun (C) ->
+ C =/= $\r andalso C =/= $\n andalso C =/= $
+ end,
+ {Hex, _Rest} = lists:splitwith(Splitter, binary_to_list(Header)),
+ mochihex:to_int(Hex);
+ _ ->
+ exit(normal)
+ end.
+
+%% @spec read_chunk(integer()) -> Chunk::binary() | [Footer::binary()]
+%% @doc Read in an HTTP chunk of the given length. If Length is 0, then read
+%% the HTTP footers (as a list of raw binary lines, since they are rarely used).
+read_chunk(0) ->
+ mochiweb_socket:setopts(Socket, [{packet, line}]),
+ F = fun (F1, Acc) ->
+ case mochiweb_socket:recv(Socket, 0, ?IDLE_TIMEOUT) of
+ {ok, <<"\r\n">>} ->
+ Acc;
+ {ok, Footer} ->
+ F1(F1, [Footer | Acc]);
+ _ ->
+ exit(normal)
+ end
+ end,
+ Footers = F(F, []),
+ mochiweb_socket:setopts(Socket, [{packet, raw}]),
+ put(?SAVE_RECV, true),
+ Footers;
+read_chunk(Length) ->
+ case mochiweb_socket:recv(Socket, 2 + Length, ?IDLE_TIMEOUT) of
+ {ok, <<Chunk:Length/binary, "\r\n">>} ->
+ Chunk;
+ _ ->
+ exit(normal)
+ end.
+
+read_sub_chunks(Length, MaxChunkSize, Fun, FunState) when Length > MaxChunkSize ->
+ Bin = recv(MaxChunkSize),
+ NewState = Fun({size(Bin), Bin}, FunState),
+ read_sub_chunks(Length - MaxChunkSize, MaxChunkSize, Fun, NewState);
+
+read_sub_chunks(Length, _MaxChunkSize, Fun, FunState) ->
+ Fun({Length, read_chunk(Length)}, FunState).
+
+%% @spec serve_file(Path, DocRoot) -> Response
+%% @doc Serve a file relative to DocRoot.
+serve_file(Path, DocRoot) ->
+ serve_file(Path, DocRoot, []).
+
+%% @spec serve_file(Path, DocRoot, ExtraHeaders) -> Response
+%% @doc Serve a file relative to DocRoot.
+serve_file(Path, DocRoot, ExtraHeaders) ->
+ case mochiweb_util:safe_relative_path(Path) of
+ undefined ->
+ not_found(ExtraHeaders);
+ RelPath ->
+ FullPath = filename:join([DocRoot, RelPath]),
+ case filelib:is_dir(FullPath) of
+ true ->
+ maybe_redirect(RelPath, FullPath, ExtraHeaders);
+ false ->
+ maybe_serve_file(FullPath, ExtraHeaders)
+ end
+ end.
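+
+%% e.g. Req:serve_file("images/logo.png", "/var/www") resolves
+%% /var/www/images/logo.png, guesses the MIME type from the extension,
+%% answers 304 when If-Modified-Since matches, and 404 when the path is
+%% unsafe or missing.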
+
+%% Internal API
+
+%% This has the same effect as the DirectoryIndex directive in httpd
+directory_index(FullPath) ->
+ filename:join([FullPath, "index.html"]).
+
+maybe_redirect([], FullPath, ExtraHeaders) ->
+ maybe_serve_file(directory_index(FullPath), ExtraHeaders);
+
+maybe_redirect(RelPath, FullPath, ExtraHeaders) ->
+ case string:right(RelPath, 1) of
+ "/" ->
+ maybe_serve_file(directory_index(FullPath), ExtraHeaders);
+ _ ->
+ Host = mochiweb_headers:get_value("host", Headers),
+ Location = "http://" ++ Host ++ "/" ++ RelPath ++ "/",
+ LocationBin = list_to_binary(Location),
+ MoreHeaders = [{"Location", Location},
+ {"Content-Type", "text/html"} | ExtraHeaders],
+ Top = <<"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">"
+ "<html><head>"
+ "<title>301 Moved Permanently</title>"
+ "</head><body>"
+ "<h1>Moved Permanently</h1>"
+ "<p>The document has moved <a href=\"">>,
+ Bottom = <<">here</a>.</p></body></html>\n">>,
+ Body = <<Top/binary, LocationBin/binary, Bottom/binary>>,
+ respond({301, MoreHeaders, Body})
+ end.
+
+maybe_serve_file(File, ExtraHeaders) ->
+ case file:read_file_info(File) of
+ {ok, FileInfo} ->
+ LastModified = httpd_util:rfc1123_date(FileInfo#file_info.mtime),
+ case get_header_value("if-modified-since") of
+ LastModified ->
+ respond({304, ExtraHeaders, ""});
+ _ ->
+ case file:open(File, [raw, binary]) of
+ {ok, IoDevice} ->
+ ContentType = mochiweb_util:guess_mime(File),
+ Res = ok({ContentType,
+ [{"last-modified", LastModified}
+ | ExtraHeaders],
+ {file, IoDevice}}),
+ file:close(IoDevice),
+ Res;
+ _ ->
+ not_found(ExtraHeaders)
+ end
+ end;
+ {error, _} ->
+ not_found(ExtraHeaders)
+ end.
+
+server_headers() ->
+ [{"Server", "MochiWeb/1.0 (" ++ ?QUIP ++ ")"},
+ {"Date", httpd_util:rfc1123_date()}].
+
+make_code(X) when is_integer(X) ->
+ [integer_to_list(X), [" " | httpd_util:reason_phrase(X)]];
+make_code(Io) when is_list(Io); is_binary(Io) ->
+ Io.
+
+make_version({1, 0}) ->
+ <<"HTTP/1.0 ">>;
+make_version(_) ->
+ <<"HTTP/1.1 ">>.
+
+range_parts({file, IoDevice}, Ranges) ->
+ Size = mochiweb_io:iodevice_size(IoDevice),
+ F = fun (Spec, Acc) ->
+ case mochiweb_http:range_skip_length(Spec, Size) of
+ invalid_range ->
+ Acc;
+ V ->
+ [V | Acc]
+ end
+ end,
+ LocNums = lists:foldr(F, [], Ranges),
+ {ok, Data} = file:pread(IoDevice, LocNums),
+ Bodies = lists:zipwith(fun ({Skip, Length}, PartialBody) ->
+ {Skip, Skip + Length - 1, PartialBody}
+ end,
+ LocNums, Data),
+ {Bodies, Size};
+range_parts(Body0, Ranges) ->
+ Body = iolist_to_binary(Body0),
+ Size = size(Body),
+ F = fun(Spec, Acc) ->
+ case mochiweb_http:range_skip_length(Spec, Size) of
+ invalid_range ->
+ Acc;
+ {Skip, Length} ->
+ <<_:Skip/binary, PartialBody:Length/binary, _/binary>> = Body,
+ [{Skip, Skip + Length - 1, PartialBody} | Acc]
+ end
+ end,
+ {lists:foldr(F, [], Ranges), Size}.
+
+%% @spec accepted_encodings([encoding()]) -> [encoding()] | bad_accept_encoding_value
+%% @type encoding() = string().
+%%
+%% @doc Returns the list of encodings accepted by a request. Encodings that
+%% are not supported by the server are excluded from the result. The list is
+%% computed from the "Accept-Encoding" header, and its elements are ordered
+%% in descending order of their Q values.
+%%
+%% Section 14.3 of RFC 2616 (HTTP 1.1) describes the "Accept-Encoding"
+%% header and the process of determining which server-supported encodings
+%% may be used to encode the body of the response.
+%%
+%% Examples
+%%
+%% 1) For a missing "Accept-Encoding" header:
+%% accepted_encodings(["gzip", "identity"]) -> ["identity"]
+%%
+%% 2) For an "Accept-Encoding" header with value "gzip, deflate":
+%% accepted_encodings(["gzip", "identity"]) -> ["gzip", "identity"]
+%%
+%% 3) For an "Accept-Encoding" header with value "gzip;q=0.5, deflate":
+%% accepted_encodings(["gzip", "deflate", "identity"]) ->
+%% ["deflate", "gzip", "identity"]
+%%
+accepted_encodings(SupportedEncodings) ->
+ AcceptEncodingHeader = case get_header_value("Accept-Encoding") of
+ undefined ->
+ "";
+ Value ->
+ Value
+ end,
+ case mochiweb_util:parse_qvalues(AcceptEncodingHeader) of
+ invalid_qvalue_string ->
+ bad_accept_encoding_value;
+ QList ->
+ mochiweb_util:pick_accepted_encodings(
+ QList, SupportedEncodings, "identity"
+ )
+ end.
+
+%% @spec accepts_content_type(string() | binary()) -> boolean() | bad_accept_header
+%%
+%% @doc Determines whether a request accepts a given media type by analyzing its
+%% "Accept" header.
+%%
+%% Examples
+%%
+%% 1) For a missing "Accept" header:
+%% accepts_content_type("application/json") -> true
+%%
+%% 2) For an "Accept" header with value "text/plain, application/*":
+%% accepts_content_type("application/json") -> true
+%%
+%% 3) For an "Accept" header with value "text/plain, */*; q=0.0":
+%% accepts_content_type("application/json") -> false
+%%
+%% 4) For an "Accept" header with value "text/plain; q=0.5, */*; q=0.1":
+%% accepts_content_type("application/json") -> true
+%%
+%% 5) For an "Accept" header with value "text/*; q=0.0, */*":
+%% accepts_content_type("text/plain") -> false
+%%
+accepts_content_type(ContentType) when is_binary(ContentType) ->
+ accepts_content_type(binary_to_list(ContentType));
+accepts_content_type(ContentType1) ->
+ ContentType = re:replace(ContentType1, "\\s", "", [global, {return, list}]),
+ AcceptHeader = case get_header_value("Accept") of
+ undefined ->
+ "*/*";
+ Value ->
+ Value
+ end,
+ case mochiweb_util:parse_qvalues(AcceptHeader) of
+ invalid_qvalue_string ->
+ bad_accept_header;
+ QList ->
+ [MainType, _SubType] = string:tokens(ContentType, "/"),
+ SuperType = MainType ++ "/*",
+ lists:any(
+ fun({"*/*", Q}) when Q > 0.0 ->
+ true;
+ ({Type, Q}) when Q > 0.0 ->
+ Type =:= ContentType orelse Type =:= SuperType;
+ (_) ->
+ false
+ end,
+ QList
+ ) andalso
+ (not lists:member({ContentType, 0.0}, QList)) andalso
+ (not lists:member({SuperType, 0.0}, QList))
+ end.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_request_tests.erl b/deps/mochiweb/src/mochiweb_request_tests.erl
new file mode 100644
index 00000000..b61a5839
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_request_tests.erl
@@ -0,0 +1,63 @@
+-module(mochiweb_request_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+accepts_content_type_test() ->
+ Req1 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "multipart/related"}])),
+ ?assertEqual(true, Req1:accepts_content_type("multipart/related")),
+
+ Req2 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html"}])),
+ ?assertEqual(false, Req2:accepts_content_type("multipart/related")),
+
+ Req3 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html, multipart/*"}])),
+ ?assertEqual(true, Req3:accepts_content_type("multipart/related")),
+
+ Req4 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html, multipart/*; q=0.0"}])),
+ ?assertEqual(false, Req4:accepts_content_type("multipart/related")),
+
+ Req5 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html, multipart/*; q=0"}])),
+ ?assertEqual(false, Req5:accepts_content_type("multipart/related")),
+
+ Req6 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html, */*; q=0.0"}])),
+ ?assertEqual(false, Req6:accepts_content_type("multipart/related")),
+
+ Req7 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "multipart/*; q=0.0, */*"}])),
+ ?assertEqual(false, Req7:accepts_content_type("multipart/related")),
+
+ Req8 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "*/*; q=0.0, multipart/*"}])),
+ ?assertEqual(true, Req8:accepts_content_type("multipart/related")),
+
+ Req9 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "*/*; q=0.0, multipart/related"}])),
+ ?assertEqual(true, Req9:accepts_content_type("multipart/related")),
+
+ Req10 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html; level=1"}])),
+ ?assertEqual(true, Req10:accepts_content_type("text/html;level=1")),
+
+ Req11 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html; level=1, text/html"}])),
+ ?assertEqual(true, Req11:accepts_content_type("text/html")),
+
+ Req12 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html; level=1; q=0.0, text/html"}])),
+ ?assertEqual(false, Req12:accepts_content_type("text/html;level=1")),
+
+ Req13 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html; level=1; q=0.0, text/html"}])),
+ ?assertEqual(false, Req13:accepts_content_type("text/html; level=1")),
+
+ Req14 = mochiweb_request:new(nil, 'GET', "/foo", {1, 1},
+ mochiweb_headers:make([{"Accept", "text/html;level=1;q=0.1, text/html"}])),
+ ?assertEqual(true, Req14:accepts_content_type("text/html; level=1")).
+
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_response.erl b/deps/mochiweb/src/mochiweb_response.erl
new file mode 100644
index 00000000..ab8ee61c
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_response.erl
@@ -0,0 +1,64 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Response abstraction.
+
+-module(mochiweb_response, [Request, Code, Headers]).
+-author('bob@mochimedia.com').
+
+-define(QUIP, "Any of you quaids got a smint?").
+
+-export([get_header_value/1, get/1, dump/0]).
+-export([send/1, write_chunk/1]).
+
+%% @spec get_header_value(string() | atom() | binary()) -> string() | undefined
+%% @doc Get the value of the given response header.
+get_header_value(K) ->
+ mochiweb_headers:get_value(K, Headers).
+
+%% @spec get(request | code | headers) -> term()
+%% @doc Return the internal representation of the given field.
+get(request) ->
+ Request;
+get(code) ->
+ Code;
+get(headers) ->
+ Headers.
+
+%% @spec dump() -> [{atom(), term()}]
+%% @doc Dump the internal representation to a "human readable" set of terms
+%% for debugging/inspection purposes.
+dump() ->
+ [{request, Request:dump()},
+ {code, Code},
+ {headers, mochiweb_headers:to_list(Headers)}].
+
+%% @spec send(iodata()) -> ok
+%% @doc Send data over the socket if the method is not HEAD.
+send(Data) ->
+ case Request:get(method) of
+ 'HEAD' ->
+ ok;
+ _ ->
+ Request:send(Data)
+ end.
+
+%% @spec write_chunk(iodata()) -> ok
+%% @doc Write a chunk of an HTTP chunked response. If Data is zero length,
+%% then the chunked response will be finished.
+write_chunk(Data) ->
+ case Request:get(version) of
+ Version when Version >= {1, 1} ->
+ Length = iolist_size(Data),
+ send([io_lib:format("~.16b\r\n", [Length]), Data, <<"\r\n">>]);
+ _ ->
+ send(Data)
+ end.
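+
+%% Example framing on HTTP/1.1 (illustrative, derived from the clause above):
+%%   write_chunk(<<"hello">>) sends "5\r\nhello\r\n" over the socket, and
+%%   write_chunk(<<>>) sends the terminating "0\r\n\r\n".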
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_skel.erl b/deps/mochiweb/src/mochiweb_skel.erl
new file mode 100644
index 00000000..76eefa60
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_skel.erl
@@ -0,0 +1,86 @@
+-module(mochiweb_skel).
+-export([skelcopy/2]).
+
+-include_lib("kernel/include/file.hrl").
+
+%% External API
+
+skelcopy(DestDir, Name) ->
+ ok = ensuredir(DestDir),
+ LDst = case length(filename:dirname(DestDir)) of
+ 1 -> %% handle case when dirname returns "/"
+ 0;
+ N ->
+ N + 1
+ end,
+ skelcopy(src(), DestDir, Name, LDst),
+ DestLink = filename:join([DestDir, Name, "deps", "mochiweb-src"]),
+ ok = filelib:ensure_dir(DestLink),
+ ok = file:make_symlink(
+ filename:join(filename:dirname(code:which(?MODULE)), ".."),
+ DestLink).
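+
+%% Example (illustrative): mochiweb_skel:skelcopy("/tmp", "myapp") copies the
+%% priv/skel template to /tmp/myapp, substituting "skel" with "myapp" in file
+%% names and contents, and symlinks deps/mochiweb-src back to this checkout.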
+
+%% Internal API
+
+src() ->
+ Dir = filename:dirname(code:which(?MODULE)),
+ filename:join(Dir, "../priv/skel").
+
+skel() ->
+ "skel".
+
+skelcopy(Src, DestDir, Name, LDst) ->
+ Dest = re:replace(filename:basename(Src), skel(), Name,
+ [global, {return, list}]),
+ case file:read_file_info(Src) of
+ {ok, #file_info{type=directory, mode=Mode}} ->
+ Dir = DestDir ++ "/" ++ Dest,
+ EDst = lists:nthtail(LDst, Dir),
+ ok = ensuredir(Dir),
+ ok = file:write_file_info(Dir, #file_info{mode=Mode}),
+ case filename:basename(Src) of
+ "ebin" ->
+ ok;
+ _ ->
+ {ok, Files} = file:list_dir(Src),
+ io:format("~s/~n", [EDst]),
+ lists:foreach(fun ("." ++ _) -> ok;
+ (F) ->
+ skelcopy(filename:join(Src, F),
+ Dir,
+ Name,
+ LDst)
+ end,
+ Files),
+ ok
+ end;
+ {ok, #file_info{type=regular, mode=Mode}} ->
+ OutFile = filename:join(DestDir, Dest),
+ {ok, B} = file:read_file(Src),
+ S = re:replace(binary_to_list(B), skel(), Name,
+ [{return, list}, global]),
+ ok = file:write_file(OutFile, list_to_binary(S)),
+ ok = file:write_file_info(OutFile, #file_info{mode=Mode}),
+ io:format(" ~s~n", [filename:basename(Src)]),
+ ok;
+ {ok, _} ->
+ io:format("ignored source file: ~p~n", [Src]),
+ ok
+ end.
+
+ensuredir(Dir) ->
+ case file:make_dir(Dir) of
+ ok ->
+ ok;
+ {error, eexist} ->
+ ok;
+ E ->
+ E
+ end.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_socket.erl b/deps/mochiweb/src/mochiweb_socket.erl
new file mode 100644
index 00000000..76b018c8
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_socket.erl
@@ -0,0 +1,84 @@
+%% @copyright 2010 Mochi Media, Inc.
+
+%% @doc MochiWeb socket - wrapper for plain and ssl sockets.
+
+-module(mochiweb_socket).
+
+-export([listen/4, accept/1, recv/3, send/2, close/1, port/1, peername/1,
+ setopts/2, type/1]).
+
+-define(ACCEPT_TIMEOUT, 2000).
+
+listen(Ssl, Port, Opts, SslOpts) ->
+ case Ssl of
+ true ->
+ case ssl:listen(Port, Opts ++ SslOpts) of
+ {ok, ListenSocket} ->
+ {ok, {ssl, ListenSocket}};
+ {error, _} = Err ->
+ Err
+ end;
+ false ->
+ gen_tcp:listen(Port, Opts)
+ end.
+
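+%% Example (illustrative; certfile/keyfile are standard OTP ssl options):
+%%   listen(false, 8080, [binary, {reuseaddr, true}], []) -> {ok, ListenSocket}
+%%   listen(true, 8443, Opts, [{certfile, "cert.pem"}, {keyfile, "key.pem"}])
+%%     -> {ok, {ssl, ListenSocket}}
+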
+accept({ssl, ListenSocket}) ->
+ % There's a bug in ssl:transport_accept/2 at the moment, which is the
+ % reason for the try...catch block. Should be fixed in OTP R14.
+ try ssl:transport_accept(ListenSocket) of
+ {ok, Socket} ->
+ case ssl:ssl_accept(Socket) of
+ ok ->
+ {ok, {ssl, Socket}};
+ {error, _} = Err ->
+ Err
+ end;
+ {error, _} = Err ->
+ Err
+ catch
+ error:{badmatch, {error, Reason}} ->
+ {error, Reason}
+ end;
+accept(ListenSocket) ->
+ gen_tcp:accept(ListenSocket, ?ACCEPT_TIMEOUT).
+
+recv({ssl, Socket}, Length, Timeout) ->
+ ssl:recv(Socket, Length, Timeout);
+recv(Socket, Length, Timeout) ->
+ gen_tcp:recv(Socket, Length, Timeout).
+
+send({ssl, Socket}, Data) ->
+ ssl:send(Socket, Data);
+send(Socket, Data) ->
+ gen_tcp:send(Socket, Data).
+
+close({ssl, Socket}) ->
+ ssl:close(Socket);
+close(Socket) ->
+ gen_tcp:close(Socket).
+
+port({ssl, Socket}) ->
+ case ssl:sockname(Socket) of
+ {ok, {_, Port}} ->
+ {ok, Port};
+ {error, _} = Err ->
+ Err
+ end;
+port(Socket) ->
+ inet:port(Socket).
+
+peername({ssl, Socket}) ->
+ ssl:peername(Socket);
+peername(Socket) ->
+ inet:peername(Socket).
+
+setopts({ssl, Socket}, Opts) ->
+ ssl:setopts(Socket, Opts);
+setopts(Socket, Opts) ->
+ inet:setopts(Socket, Opts).
+
+type({ssl, _}) ->
+ ssl;
+type(_) ->
+ plain.
+
diff --git a/deps/mochiweb/src/mochiweb_socket_server.erl b/deps/mochiweb/src/mochiweb_socket_server.erl
new file mode 100644
index 00000000..1aae09ac
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_socket_server.erl
@@ -0,0 +1,272 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc MochiWeb socket server.
+
+-module(mochiweb_socket_server).
+-author('bob@mochimedia.com').
+-behaviour(gen_server).
+
+-include("internal.hrl").
+
+-export([start/1, stop/1]).
+-export([init/1, handle_call/3, handle_cast/2, terminate/2, code_change/3,
+ handle_info/2]).
+-export([get/2]).
+
+-record(mochiweb_socket_server,
+ {port,
+ loop,
+ name=undefined,
+ %% NOTE: This is currently ignored.
+ max=2048,
+ ip=any,
+ listen=null,
+ nodelay=false,
+ backlog=128,
+ active_sockets=0,
+ acceptor_pool_size=16,
+ ssl=false,
+ ssl_opts=[{ssl_imp, new}],
+ acceptor_pool=sets:new()}).
+
+start(State=#mochiweb_socket_server{}) ->
+ start_server(State);
+start(Options) ->
+ start(parse_options(Options)).
+
+get(Name, Property) ->
+ gen_server:call(Name, {get, Property}).
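+
+%% Example (illustrative; my_http is a placeholder name): get(my_http, port)
+%% returns the port the listener actually bound, which is useful when the
+%% server was started with {port, 0}.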
+
+stop(Name) when is_atom(Name) ->
+ gen_server:cast(Name, stop);
+stop(Pid) when is_pid(Pid) ->
+ gen_server:cast(Pid, stop);
+stop({local, Name}) ->
+ stop(Name);
+stop({global, Name}) ->
+ stop(Name);
+stop(Options) ->
+ State = parse_options(Options),
+ stop(State#mochiweb_socket_server.name).
+
+%% Internal API
+
+parse_options(Options) ->
+ parse_options(Options, #mochiweb_socket_server{}).
+
+parse_options([], State) ->
+ State;
+parse_options([{name, L} | Rest], State) when is_list(L) ->
+ Name = {local, list_to_atom(L)},
+ parse_options(Rest, State#mochiweb_socket_server{name=Name});
+parse_options([{name, A} | Rest], State) when A =:= undefined ->
+ parse_options(Rest, State#mochiweb_socket_server{name=A});
+parse_options([{name, A} | Rest], State) when is_atom(A) ->
+ Name = {local, A},
+ parse_options(Rest, State#mochiweb_socket_server{name=Name});
+parse_options([{name, Name} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{name=Name});
+parse_options([{port, L} | Rest], State) when is_list(L) ->
+ Port = list_to_integer(L),
+ parse_options(Rest, State#mochiweb_socket_server{port=Port});
+parse_options([{port, Port} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{port=Port});
+parse_options([{ip, Ip} | Rest], State) ->
+ ParsedIp = case Ip of
+ any ->
+ any;
+ Ip when is_tuple(Ip) ->
+ Ip;
+ Ip when is_list(Ip) ->
+ {ok, IpTuple} = inet_parse:address(Ip),
+ IpTuple
+ end,
+ parse_options(Rest, State#mochiweb_socket_server{ip=ParsedIp});
+parse_options([{loop, Loop} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{loop=Loop});
+parse_options([{backlog, Backlog} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{backlog=Backlog});
+parse_options([{nodelay, NoDelay} | Rest], State) ->
+ parse_options(Rest, State#mochiweb_socket_server{nodelay=NoDelay});
+parse_options([{acceptor_pool_size, Max} | Rest], State) ->
+ MaxInt = ensure_int(Max),
+ parse_options(Rest,
+ State#mochiweb_socket_server{acceptor_pool_size=MaxInt});
+parse_options([{max, Max} | Rest], State) ->
+ error_logger:info_report([{warning, "TODO: max is currently unsupported"},
+ {max, Max}]),
+ MaxInt = ensure_int(Max),
+ parse_options(Rest, State#mochiweb_socket_server{max=MaxInt});
+parse_options([{ssl, Ssl} | Rest], State) when is_boolean(Ssl) ->
+ parse_options(Rest, State#mochiweb_socket_server{ssl=Ssl});
+parse_options([{ssl_opts, SslOpts} | Rest], State) when is_list(SslOpts) ->
+ SslOpts1 = [{ssl_imp, new} | proplists:delete(ssl_imp, SslOpts)],
+ parse_options(Rest, State#mochiweb_socket_server{ssl_opts=SslOpts1}).
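+
+%% Example option list (illustrative; handle_connection/1 is a placeholder):
+%%   [{name, my_http}, {port, 8080}, {ip, "127.0.0.1"},
+%%    {loop, fun handle_connection/1}, {ssl, false}]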
+
+start_server(State=#mochiweb_socket_server{ssl=Ssl, name=Name}) ->
+ case Ssl of
+ true ->
+ application:start(crypto),
+ application:start(public_key),
+ application:start(ssl);
+ false ->
+ void
+ end,
+ case Name of
+ undefined ->
+ gen_server:start_link(?MODULE, State, []);
+ _ ->
+ gen_server:start_link(Name, ?MODULE, State, [])
+ end.
+
+ensure_int(N) when is_integer(N) ->
+ N;
+ensure_int(S) when is_list(S) ->
+ list_to_integer(S).
+
+ipv6_supported() ->
+ case (catch inet:getaddr("localhost", inet6)) of
+ {ok, _Addr} ->
+ true;
+ {error, _} ->
+ false
+ end.
+
+init(State=#mochiweb_socket_server{ip=Ip, port=Port, backlog=Backlog, nodelay=NoDelay}) ->
+ process_flag(trap_exit, true),
+ BaseOpts = [binary,
+ {reuseaddr, true},
+ {packet, 0},
+ {backlog, Backlog},
+ {recbuf, ?RECBUF_SIZE},
+ {active, false},
+ {nodelay, NoDelay}],
+ Opts = case Ip of
+ any ->
+ case ipv6_supported() of % IPv4, and IPv6 if supported
+ true -> [inet, inet6 | BaseOpts];
+ _ -> BaseOpts
+ end;
+ {_, _, _, _} -> % IPv4
+ [inet, {ip, Ip} | BaseOpts];
+ {_, _, _, _, _, _, _, _} -> % IPv6
+ [inet6, {ip, Ip} | BaseOpts]
+ end,
+ case listen(Port, Opts, State) of
+ {stop, eacces} ->
+ case Port < 1024 of
+ true ->
+ case fdsrv:start() of
+ {ok, _} ->
+ case fdsrv:bind_socket(tcp, Port) of
+ {ok, Fd} ->
+ listen(Port, [{fd, Fd} | Opts], State);
+ _ ->
+ {stop, fdsrv_bind_failed}
+ end;
+ _ ->
+ {stop, fdsrv_start_failed}
+ end;
+ false ->
+ {stop, eacces}
+ end;
+ Other ->
+ Other
+ end.
+
+new_acceptor_pool(Listen,
+ State=#mochiweb_socket_server{acceptor_pool=Pool,
+ acceptor_pool_size=Size,
+ loop=Loop}) ->
+ F = fun (_, S) ->
+ Pid = mochiweb_acceptor:start_link(self(), Listen, Loop),
+ sets:add_element(Pid, S)
+ end,
+ Pool1 = lists:foldl(F, Pool, lists:seq(1, Size)),
+ State#mochiweb_socket_server{acceptor_pool=Pool1}.
+
+listen(Port, Opts, State=#mochiweb_socket_server{ssl=Ssl, ssl_opts=SslOpts}) ->
+ case mochiweb_socket:listen(Ssl, Port, Opts, SslOpts) of
+ {ok, Listen} ->
+ {ok, ListenPort} = mochiweb_socket:port(Listen),
+ {ok, new_acceptor_pool(
+ Listen,
+ State#mochiweb_socket_server{listen=Listen,
+ port=ListenPort})};
+ {error, Reason} ->
+ {stop, Reason}
+ end.
+
+do_get(port, #mochiweb_socket_server{port=Port}) ->
+ Port;
+do_get(active_sockets, #mochiweb_socket_server{active_sockets=ActiveSockets}) ->
+ ActiveSockets.
+
+handle_call({get, Property}, _From, State) ->
+ Res = do_get(Property, State),
+ {reply, Res, State};
+handle_call(_Message, _From, State) ->
+ Res = error,
+ {reply, Res, State}.
+
+handle_cast({accepted, Pid, _Timing},
+ State=#mochiweb_socket_server{active_sockets=ActiveSockets}) ->
+ State1 = State#mochiweb_socket_server{active_sockets=1 + ActiveSockets},
+ {noreply, recycle_acceptor(Pid, State1)};
+handle_cast(stop, State) ->
+ {stop, normal, State}.
+
+terminate(_Reason, #mochiweb_socket_server{listen=Listen, port=Port}) ->
+ mochiweb_socket:close(Listen),
+ case Port < 1024 of
+ true ->
+ catch fdsrv:stop(),
+ ok;
+ false ->
+ ok
+ end.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+recycle_acceptor(Pid, State=#mochiweb_socket_server{
+ acceptor_pool=Pool,
+ listen=Listen,
+ loop=Loop,
+ active_sockets=ActiveSockets}) ->
+ case sets:is_element(Pid, Pool) of
+ true ->
+ Acceptor = mochiweb_acceptor:start_link(self(), Listen, Loop),
+ Pool1 = sets:add_element(Acceptor, sets:del_element(Pid, Pool)),
+ State#mochiweb_socket_server{acceptor_pool=Pool1};
+ false ->
+ State#mochiweb_socket_server{active_sockets=ActiveSockets - 1}
+ end.
+
+handle_info({'EXIT', Pid, normal}, State) ->
+ {noreply, recycle_acceptor(Pid, State)};
+handle_info({'EXIT', Pid, Reason},
+ State=#mochiweb_socket_server{acceptor_pool=Pool}) ->
+ case sets:is_element(Pid, Pool) of
+ true ->
+ %% If there was an unexpected error accepting, log and sleep.
+ error_logger:error_report({?MODULE, ?LINE,
+ {acceptor_error, Reason}}),
+ timer:sleep(100);
+ false ->
+ ok
+ end,
+ {noreply, recycle_acceptor(Pid, State)};
+handle_info(Info, State) ->
+ error_logger:info_report([{'INFO', Info}, {'State', State}]),
+ {noreply, State}.
+
+
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_sup.erl b/deps/mochiweb/src/mochiweb_sup.erl
new file mode 100644
index 00000000..af7df9b3
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_sup.erl
@@ -0,0 +1,41 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Supervisor for the mochiweb application.
+
+-module(mochiweb_sup).
+-author('bob@mochimedia.com').
+
+-behaviour(supervisor).
+
+%% External exports
+-export([start_link/0, upgrade/0]).
+
+%% supervisor callbacks
+-export([init/1]).
+
+%% @spec start_link() -> ServerRet
+%% @doc API for starting the supervisor.
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%% @spec upgrade() -> ok
+%% @doc Add processes if necessary.
+upgrade() ->
+ {ok, {_, Specs}} = init([]),
+ [supervisor:start_child(?MODULE, Spec) || Spec <- Specs],
+ ok.
+
+%% @spec init([]) -> SupervisorTree
+%% @doc supervisor callback; returns the supervisor tree.
+init([]) ->
+ Processes = [],
+ {ok, {{one_for_one, 10, 10}, Processes}}.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/src/mochiweb_util.erl b/deps/mochiweb/src/mochiweb_util.erl
new file mode 100644
index 00000000..62ff0d06
--- /dev/null
+++ b/deps/mochiweb/src/mochiweb_util.erl
@@ -0,0 +1,973 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Utilities for parsing and quoting.
+
+-module(mochiweb_util).
+-author('bob@mochimedia.com').
+-export([join/2, quote_plus/1, urlencode/1, parse_qs/1, unquote/1]).
+-export([path_split/1]).
+-export([urlsplit/1, urlsplit_path/1, urlunsplit/1, urlunsplit_path/1]).
+-export([guess_mime/1, parse_header/1]).
+-export([shell_quote/1, cmd/1, cmd_string/1, cmd_port/2, cmd_status/1]).
+-export([record_to_proplist/2, record_to_proplist/3]).
+-export([safe_relative_path/1, partition/2]).
+-export([parse_qvalues/1, pick_accepted_encodings/3]).
+-export([make_io/1]).
+
+-define(PERCENT, 37). % $\%
+-define(FULLSTOP, 46). % $\.
+-define(IS_HEX(C), ((C >= $0 andalso C =< $9) orelse
+ (C >= $a andalso C =< $f) orelse
+ (C >= $A andalso C =< $F))).
+-define(QS_SAFE(C), ((C >= $a andalso C =< $z) orelse
+ (C >= $A andalso C =< $Z) orelse
+ (C >= $0 andalso C =< $9) orelse
+ (C =:= ?FULLSTOP orelse C =:= $- orelse C =:= $~ orelse
+ C =:= $_))).
+
+hexdigit(C) when C < 10 -> $0 + C;
+hexdigit(C) when C < 16 -> $A + (C - 10).
+
+unhexdigit(C) when C >= $0, C =< $9 -> C - $0;
+unhexdigit(C) when C >= $a, C =< $f -> C - $a + 10;
+unhexdigit(C) when C >= $A, C =< $F -> C - $A + 10.
+
+%% @spec partition(String, Sep) -> {String, [], []} | {Prefix, Sep, Postfix}
+%% @doc Inspired by Python 2.5's str.partition:
+%% partition("foo/bar", "/") = {"foo", "/", "bar"},
+%% partition("foo", "/") = {"foo", "", ""}.
+partition(String, Sep) ->
+ case partition(String, Sep, []) of
+ undefined ->
+ {String, "", ""};
+ Result ->
+ Result
+ end.
+
+partition("", _Sep, _Acc) ->
+ undefined;
+partition(S, Sep, Acc) ->
+ case partition2(S, Sep) of
+ undefined ->
+ [C | Rest] = S,
+ partition(Rest, Sep, [C | Acc]);
+ Rest ->
+ {lists:reverse(Acc), Sep, Rest}
+ end.
+
+partition2(Rest, "") ->
+ Rest;
+partition2([C | R1], [C | R2]) ->
+ partition2(R1, R2);
+partition2(_S, _Sep) ->
+ undefined.
+
+
+
+%% @spec safe_relative_path(string()) -> string() | undefined
+%% @doc Return the reduced version of a relative path or undefined if it
+%% is not safe. Safe relative paths can be joined with an absolute path
+%% and will result in a subdirectory of the absolute path.
+safe_relative_path("/" ++ _) ->
+ undefined;
+safe_relative_path(P) ->
+ safe_relative_path(P, []).
+
+safe_relative_path("", Acc) ->
+ case Acc of
+ [] ->
+ "";
+ _ ->
+ string:join(lists:reverse(Acc), "/")
+ end;
+safe_relative_path(P, Acc) ->
+ case partition(P, "/") of
+ {"", "/", _} ->
+ %% /foo or foo//bar
+ undefined;
+ {"..", _, _} when Acc =:= [] ->
+ undefined;
+ {"..", _, Rest} ->
+ safe_relative_path(Rest, tl(Acc));
+ {Part, "/", ""} ->
+ safe_relative_path("", ["", Part | Acc]);
+ {Part, _, Rest} ->
+ safe_relative_path(Rest, [Part | Acc])
+ end.
+
+%% @spec shell_quote(string()) -> string()
+%% @doc Quote a string according to UNIX shell quoting rules, returns a string
+%% surrounded by double quotes.
+shell_quote(L) ->
+ shell_quote(L, [$\"]).
+
+%% @spec cmd_port([string()], Options) -> port()
+%% @doc open_port({spawn, mochiweb_util:cmd_string(Argv)}, Options).
+cmd_port(Argv, Options) ->
+ open_port({spawn, cmd_string(Argv)}, Options).
+
+%% @spec cmd([string()]) -> string()
+%% @doc os:cmd(cmd_string(Argv)).
+cmd(Argv) ->
+ os:cmd(cmd_string(Argv)).
+
+%% @spec cmd_string([string()]) -> string()
+%% @doc Create a shell quoted command string from a list of arguments.
+cmd_string(Argv) ->
+ string:join([shell_quote(X) || X <- Argv], " ").
+
+%% @spec cmd_status([string()]) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from the given command, which
+%% is spawned with cmd_port/2.
+cmd_status(Argv) ->
+ Port = cmd_port(Argv, [exit_status, stderr_to_stdout,
+ use_stdio, binary]),
+ try cmd_loop(Port, [])
+ after catch port_close(Port)
+ end.
+
+%% @spec cmd_loop(port(), list()) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from a port.
+cmd_loop(Port, Acc) ->
+ receive
+ {Port, {exit_status, Status}} ->
+ {Status, iolist_to_binary(lists:reverse(Acc))};
+ {Port, {data, Data}} ->
+ cmd_loop(Port, [Data | Acc])
+ end.
+
+%% @spec join([iolist()], iolist()) -> iolist()
+%% @doc Join a list of strings or binaries together with the given separator
+%% string or char or binary. The output is flattened, but may be an
+%% iolist() instead of a string() if any of the inputs are binary().
+join([], _Separator) ->
+ [];
+join([S], _Separator) ->
+ lists:flatten(S);
+join(Strings, Separator) ->
+ lists:flatten(revjoin(lists:reverse(Strings), Separator, [])).
+
+revjoin([], _Separator, Acc) ->
+ Acc;
+revjoin([S | Rest], Separator, []) ->
+ revjoin(Rest, Separator, [S]);
+revjoin([S | Rest], Separator, Acc) ->
+ revjoin(Rest, Separator, [S, Separator | Acc]).
+
+%% @spec quote_plus(atom() | integer() | float() | string() | binary()) -> string()
+%% @doc URL safe encoding of the given term.
+quote_plus(Atom) when is_atom(Atom) ->
+ quote_plus(atom_to_list(Atom));
+quote_plus(Int) when is_integer(Int) ->
+ quote_plus(integer_to_list(Int));
+quote_plus(Binary) when is_binary(Binary) ->
+ quote_plus(binary_to_list(Binary));
+quote_plus(Float) when is_float(Float) ->
+ quote_plus(mochinum:digits(Float));
+quote_plus(String) ->
+ quote_plus(String, []).
+
+quote_plus([], Acc) ->
+ lists:reverse(Acc);
+quote_plus([C | Rest], Acc) when ?QS_SAFE(C) ->
+ quote_plus(Rest, [C | Acc]);
+quote_plus([$\s | Rest], Acc) ->
+ quote_plus(Rest, [$+ | Acc]);
+quote_plus([C | Rest], Acc) ->
+ <<Hi:4, Lo:4>> = <<C>>,
+ quote_plus(Rest, [hexdigit(Lo), hexdigit(Hi), ?PERCENT | Acc]).
+
+%% @spec urlencode([{Key, Value}]) -> string()
+%% @doc URL encode the property list.
+urlencode(Props) ->
+ Pairs = lists:foldr(
+ fun ({K, V}, Acc) ->
+ [quote_plus(K) ++ "=" ++ quote_plus(V) | Acc]
+ end, [], Props),
+ string:join(Pairs, "&").
+
+%% @spec parse_qs(string() | binary()) -> [{Key, Value}]
+%% @doc Parse a query string or application/x-www-form-urlencoded.
+parse_qs(Binary) when is_binary(Binary) ->
+ parse_qs(binary_to_list(Binary));
+parse_qs(String) ->
+ parse_qs(String, []).
+
+parse_qs([], Acc) ->
+ lists:reverse(Acc);
+parse_qs(String, Acc) ->
+ {Key, Rest} = parse_qs_key(String),
+ {Value, Rest1} = parse_qs_value(Rest),
+ parse_qs(Rest1, [{Key, Value} | Acc]).
+
+parse_qs_key(String) ->
+ parse_qs_key(String, []).
+
+parse_qs_key([], Acc) ->
+ {qs_revdecode(Acc), ""};
+parse_qs_key([$= | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key(Rest=[$; | _], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key(Rest=[$& | _], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_key([C | Rest], Acc) ->
+ parse_qs_key(Rest, [C | Acc]).
+
+parse_qs_value(String) ->
+ parse_qs_value(String, []).
+
+parse_qs_value([], Acc) ->
+ {qs_revdecode(Acc), ""};
+parse_qs_value([$; | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_value([$& | Rest], Acc) ->
+ {qs_revdecode(Acc), Rest};
+parse_qs_value([C | Rest], Acc) ->
+ parse_qs_value(Rest, [C | Acc]).
+
+%% @spec unquote(string() | binary()) -> string()
+%% @doc Unquote a URL encoded string.
+unquote(Binary) when is_binary(Binary) ->
+ unquote(binary_to_list(Binary));
+unquote(String) ->
+ qs_revdecode(lists:reverse(String)).
+
+qs_revdecode(S) ->
+ qs_revdecode(S, []).
+
+qs_revdecode([], Acc) ->
+ Acc;
+qs_revdecode([$+ | Rest], Acc) ->
+ qs_revdecode(Rest, [$\s | Acc]);
+qs_revdecode([Lo, Hi, ?PERCENT | Rest], Acc) when ?IS_HEX(Lo), ?IS_HEX(Hi) ->
+ qs_revdecode(Rest, [(unhexdigit(Lo) bor (unhexdigit(Hi) bsl 4)) | Acc]);
+qs_revdecode([C | Rest], Acc) ->
+ qs_revdecode(Rest, [C | Acc]).
+
+%% @spec urlsplit(Url) -> {Scheme, Netloc, Path, Query, Fragment}
+%% @doc Return a 5-tuple, does not expand % escapes. Only supports HTTP style
+%% URLs.
+urlsplit(Url) ->
+ {Scheme, Url1} = urlsplit_scheme(Url),
+ {Netloc, Url2} = urlsplit_netloc(Url1),
+ {Path, Query, Fragment} = urlsplit_path(Url2),
+ {Scheme, Netloc, Path, Query, Fragment}.
+
+urlsplit_scheme(Url) ->
+ case urlsplit_scheme(Url, []) of
+ no_scheme ->
+ {"", Url};
+ Res ->
+ Res
+ end.
+
+urlsplit_scheme([C | Rest], Acc) when ((C >= $a andalso C =< $z) orelse
+ (C >= $A andalso C =< $Z) orelse
+ (C >= $0 andalso C =< $9) orelse
+ C =:= $+ orelse C =:= $- orelse
+ C =:= $.) ->
+ urlsplit_scheme(Rest, [C | Acc]);
+urlsplit_scheme([$: | Rest], Acc=[_ | _]) ->
+ {string:to_lower(lists:reverse(Acc)), Rest};
+urlsplit_scheme(_Rest, _Acc) ->
+ no_scheme.
+
+urlsplit_netloc("//" ++ Rest) ->
+ urlsplit_netloc(Rest, []);
+urlsplit_netloc(Path) ->
+ {"", Path}.
+
+urlsplit_netloc("", Acc) ->
+ {lists:reverse(Acc), ""};
+urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
+ {lists:reverse(Acc), Rest};
+urlsplit_netloc([C | Rest], Acc) ->
+ urlsplit_netloc(Rest, [C | Acc]).
+
+
+%% @spec path_split(string()) -> {Part, Rest}
+%% @doc Split a path starting from the left, as in URL traversal.
+%% path_split("foo/bar") = {"foo", "bar"},
+%% path_split("/foo/bar") = {"", "foo/bar"}.
+path_split(S) ->
+ path_split(S, []).
+
+path_split("", Acc) ->
+ {lists:reverse(Acc), ""};
+path_split("/" ++ Rest, Acc) ->
+ {lists:reverse(Acc), Rest};
+path_split([C | Rest], Acc) ->
+ path_split(Rest, [C | Acc]).
+
+
+%% @spec urlunsplit({Scheme, Netloc, Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL from the 5-tuple. Path must be absolute.
+urlunsplit({Scheme, Netloc, Path, Query, Fragment}) ->
+ lists:flatten([case Scheme of "" -> ""; _ -> [Scheme, "://"] end,
+ Netloc,
+ urlunsplit_path({Path, Query, Fragment})]).
+
+%% @spec urlunsplit_path({Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL path from the 3-tuple.
+urlunsplit_path({Path, Query, Fragment}) ->
+ lists:flatten([Path,
+ case Query of "" -> ""; _ -> [$? | Query] end,
+ case Fragment of "" -> ""; _ -> [$# | Fragment] end]).
+
+%% @spec urlsplit_path(Url) -> {Path, Query, Fragment}
+%% @doc Return a 3-tuple, does not expand % escapes. Only supports HTTP style
+%% paths.
+urlsplit_path(Path) ->
+ urlsplit_path(Path, []).
+
+urlsplit_path("", Acc) ->
+ {lists:reverse(Acc), "", ""};
+urlsplit_path("?" ++ Rest, Acc) ->
+ {Query, Fragment} = urlsplit_query(Rest),
+ {lists:reverse(Acc), Query, Fragment};
+urlsplit_path("#" ++ Rest, Acc) ->
+ {lists:reverse(Acc), "", Rest};
+urlsplit_path([C | Rest], Acc) ->
+ urlsplit_path(Rest, [C | Acc]).
+
+urlsplit_query(Query) ->
+ urlsplit_query(Query, []).
+
+urlsplit_query("", Acc) ->
+ {lists:reverse(Acc), ""};
+urlsplit_query("#" ++ Rest, Acc) ->
+ {lists:reverse(Acc), Rest};
+urlsplit_query([C | Rest], Acc) ->
+ urlsplit_query(Rest, [C | Acc]).
+
+%% @spec guess_mime(string()) -> string()
+%% @doc Guess the mime type of a file by the extension of its filename.
+guess_mime(File) ->
+ case mochiweb_mime:from_extension(filename:extension(File)) of
+ undefined ->
+ "text/plain";
+ Mime ->
+ Mime
+ end.
+
+%% @spec parse_header(string()) -> {Type, [{K, V}]}
+%% @doc Parse a Content-Type like header, return the main Content-Type
+%% and a property list of options.
+parse_header(String) ->
+ %% TODO: This is exactly as broken as Python's cgi module.
+ %% Should parse properly like mochiweb_cookies.
+ [Type | Parts] = [string:strip(S) || S <- string:tokens(String, ";")],
+ F = fun (S, Acc) ->
+ case lists:splitwith(fun (C) -> C =/= $= end, S) of
+ {"", _} ->
+ %% Skip anything with no name
+ Acc;
+ {_, ""} ->
+ %% Skip anything with no value
+ Acc;
+ {Name, [$\= | Value]} ->
+ [{string:to_lower(string:strip(Name)),
+ unquote_header(string:strip(Value))} | Acc]
+ end
+ end,
+ {string:to_lower(Type),
+ lists:foldr(F, [], Parts)}.
+
+unquote_header("\"" ++ Rest) ->
+ unquote_header(Rest, []);
+unquote_header(S) ->
+ S.
+
+unquote_header("", Acc) ->
+ lists:reverse(Acc);
+unquote_header("\"", Acc) ->
+ lists:reverse(Acc);
+unquote_header([$\\, C | Rest], Acc) ->
+ unquote_header(Rest, [C | Acc]);
+unquote_header([C | Rest], Acc) ->
+ unquote_header(Rest, [C | Acc]).
+
+%% @spec record_to_proplist(Record, Fields) -> proplist()
+%% @doc calls record_to_proplist/3 with a default TypeKey of '__record'
+record_to_proplist(Record, Fields) ->
+ record_to_proplist(Record, Fields, '__record').
+
+%% @spec record_to_proplist(Record, Fields, TypeKey) -> proplist()
+%% @doc Return a proplist of the given Record with each field in the
+%% Fields list set as a key with the corresponding value in the Record.
+%% TypeKey is the key that is used to store the record type
+%% Fields should be obtained by calling record_info(fields, record_type)
+%% where record_type is the record type of Record
+record_to_proplist(Record, Fields, TypeKey)
+ when tuple_size(Record) - 1 =:= length(Fields) ->
+ lists:zip([TypeKey | Fields], tuple_to_list(Record)).
+
+
+shell_quote([], Acc) ->
+ lists:reverse([$\" | Acc]);
+shell_quote([C | Rest], Acc) when C =:= $\" orelse C =:= $\` orelse
+ C =:= $\\ orelse C =:= $\$ ->
+ shell_quote(Rest, [C, $\\ | Acc]);
+shell_quote([C | Rest], Acc) ->
+ shell_quote(Rest, [C | Acc]).
+
+%% @spec parse_qvalues(string()) -> [qvalue()] | invalid_qvalue_string
+%% @type qvalue() = {media_type() | encoding() , float()}.
+%% @type media_type() = string().
+%% @type encoding() = string().
+%%
+%% @doc Parses a list (given as a string) of elements with Q values associated
+%% to them. Elements are separated by commas and each element is separated
+%% from its Q value by a semicolon. Q values are optional but when missing
+%% the value of an element is considered as 1.0. A Q value is always in the
+%% range [0.0, 1.0]. A Q value list is used for example as the value of the
+%% HTTP "Accept" and "Accept-Encoding" headers.
+%%
+%% Q values are described in section 3.9 of RFC 2616 (HTTP 1.1).
+%%
+%% Example:
+%%
+%% parse_qvalues("gzip; q=0.5, deflate, identity;q=0.0") ->
+%% [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}]
+%%
+parse_qvalues(QValuesStr) ->
+ try
+ lists:map(
+ fun(Pair) ->
+ [Type | Params] = string:tokens(Pair, ";"),
+ NormParams = normalize_media_params(Params),
+ {Q, NonQParams} = extract_q(NormParams),
+ {string:join([string:strip(Type) | NonQParams], ";"), Q}
+ end,
+ string:tokens(string:to_lower(QValuesStr), ",")
+ )
+ catch
+ _Type:_Error ->
+ invalid_qvalue_string
+ end.
+
+normalize_media_params(Params) ->
+ {ok, Re} = re:compile("\\s"),
+ normalize_media_params(Re, Params, []).
+
+normalize_media_params(_Re, [], Acc) ->
+ lists:reverse(Acc);
+normalize_media_params(Re, [Param | Rest], Acc) ->
+ NormParam = re:replace(Param, Re, "", [global, {return, list}]),
+ normalize_media_params(Re, Rest, [NormParam | Acc]).
+
+extract_q(NormParams) ->
+ {ok, KVRe} = re:compile("^([^=]+)=([^=]+)$"),
+ {ok, QRe} = re:compile("^((?:0|1)(?:\\.\\d{1,3})?)$"),
+ extract_q(KVRe, QRe, NormParams, []).
+
+extract_q(_KVRe, _QRe, [], Acc) ->
+ {1.0, lists:reverse(Acc)};
+extract_q(KVRe, QRe, [Param | Rest], Acc) ->
+ case re:run(Param, KVRe, [{capture, [1, 2], list}]) of
+ {match, [Name, Value]} ->
+ case Name of
+ "q" ->
+ {match, [Q]} = re:run(Value, QRe, [{capture, [1], list}]),
+ QVal = case Q of
+ "0" ->
+ 0.0;
+ "1" ->
+ 1.0;
+ Else ->
+ list_to_float(Else)
+ end,
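+ %% No true-clause on purpose: an out-of-range QVal
+ %% makes this case crash, and parse_qvalues/1 catches
+ %% the error and returns invalid_qvalue_string.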
+ case QVal < 0.0 orelse QVal > 1.0 of
+ false ->
+ {QVal, lists:reverse(Acc) ++ Rest}
+ end;
+ _ ->
+ extract_q(KVRe, QRe, Rest, [Param | Acc])
+ end
+ end.
+
+%% @spec pick_accepted_encodings([qvalue()], [encoding()], encoding()) ->
+%% [encoding()]
+%%
+%% @doc Determines which encodings specified in the given Q values list are
+%% valid according to a list of supported encodings and a default encoding.
+%%
+%% The returned list of encodings is sorted in descending order of the
+%% Q values in the given list. The last element of this list is the given
+%% default encoding unless this encoding is explicitly or implicitly
+%% marked with a Q value of 0.0 in the given Q values list.
+%% Note: encodings with the same Q value are kept in the same order as
+%% found in the input Q values list.
+%%
+%% This encoding picking process is described in section 14.3 of the
+%% RFC 2616 (HTTP 1.1).
+%%
+%% Example:
+%%
+%% pick_accepted_encodings(
+%% [{"gzip", 0.5}, {"deflate", 1.0}],
+%% ["gzip", "identity"],
+%% "identity"
+%% ) ->
+%% ["gzip", "identity"]
+%%
+pick_accepted_encodings(AcceptedEncs, SupportedEncs, DefaultEnc) ->
+ SortedQList = lists:reverse(
+ lists:sort(fun({_, Q1}, {_, Q2}) -> Q1 < Q2 end, AcceptedEncs)
+ ),
+ {Accepted, Refused} = lists:foldr(
+ fun({E, Q}, {A, R}) ->
+ case Q > 0.0 of
+ true ->
+ {[E | A], R};
+ false ->
+ {A, [E | R]}
+ end
+ end,
+ {[], []},
+ SortedQList
+ ),
+ Refused1 = lists:foldr(
+ fun(Enc, Acc) ->
+ case Enc of
+ "*" ->
+ lists:subtract(SupportedEncs, Accepted) ++ Acc;
+ _ ->
+ [Enc | Acc]
+ end
+ end,
+ [],
+ Refused
+ ),
+ Accepted1 = lists:foldr(
+ fun(Enc, Acc) ->
+ case Enc of
+ "*" ->
+ lists:subtract(SupportedEncs, Accepted ++ Refused1) ++ Acc;
+ _ ->
+ [Enc | Acc]
+ end
+ end,
+ [],
+ Accepted
+ ),
+ Accepted2 = case lists:member(DefaultEnc, Accepted1) of
+ true ->
+ Accepted1;
+ false ->
+ Accepted1 ++ [DefaultEnc]
+ end,
+ [E || E <- Accepted2, lists:member(E, SupportedEncs),
+ not lists:member(E, Refused1)].
+
+make_io(Atom) when is_atom(Atom) ->
+ atom_to_list(Atom);
+make_io(Integer) when is_integer(Integer) ->
+ integer_to_list(Integer);
+make_io(Io) when is_list(Io); is_binary(Io) ->
+ Io.
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+
+make_io_test() ->
+ ?assertEqual(
+ <<"atom">>,
+ iolist_to_binary(make_io(atom))),
+ ?assertEqual(
+ <<"20">>,
+ iolist_to_binary(make_io(20))),
+ ?assertEqual(
+ <<"list">>,
+ iolist_to_binary(make_io("list"))),
+ ?assertEqual(
+ <<"binary">>,
+ iolist_to_binary(make_io(<<"binary">>))),
+ ok.
+
+-record(test_record, {field1=f1, field2=f2}).
+record_to_proplist_test() ->
+ ?assertEqual(
+ [{'__record', test_record},
+ {field1, f1},
+ {field2, f2}],
+ record_to_proplist(#test_record{}, record_info(fields, test_record))),
+ ?assertEqual(
+ [{'typekey', test_record},
+ {field1, f1},
+ {field2, f2}],
+ record_to_proplist(#test_record{},
+ record_info(fields, test_record),
+ typekey)),
+ ok.
+
+shell_quote_test() ->
+ ?assertEqual(
+ "\"foo \\$bar\\\"\\`' baz\"",
+ shell_quote("foo $bar\"`' baz")),
+ ok.
+
+cmd_port_test_spool(Port, Acc) ->
+ receive
+ {Port, eof} ->
+ Acc;
+ {Port, {data, {eol, Data}}} ->
+ cmd_port_test_spool(Port, ["\n", Data | Acc]);
+ {Port, Unknown} ->
+ throw({unknown, Unknown})
+ after 100 ->
+ throw(timeout)
+ end.
+
+cmd_port_test() ->
+ Port = cmd_port(["echo", "$bling$ `word`!"],
+ [eof, stream, {line, 4096}]),
+ Res = try lists:append(lists:reverse(cmd_port_test_spool(Port, [])))
+ after catch port_close(Port)
+ end,
+ self() ! {Port, wtf},
+ try cmd_port_test_spool(Port, [])
+ catch throw:{unknown, wtf} -> ok
+ end,
+ try cmd_port_test_spool(Port, [])
+ catch throw:timeout -> ok
+ end,
+ ?assertEqual(
+ "$bling$ `word`!\n",
+ Res).
+
+cmd_test() ->
+ ?assertEqual(
+ "$bling$ `word`!\n",
+ cmd(["echo", "$bling$ `word`!"])),
+ ok.
+
+cmd_string_test() ->
+ ?assertEqual(
+ "\"echo\" \"\\$bling\\$ \\`word\\`!\"",
+ cmd_string(["echo", "$bling$ `word`!"])),
+ ok.
+
+cmd_status_test() ->
+ ?assertEqual(
+ {0, <<"$bling$ `word`!\n">>},
+ cmd_status(["echo", "$bling$ `word`!"])),
+ ok.
+
+
+parse_header_test() ->
+ ?assertEqual(
+ {"multipart/form-data", [{"boundary", "AaB03x"}]},
+ parse_header("multipart/form-data; boundary=AaB03x")),
+ %% This tests (currently) intentionally broken behavior
+ ?assertEqual(
+ {"multipart/form-data",
+ [{"b", ""},
+ {"cgi", "is"},
+ {"broken", "true\"e"}]},
+ parse_header("multipart/form-data;b=;cgi=\"i\\s;broken=true\"e;=z;z")),
+ ok.
+
+guess_mime_test() ->
+ "text/plain" = guess_mime(""),
+ "text/plain" = guess_mime(".text"),
+ "application/zip" = guess_mime(".zip"),
+ "application/zip" = guess_mime("x.zip"),
+ "text/html" = guess_mime("x.html"),
+ "application/xhtml+xml" = guess_mime("x.xhtml"),
+ ok.
+
+path_split_test() ->
+ {"", "foo/bar"} = path_split("/foo/bar"),
+ {"foo", "bar"} = path_split("foo/bar"),
+ {"bar", ""} = path_split("bar"),
+ ok.
+
+urlsplit_test() ->
+ {"", "", "/foo", "", "bar?baz"} = urlsplit("/foo#bar?baz"),
+ {"http", "host:port", "/foo", "", "bar?baz"} =
+ urlsplit("http://host:port/foo#bar?baz"),
+ {"http", "host", "", "", ""} = urlsplit("http://host"),
+ {"", "", "/wiki/Category:Fruit", "", ""} =
+ urlsplit("/wiki/Category:Fruit"),
+ ok.
+
+urlsplit_path_test() ->
+ {"/foo/bar", "", ""} = urlsplit_path("/foo/bar"),
+ {"/foo", "baz", ""} = urlsplit_path("/foo?baz"),
+ {"/foo", "", "bar?baz"} = urlsplit_path("/foo#bar?baz"),
+ {"/foo", "", "bar?baz#wibble"} = urlsplit_path("/foo#bar?baz#wibble"),
+ {"/foo", "bar", "baz"} = urlsplit_path("/foo?bar#baz"),
+ {"/foo", "bar?baz", "baz"} = urlsplit_path("/foo?bar?baz#baz"),
+ ok.
+
+urlunsplit_test() ->
+ "/foo#bar?baz" = urlunsplit({"", "", "/foo", "", "bar?baz"}),
+ "http://host:port/foo#bar?baz" =
+ urlunsplit({"http", "host:port", "/foo", "", "bar?baz"}),
+ ok.
+
+urlunsplit_path_test() ->
+ "/foo/bar" = urlunsplit_path({"/foo/bar", "", ""}),
+ "/foo?baz" = urlunsplit_path({"/foo", "baz", ""}),
+ "/foo#bar?baz" = urlunsplit_path({"/foo", "", "bar?baz"}),
+ "/foo#bar?baz#wibble" = urlunsplit_path({"/foo", "", "bar?baz#wibble"}),
+ "/foo?bar#baz" = urlunsplit_path({"/foo", "bar", "baz"}),
+ "/foo?bar?baz#baz" = urlunsplit_path({"/foo", "bar?baz", "baz"}),
+ ok.
+
+join_test() ->
+ ?assertEqual("foo,bar,baz",
+ join(["foo", "bar", "baz"], $,)),
+ ?assertEqual("foo,bar,baz",
+ join(["foo", "bar", "baz"], ",")),
+ ?assertEqual("foo bar",
+ join([["foo", " bar"]], ",")),
+ ?assertEqual("foo bar,baz",
+ join([["foo", " bar"], "baz"], ",")),
+ ?assertEqual("foo",
+ join(["foo"], ",")),
+ ?assertEqual("foobarbaz",
+ join(["foo", "bar", "baz"], "")),
+ ?assertEqual("foo" ++ [<<>>] ++ "bar" ++ [<<>>] ++ "baz",
+ join(["foo", "bar", "baz"], <<>>)),
+ ?assertEqual("foobar" ++ [<<"baz">>],
+ join(["foo", "bar", <<"baz">>], "")),
+ ?assertEqual("",
+ join([], "any")),
+ ok.
+
+quote_plus_test() ->
+ "foo" = quote_plus(foo),
+ "1" = quote_plus(1),
+ "1.1" = quote_plus(1.1),
+ "foo" = quote_plus("foo"),
+ "foo+bar" = quote_plus("foo bar"),
+ "foo%0A" = quote_plus("foo\n"),
+ "foo%0A" = quote_plus("foo\n"),
+ "foo%3B%26%3D" = quote_plus("foo;&="),
+ "foo%3B%26%3D" = quote_plus(<<"foo;&=">>),
+ ok.
+
+unquote_test() ->
+ ?assertEqual("foo bar",
+ unquote("foo+bar")),
+ ?assertEqual("foo bar",
+ unquote("foo%20bar")),
+ ?assertEqual("foo\r\n",
+ unquote("foo%0D%0A")),
+ ?assertEqual("foo\r\n",
+ unquote(<<"foo%0D%0A">>)),
+ ok.
+
+urlencode_test() ->
+ "foo=bar&baz=wibble+%0D%0A&z=1" = urlencode([{foo, "bar"},
+ {"baz", "wibble \r\n"},
+ {z, 1}]),
+ ok.
+
+parse_qs_test() ->
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
+ parse_qs("foo=bar&baz=wibble+%0D%0a&z=1")),
+ ?assertEqual(
+ [{"", "bar"}, {"baz", "wibble \r\n"}, {"z", ""}],
+ parse_qs("=bar&baz=wibble+%0D%0a&z=")),
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
+ parse_qs(<<"foo=bar&baz=wibble+%0D%0a&z=1">>)),
+ ?assertEqual(
+ [],
+ parse_qs("")),
+ ?assertEqual(
+ [{"foo", ""}, {"bar", ""}, {"baz", ""}],
+ parse_qs("foo;bar&baz")),
+ ok.
+
+partition_test() ->
+ {"foo", "", ""} = partition("foo", "/"),
+ {"foo", "/", "bar"} = partition("foo/bar", "/"),
+ {"foo", "/", ""} = partition("foo/", "/"),
+ {"", "/", "bar"} = partition("/bar", "/"),
+ {"f", "oo/ba", "r"} = partition("foo/bar", "oo/ba"),
+ ok.
+
+safe_relative_path_test() ->
+ "foo" = safe_relative_path("foo"),
+ "foo/" = safe_relative_path("foo/"),
+ "foo" = safe_relative_path("foo/bar/.."),
+ "bar" = safe_relative_path("foo/../bar"),
+ "bar/" = safe_relative_path("foo/../bar/"),
+ "" = safe_relative_path("foo/.."),
+ "" = safe_relative_path("foo/../"),
+ undefined = safe_relative_path("/foo"),
+ undefined = safe_relative_path("../foo"),
+ undefined = safe_relative_path("foo/../.."),
+ undefined = safe_relative_path("foo//"),
+ ok.
+
+parse_qvalues_test() ->
+ [] = parse_qvalues(""),
+ [{"identity", 0.0}] = parse_qvalues("identity;q=0"),
+ [{"identity", 0.0}] = parse_qvalues("identity ;q=0"),
+ [{"identity", 0.0}] = parse_qvalues(" identity; q =0 "),
+ [{"identity", 0.0}] = parse_qvalues("identity ; q = 0"),
+ [{"identity", 0.0}] = parse_qvalues("identity ; q= 0.0"),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip,deflate,identity;q=0.0"
+ ),
+ [{"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "deflate,gzip,identity;q=0.0"
+ ),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] =
+ parse_qvalues("gzip,deflate,gzip,identity;q=0"),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip, deflate , identity; q=0.0"
+ ),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=1, deflate;q=1.0, identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate;q=1.0, identity;q=0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate , identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 0.8}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate;q=0.8, identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}] = parse_qvalues(
+ "gzip; q=0.5,deflate,identity"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}, {"identity", 1.0}] =
+ parse_qvalues("gzip; q=0.5,deflate,identity, identity "),
+ [{"text/html;level=1", 1.0}, {"text/plain", 0.5}] =
+ parse_qvalues("text/html;level=1, text/plain;q=0.5"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html;level=1;q=0.3, text/plain"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html; level = 1; q = 0.3, text/plain"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html;q=0.3;level=1, text/plain"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=1.1, deflate"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=0.5, deflate;q=2"),
+ invalid_qvalue_string = parse_qvalues("gzip, deflate;q=AB"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=2.1, deflate"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=0.1234, deflate"),
+ invalid_qvalue_string = parse_qvalues("text/html;level=1;q=0.3, text/html;level"),
+ ok.
+
+pick_accepted_encodings_test() ->
+ ["identity"] = pick_accepted_encodings(
+ [],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["identity"] = pick_accepted_encodings(
+ [{"gzip", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.5}, {"deflate", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["identity"] = pick_accepted_encodings(
+ [{"gzip", 0.0}, {"deflate", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}, {"gzip", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 0.9}, {"gzip", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ [] = pick_accepted_encodings(
+ [{"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"*", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"*", 0.6}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 0.6}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip"] = pick_accepted_encodings(
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"deflate", 0.0}, {"*", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"*", 1.0}, {"deflate", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ok.
+
+-endif.
diff --git a/deps/mochiweb/src/reloader.erl b/deps/mochiweb/src/reloader.erl
new file mode 100644
index 00000000..c0f5de88
--- /dev/null
+++ b/deps/mochiweb/src/reloader.erl
@@ -0,0 +1,161 @@
+%% @copyright 2007 Mochi Media, Inc.
+%% @author Matthew Dempsky <matthew@mochimedia.com>
+%%
+%% @doc Erlang module for automatically reloading modified modules
+%% during development.
+
+-module(reloader).
+-author("Matthew Dempsky <matthew@mochimedia.com>").
+
+-include_lib("kernel/include/file.hrl").
+
+-behaviour(gen_server).
+-export([start/0, start_link/0]).
+-export([stop/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
+-export([all_changed/0]).
+-export([is_changed/1]).
+-export([reload_modules/1]).
+-record(state, {last, tref}).
+
+%% External API
+
+%% @spec start() -> ServerRet
+%% @doc Start the reloader.
+start() ->
+ gen_server:start({local, ?MODULE}, ?MODULE, [], []).
+
+%% @spec start_link() -> ServerRet
+%% @doc Start the reloader.
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+%% @spec stop() -> ok
+%% @doc Stop the reloader.
+stop() ->
+ gen_server:call(?MODULE, stop).
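+
+%% Example (illustrative): in a development shell, reloader:start() polls
+%% once per second and hot-reloads any module whose .beam file has a newer
+%% mtime; reloader:stop() shuts the poller down.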
+
+%% gen_server callbacks
+
+%% @spec init([]) -> {ok, State}
+%% @doc gen_server init, opens the server in an initial state.
+init([]) ->
+ {ok, TRef} = timer:send_interval(timer:seconds(1), doit),
+ {ok, #state{last = stamp(), tref = TRef}}.
+
+%% @spec handle_call(Args, From, State) -> tuple()
+%% @doc gen_server callback.
+handle_call(stop, _From, State) ->
+ {stop, shutdown, stopped, State};
+handle_call(_Req, _From, State) ->
+ {reply, {error, badrequest}, State}.
+
+%% @spec handle_cast(Cast, State) -> tuple()
+%% @doc gen_server callback.
+handle_cast(_Req, State) ->
+ {noreply, State}.
+
+%% @spec handle_info(Info, State) -> tuple()
+%% @doc gen_server callback.
+handle_info(doit, State) ->
+ Now = stamp(),
+ doit(State#state.last, Now),
+ {noreply, State#state{last = Now}};
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%% @spec terminate(Reason, State) -> ok
+%% @doc gen_server termination callback.
+terminate(_Reason, State) ->
+ {ok, cancel} = timer:cancel(State#state.tref),
+ ok.
+
+
+%% @spec code_change(_OldVsn, State, _Extra) -> {ok, State}
+%% @doc gen_server code_change callback (trivial).
+code_change(_Vsn, State, _Extra) ->
+ {ok, State}.
+
+%% @spec reload_modules([atom()]) -> [{module, atom()} | {error, term()}]
+%% @doc code:purge/1 and code:load_file/1 the given list of modules in order,
+%% return the results of code:load_file/1.
+reload_modules(Modules) ->
+ [begin code:purge(M), code:load_file(M) end || M <- Modules].
+
+%% @spec all_changed() -> [atom()]
+%% @doc Return a list of beam modules that have changed.
+all_changed() ->
+ [M || {M, Fn} <- code:all_loaded(), is_list(Fn), is_changed(M)].
+
+%% @spec is_changed(atom()) -> boolean()
+%% @doc Returns true if the loaded module is a beam with a vsn attribute
+%% that does not match the on-disk beam file; returns false otherwise.
+is_changed(M) ->
+ try
+ module_vsn(M:module_info()) =/= module_vsn(code:get_object_code(M))
+ catch _:_ ->
+ false
+ end.
+
+%% Internal API
+
+module_vsn({M, Beam, _Fn}) ->
+ {ok, {M, Vsn}} = beam_lib:version(Beam),
+ Vsn;
+module_vsn(L) when is_list(L) ->
+ {_, Attrs} = lists:keyfind(attributes, 1, L),
+ {_, Vsn} = lists:keyfind(vsn, 1, Attrs),
+ Vsn.
+
+doit(From, To) ->
+ [case file:read_file_info(Filename) of
+ {ok, #file_info{mtime = Mtime}} when Mtime >= From, Mtime < To ->
+ reload(Module);
+ {ok, _} ->
+ unmodified;
+ {error, enoent} ->
+ %% The Erlang compiler deletes existing .beam files if
+ %% recompiling fails. Maybe it's worth spitting out a
+ %% warning here, but I'd want to limit it to just once.
+ gone;
+ {error, Reason} ->
+ io:format("Error reading ~s's file info: ~p~n",
+ [Filename, Reason]),
+ error
+ end || {Module, Filename} <- code:all_loaded(), is_list(Filename)].
+
+reload(Module) ->
+ io:format("Reloading ~p ...", [Module]),
+ code:purge(Module),
+ case code:load_file(Module) of
+ {module, Module} ->
+ io:format(" ok.~n"),
+ case erlang:function_exported(Module, test, 0) of
+ true ->
+ io:format(" - Calling ~p:test() ...", [Module]),
+ case catch Module:test() of
+ ok ->
+ io:format(" ok.~n"),
+ reload;
+ Reason ->
+ io:format(" fail: ~p.~n", [Reason]),
+ reload_but_test_failed
+ end;
+ false ->
+ reload
+ end;
+ {error, Reason} ->
+ io:format(" fail: ~p.~n", [Reason]),
+ error
+ end.
+
+
+stamp() ->
+ erlang:localtime().
+
+%%
+%% Tests
+%%
+-include_lib("eunit/include/eunit.hrl").
+-ifdef(TEST).
+-endif.
diff --git a/deps/mochiweb/support/include.mk b/deps/mochiweb/support/include.mk
new file mode 100644
index 00000000..bdfdd840
--- /dev/null
+++ b/deps/mochiweb/support/include.mk
@@ -0,0 +1,41 @@
+## -*- makefile -*-
+
+######################################################################
+## Erlang
+
+ERL := erl
+ERLC := $(ERL)c
+
+INCLUDE_DIRS := ../include $(wildcard ../deps/*/include)
+EBIN_DIRS := $(wildcard ../deps/*/ebin)
+ERLC_FLAGS := -W $(INCLUDE_DIRS:../%=-I ../%) $(EBIN_DIRS:%=-pa %)
+
+ifndef no_debug_info
+ ERLC_FLAGS += +debug_info
+endif
+
+ifdef debug
+ ERLC_FLAGS += -Ddebug
+endif
+
+EBIN_DIR := ../ebin
+TEST_DIR := ../_test
+EMULATOR := beam
+
+ERL_SOURCES := $(wildcard *.erl)
+ERL_HEADERS := $(wildcard *.hrl) $(wildcard ../include/*.hrl)
+ERL_OBJECTS := $(ERL_SOURCES:%.erl=$(EBIN_DIR)/%.$(EMULATOR))
+ERL_OBJECTS_LOCAL := $(ERL_SOURCES:%.erl=./%.$(EMULATOR))
+APP_FILES := $(wildcard *.app.src)
+EBIN_FILES = $(ERL_OBJECTS) $(APP_FILES:%.app.src=../ebin/%.app)
+MODULES = $(ERL_SOURCES:%.erl=%)
+
+../ebin/%.app: %.app.src
+ ../support/make_app.escript $< $@ "" "$(MODULES)"
+
+
+$(EBIN_DIR)/%.$(EMULATOR): %.erl
+ $(ERLC) $(ERLC_FLAGS) -o $(EBIN_DIR) $<
+
+./%.$(EMULATOR): %.erl
+ $(ERLC) $(ERLC_FLAGS) -o . $<
diff --git a/deps/mochiweb/support/make_app.escript b/deps/mochiweb/support/make_app.escript
new file mode 100755
index 00000000..0d0b84b3
--- /dev/null
+++ b/deps/mochiweb/support/make_app.escript
@@ -0,0 +1,86 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+main(Args) ->
+ [AppSrc,AppF,Vsn,Modules] = Args,
+ {Comments, L, App} = parse_appsrc(AppSrc),
+ {application, A, Attrs} = App,
+ Attrs1 = [vsn(Vsn, Attrs),
+ descr(Attrs),
+ {modules, lists:sort([list_to_atom(M) || M <- string:tokens(Modules," ")])} |
+ [Attr || {K,_} = Attr <- Attrs,
+ not lists:member(K, [vsn, modules, description])]],
+ write_app(AppF, Comments, L, {application, A, Attrs1}).
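+
+%% Usage (illustrative):
+%%   make_app.escript src/myapp.app.src ebin/myapp.app "1.0.0" "mod_a mod_b"
+%% writes ebin/myapp.app with the given vsn and a sorted modules list,
+%% keeping the leading comments from the .app.src.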
+
+write_app(F, Comments, TermL, App) ->
+ case file:open(F, [write]) of
+ {ok, Fd} ->
+ try L = write_comments(Comments, Fd),
+ write_term(App, L, TermL, Fd)
+ after
+ file:close(Fd)
+ end;
+ Error ->
+ error(Error)
+ end.
+
+parse_appsrc(F) ->
+ case file:read_file(F) of
+ {ok, B} ->
+ case erl_scan:string(binary_to_list(B), 1, [return_comments]) of
+ {ok, Toks, _} ->
+ Comments = lists:takewhile(
+ fun({comment,_,_}) -> true;
+ (_) -> false
+ end, Toks),
+ TermToks = [T || T <- Toks, element(1,T) =/= comment],
+ TermL = element(2, hd(TermToks)),
+ case erl_parse:parse_term(TermToks) of
+ {ok, {application, _A, _Attrs} = App} ->
+ {Comments, TermL, App};
+ Error ->
+ error(Error)
+ end;
+ ScanErr ->
+ error(ScanErr)
+ end;
+ ReadErr ->
+ error(ReadErr)
+ end.
+
+write_comments(Comments, Fd) ->
+ lists:foldl(
+ fun({comment, L, C}, L0) ->
+ S = ["\n" || _ <- lists:seq(1,L-L0)] ++ C,
+ io:put_chars(Fd, S),
+ L
+ end, 1, Comments).
+
+write_term(T, L0, TermL, Fd) ->
+ case ["\n" || _ <- lists:seq(1,TermL-L0)] of
+ [] -> ok;
+ S -> io:put_chars(Fd, S)
+ end,
+ io:fwrite(Fd, "~p.~n", [T]).
+
+vsn(Vsn, Attrs) when Vsn =:= '' orelse Vsn =:= "" ->
+ case lists:keyfind(vsn, 1, Attrs) of
+ false ->
+ {vsn, "0.00"};
+ V ->
+ V
+ end;
+vsn(Vsn, _Attrs) ->
+ {vsn, Vsn}.
+
+descr(Attrs) ->
+ case lists:keyfind(description, 1, Attrs) of
+ false ->
+ {description, "auto_generated .app file"};
+ D ->
+ D
+ end.
+
+error(E) ->
+ io:fwrite("*** ~p~n", [E]),
+ halt(1).
diff --git a/deps/mochiweb/support/run_tests.escript b/deps/mochiweb/support/run_tests.escript
new file mode 100755
index 00000000..ff49c064
--- /dev/null
+++ b/deps/mochiweb/support/run_tests.escript
@@ -0,0 +1,94 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+%%! -name mochiweb__test@127.0.0.1
+main([Ebin]) ->
+ code:add_path(Ebin),
+ code:add_paths(filelib:wildcard("../deps/*/ebin", Ebin)),
+ code:add_paths(filelib:wildcard("../deps/*/deps/*/ebin", Ebin)),
+
+ ModuleNames = [hd(string:tokens(M, "."))
+ || "../src/" ++ M <- filelib:wildcard("../src/*.erl")],
+
+ {ok, NonTestRe} = re:compile("_tests$"),
+ Modules = [list_to_atom(M) ||
+ M <- lists:filter(
+ fun(M) ->
+ nomatch == re:run(M, NonTestRe)
+ end,
+ ModuleNames)],
+
+
+ crypto:start(),
+ start_cover(Modules),
+ eunit:test(Modules, [verbose,{report,{eunit_surefire,[{dir,"../_test"}]}}]),
+ analyze_cover(Modules);
+main(_) ->
+ io:format("usage: run_tests.escript EBIN_DIR~n"),
+ halt(1).
+
+start_cover(Modules) ->
+ {ok, _Cover} = cover:start(),
+ io:format("Cover compiling...~n"),
+ Compiled = [ M || {ok, M} <- [ cover:compile(
+ M,
+ [{i, "include"}
+ ])
+ || M <- Modules ] ],
+ case length(Modules) == length(Compiled) of
+ true -> ok;
+ false ->
+            io:format("Warning: the following modules were not"
+                      " cover-compiled:~n   ~p~n", [Modules -- Compiled])
+ end.
+
+analyze_cover(Modules) ->
+ io:format("Analyzing cover...~n"),
+ CoverBase = filename:join(["..", "_test", "cover"]),
+ ok = filelib:ensure_dir(filename:join([CoverBase, "fake"])),
+ Coverages = lists:foldl(
+ fun(M, Acc) ->
+ [analyze_module(CoverBase, M)|Acc]
+ end,
+ [], Modules),
+ IndexFilename = filename:join([CoverBase, "index.html"]),
+ {ok, Index} = file:open(IndexFilename, [write]),
+ {LineTotal, CoverTotal} =
+ lists:foldl(fun({_,_,Lines,Covered}, {LineAcc, CovAcc}) ->
+ {LineAcc+Lines, CovAcc+Covered}
+ end, {0,0}, Coverages),
+ file:write(Index,
+ "<html><head><title>Coverage</title></head>\n"
+ "<body><h1>Coverage</h1><ul>\n"),
+ file:write(Index,
+ io_lib:format("<h2>Total: ~.2f%</h2>\n",
+ [percentage(CoverTotal, LineTotal)])),
+ [ file:write(Index,
+ io_lib:format(
+ "<li><a href=\"~s\">~p</a>: ~.2f%</li>~n",
+ [Filename, Module, percentage(Covered, Lines)]))
+ || {Filename, Module, Lines, Covered} <- Coverages ],
+ file:write(Index,"</ul></body></html>"),
+ file:close(Index),
+ io:format("Cover analysis in ~s~n", [IndexFilename]).
+
+analyze_module(CoverBase, Module) ->
+ {ok, Filename} =
+ cover:analyze_to_file(
+ Module,
+ filename:join(CoverBase, atom_to_list(Module)++".COVER.html"),
+ [html]),
+    %% "Lines" counts executable lines ("NN..|"); "Uncovered" counts lines
+    %% that were never executed (" 0..|")
+    Lines = count_lines(Filename, "[[:digit:]]\.\.|"),
+    Uncovered = count_lines(Filename, "[[:space:]]0\.\.|"),
+    {filename:basename(Filename), Module, Lines, Lines-Uncovered}.
+
+count_lines(Filename, Pattern) ->
+ {ok, [Lines],_} = io_lib:fread(
+ "~d",
+ os:cmd(io_lib:format("grep -e \"~s\" ~s | wc -l",
+ [Pattern, Filename]))),
+ Lines.
+
+percentage(_, 0) -> 1000.0; % no executable lines; out-of-range marker
+percentage(Part, Total) ->
+ (Part/Total)*100.
+
diff --git a/deps/mochiweb/support/test-materials/test_ssl_cert.pem b/deps/mochiweb/support/test-materials/test_ssl_cert.pem
new file mode 100644
index 00000000..f84ccca7
--- /dev/null
+++ b/deps/mochiweb/support/test-materials/test_ssl_cert.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAgigAwIBAgIJAJLkNZzERPIUMA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
+BAMTCWxvY2FsaG9zdDAeFw0xMDAzMTgxOTM5MThaFw0yMDAzMTUxOTM5MThaMBQx
+EjAQBgNVBAMTCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAJeUCOZxbmtngF4S5lXckjSDLc+8C+XjMBYBPyy5eKdJY20AQ1s9/hhp3ulI
+8pAvl+xVo4wQ+iBSvOzcy248Q+Xi6+zjceF7UNRgoYPgtJjKhdwcHV3mvFFrS/fp
+9ggoAChaJQWDO1OCfUgTWXImhkw+vcDR11OVMAJ/h73dqzJPI9mfq44PTTHfYtgr
+v4LAQAOlhXIAa2B+a6PlF6sqDqJaW5jLTcERjsBwnRhUGi7JevQzkejujX/vdA+N
+jRBjKH/KLU5h3Q7wUchvIez0PXWVTCnZjpA9aR4m7YV05nKQfxtGd71czYDYk+j8
+hd005jetT4ir7JkAWValBybJVksCAwEAAaN1MHMwHQYDVR0OBBYEFJl9s51SnjJt
+V/wgKWqV5Q6jnv1ZMEQGA1UdIwQ9MDuAFJl9s51SnjJtV/wgKWqV5Q6jnv1ZoRik
+FjAUMRIwEAYDVQQDEwlsb2NhbGhvc3SCCQCS5DWcxETyFDAMBgNVHRMEBTADAQH/
+MA0GCSqGSIb3DQEBBQUAA4IBAQB2ldLeLCc+lxK5i0EZquLamMBJwDIjGpT0JMP9
+b4XQOK2JABIu54BQIZhwcjk3FDJz/uOW5vm8k1kYni8FCjNZAaRZzCUfiUYTbTKL
+Rq9LuIAODyP2dnTqyKaQOOJHvrx9MRZ3XVecXPS0Tib4aO57vCaAbIkmhtYpTWmw
+e3t8CAIDVtgvjR6Se0a1JA4LktR7hBu22tDImvCSJn1nVAaHpani6iPBPPdMuMsP
+TBoeQfj8VpqBUjCStqJGa8ytjDFX73YaxV2mgrtGwPNme1x3YNRR11yTu7tksyMO
+GrmgxNriqYRchBhNEf72AKF0LR1ByKwfbDB9rIsV00HtCgOp
+-----END CERTIFICATE-----
diff --git a/deps/mochiweb/support/test-materials/test_ssl_key.pem b/deps/mochiweb/support/test-materials/test_ssl_key.pem
new file mode 100644
index 00000000..69bbf823
--- /dev/null
+++ b/deps/mochiweb/support/test-materials/test_ssl_key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAl5QI5nFua2eAXhLmVdySNIMtz7wL5eMwFgE/LLl4p0ljbQBD
+Wz3+GGne6UjykC+X7FWjjBD6IFK87NzLbjxD5eLr7ONx4XtQ1GChg+C0mMqF3Bwd
+Xea8UWtL9+n2CCgAKFolBYM7U4J9SBNZciaGTD69wNHXU5UwAn+Hvd2rMk8j2Z+r
+jg9NMd9i2Cu/gsBAA6WFcgBrYH5ro+UXqyoOolpbmMtNwRGOwHCdGFQaLsl69DOR
+6O6Nf+90D42NEGMof8otTmHdDvBRyG8h7PQ9dZVMKdmOkD1pHibthXTmcpB/G0Z3
+vVzNgNiT6PyF3TTmN61PiKvsmQBZVqUHJslWSwIDAQABAoIBACI8Ky5xHDFh9RpK
+Rn/KC7OUlTpADKflgizWJ0Cgu2F9L9mkn5HyFHvLHa+u7CootbWJOiEejH/UcBtH
+WyMQtX0snYCpdkUpJv5wvMoebGu+AjHOn8tfm9T/2O6rhwgckLyMb6QpGbMo28b1
+p9QiY17BJPZx7qJQJcHKsAvwDwSThlb7MFmWf42LYWlzybpeYQvwpd+UY4I0WXLu
+/dqJIS9Npq+5Y5vbo2kAEAssb2hSCvhCfHmwFdKmBzlvgOn4qxgZ1iHQgfKI6Z3Y
+J0573ZgOVTuacn+lewtdg5AaHFcl/zIYEr9SNqRoPNGbPliuv6k6N2EYcufWL5lR
+sCmmmHECgYEAxm+7OpepGr++K3+O1e1MUhD7vSPkKJrCzNtUxbOi2NWj3FFUSPRU
+adWhuxvUnZgTcgM1+KuQ0fB2VmxXe9IDcrSFS7PKFGtd2kMs/5mBw4UgDZkOQh+q
+kDiBEV3HYYJWRq0w3NQ/9Iy1jxxdENHtGmG9aqamHxNtuO608wGW2S8CgYEAw4yG
+ZyAic0Q/U9V2OHI0MLxLCzuQz17C2wRT1+hBywNZuil5YeTuIt2I46jro6mJmWI2
+fH4S/geSZzg2RNOIZ28+aK79ab2jWBmMnvFCvaru+odAuser4N9pfAlHZvY0pT+S
+1zYX3f44ygiio+oosabLC5nWI0zB2gG8pwaJlaUCgYEAgr7poRB+ZlaCCY0RYtjo
+mYYBKD02vp5BzdKSB3V1zeLuBWM84pjB6b3Nw0fyDig+X7fH3uHEGN+USRs3hSj6
+BqD01s1OT6fyfbYXNw5A1r+nP+5h26Wbr0zblcKxdQj4qbbBZC8hOJNhqTqqA0Qe
+MmzF7jiBaiZV/Cyj4x1f9BcCgYEAhjL6SeuTuOctTqs/5pz5lDikh6DpUGcH8qaV
+o6aRAHHcMhYkZzpk8yh1uUdD7516APmVyvn6rrsjjhLVq4ZAJjwB6HWvE9JBN0TR
+bILF+sREHUqU8Zn2Ku0nxyfXCKIOnxlx/J/y4TaGYqBqfXNFWiXNUrjQbIlQv/xR
+K48g/MECgYBZdQlYbMSDmfPCC5cxkdjrkmAl0EgV051PWAi4wR+hLxIMRjHBvAk7
+IweobkFvT4TICulgroLkYcSa5eOZGxB/DHqcQCbWj3reFV0VpzmTDoFKG54sqBRl
+vVntGt0pfA40fF17VoS7riAdHF53ippTtsovHEsg5tq5NrBl5uKm2g==
+-----END RSA PRIVATE KEY-----
diff --git a/deps/oauth/Emakefile b/deps/oauth/Emakefile
new file mode 100644
index 00000000..a961122c
--- /dev/null
+++ b/deps/oauth/Emakefile
@@ -0,0 +1 @@
+{"src/*", [debug_info, warn_unused_vars, warn_unused_import, {outdir, "ebin"}]}. \ No newline at end of file
diff --git a/deps/oauth/License.txt b/deps/oauth/License.txt
new file mode 100644
index 00000000..08b71726
--- /dev/null
+++ b/deps/oauth/License.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2008-2009 Tim Fletcher <http://tfletcher.com/>
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/deps/oauth/Makefile b/deps/oauth/Makefile
new file mode 100644
index 00000000..f6aedc90
--- /dev/null
+++ b/deps/oauth/Makefile
@@ -0,0 +1,7 @@
+all:
+ @test -d ebin || mkdir ebin
+ @cp src/oauth.app ebin/
+ @erl -make
+
+clean:
+ @rm -rf ebin/* erl_crash.dump
diff --git a/deps/oauth/README.txt b/deps/oauth/README.txt
new file mode 100644
index 00000000..0145e451
--- /dev/null
+++ b/deps/oauth/README.txt
@@ -0,0 +1,36 @@
+An Erlang OAuth implementation.
+
+Quick start (client usage):
+
+ $ make
+ ...
+ $ erl -pa ebin -s crypto -s inets
+ ...
+ 1> Consumer = {"key", "secret", hmac_sha1}.
+ ...
+ 2> RequestTokenURL = "http://term.ie/oauth/example/request_token.php".
+ ...
+ 3> {ok, ResponseR} = oauth:get(RequestTokenURL, [], Consumer, "", "").
+ ...
+ 4> ParamsR = oauth_http:response_params(ResponseR).
+ ...
+ 5> TokenR = oauth:token(ParamsR).
+ ...
+ 6> TokenSecretR = oauth:token_secret(ParamsR).
+ ...
+ 7> AccessTokenURL = "http://term.ie/oauth/example/access_token.php".
+ ...
+ 8> {ok, ResponseA} = oauth:get(AccessTokenURL, [], Consumer, TokenR, TokenSecretR).
+ ...
+
+
+Thanks to Jason Davies, Paul Bonser, and Roberto Aloi for their patches.
+
+The percent encoding/decoding implementations are based on those found in
+the ibrowse library, written by Chandrashekhar Mullaparthi.
+
+Example client/server code is at http://github.com/tim/erlang-oauth-examples.
+
+Unit tests are at http://github.com/tim/erlang-oauth-tests.
+
+Erlang R12B-5 or greater is required for RSA-SHA1.
diff --git a/deps/oauth/src/oauth.app.src b/deps/oauth/src/oauth.app.src
new file mode 100644
index 00000000..9ff22b1a
--- /dev/null
+++ b/deps/oauth/src/oauth.app.src
@@ -0,0 +1,21 @@
+{application, oauth, [
+ {description, "An Erlang OAuth 1.0 implementation"},
+ {vsn, git},
+ {modules, [
+ oauth,
+ oauth_client,
+ oauth_hmac_sha1,
+ oauth_http,
+ oauth_plaintext,
+ oauth_rsa_sha1,
+ oauth_unix,
+ oauth_uri
+ ]},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib,
+ crypto,
+ inets
+ ]}
+]}.
diff --git a/deps/oauth/src/oauth.erl b/deps/oauth/src/oauth.erl
new file mode 100644
index 00000000..866655c9
--- /dev/null
+++ b/deps/oauth/src/oauth.erl
@@ -0,0 +1,107 @@
+-module(oauth).
+
+-export(
+ [ get/5
+ , header/1
+ , post/5
+ , signature/5
+ , signature_base_string/3
+ , signed_params/6
+ , token/1
+ , token_secret/1
+ , uri/2
+ , verify/6
+ ]).
+
+
+get(URL, ExtraParams, Consumer, Token, TokenSecret) ->
+ SignedParams = signed_params("GET", URL, ExtraParams, Consumer, Token, TokenSecret),
+ oauth_http:get(uri(URL, SignedParams)).
+
+post(URL, ExtraParams, Consumer, Token, TokenSecret) ->
+ SignedParams = signed_params("POST", URL, ExtraParams, Consumer, Token, TokenSecret),
+ oauth_http:post(URL, oauth_uri:params_to_string(SignedParams)).
+
+uri(Base, []) ->
+ Base;
+uri(Base, Params) ->
+ lists:concat([Base, "?", oauth_uri:params_to_string(Params)]).
+
+header(Params) ->
+ {"Authorization", "OAuth " ++ oauth_uri:params_to_header_string(Params)}.
+
+token(Params) ->
+ proplists:get_value("oauth_token", Params).
+
+token_secret(Params) ->
+ proplists:get_value("oauth_token_secret", Params).
+
+verify(Signature, HttpMethod, URL, Params, Consumer, TokenSecret) ->
+ case signature_method(Consumer) of
+ plaintext ->
+ oauth_plaintext:verify(Signature, consumer_secret(Consumer), TokenSecret);
+ hmac_sha1 ->
+ BaseString = signature_base_string(HttpMethod, URL, Params),
+ oauth_hmac_sha1:verify(Signature, BaseString, consumer_secret(Consumer), TokenSecret);
+ rsa_sha1 ->
+ BaseString = signature_base_string(HttpMethod, URL, Params),
+ oauth_rsa_sha1:verify(Signature, BaseString, consumer_secret(Consumer))
+ end.
+
+signed_params(HttpMethod, URL, ExtraParams, Consumer, Token, TokenSecret) ->
+ Params = token_param(Token, params(Consumer, ExtraParams)),
+ [{"oauth_signature", signature(HttpMethod, URL, Params, Consumer, TokenSecret)}|Params].
+
+signature(HttpMethod, URL, Params, Consumer, TokenSecret) ->
+ case signature_method(Consumer) of
+ plaintext ->
+ oauth_plaintext:signature(consumer_secret(Consumer), TokenSecret);
+ hmac_sha1 ->
+ BaseString = signature_base_string(HttpMethod, URL, Params),
+ oauth_hmac_sha1:signature(BaseString, consumer_secret(Consumer), TokenSecret);
+ rsa_sha1 ->
+ BaseString = signature_base_string(HttpMethod, URL, Params),
+ oauth_rsa_sha1:signature(BaseString, consumer_secret(Consumer))
+ end.
+
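+%% Builds the OAuth signature base string: the HTTP method, the normalized
+%% URL, and the sorted request parameters, each percent-encoded and joined
+%% with "&".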
+signature_base_string(HttpMethod, URL, Params) ->
+ NormalizedURL = oauth_uri:normalize(URL),
+ NormalizedParams = oauth_uri:params_to_string(lists:sort(Params)),
+ oauth_uri:calate("&", [HttpMethod, NormalizedURL, NormalizedParams]).
+
+token_param("", Params) ->
+ Params;
+token_param(Token, Params) ->
+ [{"oauth_token", Token}|Params].
+
+params(Consumer, Params) ->
+ Nonce = base64:encode_to_string(crypto:rand_bytes(32)), % cf. ruby-oauth
+ params(Consumer, oauth_unix:timestamp(), Nonce, Params).
+
+params(Consumer, Timestamp, Nonce, Params) ->
+ [ {"oauth_version", "1.0"}
+ , {"oauth_nonce", Nonce}
+ , {"oauth_timestamp", integer_to_list(Timestamp)}
+ , {"oauth_signature_method", signature_method_string(Consumer)}
+ , {"oauth_consumer_key", consumer_key(Consumer)}
+ | Params
+ ].
+
+signature_method_string(Consumer) ->
+ case signature_method(Consumer) of
+ plaintext ->
+ "PLAINTEXT";
+ hmac_sha1 ->
+ "HMAC-SHA1";
+ rsa_sha1 ->
+ "RSA-SHA1"
+ end.
+
+signature_method(_Consumer={_, _, Method}) ->
+ Method.
+
+consumer_secret(_Consumer={_, Secret, _}) ->
+ Secret.
+
+consumer_key(_Consumer={Key, _, _}) ->
+ Key.
diff --git a/deps/oauth/src/oauth_client.erl b/deps/oauth/src/oauth_client.erl
new file mode 100644
index 00000000..2bd24c03
--- /dev/null
+++ b/deps/oauth/src/oauth_client.erl
@@ -0,0 +1,149 @@
+-module(oauth_client).
+
+-behaviour(gen_server).
+
+-export([access_token_params/1, deauthorize/1, get/2, get/3, get/4, get_access_token/2,
+ get_access_token/3, get_access_token/4, get_request_token/2, get_request_token/3,
+ get_request_token/4, start/1, start/2, start_link/1, start_link/2, stop/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, code_change/3, terminate/2]).
+
+%%============================================================================
+%% API functions
+%%============================================================================
+
+start(Consumer) ->
+ gen_server:start(?MODULE, Consumer, []).
+
+start(ServerName, Consumer) ->
+ gen_server:start(ServerName, ?MODULE, Consumer, []).
+
+start_link(Consumer) ->
+ gen_server:start_link(?MODULE, Consumer, []).
+
+start_link(ServerName, Consumer) ->
+ gen_server:start_link(ServerName, ?MODULE, Consumer, []).
+
+get_request_token(Client, URL) ->
+ get_request_token(Client, URL, [], header).
+
+get_request_token(Client, URL, Params) ->
+ gen_server:call(Client, {get_request_token, URL, Params, header}).
+
+get_request_token(Client, URL, Params, ParamsMethod) ->
+ gen_server:call(Client, {get_request_token, URL, Params, ParamsMethod}).
+
+get_access_token(Client, URL) ->
+ get_access_token(Client, URL, [], header).
+
+get_access_token(Client, URL, Params) ->
+ gen_server:call(Client, {get_access_token, URL, Params, header}).
+
+get_access_token(Client, URL, Params, ParamsMethod) ->
+ gen_server:call(Client, {get_access_token, URL, Params, ParamsMethod}).
+
+get(Client, URL) ->
+ get(Client, URL, [], header).
+
+get(Client, URL, Params) ->
+ gen_server:call(Client, {get, URL, Params, header}).
+
+get(Client, URL, Params, ParamsMethod) ->
+ gen_server:call(Client, {get, URL, Params, ParamsMethod}).
+
+access_token_params(Client) ->
+ gen_server:call(Client, {access_token_params}).
+
+deauthorize(Client) ->
+ gen_server:cast(Client, deauthorize).
+
+stop(Client) ->
+ gen_server:cast(Client, stop).
+
+%%============================================================================
+%% Helper functions
+%%============================================================================
+
+oauth_get(header, URL, Params, Consumer, Token, TokenSecret) ->
+ Signed = oauth:signed_params("GET", URL, Params, Consumer, Token, TokenSecret),
+ {AuthorizationParams, QueryParams} = lists:partition(fun({K, _}) -> lists:prefix("oauth_", K) end, Signed),
+ Request = {oauth:uri(URL, QueryParams), [oauth:header(AuthorizationParams)]},
+ httpc:request(get, Request, [{autoredirect, false}], []);
+oauth_get(querystring, URL, Params, Consumer, Token, TokenSecret) ->
+ oauth:get(URL, Params, Consumer, Token, TokenSecret).
+
+%%============================================================================
+%% gen_server callbacks
+%%============================================================================
+
+init(Consumer) ->
+ {ok, {Consumer}}.
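+
+%% The state tuple grows as the OAuth dance progresses:
+%% {Consumer} -> {Consumer, RequestParams} -> {Consumer, RequestParams, AccessParams}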
+
+handle_call({get_request_token, URL, Params, ParamsMethod}, _From, State={Consumer}) ->
+ case oauth_get(ParamsMethod, URL, Params, Consumer, "", "") of
+ {ok, Response} ->
+ case oauth_http:response_code(Response) of
+ 200 ->
+ RParams = oauth_http:response_params(Response),
+ {reply, {ok, oauth:token(RParams)}, {Consumer, RParams}};
+ _ ->
+ {reply, Response, State}
+ end;
+ Error ->
+ {reply, Error, State}
+ end;
+handle_call({get_access_token, URL, Params, ParamsMethod}, _From, State={Consumer, RParams}) ->
+ case oauth_get(ParamsMethod, URL, Params, Consumer, oauth:token(RParams), oauth:token_secret(RParams)) of
+ {ok, Response} ->
+ case oauth_http:response_code(Response) of
+ 200 ->
+ AParams = oauth_http:response_params(Response),
+ {reply, ok, {Consumer, RParams, AParams}};
+ _ ->
+ {reply, Response, State}
+ end;
+ Error ->
+ {reply, Error, State}
+ end;
+handle_call({get, URL, Params, ParamsMethod}, _From, State={Consumer, _RParams, AParams}) ->
+ case oauth_get(ParamsMethod, URL, Params, Consumer, oauth:token(AParams), oauth:token_secret(AParams)) of
+ {ok, Response={{_, Status, _}, Headers, Body}} ->
+ case Status of
+ 200 ->
+ case proplists:get_value("content-type", Headers) of
+ undefined ->
+ {reply, {ok, Headers, Body}, State};
+ ContentType ->
+ MediaType = hd(string:tokens(ContentType, ";")),
+ case lists:suffix("/xml", MediaType) orelse lists:suffix("+xml", MediaType) of
+ true ->
+ {XML, []} = xmerl_scan:string(Body),
+ {reply, {ok, Headers, XML}, State};
+ false ->
+ {reply, {ok, Headers, Body}, State}
+ end
+ end;
+ _ ->
+ {reply, Response, State}
+ end;
+ Error ->
+ {reply, Error, State}
+ end;
+handle_call({access_token_params}, _From, State={_Consumer, _RParams, AParams}) ->
+ {reply, AParams, State}.
+
+handle_cast(deauthorize, {Consumer, _RParams}) ->
+ {noreply, {Consumer}};
+handle_cast(deauthorize, {Consumer, _RParams, _AParams}) ->
+ {noreply, {Consumer}};
+handle_cast(stop, State) ->
+ {stop, normal, State}.
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(normal, _State) ->
+ ok.
diff --git a/deps/oauth/src/oauth_hmac_sha1.erl b/deps/oauth/src/oauth_hmac_sha1.erl
new file mode 100644
index 00000000..79d59f37
--- /dev/null
+++ b/deps/oauth/src/oauth_hmac_sha1.erl
@@ -0,0 +1,11 @@
+-module(oauth_hmac_sha1).
+
+-export([signature/3, verify/4]).
+
+
+signature(BaseString, CS, TS) ->
+ Key = oauth_uri:calate("&", [CS, TS]),
+ base64:encode_to_string(crypto:sha_mac(Key, BaseString)).
+
+verify(Signature, BaseString, CS, TS) ->
+ couch_util:verify(signature(BaseString, CS, TS), Signature).
diff --git a/deps/oauth/src/oauth_http.erl b/deps/oauth/src/oauth_http.erl
new file mode 100644
index 00000000..92c806cc
--- /dev/null
+++ b/deps/oauth/src/oauth_http.erl
@@ -0,0 +1,22 @@
+-module(oauth_http).
+
+-export([get/1, post/2, response_params/1, response_body/1, response_code/1]).
+
+
+get(URL) ->
+ request(get, {URL, []}).
+
+post(URL, Data) ->
+ request(post, {URL, [], "application/x-www-form-urlencoded", Data}).
+
+request(Method, Request) ->
+ httpc:request(Method, Request, [{autoredirect, false}], []).
+
+response_params(Response) ->
+ oauth_uri:params_from_string(response_body(Response)).
+
+response_body({{_, _, _}, _, Body}) ->
+ Body.
+
+response_code({{_, Code, _}, _, _}) ->
+ Code.
diff --git a/deps/oauth/src/oauth_plaintext.erl b/deps/oauth/src/oauth_plaintext.erl
new file mode 100644
index 00000000..41a1e9b2
--- /dev/null
+++ b/deps/oauth/src/oauth_plaintext.erl
@@ -0,0 +1,10 @@
+-module(oauth_plaintext).
+
+-export([signature/2, verify/3]).
+
+
+signature(CS, TS) ->
+ oauth_uri:calate("&", [CS, TS]).
+
+verify(Signature, CS, TS) ->
+ couch_util:verify(signature(CS, TS), Signature).
diff --git a/deps/oauth/src/oauth_rsa_sha1.erl b/deps/oauth/src/oauth_rsa_sha1.erl
new file mode 100644
index 00000000..6f4828e0
--- /dev/null
+++ b/deps/oauth/src/oauth_rsa_sha1.erl
@@ -0,0 +1,30 @@
+-module(oauth_rsa_sha1).
+
+-export([signature/2, verify/3]).
+
+-include_lib("public_key/include/public_key.hrl").
+
+
+signature(BaseString, PrivateKeyPath) ->
+ {ok, [Info]} = public_key:pem_to_der(PrivateKeyPath),
+ {ok, PrivateKey} = public_key:decode_private_key(Info),
+ base64:encode_to_string(public_key:sign(list_to_binary(BaseString), PrivateKey)).
+
+verify(Signature, BaseString, PublicKey) ->
+ public_key:verify_signature(to_binary(BaseString), sha, base64:decode(Signature), public_key(PublicKey)).
+
+to_binary(Term) when is_list(Term) ->
+ list_to_binary(Term);
+to_binary(Term) when is_binary(Term) ->
+ Term.
+
+public_key(Path) when is_list(Path) ->
+ {ok, [{cert, DerCert, not_encrypted}]} = public_key:pem_to_der(Path),
+ {ok, Cert} = public_key:pkix_decode_cert(DerCert, otp),
+ public_key(Cert);
+public_key(#'OTPCertificate'{tbsCertificate=Cert}) ->
+ public_key(Cert);
+public_key(#'OTPTBSCertificate'{subjectPublicKeyInfo=Info}) ->
+ public_key(Info);
+public_key(#'OTPSubjectPublicKeyInfo'{subjectPublicKey=Key}) ->
+ Key.
diff --git a/deps/oauth/src/oauth_unix.erl b/deps/oauth/src/oauth_unix.erl
new file mode 100644
index 00000000..73ca3143
--- /dev/null
+++ b/deps/oauth/src/oauth_unix.erl
@@ -0,0 +1,16 @@
+-module(oauth_unix).
+
+-export([timestamp/0]).
+
+
+timestamp() ->
+ timestamp(calendar:universal_time()).
+
+timestamp(DateTime) ->
+ seconds(DateTime) - epoch().
+
+epoch() ->
+ seconds({{1970,1,1},{00,00,00}}).
+
+seconds(DateTime) ->
+ calendar:datetime_to_gregorian_seconds(DateTime).
diff --git a/deps/oauth/src/oauth_uri.erl b/deps/oauth/src/oauth_uri.erl
new file mode 100644
index 00000000..5023f983
--- /dev/null
+++ b/deps/oauth/src/oauth_uri.erl
@@ -0,0 +1,98 @@
+-module(oauth_uri).
+
+-export([normalize/1, calate/2, encode/1]).
+-export([params_from_string/1, params_to_string/1,
+ params_from_header_string/1, params_to_header_string/1]).
+
+-import(lists, [concat/1]).
+
+
+normalize(URI) ->
+ case http_uri:parse(URI) of
+ {Scheme, UserInfo, Host, Port, Path, _Query} ->
+ normalize(Scheme, UserInfo, string:to_lower(Host), Port, [Path]);
+ Else ->
+ Else
+ end.
+
+normalize(http, UserInfo, Host, 80, Acc) ->
+ normalize(http, UserInfo, [Host|Acc]);
+normalize(https, UserInfo, Host, 443, Acc) ->
+ normalize(https, UserInfo, [Host|Acc]);
+normalize(Scheme, UserInfo, Host, Port, Acc) ->
+ normalize(Scheme, UserInfo, [Host, ":", Port|Acc]).
+
+normalize(Scheme, [], Acc) ->
+ concat([Scheme, "://"|Acc]);
+normalize(Scheme, UserInfo, Acc) ->
+ concat([Scheme, "://", UserInfo, "@"|Acc]).
+
+params_to_header_string(Params) ->
+ intercalate(", ", [concat([encode(K), "=\"", encode(V), "\""]) || {K, V} <- Params]).
+
+params_from_header_string(String) ->
+ [param_from_header_string(Param) || Param <- re:split(String, ",\\s*", [{return, list}]), Param =/= ""].
+
+param_from_header_string(Param) ->
+ [Key, QuotedValue] = string:tokens(Param, "="),
+ Value = string:substr(QuotedValue, 2, length(QuotedValue) - 2),
+ {decode(Key), decode(Value)}.
+
+params_from_string(Params) ->
+ [param_from_string(Param) || Param <- string:tokens(Params, "&")].
+
+param_from_string(Param) ->
+ list_to_tuple([decode(Value) || Value <- string:tokens(Param, "=")]).
+
+params_to_string(Params) ->
+ intercalate("&", [calate("=", [K, V]) || {K, V} <- Params]).
+
+calate(Sep, Xs) ->
+ intercalate(Sep, [encode(X) || X <- Xs]).
+
+intercalate(Sep, Xs) ->
+ concat(intersperse(Sep, Xs)).
+
+intersperse(_, []) -> [];
+intersperse(_, [X]) -> [X];
+intersperse(Sep, [X|Xs]) ->
+ [X, Sep|intersperse(Sep, Xs)].
+
+-define(is_alphanum(C), C >= $A, C =< $Z; C >= $a, C =< $z; C >= $0, C =< $9).
+
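+%% Percent-encoding per RFC 3986: unreserved characters (ALPHA / DIGIT /
+%% "-" / "_" / "." / "~") pass through; everything else is %XX-escaped.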
+encode(Term) when is_integer(Term) ->
+ integer_to_list(Term);
+encode(Term) when is_atom(Term) ->
+ encode(atom_to_list(Term));
+encode(Term) when is_list(Term) ->
+ encode(lists:reverse(Term, []), []).
+
+encode([X | T], Acc) when ?is_alphanum(X); X =:= $-; X =:= $_; X =:= $.; X =:= $~ ->
+ encode(T, [X | Acc]);
+encode([X | T], Acc) ->
+ NewAcc = [$%, dec2hex(X bsr 4), dec2hex(X band 16#0f) | Acc],
+ encode(T, NewAcc);
+encode([], Acc) ->
+ Acc.
+
+decode(Str) when is_list(Str) ->
+ decode(Str, []).
+
+decode([$%, A, B | T], Acc) ->
+ decode(T, [(hex2dec(A) bsl 4) + hex2dec(B) | Acc]);
+decode([X | T], Acc) ->
+ decode(T, [X | Acc]);
+decode([], Acc) ->
+ lists:reverse(Acc, []).
+
+-compile({inline, [{dec2hex, 1}, {hex2dec, 1}]}).
+
+dec2hex(N) when N >= 10 andalso N =< 15 ->
+ N + $A - 10;
+dec2hex(N) when N >= 0 andalso N =< 9 ->
+ N + $0.
+
+hex2dec(C) when C >= $A andalso C =< $F ->
+ C - $A + 10;
+hex2dec(C) when C >= $0 andalso C =< $9 ->
+ C - $0.
diff --git a/deps/rexi/README.md b/deps/rexi/README.md
new file mode 100644
index 00000000..b2eeaea2
--- /dev/null
+++ b/deps/rexi/README.md
@@ -0,0 +1,23 @@
+Rexi is a tailor-made RPC server application for sending [CouchDB][1] operations to nodes in a cluster. It is used in [BigCouch][2] as the remote procedure call vehicle that gets [fabric][6] functions to execute on remote cluster nodes.
+
+Rexi better fits the needs of the BigCouch distributed data store by dropping some unneeded overhead found in rex, the RPC server that ships with Erlang/OTP. Rexi is optimized for the case where you need to spawn a large number of remote processes.
+
+Cast messages are sent from the origin to the remote rexi server, and local processes are spawned from there, which is vastly more efficient than spawning remote processes from the origin. You still get monitoring of the remote processes, but the request-handling process doesn't get stuck trying to connect to an overloaded or dead node; 'rexi_DOWN' messages will still arrive at the client eventually. This mix of low latency and reliable failure detection has proven extremely advantageous, vastly improving the performance of BigCouch.
+
+Rexi is used in conjunction with 'Fabric', another application within BigCouch, but it can also be used on a stand-alone basis.
+
+### Getting Started
+Rexi requires Erlang/OTP R13B03 or higher and can be built with [rebar][7], which comes bundled in the repository.
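+
+A minimal sketch of the dispatch pattern (the MFA is illustrative; any
+function that sends its result back with rexi:reply/1 will work):
+
+    Ref = rexi:cast(Node, {fabric_rpc, get_db_info, [DbName]}),
+    receive
+        {Ref, {rexi_EXIT, Reason}} -> {error, Reason};
+        {Ref, Reply} -> Reply
+    end.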
+
+### License
+[Apache 2.0][3]
+
+### Contact
+ * [http://cloudant.com][4]
+ * [info@cloudant.com][5]
+
+[1]: http://couchdb.apache.org
+[2]: http://github.com/cloudant/BigCouch
+[3]: http://www.apache.org/licenses/LICENSE-2.0.html
+[4]: http://cloudant.com
+[5]: mailto:info@cloudant.com
+[6]: http://github.com/cloudant/fabric
+[7]: http://github.com/basho/rebar
diff --git a/deps/rexi/include/rexi.hrl b/deps/rexi/include/rexi.hrl
new file mode 100644
index 00000000..b51c5af7
--- /dev/null
+++ b/deps/rexi/include/rexi.hrl
@@ -0,0 +1,22 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(error, {
+ timestamp,
+ reason,
+ mfa,
+ nonce,
+ stack
+}).
+
diff --git a/deps/rexi/rebar b/deps/rexi/rebar
new file mode 100755
index 00000000..30c43ba5
--- /dev/null
+++ b/deps/rexi/rebar
Binary files differ
diff --git a/deps/rexi/rebar.config b/deps/rexi/rebar.config
new file mode 100644
index 00000000..4af6b852
--- /dev/null
+++ b/deps/rexi/rebar.config
@@ -0,0 +1,17 @@
+% Copyright 2011 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+{deps, [
+ {twig, ".*", {git, "https://github.com/cloudant/twig.git", {tag, "0.2.1"}}}
+]}.
diff --git a/deps/rexi/src/rexi.app.src b/deps/rexi/src/rexi.app.src
new file mode 100644
index 00000000..75baa77f
--- /dev/null
+++ b/deps/rexi/src/rexi.app.src
@@ -0,0 +1,7 @@
+{application, rexi, [
+ {description, "Lightweight RPC server"},
+ {vsn, git},
+ {registered, [rexi_sup, rexi_server]},
+ {applications, [kernel, stdlib]},
+ {mod, {rexi_app,[]}}
+]}.
diff --git a/deps/rexi/src/rexi.erl b/deps/rexi/src/rexi.erl
new file mode 100644
index 00000000..a98b5610
--- /dev/null
+++ b/deps/rexi/src/rexi.erl
@@ -0,0 +1,125 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(rexi).
+-export([start/0, stop/0, restart/0]).
+-export([cast/2, cast/3, kill/2]).
+-export([reply/1, sync_reply/1, sync_reply/2]).
+-export([async_server_call/2, async_server_call/3]).
+-export([get_errors/0, get_last_error/0, set_error_limit/1]).
+
+-include("rexi.hrl").
+
+-define(SERVER, rexi_server).
+
+start() ->
+ application:start(rexi).
+
+stop() ->
+ application:stop(rexi).
+
+restart() ->
+ stop(), start().
+
+-spec get_errors() -> {ok, [#error{}]}.
+get_errors() ->
+ gen_server:call(?SERVER, get_errors).
+
+-spec get_last_error() -> {ok, #error{}} | {error, empty}.
+get_last_error() ->
+ gen_server:call(?SERVER, get_last_error).
+
+-spec set_error_limit(pos_integer()) -> ok.
+set_error_limit(N) when is_integer(N), N > 0 ->
+ gen_server:call(?SERVER, {set_error_limit, N}).
+
+%% @equiv cast(Node, self(), MFA)
+-spec cast(node(), {atom(), atom(), list()}) -> reference().
+cast(Node, MFA) ->
+ cast(Node, self(), MFA).
+
+%% @doc Executes apply(M, F, A) on Node.
+%% You might want to use this instead of rpc:cast/4 for two reasons. First,
+%% the Caller pid and the returned reference are inserted into the remote
+%% process' dictionary as `rexi_from', so it has a way to communicate with you.
+%% Second, the remote process is monitored. If it exits with a Reason other
+%% than normal, Caller will receive a message of the form
+%% `{Ref, {rexi_EXIT, Reason}}' where Ref is the returned reference.
+-spec cast(node(), pid(), {atom(), atom(), list()}) -> reference().
+cast(Node, Caller, MFA) ->
+ Ref = make_ref(),
+ do_send({?SERVER, Node}, cast_msg({doit, {Caller, Ref}, get(nonce), MFA})),
+ Ref.
+
+%% @doc Sends an async kill signal to the remote process associated with Ref.
+%% No rexi_EXIT message will be sent.
+-spec kill(node(), reference()) -> ok.
+kill(Node, Ref) ->
+ do_send({?SERVER, Node}, cast_msg({kill, Ref})),
+ ok.
+
+%% @equiv async_server_call(Server, self(), Request)
+-spec async_server_call(pid() | {atom(),node()}, any()) -> reference().
+async_server_call(Server, Request) ->
+ async_server_call(Server, self(), Request).
+
+%% @doc Sends a properly formatted gen_server:call Request to the Server and
+%% returns the reference which the Server will include in its reply. The
+%% function acts more like cast() than call() in that the server process
+%% is not monitored. Clients who want to know if the server is alive should
+%% monitor it themselves before calling this function.
+-spec async_server_call(pid() | {atom(),node()}, pid(), any()) -> reference().
+async_server_call(Server, Caller, Request) ->
+ Ref = make_ref(),
+ do_send(Server, {'$gen_call', {Caller,Ref}, Request}),
+ Ref.
+
+%% @doc convenience function to reply to the original rexi Caller.
+-spec reply(any()) -> any().
+reply(Reply) ->
+ {Caller, Ref} = get(rexi_from),
+ erlang:send(Caller, {Ref,Reply}).
+
+%% @equiv sync_reply(Reply, 300000)
+sync_reply(Reply) ->
+ sync_reply(Reply, 300000).
+
+%% @doc convenience function to reply to caller and wait for response. Message
+%% is of the form {OriginalRef, {self(),reference()}, Reply}, which enables the
+%% original caller to respond back.
+-spec sync_reply(any(), pos_integer() | infinity) -> any().
+sync_reply(Reply, Timeout) ->
+ {Caller, Ref} = get(rexi_from),
+ Tag = make_ref(),
+ erlang:send(Caller, {Ref, {self(),Tag}, Reply}),
+ receive {Tag, Response} ->
+ Response
+ after Timeout ->
+ timeout
+ end.
+
+%% internal functions %%
+
+cast_msg(Msg) -> {'$gen_cast', Msg}.
+
+% send a message as quickly as possible: if the send would block on
+% connection setup (noconnect) or a busy port (nosuspend), hand it off
+% to a throwaway process so the caller never waits
+do_send(Dest, Msg) ->
+ case erlang:send(Dest, Msg, [noconnect, nosuspend]) of
+ noconnect ->
+ spawn(erlang, send, [Dest, Msg]);
+ nosuspend ->
+ spawn(erlang, send, [Dest, Msg]);
+ ok ->
+ ok
+ end.
diff --git a/deps/rexi/src/rexi_app.erl b/deps/rexi/src/rexi_app.erl
new file mode 100644
index 00000000..2dd99c23
--- /dev/null
+++ b/deps/rexi/src/rexi_app.erl
@@ -0,0 +1,25 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(rexi_app).
+-behaviour(application).
+-export([start/2, stop/1]).
+
+-include_lib("eunit/include/eunit.hrl").
+
+start(_Type, StartArgs) ->
+ rexi_sup:start_link(StartArgs).
+
+stop(_State) ->
+ ok.
diff --git a/deps/rexi/src/rexi_monitor.erl b/deps/rexi/src/rexi_monitor.erl
new file mode 100644
index 00000000..ab33fb87
--- /dev/null
+++ b/deps/rexi/src/rexi_monitor.erl
@@ -0,0 +1,66 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(rexi_monitor).
+-export([start/1, stop/1]).
+
+-include_lib("eunit/include/eunit.hrl").
+
+%% @doc spawn_links a process which monitors the supplied list of items and
+%% returns the process ID. If a monitored process exits, the caller will
+%% receive a {rexi_DOWN, MonitoringPid, DeadPid, Reason} message.
+-spec start([pid() | atom() | {atom(),node()}]) -> pid().
+start(Procs) ->
+ Parent = self(),
+ Nodes = [node() | nodes()],
+ {Mon, Skip} = lists:partition(fun(P) -> should_monitor(P, Nodes) end,
+ Procs),
+ spawn_link(fun() ->
+ [notify_parent(Parent, P, noconnect) || P <- Skip],
+ [erlang:monitor(process, P) || P <- Mon],
+ wait_monitors(Parent)
+ end).
+
+%% @doc Cleanly shut down the monitoring process and flush all rexi_DOWN
+%% messages from our mailbox.
+-spec stop(pid()) -> ok.
+stop(MonitoringPid) ->
+ MonitoringPid ! {self(), shutdown},
+ flush_down_messages().
+
+%% internal functions %%
+
+notify_parent(Parent, Pid, Reason) ->
+ erlang:send(Parent, {rexi_DOWN, self(), Pid, Reason}).
+
+should_monitor(Pid, Nodes) when is_pid(Pid) ->
+ lists:member(node(Pid), Nodes);
+should_monitor({_, Node}, Nodes) ->
+ lists:member(Node, Nodes).
+
+wait_monitors(Parent) ->
+ receive
+ {'DOWN', _, process, Pid, Reason} ->
+ notify_parent(Parent, Pid, Reason),
+ wait_monitors(Parent);
+ {Parent, shutdown} ->
+ ok
+ end.
+
+flush_down_messages() ->
+ receive {rexi_DOWN, _, _, _} ->
+ flush_down_messages()
+ after 0 ->
+ ok
+ end.
diff --git a/deps/rexi/src/rexi_server.erl b/deps/rexi/src/rexi_server.erl
new file mode 100644
index 00000000..aa417aa4
--- /dev/null
+++ b/deps/rexi/src/rexi_server.erl
@@ -0,0 +1,190 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(rexi_server).
+-behaviour(gen_server).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([start_link/0, init_p/2, init_p/3]).
+
+-include("rexi.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-record(job, {
+ client::reference(),
+ worker::reference(),
+ client_pid::pid(),
+ worker_pid::pid()
+}).
+
+-record(st, {
+ workers = ets:new(workers, [private, {keypos, #job.worker}]),
+ clients = ets:new(clients, [private, {keypos, #job.client}]),
+ errors = queue:new(),
+ error_limit = 20,
+ error_count = 0
+}).
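+
+%% Each job is indexed twice: by worker monitor reference (so 'DOWN'
+%% messages in handle_info can find it) and by client reference (so a
+%% {kill, Ref} cast from the origin can find the local worker).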
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+init([]) ->
+ {ok, #st{}}.
+
+handle_call(get_errors, _From, #st{errors = Errors} = St) ->
+ {reply, {ok, lists:reverse(queue:to_list(Errors))}, St};
+
+handle_call(get_last_error, _From, #st{errors = Errors} = St) ->
+ try
+ {reply, {ok, queue:get_r(Errors)}, St}
+ catch error:empty ->
+ {reply, {error, empty}, St}
+ end;
+
+handle_call({set_error_limit, N}, _From, #st{error_count=Len, errors=Q} = St) ->
+ if N < Len ->
+ {NewQ, _} = queue:split(N, Q);
+ true ->
+ NewQ = Q
+ end,
+ NewLen = queue:len(NewQ),
+ {reply, ok, St#st{error_limit=N, error_count=NewLen, errors=NewQ}};
+
+handle_call(_Request, _From, St) ->
+ {reply, ignored, St}.
+
+
+handle_cast({doit, From, MFA}, St) ->
+ handle_cast({doit, From, undefined, MFA}, St);
+
+handle_cast({doit, {ClientPid, ClientRef} = From, Nonce, MFA}, State) ->
+ {LocalPid, Ref} = spawn_monitor(?MODULE, init_p, [From, MFA, Nonce]),
+ Job = #job{
+ client = ClientRef,
+ worker = Ref,
+ client_pid = ClientPid,
+ worker_pid = LocalPid
+ },
+ {noreply, add_job(Job, State)};
+
+
+handle_cast({kill, FromRef}, #st{clients = Clients} = St) ->
+ case find_worker(FromRef, Clients) of
+ #job{worker = KeyRef, worker_pid = Pid} = Job ->
+ erlang:demonitor(KeyRef),
+ exit(Pid, kill),
+ {noreply, remove_job(Job, St)};
+ false ->
+ {noreply, St}
+ end;
+
+handle_cast(_, St) ->
+ twig:log(notice, "rexi_server ignored_cast"),
+ {noreply, St}.
+
+handle_info({'DOWN', Ref, process, _, normal}, #st{workers=Workers} = St) ->
+ case find_worker(Ref, Workers) of
+ #job{} = Job ->
+ {noreply, remove_job(Job, St)};
+ false ->
+ {noreply, St}
+ end;
+
+handle_info({'DOWN', Ref, process, Pid, Error}, #st{workers=Workers} = St) ->
+ case find_worker(Ref, Workers) of
+ #job{worker_pid=Pid, worker=Ref, client_pid=CPid, client=CRef} =Job ->
+ case Error of #error{reason = {_Class, Reason}, stack = Stack} ->
+ notify_caller({CPid, CRef}, {Reason, Stack}),
+ St1 = save_error(Error, St),
+ {noreply, remove_job(Job, St1)};
+ _ ->
+ notify_caller({CPid, CRef}, Error),
+ {noreply, remove_job(Job, St)}
+ end;
+ false ->
+ {noreply, St}
+ end;
+
+handle_info(_Info, St) ->
+ {noreply, St}.
+
+terminate(_Reason, St) ->
+ ets:foldl(fun(#job{worker_pid=Pid},_) -> exit(Pid,kill) end, nil,
+ St#st.workers),
+ ok.
+
+code_change(_OldVsn, {st, Workers}, _Extra) ->
+ {ok, #st{workers = Workers}};
+
+code_change(_OldVsn, {st, Workers0, Errors, Limit, Count}, _Extra) ->
+ Jobs = [#job{worker_pid=A, worker=B, client_pid=C, client=D}
+ || {A, B, {C, D}} <- ets:tab2list(Workers0)],
+ ets:delete(Workers0),
+ State = #st{errors = Errors, error_limit = Limit, error_count = Count},
+ ets:insert(State#st.workers, Jobs),
+ ets:insert(State#st.clients, Jobs),
+ {ok, State};
+
+code_change(_OldVsn, St, _Extra) ->
+ {ok, St}.
+
+init_p(From, MFA) ->
+ init_p(From, MFA, undefined).
+
+%% @doc initializes a process started by rexi_server.
+-spec init_p({pid(), reference()}, {atom(), atom(), list()},
+ string() | undefined) -> any().
+init_p(From, {M,F,A}, Nonce) ->
+ put(rexi_from, From),
+ put(initial_call, {M,F,length(A)}),
+ put(nonce, Nonce),
+ try apply(M, F, A) catch exit:normal -> ok; Class:Reason ->
+ Stack = clean_stack(),
+ twig:log(error, "rexi_server ~p:~p ~100p", [Class, Reason, Stack]),
+ exit(#error{
+ timestamp = now(),
+ reason = {Class, Reason},
+ mfa = {M,F,A},
+ nonce = Nonce,
+ stack = Stack
+ })
+ end.
+
+%% internal
+
+save_error(E, #st{errors=Q, error_limit=L, error_count=C} = St) when C >= L ->
+ St#st{errors = queue:in(E, queue:drop(Q))};
+save_error(E, #st{errors=Q, error_count=C} = St) ->
+ St#st{errors = queue:in(E, Q), error_count = C+1}.
+
+clean_stack() ->
+ lists:map(fun({M,F,A}) when is_list(A) -> {M,F,length(A)}; (X) -> X end,
+ erlang:get_stacktrace()).
+
+add_job(Job, #st{workers = Workers, clients = Clients} = State) ->
+ ets:insert(Workers, Job),
+ ets:insert(Clients, Job),
+ State.
+
+remove_job(Job, #st{workers = Workers, clients = Clients} = State) ->
+ ets:delete_object(Workers, Job),
+ ets:delete_object(Clients, Job),
+ State.
+
+find_worker(Ref, Tab) ->
+ case ets:lookup(Tab, Ref) of [] -> false; [Worker] -> Worker end.
+
+notify_caller({Caller, Ref}, Reason) ->
+ Caller ! {Ref, {rexi_EXIT, Reason}}.
diff --git a/deps/rexi/src/rexi_sup.erl b/deps/rexi/src/rexi_sup.erl
new file mode 100644
index 00000000..828ee54d
--- /dev/null
+++ b/deps/rexi/src/rexi_sup.erl
@@ -0,0 +1,29 @@
+% Copyright 2010 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(rexi_sup).
+-behaviour(supervisor).
+-export([init/1]).
+
+-export([start_link/1]).
+
+-include_lib("eunit/include/eunit.hrl").
+
+start_link(Args) ->
+ supervisor:start_link({local,?MODULE}, ?MODULE, Args).
+
+init([]) ->
+ Mod = rexi_server,
+ Spec = {Mod, {Mod,start_link,[]}, permanent, 100, worker, [Mod]},
+ {ok, {{one_for_one, 3, 10}, [Spec]}}.
diff --git a/deps/rexi/src/rexi_utils.erl b/deps/rexi/src/rexi_utils.erl
new file mode 100644
index 00000000..7791866d
--- /dev/null
+++ b/deps/rexi/src/rexi_utils.erl
@@ -0,0 +1,52 @@
+-module(rexi_utils).
+
+-export([recv/6]).
+
+%% @doc set up the receive loop with an overall timeout
+-spec recv([any()], integer(), function(), any(), timeout(), timeout()) ->
+ {ok, any()} | {timeout, any()} | {error, atom()} | {error, atom(), any()}.
+recv(Refs, Keypos, Fun, Acc0, infinity, PerMsgTO) ->
+ process_mailbox(Refs, Keypos, Fun, Acc0, nil, PerMsgTO);
+recv(Refs, Keypos, Fun, Acc0, GlobalTimeout, PerMsgTO) ->
+ TimeoutRef = erlang:make_ref(),
+ TRef = erlang:send_after(GlobalTimeout, self(), {timeout, TimeoutRef}),
+ try
+ process_mailbox(Refs, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO)
+ after
+ erlang:cancel_timer(TRef)
+ end.
+
+process_mailbox(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
+ case process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) of
+ {ok, Acc} ->
+ process_mailbox(RefList, Keypos, Fun, Acc, TimeoutRef, PerMsgTO);
+ {stop, Acc} ->
+ {ok, Acc};
+ Error ->
+ Error
+ end.
+
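+%% Matches the three message shapes a caller can see: plain replies
+%% {Ref, Msg} from rexi:reply/1, synchronous replies {Ref, From, Msg} from
+%% rexi:sync_reply/2, and {rexi_DOWN, ...} exit notices from rexi_monitor.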
+process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
+ receive
+ {timeout, TimeoutRef} ->
+ {timeout, Acc0};
+ {Ref, Msg} ->
+ case lists:keyfind(Ref, Keypos, RefList) of
+ false ->
+ % this was some non-matching message which we will ignore
+ {ok, Acc0};
+ Worker ->
+ Fun(Msg, Worker, Acc0)
+ end;
+ {Ref, From, Msg} ->
+ case lists:keyfind(Ref, Keypos, RefList) of
+ false ->
+ {ok, Acc0};
+ Worker ->
+ Fun(Msg, {Worker, From}, Acc0)
+ end;
+ {rexi_DOWN, _, _, _} = Msg ->
+ Fun(Msg, nil, Acc0)
+ after PerMsgTO ->
+ {timeout, Acc0}
+ end.
diff --git a/deps/twig/README.md b/deps/twig/README.md
new file mode 100644
index 00000000..97852efa
--- /dev/null
+++ b/deps/twig/README.md
@@ -0,0 +1,11 @@
+Twig is a SASL-compliant Erlang/OTP logger. It installs a gen_event handler in the error_logger event manager, where it consumes standard OTP reports and messages as well as events generated by twig:log/2,3,4. Log messages are written to a syslog server over UDP using the format specified in RFC 5424.
+
+Twig's behavior is controlled using the application environment:
+
+* host (undefined): the hostname of the syslog server
+* port (514): the port of the syslog server
+* facility (local2): syslog facility to be used
+* level (info): logging threshold. Messages "above" this threshold (in syslog parlance) will be discarded. Acceptable values are debug, info, notice, warn, err, crit, alert, and emerg.
+* appid ("twig"): inserted as the APP-NAME field in the syslog message
+* max_term_size (8192): raw data size below which we format normally
+* max_message_size (16000): approx. max size of truncated string
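+
+For example, a sys.config entry might look like this (the hostname is
+illustrative):
+
+    [{twig, [{host, "syslog.example.com"}, {port, 514},
+             {facility, local2}, {level, debug}]}].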
diff --git a/deps/twig/rebar b/deps/twig/rebar
new file mode 100755
index 00000000..1f55c738
--- /dev/null
+++ b/deps/twig/rebar
Binary files differ
diff --git a/deps/twig/src/trunc_io.erl b/deps/twig/src/trunc_io.erl
new file mode 100644
index 00000000..cfa6c972
--- /dev/null
+++ b/deps/twig/src/trunc_io.erl
@@ -0,0 +1,215 @@
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with your Erlang distribution. If not, it can be
+%% retrieved via the world wide web at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Corelatus AB.
+%% Portions created by Corelatus are Copyright 2003, Corelatus
+%% AB. All Rights Reserved.''
+%%
+%% Module to print out terms for logging. Limits by length rather than depth.
+%%
+%% The resulting string may be slightly larger than the limit; the intention
+%% is to provide predictable CPU and memory consumption for formatting
+%% terms, not produce precise string lengths.
+%%
+%% Typical use:
+%%
+%% trunc_io:print(Term, 500).
+%%
+-module(trunc_io).
+-author('matthias@corelatus.se').
+%% And thanks to Chris Newcombe for a bug fix
+-export([print/2, fprint/2, safe/2]). % interface functions
+-export([perf/0, perf/3, perf1/0, test/0, test/2]). % testing functions
+-version("$Id: trunc_io.erl,v 1.11 2009-02-23 12:01:06 matthias Exp $").
+
+
+%% Returns a flattened list containing the ASCII representation of the given
+%% term.
+fprint(T, Max) ->
+ {L, _} = print(T, Max),
+ lists:flatten(L).
+
+%% Same as print, but never crashes.
+%%
+%% This is a tradeoff. Print might conceivably crash if it's asked to
+%% print something it doesn't understand, for example some new data
+%% type in a future version of Erlang. If print crashes, we fall back
+%% to io_lib to format the term, but then the formatting is
+%% depth-limited instead of length-limited, so you might run out of
+%% memory printing it. Out of the frying pan and into the fire.
+%%
+safe(What, Len) ->
+ case catch print(What, Len) of
+ {L, Used} when is_list(L) -> {L, Used};
+ _ -> {"unable to print" ++ io_lib:write(What, 99)}
+ end.
+
+%% Returns {List, Length}
+print(_, Max) when Max < 0 -> {"...", 3};
+print(Tuple, Max) when is_tuple(Tuple) ->
+ {TC, Len} = tuple_contents(Tuple, Max-2),
+ {[${, TC, $}], Len + 2};
+
+%% We assume atoms, floats, funs, integers, PIDs, ports and refs never need
+%% to be truncated. This isn't strictly true; someone could make an
+%% arbitrarily long bignum. Let's assume that won't happen unless someone
+%% is being malicious.
+%%
+print(Atom, _Max) when is_atom(Atom) ->
+ L = atom_to_list(Atom),
+ {L, length(L)};
+
+print(<<>>, _Max) ->
+ {"<<>>", 4};
+
+print(Binary, Max) when is_binary(Binary) ->
+ B = binary_to_list(Binary, 1, lists:min([Max, size(Binary)])),
+ {L, Len} = alist_start(B, Max-4),
+ {["<<", L, ">>"], Len};
+
+print(Float, _Max) when is_float(Float) ->
+ L = float_to_list(Float),
+ {L, length(L)};
+
+print(Fun, _Max) when is_function(Fun) ->
+ L = erlang:fun_to_list(Fun),
+ {L, length(L)};
+
+print(Integer, _Max) when is_integer(Integer) ->
+ L = integer_to_list(Integer),
+ {L, length(L)};
+
+print(Pid, _Max) when is_pid(Pid) ->
+ L = pid_to_list(Pid),
+ {L, length(L)};
+
+print(Ref, _Max) when is_reference(Ref) ->
+ L = erlang:ref_to_list(Ref),
+ {L, length(L)};
+
+print(Port, _Max) when is_port(Port) ->
+ L = erlang:port_to_list(Port),
+ {L, length(L)};
+
+print(List, Max) when is_list(List) ->
+ alist_start(List, Max).
+
+%% Returns {List, Length}
+tuple_contents(Tuple, Max) ->
+ L = tuple_to_list(Tuple),
+ list_body(L, Max).
+
+%% Format the inside of a list, i.e. do not add a leading [ or trailing ].
+%% Returns {List, Length}
+list_body([], _) -> {[], 0};
+list_body(_, Max) when Max < 4 -> {"...", 3};
+list_body([H|T], Max) ->
+ {List, Len} = print(H, Max),
+ {Final, FLen} = list_bodyc(T, Max - Len),
+ {[List|Final], FLen + Len};
+list_body(X, Max) -> %% improper list
+ {List, Len} = print(X, Max - 1),
+ {[$|,List], Len + 1}.
+
+list_bodyc([], _) -> {[], 0};
+list_bodyc(_, Max) when Max < 4 -> {"...", 3};
+list_bodyc([H|T], Max) ->
+ {List, Len} = print(H, Max),
+ {Final, FLen} = list_bodyc(T, Max - Len - 1),
+ {[$,, List|Final], FLen + Len + 1};
+list_bodyc(X,Max) -> %% improper list
+ {List, Len} = print(X, Max - 1),
+ {[$|,List], Len + 1}.
+
+%% The head of a list we hope is ascii. Examples:
+%%
+%% [65,66,67] -> "ABC"
+%% [65,0,67] -> "A"[0,67]
+%% [0,65,66] -> [0,65,66]
+%% [65,b,66] -> "A"[b,66]
+%%
+alist_start([], _) -> {"[]", 2};
+alist_start(_, Max) when Max < 4 -> {"...", 3};
+alist_start([H|T], Max) when H >= 16#20, H =< 16#7e -> % definitely printable
+ {L, Len} = alist([H|T], Max-1),
+ {[$\"|L], Len + 1};
+alist_start([H|T], Max) when H == 9; H == 10; H == 13 -> % show as space
+ {L, Len} = alist(T, Max-1),
+ {[$ |L], Len + 1};
+alist_start(L, Max) ->
+ {R, Len} = list_body(L, Max-2),
+ {[$[, R, $]], Len + 2}.
+
+alist([], _) -> {"\"", 1};
+alist(_, Max) when Max < 5 -> {"...\"", 4};
+alist([H|T], Max) when H >= 16#20, H =< 16#7e -> % definitely printable
+ {L, Len} = alist(T, Max-1),
+ {[H|L], Len + 1};
+alist([H|T], Max) when H == 9; H == 10; H == 13 -> % show as space
+ {L, Len} = alist(T, Max-1),
+ {[$ |L], Len + 1};
+alist(L, Max) ->
+ {R, Len} = list_body(L, Max-3),
+ {[$\", $[, R, $]], Len + 3}.
+
+
+%%--------------------
+%% The start of a test suite. So far, it only checks for not crashing.
+test() ->
+ test(trunc_io, print).
+
+test(Mod, Func) ->
+ Simple_items = [atom, 1234, 1234.0, {tuple}, [], [list], "string", self(),
+ <<1,2,3>>, make_ref(), fun() -> ok end],
+ F = fun(A) ->
+ Mod:Func(A, 100),
+ Mod:Func(A, 2),
+ Mod:Func(A, 20)
+ end,
+
+ G = fun(A) ->
+ case catch F(A) of
+ {'EXIT', _} -> exit({failed, A});
+ _ -> ok
+ end
+ end,
+
+ lists:foreach(G, Simple_items),
+
+ Tuples = [ {1,2,3,a,b,c}, {"abc", def, 1234},
+ {{{{a},b,c,{d},e}},f}],
+
+ Lists = [ [1,2,3,4,5,6,7], lists:seq(1,1000),
+ [{a}, {a,b}, {a, [b,c]}, "def"], [a|b], [$a|$b] ],
+
+
+ lists:foreach(G, Tuples),
+ lists:foreach(G, Lists).
+
+perf() ->
+ {New, _} = timer:tc(trunc_io, perf, [trunc_io, print, 1000]),
+ {Old, _} = timer:tc(trunc_io, perf, [io_lib, write, 1000]),
+ io:fwrite("New code took ~p us, old code ~p\n", [New, Old]).
+
+perf(M, F, Reps) when Reps > 0 ->
+ test(M,F),
+ perf(M,F,Reps-1);
+perf(_,_,_) ->
+ done.
+
+%% Performance test. Needs a particularly large term I saved as a binary...
+perf1() ->
+ {ok, Bin} = file:read_file("bin"),
+ A = binary_to_term(Bin),
+ {N, _} = timer:tc(trunc_io, print, [A, 1500]),
+ {M, _} = timer:tc(io_lib, write, [A]),
+ {N, M}.
+
diff --git a/deps/twig/src/twig.app.src b/deps/twig/src/twig.app.src
new file mode 100644
index 00000000..7e1da747
--- /dev/null
+++ b/deps/twig/src/twig.app.src
@@ -0,0 +1,8 @@
+{application, twig, [
+ {description, "Logger"},
+ {vsn, git},
+ {registered, []},
+ {applications, [kernel, stdlib]},
+ {mod, {twig_app, []}},
+ {env, []}
+]}.
diff --git a/deps/twig/src/twig.erl b/deps/twig/src/twig.erl
new file mode 100644
index 00000000..6a24e6bc
--- /dev/null
+++ b/deps/twig/src/twig.erl
@@ -0,0 +1,55 @@
+% Copyright 2011 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(twig).
+
+-export([log/2, log/3, log/4, set_level/1]).
+
+-include("twig_int.hrl").
+
+set_level(LevelAtom) ->
+ application:set_env(twig, level, twig_util:level(LevelAtom)).
+
+log(LevelAtom, String) ->
+ log(LevelAtom, String, [], []).
+
+log(LevelAtom, Format, Data) ->
+ log(LevelAtom, Format, Data, []).
+
+log(LevelAtom, Format, Data, _Options) ->
+ %% TODO do something useful with options
+ Level = twig_util:level(LevelAtom),
+ case application:get_env(twig, level) of
+ {ok, Threshold} when Level =< Threshold ->
+ send_message(Level, Format, Data);
+ undefined when Level =< ?LEVEL_INFO ->
+ send_message(Level, Format, Data);
+ _ ->
+ ok
+ end.
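+
+%% e.g. twig:log(err, "failed to open ~s: ~p", [Path, Reason])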
+
+%% internal
+
+send_message(Level, Format, Data) ->
+ gen_event:sync_notify(error_logger, format(Level, Format, Data)).
+
+format(Level, Format, Data) ->
+ %% TODO truncate large messages
+ #twig{
+ level = Level,
+ msg = iolist_to_binary(twig_util:format(Format, Data)),
+ msgid = erlang:get(nonce),
+ pid = self()
+ }.
+
diff --git a/deps/twig/src/twig_app.erl b/deps/twig/src/twig_app.erl
new file mode 100644
index 00000000..209391cc
--- /dev/null
+++ b/deps/twig/src/twig_app.erl
@@ -0,0 +1,23 @@
+% Copyright 2011 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(twig_app).
+-behaviour(application).
+-export([start/2, stop/1]).
+
+start(_StartType, _StartArgs) ->
+ twig_sup:start_link().
+
+stop(_State) ->
+ ok.
diff --git a/deps/twig/src/twig_event_handler.erl b/deps/twig/src/twig_event_handler.erl
new file mode 100644
index 00000000..cd61b0d3
--- /dev/null
+++ b/deps/twig/src/twig_event_handler.erl
@@ -0,0 +1,164 @@
+% Copyright 2011 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(twig_event_handler).
+
+-behaviour(gen_event).
+
+-export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-import(twig_util, [get_env/2]).
+
+-record(state, {
+ socket,
+ host,
+ port,
+ hostname,
+ os_pid,
+ appid,
+ facility,
+ level
+}).
+
+-include("twig_int.hrl").
+
+init([]) ->
+ {ok, Socket} = gen_udp:open(0),
+ {ok, ok, State} = handle_call(load_config, #state{socket=Socket}),
+ {ok, State}.
+
+handle_event(#twig{level=Level, msgid=MsgId, msg=Msg, pid=Pid}, State) ->
+ write(Level, MsgId, Msg, Pid, State),
+ {ok, State};
+
+% OTP standard events
+handle_event({Class, _GL, {Pid, Format, Args}}, #state{level=Max} = State) ->
+ case otp_event_level(Class, Format) of
+ undefined ->
+ {ok, State};
+ Level when Level > Max ->
+ {ok, State};
+ Level ->
+ {MsgId, Msg} = message(Format, Args),
+ write(Level, MsgId, Msg, Pid, State),
+ {ok, State}
+ end;
+
+handle_event(_Event, State) ->
+ {ok, State}.
+
+handle_call({set_level, Level}, State) ->
+ {ok, ok, State#state{level = Level}};
+
+handle_call(load_config, State) ->
+ Host = case inet:getaddr(get_env(host, undefined), inet) of
+ {ok, Address} ->
+ Address;
+ {error, _} ->
+ undefined
+ end,
+ NewState = State#state{
+ host = Host,
+ port = get_env(port, 514),
+ hostname = net_adm:localhost(),
+ os_pid = os:getpid(),
+ appid = get_env(appid, "twig"),
+ facility = twig_util:facility(get_env(facility, local2)),
+ level = twig_util:level(get_env(level, info))
+ },
+ {ok, ok, NewState};
+
+handle_call(_Call, State) ->
+ {ok, ignored, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Reason, State) ->
+ gen_udp:close(State#state.socket).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
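+% write/5 frames each message as an RFC 5424 syslog record:
+%   <PRI>VERSION TIMESTAMP HOSTNAME APP-NAME PROCID MSGID - MSG
+% where PRI is the facility code bor'd with the severity. When no host is
+% configured, send/4 falls back to standard output.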
+write(Level, undefined, Msg, Pid, State) ->
+ write(Level, "--------", Msg, Pid, State);
+write(Level, MsgId, Msg, Pid, State) when is_list(Msg); is_binary(Msg) ->
+ #state{facility=Facil, appid=App, hostname=Hostname, host=Host, port=Port,
+ socket=Socket} = State,
+ Pre = io_lib:format("<~B>~B ~s ~s ~s ~p ~s - ", [Facil bor Level,
+ ?SYSLOG_VERSION, twig_util:iso8601_timestamp(), Hostname, App, Pid,
+ MsgId]),
+ send(Socket, Host, Port, [Pre, Msg, $\n]).
+
+send(_, undefined, _, Packet) ->
+ io:put_chars(Packet);
+send(Socket, Host, Port, Packet) ->
+ gen_udp:send(Socket, Host, Port, Packet).
+
+message(crash_report, Report) ->
+ Msg = case erts_debug:flat_size(Report) > get_env(max_term_size, 8192) of
+ true ->
+ MaxString = get_env(max_message_size, 16000),
+            % trunc_io:print/2 returns {Output, Length}; keep only the iodata
+            {Truncated, _} = trunc_io:print(Report, MaxString),
+            ["*Truncated* - ", Truncated];
+ false ->
+ proc_lib:format(Report)
+ end,
+ {crash_report, Msg};
+message(supervisor_report, Report) ->
+ Name = get_value(supervisor, Report),
+ Error = get_value(errorContext, Report),
+ Reason = get_value(reason, Report),
+ Offender = get_value(offender, Report),
+ ChildPid = get_value(pid, Offender),
+ ChildName = get_value(name, Offender),
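+    % Newer OTP releases report the child MFA under 'mfargs'; older ones
+    % used 'mfa'. Both branches bind M and F for the format call below.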
+ case get_value(mfa, Offender) of
+ undefined ->
+ {M,F,_} = get_value(mfargs, Offender);
+ {M,F,_} ->
+ ok
+ end,
+ {supervisor_report, twig_util:format("~p ~p (~p) child: ~p [~p] ~p:~p",
+ [Name, Error, Reason, ChildName, ChildPid, M, F])};
+message(Type, Report) when Type == std_error;
+ Type == std_info;
+ Type == std_warning;
+ Type == progress_report;
+ Type == progress ->
+ {Type, twig_util:format("~2048.0p", [Report])};
+message(Format, Args) when is_list(Format) ->
+ {msg, twig_util:format(Format, Args)};
+message(Format, Args) ->
+ {unknown, twig_util:format("~2048.0p ~2048.0p", [Format, Args])}.
+
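+% Map standard error_logger event classes and report types to syslog severities.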
+otp_event_level(_, crash_report) -> ?LEVEL_CRIT;
+otp_event_level(_, supervisor_report) -> ?LEVEL_WARN;
+otp_event_level(_, supervisor) -> ?LEVEL_WARN;
+otp_event_level(_, progress_report) -> ?LEVEL_DEBUG;
+otp_event_level(_, progress) -> ?LEVEL_DEBUG;
+otp_event_level(error, _) -> ?LEVEL_ERR;
+otp_event_level(warning_msg, _) -> ?LEVEL_WARN;
+otp_event_level(info_msg, _) -> ?LEVEL_NOTICE;
+otp_event_level(error_report, _) -> ?LEVEL_ERR;
+otp_event_level(warning_report, _) -> ?LEVEL_WARN;
+otp_event_level(info_report, _) -> ?LEVEL_NOTICE;
+otp_event_level(_, _) -> ?LEVEL_DEBUG.
+
+get_value(Key, Props) ->
+ case lists:keyfind(Key, 1, Props) of
+ {Key, Value} ->
+ Value;
+ false ->
+ undefined
+ end.
diff --git a/deps/twig/src/twig_int.hrl b/deps/twig/src/twig_int.hrl
new file mode 100644
index 00000000..a510d405
--- /dev/null
+++ b/deps/twig/src/twig_int.hrl
@@ -0,0 +1,27 @@
+% Copyright 2011 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-define(SYSLOG_VERSION, 1).
+
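+% Syslog severity levels as defined in RFC 5424.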
+-define(LEVEL_DEBUG, 7).
+-define(LEVEL_INFO, 6).
+-define(LEVEL_NOTICE, 5).
+-define(LEVEL_WARN, 4).
+-define(LEVEL_ERR, 3).
+-define(LEVEL_CRIT, 2).
+-define(LEVEL_ALERT, 1).
+-define(LEVEL_EMERG, 0).
+
+-record(twig, {level, msgid, msg, pid}).
diff --git a/deps/twig/src/twig_monitor.erl b/deps/twig/src/twig_monitor.erl
new file mode 100644
index 00000000..c32a0c69
--- /dev/null
+++ b/deps/twig/src/twig_monitor.erl
@@ -0,0 +1,51 @@
+% Copyright 2011 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(twig_monitor).
+
+-behaviour(gen_server).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([start_link/0]).
+
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
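+% add_sup_handler/3 ties the handler's lifetime to this process: if the
+% handler crashes or is removed, we receive a gen_event_EXIT message
+% (handled below) and stop, letting the supervisor reinstall it.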
+init(_) ->
+ ok = gen_event:add_sup_handler(error_logger, twig_event_handler, []),
+ {ok, nil}.
+
+handle_call(_Call, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Cast, State) ->
+ {noreply, State}.
+
+handle_info({gen_event_EXIT, twig_event_handler, Reason} = Msg, State) ->
+ io:format("~p~n", [Msg]),
+ {stop, Reason, State};
+
+handle_info(_Msg, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_, State, _) ->
+ {ok, State}.
diff --git a/deps/twig/src/twig_sup.erl b/deps/twig/src/twig_sup.erl
new file mode 100644
index 00000000..0fe73ef4
--- /dev/null
+++ b/deps/twig/src/twig_sup.erl
@@ -0,0 +1,26 @@
+% Copyright 2011 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(twig_sup).
+-behaviour(supervisor).
+-export([start_link/0, init/1]).
+
+%% Helper macro for declaring children of supervisor
+-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ {ok, { {one_for_one, 5, 10}, [?CHILD(twig_monitor, worker)]} }.
diff --git a/deps/twig/src/twig_util.erl b/deps/twig/src/twig_util.erl
new file mode 100644
index 00000000..b4f830c9
--- /dev/null
+++ b/deps/twig/src/twig_util.erl
@@ -0,0 +1,85 @@
+% Copyright 2011 Cloudant
+%
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(twig_util).
+
+-export([format/2, get_env/2, level/1, facility/1, iso8601_timestamp/0]).
+
+level(debug) -> 7;
+level(info) -> 6;
+level(notice) -> 5;
+level(warn) -> 4;
+level(warning) -> 4;
+level(err) -> 3;
+level(error) -> 3;
+level(crit) -> 2;
+level(alert) -> 1;
+level(emerg) -> 0;
+level(panic) -> 0;
+
+level(I) when is_integer(I), I >= 0, I =< 7 ->
+ I;
+level(_BadLevel) ->
+ 3.
+
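+% Syslog facility codes, pre-shifted left by 3 bits so that a severity can
+% be bor'd in directly to form the RFC 5424 PRI value.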
+facility(kern) -> (0 bsl 3) ; % kernel messages
+facility(user) -> (1 bsl 3) ; % random user-level messages
+facility(mail) -> (2 bsl 3) ; % mail system
+facility(daemon) -> (3 bsl 3) ; % system daemons
+facility(auth) -> (4 bsl 3) ; % security/authorization messages
+facility(syslog) -> (5 bsl 3) ; % messages generated internally by syslogd
+facility(lpr) -> (6 bsl 3) ; % line printer subsystem
+facility(news) -> (7 bsl 3) ; % network news subsystem
+facility(uucp) -> (8 bsl 3) ; % UUCP subsystem
+facility(cron) -> (9 bsl 3) ; % clock daemon
+facility(authpriv) -> (10 bsl 3); % security/authorization messages (private)
+facility(ftp) -> (11 bsl 3); % ftp daemon
+
+facility(local0) -> (16 bsl 3);
+facility(local1) -> (17 bsl 3);
+facility(local2) -> (18 bsl 3);
+facility(local3) -> (19 bsl 3);
+facility(local4) -> (20 bsl 3);
+facility(local5) -> (21 bsl 3);
+facility(local6) -> (22 bsl 3);
+facility(local7) -> (23 bsl 3).
+
+
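+% Returns a UTC timestamp such as "2014-01-15T18:13:16.123456Z".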
+iso8601_timestamp() ->
+ {_,_,Micro} = Now = os:timestamp(),
+ {{Year,Month,Date},{Hour,Minute,Second}} = calendar:now_to_datetime(Now),
+ Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0B.~6.10.0BZ",
+ io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second, Micro]).
+
+format(Format, Data) ->
+ MaxTermSize = get_env(max_term_size, 8192),
+ case erts_debug:flat_size(Data) > MaxTermSize of
+ true ->
+ MaxString = get_env(max_message_size, 16000),
+ {Truncated, _} = trunc_io:print(Data, MaxString),
+ ["*Truncated* ", Format, " - ", Truncated];
+ false ->
+ io_lib:format(Format, Data)
+ end.
+
+get_env(Key, Default) ->
+ case application:get_env(twig, Key) of
+ {ok, Value} ->
+ Value;
+ undefined ->
+ Default
+ end.