author     Filipe David Borba Manana <fdmanana@apache.org>    2010-08-15 18:37:45 +0000
committer  Filipe David Borba Manana <fdmanana@apache.org>    2010-08-15 18:37:45 +0000
commit     64da83d7ae72bb23030c3b5c0e810225e09cb706
tree       857d893ed55476e7508faa0587dab3f2052fdb54
parent     0eaf3b12c5da1fdc8d19e08e1768239204418aa2
Bumping ibrowse library to version 1.6.2 (latest).
It brings a few important bug fixes and new features, among them: 1) https requests are now routed through the proxy; 2) SSL support for direct connections; 3) URL parsing fixes; 4) a new headers_as_is option.

git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@985730 13f79535-47bb-0310-9956-ffa450edef68
-rw-r--r--  src/couchdb/couch_rep_httpc.erl      |    3
-rw-r--r--  src/ibrowse/ibrowse.app.in           |   10
-rw-r--r--  src/ibrowse/ibrowse.erl              |  516
-rw-r--r--  src/ibrowse/ibrowse_app.erl          |    9
-rw-r--r--  src/ibrowse/ibrowse_http_client.erl  | 1686
-rw-r--r--  src/ibrowse/ibrowse_lb.erl           |    8
-rw-r--r--  src/ibrowse/ibrowse_lib.erl          |  292
-rw-r--r--  src/ibrowse/ibrowse_sup.erl          |    6
-rw-r--r--  src/ibrowse/ibrowse_test.erl         |    7
9 files changed, 1359 insertions, 1178 deletions
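
For orientation only (not part of the patch): the features listed in the commit message map onto the ibrowse API roughly as in the shell sketch below. The item numbers follow the commit message (item 3, URL parsing, has no separate call); host names, the proxy address and ports are placeholders, and the ibrowse application is assumed to be started.

%% 1) https request through a proxy: ibrowse 1.6.x now issues a CONNECT
%%    request and upgrades the connection to SSL before sending the request.
ibrowse:send_req("https://example.com/", [], get, [],
                 [{proxy_host, "proxy"}, {proxy_port, 8080}]).

%% 2) Direct SSL connection via the new arity-1 worker functions.
{ok, Pid} = ibrowse:spawn_link_worker_process("https://example.com/").
ibrowse:send_req_direct(Pid, "https://example.com/", [], get).

%% 4) headers_as_is: the request carries exactly the headers given and
%%    ibrowse adds none of its own, so Host must be supplied by the caller.
ibrowse:send_req("http://example.com/", [{"Host", "example.com"}], get, [],
                 [{headers_as_is, true}]).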
diff --git a/src/couchdb/couch_rep_httpc.erl b/src/couchdb/couch_rep_httpc.erl
index 768d88a3..1f2b49ac 100644
--- a/src/couchdb/couch_rep_httpc.erl
+++ b/src/couchdb/couch_rep_httpc.erl
@@ -203,8 +203,7 @@ spawn_worker_process(Req) ->
Pid.
spawn_link_worker_process(Req) ->
- Url = ibrowse_lib:parse_url(Req#http_db.url),
- {ok, Pid} = ibrowse_http_client:start_link(Url),
+ {ok, Pid} = ibrowse:spawn_link_worker_process(Req#http_db.url),
Pid.
maybe_decompress(Headers, Body) ->
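
The couch_rep_httpc.erl hunk above is the only CouchDB-side change: instead of parsing the URL and starting ibrowse_http_client directly, the replicator now uses the public ibrowse:spawn_link_worker_process/1 added in 1.6.x. A minimal sketch of the same pattern in isolation (the URL and path are placeholders; such workers sit outside ibrowse's load balancing, so pipeline size is the caller's responsibility, as the edoc below notes):

{ok, Worker} = ibrowse:spawn_link_worker_process("http://127.0.0.1:5984/").
%% Requests are then issued against that specific connection process.
ibrowse:send_req_direct(Worker, "http://127.0.0.1:5984/_all_dbs", [], get).
%% Tear the worker down explicitly when done with it.
ibrowse:stop_worker_process(Worker).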
diff --git a/src/ibrowse/ibrowse.app.in b/src/ibrowse/ibrowse.app.in
index 4f43dd92..208c311b 100644
--- a/src/ibrowse/ibrowse.app.in
+++ b/src/ibrowse/ibrowse.app.in
@@ -1,10 +1,10 @@
{application, ibrowse,
[{description, "HTTP client application"},
- {vsn, "1.5.1"},
- {modules, [ ibrowse,
- ibrowse_http_client,
- ibrowse_app,
- ibrowse_sup,
+ {vsn, "1.6.2"},
+ {modules, [ ibrowse,
+ ibrowse_http_client,
+ ibrowse_app,
+ ibrowse_sup,
ibrowse_lib,
ibrowse_lb ]},
{registered, []},
diff --git a/src/ibrowse/ibrowse.erl b/src/ibrowse/ibrowse.erl
index 1913ef59..09d36a36 100644
--- a/src/ibrowse/ibrowse.erl
+++ b/src/ibrowse/ibrowse.erl
@@ -6,8 +6,8 @@
%%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
%%%-------------------------------------------------------------------
%% @author Chandrashekhar Mullaparthi <chandrashekhar dot mullaparthi at gmail dot com>
-%% @copyright 2005-2009 Chandrashekhar Mullaparthi
-%% @version 1.5.2
+%% @copyright 2005-2010 Chandrashekhar Mullaparthi
+%% @version 1.6.0
%% @doc The ibrowse application implements an HTTP 1.1 client. This
%% module implements the API of the HTTP client. There is one named
%% process called 'ibrowse' which assists in load balancing and maintaining configuration. There is one load balancing process per unique webserver. There is
@@ -21,22 +21,22 @@
%% <p>Here are a few sample invocations.</p>
%%
%% <code>
-%% ibrowse:send_req("http://intranet/messenger/", [], get).
+%% ibrowse:send_req("http://intranet/messenger/", [], get).
%% <br/><br/>
-%%
-%% ibrowse:send_req("http://www.google.com/", [], get, [],
-%% [{proxy_user, "XXXXX"},
-%% {proxy_password, "XXXXX"},
-%% {proxy_host, "proxy"},
-%% {proxy_port, 8080}], 1000).
+%%
+%% ibrowse:send_req("http://www.google.com/", [], get, [],
+%% [{proxy_user, "XXXXX"},
+%% {proxy_password, "XXXXX"},
+%% {proxy_host, "proxy"},
+%% {proxy_port, 8080}], 1000).
%% <br/><br/>
%%
%%ibrowse:send_req("http://www.erlang.org/download/otp_src_R10B-3.tar.gz", [], get, [],
-%% [{proxy_user, "XXXXX"},
-%% {proxy_password, "XXXXX"},
-%% {proxy_host, "proxy"},
-%% {proxy_port, 8080},
-%% {save_response_to_file, true}], 1000).
+%% [{proxy_user, "XXXXX"},
+%% {proxy_password, "XXXXX"},
+%% {proxy_host, "proxy"},
+%% {proxy_port, 8080},
+%% {save_response_to_file, true}], 1000).
%% <br/><br/>
%%
%% ibrowse:send_req("http://www.erlang.org", [], head).
@@ -48,17 +48,12 @@
%% ibrowse:send_req("http://www.bbc.co.uk", [], trace).
%%
%% <br/><br/>
-%% ibrowse:send_req("http://www.google.com", [], get, [],
+%% ibrowse:send_req("http://www.google.com", [], get, [],
%% [{stream_to, self()}]).
%% </code>
%%
-%% <p>A driver exists which implements URL encoding in C, but the
-%% speed achieved using only erlang has been good enough, so the
-%% driver isn't actually used.</p>
-module(ibrowse).
--vsn('$Id: ibrowse.erl,v 1.8 2009/07/01 22:43:19 chandrusf Exp $ ').
-
-behaviour(gen_server).
%%--------------------------------------------------------------------
%% Include files
@@ -70,48 +65,50 @@
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+ terminate/2, code_change/3]).
%% API interface
-export([
- rescan_config/0,
- rescan_config/1,
- get_config_value/1,
- get_config_value/2,
- spawn_worker_process/2,
- spawn_link_worker_process/2,
- stop_worker_process/1,
- send_req/3,
- send_req/4,
- send_req/5,
- send_req/6,
- send_req_direct/4,
- send_req_direct/5,
- send_req_direct/6,
- send_req_direct/7,
- stream_next/1,
- set_max_sessions/3,
- set_max_pipeline_size/3,
- set_dest/3,
- trace_on/0,
- trace_off/0,
- trace_on/2,
- trace_off/2,
- all_trace_off/0,
- show_dest_status/0,
- show_dest_status/2
- ]).
+ rescan_config/0,
+ rescan_config/1,
+ get_config_value/1,
+ get_config_value/2,
+ spawn_worker_process/1,
+ spawn_worker_process/2,
+ spawn_link_worker_process/1,
+ spawn_link_worker_process/2,
+ stop_worker_process/1,
+ send_req/3,
+ send_req/4,
+ send_req/5,
+ send_req/6,
+ send_req_direct/4,
+ send_req_direct/5,
+ send_req_direct/6,
+ send_req_direct/7,
+ stream_next/1,
+ set_max_sessions/3,
+ set_max_pipeline_size/3,
+ set_dest/3,
+ trace_on/0,
+ trace_off/0,
+ trace_on/2,
+ trace_off/2,
+ all_trace_off/0,
+ show_dest_status/0,
+ show_dest_status/2
+ ]).
-ifdef(debug).
-compile(export_all).
-endif.
-import(ibrowse_lib, [
- parse_url/1,
- get_value/3,
- do_trace/2
- ]).
-
+ parse_url/1,
+ get_value/3,
+ do_trace/2
+ ]).
+
-record(state, {trace = false}).
-include("ibrowse.hrl").
@@ -159,7 +156,7 @@ stop() ->
send_req(Url, Headers, Method) ->
send_req(Url, Headers, Method, [], []).
-%% @doc Same as send_req/3.
+%% @doc Same as send_req/3.
%% If a list is specified for the body it has to be a flat list. The body can also be a fun/0 or a fun/1. <br/>
%% If fun/0, the connection handling process will repeatdely call the fun until it returns an error or eof. <pre>Fun() = {ok, Data} | eof</pre><br/>
%% If fun/1, the connection handling process will repeatedly call the fun with the supplied state until it returns an error or eof. <pre>Fun(State) = {ok, Data} | {ok, Data, NewState} | eof</pre>
@@ -169,19 +166,19 @@ send_req(Url, Headers, Method) ->
send_req(Url, Headers, Method, Body) ->
send_req(Url, Headers, Method, Body, []).
-%% @doc Same as send_req/4.
+%% @doc Same as send_req/4.
%% For a description of SSL Options, look in the <a href="http://www.erlang.org/doc/apps/ssl/index.html">ssl</a> manpage. If the
%% HTTP Version to use is not specified, the default is 1.1.
%% <br/>
-%% <p>The <code>host_header</code> option is useful in the case where ibrowse is
+%% <ul>
+%% <li>The <code>host_header</code> option is useful in the case where ibrowse is
%% connecting to a component such as <a
%% href="http://www.stunnel.org">stunnel</a> which then sets up a
%% secure connection to a webserver. In this case, the URL supplied to
%% ibrowse must have the stunnel host/port details, but that won't
%% make sense to the destination webserver. This option can then be
%% used to specify what should go in the <code>Host</code> header in
-%% the request.</p>
-%% <ul>
+%% the request.</li>
%% <li>The <code>stream_to</code> option can be used to have the HTTP
%% response streamed to a process as messages as data arrives on the
%% socket. If the calling process wishes to control the rate at which
@@ -220,12 +217,25 @@ send_req(Url, Headers, Method, Body) ->
%% ibrowse:send_req("http://www.example.com/cgi-bin/request", [], get, [], [{connect_timeout, 100}], 1000).
%% </code>
%% In the above invocation, if the connection isn't established within
-%% 100 milliseconds, the request will fail with
+%% 100 milliseconds, the request will fail with
%% <code>{error, conn_failed}</code>.<br/>
%% If connection setup succeeds, the total time allowed for the
%% request to complete will be 1000 milliseconds minus the time taken
%% for connection setup.
%% </li>
+%%
+%% <li> The <code>socket_options</code> option can be used to set
+%% specific options on the socket. The <code>{active, true | false | once}</code>
+%% and <code>{packet_type, Packet_type}</code> will be filtered out by ibrowse. </li>
+%%
+%% <li> The <code>headers_as_is</code> option is to enable the caller
+%% to send headers exactly as specified in the request without ibrowse
+%% adding some of its own. Required for some picky servers apparently. </li>
+%%
+%% <li>The <code>give_raw_headers</code> option is to enable the
+%% caller to get access to the raw status line and raw unparsed
+%% headers. Not quite sure why someone would want this, but one of my
+%% users asked for it, so here it is. </li>
%% </ul>
%%
%% @spec send_req(Url::string(), Headers::headerList(), Method::method(), Body::body(), Options::optionList()) -> response()
@@ -234,7 +244,7 @@ send_req(Url, Headers, Method, Body) ->
%% {response_format,response_format()}|
%% {stream_chunk_size, integer()} |
%% {max_pipeline_size, integer()} |
-%% {trace, boolean()} |
+%% {trace, boolean()} |
%% {is_ssl, boolean()} |
%% {ssl_options, [SSLOpt]} |
%% {pool_name, atom()} |
@@ -253,13 +263,18 @@ send_req(Url, Headers, Method, Body) ->
%% {host_header, string()} |
%% {inactivity_timeout, integer()} |
%% {connect_timeout, integer()} |
-%% {transfer_encoding, {chunked, ChunkSize}}
+%% {socket_options, Sock_opts} |
+%% {transfer_encoding, {chunked, ChunkSize}} |
+%% {headers_as_is, boolean()} |
+%% {give_raw_headers, boolean()}
%%
%% stream_to() = process() | {process(), once}
%% process() = pid() | atom()
%% username() = string()
%% password() = string()
%% SSLOpt = term()
+%% Sock_opts = [Sock_opt]
+%% Sock_opt = term()
%% ChunkSize = integer()
%% srtf() = boolean() | filename()
%% filename() = string()
@@ -267,54 +282,54 @@ send_req(Url, Headers, Method, Body) ->
send_req(Url, Headers, Method, Body, Options) ->
send_req(Url, Headers, Method, Body, Options, 30000).
-%% @doc Same as send_req/5.
+%% @doc Same as send_req/5.
%% All timeout values are in milliseconds.
%% @spec send_req(Url, Headers::headerList(), Method::method(), Body::body(), Options::optionList(), Timeout) -> response()
%% Timeout = integer() | infinity
send_req(Url, Headers, Method, Body, Options, Timeout) ->
case catch parse_url(Url) of
- #url{host = Host,
- port = Port,
- protocol = Protocol} = Parsed_url ->
- Lb_pid = case ets:lookup(ibrowse_lb, {Host, Port}) of
- [] ->
- get_lb_pid(Parsed_url);
- [#lb_pid{pid = Lb_pid_1}] ->
- Lb_pid_1
- end,
- Max_sessions = get_max_sessions(Host, Port, Options),
- Max_pipeline_size = get_max_pipeline_size(Host, Port, Options),
- Options_1 = merge_options(Host, Port, Options),
- {SSLOptions, IsSSL} =
- case (Protocol == https) orelse
- get_value(is_ssl, Options_1, false) of
- false -> {[], false};
- true -> {get_value(ssl_options, Options_1, []), true}
- end,
- case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
- Max_sessions,
- Max_pipeline_size,
- {SSLOptions, IsSSL}) of
- {ok, Conn_Pid} ->
- do_send_req(Conn_Pid, Parsed_url, Headers,
- Method, Body, Options_1, Timeout);
- Err ->
- Err
- end;
- Err ->
- {error, {url_parsing_failed, Err}}
+ #url{host = Host,
+ port = Port,
+ protocol = Protocol} = Parsed_url ->
+ Lb_pid = case ets:lookup(ibrowse_lb, {Host, Port}) of
+ [] ->
+ get_lb_pid(Parsed_url);
+ [#lb_pid{pid = Lb_pid_1}] ->
+ Lb_pid_1
+ end,
+ Max_sessions = get_max_sessions(Host, Port, Options),
+ Max_pipeline_size = get_max_pipeline_size(Host, Port, Options),
+ Options_1 = merge_options(Host, Port, Options),
+ {SSLOptions, IsSSL} =
+ case (Protocol == https) orelse
+ get_value(is_ssl, Options_1, false) of
+ false -> {[], false};
+ true -> {get_value(ssl_options, Options_1, []), true}
+ end,
+ case ibrowse_lb:spawn_connection(Lb_pid, Parsed_url,
+ Max_sessions,
+ Max_pipeline_size,
+ {SSLOptions, IsSSL}) of
+ {ok, Conn_Pid} ->
+ do_send_req(Conn_Pid, Parsed_url, Headers,
+ Method, Body, Options_1, Timeout);
+ Err ->
+ Err
+ end;
+ Err ->
+ {error, {url_parsing_failed, Err}}
end.
merge_options(Host, Port, Options) ->
Config_options = get_config_value({options, Host, Port}, []),
lists:foldl(
fun({Key, Val}, Acc) ->
- case lists:keysearch(Key, 1, Options) of
- false ->
- [{Key, Val} | Acc];
- _ ->
- Acc
- end
+ case lists:keysearch(Key, 1, Options) of
+ false ->
+ [{Key, Val} | Acc];
+ _ ->
+ Acc
+ end
end, Options, Config_options).
get_lb_pid(Url) ->
@@ -322,11 +337,11 @@ get_lb_pid(Url) ->
get_max_sessions(Host, Port, Options) ->
get_value(max_sessions, Options,
- get_config_value({max_sessions, Host, Port}, ?DEF_MAX_SESSIONS)).
+ get_config_value({max_sessions, Host, Port}, ?DEF_MAX_SESSIONS)).
get_max_pipeline_size(Host, Port, Options) ->
get_value(max_pipeline_size, Options,
- get_config_value({max_pipeline_size, Host, Port}, ?DEF_MAX_PIPELINE_SIZE)).
+ get_config_value({max_pipeline_size, Host, Port}, ?DEF_MAX_PIPELINE_SIZE)).
%% @doc Deprecated. Use set_max_sessions/3 and set_max_pipeline_size/3
%% for achieving the same effect.
@@ -343,7 +358,7 @@ set_dest(_Host, _Port, [H | _]) ->
exit({invalid_option, H});
set_dest(_, _, []) ->
ok.
-
+
%% @doc Set the maximum number of connections allowed to a specific Host:Port.
%% @spec set_max_sessions(Host::string(), Port::integer(), Max::integer()) -> ok
set_max_sessions(Host, Port, Max) when is_integer(Max), Max > 0 ->
@@ -356,21 +371,21 @@ set_max_pipeline_size(Host, Port, Max) when is_integer(Max), Max > 0 ->
do_send_req(Conn_Pid, Parsed_url, Headers, Method, Body, Options, Timeout) ->
case catch ibrowse_http_client:send_req(Conn_Pid, Parsed_url,
- Headers, Method, ensure_bin(Body),
- Options, Timeout) of
- {'EXIT', {timeout, _}} ->
- {error, req_timedout};
- {'EXIT', Reason} ->
- {error, {'EXIT', Reason}};
- {ok, St_code, Headers, Body} = Ret when is_binary(Body) ->
- case get_value(response_format, Options, list) of
- list ->
- {ok, St_code, Headers, binary_to_list(Body)};
- binary ->
- Ret
- end;
- Ret ->
- Ret
+ Headers, Method, ensure_bin(Body),
+ Options, Timeout) of
+ {'EXIT', {timeout, _}} ->
+ {error, req_timedout};
+ {'EXIT', Reason} ->
+ {error, {'EXIT', Reason}};
+ {ok, St_code, Headers, Body} = Ret when is_binary(Body) ->
+ case get_value(response_format, Options, list) of
+ list ->
+ {ok, St_code, Headers, binary_to_list(Body)};
+ binary ->
+ Ret
+ end;
+ Ret ->
+ Ret
end.
ensure_bin(L) when is_list(L) -> list_to_binary(L);
@@ -391,12 +406,21 @@ ensure_bin({Fun, _} = Body) when is_function(Fun) -> Body.
%% <b>Note:</b> It is the responsibility of the calling process to control
%% pipeline size on such connections.
%%
+%% @spec spawn_worker_process(Url::string()) -> {ok, pid()}
+spawn_worker_process(Url) ->
+ ibrowse_http_client:start(Url).
+
%% @spec spawn_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
spawn_worker_process(Host, Port) ->
ibrowse_http_client:start({Host, Port}).
-%% @doc Same as spawn_worker_process/2 except the the calling process
+%% @doc Same as spawn_worker_process/1 except the the calling process
%% is linked to the worker process which is spawned.
+%% @spec spawn_link_worker_process(Url::string()) -> {ok, pid()}
+spawn_link_worker_process(Url) ->
+ ibrowse_http_client:start_link(Url).
+
+%% @spec spawn_link_worker_process(Host::string(), Port::integer()) -> {ok, pid()}
spawn_link_worker_process(Host, Port) ->
ibrowse_http_client:start_link({Host, Port}).
@@ -426,30 +450,30 @@ send_req_direct(Conn_pid, Url, Headers, Method, Body, Options) ->
%% returned by spawn_worker_process/2 or spawn_link_worker_process/2
send_req_direct(Conn_pid, Url, Headers, Method, Body, Options, Timeout) ->
case catch parse_url(Url) of
- #url{host = Host,
- port = Port} = Parsed_url ->
- Options_1 = merge_options(Host, Port, Options),
- case do_send_req(Conn_pid, Parsed_url, Headers, Method, Body, Options_1, Timeout) of
- {error, {'EXIT', {noproc, _}}} ->
- {error, worker_is_dead};
- Ret ->
- Ret
- end;
- Err ->
- {error, {url_parsing_failed, Err}}
+ #url{host = Host,
+ port = Port} = Parsed_url ->
+ Options_1 = merge_options(Host, Port, Options),
+ case do_send_req(Conn_pid, Parsed_url, Headers, Method, Body, Options_1, Timeout) of
+ {error, {'EXIT', {noproc, _}}} ->
+ {error, worker_is_dead};
+ Ret ->
+ Ret
+ end;
+ Err ->
+ {error, {url_parsing_failed, Err}}
end.
%% @doc Tell ibrowse to stream the next chunk of data to the
%% caller. Should be used in conjunction with the
%% <code>stream_to</code> option
%% @spec stream_next(Req_id :: req_id()) -> ok | {error, unknown_req_id}
-stream_next(Req_id) ->
+stream_next(Req_id) ->
case ets:lookup(ibrowse_stream, {req_id_pid, Req_id}) of
- [] ->
- {error, unknown_req_id};
- [{_, Pid}] ->
- catch Pid ! {stream_next, Req_id},
- ok
+ [] ->
+ {error, unknown_req_id};
+ [{_, Pid}] ->
+ catch Pid ! {stream_next, Req_id},
+ ok
end.
%% @doc Turn tracing on for the ibrowse process
@@ -462,7 +486,7 @@ trace_off() ->
%% @doc Turn tracing on for all connections to the specified HTTP
%% server. Host is whatever is specified as the domain name in the URL
%% @spec trace_on(Host, Port) -> ok
-%% Host = string()
+%% Host = string()
%% Port = integer()
trace_on(Host, Port) ->
ibrowse ! {trace, true, Host, Port},
@@ -483,75 +507,75 @@ all_trace_off() ->
show_dest_status() ->
Dests = lists:filter(fun({lb_pid, {Host, Port}, _}) when is_list(Host),
- is_integer(Port) ->
- true;
- (_) ->
- false
- end, ets:tab2list(ibrowse_lb)),
+ is_integer(Port) ->
+ true;
+ (_) ->
+ false
+ end, ets:tab2list(ibrowse_lb)),
All_ets = ets:all(),
io:format("~-40.40s | ~-5.5s | ~-10.10s | ~s~n",
- ["Server:port", "ETS", "Num conns", "LB Pid"]),
+ ["Server:port", "ETS", "Num conns", "LB Pid"]),
io:format("~80.80.=s~n", [""]),
lists:foreach(fun({lb_pid, {Host, Port}, Lb_pid}) ->
- case lists:dropwhile(
- fun(Tid) ->
- ets:info(Tid, owner) /= Lb_pid
- end, All_ets) of
- [] ->
- io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
- [Host ++ ":" ++ integer_to_list(Port),
- "",
- "",
- io_lib:format("~p", [Lb_pid])]
- );
- [Tid | _] ->
- catch (
- begin
- Size = ets:info(Tid, size),
- io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
- [Host ++ ":" ++ integer_to_list(Port),
- integer_to_list(Tid),
- integer_to_list(Size),
- io_lib:format("~p", [Lb_pid])]
- )
- end
- )
- end
- end, Dests).
-
+ case lists:dropwhile(
+ fun(Tid) ->
+ ets:info(Tid, owner) /= Lb_pid
+ end, All_ets) of
+ [] ->
+ io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
+ [Host ++ ":" ++ integer_to_list(Port),
+ "",
+ "",
+ io_lib:format("~p", [Lb_pid])]
+ );
+ [Tid | _] ->
+ catch (
+ begin
+ Size = ets:info(Tid, size),
+ io:format("~40.40s | ~-5.5s | ~-5.5s | ~s~n",
+ [Host ++ ":" ++ integer_to_list(Port),
+ io_lib:format("~p", [Tid]),
+ integer_to_list(Size),
+ io_lib:format("~p", [Lb_pid])]
+ )
+ end
+ )
+ end
+ end, Dests).
+
%% @doc Shows some internal information about load balancing to a
%% specified Host:Port. Info about workers spawned using
%% spawn_worker_process/2 or spawn_link_worker_process/2 is not
%% included.
show_dest_status(Host, Port) ->
case ets:lookup(ibrowse_lb, {Host, Port}) of
- [] ->
- no_active_processes;
- [#lb_pid{pid = Lb_pid}] ->
- io:format("Load Balancer Pid : ~p~n", [Lb_pid]),
- io:format("LB process msg q size : ~p~n", [(catch process_info(Lb_pid, message_queue_len))]),
- case lists:dropwhile(
- fun(Tid) ->
- ets:info(Tid, owner) /= Lb_pid
- end, ets:all()) of
- [] ->
- io:format("Couldn't locate ETS table for ~p~n", [Lb_pid]);
- [Tid | _] ->
- First = ets:first(Tid),
- Last = ets:last(Tid),
- Size = ets:info(Tid, size),
- io:format("LB ETS table id : ~p~n", [Tid]),
- io:format("Num Connections : ~p~n", [Size]),
- case Size of
- 0 ->
- ok;
- _ ->
- {First_p_sz, _} = First,
- {Last_p_sz, _} = Last,
- io:format("Smallest pipeline : ~1000.p~n", [First_p_sz]),
- io:format("Largest pipeline : ~1000.p~n", [Last_p_sz])
- end
- end
+ [] ->
+ no_active_processes;
+ [#lb_pid{pid = Lb_pid}] ->
+ io:format("Load Balancer Pid : ~p~n", [Lb_pid]),
+ io:format("LB process msg q size : ~p~n", [(catch process_info(Lb_pid, message_queue_len))]),
+ case lists:dropwhile(
+ fun(Tid) ->
+ ets:info(Tid, owner) /= Lb_pid
+ end, ets:all()) of
+ [] ->
+ io:format("Couldn't locate ETS table for ~p~n", [Lb_pid]);
+ [Tid | _] ->
+ First = ets:first(Tid),
+ Last = ets:last(Tid),
+ Size = ets:info(Tid, size),
+ io:format("LB ETS table id : ~p~n", [Tid]),
+ io:format("Num Connections : ~p~n", [Size]),
+ case Size of
+ 0 ->
+ ok;
+ _ ->
+ {First_p_sz, _} = First,
+ {Last_p_sz, _} = Last,
+ io:format("Smallest pipeline : ~1000.p~n", [First_p_sz]),
+ io:format("Largest pipeline : ~1000.p~n", [Last_p_sz])
+ end
+ end
end.
%% @doc Clear current configuration for ibrowse and load from the file
@@ -592,40 +616,40 @@ init(_) ->
import_config() ->
case code:priv_dir(ibrowse) of
- {error, _} = Err ->
- Err;
- PrivDir ->
- Filename = filename:join(PrivDir, "ibrowse.conf"),
- import_config(Filename)
+ {error, _} = Err ->
+ Err;
+ PrivDir ->
+ Filename = filename:join(PrivDir, "ibrowse.conf"),
+ import_config(Filename)
end.
import_config(Filename) ->
case file:consult(Filename) of
- {ok, Terms} ->
- ets:delete_all_objects(ibrowse_conf),
- Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options})
- when is_list(Host), is_integer(Port),
- is_integer(MaxSess), MaxSess > 0,
- is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
- I = [{{max_sessions, Host, Port}, MaxSess},
- {{max_pipeline_size, Host, Port}, MaxPipe},
- {{options, Host, Port}, Options}],
- lists:foreach(
- fun({X, Y}) ->
- ets:insert(ibrowse_conf,
- #ibrowse_conf{key = X,
- value = Y})
- end, I);
- ({K, V}) ->
- ets:insert(ibrowse_conf,
- #ibrowse_conf{key = K,
- value = V});
- (X) ->
- io:format("Skipping unrecognised term: ~p~n", [X])
- end,
- lists:foreach(Fun, Terms);
- Err ->
- Err
+ {ok, Terms} ->
+ ets:delete_all_objects(ibrowse_conf),
+ Fun = fun({dest, Host, Port, MaxSess, MaxPipe, Options})
+ when is_list(Host), is_integer(Port),
+ is_integer(MaxSess), MaxSess > 0,
+ is_integer(MaxPipe), MaxPipe > 0, is_list(Options) ->
+ I = [{{max_sessions, Host, Port}, MaxSess},
+ {{max_pipeline_size, Host, Port}, MaxPipe},
+ {{options, Host, Port}, Options}],
+ lists:foreach(
+ fun({X, Y}) ->
+ ets:insert(ibrowse_conf,
+ #ibrowse_conf{key = X,
+ value = Y})
+ end, I);
+ ({K, V}) ->
+ ets:insert(ibrowse_conf,
+ #ibrowse_conf{key = K,
+ value = V});
+ (X) ->
+ io:format("Skipping unrecognised term: ~p~n", [X])
+ end,
+ lists:foreach(Fun, Terms);
+ Err ->
+ Err
end.
%% @doc Internal export
@@ -636,10 +660,10 @@ get_config_value(Key) ->
%% @doc Internal export
get_config_value(Key, DefVal) ->
case ets:lookup(ibrowse_conf, Key) of
- [] ->
- DefVal;
- [#ibrowse_conf{value = V}] ->
- V
+ [] ->
+ DefVal;
+ [#ibrowse_conf{value = V}] ->
+ V
end.
set_config_value(Key, Val) ->
@@ -700,36 +724,36 @@ handle_info(all_trace_off, State) ->
Mspec = [{{ibrowse_conf,{trace,'$1','$2'},true},[],[{{'$1','$2'}}]}],
Trace_on_dests = ets:select(ibrowse_conf, Mspec),
Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _) ->
- case lists:member({H, P}, Trace_on_dests) of
- false ->
- ok;
- true ->
- catch Pid ! {trace, false}
- end;
- (_, Acc) ->
- Acc
- end,
+ case lists:member({H, P}, Trace_on_dests) of
+ false ->
+ ok;
+ true ->
+ catch Pid ! {trace, false}
+ end;
+ (_, Acc) ->
+ Acc
+ end,
ets:foldl(Fun, undefined, ibrowse_lb),
ets:select_delete(ibrowse_conf, [{{ibrowse_conf,{trace,'$1','$2'},true},[],['true']}]),
{noreply, State};
-
+
handle_info({trace, Bool}, State) ->
put(my_trace_flag, Bool),
{noreply, State};
handle_info({trace, Bool, Host, Port}, State) ->
Fun = fun(#lb_pid{host_port = {H, P}, pid = Pid}, _)
- when H == Host,
- P == Port ->
- catch Pid ! {trace, Bool};
- (_, Acc) ->
- Acc
- end,
+ when H == Host,
+ P == Port ->
+ catch Pid ! {trace, Bool};
+ (_, Acc) ->
+ Acc
+ end,
ets:foldl(Fun, undefined, ibrowse_lb),
ets:insert(ibrowse_conf, #ibrowse_conf{key = {trace, Host, Port},
- value = Bool}),
+ value = Bool}),
{noreply, State};
-
+
handle_info(_Info, State) ->
{noreply, State}.
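
The edoc changes above document the new request options (socket_options, headers_as_is, give_raw_headers) and the stream_next/1 flow-control call used with {stream_to, {Pid, once}}. An illustrative shell sketch of that caller-controlled streaming flow, with the URL and socket option chosen only as examples:

%% Ask for the response to be streamed to this process, one chunk at a time.
%% Note that {active, _} and packet-related socket options are filtered out.
{ibrowse_req_id, Req_id} =
    ibrowse:send_req("http://example.com/big_file", [], get, [],
                     [{stream_to, {self(), once}},
                      {socket_options, [{keepalive, true}]},
                      {give_raw_headers, true}]).
%% Each call releases the next chunk; data arrives as messages to self().
ibrowse:stream_next(Req_id).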
diff --git a/src/ibrowse/ibrowse_app.erl b/src/ibrowse/ibrowse_app.erl
index 8c83e8f1..d3a0f7bb 100644
--- a/src/ibrowse/ibrowse_app.erl
+++ b/src/ibrowse/ibrowse_app.erl
@@ -1,12 +1,11 @@
%%%-------------------------------------------------------------------
%%% File : ibrowse_app.erl
%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
-%%% Description :
+%%% Description :
%%%
%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
%%%-------------------------------------------------------------------
-module(ibrowse_app).
--vsn('$Id: ibrowse_app.erl,v 1.1 2005/05/05 22:28:28 chandrusf Exp $ ').
-behaviour(application).
%%--------------------------------------------------------------------
@@ -42,11 +41,11 @@
%% Func: start/2
%% Returns: {ok, Pid} |
%% {ok, Pid, State} |
-%% {error, Reason}
+%% {error, Reason}
%%--------------------------------------------------------------------
start(_Type, _StartArgs) ->
case ibrowse_sup:start_link() of
- {ok, Pid} ->
+ {ok, Pid} ->
{ok, Pid};
Error ->
Error
@@ -54,7 +53,7 @@ start(_Type, _StartArgs) ->
%%--------------------------------------------------------------------
%% Func: stop/1
-%% Returns: any
+%% Returns: any
%%--------------------------------------------------------------------
stop(_State) ->
ok.
diff --git a/src/ibrowse/ibrowse_http_client.erl b/src/ibrowse/ibrowse_http_client.erl
index 65d9cb9c..1633e5b3 100644
--- a/src/ibrowse/ibrowse_http_client.erl
+++ b/src/ibrowse/ibrowse_http_client.erl
@@ -6,8 +6,6 @@
%%% Created : 11 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
%%%-------------------------------------------------------------------
-module(ibrowse_http_client).
--vsn('$Id: ibrowse_http_client.erl,v 1.19 2009/07/01 22:43:19 chandrusf Exp $ ').
-
-behaviour(gen_server).
%%--------------------------------------------------------------------
%% Include files
@@ -16,11 +14,11 @@
%%--------------------------------------------------------------------
%% External exports
-export([
- start_link/1,
- start/1,
- stop/1,
- send_req/7
- ]).
+ start_link/1,
+ start/1,
+ stop/1,
+ send_req/7
+ ]).
-ifdef(debug).
-compile(export_all).
@@ -28,41 +26,45 @@
%% gen_server callbacks
-export([
- init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3
- ]).
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+ ]).
-include("ibrowse.hrl").
--record(state, {host, port,
- use_proxy = false, proxy_auth_digest,
- ssl_options = [], is_ssl = false, socket,
- reqs=queue:new(), cur_req, status=idle, http_status_code,
- reply_buffer = <<>>, rep_buf_size=0, streamed_size = 0,
- recvd_headers=[],
- is_closing, send_timer, content_length,
- deleted_crlf = false, transfer_encoding,
- chunk_size, chunk_size_buffer = <<>>, recvd_chunk_size,
- lb_ets_tid, cur_pipeline_size = 0, prev_req_id
- }).
+-record(state, {host, port, connect_timeout,
+ use_proxy = false, proxy_auth_digest,
+ ssl_options = [], is_ssl = false, socket,
+ proxy_tunnel_setup = false,
+ tunnel_setup_queue = [],
+ reqs=queue:new(), cur_req, status=idle, http_status_code,
+ reply_buffer = <<>>, rep_buf_size=0, streamed_size = 0,
+ recvd_headers=[],
+ status_line, raw_headers,
+ is_closing, send_timer, content_length,
+ deleted_crlf = false, transfer_encoding,
+ chunk_size, chunk_size_buffer = <<>>, recvd_chunk_size,
+ lb_ets_tid, cur_pipeline_size = 0, prev_req_id
+ }).
-record(request, {url, method, options, from,
- stream_to, caller_controls_socket = false,
- req_id,
- stream_chunk_size,
- save_response_to_file = false,
- tmp_file_name, tmp_file_fd,
- response_format}).
+ stream_to, caller_controls_socket = false,
+ caller_socket_options = [],
+ req_id,
+ stream_chunk_size,
+ save_response_to_file = false,
+ tmp_file_name, tmp_file_fd,
+ response_format}).
-import(ibrowse_lib, [
- get_value/2,
- get_value/3,
- do_trace/2
- ]).
+ get_value/2,
+ get_value/3,
+ do_trace/2
+ ]).
-define(DEFAULT_STREAM_CHUNK_SIZE, 1024*1024).
@@ -80,7 +82,8 @@ start_link(Args) ->
gen_server:start_link(?MODULE, Args, []).
stop(Conn_pid) ->
- gen_server:call(Conn_pid, stop).
+ catch gen_server:call(Conn_pid, stop),
+ ok.
send_req(Conn_Pid, Url, Headers, Method, Body, Options, Timeout) ->
gen_server:call(
@@ -101,26 +104,23 @@ send_req(Conn_Pid, Url, Headers, Method, Body, Options, Timeout) ->
%%--------------------------------------------------------------------
init({Lb_Tid, #url{host = Host, port = Port}, {SSLOptions, Is_ssl}}) ->
State = #state{host = Host,
- port = Port,
- ssl_options = SSLOptions,
- is_ssl = Is_ssl,
- lb_ets_tid = Lb_Tid},
+ port = Port,
+ ssl_options = SSLOptions,
+ is_ssl = Is_ssl,
+ lb_ets_tid = Lb_Tid},
put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
{ok, State};
+init(Url) when is_list(Url) ->
+ case catch ibrowse_lib:parse_url(Url) of
+ #url{protocol = Protocol} = Url_rec ->
+ init({undefined, Url_rec, {[], Protocol == https}});
+ {'EXIT', _} ->
+ {error, invalid_url}
+ end;
init({Host, Port}) ->
State = #state{host = Host,
- port = Port},
- put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
- put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
- {ok, State};
-init(#url{host=Host, port=Port, protocol=Protocol}) ->
- State = #state{
- host = Host,
- port = Port,
- is_ssl = (Protocol == https),
- ssl_options = [{ssl_imp, new}, {depth, 9}]
- },
+ port = Port},
put(ibrowse_trace_token, [Host, $:, integer_to_list(Port)]),
put(my_trace_flag, ibrowse_lib:get_trace_status(Host, Port)),
{ok, State}.
@@ -141,13 +141,13 @@ handle_call({send_req, _}, _From, #state{is_closing = true} = State) ->
{reply, {error, connection_closing}, State};
handle_call({send_req, {Url, Headers, Method, Body, Options, Timeout}},
- From, State) ->
+ From, State) ->
send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State);
handle_call(stop, _From, State) ->
do_close(State),
do_error_reply(State, closing_on_request),
- {stop, normal, ok, State#state{socket=undefined}};
+ {stop, normal, ok, State};
handle_call(Request, _From, State) ->
Reply = {unknown_request, Request},
@@ -177,9 +177,8 @@ handle_info({ssl, _Sock, Data}, State) ->
handle_sock_data(Data, State);
handle_info({stream_next, Req_id}, #state{socket = Socket,
- is_ssl = Is_ssl,
- cur_req = #request{req_id = Req_id}} = State) ->
- do_setopts(Socket, [{active, once}], Is_ssl),
+ cur_req = #request{req_id = Req_id}} = State) ->
+ do_setopts(Socket, [{active, once}], State),
{noreply, State};
handle_info({stream_next, _Req_id}, State) ->
@@ -204,13 +203,13 @@ handle_info({ssl_error, _Sock}, State) ->
{stop, normal, State};
handle_info({req_timedout, From}, State) ->
- case lists:keysearch(From, #request.from, queue:to_list(State#state.reqs)) of
- false ->
- {noreply, State};
- {value, _} ->
- shutting_down(State),
- do_error_reply(State, req_timedout),
- {stop, normal, State}
+ case lists:keymember(From, #request.from, queue:to_list(State#state.reqs)) of
+ false ->
+ {noreply, State};
+ true ->
+ shutting_down(State),
+ do_error_reply(State, req_timedout),
+ {stop, normal, State}
end;
handle_info(timeout, State) ->
@@ -224,7 +223,7 @@ handle_info({trace, Bool}, State) ->
handle_info(Info, State) ->
io:format("Unknown message recvd for ~1000.p:~1000.p -> ~p~n",
- [State#state.host, State#state.port, Info]),
+ [State#state.host, State#state.port, Info]),
io:format("Recvd unknown message ~p when in state: ~p~n", [Info, State]),
{noreply, State}.
@@ -260,133 +259,132 @@ handle_sock_data(Data, #state{status=idle}=State) ->
handle_sock_data(Data, #state{status = get_header}=State) ->
case parse_response(Data, State) of
- {error, _Reason} ->
- shutting_down(State),
- {stop, normal, State};
- stop ->
- shutting_down(State),
- {stop, normal, State};
- State_1 ->
- active_once(State_1),
- {noreply, State_1, get_inac_timeout(State_1)}
+ {error, _Reason} ->
+ shutting_down(State),
+ {stop, normal, State};
+ State_1 ->
+ active_once(State_1),
+ set_inac_timer(State_1),
+ {noreply, State_1}
end;
handle_sock_data(Data, #state{status = get_body,
- content_length = CL,
- http_status_code = StatCode,
- recvd_headers = Headers,
- chunk_size = CSz} = State) ->
+ content_length = CL,
+ http_status_code = StatCode,
+ recvd_headers = Headers,
+ chunk_size = CSz} = State) ->
case (CL == undefined) and (CSz == undefined) of
- true ->
- case accumulate_response(Data, State) of
- {error, Reason} ->
- shutting_down(State),
- fail_pipelined_requests(State,
- {error, {Reason, {stat_code, StatCode}, Headers}}),
- {stop, normal, State};
- State_1 ->
- active_once(State_1),
- {noreply, State_1, get_inac_timeout(State_1)}
- end;
- _ ->
- case parse_11_response(Data, State) of
- {error, Reason} ->
- shutting_down(State),
- fail_pipelined_requests(State,
- {error, {Reason, {stat_code, StatCode}, Headers}}),
- {stop, normal, State};
- stop ->
- shutting_down(State),
- {stop, normal, State};
- State_1 ->
- active_once(State_1),
- {noreply, State_1, get_inac_timeout(State_1)}
- end
+ true ->
+ case accumulate_response(Data, State) of
+ {error, Reason} ->
+ shutting_down(State),
+ fail_pipelined_requests(State,
+ {error, {Reason, {stat_code, StatCode}, Headers}}),
+ {stop, normal, State};
+ State_1 ->
+ active_once(State_1),
+ set_inac_timer(State_1),
+ {noreply, State_1}
+ end;
+ _ ->
+ case parse_11_response(Data, State) of
+ {error, Reason} ->
+ shutting_down(State),
+ fail_pipelined_requests(State,
+ {error, {Reason, {stat_code, StatCode}, Headers}}),
+ {stop, normal, State};
+ State_1 ->
+ active_once(State_1),
+ set_inac_timer(State_1),
+ {noreply, State_1}
+ end
end.
accumulate_response(Data,
- #state{
- cur_req = #request{save_response_to_file = true,
- tmp_file_fd = undefined} = CurReq,
- http_status_code=[$2 | _]}=State) ->
- TmpFilename = make_tmp_filename(),
+ #state{
+ cur_req = #request{save_response_to_file = Srtf,
+ tmp_file_fd = undefined} = CurReq,
+ http_status_code=[$2 | _]}=State) when Srtf /= false ->
+ TmpFilename = make_tmp_filename(Srtf),
case file:open(TmpFilename, [write, delayed_write, raw]) of
- {ok, Fd} ->
- accumulate_response(Data, State#state{
- cur_req = CurReq#request{
- tmp_file_fd = Fd,
- tmp_file_name = TmpFilename}});
- {error, Reason} ->
- {error, {file_open_error, Reason}}
+ {ok, Fd} ->
+ accumulate_response(Data, State#state{
+ cur_req = CurReq#request{
+ tmp_file_fd = Fd,
+ tmp_file_name = TmpFilename}});
+ {error, Reason} ->
+ {error, {file_open_error, Reason}}
end;
-accumulate_response(Data, #state{cur_req = #request{save_response_to_file = true,
- tmp_file_fd = Fd},
- transfer_encoding=chunked,
- reply_buffer = Reply_buf,
- http_status_code=[$2 | _]
- } = State) ->
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf,
+ tmp_file_fd = Fd},
+ transfer_encoding=chunked,
+ reply_buffer = Reply_buf,
+ http_status_code=[$2 | _]
+ } = State) when Srtf /= false ->
case file:write(Fd, [Reply_buf, Data]) of
- ok ->
- State#state{reply_buffer = <<>>};
- {error, Reason} ->
- {error, {file_write_error, Reason}}
+ ok ->
+ State#state{reply_buffer = <<>>};
+ {error, Reason} ->
+ {error, {file_write_error, Reason}}
end;
-accumulate_response(Data, #state{cur_req = #request{save_response_to_file = true,
- tmp_file_fd = Fd},
- reply_buffer = RepBuf,
- http_status_code=[$2 | _]
- } = State) ->
+accumulate_response(Data, #state{cur_req = #request{save_response_to_file = Srtf,
+ tmp_file_fd = Fd},
+ reply_buffer = RepBuf,
+ http_status_code=[$2 | _]
+ } = State) when Srtf /= false ->
case file:write(Fd, [RepBuf, Data]) of
- ok ->
- State#state{reply_buffer = <<>>};
- {error, Reason} ->
- {error, {file_write_error, Reason}}
+ ok ->
+ State#state{reply_buffer = <<>>};
+ {error, Reason} ->
+ {error, {file_write_error, Reason}}
end;
accumulate_response(<<>>, State) ->
State;
accumulate_response(Data, #state{reply_buffer = RepBuf,
- rep_buf_size = RepBufSize,
- streamed_size = Streamed_size,
- cur_req = CurReq}=State) ->
+ rep_buf_size = RepBufSize,
+ streamed_size = Streamed_size,
+ cur_req = CurReq}=State) ->
#request{stream_to=StreamTo, req_id=ReqId,
- stream_chunk_size = Stream_chunk_size,
- response_format = Response_format,
- caller_controls_socket = Caller_controls_socket} = CurReq,
+ stream_chunk_size = Stream_chunk_size,
+ response_format = Response_format,
+ caller_controls_socket = Caller_controls_socket} = CurReq,
RepBuf_1 = list_to_binary([RepBuf, Data]),
New_data_size = RepBufSize - Streamed_size,
case StreamTo of
- undefined ->
- State#state{reply_buffer = RepBuf_1};
- _ when Caller_controls_socket == true ->
- do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
- State#state{reply_buffer = <<>>,
- streamed_size = Streamed_size + size(RepBuf_1)};
- _ when New_data_size >= Stream_chunk_size ->
- {Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
- do_interim_reply(StreamTo, Response_format, ReqId, Stream_chunk),
- accumulate_response(
- Rem_data,
- State#state{
- reply_buffer = <<>>,
- streamed_size = Streamed_size + Stream_chunk_size});
- _ ->
- State#state{reply_buffer = RepBuf_1}
+ undefined ->
+ State#state{reply_buffer = RepBuf_1};
+ _ when Caller_controls_socket == true ->
+ do_interim_reply(StreamTo, Response_format, ReqId, RepBuf_1),
+ State#state{reply_buffer = <<>>,
+ streamed_size = Streamed_size + size(RepBuf_1)};
+ _ when New_data_size >= Stream_chunk_size ->
+ {Stream_chunk, Rem_data} = split_binary(RepBuf_1, Stream_chunk_size),
+ do_interim_reply(StreamTo, Response_format, ReqId, Stream_chunk),
+ accumulate_response(
+ Rem_data,
+ State#state{
+ reply_buffer = <<>>,
+ streamed_size = Streamed_size + Stream_chunk_size});
+ _ ->
+ State#state{reply_buffer = RepBuf_1}
end.
-make_tmp_filename() ->
+make_tmp_filename(true) ->
DownloadDir = ibrowse:get_config_value(download_dir, filename:absname("./")),
{A,B,C} = now(),
filename:join([DownloadDir,
- "ibrowse_tmp_file_"++
- integer_to_list(A) ++
- integer_to_list(B) ++
- integer_to_list(C)]).
+ "ibrowse_tmp_file_"++
+ integer_to_list(A) ++
+ integer_to_list(B) ++
+ integer_to_list(C)]);
+make_tmp_filename(File) when is_list(File) ->
+ File.
%%--------------------------------------------------------------------
%% Handles the case when the server closes the socket
%%--------------------------------------------------------------------
-handle_sock_closed(#state{status=get_header}=State) ->
+handle_sock_closed(#state{status=get_header} = State) ->
shutting_down(State),
do_error_reply(State, connection_closed);
@@ -397,40 +395,73 @@ handle_sock_closed(#state{cur_req=undefined} = State) ->
%% Connection-Close header and has closed the socket to indicate end
%% of response. There maybe requests pipelined which need a response.
handle_sock_closed(#state{reply_buffer = Buf, reqs = Reqs, http_status_code = SC,
- is_closing = IsClosing,
- cur_req = #request{tmp_file_name=TmpFilename,
- tmp_file_fd=Fd} = CurReq,
- status = get_body, recvd_headers = Headers}=State) ->
+ is_closing = IsClosing,
+ cur_req = #request{tmp_file_name=TmpFilename,
+ tmp_file_fd=Fd} = CurReq,
+ status = get_body,
+ recvd_headers = Headers,
+ status_line = Status_line,
+ raw_headers = Raw_headers
+ }=State) ->
#request{from=From, stream_to=StreamTo, req_id=ReqId,
- response_format = Resp_format} = CurReq,
+ response_format = Resp_format,
+ options = Options} = CurReq,
case IsClosing of
- true ->
- {_, Reqs_1} = queue:out(Reqs),
- case TmpFilename of
- undefined ->
- do_reply(State, From, StreamTo, ReqId, Resp_format,
- {ok, SC, Headers, Buf});
- _ ->
- file:close(Fd),
- do_reply(State, From, StreamTo, ReqId, Resp_format,
- {ok, SC, Headers, {file, TmpFilename}})
- end,
- do_error_reply(State#state{reqs = Reqs_1}, connection_closed),
- State;
- _ ->
- do_error_reply(State, connection_closed),
- State
+ true ->
+ {_, Reqs_1} = queue:out(Reqs),
+ Body = case TmpFilename of
+ undefined ->
+ Buf;
+ _ ->
+ file:close(Fd),
+ {file, TmpFilename}
+ end,
+ Reply = case get_value(give_raw_headers, Options, false) of
+ true ->
+ {ok, Status_line, Raw_headers, Body};
+ false ->
+ {ok, SC, Headers, Buf}
+ end,
+ do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+ do_error_reply(State#state{reqs = Reqs_1}, connection_closed),
+ State;
+ _ ->
+ do_error_reply(State, connection_closed),
+ State
end.
-do_connect(Host, Port, _Options, #state{is_ssl=true, ssl_options=SSLOptions}, Timeout) ->
+do_connect(Host, Port, Options, #state{is_ssl = true,
+ use_proxy = false,
+ ssl_options = SSLOptions},
+ Timeout) ->
+ Caller_socket_options = get_value(socket_options, Options, []),
+ Other_sock_options = filter_sock_options(SSLOptions ++ Caller_socket_options),
ssl:connect(Host, Port,
- [binary, {nodelay, true}, {active, false} | SSLOptions],
- Timeout);
-do_connect(Host, Port, _Options, _State, Timeout) ->
- gen_tcp:connect(Host, Port,
- [binary, {nodelay, true}, {active, false}],
- Timeout).
-
+ [binary, {nodelay, true}, {active, false} | Other_sock_options],
+ Timeout);
+do_connect(Host, Port, Options, _State, Timeout) ->
+ Caller_socket_options = get_value(socket_options, Options, []),
+ Other_sock_options = filter_sock_options(Caller_socket_options),
+ gen_tcp:connect(Host, to_integer(Port),
+ [binary, {nodelay, true}, {active, false} | Other_sock_options],
+ Timeout).
+
+%% We don't want the caller to specify certain options
+filter_sock_options(Opts) ->
+ lists:filter(fun({active, _}) ->
+ false;
+ ({packet, _}) ->
+ false;
+ (list) ->
+ false;
+ (_) ->
+ true
+ end, Opts).
+
+do_send(Req, #state{socket = Sock,
+ is_ssl = true,
+ use_proxy = true,
+ proxy_tunnel_setup = Pts}) when Pts /= done -> gen_tcp:send(Sock, Req);
do_send(Req, #state{socket = Sock, is_ssl = true}) -> ssl:send(Sock, Req);
do_send(Req, #state{socket = Sock, is_ssl = false}) -> gen_tcp:send(Sock, Req).
@@ -450,261 +481,328 @@ do_send_body(Body, State) ->
do_send_body1(Source, Resp, State) ->
case Resp of
- {ok, Data} ->
- do_send(Data, State),
- do_send_body({Source}, State);
- {ok, Data, New_source_state} ->
- do_send(Data, State),
- do_send_body({Source, New_source_state}, State);
- eof ->
- ok;
- Err ->
- Err
+ {ok, Data} ->
+ do_send(Data, State),
+ do_send_body({Source}, State);
+ {ok, Data, New_source_state} ->
+ do_send(Data, State),
+ do_send_body({Source, New_source_state}, State);
+ eof ->
+ ok;
+ Err ->
+ Err
end.
do_close(#state{socket = undefined}) -> ok;
+do_close(#state{socket = Sock,
+ is_ssl = true,
+ use_proxy = true,
+ proxy_tunnel_setup = Pts
+ }) when Pts /= done -> gen_tcp:close(Sock);
do_close(#state{socket = Sock, is_ssl = true}) -> ssl:close(Sock);
do_close(#state{socket = Sock, is_ssl = false}) -> gen_tcp:close(Sock).
active_once(#state{cur_req = #request{caller_controls_socket = true}}) ->
ok;
-active_once(#state{socket = Socket, is_ssl = Is_ssl}) ->
- do_setopts(Socket, [{active, once}], Is_ssl).
+active_once(#state{socket = Socket} = State) ->
+ do_setopts(Socket, [{active, once}], State).
-do_setopts(Sock, Opts, true) -> ssl:setopts(Sock, Opts);
-do_setopts(Sock, Opts, false) -> inet:setopts(Sock, Opts).
+do_setopts(_Sock, [], _) -> ok;
+do_setopts(Sock, Opts, #state{is_ssl = true,
+ use_proxy = true,
+ proxy_tunnel_setup = Pts}
+ ) when Pts /= done -> inet:setopts(Sock, Opts);
+do_setopts(Sock, Opts, #state{is_ssl = true}) -> ssl:setopts(Sock, Opts);
+do_setopts(Sock, Opts, _) -> inet:setopts(Sock, Opts).
check_ssl_options(Options, State) ->
case get_value(is_ssl, Options, false) of
- false ->
- State;
- true ->
- State#state{is_ssl=true, ssl_options=get_value(ssl_options, Options)}
+ false ->
+ State;
+ true ->
+ State#state{is_ssl=true, ssl_options=get_value(ssl_options, Options)}
end.
send_req_1(From,
- #url{host = Host,
- port = Port} = Url,
- Headers, Method, Body, Options, Timeout,
- #state{socket = undefined} = State) ->
+ #url{host = Host,
+ port = Port} = Url,
+ Headers, Method, Body, Options, Timeout,
+ #state{socket = undefined} = State) ->
{Host_1, Port_1, State_1} =
- case get_value(proxy_host, Options, false) of
- false ->
- {Host, Port, State};
- PHost ->
- ProxyUser = get_value(proxy_user, Options, []),
- ProxyPassword = get_value(proxy_password, Options, []),
- Digest = http_auth_digest(ProxyUser, ProxyPassword),
- {PHost, get_value(proxy_port, Options, 80),
- State#state{use_proxy = true,
- proxy_auth_digest = Digest}}
- end,
+ case get_value(proxy_host, Options, false) of
+ false ->
+ {Host, Port, State};
+ PHost ->
+ ProxyUser = get_value(proxy_user, Options, []),
+ ProxyPassword = get_value(proxy_password, Options, []),
+ Digest = http_auth_digest(ProxyUser, ProxyPassword),
+ {PHost, get_value(proxy_port, Options, 80),
+ State#state{use_proxy = true,
+ proxy_auth_digest = Digest}}
+ end,
State_2 = check_ssl_options(Options, State_1),
do_trace("Connecting...~n", []),
Start_ts = now(),
Conn_timeout = get_value(connect_timeout, Options, Timeout),
case do_connect(Host_1, Port_1, Options, State_2, Conn_timeout) of
- {ok, Sock} ->
- do_trace("Connected!~n", []),
- End_ts = now(),
- Timeout_1 = case Timeout of
- infinity ->
- infinity;
- _ ->
- Timeout - trunc(round(timer:now_diff(End_ts, Start_ts) / 1000))
- end,
- State_3 = State_2#state{socket = Sock},
- send_req_1(From, Url, Headers, Method, Body, Options, Timeout_1, State_3);
- Err ->
- shutting_down(State_2),
- do_trace("Error connecting. Reason: ~1000.p~n", [Err]),
- gen_server:reply(From, {error, conn_failed}),
- {stop, normal, State_2}
+ {ok, Sock} ->
+ do_trace("Connected!~n", []),
+ End_ts = now(),
+ Timeout_1 = case Timeout of
+ infinity ->
+ infinity;
+ _ ->
+ Timeout - trunc(round(timer:now_diff(End_ts, Start_ts) / 1000))
+ end,
+ State_3 = State_2#state{socket = Sock,
+ connect_timeout = Conn_timeout},
+ send_req_1(From, Url, Headers, Method, Body, Options, Timeout_1, State_3);
+ Err ->
+ shutting_down(State_2),
+ do_trace("Error connecting. Reason: ~1000.p~n", [Err]),
+ gen_server:reply(From, {error, conn_failed}),
+ {stop, normal, State_2}
end;
+
+%% Send a CONNECT request.
+%% Wait for 200 OK
+%% Upgrade to SSL connection
+%% Then send request
+
send_req_1(From,
- #url{abspath = AbsPath,
- host = Host,
- port = Port,
- path = RelPath} = Url,
- Headers, Method, Body, Options, Timeout,
- #state{status = Status} = State) ->
+ #url{
+ host = Server_host,
+ port = Server_port
+ } = Url,
+ Headers, Method, Body, Options, Timeout,
+ #state{
+ proxy_tunnel_setup = false,
+ use_proxy = true,
+ is_ssl = true} = State) ->
+ NewReq = #request{
+ method = connect,
+ options = Options
+ },
+ State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
+ Pxy_auth_headers = maybe_modify_headers(Url, Method, Options, [], State_1),
+ Path = [Server_host, $:, integer_to_list(Server_port)],
+ {Req, Body_1} = make_request(connect, Pxy_auth_headers,
+ Path, Path,
+ [], Options, State_1),
+ trace_request(Req),
+ case do_send(Req, State) of
+ ok ->
+ case do_send_body(Body_1, State_1) of
+ ok ->
+ active_once(State_1),
+ Ref = case Timeout of
+ infinity ->
+ undefined;
+ _ ->
+ erlang:send_after(Timeout, self(), {req_timedout, From})
+ end,
+ State_2 = State_1#state{status = get_header,
+ cur_req = NewReq,
+ send_timer = Ref,
+ proxy_tunnel_setup = in_progress,
+ tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout}]},
+ set_inac_timer(State_1),
+ {noreply, State_2};
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, send_failed}),
+ {stop, normal, State_1}
+ end;
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, send_failed}),
+ {stop, normal, State_1}
+ end;
+
+send_req_1(From, Url, Headers, Method, Body, Options, Timeout,
+ #state{proxy_tunnel_setup = in_progress,
+ tunnel_setup_queue = Q} = State) ->
+ do_trace("Queued SSL request awaiting tunnel setup: ~n"
+ "URL : ~s~n"
+ "Method : ~p~n"
+ "Headers : ~p~n", [Url, Method, Headers]),
+ {noreply, State#state{tunnel_setup_queue = [{From, Url, Headers, Method, Body, Options, Timeout} | Q]}};
+
+send_req_1(From,
+ #url{abspath = AbsPath,
+ path = RelPath} = Url,
+ Headers, Method, Body, Options, Timeout,
+ #state{status = Status,
+ socket = Socket,
+ is_ssl = Is_ssl} = State) ->
ReqId = make_req_id(),
Resp_format = get_value(response_format, Options, list),
+ Caller_socket_options = get_value(socket_options, Options, []),
{StreamTo, Caller_controls_socket} =
- case get_value(stream_to, Options, undefined) of
- {Caller, once} when is_pid(Caller) or
- is_atom(Caller) ->
- Async_pid_rec = {{req_id_pid, ReqId}, self()},
- true = ets:insert(ibrowse_stream, Async_pid_rec),
- {Caller, true};
- undefined ->
- {undefined, false};
- Caller when is_pid(Caller) or
- is_atom(Caller) ->
- {Caller, false};
- Stream_to_inv ->
- exit({invalid_option, {stream_to, Stream_to_inv}})
- end,
+ case get_value(stream_to, Options, undefined) of
+ {Caller, once} when is_pid(Caller) or
+ is_atom(Caller) ->
+ Async_pid_rec = {{req_id_pid, ReqId}, self()},
+ true = ets:insert(ibrowse_stream, Async_pid_rec),
+ {Caller, true};
+ undefined ->
+ {undefined, false};
+ Caller when is_pid(Caller) or
+ is_atom(Caller) ->
+ {Caller, false};
+ Stream_to_inv ->
+ exit({invalid_option, {stream_to, Stream_to_inv}})
+ end,
SaveResponseToFile = get_value(save_response_to_file, Options, false),
NewReq = #request{url = Url,
- method = Method,
- stream_to = StreamTo,
- caller_controls_socket = Caller_controls_socket,
- options = Options,
- req_id = ReqId,
- save_response_to_file = SaveResponseToFile,
- stream_chunk_size = get_stream_chunk_size(Options),
- response_format = Resp_format,
- from = From},
+ method = Method,
+ stream_to = StreamTo,
+ caller_controls_socket = Caller_controls_socket,
+ caller_socket_options = Caller_socket_options,
+ options = Options,
+ req_id = ReqId,
+ save_response_to_file = SaveResponseToFile,
+ stream_chunk_size = get_stream_chunk_size(Options),
+ response_format = Resp_format,
+ from = From},
State_1 = State#state{reqs=queue:in(NewReq, State#state.reqs)},
- Headers_1 = add_auth_headers(Url, Options, Headers, State),
- HostHeaderValue = case lists:keysearch(host_header, 1, Options) of
- false ->
- case Port of
- 80 -> Host;
- _ -> [Host, ":", integer_to_list(Port)]
- end;
- {value, {_, Host_h_val}} ->
- Host_h_val
- end,
+ Headers_1 = maybe_modify_headers(Url, Method, Options, Headers, State_1),
{Req, Body_1} = make_request(Method,
- [{"Host", HostHeaderValue} | Headers_1],
- AbsPath, RelPath, Body, Options, State#state.use_proxy),
- case get(my_trace_flag) of
- true ->
- %%Avoid the binary operations if trace is not on...
- NReq = binary_to_list(list_to_binary(Req)),
- do_trace("Sending request: ~n"
- "--- Request Begin ---~n~s~n"
- "--- Request End ---~n", [NReq]);
- _ -> ok
- end,
- case do_send(Req, State) of
- ok ->
- case do_send_body(Body_1, State) of
- ok ->
- State_2 = inc_pipeline_counter(State_1),
- active_once(State_1),
- Ref = case Timeout of
- infinity ->
- undefined;
- _ ->
- erlang:send_after(Timeout, self(), {req_timedout, From})
- end,
- State_3 = case Status of
- idle ->
- State_2#state{status = get_header,
- cur_req = NewReq,
- send_timer = Ref};
- _ ->
- State_2#state{send_timer = Ref}
- end,
- case StreamTo of
- undefined ->
- ok;
- _ ->
- gen_server:reply(From, {ibrowse_req_id, ReqId})
- end,
- {noreply, State_3, get_inac_timeout(State_3)};
- Err ->
- shutting_down(State_1),
- do_trace("Send failed... Reason: ~p~n", [Err]),
- gen_server:reply(From, {error, send_failed}),
- {stop, normal, State_1}
- end;
- Err ->
- shutting_down(State_1),
- do_trace("Send failed... Reason: ~p~n", [Err]),
- gen_server:reply(From, {error, send_failed}),
- {stop, normal, State_1}
+ Headers_1,
+ AbsPath, RelPath, Body, Options, State_1),
+ trace_request(Req),
+ do_setopts(Socket, Caller_socket_options, Is_ssl),
+ case do_send(Req, State_1) of
+ ok ->
+ case do_send_body(Body_1, State_1) of
+ ok ->
+ State_2 = inc_pipeline_counter(State_1),
+ active_once(State_2),
+ Ref = case Timeout of
+ infinity ->
+ undefined;
+ _ ->
+ erlang:send_after(Timeout, self(), {req_timedout, From})
+ end,
+ State_3 = case Status of
+ idle ->
+ State_2#state{status = get_header,
+ cur_req = NewReq,
+ send_timer = Ref};
+ _ ->
+ State_2#state{send_timer = Ref}
+ end,
+ case StreamTo of
+ undefined ->
+ ok;
+ _ ->
+ gen_server:reply(From, {ibrowse_req_id, ReqId})
+ end,
+ set_inac_timer(State_1),
+ {noreply, State_3};
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, send_failed}),
+ {stop, normal, State_1}
+ end;
+ Err ->
+ shutting_down(State_1),
+ do_trace("Send failed... Reason: ~p~n", [Err]),
+ gen_server:reply(From, {error, send_failed}),
+ {stop, normal, State_1}
+ end.
+
+maybe_modify_headers(#url{}, connect, _, Headers, State) ->
+ add_proxy_auth_headers(State, Headers);
+maybe_modify_headers(#url{host = Host, port = Port} = Url,
+ _Method,
+ Options, Headers, State) ->
+ case get_value(headers_as_is, Options, false) of
+ false ->
+ Headers_1 = add_auth_headers(Url, Options, Headers, State),
+ HostHeaderValue = case lists:keysearch(host_header, 1, Options) of
+ false ->
+ case Port of
+ 80 -> Host;
+ _ -> [Host, ":", integer_to_list(Port)]
+ end;
+ {value, {_, Host_h_val}} ->
+ Host_h_val
+ end,
+ [{"Host", HostHeaderValue} | Headers_1];
+ true ->
+ Headers
end.
add_auth_headers(#url{username = User,
- password = UPw},
- Options,
- Headers,
- #state{use_proxy = UseProxy,
- proxy_auth_digest = ProxyAuthDigest}) ->
+ password = UPw},
+ Options,
+ Headers,
+ State) ->
Headers_1 = case User of
- undefined ->
- case get_value(basic_auth, Options, undefined) of
- undefined ->
- Headers;
- {U,P} ->
- [{"Authorization", ["Basic ", http_auth_digest(U, P)]} | Headers]
- end;
- _ ->
- [{"Authorization", ["Basic ", http_auth_digest(User, UPw)]} | Headers]
- end,
- case UseProxy of
- false ->
- Headers_1;
- true when ProxyAuthDigest == [] ->
- Headers_1;
- true ->
- [{"Proxy-Authorization", ["Basic ", ProxyAuthDigest]} | Headers_1]
- end.
+ undefined ->
+ case get_value(basic_auth, Options, undefined) of
+ undefined ->
+ Headers;
+ {U,P} ->
+ [{"Authorization", ["Basic ", http_auth_digest(U, P)]} | Headers]
+ end;
+ _ ->
+ [{"Authorization", ["Basic ", http_auth_digest(User, UPw)]} | Headers]
+ end,
+ add_proxy_auth_headers(State, Headers_1).
+
+add_proxy_auth_headers(#state{use_proxy = false}, Headers) ->
+ Headers;
+add_proxy_auth_headers(#state{proxy_auth_digest = []}, Headers) ->
+ Headers;
+add_proxy_auth_headers(#state{proxy_auth_digest = Auth_digest}, Headers) ->
+ [{"Proxy-Authorization", ["Basic ", Auth_digest]} | Headers].
http_auth_digest([], []) ->
[];
http_auth_digest(Username, Password) ->
- encode_base64(Username ++ [$: | Password]).
+ ibrowse_lib:encode_base64(Username ++ [$: | Password]).
-encode_base64([]) ->
- [];
-encode_base64([A]) ->
- [e(A bsr 2), e((A band 3) bsl 4), $=, $=];
-encode_base64([A,B]) ->
- [e(A bsr 2), e(((A band 3) bsl 4) bor (B bsr 4)), e((B band 15) bsl 2), $=];
-encode_base64([A,B,C|Ls]) ->
- encode_base64_do(A,B,C, Ls).
-encode_base64_do(A,B,C, Rest) ->
- BB = (A bsl 16) bor (B bsl 8) bor C,
- [e(BB bsr 18), e((BB bsr 12) band 63),
- e((BB bsr 6) band 63), e(BB band 63)|encode_base64(Rest)].
-
-e(X) when X >= 0, X < 26 -> X+65;
-e(X) when X>25, X<52 -> X+71;
-e(X) when X>51, X<62 -> X-4;
-e(62) -> $+;
-e(63) -> $/;
-e(X) -> exit({bad_encode_base64_token, X}).
-
-make_request(Method, Headers, AbsPath, RelPath, Body, Options, UseProxy) ->
+make_request(Method, Headers, AbsPath, RelPath, Body, Options,
+ #state{use_proxy = UseProxy}) ->
HttpVsn = http_vsn_string(get_value(http_vsn, Options, {1,1})),
Headers_1 =
- case get_value(content_length, Headers, false) of
- false when (Body == []) or
- (Body == <<>>) or
- is_tuple(Body) or
- is_function(Body) ->
- Headers;
- false when is_binary(Body) ->
- [{"content-length", integer_to_list(size(Body))} | Headers];
- false ->
- [{"content-length", integer_to_list(length(Body))} | Headers];
- _ ->
- Headers
- end,
+ case get_value(content_length, Headers, false) of
+ false when (Body == []) or
+ (Body == <<>>) or
+ is_tuple(Body) or
+ is_function(Body) ->
+ Headers;
+ false when is_binary(Body) ->
+ [{"content-length", integer_to_list(size(Body))} | Headers];
+ false ->
+ [{"content-length", integer_to_list(length(Body))} | Headers];
+ _ ->
+ Headers
+ end,
{Headers_2, Body_1} =
- case get_value(transfer_encoding, Options, false) of
- false ->
- {Headers_1, Body};
- {chunked, ChunkSize} ->
- {[{X, Y} || {X, Y} <- Headers_1,
- X /= "Content-Length",
- X /= "content-length",
- X /= content_length] ++
- [{"Transfer-Encoding", "chunked"}],
- chunk_request_body(Body, ChunkSize)}
- end,
+ case get_value(transfer_encoding, Options, false) of
+ false ->
+ {Headers_1, Body};
+ {chunked, ChunkSize} ->
+ {[{X, Y} || {X, Y} <- Headers_1,
+ X /= "Content-Length",
+ X /= "content-length",
+ X /= content_length] ++
+ [{"Transfer-Encoding", "chunked"}],
+ chunk_request_body(Body, ChunkSize)}
+ end,
Headers_3 = cons_headers(Headers_2),
Uri = case get_value(use_absolute_uri, Options, false) or UseProxy of
- true ->
- AbsPath;
- false ->
- RelPath
- end,
+ true ->
+ AbsPath;
+ false ->
+ RelPath
+ end,
{[method(Method), " ", Uri, " ", HttpVsn, crnl(), Headers_3, crnl()], Body_1}.
http_vsn_string({0,9}) -> "HTTP/0.9";
@@ -717,7 +815,7 @@ cons_headers([], Acc) ->
encode_headers(Acc);
cons_headers([{basic_auth, {U,P}} | T], Acc) ->
cons_headers(T, [{"Authorization",
- ["Basic ", ibrowse_lib:encode_base64(U++":"++P)]} | Acc]);
+ ["Basic ", ibrowse_lib:encode_base64(U++":"++P)]} | Acc]);
cons_headers([{cookie, Cookie} | T], Acc) ->
cons_headers(T, [{"Cookie", Cookie} | Acc]);
cons_headers([{content_length, L} | T], Acc) ->
@@ -748,24 +846,23 @@ chunk_request_body(Body, ChunkSize, Acc) when is_binary(Body),
size(Body) >= ChunkSize ->
<<ChunkBody:ChunkSize/binary, Rest/binary>> = Body,
Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
- ChunkBody, "\r\n"],
+ ChunkBody, "\r\n"],
chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
chunk_request_body(Body, _ChunkSize, Acc) when is_binary(Body) ->
BodySize = size(Body),
Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
- Body, "\r\n"],
+ Body, "\r\n"],
LastChunk = "0\r\n",
lists:reverse(["\r\n", LastChunk, Chunk | Acc]);
-chunk_request_body(Body, ChunkSize, Acc) when is_list(Body),
- length(Body) >= ChunkSize ->
+chunk_request_body(Body, ChunkSize, Acc) when length(Body) >= ChunkSize ->
{ChunkBody, Rest} = split_list_at(Body, ChunkSize),
Chunk = [ibrowse_lib:dec2hex(4, ChunkSize),"\r\n",
- ChunkBody, "\r\n"],
+ ChunkBody, "\r\n"],
chunk_request_body(Rest, ChunkSize, [Chunk | Acc]);
chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
BodySize = length(Body),
Chunk = [ibrowse_lib:dec2hex(4, BodySize),"\r\n",
- Body, "\r\n"],
+ Body, "\r\n"],
LastChunk = "0\r\n",
lists:reverse(["\r\n", LastChunk, Chunk | Acc]).
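
For illustration, the chunk_request_body(Body, ChunkSize) call made from make_request/7 serialises a list body into the chunked wire format, with chunk sizes emitted as four hex digits by ibrowse_lib:dec2hex/2. A body of "hello world" with a chunk size of 5 would flatten to roughly:

    "0005\r\nhello\r\n0005\r\n worl\r\n0001\r\nd\r\n0\r\n\r\n"
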
@@ -773,114 +870,172 @@ chunk_request_body(Body, _ChunkSize, Acc) when is_list(Body) ->
parse_response(_Data, #state{cur_req = undefined}=State) ->
State#state{status = idle};
parse_response(Data, #state{reply_buffer = Acc, reqs = Reqs,
- cur_req = CurReq} = State) ->
+ cur_req = CurReq} = State) ->
#request{from=From, stream_to=StreamTo, req_id=ReqId,
- method=Method, response_format = Resp_format} = CurReq,
+ method=Method, response_format = Resp_format,
+ options = Options
+ } = CurReq,
MaxHeaderSize = ibrowse:get_config_value(max_headers_size, infinity),
case scan_header(Acc, Data) of
- {yes, Headers, Data_1} ->
- do_trace("Recvd Header Data -> ~s~n----~n", [Headers]),
- do_trace("Recvd headers~n--- Headers Begin ---~n~s~n--- Headers End ---~n~n", [Headers]),
- {HttpVsn, StatCode, Headers_1} = parse_headers(Headers),
- do_trace("HttpVsn: ~p StatusCode: ~p Headers_1 -> ~1000.p~n", [HttpVsn, StatCode, Headers_1]),
- LCHeaders = [{to_lower(X), Y} || {X,Y} <- Headers_1],
- ConnClose = to_lower(get_value("connection", LCHeaders, "false")),
- IsClosing = is_connection_closing(HttpVsn, ConnClose),
- case IsClosing of
- true ->
+ {yes, Headers, Data_1} ->
+ do_trace("Recvd Header Data -> ~s~n----~n", [Headers]),
+ do_trace("Recvd headers~n--- Headers Begin ---~n~s~n--- Headers End ---~n~n", [Headers]),
+ {HttpVsn, StatCode, Headers_1, Status_line, Raw_headers} = parse_headers(Headers),
+ do_trace("HttpVsn: ~p StatusCode: ~p Headers_1 -> ~1000.p~n", [HttpVsn, StatCode, Headers_1]),
+ LCHeaders = [{to_lower(X), Y} || {X,Y} <- Headers_1],
+ ConnClose = to_lower(get_value("connection", LCHeaders, "false")),
+ IsClosing = is_connection_closing(HttpVsn, ConnClose),
+ case IsClosing of
+ true ->
shutting_down(State);
- false ->
- ok
- end,
- State_1 = State#state{recvd_headers=Headers_1, status=get_body,
- reply_buffer = <<>>,
- http_status_code=StatCode, is_closing=IsClosing},
- put(conn_close, ConnClose),
- TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
- case get_value("content-length", LCHeaders, undefined) of
- _ when Method == head ->
- {_, Reqs_1} = queue:out(Reqs),
- send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
- State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
- {ok, StatCode, Headers_1, []}),
- cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
- State_2 = reset_state(State_1_1),
- State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
- parse_response(Data_1, State_3);
- _ when hd(StatCode) == $1 ->
- %% No message body is expected. Server may send
- %% one or more 1XX responses before a proper
- %% response.
- send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
- do_trace("Recvd a status code of ~p. Ignoring and waiting for a proper response~n", [StatCode]),
- parse_response(Data_1, State_1#state{recvd_headers = [],
- status = get_header});
- _ when StatCode == "204";
- StatCode == "304" ->
- %% No message body is expected for these Status Codes.
- %% RFC2616 - Sec 4.4
- {_, Reqs_1} = queue:out(Reqs),
- send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
- State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
- {ok, StatCode, Headers_1, []}),
- cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
- State_2 = reset_state(State_1_1),
- State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
- parse_response(Data_1, State_3);
- _ when TransferEncoding == "chunked" ->
- do_trace("Chunked encoding detected...~n",[]),
- send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
- case parse_11_response(Data_1, State_1#state{transfer_encoding=chunked,
- chunk_size=chunk_start,
- reply_buffer = <<>>}) of
- {error, Reason} ->
- fail_pipelined_requests(State_1,
- {error, {Reason,
- {stat_code, StatCode}, Headers_1}}),
- {error, Reason};
- State_2 ->
- State_2
- end;
- undefined when HttpVsn == "HTTP/1.0";
- ConnClose == "close" ->
- send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
- State_1#state{reply_buffer = Data_1};
- undefined ->
- fail_pipelined_requests(State_1,
- {error, {content_length_undefined,
- {stat_code, StatCode}, Headers}}),
- {error, content_length_undefined};
- V ->
- case catch list_to_integer(V) of
- V_1 when is_integer(V_1), V_1 >= 0 ->
- send_async_headers(ReqId, StreamTo, StatCode, Headers_1),
- do_trace("Recvd Content-Length of ~p~n", [V_1]),
- State_2 = State_1#state{rep_buf_size=0,
- reply_buffer = <<>>,
- content_length=V_1},
- case parse_11_response(Data_1, State_2) of
- {error, Reason} ->
- fail_pipelined_requests(State_1,
- {error, {Reason,
- {stat_code, StatCode}, Headers_1}}),
- {error, Reason};
- State_3 ->
- State_3
- end;
- _ ->
- fail_pipelined_requests(State_1,
- {error, {content_length_undefined,
- {stat_code, StatCode}, Headers}}),
- {error, content_length_undefined}
- end
- end;
- {no, Acc_1} when MaxHeaderSize == infinity ->
- State#state{reply_buffer = Acc_1};
- {no, Acc_1} when size(Acc_1) < MaxHeaderSize ->
- State#state{reply_buffer = Acc_1};
- {no, _Acc_1} ->
- fail_pipelined_requests(State, {error, max_headers_size_exceeded}),
- {error, max_headers_size_exceeded}
+ false ->
+ ok
+ end,
+ Give_raw_headers = get_value(give_raw_headers, Options, false),
+ State_1 = case Give_raw_headers of
+ true ->
+ State#state{recvd_headers=Headers_1, status=get_body,
+ reply_buffer = <<>>,
+ status_line = Status_line,
+ raw_headers = Raw_headers,
+ http_status_code=StatCode, is_closing=IsClosing};
+ false ->
+ State#state{recvd_headers=Headers_1, status=get_body,
+ reply_buffer = <<>>,
+ http_status_code=StatCode, is_closing=IsClosing}
+ end,
+ put(conn_close, ConnClose),
+ TransferEncoding = to_lower(get_value("transfer-encoding", LCHeaders, "false")),
+ case get_value("content-length", LCHeaders, undefined) of
+ _ when Method == connect,
+ hd(StatCode) == $2 ->
+ cancel_timer(State#state.send_timer),
+ {_, Reqs_1} = queue:out(Reqs),
+ upgrade_to_ssl(set_cur_request(State#state{reqs = Reqs_1,
+ recvd_headers = [],
+ status = idle
+ }));
+ _ when Method == connect ->
+ {_, Reqs_1} = queue:out(Reqs),
+ do_error_reply(State#state{reqs = Reqs_1},
+ {error, proxy_tunnel_failed}),
+ {error, proxy_tunnel_failed};
+ _ when Method == head ->
+ {_, Reqs_1} = queue:out(Reqs),
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
+ {ok, StatCode, Headers_1, []}),
+ cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+ State_2 = reset_state(State_1_1),
+ State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
+ parse_response(Data_1, State_3);
+ _ when hd(StatCode) =:= $1 ->
+ %% No message body is expected. Server may send
+ %% one or more 1XX responses before a proper
+ %% response.
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ do_trace("Recvd a status code of ~p. Ignoring and waiting for a proper response~n", [StatCode]),
+ parse_response(Data_1, State_1#state{recvd_headers = [],
+ status = get_header});
+ _ when StatCode =:= "204";
+ StatCode =:= "304" ->
+ %% No message body is expected for these Status Codes.
+ %% RFC2616 - Sec 4.4
+ {_, Reqs_1} = queue:out(Reqs),
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ State_1_1 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
+ {ok, StatCode, Headers_1, []}),
+ cancel_timer(State_1_1#state.send_timer, {eat_message, {req_timedout, From}}),
+ State_2 = reset_state(State_1_1),
+ State_3 = set_cur_request(State_2#state{reqs = Reqs_1}),
+ parse_response(Data_1, State_3);
+ _ when TransferEncoding =:= "chunked" ->
+ do_trace("Chunked encoding detected...~n",[]),
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ case parse_11_response(Data_1, State_1#state{transfer_encoding=chunked,
+ chunk_size=chunk_start,
+ reply_buffer = <<>>}) of
+ {error, Reason} ->
+ fail_pipelined_requests(State_1,
+ {error, {Reason,
+ {stat_code, StatCode}, Headers_1}}),
+ {error, Reason};
+ State_2 ->
+ State_2
+ end;
+ undefined when HttpVsn =:= "HTTP/1.0";
+ ConnClose =:= "close" ->
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ State_1#state{reply_buffer = Data_1};
+ undefined ->
+ fail_pipelined_requests(State_1,
+ {error, {content_length_undefined,
+ {stat_code, StatCode}, Headers}}),
+ {error, content_length_undefined};
+ V ->
+ case catch list_to_integer(V) of
+ V_1 when is_integer(V_1), V_1 >= 0 ->
+ send_async_headers(ReqId, StreamTo, Give_raw_headers, State_1),
+ do_trace("Recvd Content-Length of ~p~n", [V_1]),
+ State_2 = State_1#state{rep_buf_size=0,
+ reply_buffer = <<>>,
+ content_length=V_1},
+ case parse_11_response(Data_1, State_2) of
+ {error, Reason} ->
+ fail_pipelined_requests(State_1,
+ {error, {Reason,
+ {stat_code, StatCode}, Headers_1}}),
+ {error, Reason};
+ State_3 ->
+ State_3
+ end;
+ _ ->
+ fail_pipelined_requests(State_1,
+ {error, {content_length_undefined,
+ {stat_code, StatCode}, Headers}}),
+ {error, content_length_undefined}
+ end
+ end;
+ {no, Acc_1} when MaxHeaderSize == infinity ->
+ State#state{reply_buffer = Acc_1};
+ {no, Acc_1} when size(Acc_1) < MaxHeaderSize ->
+ State#state{reply_buffer = Acc_1};
+ {no, _Acc_1} ->
+ fail_pipelined_requests(State, {error, max_headers_size_exceeded}),
+ {error, max_headers_size_exceeded}
+ end.
+
+upgrade_to_ssl(#state{socket = Socket,
+ connect_timeout = Conn_timeout,
+ ssl_options = Ssl_options,
+ tunnel_setup_queue = Q} = State) ->
+ case ssl:connect(Socket, Ssl_options, Conn_timeout) of
+ {ok, Ssl_socket} ->
+ do_trace("Upgraded to SSL socket!!~n", []),
+ State_1 = State#state{socket = Ssl_socket,
+ proxy_tunnel_setup = done},
+ send_queued_requests(lists:reverse(Q), State_1);
+ Err ->
+            do_trace("Upgrade to SSL socket failed. Reason: ~p~n", [Err]),
+ do_error_reply(State, {error, send_failed}),
+ {error, send_failed}
+ end.
+
+send_queued_requests([], State) ->
+ do_trace("Sent all queued requests via SSL connection~n", []),
+ State#state{tunnel_setup_queue = done};
+send_queued_requests([{From, Url, Headers, Method, Body, Options, Timeout} | Q],
+ State) ->
+ case send_req_1(From, Url, Headers, Method, Body, Options, Timeout, State) of
+ {noreply, State_1} ->
+ send_queued_requests(Q, State_1);
+ _ ->
+ do_trace("Error sending queued SSL request: ~n"
+ "URL : ~s~n"
+ "Method : ~p~n"
+ "Headers : ~p~n", [Url, Method, Headers]),
+ do_error_reply(State, {error, send_failed}),
+ {error, send_failed}
end.
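
The CONNECT handling above is what lets an https URL be fetched through a proxy: requests are parked in tunnel_setup_queue, the socket is upgraded with ssl:connect/3 once the tunnel is up, and the queued requests are then replayed. A sketch with placeholder proxy details; passing ssl_options in the option list is an assumption based on the ssl_options state field used by upgrade_to_ssl/1:

    ibrowse:send_req("https://secure.example.com/", [], get, [],
                     [{proxy_host, "proxy"},
                      {proxy_port, 8080},
                      {ssl_options, []}]).
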
is_connection_closing("HTTP/0.9", _) -> true;
@@ -890,200 +1045,215 @@ is_connection_closing(_, _) -> false.
%% This clause determines the chunk size when given data from the beginning of the chunk
parse_11_response(DataRecvd,
- #state{transfer_encoding = chunked,
- chunk_size = chunk_start,
- chunk_size_buffer = Chunk_sz_buf
- } = State) ->
+ #state{transfer_encoding = chunked,
+ chunk_size = chunk_start,
+ chunk_size_buffer = Chunk_sz_buf
+ } = State) ->
case scan_crlf(Chunk_sz_buf, DataRecvd) of
- {yes, ChunkHeader, Data_1} ->
- case parse_chunk_header(ChunkHeader) of
- {error, Reason} ->
- {error, Reason};
- ChunkSize ->
- %%
- %% Do we have to preserve the chunk encoding when
- %% streaming? NO. This should be transparent to the client
- %% process. Chunked encoding was only introduced to make
- %% it efficient for the server.
- %%
- RemLen = size(Data_1),
- do_trace("Determined chunk size: ~p. Already recvd: ~p~n", [ChunkSize, RemLen]),
- parse_11_response(Data_1, State#state{chunk_size_buffer = <<>>,
- deleted_crlf = true,
- recvd_chunk_size = 0,
- chunk_size = ChunkSize})
- end;
- {no, Data_1} ->
- State#state{chunk_size_buffer = Data_1}
+ {yes, ChunkHeader, Data_1} ->
+ ChunkSize = parse_chunk_header(ChunkHeader),
+ %%
+ %% Do we have to preserve the chunk encoding when
+ %% streaming? NO. This should be transparent to the client
+ %% process. Chunked encoding was only introduced to make
+ %% it efficient for the server.
+ %%
+ RemLen = size(Data_1),
+ do_trace("Determined chunk size: ~p. Already recvd: ~p~n",
+ [ChunkSize, RemLen]),
+ parse_11_response(Data_1, State#state{chunk_size_buffer = <<>>,
+ deleted_crlf = true,
+ recvd_chunk_size = 0,
+ chunk_size = ChunkSize});
+ {no, Data_1} ->
+ State#state{chunk_size_buffer = Data_1}
end;
%% This clause is to remove the CRLF between two chunks
%%
parse_11_response(DataRecvd,
- #state{transfer_encoding = chunked,
- chunk_size = tbd,
- chunk_size_buffer = Buf}=State) ->
+ #state{transfer_encoding = chunked,
+ chunk_size = tbd,
+ chunk_size_buffer = Buf}=State) ->
case scan_crlf(Buf, DataRecvd) of
- {yes, _, NextChunk} ->
- State_1 = State#state{chunk_size = chunk_start,
- chunk_size_buffer = <<>>,
- deleted_crlf = true},
- parse_11_response(NextChunk, State_1);
- {no, Data_1} ->
- State#state{chunk_size_buffer = Data_1}
+ {yes, _, NextChunk} ->
+ State_1 = State#state{chunk_size = chunk_start,
+ chunk_size_buffer = <<>>,
+ deleted_crlf = true},
+ parse_11_response(NextChunk, State_1);
+ {no, Data_1} ->
+ State#state{chunk_size_buffer = Data_1}
end;
%% This clause deals with the end of a chunked transfer. ibrowse does
%% not support Trailers in the Chunked Transfer encoding. Any trailer
%% received is silently discarded.
parse_11_response(DataRecvd,
- #state{transfer_encoding = chunked, chunk_size = 0,
- cur_req = CurReq,
- deleted_crlf = DelCrlf,
- chunk_size_buffer = Trailer, reqs = Reqs}=State) ->
+ #state{transfer_encoding = chunked, chunk_size = 0,
+ cur_req = CurReq,
+ deleted_crlf = DelCrlf,
+ chunk_size_buffer = Trailer, reqs = Reqs}=State) ->
do_trace("Detected end of chunked transfer...~n", []),
DataRecvd_1 = case DelCrlf of
- false ->
- DataRecvd;
- true ->
- <<$\r, $\n, DataRecvd/binary>>
+ false ->
+ DataRecvd;
+ true ->
+ <<$\r, $\n, DataRecvd/binary>>
end,
case scan_header(Trailer, DataRecvd_1) of
- {yes, _TEHeaders, Rem} ->
- {_, Reqs_1} = queue:out(Reqs),
- State_1 = handle_response(CurReq, State#state{reqs = Reqs_1}),
- parse_response(Rem, reset_state(State_1));
- {no, Rem} ->
- State#state{chunk_size_buffer = Rem, deleted_crlf = false}
+ {yes, _TEHeaders, Rem} ->
+ {_, Reqs_1} = queue:out(Reqs),
+ State_1 = handle_response(CurReq, State#state{reqs = Reqs_1}),
+ parse_response(Rem, reset_state(State_1));
+ {no, Rem} ->
+ State#state{chunk_size_buffer = Rem, deleted_crlf = false}
end;
%% This clause extracts a chunk, given the size.
parse_11_response(DataRecvd,
- #state{transfer_encoding = chunked,
- chunk_size = CSz,
- recvd_chunk_size = Recvd_csz,
- rep_buf_size = RepBufSz} = State) ->
+ #state{transfer_encoding = chunked,
+ chunk_size = CSz,
+ recvd_chunk_size = Recvd_csz,
+ rep_buf_size = RepBufSz} = State) ->
NeedBytes = CSz - Recvd_csz,
DataLen = size(DataRecvd),
do_trace("Recvd more data: size: ~p. NeedBytes: ~p~n", [DataLen, NeedBytes]),
case DataLen >= NeedBytes of
- true ->
- {RemChunk, RemData} = split_binary(DataRecvd, NeedBytes),
- do_trace("Recvd another chunk...~n", []),
- do_trace("RemData -> ~p~n", [RemData]),
- case accumulate_response(RemChunk, State) of
- {error, Reason} ->
- do_trace("Error accumulating response --> ~p~n", [Reason]),
- {error, Reason};
- #state{} = State_1 ->
- State_2 = State_1#state{chunk_size=tbd},
- parse_11_response(RemData, State_2)
- end;
- false ->
- accumulate_response(DataRecvd,
- State#state{rep_buf_size = RepBufSz + DataLen,
- recvd_chunk_size = Recvd_csz + DataLen})
+ true ->
+ {RemChunk, RemData} = split_binary(DataRecvd, NeedBytes),
+ do_trace("Recvd another chunk...~n", []),
+ do_trace("RemData -> ~p~n", [RemData]),
+ case accumulate_response(RemChunk, State) of
+ {error, Reason} ->
+ do_trace("Error accumulating response --> ~p~n", [Reason]),
+ {error, Reason};
+ #state{} = State_1 ->
+ State_2 = State_1#state{chunk_size=tbd},
+ parse_11_response(RemData, State_2)
+ end;
+ false ->
+ accumulate_response(DataRecvd,
+ State#state{rep_buf_size = RepBufSz + DataLen,
+ recvd_chunk_size = Recvd_csz + DataLen})
end;
%% This clause to extract the body when Content-Length is specified
parse_11_response(DataRecvd,
- #state{content_length=CL, rep_buf_size=RepBufSz,
- reqs=Reqs}=State) ->
+ #state{content_length=CL, rep_buf_size=RepBufSz,
+ reqs=Reqs}=State) ->
NeedBytes = CL - RepBufSz,
DataLen = size(DataRecvd),
case DataLen >= NeedBytes of
- true ->
- {RemBody, Rem} = split_binary(DataRecvd, NeedBytes),
- {_, Reqs_1} = queue:out(Reqs),
- State_1 = accumulate_response(RemBody, State),
- State_2 = handle_response(State_1#state.cur_req, State_1#state{reqs=Reqs_1}),
- State_3 = reset_state(State_2),
- parse_response(Rem, State_3);
- false ->
- accumulate_response(DataRecvd, State#state{rep_buf_size = (RepBufSz+DataLen)})
+ true ->
+ {RemBody, Rem} = split_binary(DataRecvd, NeedBytes),
+ {_, Reqs_1} = queue:out(Reqs),
+ State_1 = accumulate_response(RemBody, State),
+ State_2 = handle_response(State_1#state.cur_req, State_1#state{reqs=Reqs_1}),
+ State_3 = reset_state(State_2),
+ parse_response(Rem, State_3);
+ false ->
+ accumulate_response(DataRecvd, State#state{rep_buf_size = (RepBufSz+DataLen)})
end.
handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
- response_format = Resp_format,
- save_response_to_file = SaveResponseToFile,
- tmp_file_name = TmpFilename,
- tmp_file_fd = Fd
- },
- #state{http_status_code = SCode,
- send_timer = ReqTimer,
- reply_buffer = RepBuf,
- recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
+ response_format = Resp_format,
+ save_response_to_file = SaveResponseToFile,
+ tmp_file_name = TmpFilename,
+ tmp_file_fd = Fd,
+ options = Options
+ },
+ #state{http_status_code = SCode,
+ status_line = Status_line,
+ raw_headers = Raw_headers,
+ send_timer = ReqTimer,
+ reply_buffer = RepBuf,
+ recvd_headers = RespHeaders}=State) when SaveResponseToFile /= false ->
Body = RepBuf,
State_1 = set_cur_request(State),
file:close(Fd),
ResponseBody = case TmpFilename of
- undefined ->
- Body;
- _ ->
- {file, TmpFilename}
- end,
- State_2 = do_reply(State_1, From, StreamTo, ReqId, Resp_format,
- {ok, SCode, RespHeaders, ResponseBody}),
+ undefined ->
+ Body;
+ _ ->
+ {file, TmpFilename}
+ end,
+ Reply = case get_value(give_raw_headers, Options, false) of
+ true ->
+ {ok, Status_line, Raw_headers, ResponseBody};
+ false ->
+ {ok, SCode, RespHeaders, ResponseBody}
+ end,
+ State_2 = do_reply(State_1, From, StreamTo, ReqId, Resp_format, Reply),
cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
State_2;
handle_response(#request{from=From, stream_to=StreamTo, req_id=ReqId,
- response_format = Resp_format},
- #state{http_status_code=SCode, recvd_headers=RespHeaders,
- reply_buffer = RepBuf,
- send_timer=ReqTimer}=State) ->
+ response_format = Resp_format,
+ options = Options},
+ #state{http_status_code = SCode,
+ status_line = Status_line,
+ raw_headers = Raw_headers,
+ recvd_headers = RespHeaders,
+ reply_buffer = RepBuf,
+ send_timer = ReqTimer} = State) ->
Body = RepBuf,
%% State_1 = set_cur_request(State),
+ Reply = case get_value(give_raw_headers, Options, false) of
+ true ->
+ {ok, Status_line, Raw_headers, Body};
+ false ->
+ {ok, SCode, RespHeaders, Body}
+ end,
State_1 = case get(conn_close) of
- "close" ->
- do_reply(State, From, StreamTo, ReqId, Resp_format,
- {ok, SCode, RespHeaders, Body}),
- exit(normal);
- _ ->
- State_1_1 = do_reply(State, From, StreamTo, ReqId, Resp_format,
- {ok, SCode, RespHeaders, Body}),
- cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
- State_1_1
+ "close" ->
+ do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+ exit(normal);
+ _ ->
+ State_1_1 = do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
+ cancel_timer(ReqTimer, {eat_message, {req_timedout, From}}),
+ State_1_1
end,
set_cur_request(State_1).
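
Both handle_response clauses now honour the give_raw_headers option, in which case the reply carries the status line and header block verbatim instead of the parsed forms. A sketch with a hypothetical URL:

    {ok, Status_line, Raw_headers, Body} =
        ibrowse:send_req("http://intranet/raw", [], get, [],
                         [{give_raw_headers, true}]).
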
reset_state(State) ->
State#state{status = get_header,
- rep_buf_size = 0,
- streamed_size = 0,
- content_length = undefined,
- reply_buffer = <<>>,
- chunk_size_buffer = <<>>,
- recvd_headers = [],
- deleted_crlf = false,
- http_status_code = undefined,
- chunk_size = undefined,
- transfer_encoding = undefined}.
+ rep_buf_size = 0,
+ streamed_size = 0,
+ content_length = undefined,
+ reply_buffer = <<>>,
+ chunk_size_buffer = <<>>,
+ recvd_headers = [],
+ status_line = undefined,
+ raw_headers = undefined,
+ deleted_crlf = false,
+ http_status_code = undefined,
+ chunk_size = undefined,
+ transfer_encoding = undefined}.
set_cur_request(#state{reqs = Reqs} = State) ->
case queue:to_list(Reqs) of
- [] ->
- State#state{cur_req = undefined};
- [NextReq | _] ->
- State#state{cur_req = NextReq}
+ [] ->
+ State#state{cur_req = undefined};
+ [NextReq | _] ->
+ State#state{cur_req = NextReq}
end.
parse_headers(Headers) ->
case scan_crlf(Headers) of
- {yes, StatusLine, T} ->
- parse_headers(StatusLine, T);
- {no, StatusLine} ->
- parse_headers(StatusLine, <<>>)
+ {yes, StatusLine, T} ->
+ parse_headers(StatusLine, T);
+ {no, StatusLine} ->
+ parse_headers(StatusLine, <<>>)
end.
parse_headers(StatusLine, Headers) ->
Headers_1 = parse_headers_1(Headers),
case parse_status_line(StatusLine) of
- {ok, HttpVsn, StatCode, _Msg} ->
- put(http_prot_vsn, HttpVsn),
- {HttpVsn, StatCode, Headers_1};
- _ -> %% A HTTP 0.9 response?
- put(http_prot_vsn, "HTTP/0.9"),
- {"HTTP/0.9", undefined, Headers}
+ {ok, HttpVsn, StatCode, _Msg} ->
+ put(http_prot_vsn, HttpVsn),
+ {HttpVsn, StatCode, Headers_1, StatusLine, Headers};
+ _ -> %% A HTTP 0.9 response?
+ put(http_prot_vsn, "HTTP/0.9"),
+ {"HTTP/0.9", undefined, Headers, StatusLine, Headers}
end.
% From RFC 2616
@@ -1094,22 +1264,22 @@ parse_headers(StatusLine, Headers) ->
% SP. A recipient MAY replace any linear white space with a single
% SP before interpreting the field value or forwarding the message
% downstream.
- parse_headers_1(B) when is_binary(B) ->
- parse_headers_1(binary_to_list(B));
- parse_headers_1(String) ->
- parse_headers_1(String, [], []).
+parse_headers_1(B) when is_binary(B) ->
+ parse_headers_1(binary_to_list(B));
+parse_headers_1(String) ->
+ parse_headers_1(String, [], []).
-parse_headers_1([$\n, H |T], [$\r | L], Acc) when H == 32;
- H == $\t ->
+parse_headers_1([$\n, H |T], [$\r | L], Acc) when H =:= 32;
+ H =:= $\t ->
parse_headers_1(lists:dropwhile(fun(X) ->
- is_whitespace(X)
- end, T), [32 | L], Acc);
+ is_whitespace(X)
+ end, T), [32 | L], Acc);
parse_headers_1([$\n|T], [$\r | L], Acc) ->
case parse_header(lists:reverse(L)) of
- invalid ->
- parse_headers_1(T, [], Acc);
- NewHeader ->
- parse_headers_1(T, [], [NewHeader | Acc])
+ invalid ->
+ parse_headers_1(T, [], Acc);
+ NewHeader ->
+ parse_headers_1(T, [], [NewHeader | Acc])
end;
parse_headers_1([H|T], L, Acc) ->
parse_headers_1(T, [H|L], Acc);
@@ -1117,11 +1287,11 @@ parse_headers_1([], [], Acc) ->
lists:reverse(Acc);
parse_headers_1([], L, Acc) ->
Acc_1 = case parse_header(lists:reverse(L)) of
- invalid ->
- Acc;
- NewHeader ->
- [NewHeader | Acc]
- end,
+ invalid ->
+ Acc;
+ NewHeader ->
+ [NewHeader | Acc]
+ end,
lists:reverse(Acc_1).
parse_status_line(Line) when is_binary(Line) ->
@@ -1139,10 +1309,9 @@ parse_status_line([H | T], get_status_code, ProtVsn, StatCode) ->
parse_status_line([], _, _, _) ->
http_09.
-parse_header(B) when is_binary(B) ->
- parse_header(binary_to_list(B));
parse_header(L) ->
parse_header(L, []).
+
parse_header([$: | V], Acc) ->
{lists:reverse(Acc), string:strip(V)};
parse_header([H | T], Acc) ->
@@ -1152,11 +1321,11 @@ parse_header([], _) ->
scan_header(Bin) ->
case get_crlf_crlf_pos(Bin, 0) of
- {yes, Pos} ->
- {Headers, <<_:4/binary, Body/binary>>} = split_binary(Bin, Pos),
- {yes, Headers, Body};
- no ->
- {no, Bin}
+ {yes, Pos} ->
+ {Headers, <<_:4/binary, Body/binary>>} = split_binary(Bin, Pos),
+ {yes, Headers, Body};
+ no ->
+ {no, Bin}
end.
scan_header(Bin1, Bin2) when size(Bin1) < 4 ->
@@ -1168,11 +1337,11 @@ scan_header(Bin1, Bin2) ->
<<Headers_prefix:Bin1_already_scanned_size/binary, Rest/binary>> = Bin1,
Bin_to_scan = <<Rest/binary, Bin2/binary>>,
case get_crlf_crlf_pos(Bin_to_scan, 0) of
- {yes, Pos} ->
- {Headers_suffix, <<_:4/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
- {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
- no ->
- {no, <<Bin1/binary, Bin2/binary>>}
+ {yes, Pos} ->
+ {Headers_suffix, <<_:4/binary, Body/binary>>} = split_binary(Bin_to_scan, Pos),
+ {yes, <<Headers_prefix/binary, Headers_suffix/binary>>, Body};
+ no ->
+ {no, <<Bin1/binary, Bin2/binary>>}
end.
get_crlf_crlf_pos(<<$\r, $\n, $\r, $\n, _/binary>>, Pos) -> {yes, Pos};
@@ -1181,11 +1350,11 @@ get_crlf_crlf_pos(<<>>, _) -> no.
scan_crlf(Bin) ->
case get_crlf_pos(Bin) of
- {yes, Pos} ->
- {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin, Pos),
- {yes, Prefix, Suffix};
- no ->
- {no, Bin}
+ {yes, Pos} ->
+ {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin, Pos),
+ {yes, Prefix, Suffix};
+ no ->
+ {no, Bin}
end.
scan_crlf(<<>>, Bin2) ->
@@ -1199,11 +1368,11 @@ scan_crlf_1(Bin1_head_size, Bin1, Bin2) ->
<<Bin1_head:Bin1_head_size/binary, Bin1_tail/binary>> = Bin1,
Bin3 = <<Bin1_tail/binary, Bin2/binary>>,
case get_crlf_pos(Bin3) of
- {yes, Pos} ->
- {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin3, Pos),
- {yes, list_to_binary([Bin1_head, Prefix]), Suffix};
- no ->
- {no, list_to_binary([Bin1, Bin2])}
+ {yes, Pos} ->
+ {Prefix, <<_, _, Suffix/binary>>} = split_binary(Bin3, Pos),
+ {yes, list_to_binary([Bin1_head, Prefix]), Suffix};
+ no ->
+ {no, list_to_binary([Bin1, Bin2])}
end.
get_crlf_pos(Bin) ->
@@ -1213,13 +1382,6 @@ get_crlf_pos(<<$\r, $\n, _/binary>>, Pos) -> {yes, Pos};
get_crlf_pos(<<_, Rest/binary>>, Pos) -> get_crlf_pos(Rest, Pos + 1);
get_crlf_pos(<<>>, _) -> no.
-%% scan_crlf(<<$\n, T/binary>>, [$\r | L]) -> {yes, lists:reverse(L), T};
-%% scan_crlf(<<H, T/binary>>, L) -> scan_crlf(T, [H|L]);
-%% scan_crlf(<<>>, L) -> {no, L};
-%% scan_crlf([$\n|T], [$\r | L]) -> {yes, lists:reverse(L), T};
-%% scan_crlf([H|T], L) -> scan_crlf(T, [H|L]);
-%% scan_crlf([], L) -> {no, L}.
-
fmt_val(L) when is_list(L) -> L;
fmt_val(I) when is_integer(I) -> integer_to_list(I);
fmt_val(A) when is_atom(A) -> atom_to_list(A);
@@ -1240,7 +1402,8 @@ method(proppatch) -> "PROPPATCH";
method(lock) -> "LOCK";
method(unlock) -> "UNLOCK";
method(move) -> "MOVE";
-method(copy) -> "COPY".
+method(copy) -> "COPY";
+method(connect) -> "CONNECT".
%% From RFC 2616
%%
@@ -1250,19 +1413,19 @@ method(copy) -> "COPY".
% fields. This allows dynamically produced content to be transferred
% along with the information necessary for the recipient to verify
% that it has received the full message.
-% Chunked-Body = *chunk
-% last-chunk
-% trailer
-% CRLF
-% chunk = chunk-size [ chunk-extension ] CRLF
-% chunk-data CRLF
-% chunk-size = 1*HEX
-% last-chunk = 1*("0") [ chunk-extension ] CRLF
-% chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
-% chunk-ext-name = token
-% chunk-ext-val = token | quoted-string
-% chunk-data = chunk-size(OCTET)
-% trailer = *(entity-header CRLF)
+% Chunked-Body = *chunk
+% last-chunk
+% trailer
+% CRLF
+% chunk = chunk-size [ chunk-extension ] CRLF
+% chunk-data CRLF
+% chunk-size = 1*HEX
+% last-chunk = 1*("0") [ chunk-extension ] CRLF
+% chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
+% chunk-ext-name = token
+% chunk-ext-val = token | quoted-string
+% chunk-data = chunk-size(OCTET)
+% trailer = *(entity-header CRLF)
% The chunk-size field is a string of hex digits indicating the size
% of the chunk. The chunked encoding is ended by any chunk whose size
% is zero, followed by the trailer, which is terminated by an empty
@@ -1271,8 +1434,6 @@ method(copy) -> "COPY".
%% The parsing implemented here discards all chunk extensions. It also
%% strips trailing spaces from the chunk size fields as Apache 1.3.27 was
%% sending them.
-parse_chunk_header([]) ->
- throw({error, invalid_chunk_size});
parse_chunk_header(ChunkHeader) ->
parse_chunk_header(ChunkHeader, []).
@@ -1280,10 +1441,10 @@ parse_chunk_header(<<$;, _/binary>>, Acc) ->
hexlist_to_integer(lists:reverse(Acc));
parse_chunk_header(<<H, T/binary>>, Acc) ->
case is_whitespace(H) of
- true ->
- parse_chunk_header(T, Acc);
- false ->
- parse_chunk_header(T, [H | Acc])
+ true ->
+ parse_chunk_header(T, Acc);
+ false ->
+ parse_chunk_header(T, [H | Acc])
end;
parse_chunk_header(<<>>, Acc) ->
hexlist_to_integer(lists:reverse(Acc)).
@@ -1294,24 +1455,31 @@ is_whitespace($\n) -> true;
is_whitespace($\t) -> true;
is_whitespace(_) -> false.
-
-send_async_headers(_ReqId, undefined, _StatCode, _Headers) ->
+send_async_headers(_ReqId, undefined, _, _State) ->
ok;
-send_async_headers(ReqId, StreamTo, StatCode, Headers) ->
- catch StreamTo ! {ibrowse_async_headers, ReqId, StatCode, Headers}.
+send_async_headers(ReqId, StreamTo, Give_raw_headers,
+ #state{status_line = Status_line, raw_headers = Raw_headers,
+ recvd_headers = Headers, http_status_code = StatCode
+ }) ->
+ case Give_raw_headers of
+ false ->
+ catch StreamTo ! {ibrowse_async_headers, ReqId, StatCode, Headers};
+ true ->
+ catch StreamTo ! {ibrowse_async_headers, ReqId, Status_line, Raw_headers}
+ end.
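
send_async_headers/4 is the producer side of the asynchronous interface: with {stream_to, Pid} in the options, headers and body arrive as messages (raw or parsed headers, depending on give_raw_headers). A sketch with a hypothetical URL:

    {ibrowse_req_id, ReqId} =
        ibrowse:send_req("http://intranet/feed", [], get, [],
                         [{stream_to, self()}]),
    receive
        {ibrowse_async_headers, ReqId, StatCode, Headers} ->
            {StatCode, Headers}
    after 5000 ->
        no_headers
    end.
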
format_response_data(Resp_format, Body) ->
case Resp_format of
- list when is_list(Body) ->
- flatten(Body);
- list when is_binary(Body) ->
- binary_to_list(Body);
- binary when is_list(Body) ->
- list_to_binary(Body);
- _ ->
- %% This is to cater for sending messages such as
- %% {chunk_start, _}, chunk_end etc
- Body
+ list when is_list(Body) ->
+ flatten(Body);
+ list when is_binary(Body) ->
+ binary_to_list(Body);
+ binary when is_list(Body) ->
+ list_to_binary(Body);
+ _ ->
+ %% This is to cater for sending messages such as
+ %% {chunk_start, _}, chunk_end etc
+ Body
end.
do_reply(State, From, undefined, _, Resp_format, {ok, St_code, Headers, Body}) ->
@@ -1322,14 +1490,14 @@ do_reply(State, From, undefined, _, _, Msg) ->
gen_server:reply(From, Msg),
dec_pipeline_counter(State);
do_reply(#state{prev_req_id = Prev_req_id} = State,
- _From, StreamTo, ReqId, Resp_format, {ok, _, _, Body}) ->
+ _From, StreamTo, ReqId, Resp_format, {ok, _, _, Body}) ->
State_1 = dec_pipeline_counter(State),
case Body of
- [] ->
- ok;
- _ ->
- Body_1 = format_response_data(Resp_format, Body),
- catch StreamTo ! {ibrowse_async_response, ReqId, Body_1}
+ [] ->
+ ok;
+ _ ->
+ Body_1 = format_response_data(Resp_format, Body),
+ catch StreamTo ! {ibrowse_async_response, ReqId, Body_1}
end,
catch StreamTo ! {ibrowse_async_response_end, ReqId},
%% We don't want to delete the Req-id to Pid mapping straightaway
@@ -1356,23 +1524,28 @@ do_interim_reply(StreamTo, Response_format, ReqId, Msg) ->
Msg_1 = format_response_data(Response_format, Msg),
catch StreamTo ! {ibrowse_async_response, ReqId, Msg_1}.
-do_error_reply(#state{reqs = Reqs} = State, Err) ->
+do_error_reply(#state{reqs = Reqs, tunnel_setup_queue = Tun_q} = State, Err) ->
ReqList = queue:to_list(Reqs),
lists:foreach(fun(#request{from=From, stream_to=StreamTo, req_id=ReqId,
- response_format = Resp_format}) ->
- ets:delete(ibrowse_stream, {req_id_pid, ReqId}),
+ response_format = Resp_format}) ->
+ ets:delete(ibrowse_stream, {req_id_pid, ReqId}),
do_reply(State, From, StreamTo, ReqId, Resp_format, {error, Err})
- end, ReqList).
+ end, ReqList),
+ lists:foreach(
+ fun({From, _Url, _Headers, _Method, _Body, _Options, _Timeout}) ->
+ do_reply(State, From, undefined, undefined, undefined, Err)
+ end, Tun_q).
fail_pipelined_requests(#state{reqs = Reqs, cur_req = CurReq} = State, Reply) ->
{_, Reqs_1} = queue:out(Reqs),
#request{from=From, stream_to=StreamTo, req_id=ReqId,
- response_format = Resp_format} = CurReq,
+ response_format = Resp_format} = CurReq,
do_reply(State, From, StreamTo, ReqId, Resp_format, Reply),
do_error_reply(State#state{reqs = Reqs_1}, previous_request_failed).
split_list_at(List, N) ->
split_list_at(List, N, []).
+
split_list_at([], _, Acc) ->
{lists:reverse(Acc), []};
split_list_at(List2, 0, List1) ->
@@ -1382,6 +1555,7 @@ split_list_at([H | List2], N, List1) ->
hexlist_to_integer(List) ->
hexlist_to_integer(lists:reverse(List), 1, 0).
+
hexlist_to_integer([H | T], Multiplier, Acc) ->
hexlist_to_integer(T, Multiplier*16, Multiplier*to_ascii(H) + Acc);
hexlist_to_integer([], _, Acc) ->
@@ -1416,10 +1590,10 @@ cancel_timer(Ref) -> erlang:cancel_timer(Ref).
cancel_timer(Ref, {eat_message, Msg}) ->
cancel_timer(Ref),
receive
- Msg ->
- ok
+ Msg ->
+ ok
after 0 ->
- ok
+ ok
end.
make_req_id() ->
@@ -1437,7 +1611,7 @@ to_lower([], Acc) ->
shutting_down(#state{lb_ets_tid = undefined}) ->
ok;
shutting_down(#state{lb_ets_tid = Tid,
- cur_pipeline_size = Sz}) ->
+ cur_pipeline_size = Sz}) ->
catch ets:delete(Tid, {Sz, self()}).
inc_pipeline_counter(#state{is_closing = true} = State) ->
@@ -1450,7 +1624,7 @@ dec_pipeline_counter(#state{is_closing = true} = State) ->
dec_pipeline_counter(#state{lb_ets_tid = undefined} = State) ->
State;
dec_pipeline_counter(#state{cur_pipeline_size = Pipe_sz,
- lb_ets_tid = Tid} = State) ->
+ lb_ets_tid = Tid} = State) ->
ets:delete(Tid, {Pipe_sz, self()}),
ets:insert(Tid, {{Pipe_sz - 1, self()}, []}),
State#state{cur_pipeline_size = Pipe_sz - 1}.
@@ -1464,13 +1638,35 @@ flatten([]) ->
get_stream_chunk_size(Options) ->
case lists:keysearch(stream_chunk_size, 1, Options) of
- {value, {_, V}} when V > 0 ->
- V;
- _ ->
- ?DEFAULT_STREAM_CHUNK_SIZE
+ {value, {_, V}} when V > 0 ->
+ V;
+ _ ->
+ ?DEFAULT_STREAM_CHUNK_SIZE
end.
-get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->
+set_inac_timer(State) ->
+ set_inac_timer(State, get_inac_timeout(State)).
+
+set_inac_timer(_State, Timeout) when is_integer(Timeout) ->
+ erlang:send_after(Timeout, self(), timeout);
+set_inac_timer(_, _) ->
+ undefined.
+
+get_inac_timeout(#state{cur_req = #request{options = Opts}}) ->
get_value(inactivity_timeout, Opts, infinity);
get_inac_timeout(#state{cur_req = undefined}) ->
infinity.
+
+trace_request(Req) ->
+ case get(my_trace_flag) of
+ true ->
+            %% Avoid the binary operations if trace is not on...
+ NReq = binary_to_list(list_to_binary(Req)),
+ do_trace("Sending request: ~n"
+ "--- Request Begin ---~n~s~n"
+ "--- Request End ---~n", [NReq]);
+ _ -> ok
+ end.
+
+to_integer(X) when is_list(X) -> list_to_integer(X);
+to_integer(X) when is_integer(X) -> X.
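
set_inac_timer/1,2 above arm the per-request inactivity timer from the inactivity_timeout option (milliseconds, defaulting to infinity, i.e. no timer), independently of the overall request timeout. A sketch with placeholder values:

    ibrowse:send_req("http://intranet/slow-report", [], get, [],
                     [{inactivity_timeout, 30000}], 60000).
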
diff --git a/src/ibrowse/ibrowse_lb.erl b/src/ibrowse/ibrowse_lb.erl
index 834054a7..6bc600be 100644
--- a/src/ibrowse/ibrowse_lb.erl
+++ b/src/ibrowse/ibrowse_lb.erl
@@ -1,13 +1,11 @@
%%%-------------------------------------------------------------------
%%% File : ibrowse_lb.erl
%%% Author : chandru <chandrashekhar.mullaparthi@t-mobile.co.uk>
-%%% Description :
+%%% Description :
%%%
%%% Created : 6 Mar 2008 by chandru <chandrashekhar.mullaparthi@t-mobile.co.uk>
%%%-------------------------------------------------------------------
-module(ibrowse_lb).
-
--vsn('$Id: ibrowse_lb.erl,v 1.2 2009/07/01 22:43:19 chandrusf Exp $ ').
-author(chandru).
-behaviour(gen_server).
%%--------------------------------------------------------------------
@@ -101,14 +99,14 @@ spawn_connection(Lb_pid, Url,
% #state{max_sessions = Max_sess,
% ets_tid = Tid,
% max_pipeline_size = Max_pipe_sz,
-% num_cur_sessions = Num} = State)
+% num_cur_sessions = Num} = State)
% when Num >= Max ->
% Reply = find_best_connection(Tid),
% {reply, sorry_dude_reuse, State};
%% Update max_sessions in #state with supplied value
handle_call({spawn_connection, _Url, Max_sess, Max_pipe, _}, _From,
- #state{num_cur_sessions = Num} = State)
+ #state{num_cur_sessions = Num} = State)
when Num >= Max_sess ->
State_1 = maybe_create_ets(State),
Reply = find_best_connection(State_1#state.ets_tid, Max_pipe),
diff --git a/src/ibrowse/ibrowse_lib.erl b/src/ibrowse/ibrowse_lib.erl
index 6c7b1546..fbb9c34b 100644
--- a/src/ibrowse/ibrowse_lib.erl
+++ b/src/ibrowse/ibrowse_lib.erl
@@ -1,11 +1,10 @@
%%% File : ibrowse_lib.erl
%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
-%%% Description :
+%%% Description :
%%% Created : 27 Feb 2004 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
%% @doc Module with a few useful functions
-module(ibrowse_lib).
--vsn('$Id: ibrowse_lib.erl,v 1.6 2008/03/27 01:35:50 chandrusf Exp $ ').
-author('chandru').
-ifdef(debug).
-compile(export_all).
@@ -14,22 +13,22 @@
-include("ibrowse.hrl").
-export([
- get_trace_status/2,
- do_trace/2,
- do_trace/3,
- url_encode/1,
- decode_rfc822_date/1,
- status_code/1,
- dec2hex/2,
- drv_ue/1,
- drv_ue/2,
- encode_base64/1,
- decode_base64/1,
- get_value/2,
- get_value/3,
- parse_url/1,
- printable_date/0
- ]).
+ get_trace_status/2,
+ do_trace/2,
+ do_trace/3,
+ url_encode/1,
+ decode_rfc822_date/1,
+ status_code/1,
+ dec2hex/2,
+ drv_ue/1,
+ drv_ue/2,
+ encode_base64/1,
+ decode_base64/1,
+ get_value/2,
+ get_value/3,
+ parse_url/1,
+ printable_date/0
+ ]).
get_trace_status(Host, Port) ->
ibrowse:get_config_value({trace, Host, Port}, false).
@@ -39,10 +38,10 @@ drv_ue(Str) ->
drv_ue(Str, Port).
drv_ue(Str, Port) ->
case erlang:port_control(Port, 1, Str) of
- [] ->
- Str;
- Res ->
- Res
+ [] ->
+ Str;
+ Res ->
+ Res
end.
%% @doc URL-encodes a string based on RFC 1738. Returns a flat list.
@@ -72,10 +71,10 @@ d2h(N) -> N+$a-10.
decode_rfc822_date(String) when is_list(String) ->
case catch decode_rfc822_date_1(string:tokens(String, ", \t\r\n")) of
- {'EXIT', _} ->
- {error, invalid_date};
- Res ->
- Res
+ {'EXIT', _} ->
+ {error, invalid_date};
+ Res ->
+ Res
end.
% TODO: Have to handle the Zone
@@ -86,15 +85,15 @@ decode_rfc822_date_1([Day,Month,Year, Time,_Zone]) ->
MonthI = month_int(Month),
YearI = list_to_integer(Year),
TimeTup = case string:tokens(Time, ":") of
- [H,M] ->
- {list_to_integer(H),
- list_to_integer(M),
- 0};
- [H,M,S] ->
- {list_to_integer(H),
- list_to_integer(M),
- list_to_integer(S)}
- end,
+ [H,M] ->
+ {list_to_integer(H),
+ list_to_integer(M),
+ 0};
+ [H,M,S] ->
+ {list_to_integer(H),
+ list_to_integer(M),
+ list_to_integer(S)}
+ end,
{{YearI,MonthI,DayI}, TimeTup}.
month_int("Jan") -> 1;
@@ -110,7 +109,7 @@ month_int("Oct") -> 10;
month_int("Nov") -> 11;
month_int("Dec") -> 12.
-%% @doc Given a status code, returns an atom describing the status code.
+%% @doc Given a status code, returns an atom describing the status code.
%% @spec status_code(StatusCode::status_code()) -> StatusDescription
%% status_code() = string() | integer()
%% StatusDescription = atom()
@@ -178,86 +177,25 @@ dec2hex(M,N,Ack) -> dec2hex(M-1,N bsr 4,[d2h(N band 15)|Ack]).
%% In = string() | binary()
%% Out = string() | binary()
encode_base64(List) when is_list(List) ->
- encode_base64_1(list_to_binary(List));
+ binary_to_list(base64:encode(List));
encode_base64(Bin) when is_binary(Bin) ->
- List = encode_base64_1(Bin),
- list_to_binary(List).
-
-encode_base64_1(<<A:6, B:6, C:6, D:6, Rest/binary>>) ->
- [int_to_b64(A), int_to_b64(B),
- int_to_b64(C), int_to_b64(D) | encode_base64_1(Rest)];
-encode_base64_1(<<A:6, B:6, C:4>>) ->
- [int_to_b64(A), int_to_b64(B), int_to_b64(C bsl 2), $=];
-encode_base64_1(<<A:6, B:2>>) ->
- [int_to_b64(A), int_to_b64(B bsl 4), $=, $=];
-encode_base64_1(<<>>) ->
- [].
+ base64:encode(Bin).
%% @doc Implements the base64 decoding algorithm. The output data type matches the input data type.
%% @spec decode_base64(In) -> Out | exit({error, invalid_input})
%% In = string() | binary()
%% Out = string() | binary()
decode_base64(List) when is_list(List) ->
- decode_base64_1(List, []);
+ binary_to_list(base64:decode(List));
decode_base64(Bin) when is_binary(Bin) ->
- List = decode_base64_1(binary_to_list(Bin), []),
- list_to_binary(List).
-
-decode_base64_1([H | T], Acc) when ((H == $\t) or
- (H == 32) or
- (H == $\r) or
- (H == $\n)) ->
- decode_base64_1(T, Acc);
-
-decode_base64_1([$=, $=], Acc) ->
- lists:reverse(Acc);
-decode_base64_1([$=, _ | _], _Acc) ->
- exit({error, invalid_input});
-
-decode_base64_1([A1, B1, $=, $=], Acc) ->
- A = b64_to_int(A1),
- B = b64_to_int(B1),
- Oct1 = (A bsl 2) bor (B bsr 4),
- decode_base64_1([], [Oct1 | Acc]);
-decode_base64_1([A1, B1, C1, $=], Acc) ->
- A = b64_to_int(A1),
- B = b64_to_int(B1),
- C = b64_to_int(C1),
- Oct1 = (A bsl 2) bor (B bsr 4),
- Oct2 = ((B band 16#f) bsl 6) bor (C bsr 2),
- decode_base64_1([], [Oct2, Oct1 | Acc]);
-decode_base64_1([A1, B1, C1, D1 | T], Acc) ->
- A = b64_to_int(A1),
- B = b64_to_int(B1),
- C = b64_to_int(C1),
- D = b64_to_int(D1),
- Oct1 = (A bsl 2) bor (B bsr 4),
- Oct2 = ((B band 16#f) bsl 4) bor (C bsr 2),
- Oct3 = ((C band 2#11) bsl 6) bor D,
- decode_base64_1(T, [Oct3, Oct2, Oct1 | Acc]);
-decode_base64_1([], Acc) ->
- lists:reverse(Acc).
-
-%% Taken from httpd_util.erl
-int_to_b64(X) when X >= 0, X =< 25 -> X + $A;
-int_to_b64(X) when X >= 26, X =< 51 -> X - 26 + $a;
-int_to_b64(X) when X >= 52, X =< 61 -> X - 52 + $0;
-int_to_b64(62) -> $+;
-int_to_b64(63) -> $/.
-
-%% Taken from httpd_util.erl
-b64_to_int(X) when X >= $A, X =< $Z -> X - $A;
-b64_to_int(X) when X >= $a, X =< $z -> X - $a + 26;
-b64_to_int(X) when X >= $0, X =< $9 -> X - $0 + 52;
-b64_to_int($+) -> 62;
-b64_to_int($/) -> 63.
+ base64:decode(Bin).
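
The hand-rolled base64 routines are replaced by delegation to OTP's base64 module, while keeping the list-in/list-out, binary-in/binary-out contract of the old code, e.g.:

    "dXNlcjpzZWNyZXQ=" = ibrowse_lib:encode_base64("user:secret"),
    <<"user:secret">> = ibrowse_lib:decode_base64(<<"dXNlcjpzZWNyZXQ=">>).
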
get_value(Tag, TVL, DefVal) ->
case lists:keysearch(Tag, 1, TVL) of
- false ->
- DefVal;
- {value, {_, Val}} ->
- Val
+ false ->
+ DefVal;
+ {value, {_, Val}} ->
+ Val
end.
get_value(Tag, TVL) ->
@@ -271,92 +209,120 @@ parse_url([$:, $/, $/ | _], get_protocol, Url, []) ->
{invalid_uri_1, Url};
parse_url([$:, $/, $/ | T], get_protocol, Url, TmpAcc) ->
Prot = list_to_atom(lists:reverse(TmpAcc)),
- parse_url(T, get_username,
- Url#url{protocol = Prot},
- []);
-parse_url([$/ | T], get_username, Url, TmpAcc) ->
+ parse_url(T, get_username,
+ Url#url{protocol = Prot},
+ []);
+parse_url([H | T], get_username, Url, TmpAcc) when H == $/;
+ H == $? ->
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
%% No username/password. No port number
Url#url{host = lists:reverse(TmpAcc),
- port = default_port(Url#url.protocol),
- path = [$/ | T]};
+ port = default_port(Url#url.protocol),
+ path = Path};
parse_url([$: | T], get_username, Url, TmpAcc) ->
%% It is possible that no username/password has been
%% specified. But we'll continue with the assumption that there is
%% a username/password. If we encounter a '@' later on, there is a
%% username/password indeed. If we encounter a '/', it was
%% actually the hostname
- parse_url(T, get_password,
- Url#url{username = lists:reverse(TmpAcc)},
- []);
+ parse_url(T, get_password,
+ Url#url{username = lists:reverse(TmpAcc)},
+ []);
parse_url([$@ | T], get_username, Url, TmpAcc) ->
- parse_url(T, get_host,
- Url#url{username = lists:reverse(TmpAcc),
- password = ""},
- []);
+ parse_url(T, get_host,
+ Url#url{username = lists:reverse(TmpAcc),
+ password = ""},
+ []);
parse_url([$@ | T], get_password, Url, TmpAcc) ->
- parse_url(T, get_host,
- Url#url{password = lists:reverse(TmpAcc)},
- []);
-parse_url([$/ | T], get_password, Url, TmpAcc) ->
+ parse_url(T, get_host,
+ Url#url{password = lists:reverse(TmpAcc)},
+ []);
+parse_url([H | T], get_password, Url, TmpAcc) when H == $/;
+ H == $? ->
%% Ok, what we thought was the username/password was the hostname
%% and portnumber
#url{username=User} = Url,
Port = list_to_integer(lists:reverse(TmpAcc)),
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
Url#url{host = User,
- port = Port,
- username = undefined,
- password = undefined,
- path = [$/ | T]};
+ port = Port,
+ username = undefined,
+ password = undefined,
+ path = Path};
parse_url([$: | T], get_host, #url{} = Url, TmpAcc) ->
- parse_url(T, get_port,
- Url#url{host = lists:reverse(TmpAcc)},
- []);
-parse_url([$/ | T], get_host, #url{protocol=Prot} = Url, TmpAcc) ->
+ parse_url(T, get_port,
+ Url#url{host = lists:reverse(TmpAcc)},
+ []);
+parse_url([H | T], get_host, #url{protocol=Prot} = Url, TmpAcc) when H == $/;
+ H == $? ->
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
Url#url{host = lists:reverse(TmpAcc),
- port = default_port(Prot),
- path = [$/ | T]};
-parse_url([$/ | T], get_port, #url{protocol=Prot} = Url, TmpAcc) ->
+ port = default_port(Prot),
+ path = Path};
+parse_url([H | T], get_port, #url{protocol=Prot} = Url, TmpAcc) when H == $/;
+ H == $? ->
+ Path = case H of
+ $/ ->
+ [$/ | T];
+ $? ->
+ [$/, $? | T]
+ end,
Port = case TmpAcc of
- [] ->
- default_port(Prot);
- _ ->
- list_to_integer(lists:reverse(TmpAcc))
- end,
- Url#url{port = Port, path = [$/ | T]};
+ [] ->
+ default_port(Prot);
+ _ ->
+ list_to_integer(lists:reverse(TmpAcc))
+ end,
+ Url#url{port = Port, path = Path};
parse_url([H | T], State, Url, TmpAcc) ->
parse_url(T, State, Url, [H | TmpAcc]);
parse_url([], get_host, Url, TmpAcc) when TmpAcc /= [] ->
Url#url{host = lists:reverse(TmpAcc),
- port = default_port(Url#url.protocol),
- path = "/"};
+ port = default_port(Url#url.protocol),
+ path = "/"};
parse_url([], get_username, Url, TmpAcc) when TmpAcc /= [] ->
Url#url{host = lists:reverse(TmpAcc),
- port = default_port(Url#url.protocol),
- path = "/"};
+ port = default_port(Url#url.protocol),
+ path = "/"};
parse_url([], get_port, #url{protocol=Prot} = Url, TmpAcc) ->
Port = case TmpAcc of
- [] ->
- default_port(Prot);
- _ ->
- list_to_integer(lists:reverse(TmpAcc))
- end,
- Url#url{port = Port,
- path = "/"};
+ [] ->
+ default_port(Prot);
+ _ ->
+ list_to_integer(lists:reverse(TmpAcc))
+ end,
+ Url#url{port = Port,
+ path = "/"};
parse_url([], get_password, Url, TmpAcc) ->
%% Ok, what we thought was the username/password was the hostname
%% and portnumber
#url{username=User} = Url,
Port = case TmpAcc of
- [] ->
- default_port(Url#url.protocol);
- _ ->
- list_to_integer(lists:reverse(TmpAcc))
- end,
+ [] ->
+ default_port(Url#url.protocol);
+ _ ->
+ list_to_integer(lists:reverse(TmpAcc))
+ end,
Url#url{host = User,
- port = Port,
- username = undefined,
- password = undefined,
- path = "/"};
+ port = Port,
+ username = undefined,
+ password = undefined,
+ path = "/"};
parse_url([], State, Url, TmpAcc) ->
{invalid_uri_2, State, Url, TmpAcc}.
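
The new clauses that accept $? wherever only $/ was previously allowed mean a URL whose query string follows the host directly now parses into a "/?..." path. A quick sketch (the #url{} record comes from ibrowse.hrl):

    #url{host = "www.example.com", port = 80, path = "/?q=1"} =
        ibrowse_lib:parse_url("http://www.example.com?q=1").
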
@@ -387,13 +353,13 @@ do_trace(Fmt, Args) ->
-ifdef(DEBUG).
do_trace(_, Fmt, Args) ->
io:format("~s -- (~s) - "++Fmt,
- [printable_date(),
- get(ibrowse_trace_token) | Args]).
+ [printable_date(),
+ get(ibrowse_trace_token) | Args]).
-else.
do_trace(true, Fmt, Args) ->
io:format("~s -- (~s) - "++Fmt,
- [printable_date(),
- get(ibrowse_trace_token) | Args]);
+ [printable_date(),
+ get(ibrowse_trace_token) | Args]);
do_trace(_, _, _) ->
ok.
-endif.
diff --git a/src/ibrowse/ibrowse_sup.erl b/src/ibrowse/ibrowse_sup.erl
index 1b9b863a..ace33d16 100644
--- a/src/ibrowse/ibrowse_sup.erl
+++ b/src/ibrowse/ibrowse_sup.erl
@@ -1,13 +1,11 @@
%%%-------------------------------------------------------------------
%%% File : ibrowse_sup.erl
%%% Author : Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
-%%% Description :
+%%% Description :
%%%
%%% Created : 15 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
%%%-------------------------------------------------------------------
-module(ibrowse_sup).
--vsn('$Id: ibrowse_sup.erl,v 1.1 2005/05/05 22:28:28 chandrusf Exp $ ').
-
-behaviour(supervisor).
%%--------------------------------------------------------------------
%% Include files
@@ -53,7 +51,7 @@ start_link() ->
%% Func: init/1
%% Returns: {ok, {SupFlags, [ChildSpec]}} |
%% ignore |
-%% {error, Reason}
+%% {error, Reason}
%%--------------------------------------------------------------------
init([]) ->
AChild = {ibrowse,{ibrowse,start_link,[]},
diff --git a/src/ibrowse/ibrowse_test.erl b/src/ibrowse/ibrowse_test.erl
index 3dc66ecf..00b0244f 100644
--- a/src/ibrowse/ibrowse_test.erl
+++ b/src/ibrowse/ibrowse_test.erl
@@ -4,7 +4,6 @@
%%% Created : 14 Oct 2003 by Chandrashekhar Mullaparthi <chandrashekhar.mullaparthi@t-mobile.co.uk>
-module(ibrowse_test).
--vsn('$Id: ibrowse_test.erl,v 1.4 2009/07/01 22:43:19 chandrusf Exp $ ').
-export([
load_test/3,
send_reqs_1/3,
@@ -193,6 +192,7 @@ dump_errors(Key, Iod) ->
{"http://www.google.co.uk", get},
{"http://www.google.com", get},
{"http://www.google.com", options},
+ {"https://mail.google.com", get},
{"http://www.sun.com", get},
{"http://www.oracle.com", get},
{"http://www.bbc.co.uk", get},
@@ -223,9 +223,10 @@ unit_tests() ->
unit_tests([]).
unit_tests(Options) ->
+ application:start(ssl),
Options_1 = Options ++ [{connect_timeout, 5000}],
{Pid, Ref} = erlang:spawn_monitor(?MODULE, unit_tests_1, [self(), Options_1]),
- receive
+ receive
{done, Pid} ->
ok;
{'DOWN', Ref, _, _, Info} ->
@@ -293,7 +294,7 @@ compare_responses(R1, R2, R3) ->
do_async_req_list(Url, Method, Options) ->
{Pid,_} = erlang:spawn_monitor(?MODULE, i_do_async_req_list,
- [self(), Url, Method,
+ [self(), Url, Method,
Options ++ [{stream_chunk_size, 1000}]]),
io:format("Spawned process ~p~n", [Pid]),
wait_for_resp(Pid).
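
Since the test list now includes an https URL and unit_tests/1 starts the ssl application, the new direct SSL support can be exercised the same way from a shell where ibrowse is already running; on newer OTP releases crypto (and possibly public_key) may need starting first:

    application:start(ssl),
    ibrowse:send_req("https://mail.google.com", [], get).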