Diffstat (limited to 'src')
-rw-r--r--   src/couchdb/couch_db.erl      | 16
-rw-r--r--   src/couchdb/couch_rep_att.erl |  8
2 files changed, 5 insertions, 19 deletions
diff --git a/src/couchdb/couch_db.erl b/src/couchdb/couch_db.erl
index b445046f..80f0d7bf 100644
--- a/src/couchdb/couch_db.erl
+++ b/src/couchdb/couch_db.erl
@@ -901,20 +901,10 @@ with_stream(Fd, #att{md5=InMd5,type=Type,encoding=Enc}=Att, Fun) ->
 
 write_streamed_attachment(_Stream, _F, 0) ->
     ok;
-% LenLeft might be different from the total size of what function F returns.
-% This happens when doing a pull replication of compressed attachments from a
-% 0.11.0 server, where LenLeft will match the uncompressed size but we end up
-% receiving the attachment compressed (therefore a size different from LenLeft).
-% This is because 0.11.0 doesn't understand the query parameter
-% "?att_encoding_info=true" when we do a doc request (GET /somedb/somedoc).
 write_streamed_attachment(Stream, F, LenLeft) ->
-    case F() of
-    Bin when is_binary(Bin) ->
-        ok = couch_stream:write(Stream, Bin),
-        write_streamed_attachment(Stream, F, LenLeft - size(Bin));
-    eof ->
-        ok
-    end.
+    Bin = F(),
+    ok = couch_stream:write(Stream, Bin),
+    write_streamed_attachment(Stream, F, LenLeft - size(Bin)).
 
 enum_docs_since_reduce_to_count(Reds) ->
     couch_btree:final_reduce(
diff --git a/src/couchdb/couch_rep_att.erl b/src/couchdb/couch_rep_att.erl
index 367afbb5..28b8945c 100644
--- a/src/couchdb/couch_rep_att.erl
+++ b/src/couchdb/couch_rep_att.erl
@@ -81,12 +81,8 @@ receive_data(Ref, ReqId, ContentEncoding) ->
         % ?LOG_DEBUG("got ~p bytes for ~p", [size(Data), ReqId]),
         Data;
     {ibrowse_async_response_end, ReqId} ->
-        % This means ibrowse received all the data it was supposed to.
-        % In case of not receiving the whole data, due to a network link
-        % failure for example, we would have received an error message.
-        % In other words, this message doesn't represent an error - look into
-        % ibrowse_http_client.erl.
-        eof
+        ?LOG_ERROR("streaming att. ended but more data requested ~p", [ReqId]),
+        throw({attachment_request_failed, premature_end})
     after 31000 ->
         throw({attachment_request_failed, timeout})
     end.
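
For illustration only (not part of the commit): a minimal Erlang sketch, with assumed module and function names, of the contract the changed code enforces. The supplier fun must return a binary on every call, the write loop stops purely on the declared byte count, and a stream that ends early now surfaces on the replicator side as a thrown error rather than an 'eof' atom.

%% att_loop_sketch.erl -- hypothetical example module, not from the CouchDB tree.
-module(att_loop_sketch).
-export([run/0]).

%% Mirrors the shape of the simplified write_streamed_attachment/3: there is
%% no 'eof' clause, so anything other than a binary from F() crashes the writer.
write_all(_F, 0) ->
    ok;
write_all(F, LenLeft) when LenLeft > 0 ->
    Bin = F(),
    write_all(F, LenLeft - size(Bin)).

run() ->
    %% Supplier that always hands back a 4-byte chunk; the loop relies solely
    %% on the declared attachment length (16 bytes here) to know when to stop.
    F = fun() -> <<"data">> end,
    ok = write_all(F, 16),
    %% A source that ends before LenLeft is consumed would, under the new
    %% receive_data/3, throw {attachment_request_failed, premature_end}
    %% instead of returning 'eof' to this loop.
    ok.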