From 8b95942a2be4a65222b1758f2cb63b9dd86ea69d Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Thu, 13 Aug 2015 15:10:37 -0300
Subject: [bug] process put after last BadRequest check

If we check for a BadRequest after calling meth_put, we will end up in a
scenario where the server replies with an error, but everything got
processed.
---
 server/src/leap/soledad/server/__init__.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/server/src/leap/soledad/server/__init__.py b/server/src/leap/soledad/server/__init__.py
index 7a03f6fb..1b795016 100644
--- a/server/src/leap/soledad/server/__init__.py
+++ b/server/src/leap/soledad/server/__init__.py
@@ -238,6 +238,7 @@ class HTTPInvocationByMethodWithBody(
         if content_type == 'application/x-soledad-sync-put':
             meth_put = self._lookup('%s_put' % method)
             meth_end = self._lookup('%s_end' % method)
+            entries = []
             while True:
                 line = body_getline()
                 entry = line.strip()
@@ -246,9 +247,11 @@ class HTTPInvocationByMethodWithBody(
                 if not entry or not comma:  # empty or no prec comma
                     raise http_app.BadRequest
                 entry, comma = utils.check_and_strip_comma(entry)
-                meth_put({}, entry)
+                entries.append(entry)
             if comma or body_getline():  # extra comma or data
                 raise http_app.BadRequest
+            for entry in entries:
+                meth_put({}, entry)
             return meth_end()
         # handle outgoing documents
         elif content_type == 'application/x-soledad-sync-get':
-- 
cgit v1.2.3


From fd6c054bf11deba4ca5680cc406db7d4ce98d58d Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Fri, 14 Aug 2015 22:08:27 -0300
Subject: [refactor] split put_doc_if_newer in two operations

Those two operations were mixed in put_doc_if_newer; extracting them
should make the code clearer.
---
 common/src/leap/soledad/common/couch.py | 37 ++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 10 deletions(-)

diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py
index 6c28e0be..52fc2169 100644
--- a/common/src/leap/soledad/common/couch.py
+++ b/common/src/leap/soledad/common/couch.py
@@ -1140,9 +1140,11 @@ class CouchDatabase(CommonBackend):
         :param sync_id: The id of the current sync session.
         :type sync_id: str
         """
-        self._do_set_replica_gen_and_trans_id(
-            other_replica_uid, other_generation, other_transaction_id,
-            number_of_docs=number_of_docs, doc_idx=doc_idx, sync_id=sync_id)
+        if other_replica_uid is not None and other_generation is not None:
+            self._do_set_replica_gen_and_trans_id(
+                other_replica_uid, other_generation, other_transaction_id,
+                number_of_docs=number_of_docs, doc_idx=doc_idx,
+                sync_id=sync_id)

     def _do_set_replica_gen_and_trans_id(
             self, other_replica_uid, other_generation, other_transaction_id,
@@ -1392,6 +1394,27 @@ class CouchDatabase(CommonBackend):
             'converged', at_gen is the insertion/current generation.
         :rtype: (str, int)
         """
+        self._save_source_info(replica_uid, replica_gen,
+                               replica_trans_id, number_of_docs,
+                               doc_idx, sync_id)
+        state = self._process_incoming_doc(doc, save_conflict)
+        return state, self._get_generation()
+
+    def _save_source_info(self, replica_uid, replica_gen, replica_trans_id,
+                          number_of_docs, doc_idx, sync_id):
+        """
+        Validate and save source information.
+        """
+        self._validate_source(replica_uid, replica_gen, replica_trans_id)
+        self._set_replica_gen_and_trans_id(
+            replica_uid, replica_gen, replica_trans_id,
+            number_of_docs=number_of_docs, doc_idx=doc_idx,
+            sync_id=sync_id)
+
+    def _process_incoming_doc(self, doc, save_conflict):
+        """
+        Check the document, save it and return its state.
+ """ cur_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) # at this point, `doc` has arrived from the other syncing party, and # we will decide what to do with it. @@ -1408,7 +1431,6 @@ class CouchDatabase(CommonBackend): cur_vcr = vectorclock.VectorClockRev(None) else: cur_vcr = vectorclock.VectorClockRev(cur_doc.rev) - self._validate_source(replica_uid, replica_gen, replica_trans_id) if doc_vcr.is_newer(cur_vcr): rev = doc.rev self._prune_conflicts(doc, doc_vcr) @@ -1437,11 +1459,6 @@ class CouchDatabase(CommonBackend): state = 'conflicted' if save_conflict: self._force_doc_sync_conflict(doc) - if replica_uid is not None and replica_gen is not None: - self._set_replica_gen_and_trans_id( - replica_uid, replica_gen, replica_trans_id, - number_of_docs=number_of_docs, doc_idx=doc_idx, - sync_id=sync_id) # update info old_doc.rev = doc.rev if doc.is_tombstone(): @@ -1449,7 +1466,7 @@ class CouchDatabase(CommonBackend): else: old_doc.content = doc.content old_doc.has_conflicts = doc.has_conflicts - return state, self._get_generation() + return state def get_docs(self, doc_ids, check_for_conflicts=True, include_deleted=False): -- cgit v1.2.3 From e5d2beafe62c2f654bf39ba6cbfa9a2e7d9c9c8b Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 14 Aug 2015 23:18:19 -0300 Subject: [refactor] extract logging and emitting on target Creating a message, emitting an event and logging afterwards is a single operation outside of of those method's responsabilities. --- client/src/leap/soledad/client/http_target.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/client/src/leap/soledad/client/http_target.py b/client/src/leap/soledad/client/http_target.py index a6ef2b0d..c9670711 100644 --- a/client/src/leap/soledad/client/http_target.py +++ b/client/src/leap/soledad/client/http_target.py @@ -418,12 +418,7 @@ class SoledadHTTPSyncTarget(SyncTarget): if self._defer_encryption: self._sync_enc_pool.delete_encrypted_doc( doc.doc_id, doc.rev) - - msg = "%d/%d" % (idx, total) - content = {'sent': idx, 'total': total} - emit(SOLEDAD_SYNC_SEND_STATUS, content) - logger.debug("Sync send status: %s" % msg) - + _emit_send(idx, total) response_dict = json.loads(result)[0] gen_after_send = response_dict['new_generation'] trans_id_after_send = response_dict['new_transaction_id'] @@ -619,10 +614,7 @@ class SoledadHTTPSyncTarget(SyncTarget): # end of symmetric decryption # ------------------------------------------------------------- self._received_docs += 1 - msg = "%d/%d" % (self._received_docs, total) - content = {'received': self._received_docs, 'total': total} - emit(SOLEDAD_SYNC_RECEIVE_STATUS, content) - logger.debug("Sync receive status: %s" % msg) + _emit_received(self._received_docs, total) return number_of_changes, new_generation, new_transaction_id def _parse_received_doc_response(self, response): @@ -709,3 +701,17 @@ def _unauth_to_invalid_token_error(failure): if failure.getErrorMessage() == "401 Unauthorized": raise InvalidAuthTokenError return failure + + +def _emit_send(idx, total): + msg = "%d/%d" % (idx, total) + emit( + SOLEDAD_SYNC_SEND_STATUS, + "Soledad sync send status: %s" % msg) + logger.debug("Sync send status: %s" % msg) + + +def _emit_received(received_docs, total): + msg = "%d/%d" % (received_docs, total) + emit(SOLEDAD_SYNC_RECEIVE_STATUS, msg) + logger.debug("Sync receive status: %s" % msg) -- cgit v1.2.3 From a734b994446a8c0c3cadf175a71f0d61d18c408b Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 14 Aug 2015 23:20:59 -0300 
Subject: [refactor] simplify the case of a brand new doc

If a doc is not present in the database at all, it will simply get
inserted. This commit makes this clear and skips unnecessary checks.
---
 common/src/leap/soledad/common/couch.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py
index 52fc2169..d61eac51 100644
--- a/common/src/leap/soledad/common/couch.py
+++ b/common/src/leap/soledad/common/couch.py
@@ -1421,16 +1421,16 @@ class CouchDatabase(CommonBackend):
         # First, we prepare the arriving doc to update couch database.
         old_doc = doc
         doc = self._factory(doc.doc_id, doc.rev, doc.get_json())
-        if cur_doc is not None:
+        if cur_doc is None:
+            self._put_doc(cur_doc, doc)
+            return 'inserted'
+        else:
             doc.couch_rev = cur_doc.couch_rev
             # fetch conflicts because we will eventually manipulate them
             doc._ensure_fetch_conflicts(self._get_conflicts)
         # from now on, it works just like u1db sqlite backend
         doc_vcr = vectorclock.VectorClockRev(doc.rev)
-        if cur_doc is None:
-            cur_vcr = vectorclock.VectorClockRev(None)
-        else:
-            cur_vcr = vectorclock.VectorClockRev(cur_doc.rev)
+        cur_vcr = vectorclock.VectorClockRev(cur_doc.rev)
         if doc_vcr.is_newer(cur_vcr):
             rev = doc.rev
             self._prune_conflicts(doc, doc_vcr)
-- 
cgit v1.2.3


From dd70bec46df36c98b959246394a438759d55ba05 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Sat, 15 Aug 2015 01:53:17 -0300
Subject: [refactor] simplify entity and content type

_prepare was being used to concatenate and prepare the request body to
send or receive data in the format expected by the server. This
behavior wasn't clear, so I added a new class to abstract it out.

Content type and auth headers were being copied around between methods.
Now the request method accepts a content_type parameter to remove this
duplication.
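
For illustration, a rough sketch of the wire format the new class
builds (the Entries class and its update method are from the diff
below; the exact JSON key order may differ, since it is whatever
json.dumps produces):

    entries = Entries()                     # body starts as '['
    entries.update(sync_id='sync-1')        # header entry, no comma
    entries.update(',', id='d1', rev='r1')  # doc entry, comma-prefixed
    body = str(entries)
    # '[\r\n{"sync_id": "sync-1"},\r\n{"id": "d1", "rev": "r1"}\r\n]'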
---
 client/src/leap/soledad/client/http_target.py | 85 +++++++++++++++------------
 1 file changed, 46 insertions(+), 39 deletions(-)

diff --git a/client/src/leap/soledad/client/http_target.py b/client/src/leap/soledad/client/http_target.py
index c9670711..74ff3311 100644
--- a/client/src/leap/soledad/client/http_target.py
+++ b/client/src/leap/soledad/client/http_target.py
@@ -231,6 +231,10 @@ class SoledadHTTPSyncTarget(SyncTarget):
         b64_token = base64.b64encode(auth)
         self._auth_header = {'Authorization': ['Token %s' % b64_token]}

+    @property
+    def _base_header(self):
+        return self._auth_header.copy() if self._auth_header else {}
+
     @property
     def _defer_encryption(self):
         return self._sync_enc_pool is not None
@@ -257,7 +261,7 @@ class SoledadHTTPSyncTarget(SyncTarget):
             source_replica_last_known_transaction_id)
         :rtype: twisted.internet.defer.Deferred
         """
-        raw = yield self._http_request(self._url, headers=self._auth_header)
+        raw = yield self._http_request(self._url)
         res = json.loads(raw)
         defer.returnValue((
             res['target_replica_uid'],
@@ -300,13 +304,11 @@ class SoledadHTTPSyncTarget(SyncTarget):
             'generation': source_replica_generation,
             'transaction_id': source_replica_transaction_id
         })
-        headers = self._auth_header.copy()
-        headers.update({'content-type': ['application/json']})
         return self._http_request(
             self._url,
             method='PUT',
-            headers=headers,
-            body=data)
+            body=data,
+            content_type='application/json')

     @defer.inlineCallbacks
     def sync_exchange(self, docs_by_generation, source_replica_uid,
@@ -386,11 +388,6 @@ class SoledadHTTPSyncTarget(SyncTarget):
     # methods to send docs
     #

-    def _prepare(self, comma, entries, **dic):
-        entry = comma + '\r\n' + json.dumps(dic)
-        entries.append(entry)
-        return len(entry)
-
     @defer.inlineCallbacks
     def _send_docs(self, docs_by_generation, last_known_generation,
                    last_known_trans_id, sync_id):
@@ -398,12 +395,9 @@ class SoledadHTTPSyncTarget(SyncTarget):
         if not docs_by_generation:
             defer.returnValue([None, None])

-        headers = self._auth_header.copy()
-        headers.update({'content-type': ['application/x-soledad-sync-put']})
         # add remote replica metadata to the request
-        first_entries = ['[']
-        self._prepare(
-            '', first_entries,
+        header_entry = Entries()
+        header_entry.update(
             last_known_generation=last_known_generation,
             last_known_trans_id=last_known_trans_id,
             sync_id=sync_id,
@@ -413,7 +407,7 @@ class SoledadHTTPSyncTarget(SyncTarget):
         for doc, gen, trans_id in docs_by_generation:
             idx += 1
             result = yield self._send_one_doc(
-                headers, first_entries, doc,
+                header_entry, doc,
                 gen, trans_id, total, idx)
             if self._defer_encryption:
                 self._sync_enc_pool.delete_encrypted_doc(
@@ -425,23 +419,21 @@ class SoledadHTTPSyncTarget(SyncTarget):
         defer.returnValue([gen_after_send, trans_id_after_send])

     @defer.inlineCallbacks
-    def _send_one_doc(self, headers, first_entries, doc, gen, trans_id,
+    def _send_one_doc(self, header_entry, doc, gen, trans_id,
                       number_of_docs, doc_idx):
-        entries = first_entries[:]
+        entries = header_entry.copy()
         # add the document to the request
         content = yield self._encrypt_doc(doc)
-        self._prepare(
-            ',', entries,
+        entries.update(
+            ',',
             id=doc.doc_id, rev=doc.rev, content=content, gen=gen,
             trans_id=trans_id, number_of_docs=number_of_docs,
             doc_idx=doc_idx)
-        entries.append('\r\n]')
-        data = ''.join(entries)
         result = yield self._http_request(
             self._url,
             method='POST',
-            headers=headers,
-            body=data)
+            body=str(entries),
+            content_type='application/x-soledad-sync-put')
         defer.returnValue(result)

     def _encrypt_doc(self, doc):
@@ -486,9 +478,6 @@ class SoledadHTTPSyncTarget(SyncTarget):
         if defer_decryption:
             self._setup_sync_decr_pool()

-        headers = self._auth_header.copy()
-        headers.update({'content-type': ['application/x-soledad-sync-get']})
-
         # ---------------------------------------------------------------------
         # maybe receive the first document
         # ---------------------------------------------------------------------
@@ -498,7 +487,7 @@ class SoledadHTTPSyncTarget(SyncTarget):
         # information comes as metadata to each request.

         doc = yield self._receive_one_doc(
-            headers, last_known_generation, last_known_trans_id,
+            last_known_generation, last_known_trans_id,
             sync_id, 0)
         self._received_docs = 0
         number_of_changes, ngen, ntrans = self._insert_received_doc(doc, 1, 1)
@@ -523,7 +512,7 @@ class SoledadHTTPSyncTarget(SyncTarget):
         deferreds = []
         while received < number_of_changes:
             d = self._receive_one_doc(
-                headers, last_known_generation,
+                last_known_generation,
                 last_known_trans_id, sync_id, received)
             d.addCallback(
                 self._insert_received_doc,
@@ -547,26 +536,24 @@ class SoledadHTTPSyncTarget(SyncTarget):

         defer.returnValue([new_generation, new_transaction_id])

-    def _receive_one_doc(self, headers, last_known_generation,
+    def _receive_one_doc(self, last_known_generation,
                          last_known_trans_id, sync_id, received):
-        entries = ['[']
+        entries = Entries()
         # add remote replica metadata to the request
-        self._prepare(
-            '', entries,
+        entries.update(
             last_known_generation=last_known_generation,
             last_known_trans_id=last_known_trans_id,
             sync_id=sync_id,
             ensure=self._ensure_callback is not None)
         # inform server of how many documents have already been received
-        self._prepare(
-            ',', entries, received=received)
-        entries.append('\r\n]')
+        entries.update(
+            ',', received=received)
         # send headers
         return self._http_request(
             self._url,
             method='POST',
-            headers=headers,
-            body=''.join(entries))
+            body=str(entries),
+            content_type='application/x-soledad-sync-get')

     def _insert_received_doc(self, response, idx, total):
         """
@@ -680,7 +667,10 @@ class SoledadHTTPSyncTarget(SyncTarget):
                 insert_doc_cb=self._insert_doc_cb,
                 source_replica_uid=self.source_replica_uid)

-    def _http_request(self, url, method='GET', body=None, headers={}):
+    def _http_request(self, url, method='GET', body=None, headers=None, content_type=None):
+        headers = headers or self._base_header
+        if content_type:
+            headers.update({'content-type': [content_type]})
         d = self._http.request(url, method, body, headers, readBody)
         d.addErrback(_unauth_to_invalid_token_error)
         return d
@@ -715,3 +705,20 @@ def _emit_received(received_docs, total):
     msg = "%d/%d" % (received_docs, total)
     emit(SOLEDAD_SYNC_RECEIVE_STATUS, msg)
     logger.debug("Sync receive status: %s" % msg)
+
+
+class Entries(object):
+
+    def __init__(self, entries='['):
+        self.entries = entries
+
+    def update(self, separator='', **dic):
+        entry = separator + '\r\n' + json.dumps(dic)
+        self.entries += entry
+        return len(entry)
+
+    def __str__(self):
+        return self.entries + '\r\n]'
+
+    def copy(self):
+        return Entries(self.entries)
-- 
cgit v1.2.3


From 855def25b1c2f1f7278b6f6e0b1415ab26a995ef Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Sun, 16 Aug 2015 00:41:30 -0300
Subject: [refactor] simplify send_docs operations

Just extracted some common logic for creating u1db-formatted requests
into the RequestBody class, and created new methods to represent the
operations done during send_docs. This also removes send_one_doc, but
does not add batching yet.
The single send behavior is still the same, represented by the
parameter passed into RequestBody's 'remove' method.
---
 client/src/leap/soledad/client/http_target.py | 86 ++++++++++++++-------------
 1 file changed, 46 insertions(+), 40 deletions(-)

diff --git a/client/src/leap/soledad/client/http_target.py b/client/src/leap/soledad/client/http_target.py
index 74ff3311..ed538add 100644
--- a/client/src/leap/soledad/client/http_target.py
+++ b/client/src/leap/soledad/client/http_target.py
@@ -396,45 +396,43 @@ class SoledadHTTPSyncTarget(SyncTarget):
             defer.returnValue([None, None])

         # add remote replica metadata to the request
-        header_entry = Entries()
-        header_entry.update(
+        initial_body = RequestBody(
             last_known_generation=last_known_generation,
             last_known_trans_id=last_known_trans_id,
             sync_id=sync_id,
             ensure=self._ensure_callback is not None)
-        idx = 0
         total = len(docs_by_generation)
-        for doc, gen, trans_id in docs_by_generation:
-            idx += 1
-            result = yield self._send_one_doc(
-                header_entry, doc,
-                gen, trans_id, total, idx)
+        entries = yield self._entries_from_docs(initial_body, docs_by_generation)
+        while len(entries):
+            result = yield self._http_request(
+                self._url,
+                method='POST',
+                body=entries.remove(1),
+                content_type='application/x-soledad-sync-put')
+            idx = total - len(entries)
             if self._defer_encryption:
-                self._sync_enc_pool.delete_encrypted_doc(
-                    doc.doc_id, doc.rev)
+                self._delete_sent(idx, docs_by_generation)
             _emit_send(idx, total)
         response_dict = json.loads(result)[0]
         gen_after_send = response_dict['new_generation']
         trans_id_after_send = response_dict['new_transaction_id']
         defer.returnValue([gen_after_send, trans_id_after_send])

+    def _delete_sent(self, idx, docs_by_generation):
+        doc = docs_by_generation[idx][0]
+        self._sync_enc_pool.delete_encrypted_doc(
+            doc.doc_id, doc.rev)
+
     @defer.inlineCallbacks
-    def _send_one_doc(self, header_entry, doc, gen, trans_id,
-                      number_of_docs, doc_idx):
-        entries = header_entry.copy()
-        # add the document to the request
-        content = yield self._encrypt_doc(doc)
-        entries.update(
-            ',',
-            id=doc.doc_id, rev=doc.rev, content=content, gen=gen,
-            trans_id=trans_id, number_of_docs=number_of_docs,
-            doc_idx=doc_idx)
-        result = yield self._http_request(
-            self._url,
-            method='POST',
-            body=str(entries),
-            content_type='application/x-soledad-sync-put')
-        defer.returnValue(result)
+    def _entries_from_docs(self, initial_body, docs_by_generation):
+        number_of_docs = len(docs_by_generation)
+        for idx, (doc, gen, trans_id) in enumerate(docs_by_generation, 1):
+            content = yield self._encrypt_doc(doc)
+            initial_body.insert_info(
+                id=doc.doc_id, rev=doc.rev, content=content, gen=gen,
+                trans_id=trans_id, number_of_docs=number_of_docs,
+                doc_idx=idx)
+        defer.returnValue(initial_body)

     def _encrypt_doc(self, doc):
         d = None
@@ -538,21 +536,19 @@ class SoledadHTTPSyncTarget(SyncTarget):

     def _receive_one_doc(self, last_known_generation,
                          last_known_trans_id, sync_id, received):
-        entries = Entries()
         # add remote replica metadata to the request
-        entries.update(
+        body = RequestBody(
             last_known_generation=last_known_generation,
             last_known_trans_id=last_known_trans_id,
             sync_id=sync_id,
             ensure=self._ensure_callback is not None)
         # inform server of how many documents have already been received
-        entries.update(
-            ',', received=received)
+        body.insert_info(received=received)
         # send headers
         return self._http_request(
             self._url,
             method='POST',
-            body=str(entries),
+            body=str(body),
             content_type='application/x-soledad-sync-get')

     def _insert_received_doc(self, response, idx, total):
@@ -707,18 +703,28 @@ def _emit_received(received_docs, total):
     logger.debug("Sync receive status: %s" % msg)


-class Entries(object):
+class RequestBody(object):

-    def __init__(self, entries='['):
-        self.entries = entries
+    def __init__(self, **header_dict):
+        self.headers = header_dict
+        self.entries = []

-    def update(self, separator='', **dic):
-        entry = separator + '\r\n' + json.dumps(dic)
-        self.entries += entry
+    def insert_info(self, **entry_dict):
+        entry = json.dumps(entry_dict)
+        self.entries.append(entry)
         return len(entry)

+    def remove(self, number=1):
+        entries = [self.entries.pop(0) for i in xrange(number)]
+        return self.entries_to_str(entries)
+
     def __str__(self):
-        return self.entries + '\r\n]'
+        return self.entries_to_str(self.entries)
+
+    def __len__(self):
+        return len(self.entries)

-    def copy(self):
-        return Entries(self.entries)
+    def entries_to_str(self, entries=None):
+        data = '[\r\n' + json.dumps(self.headers)
+        data += ''.join(',\r\n' + entry for entry in entries)
+        return data + '\r\n]'
-- 
cgit v1.2.3


From 8adf2dedb74941352520d8de42326b0c59818728 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Sun, 16 Aug 2015 01:57:25 -0300
Subject: [refactor] split http_target into 4 modules

SoledadHTTPSyncTarget is composed of 4 main groups of responsibility:

* api.py - Public and main methods of a SyncTarget
* fetch.py - Document fetching logic
* send.py - Document sending logic
* support.py - Support functions and patches

The previous single file had ~600 lines with those 4 logic groups
mixed, making it harder to read and understand.
---
 .../leap/soledad/client/http_target/__init__.py    |  45 ++++
 client/src/leap/soledad/client/http_target/api.py  | 251 +++++++++++++++++++++
 .../src/leap/soledad/client/http_target/fetch.py   | 237 +++++++++++++++++++
 client/src/leap/soledad/client/http_target/send.py | 101 +++++++++
 .../src/leap/soledad/client/http_target/support.py | 154 +++++++++++++
 5 files changed, 788 insertions(+)
 create mode 100644 client/src/leap/soledad/client/http_target/__init__.py
 create mode 100644 client/src/leap/soledad/client/http_target/api.py
 create mode 100644 client/src/leap/soledad/client/http_target/fetch.py
 create mode 100644 client/src/leap/soledad/client/http_target/send.py
 create mode 100644 client/src/leap/soledad/client/http_target/support.py

diff --git a/client/src/leap/soledad/client/http_target/__init__.py b/client/src/leap/soledad/client/http_target/__init__.py
new file mode 100644
index 00000000..e77d20f5
--- /dev/null
+++ b/client/src/leap/soledad/client/http_target/__init__.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# http_target.py
+# Copyright (C) 2015 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.


+"""
+A U1DB backend for encrypting data before sending to server and decrypting
+after receiving.
+""" + + +import logging + +from leap.soledad.client.http_target.send import HTTPDocSender +from leap.soledad.client.http_target.api import SyncTargetAPI +from leap.soledad.client.http_target.fetch import HTTPDocFetcher + + +logger = logging.getLogger(__name__) + + +class SoledadHTTPSyncTarget(SyncTargetAPI, HTTPDocSender, HTTPDocFetcher): + + """ + A SyncTarget that encrypts data before sending and decrypts data after + receiving. + + Normally encryption will have been written to the sync database upon + document modification. The sync database is also used to write temporarily + the parsed documents that the remote send us, before being decrypted and + written to the main database. + """ diff --git a/client/src/leap/soledad/client/http_target/api.py b/client/src/leap/soledad/client/http_target/api.py new file mode 100644 index 00000000..9e677304 --- /dev/null +++ b/client/src/leap/soledad/client/http_target/api.py @@ -0,0 +1,251 @@ +import json +import base64 + +from uuid import uuid4 +from u1db import SyncTarget + +from twisted.web.error import Error +from twisted.internet import defer + +from leap.soledad.common.errors import InvalidAuthTokenError +from leap.soledad.client.http_target.support import readBody + +from leap.common.http import HTTPClient + + +class SyncTargetAPI(SyncTarget): + + def __init__(self, url, source_replica_uid, creds, crypto, cert_file, + sync_db=None, sync_enc_pool=None): + """ + Initialize the sync target. + + :param url: The server sync url. + :type url: str + :param source_replica_uid: The source replica uid which we use when + deferring decryption. + :type source_replica_uid: str + :param creds: A dictionary containing the uuid and token. + :type creds: creds + :param crypto: An instance of SoledadCrypto so we can encrypt/decrypt + document contents when syncing. + :type crypto: soledad.crypto.SoledadCrypto + :param cert_file: Path to the certificate of the ca used to validate + the SSL certificate used by the remote soledad + server. + :type cert_file: str + :param sync_db: Optional. handler for the db with the symmetric + encryption of the syncing documents. If + None, encryption will be done in-place, + instead of retreiving it from the dedicated + database. + :type sync_db: Sqlite handler + :param sync_enc_pool: The encryption pool to use to defer encryption. + If None is passed the encryption will not be + deferred. + :type sync_enc_pool: leap.soledad.client.encdecpool.SyncEncrypterPool + """ + if url.endswith("/"): + url = url[:-1] + self._url = str(url) + "/sync-from/" + str(source_replica_uid) + self.source_replica_uid = source_replica_uid + self._auth_header = None + self.set_creds(creds) + self._crypto = crypto + self._sync_db = sync_db + self._sync_enc_pool = sync_enc_pool + self._insert_doc_cb = None + # asynchronous encryption/decryption attributes + self._decryption_callback = None + self._sync_decr_pool = None + self._http = HTTPClient(cert_file) + + def close(self): + self._http.close() + + def set_creds(self, creds): + """ + Update credentials. + + :param creds: A dictionary containing the uuid and token. 
+        :type creds: dict
+        """
+        uuid = creds['token']['uuid']
+        token = creds['token']['token']
+        auth = '%s:%s' % (uuid, token)
+        b64_token = base64.b64encode(auth)
+        self._auth_header = {'Authorization': ['Token %s' % b64_token]}
+
+    @property
+    def _base_header(self):
+        return self._auth_header.copy() if self._auth_header else {}
+
+    @property
+    def _defer_encryption(self):
+        return self._sync_enc_pool is not None
+
+    def _http_request(self, url, method='GET', body=None, headers=None, content_type=None):
+        headers = headers or self._base_header
+        if content_type:
+            headers.update({'content-type': [content_type]})
+        d = self._http.request(url, method, body, headers, readBody)
+        d.addErrback(_unauth_to_invalid_token_error)
+        return d
+
+    @defer.inlineCallbacks
+    def get_sync_info(self, source_replica_uid):
+        """
+        Return information about known state of remote database.
+
+        Return the replica_uid and the current database generation of the
+        remote database, and its last-seen database generation for the client
+        replica.
+
+        :param source_replica_uid: The client-side replica uid.
+        :type source_replica_uid: str
+
+        :return: A deferred which fires with (target_replica_uid,
+                 target_replica_generation, target_trans_id,
+                 source_replica_last_known_generation,
+                 source_replica_last_known_transaction_id)
+        :rtype: twisted.internet.defer.Deferred
+        """
+        raw = yield self._http_request(self._url)
+        res = json.loads(raw)
+        defer.returnValue([
+            res['target_replica_uid'],
+            res['target_replica_generation'],
+            res['target_replica_transaction_id'],
+            res['source_replica_generation'],
+            res['source_transaction_id']
+        ])
+
+    def record_sync_info(
+            self, source_replica_uid, source_replica_generation,
+            source_replica_transaction_id):
+        """
+        Record tip information for another replica.
+
+        After sync_exchange has been processed, the caller will have
+        received new content from this replica. This call allows the
+        source replica instigating the sync to inform us what their
+        generation became after applying the documents we returned.
+
+        This is used to allow future sync operations to not need to repeat data
+        that we just talked about. It also means that if this is called at the
+        wrong time, there can be database records that will never be
+        synchronized.
+
+        :param source_replica_uid: The identifier for the source replica.
+        :type source_replica_uid: str
+        :param source_replica_generation: The database generation for the
+                                          source replica.
+        :type source_replica_generation: int
+        :param source_replica_transaction_id: The transaction id associated
+                                              with the source replica
+                                              generation.
+        :type source_replica_transaction_id: str
+
+        :return: A deferred which fires with the result of the query.
+        :rtype: twisted.internet.defer.Deferred
+        """
+        data = json.dumps({
+            'generation': source_replica_generation,
+            'transaction_id': source_replica_transaction_id
+        })
+        return self._http_request(
+            self._url,
+            method='PUT',
+            body=data,
+            content_type='application/json')
+
+    @defer.inlineCallbacks
+    def sync_exchange(self, docs_by_generation, source_replica_uid,
+                      last_known_generation, last_known_trans_id,
+                      insert_doc_cb, ensure_callback=None,
+                      defer_decryption=True, sync_id=None):
+        """
+        Find out which documents the remote database does not know about,
+        encrypt and send them. After that, receive documents from the remote
+        database.
+
+        :param docs_by_generation: A list of (doc_id, generation, trans_id)
+                                   of local documents that were changed since
+                                   the last local generation the remote
+                                   replica knows about.
+        :type docs_by_generation: list of tuples
+
+        :param source_replica_uid: The uid of the source replica.
+        :type source_replica_uid: str
+
+        :param last_known_generation: Target's last known generation.
+        :type last_known_generation: int
+
+        :param last_known_trans_id: Target's last known transaction id.
+        :type last_known_trans_id: str
+
+        :param insert_doc_cb: A callback for inserting received documents from
+                              target. If not overridden, this will call u1db
+                              insert_doc_from_target in synchronizer, which
+                              implements the TAKE OTHER semantics.
+        :type insert_doc_cb: function
+
+        :param ensure_callback: A callback that ensures we know the target
+                                replica uid if the target replica was just
+                                created.
+        :type ensure_callback: function
+
+        :param defer_decryption: Whether to defer the decryption process using
+                                 the intermediate database. If False,
+                                 decryption will be done inline.
+        :type defer_decryption: bool
+
+        :return: A deferred which fires with the new generation and
+                 transaction id of the target replica.
+        :rtype: twisted.internet.defer.Deferred
+        """
+
+        self._ensure_callback = ensure_callback
+
+        if sync_id is None:
+            sync_id = str(uuid4())
+        self.source_replica_uid = source_replica_uid
+
+        # save a reference to the callback so we can use it after decrypting
+        self._insert_doc_cb = insert_doc_cb
+
+        gen_after_send, trans_id_after_send = yield self._send_docs(
+            docs_by_generation,
+            last_known_generation,
+            last_known_trans_id,
+            sync_id)
+
+        cur_target_gen, cur_target_trans_id = yield self._receive_docs(
+            last_known_generation, last_known_trans_id,
+            ensure_callback, sync_id,
+            defer_decryption=defer_decryption)
+
+        # update gen and trans id info in case we just sent and did not
+        # receive docs.
+        if gen_after_send is not None and gen_after_send > cur_target_gen:
+            cur_target_gen = gen_after_send
+            cur_target_trans_id = trans_id_after_send
+
+        defer.returnValue([cur_target_gen, cur_target_trans_id])
+
+
+def _unauth_to_invalid_token_error(failure):
+    """
+    An errback to translate unauthorized errors to our own invalid token
+    class.
+
+    :param failure: The original failure.
+    :type failure: twisted.python.failure.Failure
+
+    :return: Either the original failure or an invalid auth token error.
+    :rtype: twisted.python.failure.Failure
+    """
+    failure.trap(Error)
+    if failure.getErrorMessage() == "401 Unauthorized":
+        raise InvalidAuthTokenError
+    return failure
diff --git a/client/src/leap/soledad/client/http_target/fetch.py b/client/src/leap/soledad/client/http_target/fetch.py
new file mode 100644
index 00000000..c4bb79a0
--- /dev/null
+++ b/client/src/leap/soledad/client/http_target/fetch.py
@@ -0,0 +1,237 @@
+# -*- coding: utf-8 -*-
+# http_target.py
+# Copyright (C) 2015 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import logging
+import json
+from u1db import errors
+from u1db.remote import utils
+from twisted.internet import defer
+from leap.soledad.common.document import SoledadDocument
+from leap.soledad.client.events import SOLEDAD_SYNC_RECEIVE_STATUS
+from leap.soledad.client.events import emit
+from leap.soledad.client.crypto import is_symmetrically_encrypted
+from leap.soledad.client.encdecpool import SyncDecrypterPool
+from leap.soledad.client.http_target.support import RequestBody
+
+logger = logging.getLogger(__name__)
+
+
+class HTTPDocFetcher(object):
+
+    @defer.inlineCallbacks
+    def _receive_docs(self, last_known_generation, last_known_trans_id,
+                      ensure_callback, sync_id, defer_decryption):
+
+        self._queue_for_decrypt = defer_decryption \
+            and self._sync_db is not None
+
+        new_generation = last_known_generation
+        new_transaction_id = last_known_trans_id
+
+        if self._queue_for_decrypt:
+            logger.debug(
+                "Soledad sync: will queue received docs for decrypting.")
+
+        if defer_decryption:
+            self._setup_sync_decr_pool()
+
+        # ---------------------------------------------------------------------
+        # maybe receive the first document
+        # ---------------------------------------------------------------------
+
+        # we fetch the first document before fetching the rest because we need
+        # to know the total number of documents to be received, and this
+        # information comes as metadata to each request.
+
+        doc = yield self._receive_one_doc(
+            last_known_generation, last_known_trans_id,
+            sync_id, 0)
+        self._received_docs = 0
+        number_of_changes, ngen, ntrans = self._insert_received_doc(doc, 1, 1)
+
+        if defer_decryption:
+            self._sync_decr_pool.start(number_of_changes)
+
+        # ---------------------------------------------------------------------
+        # maybe receive the rest of the documents
+        # ---------------------------------------------------------------------
+
+        # launch many asynchronous fetches and inserts of received documents
+        # in the temporary sync db. Will wait for all results before
+        # continuing.
+
+        received = 1
+        deferreds = []
+        while received < number_of_changes:
+            d = self._receive_one_doc(
+                last_known_generation,
+                last_known_trans_id, sync_id, received)
+            d.addCallback(
+                self._insert_received_doc,
+                received + 1,  # the index of the current received doc
+                number_of_changes)
+            deferreds.append(d)
+            received += 1
+        results = yield defer.gatherResults(deferreds)
+
+        # get generation and transaction id of target after insertions
+        if deferreds:
+            _, new_generation, new_transaction_id = results.pop()
+
+        # ---------------------------------------------------------------------
+        # wait for async decryption to finish
+        # ---------------------------------------------------------------------
+
+        if defer_decryption:
+            yield self._sync_decr_pool.deferred
+            self._sync_decr_pool.stop()
+
+        defer.returnValue([new_generation, new_transaction_id])
+
+    def _receive_one_doc(self, last_known_generation,
+                         last_known_trans_id, sync_id, received):
+        # add remote replica metadata to the request
+        body = RequestBody(
+            last_known_generation=last_known_generation,
+            last_known_trans_id=last_known_trans_id,
+            sync_id=sync_id,
+            ensure=self._ensure_callback is not None)
+        # inform server of how many documents have already been received
+        body.insert_info(received=received)
+        # send headers
+        return self._http_request(
+            self._url,
+            method='POST',
+            body=str(body),
+            content_type='application/x-soledad-sync-get')
+
+    def _insert_received_doc(self, response, idx, total):
+        """
+        Insert a received document into the local replica.
+
+        :param response: The body and headers of the response.
+        :type response: tuple(str, dict)
+        :param idx: The index count of the current operation.
+        :type idx: int
+        :param total: The total number of operations.
+        :type total: int
+        """
+        new_generation, new_transaction_id, number_of_changes, doc_id, \
+            rev, content, gen, trans_id = \
+            self._parse_received_doc_response(response)
+        if doc_id is not None:
+            # decrypt incoming document and insert into local database
+            # -------------------------------------------------------------
+            # symmetric decryption of document's contents
+            # -------------------------------------------------------------
+            # If arriving content was symmetrically encrypted, we decrypt it.
+            # We do it inline if the defer_decryption flag is False or no
+            # sync_db was defined; otherwise we defer it, writing it to the
+            # received docs table.
+            doc = SoledadDocument(doc_id, rev, content)
+            if is_symmetrically_encrypted(doc):
+                if self._queue_for_decrypt:
+                    self._sync_decr_pool.insert_encrypted_received_doc(
+                        doc.doc_id, doc.rev, doc.content, gen, trans_id,
+                        idx)
+                else:
+                    # defer_decryption is False or no-sync-db fallback
+                    doc.set_json(self._crypto.decrypt_doc(doc))
+                    self._insert_doc_cb(doc, gen, trans_id)
+            else:
+                # not symmetrically encrypted doc, insert it directly
+                # or save it in the decrypted stage.
+                if self._queue_for_decrypt:
+                    self._sync_decr_pool.insert_received_doc(
+                        doc.doc_id, doc.rev, doc.content, gen, trans_id,
+                        idx)
+                else:
+                    self._insert_doc_cb(doc, gen, trans_id)
+            # -------------------------------------------------------------
+            # end of symmetric decryption
+            # -------------------------------------------------------------
+        self._received_docs += 1
+        _emit_received(self._received_docs, total)
+        return number_of_changes, new_generation, new_transaction_id
+
+    def _parse_received_doc_response(self, response):
+        """
+        Parse the response from the server containing the received document.
+
+        :param response: The body and headers of the response.
+        :type response: tuple(str, dict)
+
+        :return: (new_gen, new_trans_id, number_of_changes, doc_id, rev,
+                 content, gen, trans_id)
+        :rtype: tuple
+        """
+        # decode incoming stream
+        parts = response.splitlines()
+        if not parts or parts[0] != '[' or parts[-1] != ']':
+            raise errors.BrokenSyncStream
+        data = parts[1:-1]
+        # decode metadata
+        try:
+            line, comma = utils.check_and_strip_comma(data[0])
+            metadata = None
+        except (IndexError):
+            raise errors.BrokenSyncStream
+        try:
+            metadata = json.loads(line)
+            new_generation = metadata['new_generation']
+            new_transaction_id = metadata['new_transaction_id']
+            number_of_changes = metadata['number_of_changes']
+        except (ValueError, KeyError):
+            raise errors.BrokenSyncStream
+        # make sure we have replica_uid from fresh new dbs
+        if self._ensure_callback and 'replica_uid' in metadata:
+            self._ensure_callback(metadata['replica_uid'])
+        # parse incoming document info
+        doc_id = None
+        rev = None
+        content = None
+        gen = None
+        trans_id = None
+        if number_of_changes > 0:
+            try:
+                entry = json.loads(data[1])
+                doc_id = entry['id']
+                rev = entry['rev']
+                content = entry['content']
+                gen = entry['gen']
+                trans_id = entry['trans_id']
+            except (IndexError, KeyError):
+                raise errors.BrokenSyncStream
+        return new_generation, new_transaction_id, number_of_changes, \
+            doc_id, rev, content, gen, trans_id
+
+    def _setup_sync_decr_pool(self):
+        """
+        Set up the SyncDecrypterPool for deferred decryption.
+        """
+        if self._sync_decr_pool is None and self._sync_db is not None:
+            # initialize syncing queue decryption pool
+            self._sync_decr_pool = SyncDecrypterPool(
+                self._crypto,
+                self._sync_db,
+                insert_doc_cb=self._insert_doc_cb,
+                source_replica_uid=self.source_replica_uid)
+
+
+def _emit_received(received_docs, total):
+    msg = "%d/%d" % (received_docs, total)
+    emit(SOLEDAD_SYNC_RECEIVE_STATUS, msg)
+    logger.debug("Sync receive status: %s" % msg)
diff --git a/client/src/leap/soledad/client/http_target/send.py b/client/src/leap/soledad/client/http_target/send.py
new file mode 100644
index 00000000..de18df8b
--- /dev/null
+++ b/client/src/leap/soledad/client/http_target/send.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# http_target.py
+# Copyright (C) 2015 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import json
+import logging
+from twisted.internet import defer
+from leap.soledad.client.events import emit
+from leap.soledad.client.events import SOLEDAD_SYNC_SEND_STATUS
+from leap.soledad.client.http_target.support import RequestBody
+logger = logging.getLogger(__name__)
+
+
+class HTTPDocSender(object):
+
+    @defer.inlineCallbacks
+    def _send_docs(self, docs_by_generation, last_known_generation,
+                   last_known_trans_id, sync_id):
+
+        if not docs_by_generation:
+            defer.returnValue([None, None])
+
+        # add remote replica metadata to the request
+        initial_body = RequestBody(
+            last_known_generation=last_known_generation,
+            last_known_trans_id=last_known_trans_id,
+            sync_id=sync_id,
+            ensure=self._ensure_callback is not None)
+        total = len(docs_by_generation)
+        entries = yield self._entries_from_docs(initial_body, docs_by_generation)
+        while len(entries):
+            result = yield self._http_request(
+                self._url,
+                method='POST',
+                body=entries.remove(1),
+                content_type='application/x-soledad-sync-put')
+            idx = total - len(entries)
+            if self._defer_encryption:
+                self._delete_sent(idx, docs_by_generation)
+            _emit_send(idx, total)
+        response_dict = json.loads(result)[0]
+        gen_after_send = response_dict['new_generation']
+        trans_id_after_send = response_dict['new_transaction_id']
+        defer.returnValue([gen_after_send, trans_id_after_send])
+
+    def _delete_sent(self, idx, docs_by_generation):
+        doc = docs_by_generation[idx][0]
+        self._sync_enc_pool.delete_encrypted_doc(
+            doc.doc_id, doc.rev)
+
+    @defer.inlineCallbacks
+    def _entries_from_docs(self, initial_body, docs_by_generation):
+        number_of_docs = len(docs_by_generation)
+        for idx, (doc, gen, trans_id) in enumerate(docs_by_generation, 1):
+            content = yield self._encrypt_doc(doc)
+            initial_body.insert_info(
+                id=doc.doc_id, rev=doc.rev, content=content, gen=gen,
+                trans_id=trans_id, number_of_docs=number_of_docs,
+                doc_idx=idx)
+        defer.returnValue(initial_body)
+
+    def _encrypt_doc(self, doc):
+        d = None
+        if doc.is_tombstone():
+            d = defer.succeed(None)
+        elif not self._defer_encryption:
+            # fallback case, for tests
+            d = defer.succeed(self._crypto.encrypt_doc(doc))
+        else:
+
+            def _maybe_encrypt_doc_inline(doc_json):
+                if doc_json is None:
+                    # the document is not marked as tombstone, but we got
+                    # nothing from the sync db. As it is not encrypted
+                    # yet, we force inline encryption.
+                    return self._crypto.encrypt_doc(doc)
+                return doc_json
+
+            d = self._sync_enc_pool.get_encrypted_doc(doc.doc_id, doc.rev)
+            d.addCallback(_maybe_encrypt_doc_inline)
+        return d
+
+
+def _emit_send(idx, total):
+    msg = "%d/%d" % (idx, total)
+    emit(
+        SOLEDAD_SYNC_SEND_STATUS,
+        "Soledad sync send status: %s" % msg)
+    logger.debug("Sync send status: %s" % msg)
diff --git a/client/src/leap/soledad/client/http_target/support.py b/client/src/leap/soledad/client/http_target/support.py
new file mode 100644
index 00000000..88934636
--- /dev/null
+++ b/client/src/leap/soledad/client/http_target/support.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+# http_target.py
+# Copyright (C) 2015 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import warnings
+import json
+from u1db import errors
+from u1db.remote import http_errors
+from twisted.internet import defer
+from twisted.web.client import _ReadBodyProtocol
+from twisted.web.client import PartialDownloadError
+from twisted.web._newclient import ResponseDone
+from twisted.web._newclient import PotentialDataLoss
+
+
+# we want to make sure that HTTP errors will raise appropriate u1db errors,
+# that is, fire errbacks with the appropriate failures, in the context of
+# twisted. Because of that, we redefine the http body reader used by the HTTP
+# client below.
+
+class ReadBodyProtocol(_ReadBodyProtocol):
+
+    def __init__(self, response, deferred):
+        """
+        Initialize the protocol, additionally storing the response headers.
+        """
+        _ReadBodyProtocol.__init__(
+            self, response.code, response.phrase, deferred)
+        self.headers = response.headers
+
+    # ---8<--- snippet from u1db.remote.http_client, modified to use errbacks
+    def _error(self, respdic):
+        descr = respdic.get("error")
+        exc_cls = errors.wire_description_to_exc.get(descr)
+        if exc_cls is not None:
+            message = respdic.get("message")
+            self.deferred.errback(exc_cls(message))
+    # ---8<--- end of snippet from u1db.remote.http_client
+
+    def connectionLost(self, reason):
+        """
+        Deliver the accumulated response bytes to the waiting L{Deferred}, if
+        the response body has been completely received without error.
+        """
+        if reason.check(ResponseDone):
+
+            body = b''.join(self.dataBuffer)
+
+            # ---8<--- snippet from u1db.remote.http_client
+            if self.status in (200, 201):
+                self.deferred.callback(body)
+            elif self.status in http_errors.ERROR_STATUSES:
+                try:
+                    respdic = json.loads(body)
+                except ValueError:
+                    self.deferred.errback(
+                        errors.HTTPError(self.status, body, self.headers))
+                else:
+                    self._error(respdic)
+            # special cases
+            elif self.status == 503:
+                self.deferred.errback(errors.Unavailable(body, self.headers))
+            else:
+                self.deferred.errback(
+                    errors.HTTPError(self.status, body, self.headers))
+            # ---8<--- end of snippet from u1db.remote.http_client
+
+        elif reason.check(PotentialDataLoss):
+            self.deferred.errback(
+                PartialDownloadError(self.status, self.message,
+                                     b''.join(self.dataBuffer)))
+        else:
+            self.deferred.errback(reason)
+
+
+def readBody(response):
+    """
+    Get the body of an L{IResponse} and return it as a byte string.
+
+    This is a helper function for clients that don't want to incrementally
+    receive the body of an HTTP response.
+
+    @param response: The HTTP response for which the body will be read.
+    @type response: L{IResponse} provider
+
+    @return: A L{Deferred} which will fire with the body of the response.
+        Cancelling it will close the connection to the server immediately.
+    """
+    def cancel(deferred):
+        """
+        Cancel a L{readBody} call, close the connection to the HTTP server
+        immediately, if it is still open.
+
+        @param deferred: The cancelled L{defer.Deferred}.
+ """ + abort = getAbort() + if abort is not None: + abort() + + d = defer.Deferred(cancel) + protocol = ReadBodyProtocol(response, d) + + def getAbort(): + return getattr(protocol.transport, 'abortConnection', None) + + response.deliverBody(protocol) + + if protocol.transport is not None and getAbort() is None: + warnings.warn( + 'Using readBody with a transport that does not have an ' + 'abortConnection method', + category=DeprecationWarning, + stacklevel=2) + + return d + + +class RequestBody(object): + + def __init__(self, **header_dict): + self.headers = header_dict + self.entries = [] + + def insert_info(self, **entry_dict): + entry = json.dumps(entry_dict) + self.entries.append(entry) + return len(entry) + + def remove(self, number=1): + entries = [self.entries.pop(0) for i in xrange(number)] + return self.entries_to_str(entries) + + def __str__(self): + return self.entries_to_str(self.entries) + + def __len__(self): + return len(self.entries) + + def entries_to_str(self, entries=None): + data = '[\r\n' + json.dumps(self.headers) + data += ''.join(',\r\n' + entry for entry in entries) + return data + '\r\n]' -- cgit v1.2.3 From 25cca13779f69bdd665b37e6ce9296540be823e3 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 14:25:35 -0300 Subject: [refactor] removing getters and setters from couch.py This is not needed, the behavior under them is the same as an assignment. --- common/src/leap/soledad/common/couch.py | 43 +++------------------------------ 1 file changed, 4 insertions(+), 39 deletions(-) diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index d61eac51..96e00fa2 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -104,9 +104,9 @@ class CouchDocument(SoledadDocument): :type syncable: bool """ SoledadDocument.__init__(self, doc_id, rev, json, has_conflicts) - self._couch_rev = None + self.couch_rev = None self._conflicts = None - self._transactions = None + self.transactions = None def _ensure_fetch_conflicts(self, get_conflicts_fun): """ @@ -167,22 +167,6 @@ class CouchDocument(SoledadDocument): self._conflicts) self.has_conflicts = len(self._conflicts) > 0 - def _get_couch_rev(self): - return self._couch_rev - - def _set_couch_rev(self, rev): - self._couch_rev = rev - - couch_rev = property(_get_couch_rev, _set_couch_rev) - - def _get_transactions(self): - return self._transactions - - def _set_transactions(self, rev): - self._transactions = rev - - transactions = property(_get_transactions, _set_transactions) - # monkey-patch the u1db http app to use CouchDocument http_app.Document = CouchDocument @@ -1563,7 +1547,7 @@ class CouchServerState(ServerState): :param couch_url: The URL for the couch database. :type couch_url: str """ - self._couch_url = couch_url + self.couch_url = couch_url def open_database(self, dbname): """ @@ -1576,7 +1560,7 @@ class CouchServerState(ServerState): :rtype: CouchDatabase """ return CouchDatabase( - self._couch_url, + self.couch_url, dbname, ensure_ddocs=False) @@ -1610,22 +1594,3 @@ class CouchServerState(ServerState): delete databases. 
""" raise Unauthorized() - - def _set_couch_url(self, url): - """ - Set the couchdb URL - - :param url: CouchDB URL - :type url: str - """ - self._couch_url = url - - def _get_couch_url(self): - """ - Return CouchDB URL - - :rtype: str - """ - return self._couch_url - - couch_url = property(_get_couch_url, _set_couch_url, doc='CouchDB URL') -- cgit v1.2.3 From b767750d5aff2c9175a0bcc247c2b9c0d8ca08be Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 15:07:21 -0300 Subject: [refactor] remove unused parameter --- common/src/leap/soledad/common/couch.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 96e00fa2..c003b14e 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -87,8 +87,7 @@ class CouchDocument(SoledadDocument): atomic and consistent update of the database. """ - def __init__(self, doc_id=None, rev=None, json='{}', has_conflicts=False, - syncable=True): + def __init__(self, doc_id=None, rev=None, json='{}', has_conflicts=False): """ Container for handling a document that is stored in couch backend. @@ -100,8 +99,6 @@ class CouchDocument(SoledadDocument): :type json: str :param has_conflicts: Boolean indicating if this document has conflicts :type has_conflicts: bool - :param syncable: Should this document be synced with remote replicas? - :type syncable: bool """ SoledadDocument.__init__(self, doc_id, rev, json, has_conflicts) self.couch_rev = None -- cgit v1.2.3 From 673835b1dc53dc0e1d0363afdc57c7c6917f8628 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 17:35:42 -0300 Subject: [refactor] simplify conflicts management Adding, removing and checking conflicts is an operation done by the model, the Database shouldn't be aware of that. Fetching and saving also is not model's responsability. Repetition remove as well. --- common/src/leap/soledad/common/couch.py | 98 ++++++--------------------------- 1 file changed, 16 insertions(+), 82 deletions(-) diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index c003b14e..126c6b51 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -102,23 +102,8 @@ class CouchDocument(SoledadDocument): """ SoledadDocument.__init__(self, doc_id, rev, json, has_conflicts) self.couch_rev = None - self._conflicts = None self.transactions = None - def _ensure_fetch_conflicts(self, get_conflicts_fun): - """ - Ensure conflict data has been fetched from the server. - - :param get_conflicts_fun: A function which, given the document id and - the couch revision, return the conflicted - versions of the current document. - :type get_conflicts_fun: function - """ - if self._conflicts is None: - self._conflicts = get_conflicts_fun(self.doc_id, - couch_rev=self.couch_rev) - self.has_conflicts = len(self._conflicts) > 0 - def get_conflicts(self): """ Get the conflicted versions of the document. 
@@ -146,7 +131,7 @@ class CouchDocument(SoledadDocument):
         :type doc: CouchDocument
         """
         if self._conflicts is None:
-            raise Exception("Run self._ensure_fetch_conflicts first!")
+            raise Exception("Fetch conflicts first!")
         self._conflicts.append(doc)
         self.has_conflicts = len(self._conflicts) > 0

@@ -158,7 +143,7 @@ class CouchDocument(SoledadDocument):
         :type conflict_revs: [str]
         """
         if self._conflicts is None:
-            raise Exception("Run self._ensure_fetch_conflicts first!")
+            raise Exception("Fetch conflicts first!")
         self._conflicts = filter(
             lambda doc: doc.rev not in conflict_revs,
             self._conflicts)
@@ -731,7 +716,6 @@ class CouchDatabase(CommonBackend):
         if check_for_conflicts \
                 and '_attachments' in result \
                 and 'u1db_conflicts' in result['_attachments']:
-            doc.has_conflicts = True
             doc.set_conflicts(
                 self._build_conflicts(
                     doc.doc_id,
@@ -1025,7 +1009,7 @@ class CouchDatabase(CommonBackend):
             conflicts.append(doc)
         return conflicts

-    def _get_conflicts(self, doc_id, couch_rev=None):
+    def get_doc_conflicts(self, doc_id, couch_rev=None):
         """
         Get the conflicted versions of a document.

@@ -1040,32 +1024,21 @@ class CouchDatabase(CommonBackend):
         """
         # request conflicts attachment from server
         params = {}
+        conflicts = []
         if couch_rev is not None:
             params['rev'] = couch_rev  # restric document's couch revision
+        else:
+            # TODO: move into resource logic!
+            first_entry = self._get_doc(doc_id, check_for_conflicts=True)
+            conflicts.append(first_entry)
         resource = self._database.resource(doc_id, 'u1db_conflicts')
         try:
             response = resource.get_json(**params)
-            return self._build_conflicts(
+            return conflicts + self._build_conflicts(
                 doc_id, json.loads(response[2].read()))
         except ResourceNotFound:
             return []

-    def get_doc_conflicts(self, doc_id):
-        """
-        Get the list of conflicts for the given document.
-
-        The order of the conflicts is such that the first entry is the value
-        that would be returned by "get_doc".
-
-        :return: A list of the document entries that are conflicted.
-        :rtype: [CouchDocument]
-        """
-        conflict_docs = self._get_conflicts(doc_id)
-        if len(conflict_docs) == 0:
-            return []
-        this_doc = self._get_doc(doc_id, check_for_conflicts=True)
-        return [this_doc] + conflict_docs
-
     def _get_replica_gen_and_trans_id(self, other_replica_uid):
         """
         Return the last known generation and transaction id for the other db
@@ -1189,43 +1162,6 @@ class CouchDatabase(CommonBackend):
         except ResourceNotFound as e:
             raise_missing_design_doc_error(e, ddoc_path)

-    def _add_conflict(self, doc, my_doc_rev, my_content):
-        """
-        Add a conflict to the document.
-
-        Note that this method does not actually update the backend; rather, it
-        updates the CouchDocument object which will provide the conflict data
-        when the atomic document update is made.
-
-        :param doc: The document to have conflicts added to.
-        :type doc: CouchDocument
-        :param my_doc_rev: The revision of the conflicted document.
-        :type my_doc_rev: str
-        :param my_content: The content of the conflicted document as a JSON
-                           serialized string.
-        :type my_content: str
-        """
-        doc._ensure_fetch_conflicts(self._get_conflicts)
-        doc.add_conflict(
-            self._factory(doc_id=doc.doc_id, rev=my_doc_rev,
-                          json=my_content))
-
-    def _delete_conflicts(self, doc, conflict_revs):
-        """
-        Delete the conflicted revisions from the list of conflicts of C{doc}.
-
-        Note that this method does not actually update the backend; rather, it
-        updates the CouchDocument object which will provide the conflict data
-        when the atomic document update is made.
- - :param doc: The document to have conflicts deleted. - :type doc: CouchDocument - :param conflict_revs: A list of the revisions to be deleted. - :param conflict_revs: [str] - """ - doc._ensure_fetch_conflicts(self._get_conflicts) - doc.delete_conflicts(conflict_revs) - def _prune_conflicts(self, doc, doc_vcr): """ Prune conflicts that are older then the current document's revision, or @@ -1251,7 +1187,7 @@ class CouchDatabase(CommonBackend): if autoresolved: doc_vcr.increment(self._replica_uid) doc.rev = doc_vcr.as_str() - self._delete_conflicts(doc, c_revs_to_prune) + doc.delete_conflicts(c_revs_to_prune) def _force_doc_sync_conflict(self, doc): """ @@ -1262,8 +1198,7 @@ class CouchDatabase(CommonBackend): """ my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) self._prune_conflicts(doc, vectorclock.VectorClockRev(doc.rev)) - self._add_conflict(doc, my_doc.rev, my_doc.get_json()) - doc.has_conflicts = True + doc.add_conflict(my_doc) self._put_doc(my_doc, doc) def resolve_doc(self, doc, conflicted_doc_revs): @@ -1308,14 +1243,14 @@ class CouchDatabase(CommonBackend): # the newer doc version will supersede the one in the database, so # we copy conflicts before updating the backend. doc.set_conflicts(cur_doc.get_conflicts()) # copy conflicts over. - self._delete_conflicts(doc, superseded_revs) + doc.delete_conflicts(superseded_revs) self._put_doc(cur_doc, doc) else: # the newer doc version does not supersede the one in the # database, so we will add a conflict to the database and copy # those over to the document the user has in her hands. - self._add_conflict(cur_doc, new_rev, doc.get_json()) - self._delete_conflicts(cur_doc, superseded_revs) + cur_doc.add_conflict(doc) + cur_doc.delete_conflicts(superseded_revs) self._put_doc(cur_doc, cur_doc) # just update conflicts # backend has been updated with current conflicts, now copy them # to the current document. @@ -1405,10 +1340,9 @@ class CouchDatabase(CommonBackend): if cur_doc is None: self._put_doc(cur_doc, doc) return 'inserted' - else: - doc.couch_rev = cur_doc.couch_rev + doc.couch_rev = cur_doc.couch_rev + doc.set_conflicts(self.get_doc_conflicts(doc.doc_id, doc.couch_rev)) # fetch conflicts because we will eventually manipulate them - doc._ensure_fetch_conflicts(self._get_conflicts) # from now on, it works just like u1db sqlite backend doc_vcr = vectorclock.VectorClockRev(doc.rev) cur_vcr = vectorclock.VectorClockRev(cur_doc.rev) -- cgit v1.2.3 From 7f262d68c0afe43a3bd3864a19aac46051de7571 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 18:53:38 -0300 Subject: [refactor] prune conflicts is Document's responsibility --- common/src/leap/soledad/common/couch.py | 58 ++++++++++++++++----------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 126c6b51..6183be6e 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -149,6 +149,33 @@ class CouchDocument(SoledadDocument): self._conflicts) self.has_conflicts = len(self._conflicts) > 0 + def _prune_conflicts(self, doc_vcr, autoresolved_increment): + """ + Prune conflicts that are older then the current document's revision, or + whose content match to the current document's content. + + :param doc: The document to have conflicts pruned. + :type doc: CouchDocument + :param doc_vcr: A vector clock representing the current document's + revision. 
+ :type doc_vcr: u1db.vectorclock.VectorClock + """ + if self.has_conflicts: + autoresolved = False + c_revs_to_prune = [] + for c_doc in self._conflicts: + c_vcr = vectorclock.VectorClockRev(c_doc.rev) + if doc_vcr.is_newer(c_vcr): + c_revs_to_prune.append(c_doc.rev) + elif self.same_content_as(c_doc): + c_revs_to_prune.append(c_doc.rev) + doc_vcr.maximize(c_vcr) + autoresolved = True + if autoresolved: + doc_vcr.increment(autoresolved_increment) + self.rev = doc_vcr.as_str() + self.delete_conflicts(c_revs_to_prune) + # monkey-patch the u1db http app to use CouchDocument http_app.Document = CouchDocument @@ -1162,33 +1189,6 @@ class CouchDatabase(CommonBackend): except ResourceNotFound as e: raise_missing_design_doc_error(e, ddoc_path) - def _prune_conflicts(self, doc, doc_vcr): - """ - Prune conflicts that are older then the current document's revision, or - whose content match to the current document's content. - - :param doc: The document to have conflicts pruned. - :type doc: CouchDocument - :param doc_vcr: A vector clock representing the current document's - revision. - :type doc_vcr: u1db.vectorclock.VectorClock - """ - if doc.has_conflicts is True: - autoresolved = False - c_revs_to_prune = [] - for c_doc in doc.get_conflicts(): - c_vcr = vectorclock.VectorClockRev(c_doc.rev) - if doc_vcr.is_newer(c_vcr): - c_revs_to_prune.append(c_doc.rev) - elif doc.same_content_as(c_doc): - c_revs_to_prune.append(c_doc.rev) - doc_vcr.maximize(c_vcr) - autoresolved = True - if autoresolved: - doc_vcr.increment(self._replica_uid) - doc.rev = doc_vcr.as_str() - doc.delete_conflicts(c_revs_to_prune) - def _force_doc_sync_conflict(self, doc): """ Add a conflict and force a document put. @@ -1197,7 +1197,7 @@ class CouchDatabase(CommonBackend): :type doc: CouchDocument """ my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) - self._prune_conflicts(doc, vectorclock.VectorClockRev(doc.rev)) + doc._prune_conflicts(vectorclock.VectorClockRev(doc.rev), self._replica_uid) doc.add_conflict(my_doc) self._put_doc(my_doc, doc) -- cgit v1.2.3 From 6ce4114a19ca8a363f264bfc654d839d266dde5c Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 20:23:14 -0300 Subject: [refactor] _process_incoming_doc is a function now This was being calculated inside CouchDatabase, but it is not a persistence responsibility. It clearly doesn't belong to this persistence layer, and seeing both sides separated allows us to work better on both parts.
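In outline, the database method shrinks to fetch-decide-persist, with the decision itself moved to a pure function. A rough sketch of the resulting call pattern (condensed from the diff below; the wrapper name receive_doc is illustrative only, and conflict fetching and error handling are elided):

    def receive_doc(self, doc, save_conflict):
        # persistence layer: load current state, delegate the decision,
        # then apply whatever write the pure function asked for
        my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True)
        state, save_doc = _process_incoming_doc(
            my_doc, doc, save_conflict, self.replica_uid)
        if save_doc:
            self._put_doc(my_doc, save_doc)
        return state, self._get_generation()

Since _process_incoming_doc only compares revisions and vector clocks, it should be testable without a running CouchDB.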
--- common/src/leap/soledad/common/couch.py | 121 +++++++++++++++++--------------- 1 file changed, 64 insertions(+), 57 deletions(-) diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 6183be6e..13808502 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -149,6 +149,15 @@ class CouchDocument(SoledadDocument): self._conflicts) self.has_conflicts = len(self._conflicts) > 0 + def update(self, new_doc): + # update info + self.rev = new_doc.rev + if new_doc.is_tombstone(): + self.is_tombstone() + else: + self.content = new_doc.content + self.has_conflicts = new_doc.has_conflicts + def _prune_conflicts(self, doc_vcr, autoresolved_increment): """ Prune conflicts that are older then the current document's revision, or @@ -1313,7 +1322,13 @@ class CouchDatabase(CommonBackend): self._save_source_info(replica_uid, replica_gen, replica_trans_id, number_of_docs, doc_idx, sync_id) - state = self._process_incoming_doc(doc, save_conflict) + my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) + if my_doc is not None: + my_doc.set_conflicts(self.get_doc_conflicts(my_doc.doc_id, my_doc.couch_rev)) + state, save_doc = _process_incoming_doc(my_doc, doc, save_conflict, self.replica_uid) + if save_doc: + self._put_doc(my_doc, save_doc) + doc.update(save_doc) return state, self._get_generation() def _save_source_info(self, replica_uid, replica_gen, replica_trans_id, @@ -1327,62 +1342,6 @@ class CouchDatabase(CommonBackend): number_of_docs=number_of_docs, doc_idx=doc_idx, sync_id=sync_id) - def _process_incoming_doc(self, doc, save_conflict): - """ - Check document, save and return state. - """ - cur_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) - # at this point, `doc` has arrived from the other syncing party, and - # we will decide what to do with it. - # First, we prepare the arriving doc to update couch database. 
- old_doc = doc - doc = self._factory(doc.doc_id, doc.rev, doc.get_json()) - if cur_doc is None: - self._put_doc(cur_doc, doc) - return 'inserted' - doc.couch_rev = cur_doc.couch_rev - doc.set_conflicts(self.get_doc_conflicts(doc.doc_id, doc.couch_rev)) - # fetch conflicts because we will eventually manipulate them - # from now on, it works just like u1db sqlite backend - doc_vcr = vectorclock.VectorClockRev(doc.rev) - cur_vcr = vectorclock.VectorClockRev(cur_doc.rev) - if doc_vcr.is_newer(cur_vcr): - rev = doc.rev - doc._prune_conflicts(doc_vcr, self._replica_uid) - if doc.rev != rev: - # conflicts have been autoresolved - state = 'superseded' - else: - state = 'inserted' - self._put_doc(cur_doc, doc) - elif doc.rev == cur_doc.rev: - # magical convergence - state = 'converged' - elif cur_vcr.is_newer(doc_vcr): - # Don't add this to seen_ids, because we have something newer, - # so we should send it back, and we should not generate a - # conflict - state = 'superseded' - elif cur_doc.same_content_as(doc): - # the documents have been edited to the same thing at both ends - doc_vcr.maximize(cur_vcr) - doc_vcr.increment(self._replica_uid) - doc.rev = doc_vcr.as_str() - self._put_doc(cur_doc, doc) - state = 'superseded' - else: - state = 'conflicted' - if save_conflict: - self._force_doc_sync_conflict(doc) - # update info - old_doc.rev = doc.rev - if doc.is_tombstone(): - old_doc.is_tombstone() - else: - old_doc.content = doc.content - old_doc.has_conflicts = doc.has_conflicts - return state - def get_docs(self, doc_ids, check_for_conflicts=True, include_deleted=False): """ @@ -1525,3 +1484,51 @@ class CouchServerState(ServerState): delete databases. """ raise Unauthorized() + + +def _process_incoming_doc(my_doc, other_doc, save_conflict, replica_uid): + """ + Check document, save and return state. + """ + # at this point, `doc` has arrived from the other syncing party, and + # we will decide what to do with it. + # First, we prepare the arriving doc to update couch database. 
+ new_doc = CouchDocument( other_doc.doc_id, other_doc.rev, other_doc.get_json()) + if my_doc is None: + return 'inserted', new_doc + new_doc.couch_rev = my_doc.couch_rev + new_doc.set_conflicts(my_doc.get_conflicts()) + # fetch conflicts because we will eventually manipulate them + # from now on, it works just like u1db sqlite backend + doc_vcr = vectorclock.VectorClockRev(new_doc.rev) + cur_vcr = vectorclock.VectorClockRev(my_doc.rev) + if doc_vcr.is_newer(cur_vcr): + rev = new_doc.rev + new_doc._prune_conflicts(doc_vcr, replica_uid) + if new_doc.rev != rev: + # conflicts have been autoresolved + return 'superseded', new_doc + else: + return 'inserted', new_doc + elif new_doc.rev == my_doc.rev: + # magical convergence + return 'converged', None + elif cur_vcr.is_newer(doc_vcr): + # Don't add this to seen_ids, because we have something newer, + # so we should send it back, and we should not generate a + # conflict + other_doc.update(new_doc) + return 'superseded', None + elif my_doc.same_content_as(new_doc): + # the documents have been edited to the same thing at both ends + doc_vcr.maximize(cur_vcr) + doc_vcr.increment(replica_uid) + new_doc.rev = doc_vcr.as_str() + return 'superseded', new_doc + else: + if save_conflict: + new_doc._prune_conflicts(vectorclock.VectorClockRev(new_doc.rev), replica_uid) + new_doc.add_conflict(my_doc) + return 'conflicted', new_doc + other_doc.update(new_doc) + return 'conflicted', None -- cgit v1.2.3 From 5926e11838a6d71aa3b7865b52b2ba2fab30b203 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 21:16:35 -0300 Subject: [bug] check type before processing Necessary methods are on CouchDocument, but we accept a Document as well; in that case self._factory is needed. Will be simpler soon. --- common/src/leap/soledad/common/couch.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 13808502..3ae79382 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -1319,6 +1319,8 @@ class CouchDatabase(CommonBackend): 'converged', at_gen is the insertion/current generation.
:rtype: (str, int) """ + if type(doc) is not CouchDocument: + doc = self._factory(doc.doc_id, doc.rev, doc.get_json()) self._save_source_info(replica_uid, replica_gen, replica_trans_id, number_of_docs, doc_idx, sync_id) -- cgit v1.2.3 From 8654021f8719cf9d0f17f9d58e4455074aa43bb9 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Wed, 19 Aug 2015 19:09:59 -0300 Subject: [bug] fixes small issues pointed by drebs * file headers * variable names * missing docstrings * prune_conflicts ** extra: tests failed on a 1-based index bug --- .../leap/soledad/client/http_target/__init__.py | 2 +- client/src/leap/soledad/client/http_target/api.py | 16 +++++++++ .../src/leap/soledad/client/http_target/fetch.py | 6 ++-- client/src/leap/soledad/client/http_target/send.py | 12 +++---- .../src/leap/soledad/client/http_target/support.py | 40 +++++++++++++++++++++- common/src/leap/soledad/common/couch.py | 16 ++++++--- 6 files changed, 77 insertions(+), 15 deletions(-) diff --git a/client/src/leap/soledad/client/http_target/__init__.py b/client/src/leap/soledad/client/http_target/__init__.py index e77d20f5..7fa33153 100644 --- a/client/src/leap/soledad/client/http_target/__init__.py +++ b/client/src/leap/soledad/client/http_target/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# http_target.py +# __init__.py # Copyright (C) 2015 LEAP # # This program is free software: you can redistribute it and/or modify diff --git a/client/src/leap/soledad/client/http_target/api.py b/client/src/leap/soledad/client/http_target/api.py index 9e677304..344d999c 100644 --- a/client/src/leap/soledad/client/http_target/api.py +++ b/client/src/leap/soledad/client/http_target/api.py @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# api.py +# Copyright (C) 2015 LEAP +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
import json import base64 diff --git a/client/src/leap/soledad/client/http_target/fetch.py b/client/src/leap/soledad/client/http_target/fetch.py index c4bb79a0..aa02063a 100644 --- a/client/src/leap/soledad/client/http_target/fetch.py +++ b/client/src/leap/soledad/client/http_target/fetch.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# http_target.py +# fetch.py # Copyright (C) 2015 LEAP # # This program is free software: you can redistribute it and/or modify @@ -164,7 +164,7 @@ class HTTPDocFetcher(object): # end of symmetric decryption # ------------------------------------------------------------- self._received_docs += 1 - _emit_received(self._received_docs, total) + _emit_receive_status(self._received_docs, total) return number_of_changes, new_generation, new_transaction_id def _parse_received_doc_response(self, response): @@ -231,7 +231,7 @@ class HTTPDocFetcher(object): source_replica_uid=self.source_replica_uid) -def _emit_received(received_docs, total): +def _emit_receive_status(received_docs, total): msg = "%d/%d" % (received_docs, total) emit(SOLEDAD_SYNC_RECEIVE_STATUS, msg) logger.debug("Sync receive status: %s" % msg) diff --git a/client/src/leap/soledad/client/http_target/send.py b/client/src/leap/soledad/client/http_target/send.py index de18df8b..a6e64908 100644 --- a/client/src/leap/soledad/client/http_target/send.py +++ b/client/src/leap/soledad/client/http_target/send.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# http_target.py +# send.py # Copyright (C) 2015 LEAP # # This program is free software: you can redistribute it and/or modify @@ -33,13 +33,13 @@ class HTTPDocSender(object): defer.returnValue([None, None]) # add remote replica metadata to the request - initial_body = RequestBody( + metadata = RequestBody( last_known_generation=last_known_generation, last_known_trans_id=last_known_trans_id, sync_id=sync_id, ensure=self._ensure_callback is not None) total = len(docs_by_generation) - entries = yield self._entries_from_docs(initial_body, docs_by_generation) + entries = yield self._entries_from_docs(metadata, docs_by_generation) while len(entries): result = yield self._http_request( self._url, @@ -49,14 +49,14 @@ class HTTPDocSender(object): idx = total - len(entries) if self._defer_encryption: self._delete_sent(idx, docs_by_generation) - _emit_send(idx, total) + _emit_send_status(idx, total) response_dict = json.loads(result)[0] gen_after_send = response_dict['new_generation'] trans_id_after_send = response_dict['new_transaction_id'] defer.returnValue([gen_after_send, trans_id_after_send]) def _delete_sent(self, idx, docs_by_generation): - doc = docs_by_generation[idx][0] + doc = docs_by_generation[idx - 1][0] self._sync_enc_pool.delete_encrypted_doc( doc.doc_id, doc.rev) @@ -93,7 +93,7 @@ class HTTPDocSender(object): return d -def _emit_send(idx, total): +def _emit_send_status(idx, total): msg = "%d/%d" % (idx, total) emit( SOLEDAD_SYNC_SEND_STATUS, diff --git a/client/src/leap/soledad/client/http_target/support.py b/client/src/leap/soledad/client/http_target/support.py index 88934636..363a4f7d 100644 --- a/client/src/leap/soledad/client/http_target/support.py +++ b/client/src/leap/soledad/client/http_target/support.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# http_target.py +# support.py # Copyright (C) 2015 LEAP # # This program is free software: you can redistribute it and/or modify @@ -128,17 +128,47 @@ def readBody(response): class RequestBody(object): + """ + This class is a helper to generate send and fetch requests. 
+ The expected format is something like: + [ + {headers}, + {entry1}, + {...}, + {entryN}, + ] + """ def __init__(self, **header_dict): + """ + Creates a new RequestBody holding header information. + + @param header_dict: A dictionary with the headers. + """ self.headers = header_dict self.entries = [] def insert_info(self, **entry_dict): + """ + Dumps an entry into JSON format and add it to entries list. + + @param entry_dicts: Entry as a dictionary + + @return: length of the entry after JSON dumps + """ entry = json.dumps(entry_dict) self.entries.append(entry) return len(entry) def remove(self, number=1): + """ + Removes an amount of entries and returns it formatted and ready + to be sent. + + @param number: number of entries to remove and format + + @return: formatted body ready to be sent + """ entries = [self.entries.pop(0) for i in xrange(number)] return self.entries_to_str(entries) @@ -149,6 +179,14 @@ class RequestBody(object): return len(self.entries) def entries_to_str(self, entries=None): + """ + Format a list of entries into the body format expected + by the server. + + @param entries: entries to format + + @return: formatted body ready to be sent + """ data = '[\r\n' + json.dumps(self.headers) data += ''.join(',\r\n' + entry for entry in entries) return data + '\r\n]' diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 3ae79382..90f1a36f 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -158,10 +158,11 @@ class CouchDocument(SoledadDocument): self.content = new_doc.content self.has_conflicts = new_doc.has_conflicts - def _prune_conflicts(self, doc_vcr, autoresolved_increment): + def prune_conflicts(self, doc_vcr, autoresolved_increment): """ Prune conflicts that are older then the current document's revision, or whose content match to the current document's content. + Originally in u1db.CommonBackend :param doc: The document to have conflicts pruned. :type doc: CouchDocument @@ -1206,7 +1207,7 @@ class CouchDatabase(CommonBackend): :type doc: CouchDocument """ my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) - doc._prune_conflicts(vectorclock.VectorClockRev(doc.rev), self._replica_uid) + doc.prune_conflicts(vectorclock.VectorClockRev(doc.rev), self._replica_uid) doc.add_conflict(my_doc) self._put_doc(my_doc, doc) @@ -1388,6 +1389,13 @@ class CouchDatabase(CommonBackend): continue yield t._doc + def _prune_conflicts(self, doc, doc_vcr): + """ + Overrides original method, but it is implemented elsewhere for + simplicity. + """ + doc.prune_conflicts(doc_vcr, self._replica_uid) + def _new_resource(self, *path): """ Return a new resource for accessing a couch database. 
@@ -1506,7 +1514,7 @@ def _process_incoming_doc(my_doc, other_doc, save_conflict, replica_uid): cur_vcr = vectorclock.VectorClockRev(my_doc.rev) if doc_vcr.is_newer(cur_vcr): rev = new_doc.rev - new_doc._prune_conflicts(doc_vcr, replica_uid) + new_doc.prune_conflicts(doc_vcr, replica_uid) if new_doc.rev != rev: # conflicts have been autoresolved return 'superseded', new_doc @@ -1529,7 +1537,7 @@ def _process_incoming_doc(my_doc, other_doc, save_conflict, replica_uid): return 'superseded', new_doc else: if save_conflict: - new_doc._prune_conflicts(vectorclock.VectorClockRev(new_doc.rev), replica_uid) + new_doc.prune_conflicts(vectorclock.VectorClockRev(new_doc.rev), replica_uid) new_doc.add_conflict(my_doc) return 'conflicted', new_doc other_doc.update(new_doc) -- cgit v1.2.3 From d1b47b03661be1341cbaf28c2f37663b50ba24f9 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Thu, 20 Aug 2015 14:46:25 -0300 Subject: [docs] Fix docstrings Some were missing or in an incorrect (sphinx) format, as drebs and kaliy pointed out. --- client/src/leap/soledad/client/http_target/api.py | 50 ++-------------------- .../src/leap/soledad/client/http_target/fetch.py | 8 ++++ client/src/leap/soledad/client/http_target/send.py | 4 ++ .../src/leap/soledad/client/http_target/support.py | 25 ++++++++--- 4 files changed, 33 insertions(+), 54 deletions(-) diff --git a/client/src/leap/soledad/client/http_target/api.py b/client/src/leap/soledad/client/http_target/api.py index 344d999c..d83250ee 100644 --- a/client/src/leap/soledad/client/http_target/api.py +++ b/client/src/leap/soledad/client/http_target/api.py @@ -26,55 +26,11 @@ from twisted.internet import defer from leap.soledad.common.errors import InvalidAuthTokenError from leap.soledad.client.http_target.support import readBody -from leap.common.http import HTTPClient - class SyncTargetAPI(SyncTarget): - - def __init__(self, url, source_replica_uid, creds, crypto, cert_file, - sync_db=None, sync_enc_pool=None): - """ - Initialize the sync target. - - :param url: The server sync url. - :type url: str - :param source_replica_uid: The source replica uid which we use when - deferring decryption. - :type source_replica_uid: str - :param creds: A dictionary containing the uuid and token. - :type creds: creds - :param crypto: An instance of SoledadCrypto so we can encrypt/decrypt - document contents when syncing. - :type crypto: soledad.crypto.SoledadCrypto - :param cert_file: Path to the certificate of the ca used to validate - the SSL certificate used by the remote soledad - server. - :type cert_file: str - :param sync_db: Optional. handler for the db with the symmetric - encryption of the syncing documents. If - None, encryption will be done in-place, - instead of retreiving it from the dedicated - database. - :type sync_db: Sqlite handler - :param sync_enc_pool: The encryption pool to use to defer encryption. - If None is passed the encryption will not be - deferred.
- :type sync_enc_pool: leap.soledad.client.encdecpool.SyncEncrypterPool - """ - if url.endswith("/"): - url = url[:-1] - self._url = str(url) + "/sync-from/" + str(source_replica_uid) - self.source_replica_uid = source_replica_uid - self._auth_header = None - self.set_creds(creds) - self._crypto = crypto - self._sync_db = sync_db - self._sync_enc_pool = sync_enc_pool - self._insert_doc_cb = None - # asynchronous encryption/decryption attributes - self._decryption_callback = None - self._sync_decr_pool = None - self._http = HTTPClient(cert_file) + """ + Declares public methods and implements u1db.SyncTarget. + """ def close(self): self._http.close() diff --git a/client/src/leap/soledad/client/http_target/fetch.py b/client/src/leap/soledad/client/http_target/fetch.py index aa02063a..a991d2a2 100644 --- a/client/src/leap/soledad/client/http_target/fetch.py +++ b/client/src/leap/soledad/client/http_target/fetch.py @@ -30,6 +30,14 @@ logger = logging.getLogger(__name__) class HTTPDocFetcher(object): + """ + Handles Document fetching from Soledad server, using HTTP as transport. + Steps: + * Prepares metadata by asking server for one document + * Fetch the total on response and prepare to ask all remaining + * (async) Documents will come encrypted. + So we parse, decrypt and insert locally as they arrive. + """ @defer.inlineCallbacks def _receive_docs(self, last_known_generation, last_known_trans_id, diff --git a/client/src/leap/soledad/client/http_target/send.py b/client/src/leap/soledad/client/http_target/send.py index a6e64908..fe3a753f 100644 --- a/client/src/leap/soledad/client/http_target/send.py +++ b/client/src/leap/soledad/client/http_target/send.py @@ -24,6 +24,10 @@ logger = logging.getLogger(__name__) class HTTPDocSender(object): + """ + Handles Document uploading from Soledad server, using HTTP as transport. + They need to be encrypted and metadata prepared before sending. + """ @defer.inlineCallbacks def _send_docs(self, docs_by_generation, last_known_generation, diff --git a/client/src/leap/soledad/client/http_target/support.py b/client/src/leap/soledad/client/http_target/support.py index 363a4f7d..5daabb61 100644 --- a/client/src/leap/soledad/client/http_target/support.py +++ b/client/src/leap/soledad/client/http_target/support.py @@ -31,6 +31,10 @@ from twisted.web._newclient import PotentialDataLoss # client below. class ReadBodyProtocol(_ReadBodyProtocol): + """ + From original Twisted implementation, focused on adding our error + handling and ensuring that the proper u1db error is raised. + """ def __init__(self, response, deferred): """ @@ -143,7 +147,8 @@ class RequestBody(object): """ Creates a new RequestBody holding header information. - @param header_dict: A dictionary with the headers. + :param header_dict: A dictionary with the headers. + :type header_dict: dict """ self.headers = header_dict self.entries = [] @@ -152,9 +157,11 @@ class RequestBody(object): """ Dumps an entry into JSON format and add it to entries list. - @param entry_dicts: Entry as a dictionary + :param entry_dict: Entry as a dictionary + :type entry_dict: dict - @return: length of the entry after JSON dumps + :return: length of the entry after JSON dumps + :rtype: int """ entry = json.dumps(entry_dict) self.entries.append(entry) @@ -165,9 +172,11 @@ class RequestBody(object): Removes an amount of entries and returns it formatted and ready to be sent. 
- @param number: number of entries to remove and format + :param number: number of entries to remove and format + :type number: int - @return: formatted body ready to be sent + :return: formatted body ready to be sent + :rtype: str """ entries = [self.entries.pop(0) for i in xrange(number)] return self.entries_to_str(entries) @@ -183,9 +192,11 @@ class RequestBody(object): Format a list of entries into the body format expected by the server. - @param entries: entries to format + :param entries: entries to format + :type entries: list - @return: formatted body ready to be sent + :return: formatted body ready to be sent + :rtype: str """ data = '[\r\n' + json.dumps(self.headers) data += ''.join(',\r\n' + entry for entry in entries) -- cgit v1.2.3 From de73fc6969433a69ec6ba12ec508c3c93b83fcc6 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Thu, 20 Aug 2015 14:47:14 -0300 Subject: [refactor] Move constructor, use isinstance isinstance is better, as kaliy pointed out, and the constructor is also in a safer place on __init__.py to be explicit. Also re-apply a change from last rebase; --- .../leap/soledad/client/http_target/__init__.py | 45 ++++++++++++++++++++++ client/src/leap/soledad/client/http_target/api.py | 4 +- .../src/leap/soledad/client/http_target/fetch.py | 4 ++ common/src/leap/soledad/common/couch.py | 2 +- 4 files changed, 52 insertions(+), 3 deletions(-) diff --git a/client/src/leap/soledad/client/http_target/__init__.py b/client/src/leap/soledad/client/http_target/__init__.py index 7fa33153..7a5cea9f 100644 --- a/client/src/leap/soledad/client/http_target/__init__.py +++ b/client/src/leap/soledad/client/http_target/__init__.py @@ -24,6 +24,7 @@ after receiving. import logging +from leap.common.http import HTTPClient from leap.soledad.client.http_target.send import HTTPDocSender from leap.soledad.client.http_target.api import SyncTargetAPI from leap.soledad.client.http_target.fetch import HTTPDocFetcher @@ -43,3 +44,47 @@ class SoledadHTTPSyncTarget(SyncTargetAPI, HTTPDocSender, HTTPDocFetcher): the parsed documents that the remote send us, before being decrypted and written to the main database. """ + def __init__(self, url, source_replica_uid, creds, crypto, cert_file, + sync_db=None, sync_enc_pool=None): + """ + Initialize the sync target. + + :param url: The server sync url. + :type url: str + :param source_replica_uid: The source replica uid which we use when + deferring decryption. + :type source_replica_uid: str + :param creds: A dictionary containing the uuid and token. + :type creds: creds + :param crypto: An instance of SoledadCrypto so we can encrypt/decrypt + document contents when syncing. + :type crypto: soledad.crypto.SoledadCrypto + :param cert_file: Path to the certificate of the ca used to validate + the SSL certificate used by the remote soledad + server. + :type cert_file: str + :param sync_db: Optional. handler for the db with the symmetric + encryption of the syncing documents. If + None, encryption will be done in-place, + instead of retreiving it from the dedicated + database. + :type sync_db: Sqlite handler + :param sync_enc_pool: The encryption pool to use to defer encryption. + If None is passed the encryption will not be + deferred. 
+ :type sync_enc_pool: leap.soledad.client.encdecpool.SyncEncrypterPool + """ + if url.endswith("/"): + url = url[:-1] + self._url = str(url) + "/sync-from/" + str(source_replica_uid) + self.source_replica_uid = source_replica_uid + self._auth_header = None + self.set_creds(creds) + self._crypto = crypto + self._sync_db = sync_db + self._sync_enc_pool = sync_enc_pool + self._insert_doc_cb = None + # asynchronous encryption/decryption attributes + self._decryption_callback = None + self._sync_decr_pool = None + self._http = HTTPClient(cert_file) diff --git a/client/src/leap/soledad/client/http_target/api.py b/client/src/leap/soledad/client/http_target/api.py index d83250ee..98a5b47e 100644 --- a/client/src/leap/soledad/client/http_target/api.py +++ b/client/src/leap/soledad/client/http_target/api.py @@ -84,13 +84,13 @@ class SyncTargetAPI(SyncTarget): """ raw = yield self._http_request(self._url) res = json.loads(raw) - defer.returnValue([ + defer.returnValue(( res['target_replica_uid'], res['target_replica_generation'], res['target_replica_transaction_id'], res['source_replica_generation'], res['source_transaction_id'] - ]) + )) def record_sync_info( self, source_replica_uid, source_replica_generation, diff --git a/client/src/leap/soledad/client/http_target/fetch.py b/client/src/leap/soledad/client/http_target/fetch.py index a991d2a2..d38ecb19 100644 --- a/client/src/leap/soledad/client/http_target/fetch.py +++ b/client/src/leap/soledad/client/http_target/fetch.py @@ -70,6 +70,10 @@ class HTTPDocFetcher(object): self._received_docs = 0 number_of_changes, ngen, ntrans = self._insert_received_doc(doc, 1, 1) + if ngen: + new_generation = ngen + new_transaction_id = ntrans + if defer_decryption: self._sync_decr_pool.start(number_of_changes) diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 90f1a36f..08096f86 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -1320,7 +1320,7 @@ class CouchDatabase(CommonBackend): 'converged', at_gen is the insertion/current generation. :rtype: (str, int) """ - if type(doc) is not CouchDocument: + if not isinstance(doc, CouchDocument): doc = self._factory(doc.doc_id, doc.rev, doc.get_json()) self._save_source_info(replica_uid, replica_gen, replica_trans_id, number_of_docs, -- cgit v1.2.3 From 23ea0193a521a1f5cb539a342be594b7b7acedcf Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 21 Aug 2015 12:52:12 -0300 Subject: [bug] reduce memory usage by consuming single doc Preparing many docs is useful for batching only. As we are sending one by one, I will leave the prepare_one_doc method to do the encryption as the docs go to upload. Also fixes method name as kaliy suggested.
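The memory effect, as a sketch (method names follow the diff below; the comments are the point, not the exact code):

    # before: every entry was encrypted and queued up front,
    # so all encrypted payloads sat in memory until sent
    entries = yield self._entries_from_docs(body, docs_by_generation)

    # after: each doc is prepared just before its request goes out,
    # so at most one encrypted payload is held at a time
    for idx, entry in enumerate(docs_by_generation, 1):
        yield self._prepare_one_doc(entry, body, idx, total)
        result = yield self._http_request(
            self._url, method='POST', body=body.pop(1),
            content_type='application/x-soledad-sync-put')

Batching should later be able to reuse the same loop by popping more than one entry per request.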
--- client/src/leap/soledad/client/http_target/send.py | 25 ++++++++++------------ .../src/leap/soledad/client/http_target/support.py | 4 ++-- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/client/src/leap/soledad/client/http_target/send.py b/client/src/leap/soledad/client/http_target/send.py index fe3a753f..72c33c6c 100644 --- a/client/src/leap/soledad/client/http_target/send.py +++ b/client/src/leap/soledad/client/http_target/send.py @@ -37,20 +37,19 @@ class HTTPDocSender(object): defer.returnValue([None, None]) # add remote replica metadata to the request - metadata = RequestBody( + body = RequestBody( last_known_generation=last_known_generation, last_known_trans_id=last_known_trans_id, sync_id=sync_id, ensure=self._ensure_callback is not None) total = len(docs_by_generation) - entries = yield self._entries_from_docs(metadata, docs_by_generation) - while len(entries): + for idx, entry in enumerate(docs_by_generation, 1): + yield self._prepare_one_doc(entry, body, idx, total) result = yield self._http_request( self._url, method='POST', - body=entries.remove(1), + body=body.pop(1), content_type='application/x-soledad-sync-put') - idx = total - len(entries) if self._defer_encryption: self._delete_sent(idx, docs_by_generation) _emit_send_status(idx, total) @@ -65,15 +64,13 @@ class HTTPDocSender(object): doc.doc_id, doc.rev) @defer.inlineCallbacks - def _entries_from_docs(self, initial_body, docs_by_generation): - number_of_docs = len(docs_by_generation) - for idx, (doc, gen, trans_id) in enumerate(docs_by_generation, 1): - content = yield self._encrypt_doc(doc) - initial_body.insert_info( - id=doc.doc_id, rev=doc.rev, content=content, gen=gen, - trans_id=trans_id, number_of_docs=number_of_docs, - doc_idx=idx) - defer.returnValue(initial_body) + def _prepare_one_doc(self, entry, body, idx, total): + doc, gen, trans_id = entry + content = yield self._encrypt_doc(doc) + body.insert_info( + id=doc.doc_id, rev=doc.rev, content=content, gen=gen, + trans_id=trans_id, number_of_docs=total, + doc_idx=idx) def _encrypt_doc(self, doc): d = None diff --git a/client/src/leap/soledad/client/http_target/support.py b/client/src/leap/soledad/client/http_target/support.py index 5daabb61..44cd7089 100644 --- a/client/src/leap/soledad/client/http_target/support.py +++ b/client/src/leap/soledad/client/http_target/support.py @@ -167,12 +167,12 @@ class RequestBody(object): self.entries.append(entry) return len(entry) - def remove(self, number=1): + def pop(self, number=1): """ Removes an amount of entries and returns it formatted and ready to be sent. - :param number: number of entries to remove and format + :param number: number of entries to pop and format :type number: int :return: formatted body ready to be sent -- cgit v1.2.3 From 057f9c02894c05de4d1d4fc1f93ba86ec6bea96d Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Wed, 26 Aug 2015 17:48:57 -0300 Subject: [tests] fix variable name from refactor From: [refactor] removing getters and setters from couch.py _couch_url was a private variable with getter and setter doing the same as a public variable. It is accessed all over the code, so being private with getters and setters didn't make sense. This commit fixes the tests to also follow this style from now on.
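For reference, the pattern being dropped on the couch.py side, as a sketch (the property carried a docstring but added no behavior):

    # before: a private attribute proxied by a do-nothing property
    def _get_couch_url(self):
        return self._couch_url

    def _set_couch_url(self, url):
        self._couch_url = url

    couch_url = property(_get_couch_url, _set_couch_url)

    # after: a plain public attribute, read and written directly
    self.couch_url = url  # e.g. assigned once in __init__

The test changes below just rename self._couch_url to self.couch_url to match.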
--- client/changes/refactor_improve_http_target | 3 +++ common/changes/refactor_couch | 2 ++ .../common/tests/test_couch_operations_atomicity.py | 6 +++--- common/src/leap/soledad/common/tests/test_server.py | 18 +++++++++--------- .../leap/soledad/common/tests/test_sqlcipher_sync.py | 4 ++-- common/src/leap/soledad/common/tests/test_sync.py | 8 ++++---- .../leap/soledad/common/tests/test_sync_deferred.py | 2 +- .../src/leap/soledad/common/tests/test_sync_mutex.py | 6 +++--- .../src/leap/soledad/common/tests/test_sync_target.py | 6 +++--- common/src/leap/soledad/common/tests/util.py | 6 +++--- server/changes/bug_badrequest | 1 + 11 files changed, 34 insertions(+), 28 deletions(-) create mode 100644 client/changes/refactor_improve_http_target create mode 100644 common/changes/refactor_couch create mode 100644 server/changes/bug_badrequest diff --git a/client/changes/refactor_improve_http_target b/client/changes/refactor_improve_http_target new file mode 100644 index 00000000..a8fe5f60 --- /dev/null +++ b/client/changes/refactor_improve_http_target @@ -0,0 +1,3 @@ + o Split http_target into 4 modules, separating those responsibilities. + o Refactor details of making an HTTP request body and headers out of the + send/fetch logic. This also makes it easier to enable batching. diff --git a/common/changes/refactor_couch b/common/changes/refactor_couch new file mode 100644 index 00000000..2f36b97f --- /dev/null +++ b/common/changes/refactor_couch @@ -0,0 +1,2 @@ + o Refactor couch.py to separate persistence from logic while saving uploaded + documents. Also simplify logic while checking for conflicts. diff --git a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py index c488822e..0a06cc39 100644 --- a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py +++ b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py @@ -83,15 +83,15 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer): return soledad def make_app(self): - self.request_state = CouchServerState(self._couch_url) + self.request_state = CouchServerState(self.couch_url) return self.make_app_after_state(self.request_state) def setUp(self): TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) self.db = CouchDatabase.open_database( - urljoin(self._couch_url, 'user-user-uuid'), + urljoin(self.couch_url, 'user-user-uuid'), create=True, replica_uid='replica', ensure_ddocs=True) diff --git a/common/src/leap/soledad/common/tests/test_server.py b/common/src/leap/soledad/common/tests/test_server.py index 5ffa2a63..5e8e5f90 100644 --- a/common/src/leap/soledad/common/tests/test_server.py +++ b/common/src/leap/soledad/common/tests/test_server.py @@ -50,7 +50,7 @@ from leap.soledad.server.auth import URLToAuthorization def _couch_ensure_database(self, dbname): db = CouchDatabase.open_database( - self._couch_url + '/' + dbname, + self.couch_url + '/' + dbname, create=True, ensure_ddocs=True) return db, db._replica_uid @@ -325,7 +325,7 @@ class EncryptedSyncTestCase( shared_db=self.get_default_shared_mock(_put_doc_side_effect)) def make_app(self): - self.request_state = CouchServerState(self._couch_url) + self.request_state = CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): @@ -333,7 +333,7 @@ class EncryptedSyncTestCase( # 
dependencies. # XXX explain better CouchDBTestCase.setUp(self) - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") TestCaseWithServer.setUp(self) @@ -368,7 +368,7 @@ class EncryptedSyncTestCase( # ensure remote db exists before syncing db = CouchDatabase.open_database( - urljoin(self._couch_url, 'user-' + user), + urljoin(self.couch_url, 'user-' + user), create=True, ensure_ddocs=True) @@ -494,24 +494,24 @@ class LockResourceTestCase( # dependencies. # XXX explain better CouchDBTestCase.setUp(self) - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") TestCaseWithServer.setUp(self) # create the databases CouchDatabase.open_database( - urljoin(self._couch_url, 'shared'), + urljoin(self.couch_url, 'shared'), create=True, ensure_ddocs=True) CouchDatabase.open_database( - urljoin(self._couch_url, 'tokens'), + urljoin(self.couch_url, 'tokens'), create=True, ensure_ddocs=True) - self._state = CouchServerState(self._couch_url) + self._state = CouchServerState(self.couch_url) def tearDown(self): # delete remote database db = CouchDatabase.open_database( - urljoin(self._couch_url, 'shared'), + urljoin(self.couch_url, 'shared'), create=True, ensure_ddocs=True) db.delete_database() diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py index c57d6f61..af2d0e2a 100644 --- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py +++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py @@ -733,7 +733,7 @@ def _make_local_db_and_token_http_target(test, path='test'): test.startTwistedServer() # ensure remote db exists before syncing db = couch.CouchDatabase.open_database( - urljoin(test._couch_url, 'test'), + urljoin(test.couch_url, 'test'), create=True, replica_uid='test', ensure_ddocs=True) @@ -790,7 +790,7 @@ class SQLCipherSyncTargetTests( (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id)) def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def set_trace_hook(self, callback, shallow=False): diff --git a/common/src/leap/soledad/common/tests/test_sync.py b/common/src/leap/soledad/common/tests/test_sync.py index 14152370..61f3879f 100644 --- a/common/src/leap/soledad/common/tests/test_sync.py +++ b/common/src/leap/soledad/common/tests/test_sync.py @@ -56,14 +56,14 @@ class InterruptableSyncTestCase( sync_target = soledad_sync_target def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) def tearDown(self): CouchDBTestCase.tearDown(self) @@ -103,7 +103,7 @@ class InterruptableSyncTestCase( # ensure remote db exists before syncing db = couch.CouchDatabase.open_database( - urljoin(self._couch_url, 'user-user-uuid'), + urljoin(self.couch_url, 'user-user-uuid'), create=True, ensure_ddocs=True) @@ -167,7 +167,7 @@ class 
TestSoledadDbSync( token = False def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): diff --git a/common/src/leap/soledad/common/tests/test_sync_deferred.py b/common/src/leap/soledad/common/tests/test_sync_deferred.py index ffb8a4ae..0065413a 100644 --- a/common/src/leap/soledad/common/tests/test_sync_deferred.py +++ b/common/src/leap/soledad/common/tests/test_sync_deferred.py @@ -129,7 +129,7 @@ class TestSoledadDbSyncDeferredEncDecr( token = True def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): diff --git a/common/src/leap/soledad/common/tests/test_sync_mutex.py b/common/src/leap/soledad/common/tests/test_sync_mutex.py index a904a940..aa46d5b7 100644 --- a/common/src/leap/soledad/common/tests/test_sync_mutex.py +++ b/common/src/leap/soledad/common/tests/test_sync_mutex.py @@ -84,14 +84,14 @@ class TestSyncMutex( sync_target = soledad_sync_target def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) def tearDown(self): CouchDBTestCase.tearDown(self) @@ -103,7 +103,7 @@ class TestSyncMutex( # ensure remote db exists before syncing db = couch.CouchDatabase.open_database( - urljoin(self._couch_url, 'user-user-uuid'), + urljoin(self.couch_url, 'user-user-uuid'), create=True, ensure_ddocs=True) diff --git a/common/src/leap/soledad/common/tests/test_sync_target.py b/common/src/leap/soledad/common/tests/test_sync_target.py index d855fb52..79c350cd 100644 --- a/common/src/leap/soledad/common/tests/test_sync_target.py +++ b/common/src/leap/soledad/common/tests/test_sync_target.py @@ -63,13 +63,13 @@ class TestSoledadParseReceivedDocResponse(SoledadWithCouchServerMixin): def setUp(self): SoledadWithCouchServerMixin.setUp(self) - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) creds = {'token': { 'uuid': 'user-uuid', 'token': 'auth-token', }} self.target = target.SoledadHTTPSyncTarget( - self._couch_url, + self.couch_url, uuid4().hex, creds, self._soledad._crypto, @@ -819,7 +819,7 @@ class TestSoledadDbSync( token = False def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): diff --git a/common/src/leap/soledad/common/tests/util.py b/common/src/leap/soledad/common/tests/util.py index daa9c558..2190eeaa 100644 --- a/common/src/leap/soledad/common/tests/util.py +++ b/common/src/leap/soledad/common/tests/util.py @@ -486,7 +486,7 @@ class CouchServerStateForTests(CouchServerState): def _create_database(self, dbname): return CouchDatabase.open_database( - urljoin(self._couch_url, dbname), + urljoin(self.couch_url, dbname), True, replica_uid=dbname, ensure_ddocs=True) @@ -506,7 +506,7 @@ class SoledadWithCouchServerMixin( main_test_class = getattr(self, 
'main_test_class', None) if main_test_class is not None: main_test_class.setUp(self) - self._couch_url = 'http://localhost:%d' % self.wrapper.port + self.couch_url = 'http://localhost:%d' % self.wrapper.port def tearDown(self): main_test_class = getattr(self, 'main_test_class', None) @@ -514,7 +514,7 @@ class SoledadWithCouchServerMixin( main_test_class.tearDown(self) # delete the test database try: - db = CouchDatabase(self._couch_url, 'test') + db = CouchDatabase(self.couch_url, 'test') db.delete_database() except DatabaseDoesNotExist: pass diff --git a/server/changes/bug_badrequest b/server/changes/bug_badrequest new file mode 100644 index 00000000..74901476 --- /dev/null +++ b/server/changes/bug_badrequest @@ -0,0 +1 @@ + o Fix a bug where BadRequest could be raised after everything was persisted -- cgit v1.2.3 From af611822aa0ed3fa4be7089fa9835820f489431f Mon Sep 17 00:00:00 2001 From: Kali Kaneko Date: Fri, 28 Aug 2015 12:37:01 -0400 Subject: [style] pep8 fixes --- client/src/leap/soledad/client/http_target.py | 6 ++++-- client/src/leap/soledad/client/http_target/api.py | 3 ++- common/src/leap/soledad/common/couch.py | 15 ++++++++++----- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/client/src/leap/soledad/client/http_target.py b/client/src/leap/soledad/client/http_target.py index ed538add..046af089 100644 --- a/client/src/leap/soledad/client/http_target.py +++ b/client/src/leap/soledad/client/http_target.py @@ -402,7 +402,8 @@ class SoledadHTTPSyncTarget(SyncTarget): sync_id=sync_id, ensure=self._ensure_callback is not None) total = len(docs_by_generation) - entries = yield self._entries_from_docs(initial_body, docs_by_generation) + entries = yield self._entries_from_docs( + initial_body, docs_by_generation) while len(entries): result = yield self._http_request( self._url, @@ -663,7 +664,8 @@ class SoledadHTTPSyncTarget(SyncTarget): insert_doc_cb=self._insert_doc_cb, source_replica_uid=self.source_replica_uid) - def _http_request(self, url, method='GET', body=None, headers=None, content_type=None): + def _http_request(self, url, method='GET', body=None, headers=None, + content_type=None): headers = headers or self._base_header if content_type: headers.update({'content-type': [content_type]}) diff --git a/client/src/leap/soledad/client/http_target/api.py b/client/src/leap/soledad/client/http_target/api.py index 98a5b47e..dc13e9cc 100644 --- a/client/src/leap/soledad/client/http_target/api.py +++ b/client/src/leap/soledad/client/http_target/api.py @@ -56,7 +56,8 @@ class SyncTargetAPI(SyncTarget): def _defer_encryption(self): return self._sync_enc_pool is not None - def _http_request(self, url, method='GET', body=None, headers=None, content_type=None): + def _http_request(self, url, method='GET', body=None, headers=None, + content_type=None): headers = headers or self._base_header if content_type: headers.update({'content-type': [content_type]}) diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 08096f86..82b5aca8 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -1207,7 +1207,8 @@ class CouchDatabase(CommonBackend): :type doc: CouchDocument """ my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) - doc.prune_conflicts(vectorclock.VectorClockRev(doc.rev), self._replica_uid) + doc.prune_conflicts( + vectorclock.VectorClockRev(doc.rev), self._replica_uid) doc.add_conflict(my_doc) self._put_doc(my_doc, doc) @@ -1327,8 +1328,10 @@ class 
CouchDatabase(CommonBackend): doc_idx, sync_id) my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) if my_doc is not None: - my_doc.set_conflicts(self.get_doc_conflicts(my_doc.doc_id, my_doc.couch_rev)) - state, save_doc = _process_incoming_doc(my_doc, doc, save_conflict, self.replica_uid) + my_doc.set_conflicts( + self.get_doc_conflicts(my_doc.doc_id, my_doc.couch_rev)) + state, save_doc = _process_incoming_doc( + my_doc, doc, save_conflict, self.replica_uid) if save_doc: self._put_doc(my_doc, save_doc) doc.update(save_doc) @@ -1503,7 +1506,8 @@ def _process_incoming_doc(my_doc, other_doc, save_conflict, replica_uid): # at this point, `doc` has arrived from the other syncing party, and # we will decide what to do with it. # First, we prepare the arriving doc to update couch database. - new_doc = CouchDocument(other_doc.doc_id, other_doc.rev, other_doc.get_json()) + new_doc = CouchDocument( + other_doc.doc_id, other_doc.rev, other_doc.get_json()) if my_doc is None: return 'inserted', new_doc new_doc.couch_rev = my_doc.couch_rev @@ -1537,7 +1541,8 @@ def _process_incoming_doc(my_doc, other_doc, save_conflict, replica_uid): return 'superseded', new_doc else: if save_conflict: - new_doc.prune_conflicts(vectorclock.VectorClockRev(new_doc.rev), replica_uid) + new_doc.prune_conflicts( + vectorclock.VectorClockRev(new_doc.rev), replica_uid) new_doc.add_conflict(my_doc) return 'conflicted', new_doc other_doc.update(new_doc) -- cgit v1.2.3 From 8a80c8a2475580d3d6b4ae365cc00e09059ec587 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 28 Aug 2015 14:15:32 -0300 Subject: [bug] cleanup http_target.py file from refactor The http_target.py refactor started in 8adf2dedb74941352520d8de42326b0c59818728 forgot to remove the old file. --- client/src/leap/soledad/client/http_target.py | 732 -------------------------- 1 file changed, 732 deletions(-) delete mode 100644 client/src/leap/soledad/client/http_target.py diff --git a/client/src/leap/soledad/client/http_target.py b/client/src/leap/soledad/client/http_target.py deleted file mode 100644 index 046af089..00000000 --- a/client/src/leap/soledad/client/http_target.py +++ /dev/null @@ -1,732 +0,0 @@ -# -*- coding: utf-8 -*- -# http_target.py -# Copyright (C) 2015 LEAP -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - - -""" -A U1DB backend for encrypting data before sending to server and decrypting -after receiving. 
-""" - - -import json -import base64 -import logging -import warnings - -from uuid import uuid4 - -from twisted.internet import defer -from twisted.web.error import Error -from twisted.web.client import _ReadBodyProtocol -from twisted.web.client import PartialDownloadError -from twisted.web._newclient import ResponseDone -from twisted.web._newclient import PotentialDataLoss - -from u1db import errors -from u1db import SyncTarget -from u1db.remote import utils -from u1db.remote import http_errors - -from leap.common.http import HTTPClient - -from leap.soledad.common.document import SoledadDocument -from leap.soledad.common.errors import InvalidAuthTokenError - -from leap.soledad.client.crypto import is_symmetrically_encrypted -from leap.soledad.client.events import SOLEDAD_SYNC_SEND_STATUS -from leap.soledad.client.events import SOLEDAD_SYNC_RECEIVE_STATUS -from leap.soledad.client.events import emit -from leap.soledad.client.encdecpool import SyncDecrypterPool - - -logger = logging.getLogger(__name__) - - -# we want to make sure that HTTP errors will raise appropriate u1db errors, -# that is, fire errbacks with the appropriate failures, in the context of -# twisted. Because of that, we redefine the http body reader used by the HTTP -# client below. - -class ReadBodyProtocol(_ReadBodyProtocol): - - def __init__(self, response, deferred): - """ - Initialize the protocol, additionally storing the response headers. - """ - _ReadBodyProtocol.__init__( - self, response.code, response.phrase, deferred) - self.headers = response.headers - - # ---8<--- snippet from u1db.remote.http_client, modified to use errbacks - def _error(self, respdic): - descr = respdic.get("error") - exc_cls = errors.wire_description_to_exc.get(descr) - if exc_cls is not None: - message = respdic.get("message") - self.deferred.errback(exc_cls(message)) - # ---8<--- end of snippet from u1db.remote.http_client - - def connectionLost(self, reason): - """ - Deliver the accumulated response bytes to the waiting L{Deferred}, if - the response body has been completely received without error. - """ - if reason.check(ResponseDone): - - body = b''.join(self.dataBuffer) - - # ---8<--- snippet from u1db.remote.http_client - if self.status in (200, 201): - self.deferred.callback(body) - elif self.status in http_errors.ERROR_STATUSES: - try: - respdic = json.loads(body) - except ValueError: - self.deferred.errback( - errors.HTTPError(self.status, body, self.headers)) - else: - self._error(respdic) - # special cases - elif self.status == 503: - self.deferred.errback(errors.Unavailable(body, self.headers)) - else: - self.deferred.errback( - errors.HTTPError(self.status, body, self.headers)) - # ---8<--- end of snippet from u1db.remote.http_client - - elif reason.check(PotentialDataLoss): - self.deferred.errback( - PartialDownloadError(self.status, self.message, - b''.join(self.dataBuffer))) - else: - self.deferred.errback(reason) - - -def readBody(response): - """ - Get the body of an L{IResponse} and return it as a byte string. - - This is a helper function for clients that don't want to incrementally - receive the body of an HTTP response. - - @param response: The HTTP response for which the body will be read. - @type response: L{IResponse} provider - - @return: A L{Deferred} which will fire with the body of the response. - Cancelling it will close the connection to the server immediately. - """ - def cancel(deferred): - """ - Cancel a L{readBody} call, close the connection to the HTTP server - immediately, if it is still open. 
- - @param deferred: The cancelled L{defer.Deferred}. - """ - abort = getAbort() - if abort is not None: - abort() - - d = defer.Deferred(cancel) - protocol = ReadBodyProtocol(response, d) - - def getAbort(): - return getattr(protocol.transport, 'abortConnection', None) - - response.deliverBody(protocol) - - if protocol.transport is not None and getAbort() is None: - warnings.warn( - 'Using readBody with a transport that does not have an ' - 'abortConnection method', - category=DeprecationWarning, - stacklevel=2) - - return d - - -class SoledadHTTPSyncTarget(SyncTarget): - - """ - A SyncTarget that encrypts data before sending and decrypts data after - receiving. - - Normally encryption will have been written to the sync database upon - document modification. The sync database is also used to write temporarily - the parsed documents that the remote send us, before being decrypted and - written to the main database. - """ - - def __init__(self, url, source_replica_uid, creds, crypto, cert_file, - sync_db=None, sync_enc_pool=None): - """ - Initialize the sync target. - - :param url: The server sync url. - :type url: str - :param source_replica_uid: The source replica uid which we use when - deferring decryption. - :type source_replica_uid: str - :param creds: A dictionary containing the uuid and token. - :type creds: creds - :param crypto: An instance of SoledadCrypto so we can encrypt/decrypt - document contents when syncing. - :type crypto: soledad.crypto.SoledadCrypto - :param cert_file: Path to the certificate of the ca used to validate - the SSL certificate used by the remote soledad - server. - :type cert_file: str - :param sync_db: Optional. handler for the db with the symmetric - encryption of the syncing documents. If - None, encryption will be done in-place, - instead of retreiving it from the dedicated - database. - :type sync_db: Sqlite handler - :param sync_enc_pool: The encryption pool to use to defer encryption. - If None is passed the encryption will not be - deferred. - :type sync_enc_pool: leap.soledad.client.encdecpool.SyncEncrypterPool - """ - if url.endswith("/"): - url = url[:-1] - self._url = str(url) + "/sync-from/" + str(source_replica_uid) - self.source_replica_uid = source_replica_uid - self._auth_header = None - self.set_creds(creds) - self._crypto = crypto - self._sync_db = sync_db - self._sync_enc_pool = sync_enc_pool - self._insert_doc_cb = None - # asynchronous encryption/decryption attributes - self._decryption_callback = None - self._sync_decr_pool = None - self._http = HTTPClient(cert_file) - - def close(self): - self._http.close() - - def set_creds(self, creds): - """ - Update credentials. - - :param creds: A dictionary containing the uuid and token. - :type creds: dict - """ - uuid = creds['token']['uuid'] - token = creds['token']['token'] - auth = '%s:%s' % (uuid, token) - b64_token = base64.b64encode(auth) - self._auth_header = {'Authorization': ['Token %s' % b64_token]} - - @property - def _base_header(self): - return self._auth_header.copy() if self._auth_header else {} - - @property - def _defer_encryption(self): - return self._sync_enc_pool is not None - - # - # SyncTarget API - # - - @defer.inlineCallbacks - def get_sync_info(self, source_replica_uid): - """ - Return information about known state of remote database. - - Return the replica_uid and the current database generation of the - remote database, and its last-seen database generation for the client - replica. - - :param source_replica_uid: The client-size replica uid. 
- :type source_replica_uid: str - - :return: A deferred which fires with (target_replica_uid, - target_replica_generation, target_trans_id, - source_replica_last_known_generation, - source_replica_last_known_transaction_id) - :rtype: twisted.internet.defer.Deferred - """ - raw = yield self._http_request(self._url) - res = json.loads(raw) - defer.returnValue(( - res['target_replica_uid'], - res['target_replica_generation'], - res['target_replica_transaction_id'], - res['source_replica_generation'], - res['source_transaction_id'] - )) - - def record_sync_info( - self, source_replica_uid, source_replica_generation, - source_replica_transaction_id): - """ - Record tip information for another replica. - - After sync_exchange has been processed, the caller will have - received new content from this replica. This call allows the - source replica instigating the sync to inform us what their - generation became after applying the documents we returned. - - This is used to allow future sync operations to not need to repeat data - that we just talked about. It also means that if this is called at the - wrong time, there can be database records that will never be - synchronized. - - :param source_replica_uid: The identifier for the source replica. - :type source_replica_uid: str - :param source_replica_generation: The database generation for the - source replica. - :type source_replica_generation: int - :param source_replica_transaction_id: The transaction id associated - with the source replica - generation. - :type source_replica_transaction_id: str - - :return: A deferred which fires with the result of the query. - :rtype: twisted.internet.defer.Deferred - """ - data = json.dumps({ - 'generation': source_replica_generation, - 'transaction_id': source_replica_transaction_id - }) - return self._http_request( - self._url, - method='PUT', - body=data, - content_type='application/json') - - @defer.inlineCallbacks - def sync_exchange(self, docs_by_generation, source_replica_uid, - last_known_generation, last_known_trans_id, - insert_doc_cb, ensure_callback=None, - defer_decryption=True, sync_id=None): - """ - Find out which documents the remote database does not know about, - encrypt and send them. After that, receive documents from the remote - database. - - :param docs_by_generations: A list of (doc_id, generation, trans_id) - of local documents that were changed since - the last local generation the remote - replica knows about. - :type docs_by_generations: list of tuples - - :param source_replica_uid: The uid of the source replica. - :type source_replica_uid: str - - :param last_known_generation: Target's last known generation. - :type last_known_generation: int - - :param last_known_trans_id: Target's last known transaction id. - :type last_known_trans_id: str - - :param insert_doc_cb: A callback for inserting received documents from - target. If not overriden, this will call u1db - insert_doc_from_target in synchronizer, which - implements the TAKE OTHER semantics. - :type insert_doc_cb: function - - :param ensure_callback: A callback that ensures we know the target - replica uid if the target replica was just - created. - :type ensure_callback: function - - :param defer_decryption: Whether to defer the decryption process using - the intermediate database. If False, - decryption will be done inline. - :type defer_decryption: bool - - :return: A deferred which fires with the new generation and - transaction id of the target replica. 
- :rtype: twisted.internet.defer.Deferred - """ - - self._ensure_callback = ensure_callback - - if sync_id is None: - sync_id = str(uuid4()) - self.source_replica_uid = source_replica_uid - - # save a reference to the callback so we can use it after decrypting - self._insert_doc_cb = insert_doc_cb - - gen_after_send, trans_id_after_send = yield self._send_docs( - docs_by_generation, - last_known_generation, - last_known_trans_id, - sync_id) - - cur_target_gen, cur_target_trans_id = yield self._receive_docs( - last_known_generation, last_known_trans_id, - ensure_callback, sync_id, - defer_decryption=defer_decryption) - - # update gen and trans id info in case we just sent and did not - # receive docs. - if gen_after_send is not None and gen_after_send > cur_target_gen: - cur_target_gen = gen_after_send - cur_target_trans_id = trans_id_after_send - - defer.returnValue([cur_target_gen, cur_target_trans_id]) - - # - # methods to send docs - # - - @defer.inlineCallbacks - def _send_docs(self, docs_by_generation, last_known_generation, - last_known_trans_id, sync_id): - - if not docs_by_generation: - defer.returnValue([None, None]) - - # add remote replica metadata to the request - initial_body = RequestBody( - last_known_generation=last_known_generation, - last_known_trans_id=last_known_trans_id, - sync_id=sync_id, - ensure=self._ensure_callback is not None) - total = len(docs_by_generation) - entries = yield self._entries_from_docs( - initial_body, docs_by_generation) - while len(entries): - result = yield self._http_request( - self._url, - method='POST', - body=entries.remove(1), - content_type='application/x-soledad-sync-put') - idx = total - len(entries) - if self._defer_encryption: - self._delete_sent(idx, docs_by_generation) - _emit_send(idx, total) - response_dict = json.loads(result)[0] - gen_after_send = response_dict['new_generation'] - trans_id_after_send = response_dict['new_transaction_id'] - defer.returnValue([gen_after_send, trans_id_after_send]) - - def _delete_sent(self, idx, docs_by_generation): - doc = docs_by_generation[idx][0] - self._sync_enc_pool.delete_encrypted_doc( - doc.doc_id, doc.rev) - - @defer.inlineCallbacks - def _entries_from_docs(self, initial_body, docs_by_generation): - number_of_docs = len(docs_by_generation) - for idx, (doc, gen, trans_id) in enumerate(docs_by_generation, 1): - content = yield self._encrypt_doc(doc) - initial_body.insert_info( - id=doc.doc_id, rev=doc.rev, content=content, gen=gen, - trans_id=trans_id, number_of_docs=number_of_docs, - doc_idx=idx) - defer.returnValue(initial_body) - - def _encrypt_doc(self, doc): - d = None - if doc.is_tombstone(): - d = defer.succeed(None) - elif not self._defer_encryption: - # fallback case, for tests - d = defer.succeed(self._crypto.encrypt_doc(doc)) - else: - - def _maybe_encrypt_doc_inline(doc_json): - if doc_json is None: - # the document is not marked as tombstone, but we got - # nothing from the sync db. As it is not encrypted - # yet, we force inline encryption. 
- return self._crypto.encrypt_doc(doc) - return doc_json - - d = self._sync_enc_pool.get_encrypted_doc(doc.doc_id, doc.rev) - d.addCallback(_maybe_encrypt_doc_inline) - return d - - # - # methods to receive doc - # - - @defer.inlineCallbacks - def _receive_docs(self, last_known_generation, last_known_trans_id, - ensure_callback, sync_id, defer_decryption): - - self._queue_for_decrypt = defer_decryption \ - and self._sync_db is not None - - new_generation = last_known_generation - new_transaction_id = last_known_trans_id - - if self._queue_for_decrypt: - logger.debug( - "Soledad sync: will queue received docs for decrypting.") - - if defer_decryption: - self._setup_sync_decr_pool() - - # --------------------------------------------------------------------- - # maybe receive the first document - # --------------------------------------------------------------------- - - # we fetch the first document before fetching the rest because we need - # to know the total number of documents to be received, and this - # information comes as metadata to each request. - - doc = yield self._receive_one_doc( - last_known_generation, last_known_trans_id, - sync_id, 0) - self._received_docs = 0 - number_of_changes, ngen, ntrans = self._insert_received_doc(doc, 1, 1) - - # update the target gen and trans_id in case a document was received - if ngen: - new_generation = ngen - new_transaction_id = ntrans - - if defer_decryption: - self._sync_decr_pool.start(number_of_changes) - - # --------------------------------------------------------------------- - # maybe receive the rest of the documents - # --------------------------------------------------------------------- - - # launch many asynchronous fetches and inserts of received documents - # in the temporary sync db. Will wait for all results before - # continuing. - - received = 1 - deferreds = [] - while received < number_of_changes: - d = self._receive_one_doc( - last_known_generation, - last_known_trans_id, sync_id, received) - d.addCallback( - self._insert_received_doc, - received + 1, # the index of the current received doc - number_of_changes) - deferreds.append(d) - received += 1 - results = yield defer.gatherResults(deferreds) - - # get generation and transaction id of target after insertions - if deferreds: - _, new_generation, new_transaction_id = results.pop() - - # --------------------------------------------------------------------- - # wait for async decryption to finish - # --------------------------------------------------------------------- - - if defer_decryption: - yield self._sync_decr_pool.deferred - self._sync_decr_pool.stop() - - defer.returnValue([new_generation, new_transaction_id]) - - def _receive_one_doc(self, last_known_generation, - last_known_trans_id, sync_id, received): - # add remote replica metadata to the request - body = RequestBody( - last_known_generation=last_known_generation, - last_known_trans_id=last_known_trans_id, - sync_id=sync_id, - ensure=self._ensure_callback is not None) - # inform server of how many documents have already been received - body.insert_info(received=received) - # send headers - return self._http_request( - self._url, - method='POST', - body=str(body), - content_type='application/x-soledad-sync-get') - - def _insert_received_doc(self, response, idx, total): - """ - Insert a received document into the local replica. - - :param response: The body and headers of the response. - :type response: tuple(str, dict) - :param idx: The index count of the current operation. 
- :type idx: int - :param total: The total number of operations. - :type total: int - """ - new_generation, new_transaction_id, number_of_changes, doc_id, \ - rev, content, gen, trans_id = \ - self._parse_received_doc_response(response) - if doc_id is not None: - # decrypt incoming document and insert into local database - # ------------------------------------------------------------- - # symmetric decryption of document's contents - # ------------------------------------------------------------- - # If arriving content was symmetrically encrypted, we decrypt it. - # We do it inline if defer_decryption flag is False or no sync_db - # was defined, otherwise we defer it writing it to the received - # docs table. - doc = SoledadDocument(doc_id, rev, content) - if is_symmetrically_encrypted(doc): - if self._queue_for_decrypt: - self._sync_decr_pool.insert_encrypted_received_doc( - doc.doc_id, doc.rev, doc.content, gen, trans_id, - idx) - else: - # defer_decryption is False or no-sync-db fallback - doc.set_json(self._crypto.decrypt_doc(doc)) - self._insert_doc_cb(doc, gen, trans_id) - else: - # not symmetrically encrypted doc, insert it directly - # or save it in the decrypted stage. - if self._queue_for_decrypt: - self._sync_decr_pool.insert_received_doc( - doc.doc_id, doc.rev, doc.content, gen, trans_id, - idx) - else: - self._insert_doc_cb(doc, gen, trans_id) - # ------------------------------------------------------------- - # end of symmetric decryption - # ------------------------------------------------------------- - self._received_docs += 1 - _emit_received(self._received_docs, total) - return number_of_changes, new_generation, new_transaction_id - - def _parse_received_doc_response(self, response): - """ - Parse the response from the server containing the received document. - - :param response: The body and headers of the response. - :type response: tuple(str, dict) - - :return: (new_gen, new_trans_id, number_of_changes, doc_id, rev, - content, gen, trans_id) - :rtype: tuple - """ - # decode incoming stream - parts = response.splitlines() - if not parts or parts[0] != '[' or parts[-1] != ']': - raise errors.BrokenSyncStream - data = parts[1:-1] - # decode metadata - try: - line, comma = utils.check_and_strip_comma(data[0]) - metadata = None - except (IndexError): - raise errors.BrokenSyncStream - try: - metadata = json.loads(line) - new_generation = metadata['new_generation'] - new_transaction_id = metadata['new_transaction_id'] - number_of_changes = metadata['number_of_changes'] - except (ValueError, KeyError): - raise errors.BrokenSyncStream - # make sure we have replica_uid from fresh new dbs - if self._ensure_callback and 'replica_uid' in metadata: - self._ensure_callback(metadata['replica_uid']) - # parse incoming document info - doc_id = None - rev = None - content = None - gen = None - trans_id = None - if number_of_changes > 0: - try: - entry = json.loads(data[1]) - doc_id = entry['id'] - rev = entry['rev'] - content = entry['content'] - gen = entry['gen'] - trans_id = entry['trans_id'] - except (IndexError, KeyError): - raise errors.BrokenSyncStream - return new_generation, new_transaction_id, number_of_changes, \ - doc_id, rev, content, gen, trans_id - - def _setup_sync_decr_pool(self): - """ - Set up the SyncDecrypterPool for deferred decryption. 
- """ - if self._sync_decr_pool is None and self._sync_db is not None: - # initialize syncing queue decryption pool - self._sync_decr_pool = SyncDecrypterPool( - self._crypto, - self._sync_db, - insert_doc_cb=self._insert_doc_cb, - source_replica_uid=self.source_replica_uid) - - def _http_request(self, url, method='GET', body=None, headers=None, - content_type=None): - headers = headers or self._base_header - if content_type: - headers.update({'content-type': [content_type]}) - d = self._http.request(url, method, body, headers, readBody) - d.addErrback(_unauth_to_invalid_token_error) - return d - - -def _unauth_to_invalid_token_error(failure): - """ - An errback to translate unauthorized errors to our own invalid token - class. - - :param failure: The original failure. - :type failure: twisted.python.failure.Failure - - :return: Either the original failure or an invalid auth token error. - :rtype: twisted.python.failure.Failure - """ - failure.trap(Error) - if failure.getErrorMessage() == "401 Unauthorized": - raise InvalidAuthTokenError - return failure - - -def _emit_send(idx, total): - msg = "%d/%d" % (idx, total) - emit( - SOLEDAD_SYNC_SEND_STATUS, - "Soledad sync send status: %s" % msg) - logger.debug("Sync send status: %s" % msg) - - -def _emit_received(received_docs, total): - msg = "%d/%d" % (received_docs, total) - emit(SOLEDAD_SYNC_RECEIVE_STATUS, msg) - logger.debug("Sync receive status: %s" % msg) - - -class RequestBody(object): - - def __init__(self, **header_dict): - self.headers = header_dict - self.entries = [] - - def insert_info(self, **entry_dict): - entry = json.dumps(entry_dict) - self.entries.append(entry) - return len(entry) - - def remove(self, number=1): - entries = [self.entries.pop(0) for i in xrange(number)] - return self.entries_to_str(entries) - - def __str__(self): - return self.entries_to_str(self.entries) - - def __len__(self): - return len(self.entries) - - def entries_to_str(self, entries=None): - data = '[\r\n' + json.dumps(self.headers) - data += ''.join(',\r\n' + entry for entry in entries) - return data + '\r\n]' -- cgit v1.2.3 From 3342cc75c8ad63a0a08d1116e0e4b9bc890271a2 Mon Sep 17 00:00:00 2001 From: Ivan Alejandro Date: Mon, 31 Aug 2015 15:48:23 -0300 Subject: [bug] emit dict instead of str - Resolves: #7412 --- client/changes/bug-7412_sync-status-broken | 1 + client/src/leap/soledad/client/http_target/fetch.py | 4 +++- client/src/leap/soledad/client/http_target/send.py | 6 +++--- 3 files changed, 7 insertions(+), 4 deletions(-) create mode 100644 client/changes/bug-7412_sync-status-broken diff --git a/client/changes/bug-7412_sync-status-broken b/client/changes/bug-7412_sync-status-broken new file mode 100644 index 00000000..b6800bc5 --- /dev/null +++ b/client/changes/bug-7412_sync-status-broken @@ -0,0 +1 @@ +o Fix refactor code loss. Closes #7412. 
diff --git a/client/src/leap/soledad/client/http_target/fetch.py b/client/src/leap/soledad/client/http_target/fetch.py
index d38ecb19..34d7cb0b 100644
--- a/client/src/leap/soledad/client/http_target/fetch.py
+++ b/client/src/leap/soledad/client/http_target/fetch.py
@@ -244,6 +244,8 @@ class HTTPDocFetcher(object):
 
 
 def _emit_receive_status(received_docs, total):
+    content = {'received': received_docs, 'total': total}
+    emit(SOLEDAD_SYNC_RECEIVE_STATUS, content)
+
     msg = "%d/%d" % (received_docs, total)
-    emit(SOLEDAD_SYNC_RECEIVE_STATUS, msg)
     logger.debug("Sync receive status: %s" % msg)
diff --git a/client/src/leap/soledad/client/http_target/send.py b/client/src/leap/soledad/client/http_target/send.py
index 72c33c6c..71157da5 100644
--- a/client/src/leap/soledad/client/http_target/send.py
+++ b/client/src/leap/soledad/client/http_target/send.py
@@ -95,8 +95,8 @@ class HTTPDocSender(object):
 
 
 def _emit_send_status(idx, total):
+    content = {'sent': idx, 'total': total}
+    emit(SOLEDAD_SYNC_SEND_STATUS, content)
+
     msg = "%d/%d" % (idx, total)
-    emit(
-        SOLEDAD_SYNC_SEND_STATUS,
-        "Soledad sync send status: %s" % msg)
     logger.debug("Sync send status: %s" % msg)
-- cgit v1.2.3


From ac3b1e9b845c1df77b08fc11ba16e870435751b8 Mon Sep 17 00:00:00 2001
From: Kali Kaneko
Date: Tue, 8 Sep 2015 14:47:35 -0400
Subject: [docs] add documentation about soledad sync process

---
 docs/sphinx/sync.rst | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 docs/sphinx/sync.rst

diff --git a/docs/sphinx/sync.rst b/docs/sphinx/sync.rst
new file mode 100644
index 00000000..f243befb
--- /dev/null
+++ b/docs/sphinx/sync.rst
@@ -0,0 +1,32 @@
+Soledad sync process
+====================
+
+Phases of sync:
+
+(1) client acquires knowledge about server state. http GET
+(2) client sends its documents to the server. http POSTs, or a single POST.
+(3) client downloads documents from the server.
+(4) client records its new state on the server.
+
+Originally in u1db:
+    (1) is a GET,
+    (2) and (3) are one POST (send in body, receive in response),
+    (4) is a PUT.
+
+In soledad:
+
+(1) is a GET.
+(2) is either one POST or a series of sequential POSTs.
+  (2.1) encrypt asynchronously
+  (2.2) store in temp sync db
+  (2.3) upload sequentially ***THIS IS SLOW***
+(3) is a series of concurrent POSTs; results are inserted sequentially on the local client db.
+  (3.1) download concurrently
+  (3.2) store in temp sync db
+  (3.3) decrypt asynchronously
+  (3.4) insert sequentially in local client db
+(4) is a PUT.
+
+This difference between u1db and soledad was made in order to be able to gracefully interrupt the sync in the middle of the upload or the download.
+
+It is essential that uploads and insertions stay sequential: documents must be added in order. The download itself happens in parallel, but the received documents are then inserted sequentially into the local db.
-- cgit v1.2.3


From 1e00c9966ed2a5cb4a4b1075e450f5e1ce13f188 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Wed, 9 Sep 2015 14:37:58 -0300
Subject: [bug] check threadpool state before closing it

The code was trying to close an already-closed threadpool. This raises
errors on Twisted 15.4.
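For illustration, a minimal sketch of the guarded shutdown pattern this patch
applies (assuming a bare twisted.python.threadpool.ThreadPool; 'started' is
the same flag the fix below checks):

    from twisted.python.threadpool import ThreadPool

    pool = ThreadPool()
    pool.start()
    # ... dispatch work with pool.callInThread(...) ...
    # Per this commit, stopping a pool that is not running raises on
    # Twisted 15.4, so guard on the pool's 'started' flag first.
    if pool.started:
        pool.stop()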
---
 client/src/leap/soledad/client/adbapi.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/client/src/leap/soledad/client/adbapi.py b/client/src/leap/soledad/client/adbapi.py
index 237159bd..77822247 100644
--- a/client/src/leap/soledad/client/adbapi.py
+++ b/client/src/leap/soledad/client/adbapi.py
@@ -285,7 +285,8 @@ class U1DBConnectionPool(adbapi.ConnectionPool):
         A final close, only called by the shutdown trigger.
         """
         self.shutdownID = None
-        self.threadpool.stop()
+        if self.threadpool.started:
+            self.threadpool.stop()
         self.running = False
         for conn in self.connections.values():
             self._close(conn)
-- cgit v1.2.3


From faa2eb201ef85e28ed713497cbf1abc33bb32e4b Mon Sep 17 00:00:00 2001
From: Kali Kaneko
Date: Wed, 9 Sep 2015 14:08:08 -0400
Subject: [docs] fix broken pypi badges

---
 README.rst | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/README.rst b/README.rst
index 887b3df1..5a29e3fa 100644
--- a/README.rst
+++ b/README.rst
@@ -13,18 +13,18 @@ repository:
 **leap.soledad.common** common pieces.
 
-.. image:: https://pypip.in/v/leap.soledad.common/badge.png
-    :target: https://crate.io/packages/leap.soledad.common
+.. image:: https://badge.fury.io/py/leap.soledad.common.svg
+    :target: http://badge.fury.io/py/leap.soledad.common
 
 **leap.soledad.client** where the soledad client lives.
 
-.. image:: https://pypip.in/v/leap.soledad.client/badge.png
-    :target: https://crate.io/packages/leap.soledad.client
+.. image:: https://badge.fury.io/py/leap.soledad.client.svg
+    :target: http://badge.fury.io/py/leap.soledad.client
 
 **leap.soledad.server** oh surprise! bits needed for the soledad server.
 
-.. image:: https://pypip.in/v/leap.soledad.server/badge.png
-    :target: https://crate.io/packages/leap.soledad.server
+.. image:: https://badge.fury.io/py/leap.soledad.server.svg
+    :target: http://badge.fury.io/py/leap.soledad.server
 
 Compatibility
-- cgit v1.2.3


From d83ca327d5d660fba1864a5a9b68795e013e9b89 Mon Sep 17 00:00:00 2001
From: Kali Kaneko
Date: Wed, 9 Sep 2015 15:41:03 -0400
Subject: [docs] add download badges

---
 README.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/README.rst b/README.rst
index 5a29e3fa..b98eec06 100644
--- a/README.rst
+++ b/README.rst
@@ -15,16 +15,22 @@ repository:
 .. image:: https://badge.fury.io/py/leap.soledad.common.svg
     :target: http://badge.fury.io/py/leap.soledad.common
+.. image:: https://img.shields.io/pypi/dm/leap.soledad.common.svg
+    :target: http://badge.fury.io/py/leap.soledad.common
 
 **leap.soledad.client** where the soledad client lives.
 
 .. image:: https://badge.fury.io/py/leap.soledad.client.svg
     :target: http://badge.fury.io/py/leap.soledad.client
+.. image:: https://img.shields.io/pypi/dm/leap.soledad.client.svg
+    :target: http://badge.fury.io/py/leap.soledad.client
 
 **leap.soledad.server** oh surprise! bits needed for the soledad server.
 
 .. image:: https://badge.fury.io/py/leap.soledad.server.svg
     :target: http://badge.fury.io/py/leap.soledad.server
+.. image:: https://img.shields.io/pypi/dm/leap.soledad.server.svg
+    :target: http://badge.fury.io/py/leap.soledad.server
 
 Compatibility
-- cgit v1.2.3


From 6956525a0f325765d9ef0e8dcd3ad5f4a55545ed Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Fri, 11 Sep 2015 19:17:49 -0300
Subject: [tests] get rid of CouchDB python process

Running a CouchDB process per test gives a lot of headaches, which is why
we are removing it. With it, we would need to start and stop the process
properly in each test case. This fails badly when a test fails and,
depending on how it fails, it freezes my PC. Also, it is very heavy for a
CI to run a database process for each test case.
---
 common/src/leap/soledad/common/tests/util.py | 169 ++++++---------------------
 1 file changed, 36 insertions(+), 133 deletions(-)

diff --git a/common/src/leap/soledad/common/tests/util.py b/common/src/leap/soledad/common/tests/util.py
index 2190eeaa..3187472f 100644
--- a/common/src/leap/soledad/common/tests/util.py
+++ b/common/src/leap/soledad/common/tests/util.py
@@ -27,10 +27,8 @@ import shutil
 import random
 import string
 import u1db
-import subprocess
-import time
-import re
 import traceback
+import couchdb
 
 from uuid import uuid4
 from mock import Mock
@@ -337,119 +335,6 @@ class BaseSoledadTest(BaseLeapTest, MockedSharedDBTest):
         self.assertEqual(exp_doc.content, doc.content)
 
 
-# -----------------------------------------------------------------------------
-# A wrapper for running couchdb locally.
-# -----------------------------------------------------------------------------
-
-# from: https://github.com/smcq/paisley/blob/master/paisley/test/util.py
-# TODO: include license of above project.
-class CouchDBWrapper(object):
-
-    """
-    Wrapper for external CouchDB instance which is started and stopped for
-    testing.
-    """
-    BOOT_TIMEOUT_SECONDS = 5
-    RETRY_LIMIT = 3
-
-    def start(self):
-        tries = 0
-        while tries < self.RETRY_LIMIT and not hasattr(self, 'port'):
-            try:
-                self._try_start()
-                return
-            except Exception, e:
-                print traceback.format_exc()
-                self.stop()
-                tries += 1
-        raise Exception(
-            "Check your couchdb: Tried to start 3 times and failed badly")
-
-    def _try_start(self):
-        """
-        Start a CouchDB instance for a test.
-        """
-        self.tempdir = tempfile.mkdtemp(suffix='.couch.test')
-
-        path = os.path.join(os.path.dirname(__file__),
-                            'couchdb.ini.template')
-        handle = open(path)
-        conf = handle.read() % {
-            'tempdir': self.tempdir,
-        }
-        handle.close()
-
-        shutil.copy('/etc/couchdb/default.ini', self.tempdir)
-        defaultConfPath = os.path.join(self.tempdir, 'default.ini')
-
-        confPath = os.path.join(self.tempdir, 'test.ini')
-        handle = open(confPath, 'w')
-        handle.write(conf)
-        handle.close()
-
-        # create the dirs from the template
-        mkdir_p(os.path.join(self.tempdir, 'lib'))
-        mkdir_p(os.path.join(self.tempdir, 'log'))
-        args = ['/usr/bin/couchdb', '-n',
-                '-a', defaultConfPath, '-a', confPath]
-        null = open('/dev/null', 'w')
-
-        self.process = subprocess.Popen(
-            args, env=None, stdout=null.fileno(), stderr=null.fileno(),
-            close_fds=True)
-        boot_time = time.time()
-        # find port
-        logPath = os.path.join(self.tempdir, 'log', 'couch.log')
-        while not os.path.exists(logPath):
-            if self.process.poll() is not None:
-                got_stdout, got_stderr = "", ""
-                if self.process.stdout is not None:
-                    got_stdout = self.process.stdout.read()
-
-                if self.process.stderr is not None:
-                    got_stderr = self.process.stderr.read()
-                raise Exception("""
-couchdb exited with code %d.
-stdout: -%s -stderr: -%s""" % ( - self.process.returncode, got_stdout, got_stderr)) - time.sleep(0.01) - if (time.time() - boot_time) > self.BOOT_TIMEOUT_SECONDS: - self.stop() - raise Exception("Timeout starting couch") - while os.stat(logPath).st_size == 0: - time.sleep(0.01) - if (time.time() - boot_time) > self.BOOT_TIMEOUT_SECONDS: - self.stop() - raise Exception("Timeout starting couch") - PORT_RE = re.compile( - 'Apache CouchDB has started on http://127.0.0.1:(?P\d+)') - - handle = open(logPath) - line = handle.read() - handle.close() - m = PORT_RE.search(line) - if not m: - self.stop() - raise Exception("Cannot find port in line %s" % line) - self.port = int(m.group('port')) - - def stop(self): - """ - Terminate the CouchDB instance. - """ - try: - self.process.terminate() - self.process.communicate() - except: - # just to clean up - # if it can't, the process wasn't created anyway - pass - shutil.rmtree(self.tempdir) - - class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest): """ @@ -460,15 +345,26 @@ class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest): """ Make sure we have a CouchDB instance for a test. """ - self.wrapper = CouchDBWrapper() - self.wrapper.start() - # self.db = self.wrapper.db + server = self.couch_server = couchdb.Server() + self.previous_dbs = set([db for db in server]) + self.couch_port = 5984 + self.couch_url = 'http://localhost:%d' % self.couch_port def tearDown(self): """ Stop CouchDB instance for test. """ - self.wrapper.stop() + current_dbs = set([db for db in self.couch_server]) + remaining_dbs = current_dbs - self.previous_dbs + if remaining_dbs and False: + raise Exception("tests created %s and didn't clean up!", remaining_dbs) + + def delete_db(self, name): + try: + self.couch_server.delete(name) + except: + # ignore if already missing + pass class CouchServerStateForTests(CouchServerState): @@ -484,15 +380,25 @@ class CouchServerStateForTests(CouchServerState): which is less pleasant than allowing the db to be automatically created. 
""" - def _create_database(self, dbname): - return CouchDatabase.open_database( + def __init__(self, *args, **kwargs): + self.dbs = [] + super(CouchServerStateForTests, self).__init__(*args, **kwargs) + + def _create_database(self, replica_uid=None, dbname=None): + """ + Create db and append to a list, allowing test to close it later + """ + dbname = dbname or ('test-%s' % uuid4().hex) + db = CouchDatabase.open_database( urljoin(self.couch_url, dbname), True, - replica_uid=dbname, + replica_uid=replica_uid or 'test', ensure_ddocs=True) + self.dbs.append(db) + return db def ensure_database(self, dbname): - db = self._create_database(dbname) + db = self._create_database(dbname=dbname) return db, db.replica_uid @@ -506,23 +412,20 @@ class SoledadWithCouchServerMixin( main_test_class = getattr(self, 'main_test_class', None) if main_test_class is not None: main_test_class.setUp(self) - self.couch_url = 'http://localhost:%d' % self.wrapper.port def tearDown(self): main_test_class = getattr(self, 'main_test_class', None) if main_test_class is not None: main_test_class.tearDown(self) # delete the test database - try: - db = CouchDatabase(self.couch_url, 'test') - db.delete_database() - except DatabaseDoesNotExist: - pass BaseSoledadTest.tearDown(self) CouchDBTestCase.tearDown(self) def make_app(self): - couch_url = urljoin( - 'http://localhost:' + str(self.wrapper.port), 'tests') - self.request_state = CouchServerStateForTests(couch_url) + self.request_state = CouchServerStateForTests(self.couch_url) + self.addCleanup(self.delete_dbs) return self.make_app_with_state(self.request_state) + + def delete_dbs(self): + for db in self.request_state.dbs: + self.delete_db(db._dbname) -- cgit v1.2.3 From 4856c49e93a4ba67055c0dc3e8b4cdbaeabc4940 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:28:33 -0300 Subject: [tests] isolate database names, use uuid We are using a single CouchDB install, which may cause tests to overlap since many of them uses the same database name, hurting isolation. This change tries to use uuid on most of it. Also changes for couch_url and couch_port introduced by removal of CouchDB process. 
--- common/src/leap/soledad/common/tests/test_couch.py | 19 +++++++----- .../tests/test_couch_operations_atomicity.py | 8 +++-- .../src/leap/soledad/common/tests/test_server.py | 2 -- .../soledad/common/tests/test_sqlcipher_sync.py | 2 ++ common/src/leap/soledad/common/tests/test_sync.py | 11 ++----- .../soledad/common/tests/test_sync_deferred.py | 10 ++----- .../leap/soledad/common/tests/test_sync_mutex.py | 6 ++-- .../leap/soledad/common/tests/test_sync_target.py | 34 +++++++++------------- 8 files changed, 39 insertions(+), 53 deletions(-) diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index 468ad8d8..08a14d02 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -25,6 +25,7 @@ import json from urlparse import urljoin from couchdb.client import Server +from uuid import uuid4 from testscenarios import TestWithScenarios @@ -56,8 +57,8 @@ class TestCouchBackendImpl(CouchDBTestCase): def test__allocate_doc_id(self): db = couch.CouchDatabase.open_database( urljoin( - 'http://localhost:' + str(self.wrapper.port), - 'u1db_tests' + 'http://localhost:' + str(self.couch_port), + ('test-%s' % uuid4().hex) ), create=True, ensure_ddocs=True) @@ -66,6 +67,7 @@ class TestCouchBackendImpl(CouchDBTestCase): self.assertEqual(34, len(doc_id1)) int(doc_id1[len('D-'):], 16) self.assertNotEqual(doc_id1, db._allocate_doc_id()) + self.delete_db(db._dbname) # ----------------------------------------------------------------------------- @@ -73,25 +75,26 @@ class TestCouchBackendImpl(CouchDBTestCase): # ----------------------------------------------------------------------------- def make_couch_database_for_test(test, replica_uid): - port = str(test.wrapper.port) - return couch.CouchDatabase.open_database( - urljoin('http://localhost:' + port, replica_uid), + port = str(test.couch_port) + dbname = ('test-%s' % uuid4().hex) + db = couch.CouchDatabase.open_database( + urljoin('http://localhost:' + port, dbname), create=True, replica_uid=replica_uid or 'test', ensure_ddocs=True) def copy_couch_database_for_test(test, db): - port = str(test.wrapper.port) + port = str(test.couch_port) couch_url = 'http://localhost:' + port - new_dbname = db._replica_uid + '_copy' + new_dbname = db._dbname + '_copy' new_db = couch.CouchDatabase.open_database( urljoin(couch_url, new_dbname), create=True, replica_uid=db._replica_uid or 'test') # copy all docs session = couch.Session() - old_couch_db = Server(couch_url, session=session)[db._replica_uid] + old_couch_db = Server(couch_url, session=session)[db._dbname] new_couch_db = Server(couch_url, session=session)[new_dbname] for doc_id in old_couch_db: doc = old_couch_db.get(doc_id) diff --git a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py index 0a06cc39..25f709ca 100644 --- a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py +++ b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py @@ -23,6 +23,7 @@ import threading from urlparse import urljoin from twisted.internet import defer +from uuid import uuid4 from leap.soledad.client import Soledad from leap.soledad.common.couch import CouchDatabase, CouchServerState @@ -55,7 +56,7 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer): sync_target = soledad_sync_target - def _soledad_instance(self, user='user-uuid', passphrase=u'123', + def 
_soledad_instance(self, user=None, passphrase=u'123', prefix='', secrets_path='secrets.json', local_db_path='soledad.u1db', server_url='', @@ -63,6 +64,7 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer): """ Instantiate Soledad. """ + user = user or self.user # this callback ensures we save a document which is sent to the shared # db. @@ -89,9 +91,9 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer): def setUp(self): TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) - self.couch_url = 'http://localhost:' + str(self.wrapper.port) + self.user = ('user-%s' % uuid4().hex) self.db = CouchDatabase.open_database( - urljoin(self.couch_url, 'user-user-uuid'), + urljoin(self.couch_url, 'user-' + self.user), create=True, replica_uid='replica', ensure_ddocs=True) diff --git a/common/src/leap/soledad/common/tests/test_server.py b/common/src/leap/soledad/common/tests/test_server.py index 5e8e5f90..0667459e 100644 --- a/common/src/leap/soledad/common/tests/test_server.py +++ b/common/src/leap/soledad/common/tests/test_server.py @@ -333,7 +333,6 @@ class EncryptedSyncTestCase( # dependencies. # XXX explain better CouchDBTestCase.setUp(self) - self.couch_url = 'http://localhost:' + str(self.wrapper.port) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") TestCaseWithServer.setUp(self) @@ -494,7 +493,6 @@ class LockResourceTestCase( # dependencies. # XXX explain better CouchDBTestCase.setUp(self) - self.couch_url = 'http://localhost:' + str(self.wrapper.port) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") TestCaseWithServer.setUp(self) # create the databases diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py index af2d0e2a..ead4ee5f 100644 --- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py +++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py @@ -20,10 +20,12 @@ Test sqlcipher backend sync. import json +import os from u1db import sync from u1db import vectorclock from u1db import errors +from uuid import uuid4 from testscenarios import TestWithScenarios from urlparse import urljoin diff --git a/common/src/leap/soledad/common/tests/test_sync.py b/common/src/leap/soledad/common/tests/test_sync.py index 61f3879f..04e8b163 100644 --- a/common/src/leap/soledad/common/tests/test_sync.py +++ b/common/src/leap/soledad/common/tests/test_sync.py @@ -63,7 +63,6 @@ class InterruptableSyncTestCase( TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") - self.couch_url = 'http://localhost:' + str(self.wrapper.port) def tearDown(self): CouchDBTestCase.tearDown(self) @@ -177,13 +176,7 @@ class TestSoledadDbSync( SoledadWithCouchServerMixin.setUp(self) self.startTwistedServer() self.db = self.make_database_for_test(self, 'test1') - self.db2 = couch.CouchDatabase.open_database( - urljoin( - 'http://localhost:' + str(self.wrapper.port), - 'test' - ), - create=True, - ensure_ddocs=True) + self.db2 = self.request_state._create_database(replica_uid='test') def tearDown(self): """ @@ -199,7 +192,7 @@ class TestSoledadDbSync( and Token auth. 
""" target = soledad_sync_target( - self, target_name, + self, self.db2._dbname, source_replica_uid=self._soledad._dbpool.replica_uid) self.addCleanup(target.close) return sync.SoledadSynchronizer( diff --git a/common/src/leap/soledad/common/tests/test_sync_deferred.py b/common/src/leap/soledad/common/tests/test_sync_deferred.py index 0065413a..a07be66f 100644 --- a/common/src/leap/soledad/common/tests/test_sync_deferred.py +++ b/common/src/leap/soledad/common/tests/test_sync_deferred.py @@ -85,13 +85,7 @@ class BaseSoledadDeferredEncTest(SoledadWithCouchServerMixin): defer_encryption=True, sync_db_key=sync_db_key) self.db1 = SQLCipherDatabase(self.opts) - self.db2 = couch.CouchDatabase.open_database( - urljoin( - 'http://localhost:' + str(self.wrapper.port), - 'test' - ), - create=True, - ensure_ddocs=True) + self.db2 = self.request_state._create_database('test') def tearDown(self): # XXX should not access "private" attrs @@ -159,7 +153,7 @@ class TestSoledadDbSyncDeferredEncDecr( sync_db = self._soledad._sync_db sync_enc_pool = self._soledad._sync_enc_pool target = soledad_sync_target( - self, target_name, + self, self.db2._dbname, source_replica_uid=replica_uid, sync_db=sync_db, sync_enc_pool=sync_enc_pool) diff --git a/common/src/leap/soledad/common/tests/test_sync_mutex.py b/common/src/leap/soledad/common/tests/test_sync_mutex.py index aa46d5b7..2e2123a7 100644 --- a/common/src/leap/soledad/common/tests/test_sync_mutex.py +++ b/common/src/leap/soledad/common/tests/test_sync_mutex.py @@ -91,7 +91,7 @@ class TestSyncMutex( TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") - self.couch_url = 'http://localhost:' + str(self.wrapper.port) + self.user = ('user-%s' % uuid.uuid4().hex) def tearDown(self): CouchDBTestCase.tearDown(self) @@ -103,12 +103,12 @@ class TestSyncMutex( # ensure remote db exists before syncing db = couch.CouchDatabase.open_database( - urljoin(self.couch_url, 'user-user-uuid'), + urljoin(self.couch_url, 'user-' + self.user), create=True, ensure_ddocs=True) sol = self._soledad_instance( - user='user-uuid', server_url=self.getURL()) + user=self.user, server_url=self.getURL()) d1 = sol.sync() d2 = sol.sync() diff --git a/common/src/leap/soledad/common/tests/test_sync_target.py b/common/src/leap/soledad/common/tests/test_sync_target.py index 79c350cd..da4ff034 100644 --- a/common/src/leap/soledad/common/tests/test_sync_target.py +++ b/common/src/leap/soledad/common/tests/test_sync_target.py @@ -63,7 +63,6 @@ class TestSoledadParseReceivedDocResponse(SoledadWithCouchServerMixin): def setUp(self): SoledadWithCouchServerMixin.setUp(self) - self.couch_url = 'http://localhost:' + str(self.wrapper.port) creds = {'token': { 'uuid': 'user-uuid', 'token': 'auth-token', @@ -151,11 +150,11 @@ def make_local_db_and_soledad_target( test, path='test', source_replica_uid=uuid4().hex): test.startTwistedServer() - db = test.request_state._create_database(os.path.basename(path)) + db = test.request_state._create_database(replica_uid=os.path.basename(path)) sync_db = test._soledad._sync_db sync_enc_pool = test._soledad._sync_enc_pool st = soledad_sync_target( - test, path, + test, db._dbname, source_replica_uid=source_replica_uid, sync_db=sync_db, sync_enc_pool=sync_enc_pool) @@ -191,6 +190,8 @@ class TestSoledadSyncTarget( self.startTwistedServer() sync_db = self._soledad._sync_db sync_enc_pool = self._soledad._sync_enc_pool + if path is None: + path = self.db2._dbname target = self.sync_target( self, path, 
source_replica_uid=source_replica_uid,
@@ -204,11 +205,11 @@ class TestSoledadSyncTarget(
         SoledadWithCouchServerMixin.setUp(self)
         self.startTwistedServer()
         self.db1 = make_sqlcipher_database_for_test(self, 'test1')
-        self.db2 = self.request_state._create_database('test2')
+        self.db2 = self.request_state._create_database('test')
 
     def tearDown(self):
         # db2, _ = self.request_state.ensure_database('test2')
-        self.db2.delete_database()
+        self.delete_db(self.db2._dbname)
         self.db1.close()
         SoledadWithCouchServerMixin.tearDown(self)
         TestWithScenarios.tearDown(self)
@@ -220,8 +221,8 @@ class TestSoledadSyncTarget(
 
         This test was adapted to decrypt remote content before assert.
         """
-        db = self.request_state._create_database('test')
-        remote_target = self.getSyncTarget('test')
+        db = self.db2
+        remote_target = self.getSyncTarget()
         other_docs = []
 
         def receive_doc(doc, gen, trans_id):
@@ -247,7 +248,7 @@ class TestSoledadSyncTarget(
         def blackhole_getstderr(inst):
             return cStringIO.StringIO()
 
-        db = self.request_state._create_database('test')
+        db = self.db2
         _put_doc_if_newer = db._put_doc_if_newer
         trigger_ids = ['doc-here2']
@@ -333,7 +334,7 @@ class TestSoledadSyncTarget(
             last_known_trans_id=None, insert_doc_cb=receive_doc,
             ensure_callback=ensure_cb, defer_decryption=False)
         self.assertEqual(1, new_gen)
-        db = self.request_state.open_database('test')
+        db = self.db2
         self.assertEqual(1, len(replica_uid_box))
         self.assertEqual(db._replica_uid, replica_uid_box[0])
         self.assertGetEncryptedDoc(
@@ -358,17 +359,16 @@ class TestSoledadSyncTarget(
 
     @defer.inlineCallbacks
     def test_record_sync_info(self):
-        db = self.request_state._create_database('test')
         remote_target = self.getSyncTarget(
             'test',
             source_replica_uid='other-id')
         yield remote_target.record_sync_info('other-id', 2, 'T-transid')
         self.assertEqual(
-            (2, 'T-transid'), db._get_replica_gen_and_trans_id('other-id'))
+            (2, 'T-transid'), self.db2._get_replica_gen_and_trans_id('other-id'))
 
     @defer.inlineCallbacks
     def test_sync_exchange_receive(self):
-        db = self.request_state._create_database('test')
+        db = self.db2
         doc = db.create_doc_from_json('{"value": "there"}')
         remote_target = self.getSyncTarget('test')
         other_changes = []
 
         def receive_doc(doc, gen, trans_id):
@@ -423,10 +420,10 @@ class SoledadDatabaseSyncTargetTests(
         self.db, self.st = make_local_db_and_soledad_target(self)
 
     def tearDown(self):
-        tests.TestCaseWithServer.tearDown(self)
-        SoledadWithCouchServerMixin.tearDown(self)
         self.db.close()
         self.st.close()
+        tests.TestCaseWithServer.tearDown(self)
+        SoledadWithCouchServerMixin.tearDown(self)
 
     def set_trace_hook(self, callback, shallow=False):
         setter = (self.st._set_trace_hook if not shallow else
@@ -857,13 +857,7 @@ class TestSoledadDbSync(
             defer_encryption=True, sync_db_key=sync_db_key)
         self.db1 = SQLCipherDatabase(self.opts)
-        self.db2 = couch.CouchDatabase.open_database(
-            urljoin(
-                'http://localhost:' + str(self.wrapper.port),
-                'test'
-            ),
-            create=True,
-            ensure_ddocs=True)
+        self.db2 = self.request_state._create_database(replica_uid='test')
 
     def tearDown(self):
         """
@@ -890,7 +884,7 @@ class TestSoledadDbSync(
             'uuid': 'user-uuid',
             'token': 'auth-token',
         }}
-        target_url = self.getURL(target_name)
+        target_url = self.getURL(self.db2._dbname)
 
         # get a u1db syncer
         crypto = self._soledad._crypto
-- cgit v1.2.3


From a5742807539c66577f48d6cfbb411046c17c1978 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Fri, 11 Sep 2015 19:35:45 -0300
Subject: [tests] use addCleanup to ensure db deletion

TestCase has an addCleanup method, which provides a way to clean up a
resource and to express that need as soon as you create it. We are now
using it to simplify some of the database deletion logic in the tests,
and to make sure that as soon as a database gets created, a cleanup is
registered to delete it afterwards.
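A minimal, self-contained sketch of the pattern (unittest's addCleanup is
the standard library API; the couchdb.Server handle mirrors what these
tests use and assumes a local CouchDB on port 5984):

    import unittest
    from uuid import uuid4

    import couchdb


    class IsolatedDBTest(unittest.TestCase):

        def test_with_cleanup(self):
            server = couchdb.Server()
            name = 'test-%s' % uuid4().hex
            server.create(name)
            # registered right at creation time: this runs even if the
            # assertions below fail, so no database is ever left behind
            self.addCleanup(server.delete, name)
            self.assertIn(name, server)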
--- common/src/leap/soledad/common/tests/test_couch.py | 55 ++-------------------- 1 file changed, 5 insertions(+), 50 deletions(-) diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index 08a14d02..52d1edd4 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -82,6 +82,8 @@ def make_couch_database_for_test(test, replica_uid): create=True, replica_uid=replica_uid or 'test', ensure_ddocs=True) + test.addCleanup(test.delete_db, dbname) + return db def copy_couch_database_for_test(test, db): @@ -146,24 +148,6 @@ class CouchTests( scenarios = COUCH_SCENARIOS - def setUp(self): - test_backends.AllDatabaseTests.setUp(self) - # save db info because of test_close - self._url = self.db._url - self._dbname = self.db._dbname - - def tearDown(self): - # if current test is `test_close` we have to use saved objects to - # delete the database because the close() method will have removed the - # references needed to do it using the CouchDatabase. - if self.id().endswith('test_couch.CouchTests.test_close(couch)'): - session = couch.Session() - server = Server(url=self._url, session=session) - del(server[self._dbname]) - else: - self.db.delete_database() - test_backends.AllDatabaseTests.tearDown(self) - class CouchDatabaseTests( TestWithScenarios, @@ -172,10 +156,6 @@ class CouchDatabaseTests( scenarios = COUCH_SCENARIOS - def tearDown(self): - self.db.delete_database() - test_backends.LocalDatabaseTests.tearDown(self) - class CouchValidateGenNTransIdTests( TestWithScenarios, @@ -184,10 +164,6 @@ class CouchValidateGenNTransIdTests( scenarios = COUCH_SCENARIOS - def tearDown(self): - self.db.delete_database() - test_backends.LocalDatabaseValidateGenNTransIdTests.tearDown(self) - class CouchValidateSourceGenTests( TestWithScenarios, @@ -196,10 +172,6 @@ class CouchValidateSourceGenTests( scenarios = COUCH_SCENARIOS - def tearDown(self): - self.db.delete_database() - test_backends.LocalDatabaseValidateSourceGenTests.tearDown(self) - class CouchWithConflictsTests( TestWithScenarios, @@ -208,10 +180,6 @@ class CouchWithConflictsTests( scenarios = COUCH_SCENARIOS - def tearDown(self): - self.db.delete_database() - test_backends.LocalDatabaseWithConflictsTests.tearDown(self) - # Notice: the CouchDB backend does not have indexing capabilities, so we do # not test indexing now. 
@@ -263,8 +231,6 @@ class CouchDatabaseSyncTargetTests( def setUp(self): CouchDBTestCase.setUp(self) - # from DatabaseBaseTests.setUp - self.db = self.create_database('test') # from TestCaseWithServer.setUp self.server = self.server_thread = self.port = None # other stuff @@ -272,7 +238,6 @@ class CouchDatabaseSyncTargetTests( self.other_changes = [] def tearDown(self): - CouchDBTestCase.tearDown(self) # from TestCaseWithServer.tearDown if self.server is not None: self.server.shutdown() @@ -280,9 +245,8 @@ class CouchDatabaseSyncTargetTests( self.server.server_close() if self.port: self.port.stopListening() - # from DatabaseBaseTests.tearDown - if hasattr(self, 'db') and self.db is not None: - self.db.close() + self.db.close() + CouchDBTestCase.tearDown(self) def receive_doc(self, doc, gen, trans_id): self.other_changes.append( @@ -727,17 +691,8 @@ class CouchDatabaseSyncTests( self.db3, self.db1_copy, self.db2_copy ]: if db is not None: - db.delete_database() + self.delete_db(db._dbname) db.close() - for replica_uid, dbname in [ - ('test1_copy', 'source'), - ('test2_copy', 'target'), - ('test3', 'target') - ]: - db = self.create_database(replica_uid, dbname) - db.delete_database() - # cleanup connections to avoid leaking of file descriptors - db.close() DatabaseBaseTests.tearDown(self) def assertLastExchangeLog(self, db, expected): -- cgit v1.2.3 From a2e0825700c52a63b987286dc8a512fc2dadc7f4 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:40:41 -0300 Subject: [tests] extract db creation with doc ensure method This was a duplicate, but also was getting on the way to improve isolation. With this small refactor it should be cleaner and have unique names. --- common/src/leap/soledad/common/tests/test_couch.py | 28 ++++++++-------------- 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index 52d1edd4..fb92a290 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -1331,10 +1331,13 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): def setUp(self): CouchDBTestCase.setUp(self) + + def create_db(self, ensure=True): + dbname = ('test-%s' % uuid4().hex) self.db = couch.CouchDatabase.open_database( - urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'), + urljoin('http://127.0.0.1:%d' % self.couch_port, dbname), create=True, - ensure_ddocs=False) # note that we don't enforce ddocs here + ensure_ddocs=ensure) def tearDown(self): self.db.delete_database() @@ -1346,6 +1349,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): Test that all methods that access design documents will raise if the design docs are not present. """ + self.create_db(ensure=False) # _get_generation() self.assertRaises( errors.MissingDesignDocError, @@ -1376,10 +1380,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): Test that all methods that access design documents list functions will raise if the functions are not present. 
""" - self.db = couch.CouchDatabase.open_database( - urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'), - create=True, - ensure_ddocs=True) + self.create_db(ensure=True) # erase views from _design/transactions transactions = self.db._database['_design/transactions'] transactions['lists'] = {} @@ -1406,10 +1407,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): Test that all methods that access design documents list functions will raise if the functions are not present. """ - self.db = couch.CouchDatabase.open_database( - urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'), - create=True, - ensure_ddocs=True) + self.create_db(ensure=True) # erase views from _design/transactions transactions = self.db._database['_design/transactions'] del transactions['lists'] @@ -1436,10 +1434,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): Test that all methods that access design documents' named views will raise if the views are not present. """ - self.db = couch.CouchDatabase.open_database( - urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'), - create=True, - ensure_ddocs=True) + self.create_db(ensure=True) # erase views from _design/docs docs = self.db._database['_design/docs'] del docs['views'] @@ -1478,10 +1473,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): Test that all methods that access design documents will raise if the design docs are not present. """ - self.db = couch.CouchDatabase.open_database( - urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'), - create=True, - ensure_ddocs=True) + self.create_db(ensure=True) # delete _design/docs del self.db._database['_design/docs'] # delete _design/syncs -- cgit v1.2.3 From f578b9b931858f3d95588f1a982fc77275853cda Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:42:57 -0300 Subject: [tests] isolate LockResource tests using a mock 'shared' has to be used as a DB name just because of a constant, but it is used on only one point. This changes mock this point to have unique names for better tests isolation. 'tokens' was removed as unnecessary. 
--- common/src/leap/soledad/common/tests/test_server.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/common/src/leap/soledad/common/tests/test_server.py b/common/src/leap/soledad/common/tests/test_server.py index 0667459e..f512d6c1 100644 --- a/common/src/leap/soledad/common/tests/test_server.py +++ b/common/src/leap/soledad/common/tests/test_server.py @@ -496,23 +496,15 @@ class LockResourceTestCase( self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") TestCaseWithServer.setUp(self) # create the databases - CouchDatabase.open_database( - urljoin(self.couch_url, 'shared'), - create=True, - ensure_ddocs=True) - CouchDatabase.open_database( - urljoin(self.couch_url, 'tokens'), + db = CouchDatabase.open_database( + urljoin(self.couch_url, ('shared-%s' % (uuid4().hex))), create=True, ensure_ddocs=True) + self.addCleanup(db.delete_database) self._state = CouchServerState(self.couch_url) + self._state.open_database = mock.Mock(return_value=db) def tearDown(self): - # delete remote database - db = CouchDatabase.open_database( - urljoin(self.couch_url, 'shared'), - create=True, - ensure_ddocs=True) - db.delete_database() CouchDBTestCase.tearDown(self) TestCaseWithServer.tearDown(self) -- cgit v1.2.3 From 83a59cb5c29695f321354d2d0f1c0097ff34693d Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:45:12 -0300 Subject: [tests] simplify make_app and getTarget code It was hardcoded for 'test', but the database name is now random. What is useful for test code is the replica_uid, database name for the SyncTarget is now coming from database name. --- .../soledad/common/tests/test_sqlcipher_sync.py | 31 ++++++++-------------- common/src/leap/soledad/common/tests/test_sync.py | 13 +++------ .../soledad/common/tests/test_sync_deferred.py | 14 ++++------ .../leap/soledad/common/tests/test_sync_target.py | 17 ++++-------- 4 files changed, 25 insertions(+), 50 deletions(-) diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py index ead4ee5f..343b915c 100644 --- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py +++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py @@ -731,28 +731,23 @@ class SQLCipherDatabaseSyncTests( errors.InvalidTransactionId, self.sync, self.db1, self.db2_copy) -def _make_local_db_and_token_http_target(test, path='test'): +def make_local_db_and_soledad_target( + test, path='test', + source_replica_uid=uuid4().hex): test.startTwistedServer() - # ensure remote db exists before syncing - db = couch.CouchDatabase.open_database( - urljoin(test.couch_url, 'test'), - create=True, - replica_uid='test', - ensure_ddocs=True) - - replica_uid = test._soledad._dbpool.replica_uid + db = test.request_state._create_database(replica_uid=os.path.basename(path)) sync_db = test._soledad._sync_db sync_enc_pool = test._soledad._sync_enc_pool st = soledad_sync_target( - test, path, - source_replica_uid=replica_uid, + test, db._dbname, + source_replica_uid=source_replica_uid, sync_db=sync_db, sync_enc_pool=sync_enc_pool) return db, st target_scenarios = [ ('leap', { - 'create_db_and_target': _make_local_db_and_token_http_target, + 'create_db_and_target': make_local_db_and_soledad_target, 'make_app_with_state': make_soledad_app, 'do_sync': sync_via_synchronizer_and_soledad}), ] @@ -761,8 +756,8 @@ target_scenarios = [ class SQLCipherSyncTargetTests( TestWithScenarios, tests.DatabaseBaseTests, - tests.TestCaseWithServer, - SoledadWithCouchServerMixin): + 
SoledadWithCouchServerMixin, + tests.TestCaseWithServer): # TODO: implement _set_trace_hook(_shallow) in SoledadHTTPSyncTarget so # skipped tests can be succesfully executed. @@ -773,13 +768,13 @@ class SQLCipherSyncTargetTests( whitebox = False def setUp(self): - super(tests.DatabaseBaseTests, self).setUp() + super(SQLCipherSyncTargetTests, self).setUp() self.db, self.st = self.create_db_and_target(self) self.addCleanup(self.st.close) self.other_changes = [] def tearDown(self): - super(tests.DatabaseBaseTests, self).tearDown() + super(SQLCipherSyncTargetTests, self).setUp() def assertLastExchangeLog(self, db, expected): log = getattr(db, '_last_exchange_log', None) @@ -791,10 +786,6 @@ class SQLCipherSyncTargetTests( self.other_changes.append( (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id)) - def make_app(self): - self.request_state = couch.CouchServerState(self.couch_url) - return self.make_app_with_state(self.request_state) - def set_trace_hook(self, callback, shallow=False): setter = (self.st._set_trace_hook if not shallow else self.st._set_trace_hook_shallow) diff --git a/common/src/leap/soledad/common/tests/test_sync.py b/common/src/leap/soledad/common/tests/test_sync.py index 04e8b163..1041367b 100644 --- a/common/src/leap/soledad/common/tests/test_sync.py +++ b/common/src/leap/soledad/common/tests/test_sync.py @@ -147,8 +147,8 @@ class InterruptableSyncTestCase( class TestSoledadDbSync( TestWithScenarios, - tests.TestCaseWithServer, - SoledadWithCouchServerMixin): + SoledadWithCouchServerMixin, + tests.TestCaseWithServer): """ Test db.sync remote sync shortcut @@ -165,10 +165,6 @@ class TestSoledadDbSync( oauth = False token = False - def make_app(self): - self.request_state = couch.CouchServerState(self.couch_url) - return self.make_app_with_state(self.request_state) - def setUp(self): """ Need to explicitely invoke inicialization on all bases. @@ -182,11 +178,10 @@ class TestSoledadDbSync( """ Need to explicitely invoke destruction on all bases. """ - self.db2.delete_database() SoledadWithCouchServerMixin.tearDown(self) # tests.TestCaseWithServer.tearDown(self) - def do_sync(self, target_name): + def do_sync(self): """ Perform sync using SoledadSynchronizer, SoledadSyncTarget and Token auth. @@ -210,7 +205,7 @@ class TestSoledadDbSync( doc1 = self.db.create_doc_from_json(tests.simple_doc) doc2 = self.db2.create_doc_from_json(tests.nested_doc) - local_gen_before_sync = yield self.do_sync('test') + local_gen_before_sync = yield self.do_sync() gen, _, changes = self.db.whats_changed(local_gen_before_sync) self.assertEqual(1, len(changes)) self.assertEqual(doc2.doc_id, changes[0][0]) diff --git a/common/src/leap/soledad/common/tests/test_sync_deferred.py b/common/src/leap/soledad/common/tests/test_sync_deferred.py index a07be66f..90b00670 100644 --- a/common/src/leap/soledad/common/tests/test_sync_deferred.py +++ b/common/src/leap/soledad/common/tests/test_sync_deferred.py @@ -59,6 +59,7 @@ class BaseSoledadDeferredEncTest(SoledadWithCouchServerMixin): def setUp(self): SoledadWithCouchServerMixin.setUp(self) + self.startTwistedServer() # config info self.db1_file = os.path.join(self.tempdir, "db1.u1db") os.unlink(self.db1_file) @@ -103,8 +104,8 @@ class SyncTimeoutError(Exception): class TestSoledadDbSyncDeferredEncDecr( TestWithScenarios, - tests.TestCaseWithServer, - BaseSoledadDeferredEncTest): + BaseSoledadDeferredEncTest, + tests.TestCaseWithServer): """ Test db.sync remote sync shortcut. 
@@ -122,17 +123,12 @@ class TestSoledadDbSyncDeferredEncDecr( oauth = False token = True - def make_app(self): - self.request_state = couch.CouchServerState(self.couch_url) - return self.make_app_with_state(self.request_state) - def setUp(self): """ Need to explicitely invoke inicialization on all bases. """ BaseSoledadDeferredEncTest.setUp(self) self.server = self.server_thread = None - self.startTwistedServer() self.syncer = None def tearDown(self): @@ -144,7 +140,7 @@ class TestSoledadDbSyncDeferredEncDecr( dbsyncer.close() BaseSoledadDeferredEncTest.tearDown(self) - def do_sync(self, target_name): + def do_sync(self): """ Perform sync using SoledadSynchronizer, SoledadSyncTarget and Token auth. @@ -184,7 +180,7 @@ class TestSoledadDbSyncDeferredEncDecr( """ doc1 = self.db1.create_doc_from_json(tests.simple_doc) doc2 = self.db2.create_doc_from_json(tests.nested_doc) - local_gen_before_sync = yield self.do_sync('test') + local_gen_before_sync = yield self.do_sync() gen, _, changes = self.db1.whats_changed(local_gen_before_sync) self.assertEqual(1, len(changes)) diff --git a/common/src/leap/soledad/common/tests/test_sync_target.py b/common/src/leap/soledad/common/tests/test_sync_target.py index da4ff034..ba556ea4 100644 --- a/common/src/leap/soledad/common/tests/test_sync_target.py +++ b/common/src/leap/soledad/common/tests/test_sync_target.py @@ -268,7 +268,6 @@ class TestSoledadSyncTarget( self.patch( IndexedCouchDatabase, '_put_doc_if_newer', bomb_put_doc_if_newer) remote_target = self.getSyncTarget( - 'test', source_replica_uid='replica') other_changes = [] @@ -318,7 +317,7 @@ class TestSoledadSyncTarget( This test was adapted to decrypt remote content before assert. """ - remote_target = self.getSyncTarget('test') + remote_target = self.getSyncTarget() other_docs = [] replica_uid_box = [] @@ -347,10 +346,9 @@ class TestSoledadSyncTarget( @defer.inlineCallbacks def test_get_sync_info(self): - db = self.request_state._create_database('test') + db = self.db2 db._set_replica_gen_and_trans_id('other-id', 1, 'T-transid') remote_target = self.getSyncTarget( - 'test', source_replica_uid='other-id') sync_info = yield remote_target.get_sync_info('other-id') self.assertEqual( @@ -360,7 +358,6 @@ class TestSoledadSyncTarget( @defer.inlineCallbacks def test_record_sync_info(self): remote_target = self.getSyncTarget( - 'test', source_replica_uid='other-id') yield remote_target.record_sync_info('other-id', 2, 'T-transid') self.assertEqual( @@ -370,7 +367,7 @@ class TestSoledadSyncTarget( def test_sync_exchange_receive(self): db = self.db2 doc = db.create_doc_from_json('{"value": "there"}') - remote_target = self.getSyncTarget('test') + remote_target = self.getSyncTarget() other_changes = [] def receive_doc(doc, gen, trans_id): @@ -423,10 +420,10 @@ class SoledadDatabaseSyncTargetTests( self.db, self.st = make_local_db_and_soledad_target(self) def tearDown(self): - tests.TestCaseWithServer.tearDown(self) - SoledadWithCouchServerMixin.tearDown(self) self.db.close() self.st.close() + tests.TestCaseWithServer.tearDown(self) + SoledadWithCouchServerMixin.tearDown(self) def set_trace_hook(self, callback, shallow=False): setter = (self.st._set_trace_hook if not shallow else @@ -818,10 +815,6 @@ class TestSoledadDbSync( oauth = False token = False - def make_app(self): - self.request_state = couch.CouchServerState(self.couch_url) - return self.make_app_with_state(self.request_state) - def setUp(self): """ Need to explicitely invoke inicialization on all bases. 
-- cgit v1.2.3 From 46af1bd22a1c0dbe506614a09af003d595b37ead Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:49:04 -0300 Subject: [tests] db3 is expected to be an attribute self.db3 is closed on tearDown. This test was creating it as a local variable, making close possibly fail. --- common/src/leap/soledad/common/tests/test_couch.py | 81 +++++++++++----------- 1 file changed, 39 insertions(+), 42 deletions(-) diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index fb92a290..28f90e5d 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -1161,7 +1161,7 @@ class CouchDatabaseSyncTests( self.db1 = self.create_database('test1', 'both') self.db2 = self.create_database('test2', 'both') doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc') - db3 = self.create_database('test3', 'both') + self.db3 = self.create_database('test3', 'both') self.sync(self.db2, self.db1) self.assertEqual( self.db1._get_generation_info(), @@ -1169,20 +1169,20 @@ class CouchDatabaseSyncTests( self.assertEqual( self.db2._get_generation_info(), self.db1._get_replica_gen_and_trans_id(self.db2._replica_uid)) - self.sync(db3, self.db1) + self.sync(self.db3, self.db1) # update on 2 doc2 = self.make_document('the-doc', doc1.rev, '{"a": 2}') self.db2.put_doc(doc2) - self.sync(self.db2, db3) - self.assertEqual(db3.get_doc('the-doc').rev, doc2.rev) + self.sync(self.db2, self.db3) + self.assertEqual(self.db3.get_doc('the-doc').rev, doc2.rev) # update on 1 doc1.set_json('{"a": 3}') self.db1.put_doc(doc1) # conflicts self.sync(self.db2, self.db1) - self.sync(db3, self.db1) + self.sync(self.db3, self.db1) self.assertTrue(self.db2.get_doc('the-doc').has_conflicts) - self.assertTrue(db3.get_doc('the-doc').has_conflicts) + self.assertTrue(self.db3.get_doc('the-doc').has_conflicts) # resolve conflicts = self.db2.get_doc_conflicts('the-doc') doc4 = self.make_document('the-doc', None, '{"a": 4}') @@ -1191,38 +1191,38 @@ class CouchDatabaseSyncTests( doc2 = self.db2.get_doc('the-doc') self.assertEqual(doc4.get_json(), doc2.get_json()) self.assertFalse(doc2.has_conflicts) - self.sync(self.db2, db3) - doc3 = db3.get_doc('the-doc') + self.sync(self.db2, self.db3) + doc3 = self.db3.get_doc('the-doc') self.assertEqual(doc4.get_json(), doc3.get_json()) self.assertFalse(doc3.has_conflicts) def test_sync_supersedes_conflicts(self): self.db1 = self.create_database('test1', 'both') self.db2 = self.create_database('test2', 'target') - db3 = self.create_database('test3', 'both') + self.db3 = self.create_database('test3', 'both') doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc') self.db2.create_doc_from_json('{"b": 1}', doc_id='the-doc') - db3.create_doc_from_json('{"c": 1}', doc_id='the-doc') - self.sync(db3, self.db1) + self.db3.create_doc_from_json('{"c": 1}', doc_id='the-doc') + self.sync(self.db3, self.db1) self.assertEqual( self.db1._get_generation_info(), - db3._get_replica_gen_and_trans_id(self.db1._replica_uid)) + self.db3._get_replica_gen_and_trans_id(self.db1._replica_uid)) self.assertEqual( - db3._get_generation_info(), - self.db1._get_replica_gen_and_trans_id(db3._replica_uid)) - self.sync(db3, self.db2) + self.db3._get_generation_info(), + self.db1._get_replica_gen_and_trans_id(self.db3._replica_uid)) + self.sync(self.db3, self.db2) self.assertEqual( self.db2._get_generation_info(), - db3._get_replica_gen_and_trans_id(self.db2._replica_uid)) + 
self.db3._get_replica_gen_and_trans_id(self.db2._replica_uid)) self.assertEqual( - db3._get_generation_info(), - self.db2._get_replica_gen_and_trans_id(db3._replica_uid)) - self.assertEqual(3, len(db3.get_doc_conflicts('the-doc'))) + self.db3._get_generation_info(), + self.db2._get_replica_gen_and_trans_id(self.db3._replica_uid)) + self.assertEqual(3, len(self.db3.get_doc_conflicts('the-doc'))) doc1.set_json('{"a": 2}') self.db1.put_doc(doc1) - self.sync(db3, self.db1) + self.sync(self.db3, self.db1) # original doc1 should have been removed from conflicts - self.assertEqual(3, len(db3.get_doc_conflicts('the-doc'))) + self.assertEqual(3, len(self.db3.get_doc_conflicts('the-doc'))) def test_sync_stops_after_get_sync_info(self): self.db1 = self.create_database('test1', 'source') @@ -1241,79 +1241,76 @@ class CouchDatabaseSyncTests( self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1') self.assertRaises( u1db_errors.InvalidReplicaUID, self.sync, self.db1, self.db2) - # remove the reference to db2 to avoid double deleting on tearDown - self.db2.close() - self.db2 = None def test_sync_detects_rollback_in_source(self): self.db1 = self.create_database('test1', 'source') self.db2 = self.create_database('test2', 'target') self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1') self.sync(self.db1, self.db2) - db1_copy = self.copy_database(self.db1) + self.db1_copy = self.copy_database(self.db1) self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2') self.sync(self.db1, self.db2) self.assertRaises( - u1db_errors.InvalidGeneration, self.sync, db1_copy, self.db2) + u1db_errors.InvalidGeneration, self.sync, self.db1_copy, self.db2) def test_sync_detects_rollback_in_target(self): self.db1 = self.create_database('test1', 'source') self.db2 = self.create_database('test2', 'target') self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent") self.sync(self.db1, self.db2) - db2_copy = self.copy_database(self.db2) + self.db2_copy = self.copy_database(self.db2) self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2') self.sync(self.db1, self.db2) self.assertRaises( - u1db_errors.InvalidGeneration, self.sync, self.db1, db2_copy) + u1db_errors.InvalidGeneration, self.sync, self.db1, self.db2_copy) def test_sync_detects_diverged_source(self): self.db1 = self.create_database('test1', 'source') self.db2 = self.create_database('test2', 'target') - db3 = self.copy_database(self.db1) + self.db3 = self.copy_database(self.db1) self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent") - db3.create_doc_from_json(tests.simple_doc, doc_id="divergent") + self.db3.create_doc_from_json(tests.simple_doc, doc_id="divergent") self.sync(self.db1, self.db2) self.assertRaises( - u1db_errors.InvalidTransactionId, self.sync, db3, self.db2) + u1db_errors.InvalidTransactionId, self.sync, self.db3, self.db2) def test_sync_detects_diverged_target(self): self.db1 = self.create_database('test1', 'source') self.db2 = self.create_database('test2', 'target') - db3 = self.copy_database(self.db2) - db3.create_doc_from_json(tests.nested_doc, doc_id="divergent") + self.db3 = self.copy_database(self.db2) + self.db3.create_doc_from_json(tests.nested_doc, doc_id="divergent") self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent") self.sync(self.db1, self.db2) self.assertRaises( - u1db_errors.InvalidTransactionId, self.sync, self.db1, db3) + u1db_errors.InvalidTransactionId, self.sync, self.db1, self.db3) def test_sync_detects_rollback_and_divergence_in_source(self): self.db1 = 
self.create_database('test1', 'source')
         self.db2 = self.create_database('test2', 'target')
         self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1')
         self.sync(self.db1, self.db2)
-        db1_copy = self.copy_database(self.db1)
+        self.db1_copy = self.copy_database(self.db1)
         self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2')
         self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc3')
         self.sync(self.db1, self.db2)
-        db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
-        db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
+        self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
+        self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
         self.assertRaises(
-            u1db_errors.InvalidTransactionId, self.sync, db1_copy, self.db2)
+            u1db_errors.InvalidTransactionId, self.sync, self.db1_copy, self.db2)

     def test_sync_detects_rollback_and_divergence_in_target(self):
         self.db1 = self.create_database('test1', 'source')
         self.db2 = self.create_database('test2', 'target')
         self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
         self.sync(self.db1, self.db2)
-        db2_copy = self.copy_database(self.db2)
+        self.db2_copy = self.copy_database(self.db2)
         self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2')
         self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc3')
         self.sync(self.db1, self.db2)
-        db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
-        db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
+        self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
+        self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
         self.assertRaises(
-            u1db_errors.InvalidTransactionId, self.sync, self.db1, db2_copy)
+            u1db_errors.InvalidTransactionId, self.sync, self.db1, self.db2_copy)

     def test_optional_sync_preserve_json(self):
         self.db1 = self.create_database('test1', 'source')
-- 
cgit v1.2.3


From 66fab1ba7ff4f73af512fea2fd80aef53cbdd2c6 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Sat, 12 Sep 2015 22:15:40 -0300
Subject: [tests] subclass instead of copy test code

This test class only defines a different set of scenarios; all other
methods are the same as in the parent class.
---
 .../soledad/common/tests/test_sqlcipher_sync.py | 368 +--------
 1 file changed, 2 insertions(+), 366 deletions(-)

diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py
index 343b915c..15148f3f 100644
--- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py
+++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py
@@ -41,6 +41,7 @@ from leap.soledad.client.sqlcipher import SQLCipherDatabase
 from leap.soledad.common.tests import u1db_tests as tests
 from leap.soledad.common.tests.test_sqlcipher import SQLCIPHER_SCENARIOS
 from leap.soledad.common.tests.util import make_soledad_app
+from leap.soledad.common.tests.test_sync_target import SoledadDatabaseSyncTargetTests
 from leap.soledad.common.tests.util import soledad_sync_target
 from leap.soledad.common.tests.util import BaseSoledadTest
 from leap.soledad.common.tests.util import SoledadWithCouchServerMixin
@@ -753,11 +754,7 @@ target_scenarios = [
 ]


-class SQLCipherSyncTargetTests(
-        TestWithScenarios,
-        tests.DatabaseBaseTests,
-        SoledadWithCouchServerMixin,
-        tests.TestCaseWithServer):
+class SQLCipherSyncTargetTests(SoledadDatabaseSyncTargetTests):

     # TODO: implement _set_trace_hook(_shallow) in SoledadHTTPSyncTarget so
     # skipped tests can be succesfully executed.
@@ -766,364 +763,3 @@ class SQLCipherSyncTargetTests( target_scenarios)) whitebox = False - - def setUp(self): - super(SQLCipherSyncTargetTests, self).setUp() - self.db, self.st = self.create_db_and_target(self) - self.addCleanup(self.st.close) - self.other_changes = [] - - def tearDown(self): - super(SQLCipherSyncTargetTests, self).setUp() - - def assertLastExchangeLog(self, db, expected): - log = getattr(db, '_last_exchange_log', None) - if log is None: - return - self.assertEqual(expected, log) - - def receive_doc(self, doc, gen, trans_id): - self.other_changes.append( - (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id)) - - def set_trace_hook(self, callback, shallow=False): - setter = (self.st._set_trace_hook if not shallow else - self.st._set_trace_hook_shallow) - try: - setter(callback) - except NotImplementedError: - self.skipTest("%s does not implement _set_trace_hook" - % (self.st.__class__.__name__,)) - - def test_get_sync_target(self): - self.assertIsNot(None, self.st) - - @defer.inlineCallbacks - def test_get_sync_info(self): - sync_info = yield self.st.get_sync_info('other') - self.assertEqual( - ('test', 0, '', 0, ''), sync_info) - - @defer.inlineCallbacks - def test_create_doc_updates_sync_info(self): - sync_info = yield self.st.get_sync_info('other') - self.assertEqual( - ('test', 0, '', 0, ''), sync_info) - self.db.create_doc_from_json(tests.simple_doc) - sync_info = yield self.st.get_sync_info('other') - self.assertEqual(1, sync_info[1]) - - @defer.inlineCallbacks - def test_record_sync_info(self): - yield self.st.record_sync_info('replica', 10, 'T-transid') - sync_info = yield self.st.get_sync_info('other') - self.assertEqual( - ('test', 0, '', 10, 'T-transid'), sync_info) - - @defer.inlineCallbacks - def test_sync_exchange(self): - """ - Modified to account for possibly receiving encrypted documents from - sever-side. - """ - - docs_by_gen = [ - (self.make_document('doc-id', 'replica:1', tests.simple_doc), 10, - 'T-sid')] - new_gen, trans_id = yield self.st.sync_exchange( - docs_by_gen, 'replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertGetEncryptedDoc( - self.db, 'doc-id', 'replica:1', tests.simple_doc, False) - self.assertTransactionLog(['doc-id'], self.db) - last_trans_id = self.getLastTransId(self.db) - self.assertEqual(([], 1, last_trans_id), - (self.other_changes, new_gen, last_trans_id)) - sync_info = yield self.st.get_sync_info('replica') - self.assertEqual(10, sync_info[3]) - - @defer.inlineCallbacks - def test_sync_exchange_push_many(self): - """ - Modified to account for possibly receiving encrypted documents from - sever-side. 
- """ - docs_by_gen = [ - (self.make_document( - 'doc-id', 'replica:1', tests.simple_doc), 10, 'T-1'), - (self.make_document('doc-id2', 'replica:1', tests.nested_doc), 11, - 'T-2')] - new_gen, trans_id = yield self.st.sync_exchange( - docs_by_gen, 'replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertGetEncryptedDoc( - self.db, 'doc-id', 'replica:1', tests.simple_doc, False) - self.assertGetEncryptedDoc( - self.db, 'doc-id2', 'replica:1', tests.nested_doc, False) - self.assertTransactionLog(['doc-id', 'doc-id2'], self.db) - last_trans_id = self.getLastTransId(self.db) - self.assertEqual(([], 2, last_trans_id), - (self.other_changes, new_gen, trans_id)) - sync_info = yield self.st.get_sync_info('replica') - self.assertEqual(11, sync_info[3]) - - @defer.inlineCallbacks - def test_sync_exchange_returns_many_new_docs(self): - """ - Modified to account for JSON serialization differences. - """ - doc = self.db.create_doc_from_json(tests.simple_doc) - doc2 = self.db.create_doc_from_json(tests.nested_doc) - self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db) - new_gen, _ = yield self.st.sync_exchange( - [], 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db) - self.assertEqual(2, new_gen) - self.assertEqual( - [(doc.doc_id, doc.rev, 1), - (doc2.doc_id, doc2.rev, 2)], - [c[:2] + c[3:4] for c in self.other_changes]) - self.assertEqual( - json.dumps(tests.simple_doc), - json.dumps(self.other_changes[0][2])) - self.assertEqual( - json.loads(tests.nested_doc), - json.loads(self.other_changes[1][2])) - if self.whitebox: - self.assertEqual( - self.db._last_exchange_log['return'], - {'last_gen': 2, 'docs': - [(doc.doc_id, doc.rev), (doc2.doc_id, doc2.rev)]}) - - @defer.inlineCallbacks - def test_sync_exchange_deleted(self): - doc = self.db.create_doc_from_json('{}') - edit_rev = 'replica:1|' + doc.rev - docs_by_gen = [ - (self.make_document(doc.doc_id, edit_rev, None), 10, 'T-sid')] - new_gen, trans_id = yield self.st.sync_exchange( - docs_by_gen, 'replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertGetDocIncludeDeleted( - self.db, doc.doc_id, edit_rev, None, False) - self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db) - last_trans_id = self.getLastTransId(self.db) - self.assertEqual(([], 2, last_trans_id), - (self.other_changes, new_gen, trans_id)) - sync_info = yield self.st.get_sync_info('replica') - self.assertEqual(10, sync_info[3]) - - @defer.inlineCallbacks - def test_sync_exchange_refuses_conflicts(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], self.db) - new_doc = '{"key": "altval"}' - docs_by_gen = [ - (self.make_document(doc.doc_id, 'replica:1', new_doc), 10, - 'T-sid')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id], self.db) - self.assertEqual( - (doc.doc_id, doc.rev, tests.simple_doc, 1), - self.other_changes[0][:-1]) - self.assertEqual(1, new_gen) - if self.whitebox: - self.assertEqual(self.db._last_exchange_log['return'], - {'last_gen': 1, 'docs': [(doc.doc_id, doc.rev)]}) - - @defer.inlineCallbacks - def test_sync_exchange_ignores_convergence(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], 
self.db) - gen, txid = self.db._get_generation_info() - docs_by_gen = [ - (self.make_document( - doc.doc_id, doc.rev, tests.simple_doc), 10, 'T-sid')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'replica', last_known_generation=gen, - last_known_trans_id=txid, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id], self.db) - self.assertEqual(([], 1), (self.other_changes, new_gen)) - - @defer.inlineCallbacks - def test_sync_exchange_returns_new_docs(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], self.db) - new_gen, _ = yield self.st.sync_exchange( - [], 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id], self.db) - self.assertEqual( - (doc.doc_id, doc.rev, tests.simple_doc, 1), - self.other_changes[0][:-1]) - self.assertEqual(1, new_gen) - if self.whitebox: - self.assertEqual(self.db._last_exchange_log['return'], - {'last_gen': 1, 'docs': [(doc.doc_id, doc.rev)]}) - - @defer.inlineCallbacks - def test_sync_exchange_returns_deleted_docs(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - self.db.delete_doc(doc) - self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db) - new_gen, _ = yield self.st.sync_exchange( - [], 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db) - self.assertEqual( - (doc.doc_id, doc.rev, None, 2), self.other_changes[0][:-1]) - self.assertEqual(2, new_gen) - if self.whitebox: - self.assertEqual(self.db._last_exchange_log['return'], - {'last_gen': 2, 'docs': [(doc.doc_id, doc.rev)]}) - - @defer.inlineCallbacks - def test_sync_exchange_getting_newer_docs(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], self.db) - new_doc = '{"key": "altval"}' - docs_by_gen = [ - (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10, - 'T-sid')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db) - self.assertEqual(([], 2), (self.other_changes, new_gen)) - - @defer.inlineCallbacks - def test_sync_exchange_with_concurrent_updates_of_synced_doc(self): - expected = [] - - def before_whatschanged_cb(state): - if state != 'before whats_changed': - return - cont = '{"key": "cuncurrent"}' - conc_rev = self.db.put_doc( - self.make_document(doc.doc_id, 'test:1|z:2', cont)) - expected.append((doc.doc_id, conc_rev, cont, 3)) - - self.set_trace_hook(before_whatschanged_cb) - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], self.db) - new_doc = '{"key": "altval"}' - docs_by_gen = [ - (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10, - 'T-sid')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertEqual(expected, [c[:-1] for c in self.other_changes]) - self.assertEqual(3, new_gen) - - @defer.inlineCallbacks - def test_sync_exchange_with_concurrent_updates(self): - - def after_whatschanged_cb(state): - if state != 'after whats_changed': - return - self.db.create_doc_from_json('{"new": "doc"}') - - self.set_trace_hook(after_whatschanged_cb) - doc = self.db.create_doc_from_json(tests.simple_doc) - 
self.assertTransactionLog([doc.doc_id], self.db) - new_doc = '{"key": "altval"}' - docs_by_gen = [ - (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10, - 'T-sid')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertEqual(([], 2), (self.other_changes, new_gen)) - - @defer.inlineCallbacks - def test_sync_exchange_converged_handling(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - docs_by_gen = [ - (self.make_document('new', 'other:1', '{}'), 4, 'T-foo'), - (self.make_document(doc.doc_id, doc.rev, doc.get_json()), 5, - 'T-bar')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertEqual(([], 2), (self.other_changes, new_gen)) - - @defer.inlineCallbacks - def test_sync_exchange_detect_incomplete_exchange(self): - def before_get_docs_explode(state): - if state != 'before get_docs': - return - raise errors.U1DBError("fail") - self.set_trace_hook(before_get_docs_explode) - # suppress traceback printing in the wsgiref server - # self.patch(simple_server.ServerHandler, - # 'log_exception', lambda h, exc_info: None) - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], self.db) - with self.assertRaises((errors.U1DBError, errors.BrokenSyncStream)): - yield self.st.sync_exchange( - [], 'other-replica', - last_known_generation=0, last_known_trans_id=None, - insert_doc_cb=self.receive_doc) - - @defer.inlineCallbacks - def test_sync_exchange_doc_ids(self): - sync_exchange_doc_ids = getattr(self.st, 'sync_exchange_doc_ids', None) - if sync_exchange_doc_ids is None: - self.skipTest("sync_exchange_doc_ids not implemented") - db2 = self.create_database('test2') - doc = db2.create_doc_from_json(tests.simple_doc) - new_gen, trans_id = sync_exchange_doc_ids( - db2, [(doc.doc_id, 10, 'T-sid')], 0, None, - insert_doc_cb=self.receive_doc) - self.assertGetDoc(self.db, doc.doc_id, doc.rev, - tests.simple_doc, False) - self.assertTransactionLog([doc.doc_id], self.db) - last_trans_id = self.getLastTransId(self.db) - self.assertEqual(([], 1, last_trans_id), - (self.other_changes, new_gen, trans_id)) - self.assertEqual(10, self.st.get_sync_info(db2._replica_uid)[3]) - - @defer.inlineCallbacks - def test__set_trace_hook(self): - called = [] - - def cb(state): - called.append(state) - - self.set_trace_hook(cb) - yield self.st.sync_exchange([], 'replica', 0, None, self.receive_doc) - yield self.st.record_sync_info('replica', 0, 'T-sid') - self.assertEqual(['before whats_changed', - 'after whats_changed', - 'before get_docs', - 'record_sync_info', - ], - called) - - @defer.inlineCallbacks - def test__set_trace_hook_shallow(self): - if (self.st._set_trace_hook_shallow == self.st._set_trace_hook or - self.st._set_trace_hook_shallow.im_func == - SoledadHTTPSyncTarget._set_trace_hook_shallow.im_func): - # shallow same as full - expected = ['before whats_changed', - 'after whats_changed', - 'before get_docs', - 'record_sync_info', - ] - else: - expected = ['sync_exchange', 'record_sync_info'] - - called = [] - - def cb(state): - called.append(state) - - self.set_trace_hook(cb, shallow=True) - self.st.sync_exchange([], 'replica', 0, None, self.receive_doc) - self.st.record_sync_info('replica', 0, 'T-sid') - self.assertEqual(expected, called) -- cgit v1.2.3 From 197ad8b7c6a5d180b798f4dfcbe7f6d5fdd18298 Mon Sep 17 00:00:00 
2001 From: Victor Shyba Date: Sun, 13 Sep 2015 02:41:00 -0300 Subject: [tests] change big tearDown for simple addCleanup Big tearDown logic can be replaced by a simple addCleanup. Also remove unused imports and fix a small typo on a database cleanup check. --- .../soledad/common/tests/test_sqlcipher_sync.py | 25 +--------------------- common/src/leap/soledad/common/tests/util.py | 2 +- 2 files changed, 2 insertions(+), 25 deletions(-) diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py index 15148f3f..2e703023 100644 --- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py +++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py @@ -19,7 +19,6 @@ Test sqlcipher backend sync. """ -import json import os from u1db import sync @@ -28,15 +27,10 @@ from u1db import errors from uuid import uuid4 from testscenarios import TestWithScenarios -from urlparse import urljoin -from twisted.internet import defer - -from leap.soledad.common import couch from leap.soledad.common.crypto import ENC_SCHEME_KEY from leap.soledad.client.http_target import SoledadHTTPSyncTarget from leap.soledad.client.crypto import decrypt_doc_dict -from leap.soledad.client.sqlcipher import SQLCipherDatabase from leap.soledad.common.tests import u1db_tests as tests from leap.soledad.common.tests.test_sqlcipher import SQLCIPHER_SCENARIOS @@ -44,7 +38,6 @@ from leap.soledad.common.tests.util import make_soledad_app from leap.soledad.common.tests.test_sync_target import SoledadDatabaseSyncTargetTests from leap.soledad.common.tests.util import soledad_sync_target from leap.soledad.common.tests.util import BaseSoledadTest -from leap.soledad.common.tests.util import SoledadWithCouchServerMixin # ----------------------------------------------------------------------------- @@ -100,23 +93,6 @@ class SQLCipherDatabaseSyncTests( self._use_tracking = {} super(tests.DatabaseBaseTests, self).setUp() - def tearDown(self): - super(tests.DatabaseBaseTests, self).tearDown() - if hasattr(self, 'db1') and isinstance(self.db1, SQLCipherDatabase): - self.db1.close() - if hasattr(self, 'db1_copy') \ - and isinstance(self.db1_copy, SQLCipherDatabase): - self.db1_copy.close() - if hasattr(self, 'db2') \ - and isinstance(self.db2, SQLCipherDatabase): - self.db2.close() - if hasattr(self, 'db2_copy') \ - and isinstance(self.db2_copy, SQLCipherDatabase): - self.db2_copy.close() - if hasattr(self, 'db3') \ - and isinstance(self.db3, SQLCipherDatabase): - self.db3.close() - def create_database(self, replica_uid, sync_role=None): if replica_uid == 'test' and sync_role is None: # created up the chain by base class but unused @@ -124,6 +100,7 @@ class SQLCipherDatabaseSyncTests( db = self.create_database_for_role(replica_uid, sync_role) if sync_role: self._use_tracking[db] = (replica_uid, sync_role) + self.addCleanup(db.close) return db def create_database_for_role(self, replica_uid, sync_role): diff --git a/common/src/leap/soledad/common/tests/util.py b/common/src/leap/soledad/common/tests/util.py index 3187472f..617cccef 100644 --- a/common/src/leap/soledad/common/tests/util.py +++ b/common/src/leap/soledad/common/tests/util.py @@ -356,7 +356,7 @@ class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest): """ current_dbs = set([db for db in self.couch_server]) remaining_dbs = current_dbs - self.previous_dbs - if remaining_dbs and False: + if remaining_dbs: raise Exception("tests created %s and didn't clean up!", remaining_dbs) def delete_db(self, name): -- 
cgit v1.2.3 From 3f5003afee45c02bb9668c150506447cf0d2f52c Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Sun, 13 Sep 2015 03:27:12 -0300 Subject: [tests] test_couch does not need a server Removing unused code as this test case does not need a server. --- common/src/leap/soledad/common/tests/test_couch.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index 28f90e5d..ae7933c3 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -43,7 +43,6 @@ from leap.soledad.common.tests.util import sync_via_synchronizer from leap.soledad.common.tests.u1db_tests import test_backends from leap.soledad.common.tests.u1db_tests import DatabaseBaseTests -from leap.soledad.common.tests.u1db_tests import TestCaseWithServer from u1db.backends.inmemory import InMemoryIndex @@ -208,7 +207,6 @@ nested_doc = tests.nested_doc class CouchDatabaseSyncTargetTests( TestWithScenarios, DatabaseBaseTests, - TestCaseWithServer, CouchDBTestCase): # TODO: implement _set_trace_hook(_shallow) in CouchSyncTarget so @@ -231,20 +229,11 @@ class CouchDatabaseSyncTargetTests( def setUp(self): CouchDBTestCase.setUp(self) - # from TestCaseWithServer.setUp - self.server = self.server_thread = self.port = None # other stuff self.db, self.st = self.create_db_and_target(self) self.other_changes = [] def tearDown(self): - # from TestCaseWithServer.tearDown - if self.server is not None: - self.server.shutdown() - self.server_thread.join() - self.server.server_close() - if self.port: - self.port.stopListening() self.db.close() CouchDBTestCase.tearDown(self) -- cgit v1.2.3 From cd4c4f868e1c10e44f825efc0e870edf9fe8e2c1 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 14 Sep 2015 11:21:24 -0300 Subject: [style] pep8 fixes --- common/src/leap/soledad/common/tests/test_couch.py | 6 ++++-- common/src/leap/soledad/common/tests/test_sqlcipher_sync.py | 6 ++++-- common/src/leap/soledad/common/tests/test_sync_target.py | 7 ++++--- common/src/leap/soledad/common/tests/util.py | 3 ++- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index ae7933c3..845f1602 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -1285,7 +1285,8 @@ class CouchDatabaseSyncTests( self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2') self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3') self.assertRaises( - u1db_errors.InvalidTransactionId, self.sync, self.db1_copy, self.db2) + u1db_errors.InvalidTransactionId, self.sync, + self.db1_copy, self.db2) def test_sync_detects_rollback_and_divergence_in_target(self): self.db1 = self.create_database('test1', 'source') @@ -1299,7 +1300,8 @@ class CouchDatabaseSyncTests( self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2') self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3') self.assertRaises( - u1db_errors.InvalidTransactionId, self.sync, self.db1, self.db2_copy) + u1db_errors.InvalidTransactionId, self.sync, + self.db1, self.db2_copy) def test_optional_sync_preserve_json(self): self.db1 = self.create_database('test1', 'source') diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py index 2e703023..439fc070 100644 --- 
a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py +++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py @@ -35,7 +35,8 @@ from leap.soledad.client.crypto import decrypt_doc_dict from leap.soledad.common.tests import u1db_tests as tests from leap.soledad.common.tests.test_sqlcipher import SQLCIPHER_SCENARIOS from leap.soledad.common.tests.util import make_soledad_app -from leap.soledad.common.tests.test_sync_target import SoledadDatabaseSyncTargetTests +from leap.soledad.common.tests.test_sync_target import \ + SoledadDatabaseSyncTargetTests from leap.soledad.common.tests.util import soledad_sync_target from leap.soledad.common.tests.util import BaseSoledadTest @@ -713,7 +714,8 @@ def make_local_db_and_soledad_target( test, path='test', source_replica_uid=uuid4().hex): test.startTwistedServer() - db = test.request_state._create_database(replica_uid=os.path.basename(path)) + replica_uid = os.path.basename(path) + db = test.request_state._create_database(replica_uid) sync_db = test._soledad._sync_db sync_enc_pool = test._soledad._sync_enc_pool st = soledad_sync_target( diff --git a/common/src/leap/soledad/common/tests/test_sync_target.py b/common/src/leap/soledad/common/tests/test_sync_target.py index ba556ea4..c0987e90 100644 --- a/common/src/leap/soledad/common/tests/test_sync_target.py +++ b/common/src/leap/soledad/common/tests/test_sync_target.py @@ -150,7 +150,8 @@ def make_local_db_and_soledad_target( test, path='test', source_replica_uid=uuid4().hex): test.startTwistedServer() - db = test.request_state._create_database(replica_uid=os.path.basename(path)) + replica_uid = os.path.basename(path) + db = test.request_state._create_database(replica_uid) sync_db = test._soledad._sync_db sync_enc_pool = test._soledad._sync_enc_pool st = soledad_sync_target( @@ -360,8 +361,8 @@ class TestSoledadSyncTarget( remote_target = self.getSyncTarget( source_replica_uid='other-id') yield remote_target.record_sync_info('other-id', 2, 'T-transid') - self.assertEqual( - (2, 'T-transid'), self.db2._get_replica_gen_and_trans_id('other-id')) + self.assertEqual((2, 'T-transid'), + self.db2._get_replica_gen_and_trans_id('other-id')) @defer.inlineCallbacks def test_sync_exchange_receive(self): diff --git a/common/src/leap/soledad/common/tests/util.py b/common/src/leap/soledad/common/tests/util.py index 617cccef..41307eb7 100644 --- a/common/src/leap/soledad/common/tests/util.py +++ b/common/src/leap/soledad/common/tests/util.py @@ -357,7 +357,8 @@ class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest): current_dbs = set([db for db in self.couch_server]) remaining_dbs = current_dbs - self.previous_dbs if remaining_dbs: - raise Exception("tests created %s and didn't clean up!", remaining_dbs) + raise Exception("tests created %s and didn't clean up!", + remaining_dbs) def delete_db(self, name): try: -- cgit v1.2.3 From 9a68d9a1db0e3d2ddbea9c194d4ad0d006bf94e3 Mon Sep 17 00:00:00 2001 From: Kali Kaneko Date: Mon, 14 Sep 2015 23:04:18 -0400 Subject: [feat] use async events client in this way we use the reactor pattern to dispatch the events, instead of having the overhead of running a separate client thread. 
- Resolves: #7274 --- client/src/leap/soledad/client/api.py | 2 +- client/src/leap/soledad/client/events.py | 4 ++-- client/src/leap/soledad/client/http_target/fetch.py | 4 ++-- client/src/leap/soledad/client/http_target/send.py | 4 ++-- client/src/leap/soledad/client/secrets.py | 12 ++++++------ 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/client/src/leap/soledad/client/api.py b/client/src/leap/soledad/client/api.py index a6a98551..a558addd 100644 --- a/client/src/leap/soledad/client/api.py +++ b/client/src/leap/soledad/client/api.py @@ -718,7 +718,7 @@ class Soledad(object): return failure def _emit_done_data_sync(passthrough): - soledad_events.emit( + soledad_events.emit_async( soledad_events.SOLEDAD_DONE_DATA_SYNC, self.uuid) return passthrough diff --git a/client/src/leap/soledad/client/events.py b/client/src/leap/soledad/client/events.py index b1379521..058be59c 100644 --- a/client/src/leap/soledad/client/events.py +++ b/client/src/leap/soledad/client/events.py @@ -20,7 +20,7 @@ Signaling functions. """ -from leap.common.events import emit +from leap.common.events import emit_async from leap.common.events import catalog @@ -40,7 +40,7 @@ SOLEDAD_SYNC_RECEIVE_STATUS = catalog.SOLEDAD_SYNC_RECEIVE_STATUS __all__ = [ "catalog", - "emit", + "emit_async", "SOLEDAD_CREATING_KEYS", "SOLEDAD_DONE_CREATING_KEYS", "SOLEDAD_DOWNLOADING_KEYS", diff --git a/client/src/leap/soledad/client/http_target/fetch.py b/client/src/leap/soledad/client/http_target/fetch.py index 34d7cb0b..57578563 100644 --- a/client/src/leap/soledad/client/http_target/fetch.py +++ b/client/src/leap/soledad/client/http_target/fetch.py @@ -21,7 +21,7 @@ from u1db.remote import utils from twisted.internet import defer from leap.soledad.common.document import SoledadDocument from leap.soledad.client.events import SOLEDAD_SYNC_RECEIVE_STATUS -from leap.soledad.client.events import emit +from leap.soledad.client.events import emit_async from leap.soledad.client.crypto import is_symmetrically_encrypted from leap.soledad.client.encdecpool import SyncDecrypterPool from leap.soledad.client.http_target.support import RequestBody @@ -245,7 +245,7 @@ class HTTPDocFetcher(object): def _emit_receive_status(received_docs, total): content = {'received': received_docs, 'total': total} - emit(SOLEDAD_SYNC_RECEIVE_STATUS, content) + emit_async(SOLEDAD_SYNC_RECEIVE_STATUS, content) msg = "%d/%d" % (received_docs, total) logger.debug("Sync receive status: %s" % msg) diff --git a/client/src/leap/soledad/client/http_target/send.py b/client/src/leap/soledad/client/http_target/send.py index 71157da5..80483f0d 100644 --- a/client/src/leap/soledad/client/http_target/send.py +++ b/client/src/leap/soledad/client/http_target/send.py @@ -17,7 +17,7 @@ import json import logging from twisted.internet import defer -from leap.soledad.client.events import emit +from leap.soledad.client.events import emit_async from leap.soledad.client.events import SOLEDAD_SYNC_SEND_STATUS from leap.soledad.client.http_target.support import RequestBody logger = logging.getLogger(__name__) @@ -96,7 +96,7 @@ class HTTPDocSender(object): def _emit_send_status(idx, total): content = {'sent': idx, 'total': total} - emit(SOLEDAD_SYNC_SEND_STATUS, content) + emit_async(SOLEDAD_SYNC_SEND_STATUS, content) msg = "%d/%d" % (idx, total) logger.debug("Sync send status: %s" % msg) diff --git a/client/src/leap/soledad/client/secrets.py b/client/src/leap/soledad/client/secrets.py index ee3aacdb..9aadd72a 100644 --- a/client/src/leap/soledad/client/secrets.py +++ 
b/client/src/leap/soledad/client/secrets.py @@ -432,13 +432,13 @@ class SoledadSecrets(object): :return: a document with encrypted key material in its contents :rtype: document.SoledadDocument """ - events.emit(events.SOLEDAD_DOWNLOADING_KEYS, self._uuid) + events.emit_async(events.SOLEDAD_DOWNLOADING_KEYS, self._uuid) db = self._shared_db if not db: logger.warning('No shared db found') return doc = db.get_doc(self._shared_db_doc_id()) - events.emit(events.SOLEDAD_DONE_DOWNLOADING_KEYS, self._uuid) + events.emit_async(events.SOLEDAD_DONE_DOWNLOADING_KEYS, self._uuid) return doc def _put_secrets_in_shared_db(self): @@ -461,13 +461,13 @@ class SoledadSecrets(object): # fill doc with encrypted secrets doc.content = self._export_recovery_document() # upload secrets to server - events.emit(events.SOLEDAD_UPLOADING_KEYS, self._uuid) + events.emit_async(events.SOLEDAD_UPLOADING_KEYS, self._uuid) db = self._shared_db if not db: logger.warning('No shared db found') return db.put_doc(doc) - events.emit(events.SOLEDAD_DONE_UPLOADING_KEYS, self._uuid) + events.emit_async(events.SOLEDAD_DONE_UPLOADING_KEYS, self._uuid) # # Management of secret for symmetric encryption. @@ -587,13 +587,13 @@ class SoledadSecrets(object): :return: The id of the generated secret. :rtype: str """ - events.emit(events.SOLEDAD_CREATING_KEYS, self._uuid) + events.emit_async(events.SOLEDAD_CREATING_KEYS, self._uuid) # generate random secret secret = os.urandom(self.GEN_SECRET_LENGTH) secret_id = sha256(secret).hexdigest() self._secrets[secret_id] = secret self._store_secrets() - events.emit(events.SOLEDAD_DONE_CREATING_KEYS, self._uuid) + events.emit_async(events.SOLEDAD_DONE_CREATING_KEYS, self._uuid) return secret_id def _store_secrets(self): -- cgit v1.2.3 From e5e572bee81bec4bc60f4a538269f9cf5061965a Mon Sep 17 00:00:00 2001 From: Kali Kaneko Date: Mon, 14 Sep 2015 23:32:13 -0400 Subject: [pkg] bump leap versions needed for client --- client/pkg/requirements-leap.pip | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/pkg/requirements-leap.pip b/client/pkg/requirements-leap.pip index c5fbcd5f..52d1263b 100644 --- a/client/pkg/requirements-leap.pip +++ b/client/pkg/requirements-leap.pip @@ -1,2 +1,2 @@ -leap.common>=0.4.1 -leap.soledad.common>=0.6.5 +leap.common>=0.4.3 +leap.soledad.common>=0.7.0 -- cgit v1.2.3 From 98f99d1106a8941b701acda78095c3e4d1cd5f9e Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 14 Sep 2015 15:33:01 -0300 Subject: [bug] review some of the close methods We are getting "too many files open" while running tests with 1024 max files open. This is a leak from close methods. Some of them should be fixed on this commit, but further investigation may be necessary. 
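When chasing leaks like this, it helps to watch descriptor usage from
inside the test process. A small Linux-only debugging sketch (not part
of the patch; /proc/self/fd is not portable):

    import os
    import resource

    def open_fd_count():
        # every entry in /proc/self/fd is one open file descriptor
        return len(os.listdir('/proc/self/fd'))

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    print('%d of %d file descriptors in use' % (open_fd_count(), soft))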
--- client/src/leap/soledad/client/encdecpool.py | 2 ++ client/src/leap/soledad/client/http_target/api.py | 7 ++++++- client/src/leap/soledad/client/sqlcipher.py | 1 + 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/client/src/leap/soledad/client/encdecpool.py b/client/src/leap/soledad/client/encdecpool.py index 2ad98767..6d3c11b9 100644 --- a/client/src/leap/soledad/client/encdecpool.py +++ b/client/src/leap/soledad/client/encdecpool.py @@ -74,6 +74,8 @@ class SyncEncryptDecryptPool(object): self._started = True def stop(self): + if not self._started: + return self._started = False self._destroy_pool() # maybe cancel the next delayed call diff --git a/client/src/leap/soledad/client/http_target/api.py b/client/src/leap/soledad/client/http_target/api.py index dc13e9cc..dcc762f6 100644 --- a/client/src/leap/soledad/client/http_target/api.py +++ b/client/src/leap/soledad/client/http_target/api.py @@ -32,8 +32,13 @@ class SyncTargetAPI(SyncTarget): Declares public methods and implements u1db.SyncTarget. """ + @defer.inlineCallbacks def close(self): - self._http.close() + if self._sync_enc_pool: + self._sync_enc_pool.stop() + if self._sync_decr_pool: + self._sync_decr_pool.stop() + yield self._http.close() def set_creds(self, creds): """ diff --git a/client/src/leap/soledad/client/sqlcipher.py b/client/src/leap/soledad/client/sqlcipher.py index 2151884a..22ddc87d 100644 --- a/client/src/leap/soledad/client/sqlcipher.py +++ b/client/src/leap/soledad/client/sqlcipher.py @@ -559,6 +559,7 @@ class SQLCipherU1DBSync(SQLCipherDatabase): """ Close the syncer and syncdb orderly """ + super(SQLCipherU1DBSync, self).close() # close all open syncers for url in self._syncers.keys(): _, syncer = self._syncers[url] -- cgit v1.2.3 From 223f26eb2df3378ce275da2774a7ae923519afa1 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 14 Sep 2015 17:35:35 -0300 Subject: [tests] remove remaining dbs check This was used during db isolation to make sure that everything created was destroyed, but it fails with -j (multiprocess). Removing it allows parallelism. --- common/src/leap/soledad/common/tests/util.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/common/src/leap/soledad/common/tests/util.py b/common/src/leap/soledad/common/tests/util.py index 41307eb7..1c7adb91 100644 --- a/common/src/leap/soledad/common/tests/util.py +++ b/common/src/leap/soledad/common/tests/util.py @@ -345,20 +345,9 @@ class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest): """ Make sure we have a CouchDB instance for a test. """ - server = self.couch_server = couchdb.Server() - self.previous_dbs = set([db for db in server]) self.couch_port = 5984 self.couch_url = 'http://localhost:%d' % self.couch_port - - def tearDown(self): - """ - Stop CouchDB instance for test. 
- """ - current_dbs = set([db for db in self.couch_server]) - remaining_dbs = current_dbs - self.previous_dbs - if remaining_dbs: - raise Exception("tests created %s and didn't clean up!", - remaining_dbs) + self.couch_server = couchdb.Server(self.couch_url) def delete_db(self, name): try: -- cgit v1.2.3 From 8acc8c9058ff5983a78d6df4f66d19de1762cf4d Mon Sep 17 00:00:00 2001 From: Ruben Pollan Date: Wed, 16 Sep 2015 12:38:52 +0200 Subject: [test] fix tests with the new emit_async --- .../src/leap/soledad/common/tests/test_soledad.py | 60 +++++++++++----------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/common/src/leap/soledad/common/tests/test_soledad.py b/common/src/leap/soledad/common/tests/test_soledad.py index bd356858..85d6734e 100644 --- a/common/src/leap/soledad/common/tests/test_soledad.py +++ b/common/src/leap/soledad/common/tests/test_soledad.py @@ -223,7 +223,7 @@ class SoledadSignalingTestCase(BaseSoledadTest): def setUp(self): # mock signaling soledad.client.signal = Mock() - soledad.client.secrets.events.emit = Mock() + soledad.client.secrets.events.emit_async = Mock() # run parent's setUp BaseSoledadTest.setUp(self) @@ -245,57 +245,57 @@ class SoledadSignalingTestCase(BaseSoledadTest): - downloading keys / done downloading keys. - uploading keys / done uploading keys. """ - soledad.client.secrets.events.emit.reset_mock() + soledad.client.secrets.events.emit_async.reset_mock() # get a fresh instance so it emits all bootstrap signals sol = self._soledad_instance( secrets_path='alternative_stage3.json', local_db_path='alternative_stage3.u1db') # reverse call order so we can verify in the order the signals were # expected - soledad.client.secrets.events.emit.mock_calls.reverse() - soledad.client.secrets.events.emit.call_args = \ - soledad.client.secrets.events.emit.call_args_list[0] - soledad.client.secrets.events.emit.call_args_list.reverse() + soledad.client.secrets.events.emit_async.mock_calls.reverse() + soledad.client.secrets.events.emit_async.call_args = \ + soledad.client.secrets.events.emit_async.call_args_list[0] + soledad.client.secrets.events.emit_async.call_args_list.reverse() # downloading keys signals - soledad.client.secrets.events.emit.assert_called_with( + soledad.client.secrets.events.emit_async.assert_called_with( catalog.SOLEDAD_DOWNLOADING_KEYS, ADDRESS, ) - self._pop_mock_call(soledad.client.secrets.events.emit) - soledad.client.secrets.events.emit.assert_called_with( + self._pop_mock_call(soledad.client.secrets.events.emit_async) + soledad.client.secrets.events.emit_async.assert_called_with( catalog.SOLEDAD_DONE_DOWNLOADING_KEYS, ADDRESS, ) # creating keys signals - self._pop_mock_call(soledad.client.secrets.events.emit) - soledad.client.secrets.events.emit.assert_called_with( + self._pop_mock_call(soledad.client.secrets.events.emit_async) + soledad.client.secrets.events.emit_async.assert_called_with( catalog.SOLEDAD_CREATING_KEYS, ADDRESS, ) - self._pop_mock_call(soledad.client.secrets.events.emit) - soledad.client.secrets.events.emit.assert_called_with( + self._pop_mock_call(soledad.client.secrets.events.emit_async) + soledad.client.secrets.events.emit_async.assert_called_with( catalog.SOLEDAD_DONE_CREATING_KEYS, ADDRESS, ) # downloading once more (inside _put_keys_in_shared_db) - self._pop_mock_call(soledad.client.secrets.events.emit) - soledad.client.secrets.events.emit.assert_called_with( + self._pop_mock_call(soledad.client.secrets.events.emit_async) + soledad.client.secrets.events.emit_async.assert_called_with( 
catalog.SOLEDAD_DOWNLOADING_KEYS, ADDRESS, ) - self._pop_mock_call(soledad.client.secrets.events.emit) - soledad.client.secrets.events.emit.assert_called_with( + self._pop_mock_call(soledad.client.secrets.events.emit_async) + soledad.client.secrets.events.emit_async.assert_called_with( catalog.SOLEDAD_DONE_DOWNLOADING_KEYS, ADDRESS, ) # uploading keys signals - self._pop_mock_call(soledad.client.secrets.events.emit) - soledad.client.secrets.events.emit.assert_called_with( + self._pop_mock_call(soledad.client.secrets.events.emit_async) + soledad.client.secrets.events.emit_async.assert_called_with( catalog.SOLEDAD_UPLOADING_KEYS, ADDRESS, ) - self._pop_mock_call(soledad.client.secrets.events.emit) - soledad.client.secrets.events.emit.assert_called_with( + self._pop_mock_call(soledad.client.secrets.events.emit_async) + soledad.client.secrets.events.emit_async.assert_called_with( catalog.SOLEDAD_DONE_UPLOADING_KEYS, ADDRESS, ) @@ -316,7 +316,7 @@ class SoledadSignalingTestCase(BaseSoledadTest): doc.content = sol.secrets._export_recovery_document() sol.close() # reset mock - soledad.client.secrets.events.emit.reset_mock() + soledad.client.secrets.events.emit_async.reset_mock() # get a fresh instance so it emits all bootstrap signals shared_db = self.get_default_shared_mock(get_doc_return_value=doc) sol = self._soledad_instance( @@ -325,17 +325,17 @@ class SoledadSignalingTestCase(BaseSoledadTest): shared_db_class=shared_db) # reverse call order so we can verify in the order the signals were # expected - soledad.client.secrets.events.emit.mock_calls.reverse() - soledad.client.secrets.events.emit.call_args = \ - soledad.client.secrets.events.emit.call_args_list[0] - soledad.client.secrets.events.emit.call_args_list.reverse() + soledad.client.secrets.events.emit_async.mock_calls.reverse() + soledad.client.secrets.events.emit_async.call_args = \ + soledad.client.secrets.events.emit_async.call_args_list[0] + soledad.client.secrets.events.emit_async.call_args_list.reverse() # assert download keys signals - soledad.client.secrets.events.emit.assert_called_with( + soledad.client.secrets.events.emit_async.assert_called_with( catalog.SOLEDAD_DOWNLOADING_KEYS, ADDRESS, ) - self._pop_mock_call(soledad.client.secrets.events.emit) - soledad.client.secrets.events.emit.assert_called_with( + self._pop_mock_call(soledad.client.secrets.events.emit_async) + soledad.client.secrets.events.emit_async.assert_called_with( catalog.SOLEDAD_DONE_DOWNLOADING_KEYS, ADDRESS, ) @@ -369,7 +369,7 @@ class SoledadSignalingTestCase(BaseSoledadTest): yield sol.sync() # assert the signal has been emitted - soledad.client.events.emit.assert_called_with( + soledad.client.events.emit_async.assert_called_with( catalog.SOLEDAD_DONE_DATA_SYNC, ADDRESS, ) -- cgit v1.2.3 From 703fa8efdfa6a201921bae97edc653e60b977b85 Mon Sep 17 00:00:00 2001 From: Kali Kaneko Date: Wed, 16 Sep 2015 16:52:13 -0400 Subject: [pkg] fix typo in changelog --- CHANGELOG | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG b/CHANGELOG index a3a824cc..877a97d1 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -6,7 +6,7 @@ Client: o Improve how we send information on SOLEDAD_SYNC_SEND_STATUS and in SOLEDAD_SYNC_RECEIVE_STATUS. Related to Feature #7353. o Fix hanging sync by properly waiting db initialization on sync decrypter - pool. Closes #7686. + pool. Closes #7386. o Avoid double decryption of documents. o Fix the order of the events emited for incoming documents. o bugfix: move sync db and encpool creation to api. 
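The status events still fire for every received document, so UIs keep
updating; only the log line is throttled. The idiom in isolation (the
20-document interval matches the patch below, the helper name is
illustrative):

    def maybe_log_progress(logger, count, total, every=20):
        # write one debug line per `every` documents instead of per doc
        if count % every == 0:
            logger.debug("Sync receive status: %d/%d" % (count, total))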
-- cgit v1.2.3


From d38d0aa5836080cfaad90df430ab4a4d14822856 Mon Sep 17 00:00:00 2001
From: Kali Kaneko
Date: Thu, 17 Sep 2015 17:42:47 -0400
Subject: [refactor] decrease verbosity of sync logs

---
 client/src/leap/soledad/client/http_target/fetch.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/client/src/leap/soledad/client/http_target/fetch.py b/client/src/leap/soledad/client/http_target/fetch.py
index 57578563..65e576d9 100644
--- a/client/src/leap/soledad/client/http_target/fetch.py
+++ b/client/src/leap/soledad/client/http_target/fetch.py
@@ -247,5 +247,6 @@ def _emit_receive_status(received_docs, total):
     content = {'received': received_docs, 'total': total}
     emit_async(SOLEDAD_SYNC_RECEIVE_STATUS, content)
 
-    msg = "%d/%d" % (received_docs, total)
-    logger.debug("Sync receive status: %s" % msg)
+    if received_docs % 20 == 0:
+        msg = "%d/%d" % (received_docs, total)
+        logger.debug("Sync receive status: %s" % msg)
-- cgit v1.2.3


From ca7f6737e03143ae80add83d41860246f5459eae Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Thu, 17 Sep 2015 19:30:15 -0300
Subject: [bug] ensure needs to check all design docs

This code only checks for the presence of 'docs', while we have 3
design documents. If one of them is missing but 'docs' is not, the
others are never ensured. This is needed to properly ensure ddocs from
the create command-line script.
---
 common/src/leap/soledad/common/couch.py | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py
index 82b5aca8..1c762036 100644
--- a/common/src/leap/soledad/common/couch.py
+++ b/common/src/leap/soledad/common/couch.py
@@ -485,13 +485,10 @@ class CouchDatabase(CommonBackend):
         Ensure that the design documents used by the backend exist on the
         couch database.
         """
-        # we check for existence of one of the files, and put all of them if
-        # that one does not exist
-        try:
-            self._database['_design/docs']
-            return
-        except ResourceNotFound:
-            for ddoc_name in ['docs', 'syncs', 'transactions']:
+        for ddoc_name in ['docs', 'syncs', 'transactions']:
+            try:
+                self._database.info(ddoc_name)
+            except ResourceNotFound:
                 ddoc = json.loads(
                     binascii.a2b_base64(
                         getattr(ddocs, ddoc_name)))
-- cgit v1.2.3


From 5b0e854096f35655037f4da07031741087155a7d Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Thu, 17 Sep 2015 19:40:49 -0300
Subject: [tests] test for ensure ddocs independently

This tests the previous fix by ensuring a db that is missing a design
doc other than 'docs'.
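For context, the check from the previous fix queries each design
document independently instead of short-circuiting on '_design/docs'.
A minimal standalone sketch of that loop using couchdb-python follows;
the server URL and database name are placeholders, not values taken
from these patches:

    import couchdb
    from couchdb.http import ResourceNotFound

    DDOC_NAMES = ('docs', 'syncs', 'transactions')

    def missing_ddocs(db, names=DDOC_NAMES):
        # Query each design doc on its own, so a single missing ddoc
        # is detected even when '_design/docs' itself is present.
        missing = []
        for name in names:
            try:
                db.info(name)  # raises ResourceNotFound when absent
            except ResourceNotFound:
                missing.append(name)
        return missing

    # hypothetical usage:
    # server = couchdb.Server('http://127.0.0.1:5984')
    # print(missing_ddocs(server['some-user-db']))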
---
 common/src/leap/soledad/common/tests/test_couch.py | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py
index 845f1602..a08ffd16 100644
--- a/common/src/leap/soledad/common/tests/test_couch.py
+++ b/common/src/leap/soledad/common/tests/test_couch.py
@@ -1320,8 +1320,9 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
     def setUp(self):
         CouchDBTestCase.setUp(self)
 
-    def create_db(self, ensure=True):
-        dbname = ('test-%s' % uuid4().hex)
+    def create_db(self, ensure=True, dbname=None):
+        if not dbname:
+            dbname = ('test-%s' % uuid4().hex)
         self.db = couch.CouchDatabase.open_database(
             urljoin('http://127.0.0.1:%d' % self.couch_port, dbname),
             create=True,
@@ -1492,3 +1493,16 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
         self.assertRaises(
             errors.MissingDesignDocDeletedError,
             self.db._do_set_replica_gen_and_trans_id, 1, 2, 3)
+
+    def test_ensure_ddoc_independently(self):
+        """
+        Test that missing ddocs other than _design/docs will be ensured
+        even if _design/docs is there.
+        """
+        self.create_db(ensure=True)
+        del self.db._database['_design/transactions']
+        self.assertRaises(
+            errors.MissingDesignDocDeletedError,
+            self.db._get_transaction_log)
+        self.create_db(ensure=True, dbname=self.db._dbname)
+        self.db._get_transaction_log()
-- cgit v1.2.3


From 733893d2fe39c2573c896d0e05cd29f9983cdbce Mon Sep 17 00:00:00 2001
From: Kali Kaneko
Date: Fri, 18 Sep 2015 00:59:43 -0400
Subject: [bug] set the received active secret before saving local file

- bug: we were dumping the received secrets locally to disk *before*
  setting the received property for the active secret, and therefore
  the 'active_secret' was always marked as null.

- refactor common code into a utility method (a sketch of the corrected
  ordering follows the final patch below).
---
 client/changes/bug_set-active-secret      |  1 +
 client/src/leap/soledad/client/secrets.py | 23 ++++++++++++-----------
 2 files changed, 13 insertions(+), 11 deletions(-)
 create mode 100644 client/changes/bug_set-active-secret

diff --git a/client/changes/bug_set-active-secret b/client/changes/bug_set-active-secret
new file mode 100644
index 00000000..8c234b25
--- /dev/null
+++ b/client/changes/bug_set-active-secret
@@ -0,0 +1 @@
+o [bug] Set active secret before saving local file.
diff --git a/client/src/leap/soledad/client/secrets.py b/client/src/leap/soledad/client/secrets.py
index 9aadd72a..c3c3dff5 100644
--- a/client/src/leap/soledad/client/secrets.py
+++ b/client/src/leap/soledad/client/secrets.py
@@ -261,6 +261,16 @@ class SoledadSecrets(object):
         logger.info("Could not find a secret in local storage.")
         return False
 
+    def _maybe_set_active_secret(self, active_secret):
+        """
+        If no secret_id is set yet, choose the passed active secret, or
+        just choose the first available secret if none.
+        """
+        if not self._secret_id:
+            if not active_secret:
+                active_secret = self._secrets.items()[0][0]
+            self.set_secret_id(active_secret)
+
     def _load_secrets(self):
         """
         Load storage secrets from local file.
@@ -270,12 +280,7 @@ class SoledadSecrets(object): with open(self._secrets_path, 'r') as f: content = json.loads(f.read()) _, active_secret = self._import_recovery_document(content) - # choose first secret if no secret_id was given - if self._secret_id is None: - if active_secret is None: - self.set_secret_id(self._secrets.items()[0][0]) - else: - self.set_secret_id(active_secret) + self._maybe_set_active_secret(active_secret) # enlarge secret if needed enlarged = False if len(self._secrets[self._secret_id]) < self.GEN_SECRET_LENGTH: @@ -306,12 +311,8 @@ class SoledadSecrets(object): 'Found cryptographic secrets in shared recovery ' 'database.') _, active_secret = self._import_recovery_document(doc.content) + self._maybe_set_active_secret(active_secret) self._store_secrets() # save new secrets in local file - if self._secret_id is None: - if active_secret is None: - self.set_secret_id(self._secrets.items()[0][0]) - else: - self.set_secret_id(active_secret) else: # STAGE 3 - there are no secrets in server also, so # generate a secret and store it in remote db. -- cgit v1.2.3 From 4be6f05d91891122e83f74d21c83c5f8fcd3a618 Mon Sep 17 00:00:00 2001 From: Ivan Alejandro Date: Mon, 21 Sep 2015 19:06:51 -0300 Subject: [pkg] fold in changes --- CHANGELOG | 16 ++++++++++++++++ client/changes/bug-7412_sync-status-broken | 1 - client/changes/bug_set-active-secret | 1 - client/changes/refactor_improve_http_target | 3 --- common/changes/refactor_couch | 2 -- server/changes/bug_badrequest | 1 - 6 files changed, 16 insertions(+), 8 deletions(-) delete mode 100644 client/changes/bug-7412_sync-status-broken delete mode 100644 client/changes/bug_set-active-secret delete mode 100644 client/changes/refactor_improve_http_target delete mode 100644 common/changes/refactor_couch delete mode 100644 server/changes/bug_badrequest diff --git a/CHANGELOG b/CHANGELOG index 877a97d1..f35f7830 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,19 @@ +0.7.3 Sep 22, 2015: +Client: + o Bugfix: refactor code loss. Closes #7412. + o Bugfix: Set active secret before saving local file. + o Split http_target into 4 modules, separating those responsibilities. + o Refactor details of making an HTTP request body and headers out of the + send/fetch logic. This also makes it easier to enable batching. + +Server: + o Fix a bug where BadRequest could be raised after everything was persisted. + +Common: + o Refactor couch.py to separate persistence from logic while saving uploaded + documents. Also simplify logic while checking for conflicts. + + 0.7.2 Aug 26, 2015: Client: o Remove MAC from secrets file. Closes #6980. diff --git a/client/changes/bug-7412_sync-status-broken b/client/changes/bug-7412_sync-status-broken deleted file mode 100644 index b6800bc5..00000000 --- a/client/changes/bug-7412_sync-status-broken +++ /dev/null @@ -1 +0,0 @@ -o Fix refactor code loss. Closes #7412. diff --git a/client/changes/bug_set-active-secret b/client/changes/bug_set-active-secret deleted file mode 100644 index 8c234b25..00000000 --- a/client/changes/bug_set-active-secret +++ /dev/null @@ -1 +0,0 @@ -o [bug] Set active secret before saving local file. diff --git a/client/changes/refactor_improve_http_target b/client/changes/refactor_improve_http_target deleted file mode 100644 index a8fe5f60..00000000 --- a/client/changes/refactor_improve_http_target +++ /dev/null @@ -1,3 +0,0 @@ - o Split http_target into 4 modules, separating those responsibilities. - o Refactor details of making an HTTP request body and headers out of the - send/fetch logic. 
This also makes it easier to enable batching. diff --git a/common/changes/refactor_couch b/common/changes/refactor_couch deleted file mode 100644 index 2f36b97f..00000000 --- a/common/changes/refactor_couch +++ /dev/null @@ -1,2 +0,0 @@ - o Refactor couch.py to separate persistence from logic while saving uploaded - documents. Also simplify logic while checking for conflicts. diff --git a/server/changes/bug_badrequest b/server/changes/bug_badrequest deleted file mode 100644 index 74901476..00000000 --- a/server/changes/bug_badrequest +++ /dev/null @@ -1 +0,0 @@ - o Fix a bug where BadRequest could be raised after everything was persisted -- cgit v1.2.3
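
To illustrate the active-secret fix in commit 733893d2 above: saving
the secrets file serializes whichever secret id is active at that
moment, so the active secret has to be chosen before the file is
written. A minimal sketch of that ordering follows; the class and the
on-disk format here are simplified stand-ins, not the real
SoledadSecrets implementation:

    import json

    class SecretsSketch(object):

        def __init__(self, path):
            self._path = path
            self._secret_id = None
            self._secrets = {}

        def _maybe_set_active_secret(self, active_secret):
            # If no secret is active yet, prefer the id named by the
            # recovery document, falling back to any stored secret.
            if not self._secret_id:
                if not active_secret:
                    active_secret = list(self._secrets)[0]
                self._secret_id = active_secret

        def _store_secrets(self):
            # Persists the currently active secret id; calling this
            # before _maybe_set_active_secret would write null, which
            # is the bug the commit fixes.
            with open(self._path, 'w') as f:
                json.dump({'active_secret': self._secret_id,
                           'storage_secrets': self._secrets}, f)

        def receive(self, secrets, active_secret):
            # Corrected ordering: choose the active secret, then save.
            self._secrets = secrets
            self._maybe_set_active_secret(active_secret)
            self._store_secrets()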