From fd6c054bf11deba4ca5680cc406db7d4ce98d58d Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 14 Aug 2015 22:08:27 -0300 Subject: [refactor] split put_doc_if_newer in two operations Those two operations were mixed on put_doc_if_newer, extracting should make it more clear. --- common/src/leap/soledad/common/couch.py | 37 ++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 10 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 6c28e0be..52fc2169 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -1140,9 +1140,11 @@ class CouchDatabase(CommonBackend): :param sync_id: The id of the current sync session. :type sync_id: str """ - self._do_set_replica_gen_and_trans_id( - other_replica_uid, other_generation, other_transaction_id, - number_of_docs=number_of_docs, doc_idx=doc_idx, sync_id=sync_id) + if other_replica_uid is not None and other_generation is not None: + self._do_set_replica_gen_and_trans_id( + other_replica_uid, other_generation, other_transaction_id, + number_of_docs=number_of_docs, doc_idx=doc_idx, + sync_id=sync_id) def _do_set_replica_gen_and_trans_id( self, other_replica_uid, other_generation, other_transaction_id, @@ -1392,6 +1394,27 @@ class CouchDatabase(CommonBackend): 'converged', at_gen is the insertion/current generation. :rtype: (str, int) """ + self._save_source_info(replica_uid, replica_gen, + replica_trans_id, number_of_docs, + doc_idx, sync_id) + state = self._process_incoming_doc(doc, save_conflict) + return state, self._get_generation() + + def _save_source_info(self, replica_uid, replica_gen, replica_trans_id, + number_of_docs, doc_idx, sync_id): + """ + Validate and save source information. + """ + self._validate_source(replica_uid, replica_gen, replica_trans_id) + self._set_replica_gen_and_trans_id( + replica_uid, replica_gen, replica_trans_id, + number_of_docs=number_of_docs, doc_idx=doc_idx, + sync_id=sync_id) + + def _process_incoming_doc(self, doc, save_conflict): + """ + Check document, save and return state. + """ cur_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) # at this point, `doc` has arrived from the other syncing party, and # we will decide what to do with it. 
@@ -1408,7 +1431,6 @@ class CouchDatabase(CommonBackend): cur_vcr = vectorclock.VectorClockRev(None) else: cur_vcr = vectorclock.VectorClockRev(cur_doc.rev) - self._validate_source(replica_uid, replica_gen, replica_trans_id) if doc_vcr.is_newer(cur_vcr): rev = doc.rev self._prune_conflicts(doc, doc_vcr) @@ -1437,11 +1459,6 @@ class CouchDatabase(CommonBackend): state = 'conflicted' if save_conflict: self._force_doc_sync_conflict(doc) - if replica_uid is not None and replica_gen is not None: - self._set_replica_gen_and_trans_id( - replica_uid, replica_gen, replica_trans_id, - number_of_docs=number_of_docs, doc_idx=doc_idx, - sync_id=sync_id) # update info old_doc.rev = doc.rev if doc.is_tombstone(): @@ -1449,7 +1466,7 @@ class CouchDatabase(CommonBackend): else: old_doc.content = doc.content old_doc.has_conflicts = doc.has_conflicts - return state, self._get_generation() + return state def get_docs(self, doc_ids, check_for_conflicts=True, include_deleted=False): -- cgit v1.2.3 From a734b994446a8c0c3cadf175a71f0d61d18c408b Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 14 Aug 2015 23:20:59 -0300 Subject: [refactor] simplify the case of a brand new doc If a doc is not present on database at all, it will simply get inserted. This commit makes this clear and skips unnecessary checks. --- common/src/leap/soledad/common/couch.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 52fc2169..d61eac51 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -1421,16 +1421,16 @@ class CouchDatabase(CommonBackend): # First, we prepare the arriving doc to update couch database. old_doc = doc doc = self._factory(doc.doc_id, doc.rev, doc.get_json()) - if cur_doc is not None: + if cur_doc is None: + self._put_doc(cur_doc, doc) + return 'inserted' + else: doc.couch_rev = cur_doc.couch_rev # fetch conflicts because we will eventually manipulate them doc._ensure_fetch_conflicts(self._get_conflicts) # from now on, it works just like u1db sqlite backend doc_vcr = vectorclock.VectorClockRev(doc.rev) - if cur_doc is None: - cur_vcr = vectorclock.VectorClockRev(None) - else: - cur_vcr = vectorclock.VectorClockRev(cur_doc.rev) + cur_vcr = vectorclock.VectorClockRev(cur_doc.rev) if doc_vcr.is_newer(cur_vcr): rev = doc.rev self._prune_conflicts(doc, doc_vcr) -- cgit v1.2.3 From 25cca13779f69bdd665b37e6ce9296540be823e3 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 14:25:35 -0300 Subject: [refactor] removing getters and setters from couch.py This is not needed, the behavior under them is the same as an assignment. 
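As a quick illustration of the pattern this commit removes (a standalone sketch, not part of the patch): a property backed by a trivial getter/setter pair behaves exactly like a plain attribute on both read and assignment, so the extra indirection buys nothing.

    class WithProperty(object):
        def __init__(self):
            self._couch_rev = None

        def _get_couch_rev(self):
            return self._couch_rev

        def _set_couch_rev(self, rev):
            self._couch_rev = rev

        couch_rev = property(_get_couch_rev, _set_couch_rev)

    class WithAttribute(object):
        def __init__(self):
            self.couch_rev = None

    a, b = WithProperty(), WithAttribute()
    a.couch_rev = b.couch_rev = '1-abc'
    assert a.couch_rev == b.couch_rev == '1-abc'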
--- common/src/leap/soledad/common/couch.py | 43 +++------------------------------ 1 file changed, 4 insertions(+), 39 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index d61eac51..96e00fa2 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -104,9 +104,9 @@ class CouchDocument(SoledadDocument): :type syncable: bool """ SoledadDocument.__init__(self, doc_id, rev, json, has_conflicts) - self._couch_rev = None + self.couch_rev = None self._conflicts = None - self._transactions = None + self.transactions = None def _ensure_fetch_conflicts(self, get_conflicts_fun): """ @@ -167,22 +167,6 @@ class CouchDocument(SoledadDocument): self._conflicts) self.has_conflicts = len(self._conflicts) > 0 - def _get_couch_rev(self): - return self._couch_rev - - def _set_couch_rev(self, rev): - self._couch_rev = rev - - couch_rev = property(_get_couch_rev, _set_couch_rev) - - def _get_transactions(self): - return self._transactions - - def _set_transactions(self, rev): - self._transactions = rev - - transactions = property(_get_transactions, _set_transactions) - # monkey-patch the u1db http app to use CouchDocument http_app.Document = CouchDocument @@ -1563,7 +1547,7 @@ class CouchServerState(ServerState): :param couch_url: The URL for the couch database. :type couch_url: str """ - self._couch_url = couch_url + self.couch_url = couch_url def open_database(self, dbname): """ @@ -1576,7 +1560,7 @@ class CouchServerState(ServerState): :rtype: CouchDatabase """ return CouchDatabase( - self._couch_url, + self.couch_url, dbname, ensure_ddocs=False) @@ -1610,22 +1594,3 @@ class CouchServerState(ServerState): delete databases. """ raise Unauthorized() - - def _set_couch_url(self, url): - """ - Set the couchdb URL - - :param url: CouchDB URL - :type url: str - """ - self._couch_url = url - - def _get_couch_url(self): - """ - Return CouchDB URL - - :rtype: str - """ - return self._couch_url - - couch_url = property(_get_couch_url, _set_couch_url, doc='CouchDB URL') -- cgit v1.2.3 From b767750d5aff2c9175a0bcc247c2b9c0d8ca08be Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 15:07:21 -0300 Subject: [refactor] remove unused parameter --- common/src/leap/soledad/common/couch.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 96e00fa2..c003b14e 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -87,8 +87,7 @@ class CouchDocument(SoledadDocument): atomic and consistent update of the database. """ - def __init__(self, doc_id=None, rev=None, json='{}', has_conflicts=False, - syncable=True): + def __init__(self, doc_id=None, rev=None, json='{}', has_conflicts=False): """ Container for handling a document that is stored in couch backend. @@ -100,8 +99,6 @@ class CouchDocument(SoledadDocument): :type json: str :param has_conflicts: Boolean indicating if this document has conflicts :type has_conflicts: bool - :param syncable: Should this document be synced with remote replicas? 
- :type syncable: bool """ SoledadDocument.__init__(self, doc_id, rev, json, has_conflicts) self.couch_rev = None -- cgit v1.2.3 From 673835b1dc53dc0e1d0363afdc57c7c6917f8628 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 17:35:42 -0300 Subject: [refactor] simplify conflicts management Adding, removing and checking conflicts is an operation done by the model, the Database shouldn't be aware of that. Fetching and saving also is not model's responsability. Repetition remove as well. --- common/src/leap/soledad/common/couch.py | 98 ++++++--------------------------- 1 file changed, 16 insertions(+), 82 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index c003b14e..126c6b51 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -102,23 +102,8 @@ class CouchDocument(SoledadDocument): """ SoledadDocument.__init__(self, doc_id, rev, json, has_conflicts) self.couch_rev = None - self._conflicts = None self.transactions = None - def _ensure_fetch_conflicts(self, get_conflicts_fun): - """ - Ensure conflict data has been fetched from the server. - - :param get_conflicts_fun: A function which, given the document id and - the couch revision, return the conflicted - versions of the current document. - :type get_conflicts_fun: function - """ - if self._conflicts is None: - self._conflicts = get_conflicts_fun(self.doc_id, - couch_rev=self.couch_rev) - self.has_conflicts = len(self._conflicts) > 0 - def get_conflicts(self): """ Get the conflicted versions of the document. @@ -146,7 +131,7 @@ class CouchDocument(SoledadDocument): :type doc: CouchDocument """ if self._conflicts is None: - raise Exception("Run self._ensure_fetch_conflicts first!") + raise Exception("Fetch conflicts first!") self._conflicts.append(doc) self.has_conflicts = len(self._conflicts) > 0 @@ -158,7 +143,7 @@ class CouchDocument(SoledadDocument): :type conflict_revs: [str] """ if self._conflicts is None: - raise Exception("Run self._ensure_fetch_conflicts first!") + raise Exception("Fetch conflicts first!") self._conflicts = filter( lambda doc: doc.rev not in conflict_revs, self._conflicts) @@ -731,7 +716,6 @@ class CouchDatabase(CommonBackend): if check_for_conflicts \ and '_attachments' in result \ and 'u1db_conflicts' in result['_attachments']: - doc.has_conflicts = True doc.set_conflicts( self._build_conflicts( doc.doc_id, @@ -1025,7 +1009,7 @@ class CouchDatabase(CommonBackend): conflicts.append(doc) return conflicts - def _get_conflicts(self, doc_id, couch_rev=None): + def get_doc_conflicts(self, doc_id, couch_rev=None): """ Get the conflicted versions of a document. @@ -1040,32 +1024,21 @@ class CouchDatabase(CommonBackend): """ # request conflicts attachment from server params = {} + conflicts = [] if couch_rev is not None: params['rev'] = couch_rev # restric document's couch revision + else: + # TODO: move into resource logic! + first_entry = self._get_doc(doc_id, check_for_conflicts=True) + conflicts.append(first_entry) resource = self._database.resource(doc_id, 'u1db_conflicts') try: response = resource.get_json(**params) - return self._build_conflicts( + return conflicts + self._build_conflicts( doc_id, json.loads(response[2].read())) except ResourceNotFound: return [] - def get_doc_conflicts(self, doc_id): - """ - Get the list of conflicts for the given document. - - The order of the conflicts is such that the first entry is the value - that would be returned by "get_doc". 
- - :return: A list of the document entries that are conflicted. - :rtype: [CouchDocument] - """ - conflict_docs = self._get_conflicts(doc_id) - if len(conflict_docs) == 0: - return [] - this_doc = self._get_doc(doc_id, check_for_conflicts=True) - return [this_doc] + conflict_docs - def _get_replica_gen_and_trans_id(self, other_replica_uid): """ Return the last known generation and transaction id for the other db @@ -1189,43 +1162,6 @@ class CouchDatabase(CommonBackend): except ResourceNotFound as e: raise_missing_design_doc_error(e, ddoc_path) - def _add_conflict(self, doc, my_doc_rev, my_content): - """ - Add a conflict to the document. - - Note that this method does not actually update the backend; rather, it - updates the CouchDocument object which will provide the conflict data - when the atomic document update is made. - - :param doc: The document to have conflicts added to. - :type doc: CouchDocument - :param my_doc_rev: The revision of the conflicted document. - :type my_doc_rev: str - :param my_content: The content of the conflicted document as a JSON - serialized string. - :type my_content: str - """ - doc._ensure_fetch_conflicts(self._get_conflicts) - doc.add_conflict( - self._factory(doc_id=doc.doc_id, rev=my_doc_rev, - json=my_content)) - - def _delete_conflicts(self, doc, conflict_revs): - """ - Delete the conflicted revisions from the list of conflicts of C{doc}. - - Note that this method does not actually update the backend; rather, it - updates the CouchDocument object which will provide the conflict data - when the atomic document update is made. - - :param doc: The document to have conflicts deleted. - :type doc: CouchDocument - :param conflict_revs: A list of the revisions to be deleted. - :param conflict_revs: [str] - """ - doc._ensure_fetch_conflicts(self._get_conflicts) - doc.delete_conflicts(conflict_revs) - def _prune_conflicts(self, doc, doc_vcr): """ Prune conflicts that are older then the current document's revision, or @@ -1251,7 +1187,7 @@ class CouchDatabase(CommonBackend): if autoresolved: doc_vcr.increment(self._replica_uid) doc.rev = doc_vcr.as_str() - self._delete_conflicts(doc, c_revs_to_prune) + doc.delete_conflicts(c_revs_to_prune) def _force_doc_sync_conflict(self, doc): """ @@ -1262,8 +1198,7 @@ class CouchDatabase(CommonBackend): """ my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) self._prune_conflicts(doc, vectorclock.VectorClockRev(doc.rev)) - self._add_conflict(doc, my_doc.rev, my_doc.get_json()) - doc.has_conflicts = True + doc.add_conflict(my_doc) self._put_doc(my_doc, doc) def resolve_doc(self, doc, conflicted_doc_revs): @@ -1308,14 +1243,14 @@ class CouchDatabase(CommonBackend): # the newer doc version will supersede the one in the database, so # we copy conflicts before updating the backend. doc.set_conflicts(cur_doc.get_conflicts()) # copy conflicts over. - self._delete_conflicts(doc, superseded_revs) + doc.delete_conflicts(superseded_revs) self._put_doc(cur_doc, doc) else: # the newer doc version does not supersede the one in the # database, so we will add a conflict to the database and copy # those over to the document the user has in her hands. - self._add_conflict(cur_doc, new_rev, doc.get_json()) - self._delete_conflicts(cur_doc, superseded_revs) + cur_doc.add_conflict(doc) + cur_doc.delete_conflicts(superseded_revs) self._put_doc(cur_doc, cur_doc) # just update conflicts # backend has been updated with current conflicts, now copy them # to the current document. 
@@ -1405,10 +1340,9 @@ class CouchDatabase(CommonBackend): if cur_doc is None: self._put_doc(cur_doc, doc) return 'inserted' - else: - doc.couch_rev = cur_doc.couch_rev + doc.couch_rev = cur_doc.couch_rev + doc.set_conflicts(self.get_doc_conflicts(doc.doc_id, doc.couch_rev)) # fetch conflicts because we will eventually manipulate them - doc._ensure_fetch_conflicts(self._get_conflicts) # from now on, it works just like u1db sqlite backend doc_vcr = vectorclock.VectorClockRev(doc.rev) cur_vcr = vectorclock.VectorClockRev(cur_doc.rev) -- cgit v1.2.3 From 7f262d68c0afe43a3bd3864a19aac46051de7571 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 18:53:38 -0300 Subject: [refactor] prune conflicts is Document's responsibility --- common/src/leap/soledad/common/couch.py | 58 ++++++++++++++++----------------- 1 file changed, 29 insertions(+), 29 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 126c6b51..6183be6e 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -149,6 +149,33 @@ class CouchDocument(SoledadDocument): self._conflicts) self.has_conflicts = len(self._conflicts) > 0 + def _prune_conflicts(self, doc_vcr, autoresolved_increment): + """ + Prune conflicts that are older then the current document's revision, or + whose content match to the current document's content. + + :param doc: The document to have conflicts pruned. + :type doc: CouchDocument + :param doc_vcr: A vector clock representing the current document's + revision. + :type doc_vcr: u1db.vectorclock.VectorClock + """ + if self.has_conflicts: + autoresolved = False + c_revs_to_prune = [] + for c_doc in self._conflicts: + c_vcr = vectorclock.VectorClockRev(c_doc.rev) + if doc_vcr.is_newer(c_vcr): + c_revs_to_prune.append(c_doc.rev) + elif self.same_content_as(c_doc): + c_revs_to_prune.append(c_doc.rev) + doc_vcr.maximize(c_vcr) + autoresolved = True + if autoresolved: + doc_vcr.increment(autoresolved_increment) + self.rev = doc_vcr.as_str() + self.delete_conflicts(c_revs_to_prune) + # monkey-patch the u1db http app to use CouchDocument http_app.Document = CouchDocument @@ -1162,33 +1189,6 @@ class CouchDatabase(CommonBackend): except ResourceNotFound as e: raise_missing_design_doc_error(e, ddoc_path) - def _prune_conflicts(self, doc, doc_vcr): - """ - Prune conflicts that are older then the current document's revision, or - whose content match to the current document's content. - - :param doc: The document to have conflicts pruned. - :type doc: CouchDocument - :param doc_vcr: A vector clock representing the current document's - revision. - :type doc_vcr: u1db.vectorclock.VectorClock - """ - if doc.has_conflicts is True: - autoresolved = False - c_revs_to_prune = [] - for c_doc in doc.get_conflicts(): - c_vcr = vectorclock.VectorClockRev(c_doc.rev) - if doc_vcr.is_newer(c_vcr): - c_revs_to_prune.append(c_doc.rev) - elif doc.same_content_as(c_doc): - c_revs_to_prune.append(c_doc.rev) - doc_vcr.maximize(c_vcr) - autoresolved = True - if autoresolved: - doc_vcr.increment(self._replica_uid) - doc.rev = doc_vcr.as_str() - doc.delete_conflicts(c_revs_to_prune) - def _force_doc_sync_conflict(self, doc): """ Add a conflict and force a document put. 
@@ -1197,7 +1197,7 @@ class CouchDatabase(CommonBackend): :type doc: CouchDocument """ my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) - self._prune_conflicts(doc, vectorclock.VectorClockRev(doc.rev)) + doc._prune_conflicts(vectorclock.VectorClockRev(doc.rev), self._replica_uid) doc.add_conflict(my_doc) self._put_doc(my_doc, doc) @@ -1348,7 +1348,7 @@ class CouchDatabase(CommonBackend): cur_vcr = vectorclock.VectorClockRev(cur_doc.rev) if doc_vcr.is_newer(cur_vcr): rev = doc.rev - self._prune_conflicts(doc, doc_vcr) + doc._prune_conflicts(doc_vcr, self._replica_uid) if doc.rev != rev: # conflicts have been autoresolved state = 'superseded' -- cgit v1.2.3 From 6ce4114a19ca8a363f264bfc654d839d266dde5c Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 20:23:14 -0300 Subject: [refactor] _process_incoming_doc is a function now This was being calculated inside CouchDatabase, but it is not a persistence responsibility. It clearly doesn't belong to this persistence layer and seeing both sides separated allow us to work better on both parts. --- common/src/leap/soledad/common/couch.py | 121 +++++++++++++++++--------------- 1 file changed, 64 insertions(+), 57 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 6183be6e..13808502 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -149,6 +149,15 @@ class CouchDocument(SoledadDocument): self._conflicts) self.has_conflicts = len(self._conflicts) > 0 + def update(self, new_doc): + # update info + self.rev = new_doc.rev + if new_doc.is_tombstone(): + self.is_tombstone() + else: + self.content = new_doc.content + self.has_conflicts = new_doc.has_conflicts + def _prune_conflicts(self, doc_vcr, autoresolved_increment): """ Prune conflicts that are older then the current document's revision, or @@ -1313,7 +1322,13 @@ class CouchDatabase(CommonBackend): self._save_source_info(replica_uid, replica_gen, replica_trans_id, number_of_docs, doc_idx, sync_id) - state = self._process_incoming_doc(doc, save_conflict) + my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) + if my_doc is not None: + my_doc.set_conflicts(self.get_doc_conflicts(my_doc.doc_id, my_doc.couch_rev)) + state, save_doc = _process_incoming_doc(my_doc, doc, save_conflict, self.replica_uid) + if save_doc: + self._put_doc(my_doc, save_doc) + doc.update(save_doc) return state, self._get_generation() def _save_source_info(self, replica_uid, replica_gen, replica_trans_id, @@ -1327,62 +1342,6 @@ class CouchDatabase(CommonBackend): number_of_docs=number_of_docs, doc_idx=doc_idx, sync_id=sync_id) - def _process_incoming_doc(self, doc, save_conflict): - """ - Check document, save and return state. - """ - cur_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) - # at this point, `doc` has arrived from the other syncing party, and - # we will decide what to do with it. - # First, we prepare the arriving doc to update couch database. 
- old_doc = doc - doc = self._factory(doc.doc_id, doc.rev, doc.get_json()) - if cur_doc is None: - self._put_doc(cur_doc, doc) - return 'inserted' - doc.couch_rev = cur_doc.couch_rev - doc.set_conflicts(self.get_doc_conflicts(doc.doc_id, doc.couch_rev)) - # fetch conflicts because we will eventually manipulate them - # from now on, it works just like u1db sqlite backend - doc_vcr = vectorclock.VectorClockRev(doc.rev) - cur_vcr = vectorclock.VectorClockRev(cur_doc.rev) - if doc_vcr.is_newer(cur_vcr): - rev = doc.rev - doc._prune_conflicts(doc_vcr, self._replica_uid) - if doc.rev != rev: - # conflicts have been autoresolved - state = 'superseded' - else: - state = 'inserted' - self._put_doc(cur_doc, doc) - elif doc.rev == cur_doc.rev: - # magical convergence - state = 'converged' - elif cur_vcr.is_newer(doc_vcr): - # Don't add this to seen_ids, because we have something newer, - # so we should send it back, and we should not generate a - # conflict - state = 'superseded' - elif cur_doc.same_content_as(doc): - # the documents have been edited to the same thing at both ends - doc_vcr.maximize(cur_vcr) - doc_vcr.increment(self._replica_uid) - doc.rev = doc_vcr.as_str() - self._put_doc(cur_doc, doc) - state = 'superseded' - else: - state = 'conflicted' - if save_conflict: - self._force_doc_sync_conflict(doc) - # update info - old_doc.rev = doc.rev - if doc.is_tombstone(): - old_doc.is_tombstone() - else: - old_doc.content = doc.content - old_doc.has_conflicts = doc.has_conflicts - return state - def get_docs(self, doc_ids, check_for_conflicts=True, include_deleted=False): """ @@ -1525,3 +1484,51 @@ class CouchServerState(ServerState): delete databases. """ raise Unauthorized() + + +def _process_incoming_doc(my_doc, other_doc, save_conflict, replica_uid): + """ + Check document, save and return state. + """ + # at this point, `doc` has arrived from the other syncing party, and + # we will decide what to do with it. + # First, we prepare the arriving doc to update couch database. 
+ new_doc = CouchDocument(other_doc.doc_id, other_doc.rev, other_doc.get_json()) + if my_doc is None: + return 'inserted', new_doc + new_doc.couch_rev = my_doc.couch_rev + new_doc.set_conflicts(my_doc.get_conflicts()) + # fetch conflicts because we will eventually manipulate them + # from now on, it works just like u1db sqlite backend + doc_vcr = vectorclock.VectorClockRev(new_doc.rev) + cur_vcr = vectorclock.VectorClockRev(my_doc.rev) + if doc_vcr.is_newer(cur_vcr): + rev = new_doc.rev + new_doc._prune_conflicts(doc_vcr, replica_uid) + if new_doc.rev != rev: + # conflicts have been autoresolved + return 'superseded', new_doc + else: + return'inserted', new_doc + elif new_doc.rev == my_doc.rev: + # magical convergence + return 'converged', None + elif cur_vcr.is_newer(doc_vcr): + # Don't add this to seen_ids, because we have something newer, + # so we should send it back, and we should not generate a + # conflict + other_doc.update(new_doc) + return 'superseded', None + elif my_doc.same_content_as(new_doc): + # the documents have been edited to the same thing at both ends + doc_vcr.maximize(cur_vcr) + doc_vcr.increment(replica_uid) + new_doc.rev = doc_vcr.as_str() + return 'superseded', new_doc + else: + if save_conflict: + new_doc._prune_conflicts(vectorclock.VectorClockRev(new_doc.rev), replica_uid) + new_doc.add_conflict(my_doc) + return 'conflicted', new_doc + other_doc.update(new_doc) + return 'conflicted', None -- cgit v1.2.3 From 5926e11838a6d71aa3b7865b52b2ba2fab30b203 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 17 Aug 2015 21:16:35 -0300 Subject: [bug] check type before processing Necessary methods are on CouchDocument, but we accept a Document as well, in this case self._factory is needed. Will be simpler soon. --- common/src/leap/soledad/common/couch.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 13808502..3ae79382 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -1319,6 +1319,8 @@ class CouchDatabase(CommonBackend): 'converged', at_gen is the insertion/current generation. :rtype: (str, int) """ + if type(doc) is not CouchDocument: + doc = self._factory(doc.doc_id, doc.rev, doc.get_json()) self._save_source_info(replica_uid, replica_gen, replica_trans_id, number_of_docs, doc_idx, sync_id) -- cgit v1.2.3 From 8654021f8719cf9d0f17f9d58e4455074aa43bb9 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Wed, 19 Aug 2015 19:09:59 -0300 Subject: [bug] fixes small issues pointed by drebs * file headers * variable names * missing docstrings * prune_conflicts ** extra: tests failed on a 1-based index bug --- common/src/leap/soledad/common/couch.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 3ae79382..90f1a36f 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -158,10 +158,11 @@ class CouchDocument(SoledadDocument): self.content = new_doc.content self.has_conflicts = new_doc.has_conflicts - def _prune_conflicts(self, doc_vcr, autoresolved_increment): + def prune_conflicts(self, doc_vcr, autoresolved_increment): """ Prune conflicts that are older then the current document's revision, or whose content match to the current document's content. 
+ Originally in u1db.CommonBackend :param doc: The document to have conflicts pruned. :type doc: CouchDocument @@ -1206,7 +1207,7 @@ class CouchDatabase(CommonBackend): :type doc: CouchDocument """ my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) - doc._prune_conflicts(vectorclock.VectorClockRev(doc.rev), self._replica_uid) + doc.prune_conflicts(vectorclock.VectorClockRev(doc.rev), self._replica_uid) doc.add_conflict(my_doc) self._put_doc(my_doc, doc) @@ -1388,6 +1389,13 @@ class CouchDatabase(CommonBackend): continue yield t._doc + def _prune_conflicts(self, doc, doc_vcr): + """ + Overrides original method, but it is implemented elsewhere for + simplicity. + """ + doc.prune_conflicts(doc_vcr, self._replica_uid) + def _new_resource(self, *path): """ Return a new resource for accessing a couch database. @@ -1506,7 +1514,7 @@ def _process_incoming_doc(my_doc, other_doc, save_conflict, replica_uid): cur_vcr = vectorclock.VectorClockRev(my_doc.rev) if doc_vcr.is_newer(cur_vcr): rev = new_doc.rev - new_doc._prune_conflicts(doc_vcr, replica_uid) + new_doc.prune_conflicts(doc_vcr, replica_uid) if new_doc.rev != rev: # conflicts have been autoresolved return 'superseded', new_doc @@ -1529,7 +1537,7 @@ def _process_incoming_doc(my_doc, other_doc, save_conflict, replica_uid): return 'superseded', new_doc else: if save_conflict: - new_doc._prune_conflicts(vectorclock.VectorClockRev(new_doc.rev), replica_uid) + new_doc.prune_conflicts(vectorclock.VectorClockRev(new_doc.rev), replica_uid) new_doc.add_conflict(my_doc) return 'conflicted', new_doc other_doc.update(new_doc) -- cgit v1.2.3 From de73fc6969433a69ec6ba12ec508c3c93b83fcc6 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Thu, 20 Aug 2015 14:47:14 -0300 Subject: [refactor] Move constructor, use isinstance isinstance is better, as kaliy pointed out, and the constructor is also in a safer place on __init__.py to be explicit. Also re-apply a change from last rebase; --- common/src/leap/soledad/common/couch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 90f1a36f..08096f86 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -1320,7 +1320,7 @@ class CouchDatabase(CommonBackend): 'converged', at_gen is the insertion/current generation. :rtype: (str, int) """ - if type(doc) is not CouchDocument: + if not isinstance(doc, CouchDocument): doc = self._factory(doc.doc_id, doc.rev, doc.get_json()) self._save_source_info(replica_uid, replica_gen, replica_trans_id, number_of_docs, -- cgit v1.2.3 From 057f9c02894c05de4d1d4fc1f93ba86ec6bea96d Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Wed, 26 Aug 2015 17:48:57 -0300 Subject: [tests] fix variable name from refactor From: [refactor] removing getters and setters from couch.py _couch_url was a private variable with getter and setter doing the same as a public variable. It is accessed all over the code, so being private with getters and setters didnt make sense. This commit fixes the tests to also follow this style from now on. 
--- .../common/tests/test_couch_operations_atomicity.py | 6 +++--- common/src/leap/soledad/common/tests/test_server.py | 18 +++++++++--------- .../leap/soledad/common/tests/test_sqlcipher_sync.py | 4 ++-- common/src/leap/soledad/common/tests/test_sync.py | 8 ++++---- .../leap/soledad/common/tests/test_sync_deferred.py | 2 +- .../src/leap/soledad/common/tests/test_sync_mutex.py | 6 +++--- .../src/leap/soledad/common/tests/test_sync_target.py | 6 +++--- common/src/leap/soledad/common/tests/util.py | 6 +++--- 8 files changed, 28 insertions(+), 28 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py index c488822e..0a06cc39 100644 --- a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py +++ b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py @@ -83,15 +83,15 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer): return soledad def make_app(self): - self.request_state = CouchServerState(self._couch_url) + self.request_state = CouchServerState(self.couch_url) return self.make_app_after_state(self.request_state) def setUp(self): TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) self.db = CouchDatabase.open_database( - urljoin(self._couch_url, 'user-user-uuid'), + urljoin(self.couch_url, 'user-user-uuid'), create=True, replica_uid='replica', ensure_ddocs=True) diff --git a/common/src/leap/soledad/common/tests/test_server.py b/common/src/leap/soledad/common/tests/test_server.py index 5ffa2a63..5e8e5f90 100644 --- a/common/src/leap/soledad/common/tests/test_server.py +++ b/common/src/leap/soledad/common/tests/test_server.py @@ -50,7 +50,7 @@ from leap.soledad.server.auth import URLToAuthorization def _couch_ensure_database(self, dbname): db = CouchDatabase.open_database( - self._couch_url + '/' + dbname, + self.couch_url + '/' + dbname, create=True, ensure_ddocs=True) return db, db._replica_uid @@ -325,7 +325,7 @@ class EncryptedSyncTestCase( shared_db=self.get_default_shared_mock(_put_doc_side_effect)) def make_app(self): - self.request_state = CouchServerState(self._couch_url) + self.request_state = CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): @@ -333,7 +333,7 @@ class EncryptedSyncTestCase( # dependencies. # XXX explain better CouchDBTestCase.setUp(self) - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") TestCaseWithServer.setUp(self) @@ -368,7 +368,7 @@ class EncryptedSyncTestCase( # ensure remote db exists before syncing db = CouchDatabase.open_database( - urljoin(self._couch_url, 'user-' + user), + urljoin(self.couch_url, 'user-' + user), create=True, ensure_ddocs=True) @@ -494,24 +494,24 @@ class LockResourceTestCase( # dependencies. 
# XXX explain better CouchDBTestCase.setUp(self) - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") TestCaseWithServer.setUp(self) # create the databases CouchDatabase.open_database( - urljoin(self._couch_url, 'shared'), + urljoin(self.couch_url, 'shared'), create=True, ensure_ddocs=True) CouchDatabase.open_database( - urljoin(self._couch_url, 'tokens'), + urljoin(self.couch_url, 'tokens'), create=True, ensure_ddocs=True) - self._state = CouchServerState(self._couch_url) + self._state = CouchServerState(self.couch_url) def tearDown(self): # delete remote database db = CouchDatabase.open_database( - urljoin(self._couch_url, 'shared'), + urljoin(self.couch_url, 'shared'), create=True, ensure_ddocs=True) db.delete_database() diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py index c57d6f61..af2d0e2a 100644 --- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py +++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py @@ -733,7 +733,7 @@ def _make_local_db_and_token_http_target(test, path='test'): test.startTwistedServer() # ensure remote db exists before syncing db = couch.CouchDatabase.open_database( - urljoin(test._couch_url, 'test'), + urljoin(test.couch_url, 'test'), create=True, replica_uid='test', ensure_ddocs=True) @@ -790,7 +790,7 @@ class SQLCipherSyncTargetTests( (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id)) def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def set_trace_hook(self, callback, shallow=False): diff --git a/common/src/leap/soledad/common/tests/test_sync.py b/common/src/leap/soledad/common/tests/test_sync.py index 14152370..61f3879f 100644 --- a/common/src/leap/soledad/common/tests/test_sync.py +++ b/common/src/leap/soledad/common/tests/test_sync.py @@ -56,14 +56,14 @@ class InterruptableSyncTestCase( sync_target = soledad_sync_target def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) def tearDown(self): CouchDBTestCase.tearDown(self) @@ -103,7 +103,7 @@ class InterruptableSyncTestCase( # ensure remote db exists before syncing db = couch.CouchDatabase.open_database( - urljoin(self._couch_url, 'user-user-uuid'), + urljoin(self.couch_url, 'user-user-uuid'), create=True, ensure_ddocs=True) @@ -167,7 +167,7 @@ class TestSoledadDbSync( token = False def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): diff --git a/common/src/leap/soledad/common/tests/test_sync_deferred.py b/common/src/leap/soledad/common/tests/test_sync_deferred.py index ffb8a4ae..0065413a 100644 --- a/common/src/leap/soledad/common/tests/test_sync_deferred.py +++ b/common/src/leap/soledad/common/tests/test_sync_deferred.py @@ -129,7 +129,7 @@ class 
TestSoledadDbSyncDeferredEncDecr( token = True def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): diff --git a/common/src/leap/soledad/common/tests/test_sync_mutex.py b/common/src/leap/soledad/common/tests/test_sync_mutex.py index a904a940..aa46d5b7 100644 --- a/common/src/leap/soledad/common/tests/test_sync_mutex.py +++ b/common/src/leap/soledad/common/tests/test_sync_mutex.py @@ -84,14 +84,14 @@ class TestSyncMutex( sync_target = soledad_sync_target def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) def tearDown(self): CouchDBTestCase.tearDown(self) @@ -103,7 +103,7 @@ class TestSyncMutex( # ensure remote db exists before syncing db = couch.CouchDatabase.open_database( - urljoin(self._couch_url, 'user-user-uuid'), + urljoin(self.couch_url, 'user-user-uuid'), create=True, ensure_ddocs=True) diff --git a/common/src/leap/soledad/common/tests/test_sync_target.py b/common/src/leap/soledad/common/tests/test_sync_target.py index d855fb52..79c350cd 100644 --- a/common/src/leap/soledad/common/tests/test_sync_target.py +++ b/common/src/leap/soledad/common/tests/test_sync_target.py @@ -63,13 +63,13 @@ class TestSoledadParseReceivedDocResponse(SoledadWithCouchServerMixin): def setUp(self): SoledadWithCouchServerMixin.setUp(self) - self._couch_url = 'http://localhost:' + str(self.wrapper.port) + self.couch_url = 'http://localhost:' + str(self.wrapper.port) creds = {'token': { 'uuid': 'user-uuid', 'token': 'auth-token', }} self.target = target.SoledadHTTPSyncTarget( - self._couch_url, + self.couch_url, uuid4().hex, creds, self._soledad._crypto, @@ -819,7 +819,7 @@ class TestSoledadDbSync( token = False def make_app(self): - self.request_state = couch.CouchServerState(self._couch_url) + self.request_state = couch.CouchServerState(self.couch_url) return self.make_app_with_state(self.request_state) def setUp(self): diff --git a/common/src/leap/soledad/common/tests/util.py b/common/src/leap/soledad/common/tests/util.py index daa9c558..2190eeaa 100644 --- a/common/src/leap/soledad/common/tests/util.py +++ b/common/src/leap/soledad/common/tests/util.py @@ -486,7 +486,7 @@ class CouchServerStateForTests(CouchServerState): def _create_database(self, dbname): return CouchDatabase.open_database( - urljoin(self._couch_url, dbname), + urljoin(self.couch_url, dbname), True, replica_uid=dbname, ensure_ddocs=True) @@ -506,7 +506,7 @@ class SoledadWithCouchServerMixin( main_test_class = getattr(self, 'main_test_class', None) if main_test_class is not None: main_test_class.setUp(self) - self._couch_url = 'http://localhost:%d' % self.wrapper.port + self.couch_url = 'http://localhost:%d' % self.wrapper.port def tearDown(self): main_test_class = getattr(self, 'main_test_class', None) @@ -514,7 +514,7 @@ class SoledadWithCouchServerMixin( main_test_class.tearDown(self) # delete the test database try: - db = CouchDatabase(self._couch_url, 'test') + db = CouchDatabase(self.couch_url, 'test') db.delete_database() except DatabaseDoesNotExist: pass -- cgit v1.2.3 From 
af611822aa0ed3fa4be7089fa9835820f489431f Mon Sep 17 00:00:00 2001 From: Kali Kaneko Date: Fri, 28 Aug 2015 12:37:01 -0400 Subject: [style] pep8 fixes --- common/src/leap/soledad/common/couch.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 08096f86..82b5aca8 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -1207,7 +1207,8 @@ class CouchDatabase(CommonBackend): :type doc: CouchDocument """ my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) - doc.prune_conflicts(vectorclock.VectorClockRev(doc.rev), self._replica_uid) + doc.prune_conflicts( + vectorclock.VectorClockRev(doc.rev), self._replica_uid) doc.add_conflict(my_doc) self._put_doc(my_doc, doc) @@ -1327,8 +1328,10 @@ class CouchDatabase(CommonBackend): doc_idx, sync_id) my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) if my_doc is not None: - my_doc.set_conflicts(self.get_doc_conflicts(my_doc.doc_id, my_doc.couch_rev)) - state, save_doc = _process_incoming_doc(my_doc, doc, save_conflict, self.replica_uid) + my_doc.set_conflicts( + self.get_doc_conflicts(my_doc.doc_id, my_doc.couch_rev)) + state, save_doc = _process_incoming_doc( + my_doc, doc, save_conflict, self.replica_uid) if save_doc: self._put_doc(my_doc, save_doc) doc.update(save_doc) @@ -1503,7 +1506,8 @@ def _process_incoming_doc(my_doc, other_doc, save_conflict, replica_uid): # at this point, `doc` has arrived from the other syncing party, and # we will decide what to do with it. # First, we prepare the arriving doc to update couch database. - new_doc = CouchDocument(other_doc.doc_id, other_doc.rev, other_doc.get_json()) + new_doc = CouchDocument( + other_doc.doc_id, other_doc.rev, other_doc.get_json()) if my_doc is None: return 'inserted', new_doc new_doc.couch_rev = my_doc.couch_rev @@ -1537,7 +1541,8 @@ def _process_incoming_doc(my_doc, other_doc, save_conflict, replica_uid): return 'superseded', new_doc else: if save_conflict: - new_doc.prune_conflicts(vectorclock.VectorClockRev(new_doc.rev), replica_uid) + new_doc.prune_conflicts( + vectorclock.VectorClockRev(new_doc.rev), replica_uid) new_doc.add_conflict(my_doc) return 'conflicted', new_doc other_doc.update(new_doc) -- cgit v1.2.3 From 6956525a0f325765d9ef0e8dcd3ad5f4a55545ed Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:17:49 -0300 Subject: [tests] get rid of CouchDB python process This process per test gives a lot of headache, this is why we are removing it. With it we would need to try to start and stop properly on each test case. This fails badly when a test fail and, depending on how it fails, it freezes my pc. Also, it is very heavy for a CI to run a database process for each test case. 
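Instead of spawning a CouchDB process per test, the tests now talk to a single CouchDB instance that is already running on the machine. A condensed sketch of that approach (illustrative only; it assumes CouchDB is listening on the default localhost:5984 and that the python couchdb package is installed):

    import couchdb
    import unittest

    class SharedCouchTestCase(unittest.TestCase):

        def setUp(self):
            # connect to the one long-running server instead of starting one
            self.couch_server = couchdb.Server('http://localhost:5984')
            self.previous_dbs = set(db for db in self.couch_server)

        def tearDown(self):
            # any database a test created and did not clean up is removed here
            leftovers = set(db for db in self.couch_server) - self.previous_dbs
            for name in leftovers:
                self.couch_server.delete(name)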
--- common/src/leap/soledad/common/tests/util.py | 169 ++++++--------------------- 1 file changed, 36 insertions(+), 133 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/util.py b/common/src/leap/soledad/common/tests/util.py index 2190eeaa..3187472f 100644 --- a/common/src/leap/soledad/common/tests/util.py +++ b/common/src/leap/soledad/common/tests/util.py @@ -27,10 +27,8 @@ import shutil import random import string import u1db -import subprocess -import time -import re import traceback +import couchdb from uuid import uuid4 from mock import Mock @@ -337,119 +335,6 @@ class BaseSoledadTest(BaseLeapTest, MockedSharedDBTest): self.assertEqual(exp_doc.content, doc.content) -# ----------------------------------------------------------------------------- -# A wrapper for running couchdb locally. -# ----------------------------------------------------------------------------- - -# from: https://github.com/smcq/paisley/blob/master/paisley/test/util.py -# TODO: include license of above project. -class CouchDBWrapper(object): - - """ - Wrapper for external CouchDB instance which is started and stopped for - testing. - """ - BOOT_TIMEOUT_SECONDS = 5 - RETRY_LIMIT = 3 - - def start(self): - tries = 0 - while tries < self.RETRY_LIMIT and not hasattr(self, 'port'): - try: - self._try_start() - return - except Exception, e: - print traceback.format_exc() - self.stop() - tries += 1 - raise Exception( - "Check your couchdb: Tried to start 3 times and failed badly") - - def _try_start(self): - """ - Start a CouchDB instance for a test. - """ - self.tempdir = tempfile.mkdtemp(suffix='.couch.test') - - path = os.path.join(os.path.dirname(__file__), - 'couchdb.ini.template') - handle = open(path) - conf = handle.read() % { - 'tempdir': self.tempdir, - } - handle.close() - - shutil.copy('/etc/couchdb/default.ini', self.tempdir) - defaultConfPath = os.path.join(self.tempdir, 'default.ini') - - confPath = os.path.join(self.tempdir, 'test.ini') - handle = open(confPath, 'w') - handle.write(conf) - handle.close() - - # create the dirs from the template - mkdir_p(os.path.join(self.tempdir, 'lib')) - mkdir_p(os.path.join(self.tempdir, 'log')) - args = ['/usr/bin/couchdb', '-n', - '-a', defaultConfPath, '-a', confPath] - null = open('/dev/null', 'w') - - self.process = subprocess.Popen( - args, env=None, stdout=null.fileno(), stderr=null.fileno(), - close_fds=True) - boot_time = time.time() - # find port - logPath = os.path.join(self.tempdir, 'log', 'couch.log') - while not os.path.exists(logPath): - if self.process.poll() is not None: - got_stdout, got_stderr = "", "" - if self.process.stdout is not None: - got_stdout = self.process.stdout.read() - - if self.process.stderr is not None: - got_stderr = self.process.stderr.read() - raise Exception(""" -couchdb exited with code %d. 
-stdout: -%s -stderr: -%s""" % ( - self.process.returncode, got_stdout, got_stderr)) - time.sleep(0.01) - if (time.time() - boot_time) > self.BOOT_TIMEOUT_SECONDS: - self.stop() - raise Exception("Timeout starting couch") - while os.stat(logPath).st_size == 0: - time.sleep(0.01) - if (time.time() - boot_time) > self.BOOT_TIMEOUT_SECONDS: - self.stop() - raise Exception("Timeout starting couch") - PORT_RE = re.compile( - 'Apache CouchDB has started on http://127.0.0.1:(?P\d+)') - - handle = open(logPath) - line = handle.read() - handle.close() - m = PORT_RE.search(line) - if not m: - self.stop() - raise Exception("Cannot find port in line %s" % line) - self.port = int(m.group('port')) - - def stop(self): - """ - Terminate the CouchDB instance. - """ - try: - self.process.terminate() - self.process.communicate() - except: - # just to clean up - # if it can't, the process wasn't created anyway - pass - shutil.rmtree(self.tempdir) - - class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest): """ @@ -460,15 +345,26 @@ class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest): """ Make sure we have a CouchDB instance for a test. """ - self.wrapper = CouchDBWrapper() - self.wrapper.start() - # self.db = self.wrapper.db + server = self.couch_server = couchdb.Server() + self.previous_dbs = set([db for db in server]) + self.couch_port = 5984 + self.couch_url = 'http://localhost:%d' % self.couch_port def tearDown(self): """ Stop CouchDB instance for test. """ - self.wrapper.stop() + current_dbs = set([db for db in self.couch_server]) + remaining_dbs = current_dbs - self.previous_dbs + if remaining_dbs and False: + raise Exception("tests created %s and didn't clean up!", remaining_dbs) + + def delete_db(self, name): + try: + self.couch_server.delete(name) + except: + # ignore if already missing + pass class CouchServerStateForTests(CouchServerState): @@ -484,15 +380,25 @@ class CouchServerStateForTests(CouchServerState): which is less pleasant than allowing the db to be automatically created. 
""" - def _create_database(self, dbname): - return CouchDatabase.open_database( + def __init__(self, *args, **kwargs): + self.dbs = [] + super(CouchServerStateForTests, self).__init__(*args, **kwargs) + + def _create_database(self, replica_uid=None, dbname=None): + """ + Create db and append to a list, allowing test to close it later + """ + dbname = dbname or ('test-%s' % uuid4().hex) + db = CouchDatabase.open_database( urljoin(self.couch_url, dbname), True, - replica_uid=dbname, + replica_uid=replica_uid or 'test', ensure_ddocs=True) + self.dbs.append(db) + return db def ensure_database(self, dbname): - db = self._create_database(dbname) + db = self._create_database(dbname=dbname) return db, db.replica_uid @@ -506,23 +412,20 @@ class SoledadWithCouchServerMixin( main_test_class = getattr(self, 'main_test_class', None) if main_test_class is not None: main_test_class.setUp(self) - self.couch_url = 'http://localhost:%d' % self.wrapper.port def tearDown(self): main_test_class = getattr(self, 'main_test_class', None) if main_test_class is not None: main_test_class.tearDown(self) # delete the test database - try: - db = CouchDatabase(self.couch_url, 'test') - db.delete_database() - except DatabaseDoesNotExist: - pass BaseSoledadTest.tearDown(self) CouchDBTestCase.tearDown(self) def make_app(self): - couch_url = urljoin( - 'http://localhost:' + str(self.wrapper.port), 'tests') - self.request_state = CouchServerStateForTests(couch_url) + self.request_state = CouchServerStateForTests(self.couch_url) + self.addCleanup(self.delete_dbs) return self.make_app_with_state(self.request_state) + + def delete_dbs(self): + for db in self.request_state.dbs: + self.delete_db(db._dbname) -- cgit v1.2.3 From 4856c49e93a4ba67055c0dc3e8b4cdbaeabc4940 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:28:33 -0300 Subject: [tests] isolate database names, use uuid We are using a single CouchDB install, which may cause tests to overlap since many of them uses the same database name, hurting isolation. This change tries to use uuid on most of it. Also changes for couch_url and couch_port introduced by removal of CouchDB process. 
--- common/src/leap/soledad/common/tests/test_couch.py | 19 +++++++----- .../tests/test_couch_operations_atomicity.py | 8 +++-- .../src/leap/soledad/common/tests/test_server.py | 2 -- .../soledad/common/tests/test_sqlcipher_sync.py | 2 ++ common/src/leap/soledad/common/tests/test_sync.py | 11 ++----- .../soledad/common/tests/test_sync_deferred.py | 10 ++----- .../leap/soledad/common/tests/test_sync_mutex.py | 6 ++-- .../leap/soledad/common/tests/test_sync_target.py | 34 +++++++++------------- 8 files changed, 39 insertions(+), 53 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index 468ad8d8..08a14d02 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -25,6 +25,7 @@ import json from urlparse import urljoin from couchdb.client import Server +from uuid import uuid4 from testscenarios import TestWithScenarios @@ -56,8 +57,8 @@ class TestCouchBackendImpl(CouchDBTestCase): def test__allocate_doc_id(self): db = couch.CouchDatabase.open_database( urljoin( - 'http://localhost:' + str(self.wrapper.port), - 'u1db_tests' + 'http://localhost:' + str(self.couch_port), + ('test-%s' % uuid4().hex) ), create=True, ensure_ddocs=True) @@ -66,6 +67,7 @@ class TestCouchBackendImpl(CouchDBTestCase): self.assertEqual(34, len(doc_id1)) int(doc_id1[len('D-'):], 16) self.assertNotEqual(doc_id1, db._allocate_doc_id()) + self.delete_db(db._dbname) # ----------------------------------------------------------------------------- @@ -73,25 +75,26 @@ class TestCouchBackendImpl(CouchDBTestCase): # ----------------------------------------------------------------------------- def make_couch_database_for_test(test, replica_uid): - port = str(test.wrapper.port) - return couch.CouchDatabase.open_database( - urljoin('http://localhost:' + port, replica_uid), + port = str(test.couch_port) + dbname = ('test-%s' % uuid4().hex) + db = couch.CouchDatabase.open_database( + urljoin('http://localhost:' + port, dbname), create=True, replica_uid=replica_uid or 'test', ensure_ddocs=True) def copy_couch_database_for_test(test, db): - port = str(test.wrapper.port) + port = str(test.couch_port) couch_url = 'http://localhost:' + port - new_dbname = db._replica_uid + '_copy' + new_dbname = db._dbname + '_copy' new_db = couch.CouchDatabase.open_database( urljoin(couch_url, new_dbname), create=True, replica_uid=db._replica_uid or 'test') # copy all docs session = couch.Session() - old_couch_db = Server(couch_url, session=session)[db._replica_uid] + old_couch_db = Server(couch_url, session=session)[db._dbname] new_couch_db = Server(couch_url, session=session)[new_dbname] for doc_id in old_couch_db: doc = old_couch_db.get(doc_id) diff --git a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py index 0a06cc39..25f709ca 100644 --- a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py +++ b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py @@ -23,6 +23,7 @@ import threading from urlparse import urljoin from twisted.internet import defer +from uuid import uuid4 from leap.soledad.client import Soledad from leap.soledad.common.couch import CouchDatabase, CouchServerState @@ -55,7 +56,7 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer): sync_target = soledad_sync_target - def _soledad_instance(self, user='user-uuid', 
passphrase=u'123', + def _soledad_instance(self, user=None, passphrase=u'123', prefix='', secrets_path='secrets.json', local_db_path='soledad.u1db', server_url='', @@ -63,6 +64,7 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer): """ Instantiate Soledad. """ + user = user or self.user # this callback ensures we save a document which is sent to the shared # db. @@ -89,9 +91,9 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer): def setUp(self): TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) - self.couch_url = 'http://localhost:' + str(self.wrapper.port) + self.user = ('user-%s' % uuid4().hex) self.db = CouchDatabase.open_database( - urljoin(self.couch_url, 'user-user-uuid'), + urljoin(self.couch_url, 'user-' + self.user), create=True, replica_uid='replica', ensure_ddocs=True) diff --git a/common/src/leap/soledad/common/tests/test_server.py b/common/src/leap/soledad/common/tests/test_server.py index 5e8e5f90..0667459e 100644 --- a/common/src/leap/soledad/common/tests/test_server.py +++ b/common/src/leap/soledad/common/tests/test_server.py @@ -333,7 +333,6 @@ class EncryptedSyncTestCase( # dependencies. # XXX explain better CouchDBTestCase.setUp(self) - self.couch_url = 'http://localhost:' + str(self.wrapper.port) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") TestCaseWithServer.setUp(self) @@ -494,7 +493,6 @@ class LockResourceTestCase( # dependencies. # XXX explain better CouchDBTestCase.setUp(self) - self.couch_url = 'http://localhost:' + str(self.wrapper.port) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") TestCaseWithServer.setUp(self) # create the databases diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py index af2d0e2a..ead4ee5f 100644 --- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py +++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py @@ -20,10 +20,12 @@ Test sqlcipher backend sync. import json +import os from u1db import sync from u1db import vectorclock from u1db import errors +from uuid import uuid4 from testscenarios import TestWithScenarios from urlparse import urljoin diff --git a/common/src/leap/soledad/common/tests/test_sync.py b/common/src/leap/soledad/common/tests/test_sync.py index 61f3879f..04e8b163 100644 --- a/common/src/leap/soledad/common/tests/test_sync.py +++ b/common/src/leap/soledad/common/tests/test_sync.py @@ -63,7 +63,6 @@ class InterruptableSyncTestCase( TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") - self.couch_url = 'http://localhost:' + str(self.wrapper.port) def tearDown(self): CouchDBTestCase.tearDown(self) @@ -177,13 +176,7 @@ class TestSoledadDbSync( SoledadWithCouchServerMixin.setUp(self) self.startTwistedServer() self.db = self.make_database_for_test(self, 'test1') - self.db2 = couch.CouchDatabase.open_database( - urljoin( - 'http://localhost:' + str(self.wrapper.port), - 'test' - ), - create=True, - ensure_ddocs=True) + self.db2 = self.request_state._create_database(replica_uid='test') def tearDown(self): """ @@ -199,7 +192,7 @@ class TestSoledadDbSync( and Token auth. 
""" target = soledad_sync_target( - self, target_name, + self, self.db2._dbname, source_replica_uid=self._soledad._dbpool.replica_uid) self.addCleanup(target.close) return sync.SoledadSynchronizer( diff --git a/common/src/leap/soledad/common/tests/test_sync_deferred.py b/common/src/leap/soledad/common/tests/test_sync_deferred.py index 0065413a..a07be66f 100644 --- a/common/src/leap/soledad/common/tests/test_sync_deferred.py +++ b/common/src/leap/soledad/common/tests/test_sync_deferred.py @@ -85,13 +85,7 @@ class BaseSoledadDeferredEncTest(SoledadWithCouchServerMixin): defer_encryption=True, sync_db_key=sync_db_key) self.db1 = SQLCipherDatabase(self.opts) - self.db2 = couch.CouchDatabase.open_database( - urljoin( - 'http://localhost:' + str(self.wrapper.port), - 'test' - ), - create=True, - ensure_ddocs=True) + self.db2 = self.request_state._create_database('test') def tearDown(self): # XXX should not access "private" attrs @@ -159,7 +153,7 @@ class TestSoledadDbSyncDeferredEncDecr( sync_db = self._soledad._sync_db sync_enc_pool = self._soledad._sync_enc_pool target = soledad_sync_target( - self, target_name, + self, self.db2._dbname, source_replica_uid=replica_uid, sync_db=sync_db, sync_enc_pool=sync_enc_pool) diff --git a/common/src/leap/soledad/common/tests/test_sync_mutex.py b/common/src/leap/soledad/common/tests/test_sync_mutex.py index aa46d5b7..2e2123a7 100644 --- a/common/src/leap/soledad/common/tests/test_sync_mutex.py +++ b/common/src/leap/soledad/common/tests/test_sync_mutex.py @@ -91,7 +91,7 @@ class TestSyncMutex( TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") - self.couch_url = 'http://localhost:' + str(self.wrapper.port) + self.user = ('user-%s' % uuid.uuid4().hex) def tearDown(self): CouchDBTestCase.tearDown(self) @@ -103,12 +103,12 @@ class TestSyncMutex( # ensure remote db exists before syncing db = couch.CouchDatabase.open_database( - urljoin(self.couch_url, 'user-user-uuid'), + urljoin(self.couch_url, 'user-' + self.user), create=True, ensure_ddocs=True) sol = self._soledad_instance( - user='user-uuid', server_url=self.getURL()) + user=self.user, server_url=self.getURL()) d1 = sol.sync() d2 = sol.sync() diff --git a/common/src/leap/soledad/common/tests/test_sync_target.py b/common/src/leap/soledad/common/tests/test_sync_target.py index 79c350cd..da4ff034 100644 --- a/common/src/leap/soledad/common/tests/test_sync_target.py +++ b/common/src/leap/soledad/common/tests/test_sync_target.py @@ -63,7 +63,6 @@ class TestSoledadParseReceivedDocResponse(SoledadWithCouchServerMixin): def setUp(self): SoledadWithCouchServerMixin.setUp(self) - self.couch_url = 'http://localhost:' + str(self.wrapper.port) creds = {'token': { 'uuid': 'user-uuid', 'token': 'auth-token', @@ -151,11 +150,11 @@ def make_local_db_and_soledad_target( test, path='test', source_replica_uid=uuid4().hex): test.startTwistedServer() - db = test.request_state._create_database(os.path.basename(path)) + db = test.request_state._create_database(replica_uid=os.path.basename(path)) sync_db = test._soledad._sync_db sync_enc_pool = test._soledad._sync_enc_pool st = soledad_sync_target( - test, path, + test, db._dbname, source_replica_uid=source_replica_uid, sync_db=sync_db, sync_enc_pool=sync_enc_pool) @@ -191,6 +190,8 @@ class TestSoledadSyncTarget( self.startTwistedServer() sync_db = self._soledad._sync_db sync_enc_pool = self._soledad._sync_enc_pool + if path is None: + path = self.db2._dbname target = self.sync_target( self, path, 
source_replica_uid=source_replica_uid, @@ -204,11 +205,11 @@ class TestSoledadSyncTarget( SoledadWithCouchServerMixin.setUp(self) self.startTwistedServer() self.db1 = make_sqlcipher_database_for_test(self, 'test1') - self.db2 = self.request_state._create_database('test2') + self.db2 = self.request_state._create_database('test') def tearDown(self): # db2, _ = self.request_state.ensure_database('test2') - self.db2.delete_database() + self.delete_db(self.db2._dbname) self.db1.close() SoledadWithCouchServerMixin.tearDown(self) TestWithScenarios.tearDown(self) @@ -220,8 +221,8 @@ class TestSoledadSyncTarget( This test was adapted to decrypt remote content before assert. """ - db = self.request_state._create_database('test') - remote_target = self.getSyncTarget('test') + db = self.db2 + remote_target = self.getSyncTarget() other_docs = [] def receive_doc(doc, gen, trans_id): @@ -247,7 +248,7 @@ class TestSoledadSyncTarget( def blackhole_getstderr(inst): return cStringIO.StringIO() - db = self.request_state._create_database('test') + db = self.db2 _put_doc_if_newer = db._put_doc_if_newer trigger_ids = ['doc-here2'] @@ -333,7 +334,7 @@ class TestSoledadSyncTarget( last_known_trans_id=None, insert_doc_cb=receive_doc, ensure_callback=ensure_cb, defer_decryption=False) self.assertEqual(1, new_gen) - db = self.request_state.open_database('test') + db = self.db2 self.assertEqual(1, len(replica_uid_box)) self.assertEqual(db._replica_uid, replica_uid_box[0]) self.assertGetEncryptedDoc( @@ -358,17 +359,16 @@ class TestSoledadSyncTarget( @defer.inlineCallbacks def test_record_sync_info(self): - db = self.request_state._create_database('test') remote_target = self.getSyncTarget( 'test', source_replica_uid='other-id') yield remote_target.record_sync_info('other-id', 2, 'T-transid') self.assertEqual( - (2, 'T-transid'), db._get_replica_gen_and_trans_id('other-id')) + (2, 'T-transid'), self.db2._get_replica_gen_and_trans_id('other-id')) @defer.inlineCallbacks def test_sync_exchange_receive(self): - db = self.request_state._create_database('test') + db = self.db2 doc = db.create_doc_from_json('{"value": "there"}') remote_target = self.getSyncTarget('test') other_changes = [] def receive_doc(doc, gen, trans_id): @@ -857,13 +857,7 @@ class TestSoledadDbSync( defer_encryption=True, sync_db_key=sync_db_key) self.db1 = SQLCipherDatabase(self.opts) - self.db2 = couch.CouchDatabase.open_database( - urljoin( - 'http://localhost:' + str(self.wrapper.port), - 'test' - ), - create=True, - ensure_ddocs=True) + self.db2 = self.request_state._create_database(replica_uid='test') def tearDown(self): """ @@ -890,7 +884,7 @@ class TestSoledadDbSync( 'uuid': 'user-uuid', 'token': 'auth-token', }} - target_url = self.getURL(target_name) + target_url = self.getURL(self.db2._dbname) # get a u1db syncer crypto = self._soledad._crypto -- cgit v1.2.3 From a5742807539c66577f48d6cfbb411046c17c1978 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:35:45 -0300 Subject: [tests] use addCleanup to ensure db deletion TestCase provides an addCleanup method, which lets a test register resource cleanup at the moment the resource is created. We now use it to simplify the database deletion logic in the tests and to guarantee that, as soon as a database is created, a cleanup is registered to delete it afterwards.
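For readers unfamiliar with the pattern, a minimal self-contained sketch follows. The in-memory FAKE_COUCH dict and its create_db/delete_db helpers are stand-ins for the suite's real CouchDB utilities, not the actual implementation:

import unittest
from uuid import uuid4

FAKE_COUCH = {}  # stand-in for a CouchDB server: maps db name -> documents

def create_db(dbname):
    # pretend to create a database on the server
    FAKE_COUCH[dbname] = {}

def delete_db(dbname):
    # pretend to delete it; mirrors the suite's delete_db helper
    FAKE_COUCH.pop(dbname, None)

class AddCleanupExample(unittest.TestCase):

    def test_db_is_deleted_even_if_the_test_fails(self):
        dbname = 'test-%s' % uuid4().hex
        create_db(dbname)
        # register the cleanup right at creation time: it runs after the
        # test whether it passes, fails or errors, so no tearDown is needed
        self.addCleanup(delete_db, dbname)
        self.assertIn(dbname, FAKE_COUCH)

if __name__ == '__main__':
    unittest.main()

Cleanups registered this way run in reverse order of registration and run even when the test body raises, which is what makes the tearDown bookkeeping removed below unnecessary.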
--- common/src/leap/soledad/common/tests/test_couch.py | 55 ++-------------------- 1 file changed, 5 insertions(+), 50 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index 08a14d02..52d1edd4 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -82,6 +82,8 @@ def make_couch_database_for_test(test, replica_uid): create=True, replica_uid=replica_uid or 'test', ensure_ddocs=True) + test.addCleanup(test.delete_db, dbname) + return db def copy_couch_database_for_test(test, db): @@ -146,24 +148,6 @@ class CouchTests( scenarios = COUCH_SCENARIOS - def setUp(self): - test_backends.AllDatabaseTests.setUp(self) - # save db info because of test_close - self._url = self.db._url - self._dbname = self.db._dbname - - def tearDown(self): - # if current test is `test_close` we have to use saved objects to - # delete the database because the close() method will have removed the - # references needed to do it using the CouchDatabase. - if self.id().endswith('test_couch.CouchTests.test_close(couch)'): - session = couch.Session() - server = Server(url=self._url, session=session) - del(server[self._dbname]) - else: - self.db.delete_database() - test_backends.AllDatabaseTests.tearDown(self) - class CouchDatabaseTests( TestWithScenarios, @@ -172,10 +156,6 @@ class CouchDatabaseTests( scenarios = COUCH_SCENARIOS - def tearDown(self): - self.db.delete_database() - test_backends.LocalDatabaseTests.tearDown(self) - class CouchValidateGenNTransIdTests( TestWithScenarios, @@ -184,10 +164,6 @@ class CouchValidateGenNTransIdTests( scenarios = COUCH_SCENARIOS - def tearDown(self): - self.db.delete_database() - test_backends.LocalDatabaseValidateGenNTransIdTests.tearDown(self) - class CouchValidateSourceGenTests( TestWithScenarios, @@ -196,10 +172,6 @@ class CouchValidateSourceGenTests( scenarios = COUCH_SCENARIOS - def tearDown(self): - self.db.delete_database() - test_backends.LocalDatabaseValidateSourceGenTests.tearDown(self) - class CouchWithConflictsTests( TestWithScenarios, @@ -208,10 +180,6 @@ class CouchWithConflictsTests( scenarios = COUCH_SCENARIOS - def tearDown(self): - self.db.delete_database() - test_backends.LocalDatabaseWithConflictsTests.tearDown(self) - # Notice: the CouchDB backend does not have indexing capabilities, so we do # not test indexing now. 
@@ -263,8 +231,6 @@ class CouchDatabaseSyncTargetTests( def setUp(self): CouchDBTestCase.setUp(self) - # from DatabaseBaseTests.setUp - self.db = self.create_database('test') # from TestCaseWithServer.setUp self.server = self.server_thread = self.port = None # other stuff @@ -272,7 +238,6 @@ class CouchDatabaseSyncTargetTests( self.other_changes = [] def tearDown(self): - CouchDBTestCase.tearDown(self) # from TestCaseWithServer.tearDown if self.server is not None: self.server.shutdown() @@ -280,9 +245,8 @@ class CouchDatabaseSyncTargetTests( self.server.server_close() if self.port: self.port.stopListening() - # from DatabaseBaseTests.tearDown - if hasattr(self, 'db') and self.db is not None: - self.db.close() + self.db.close() + CouchDBTestCase.tearDown(self) def receive_doc(self, doc, gen, trans_id): self.other_changes.append( @@ -727,17 +691,8 @@ class CouchDatabaseSyncTests( self.db3, self.db1_copy, self.db2_copy ]: if db is not None: - db.delete_database() + self.delete_db(db._dbname) db.close() - for replica_uid, dbname in [ - ('test1_copy', 'source'), - ('test2_copy', 'target'), - ('test3', 'target') - ]: - db = self.create_database(replica_uid, dbname) - db.delete_database() - # cleanup connections to avoid leaking of file descriptors - db.close() DatabaseBaseTests.tearDown(self) def assertLastExchangeLog(self, db, expected): -- cgit v1.2.3 From a2e0825700c52a63b987286dc8a512fc2dadc7f4 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:40:41 -0300 Subject: [tests] extract db creation with doc ensure method This was a duplicate, but also was getting on the way to improve isolation. With this small refactor it should be cleaner and have unique names. --- common/src/leap/soledad/common/tests/test_couch.py | 28 ++++++++-------------- 1 file changed, 10 insertions(+), 18 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index 52d1edd4..fb92a290 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -1331,10 +1331,13 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): def setUp(self): CouchDBTestCase.setUp(self) + + def create_db(self, ensure=True): + dbname = ('test-%s' % uuid4().hex) self.db = couch.CouchDatabase.open_database( - urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'), + urljoin('http://127.0.0.1:%d' % self.couch_port, dbname), create=True, - ensure_ddocs=False) # note that we don't enforce ddocs here + ensure_ddocs=ensure) def tearDown(self): self.db.delete_database() @@ -1346,6 +1349,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): Test that all methods that access design documents will raise if the design docs are not present. """ + self.create_db(ensure=False) # _get_generation() self.assertRaises( errors.MissingDesignDocError, @@ -1376,10 +1380,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): Test that all methods that access design documents list functions will raise if the functions are not present. 
""" - self.db = couch.CouchDatabase.open_database( - urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'), - create=True, - ensure_ddocs=True) + self.create_db(ensure=True) # erase views from _design/transactions transactions = self.db._database['_design/transactions'] transactions['lists'] = {} @@ -1406,10 +1407,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): Test that all methods that access design documents list functions will raise if the functions are not present. """ - self.db = couch.CouchDatabase.open_database( - urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'), - create=True, - ensure_ddocs=True) + self.create_db(ensure=True) # erase views from _design/transactions transactions = self.db._database['_design/transactions'] del transactions['lists'] @@ -1436,10 +1434,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): Test that all methods that access design documents' named views will raise if the views are not present. """ - self.db = couch.CouchDatabase.open_database( - urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'), - create=True, - ensure_ddocs=True) + self.create_db(ensure=True) # erase views from _design/docs docs = self.db._database['_design/docs'] del docs['views'] @@ -1478,10 +1473,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase): Test that all methods that access design documents will raise if the design docs are not present. """ - self.db = couch.CouchDatabase.open_database( - urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'), - create=True, - ensure_ddocs=True) + self.create_db(ensure=True) # delete _design/docs del self.db._database['_design/docs'] # delete _design/syncs -- cgit v1.2.3 From f578b9b931858f3d95588f1a982fc77275853cda Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:42:57 -0300 Subject: [tests] isolate LockResource tests using a mock 'shared' has to be used as a DB name just because of a constant, but it is used on only one point. This changes mock this point to have unique names for better tests isolation. 'tokens' was removed as unnecessary. 
--- common/src/leap/soledad/common/tests/test_server.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/test_server.py b/common/src/leap/soledad/common/tests/test_server.py index 0667459e..f512d6c1 100644 --- a/common/src/leap/soledad/common/tests/test_server.py +++ b/common/src/leap/soledad/common/tests/test_server.py @@ -496,23 +496,15 @@ class LockResourceTestCase( self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") TestCaseWithServer.setUp(self) # create the databases - CouchDatabase.open_database( - urljoin(self.couch_url, 'shared'), - create=True, - ensure_ddocs=True) - CouchDatabase.open_database( - urljoin(self.couch_url, 'tokens'), + db = CouchDatabase.open_database( + urljoin(self.couch_url, ('shared-%s' % (uuid4().hex))), create=True, ensure_ddocs=True) + self.addCleanup(db.delete_database) self._state = CouchServerState(self.couch_url) + self._state.open_database = mock.Mock(return_value=db) def tearDown(self): - # delete remote database - db = CouchDatabase.open_database( - urljoin(self.couch_url, 'shared'), - create=True, - ensure_ddocs=True) - db.delete_database() CouchDBTestCase.tearDown(self) TestCaseWithServer.tearDown(self) -- cgit v1.2.3 From 83a59cb5c29695f321354d2d0f1c0097ff34693d Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:45:12 -0300 Subject: [tests] simplify make_app and getTarget code It was hardcoded for 'test', but the database name is now random. What is useful for test code is the replica_uid, database name for the SyncTarget is now coming from database name. --- .../soledad/common/tests/test_sqlcipher_sync.py | 31 ++++++++-------------- common/src/leap/soledad/common/tests/test_sync.py | 13 +++------ .../soledad/common/tests/test_sync_deferred.py | 14 ++++------ .../leap/soledad/common/tests/test_sync_target.py | 17 ++++-------- 4 files changed, 25 insertions(+), 50 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py index ead4ee5f..343b915c 100644 --- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py +++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py @@ -731,28 +731,23 @@ class SQLCipherDatabaseSyncTests( errors.InvalidTransactionId, self.sync, self.db1, self.db2_copy) -def _make_local_db_and_token_http_target(test, path='test'): +def make_local_db_and_soledad_target( + test, path='test', + source_replica_uid=uuid4().hex): test.startTwistedServer() - # ensure remote db exists before syncing - db = couch.CouchDatabase.open_database( - urljoin(test.couch_url, 'test'), - create=True, - replica_uid='test', - ensure_ddocs=True) - - replica_uid = test._soledad._dbpool.replica_uid + db = test.request_state._create_database(replica_uid=os.path.basename(path)) sync_db = test._soledad._sync_db sync_enc_pool = test._soledad._sync_enc_pool st = soledad_sync_target( - test, path, - source_replica_uid=replica_uid, + test, db._dbname, + source_replica_uid=source_replica_uid, sync_db=sync_db, sync_enc_pool=sync_enc_pool) return db, st target_scenarios = [ ('leap', { - 'create_db_and_target': _make_local_db_and_token_http_target, + 'create_db_and_target': make_local_db_and_soledad_target, 'make_app_with_state': make_soledad_app, 'do_sync': sync_via_synchronizer_and_soledad}), ] @@ -761,8 +756,8 @@ target_scenarios = [ class SQLCipherSyncTargetTests( TestWithScenarios, tests.DatabaseBaseTests, - 
tests.TestCaseWithServer, - SoledadWithCouchServerMixin): + SoledadWithCouchServerMixin, + tests.TestCaseWithServer): # TODO: implement _set_trace_hook(_shallow) in SoledadHTTPSyncTarget so # skipped tests can be succesfully executed. @@ -773,13 +768,13 @@ class SQLCipherSyncTargetTests( whitebox = False def setUp(self): - super(tests.DatabaseBaseTests, self).setUp() + super(SQLCipherSyncTargetTests, self).setUp() self.db, self.st = self.create_db_and_target(self) self.addCleanup(self.st.close) self.other_changes = [] def tearDown(self): - super(tests.DatabaseBaseTests, self).tearDown() + super(SQLCipherSyncTargetTests, self).setUp() def assertLastExchangeLog(self, db, expected): log = getattr(db, '_last_exchange_log', None) @@ -791,10 +786,6 @@ class SQLCipherSyncTargetTests( self.other_changes.append( (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id)) - def make_app(self): - self.request_state = couch.CouchServerState(self.couch_url) - return self.make_app_with_state(self.request_state) - def set_trace_hook(self, callback, shallow=False): setter = (self.st._set_trace_hook if not shallow else self.st._set_trace_hook_shallow) diff --git a/common/src/leap/soledad/common/tests/test_sync.py b/common/src/leap/soledad/common/tests/test_sync.py index 04e8b163..1041367b 100644 --- a/common/src/leap/soledad/common/tests/test_sync.py +++ b/common/src/leap/soledad/common/tests/test_sync.py @@ -147,8 +147,8 @@ class InterruptableSyncTestCase( class TestSoledadDbSync( TestWithScenarios, - tests.TestCaseWithServer, - SoledadWithCouchServerMixin): + SoledadWithCouchServerMixin, + tests.TestCaseWithServer): """ Test db.sync remote sync shortcut @@ -165,10 +165,6 @@ class TestSoledadDbSync( oauth = False token = False - def make_app(self): - self.request_state = couch.CouchServerState(self.couch_url) - return self.make_app_with_state(self.request_state) - def setUp(self): """ Need to explicitely invoke inicialization on all bases. @@ -182,11 +178,10 @@ class TestSoledadDbSync( """ Need to explicitely invoke destruction on all bases. """ - self.db2.delete_database() SoledadWithCouchServerMixin.tearDown(self) # tests.TestCaseWithServer.tearDown(self) - def do_sync(self, target_name): + def do_sync(self): """ Perform sync using SoledadSynchronizer, SoledadSyncTarget and Token auth. @@ -210,7 +205,7 @@ class TestSoledadDbSync( doc1 = self.db.create_doc_from_json(tests.simple_doc) doc2 = self.db2.create_doc_from_json(tests.nested_doc) - local_gen_before_sync = yield self.do_sync('test') + local_gen_before_sync = yield self.do_sync() gen, _, changes = self.db.whats_changed(local_gen_before_sync) self.assertEqual(1, len(changes)) self.assertEqual(doc2.doc_id, changes[0][0]) diff --git a/common/src/leap/soledad/common/tests/test_sync_deferred.py b/common/src/leap/soledad/common/tests/test_sync_deferred.py index a07be66f..90b00670 100644 --- a/common/src/leap/soledad/common/tests/test_sync_deferred.py +++ b/common/src/leap/soledad/common/tests/test_sync_deferred.py @@ -59,6 +59,7 @@ class BaseSoledadDeferredEncTest(SoledadWithCouchServerMixin): def setUp(self): SoledadWithCouchServerMixin.setUp(self) + self.startTwistedServer() # config info self.db1_file = os.path.join(self.tempdir, "db1.u1db") os.unlink(self.db1_file) @@ -103,8 +104,8 @@ class SyncTimeoutError(Exception): class TestSoledadDbSyncDeferredEncDecr( TestWithScenarios, - tests.TestCaseWithServer, - BaseSoledadDeferredEncTest): + BaseSoledadDeferredEncTest, + tests.TestCaseWithServer): """ Test db.sync remote sync shortcut. 
@@ -122,17 +123,12 @@ class TestSoledadDbSyncDeferredEncDecr( oauth = False token = True - def make_app(self): - self.request_state = couch.CouchServerState(self.couch_url) - return self.make_app_with_state(self.request_state) - def setUp(self): """ Need to explicitely invoke inicialization on all bases. """ BaseSoledadDeferredEncTest.setUp(self) self.server = self.server_thread = None - self.startTwistedServer() self.syncer = None def tearDown(self): @@ -144,7 +140,7 @@ class TestSoledadDbSyncDeferredEncDecr( dbsyncer.close() BaseSoledadDeferredEncTest.tearDown(self) - def do_sync(self, target_name): + def do_sync(self): """ Perform sync using SoledadSynchronizer, SoledadSyncTarget and Token auth. @@ -184,7 +180,7 @@ class TestSoledadDbSyncDeferredEncDecr( """ doc1 = self.db1.create_doc_from_json(tests.simple_doc) doc2 = self.db2.create_doc_from_json(tests.nested_doc) - local_gen_before_sync = yield self.do_sync('test') + local_gen_before_sync = yield self.do_sync() gen, _, changes = self.db1.whats_changed(local_gen_before_sync) self.assertEqual(1, len(changes)) diff --git a/common/src/leap/soledad/common/tests/test_sync_target.py b/common/src/leap/soledad/common/tests/test_sync_target.py index da4ff034..ba556ea4 100644 --- a/common/src/leap/soledad/common/tests/test_sync_target.py +++ b/common/src/leap/soledad/common/tests/test_sync_target.py @@ -268,7 +268,6 @@ class TestSoledadSyncTarget( self.patch( IndexedCouchDatabase, '_put_doc_if_newer', bomb_put_doc_if_newer) remote_target = self.getSyncTarget( - 'test', source_replica_uid='replica') other_changes = [] @@ -318,7 +317,7 @@ class TestSoledadSyncTarget( This test was adapted to decrypt remote content before assert. """ - remote_target = self.getSyncTarget('test') + remote_target = self.getSyncTarget() other_docs = [] replica_uid_box = [] @@ -347,10 +346,9 @@ class TestSoledadSyncTarget( @defer.inlineCallbacks def test_get_sync_info(self): - db = self.request_state._create_database('test') + db = self.db2 db._set_replica_gen_and_trans_id('other-id', 1, 'T-transid') remote_target = self.getSyncTarget( - 'test', source_replica_uid='other-id') sync_info = yield remote_target.get_sync_info('other-id') self.assertEqual( @@ -360,7 +358,6 @@ class TestSoledadSyncTarget( @defer.inlineCallbacks def test_record_sync_info(self): remote_target = self.getSyncTarget( - 'test', source_replica_uid='other-id') yield remote_target.record_sync_info('other-id', 2, 'T-transid') self.assertEqual( @@ -370,7 +367,7 @@ class TestSoledadSyncTarget( def test_sync_exchange_receive(self): db = self.db2 doc = db.create_doc_from_json('{"value": "there"}') - remote_target = self.getSyncTarget('test') + remote_target = self.getSyncTarget() other_changes = [] def receive_doc(doc, gen, trans_id): @@ -423,10 +420,10 @@ class SoledadDatabaseSyncTargetTests( self.db, self.st = make_local_db_and_soledad_target(self) def tearDown(self): - tests.TestCaseWithServer.tearDown(self) - SoledadWithCouchServerMixin.tearDown(self) self.db.close() self.st.close() + tests.TestCaseWithServer.tearDown(self) + SoledadWithCouchServerMixin.tearDown(self) def set_trace_hook(self, callback, shallow=False): setter = (self.st._set_trace_hook if not shallow else @@ -818,10 +815,6 @@ class TestSoledadDbSync( oauth = False token = False - def make_app(self): - self.request_state = couch.CouchServerState(self.couch_url) - return self.make_app_with_state(self.request_state) - def setUp(self): """ Need to explicitely invoke inicialization on all bases. 
-- cgit v1.2.3 From 46af1bd22a1c0dbe506614a09af003d595b37ead Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 11 Sep 2015 19:49:04 -0300 Subject: [tests] db3 is expected to be an attribute self.db3 is closed on tearDown. This test was creating it as a local variable, making close possibly fail. --- common/src/leap/soledad/common/tests/test_couch.py | 81 +++++++++++----------- 1 file changed, 39 insertions(+), 42 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index fb92a290..28f90e5d 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -1161,7 +1161,7 @@ class CouchDatabaseSyncTests( self.db1 = self.create_database('test1', 'both') self.db2 = self.create_database('test2', 'both') doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc') - db3 = self.create_database('test3', 'both') + self.db3 = self.create_database('test3', 'both') self.sync(self.db2, self.db1) self.assertEqual( self.db1._get_generation_info(), @@ -1169,20 +1169,20 @@ class CouchDatabaseSyncTests( self.assertEqual( self.db2._get_generation_info(), self.db1._get_replica_gen_and_trans_id(self.db2._replica_uid)) - self.sync(db3, self.db1) + self.sync(self.db3, self.db1) # update on 2 doc2 = self.make_document('the-doc', doc1.rev, '{"a": 2}') self.db2.put_doc(doc2) - self.sync(self.db2, db3) - self.assertEqual(db3.get_doc('the-doc').rev, doc2.rev) + self.sync(self.db2, self.db3) + self.assertEqual(self.db3.get_doc('the-doc').rev, doc2.rev) # update on 1 doc1.set_json('{"a": 3}') self.db1.put_doc(doc1) # conflicts self.sync(self.db2, self.db1) - self.sync(db3, self.db1) + self.sync(self.db3, self.db1) self.assertTrue(self.db2.get_doc('the-doc').has_conflicts) - self.assertTrue(db3.get_doc('the-doc').has_conflicts) + self.assertTrue(self.db3.get_doc('the-doc').has_conflicts) # resolve conflicts = self.db2.get_doc_conflicts('the-doc') doc4 = self.make_document('the-doc', None, '{"a": 4}') @@ -1191,38 +1191,38 @@ class CouchDatabaseSyncTests( doc2 = self.db2.get_doc('the-doc') self.assertEqual(doc4.get_json(), doc2.get_json()) self.assertFalse(doc2.has_conflicts) - self.sync(self.db2, db3) - doc3 = db3.get_doc('the-doc') + self.sync(self.db2, self.db3) + doc3 = self.db3.get_doc('the-doc') self.assertEqual(doc4.get_json(), doc3.get_json()) self.assertFalse(doc3.has_conflicts) def test_sync_supersedes_conflicts(self): self.db1 = self.create_database('test1', 'both') self.db2 = self.create_database('test2', 'target') - db3 = self.create_database('test3', 'both') + self.db3 = self.create_database('test3', 'both') doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc') self.db2.create_doc_from_json('{"b": 1}', doc_id='the-doc') - db3.create_doc_from_json('{"c": 1}', doc_id='the-doc') - self.sync(db3, self.db1) + self.db3.create_doc_from_json('{"c": 1}', doc_id='the-doc') + self.sync(self.db3, self.db1) self.assertEqual( self.db1._get_generation_info(), - db3._get_replica_gen_and_trans_id(self.db1._replica_uid)) + self.db3._get_replica_gen_and_trans_id(self.db1._replica_uid)) self.assertEqual( - db3._get_generation_info(), - self.db1._get_replica_gen_and_trans_id(db3._replica_uid)) - self.sync(db3, self.db2) + self.db3._get_generation_info(), + self.db1._get_replica_gen_and_trans_id(self.db3._replica_uid)) + self.sync(self.db3, self.db2) self.assertEqual( self.db2._get_generation_info(), - 
db3._get_replica_gen_and_trans_id(self.db2._replica_uid)) + self.db3._get_replica_gen_and_trans_id(self.db2._replica_uid)) self.assertEqual( - db3._get_generation_info(), - self.db2._get_replica_gen_and_trans_id(db3._replica_uid)) - self.assertEqual(3, len(db3.get_doc_conflicts('the-doc'))) + self.db3._get_generation_info(), + self.db2._get_replica_gen_and_trans_id(self.db3._replica_uid)) + self.assertEqual(3, len(self.db3.get_doc_conflicts('the-doc'))) doc1.set_json('{"a": 2}') self.db1.put_doc(doc1) - self.sync(db3, self.db1) + self.sync(self.db3, self.db1) # original doc1 should have been removed from conflicts - self.assertEqual(3, len(db3.get_doc_conflicts('the-doc'))) + self.assertEqual(3, len(self.db3.get_doc_conflicts('the-doc'))) def test_sync_stops_after_get_sync_info(self): self.db1 = self.create_database('test1', 'source') @@ -1241,79 +1241,76 @@ class CouchDatabaseSyncTests( self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1') self.assertRaises( u1db_errors.InvalidReplicaUID, self.sync, self.db1, self.db2) - # remove the reference to db2 to avoid double deleting on tearDown - self.db2.close() - self.db2 = None def test_sync_detects_rollback_in_source(self): self.db1 = self.create_database('test1', 'source') self.db2 = self.create_database('test2', 'target') self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1') self.sync(self.db1, self.db2) - db1_copy = self.copy_database(self.db1) + self.db1_copy = self.copy_database(self.db1) self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2') self.sync(self.db1, self.db2) self.assertRaises( - u1db_errors.InvalidGeneration, self.sync, db1_copy, self.db2) + u1db_errors.InvalidGeneration, self.sync, self.db1_copy, self.db2) def test_sync_detects_rollback_in_target(self): self.db1 = self.create_database('test1', 'source') self.db2 = self.create_database('test2', 'target') self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent") self.sync(self.db1, self.db2) - db2_copy = self.copy_database(self.db2) + self.db2_copy = self.copy_database(self.db2) self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2') self.sync(self.db1, self.db2) self.assertRaises( - u1db_errors.InvalidGeneration, self.sync, self.db1, db2_copy) + u1db_errors.InvalidGeneration, self.sync, self.db1, self.db2_copy) def test_sync_detects_diverged_source(self): self.db1 = self.create_database('test1', 'source') self.db2 = self.create_database('test2', 'target') - db3 = self.copy_database(self.db1) + self.db3 = self.copy_database(self.db1) self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent") - db3.create_doc_from_json(tests.simple_doc, doc_id="divergent") + self.db3.create_doc_from_json(tests.simple_doc, doc_id="divergent") self.sync(self.db1, self.db2) self.assertRaises( - u1db_errors.InvalidTransactionId, self.sync, db3, self.db2) + u1db_errors.InvalidTransactionId, self.sync, self.db3, self.db2) def test_sync_detects_diverged_target(self): self.db1 = self.create_database('test1', 'source') self.db2 = self.create_database('test2', 'target') - db3 = self.copy_database(self.db2) - db3.create_doc_from_json(tests.nested_doc, doc_id="divergent") + self.db3 = self.copy_database(self.db2) + self.db3.create_doc_from_json(tests.nested_doc, doc_id="divergent") self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent") self.sync(self.db1, self.db2) self.assertRaises( - u1db_errors.InvalidTransactionId, self.sync, self.db1, db3) + u1db_errors.InvalidTransactionId, self.sync, self.db1, self.db3) def 
test_sync_detects_rollback_and_divergence_in_source(self): self.db1 = self.create_database('test1', 'source') self.db2 = self.create_database('test2', 'target') self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1') self.sync(self.db1, self.db2) - db1_copy = self.copy_database(self.db1) + self.db1_copy = self.copy_database(self.db1) self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2') self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc3') self.sync(self.db1, self.db2) - db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2') - db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3') + self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2') + self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3') self.assertRaises( - u1db_errors.InvalidTransactionId, self.sync, db1_copy, self.db2) + u1db_errors.InvalidTransactionId, self.sync, self.db1_copy, self.db2) def test_sync_detects_rollback_and_divergence_in_target(self): self.db1 = self.create_database('test1', 'source') self.db2 = self.create_database('test2', 'target') self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent") self.sync(self.db1, self.db2) - db2_copy = self.copy_database(self.db2) + self.db2_copy = self.copy_database(self.db2) self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2') self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc3') self.sync(self.db1, self.db2) - db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2') - db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3') + self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2') + self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3') self.assertRaises( - u1db_errors.InvalidTransactionId, self.sync, self.db1, db2_copy) + u1db_errors.InvalidTransactionId, self.sync, self.db1, self.db2_copy) def test_optional_sync_preserve_json(self): self.db1 = self.create_database('test1', 'source') -- cgit v1.2.3 From 66fab1ba7ff4f73af512fea2fd80aef53cbdd2c6 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Sat, 12 Sep 2015 22:15:40 -0300 Subject: [tests] subclass instead of copy test code This test class only defines a different set of scenarios; all other methods are inherited unchanged from the parent class.
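A rough sketch of the approach, using plain unittest in place of the suite's testscenarios machinery (the class names and the scenario attribute below are illustrative, not the real ones): the subclass overrides only what varies and inherits every test method.

import unittest

class BaseSyncTargetTests(unittest.TestCase):
    # all test logic is written once, against this class attribute
    scenario_name = 'reference'

    def make_target(self):
        return {'scenario': self.scenario_name}

    def test_target_carries_scenario(self):
        self.assertEqual(self.make_target()['scenario'], self.scenario_name)

class SQLCipherVariantTests(BaseSyncTargetTests):
    # only the scenario definition differs; no test method is copied
    scenario_name = 'sqlcipher'

if __name__ == '__main__':
    unittest.main()

With testscenarios the same effect is obtained by overriding the scenarios attribute on the subclass, which is what the diff below does.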
--- .../soledad/common/tests/test_sqlcipher_sync.py | 368 +-------------------- 1 file changed, 2 insertions(+), 366 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py index 343b915c..15148f3f 100644 --- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py +++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py @@ -41,6 +41,7 @@ from leap.soledad.client.sqlcipher import SQLCipherDatabase from leap.soledad.common.tests import u1db_tests as tests from leap.soledad.common.tests.test_sqlcipher import SQLCIPHER_SCENARIOS from leap.soledad.common.tests.util import make_soledad_app +from leap.soledad.common.tests.test_sync_target import SoledadDatabaseSyncTargetTests from leap.soledad.common.tests.util import soledad_sync_target from leap.soledad.common.tests.util import BaseSoledadTest from leap.soledad.common.tests.util import SoledadWithCouchServerMixin @@ -753,11 +754,7 @@ target_scenarios = [ ] -class SQLCipherSyncTargetTests( - TestWithScenarios, - tests.DatabaseBaseTests, - SoledadWithCouchServerMixin, - tests.TestCaseWithServer): +class SQLCipherSyncTargetTests(SoledadDatabaseSyncTargetTests): # TODO: implement _set_trace_hook(_shallow) in SoledadHTTPSyncTarget so # skipped tests can be succesfully executed. @@ -766,364 +763,3 @@ class SQLCipherSyncTargetTests( target_scenarios)) whitebox = False - - def setUp(self): - super(SQLCipherSyncTargetTests, self).setUp() - self.db, self.st = self.create_db_and_target(self) - self.addCleanup(self.st.close) - self.other_changes = [] - - def tearDown(self): - super(SQLCipherSyncTargetTests, self).setUp() - - def assertLastExchangeLog(self, db, expected): - log = getattr(db, '_last_exchange_log', None) - if log is None: - return - self.assertEqual(expected, log) - - def receive_doc(self, doc, gen, trans_id): - self.other_changes.append( - (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id)) - - def set_trace_hook(self, callback, shallow=False): - setter = (self.st._set_trace_hook if not shallow else - self.st._set_trace_hook_shallow) - try: - setter(callback) - except NotImplementedError: - self.skipTest("%s does not implement _set_trace_hook" - % (self.st.__class__.__name__,)) - - def test_get_sync_target(self): - self.assertIsNot(None, self.st) - - @defer.inlineCallbacks - def test_get_sync_info(self): - sync_info = yield self.st.get_sync_info('other') - self.assertEqual( - ('test', 0, '', 0, ''), sync_info) - - @defer.inlineCallbacks - def test_create_doc_updates_sync_info(self): - sync_info = yield self.st.get_sync_info('other') - self.assertEqual( - ('test', 0, '', 0, ''), sync_info) - self.db.create_doc_from_json(tests.simple_doc) - sync_info = yield self.st.get_sync_info('other') - self.assertEqual(1, sync_info[1]) - - @defer.inlineCallbacks - def test_record_sync_info(self): - yield self.st.record_sync_info('replica', 10, 'T-transid') - sync_info = yield self.st.get_sync_info('other') - self.assertEqual( - ('test', 0, '', 10, 'T-transid'), sync_info) - - @defer.inlineCallbacks - def test_sync_exchange(self): - """ - Modified to account for possibly receiving encrypted documents from - sever-side. 
- """ - - docs_by_gen = [ - (self.make_document('doc-id', 'replica:1', tests.simple_doc), 10, - 'T-sid')] - new_gen, trans_id = yield self.st.sync_exchange( - docs_by_gen, 'replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertGetEncryptedDoc( - self.db, 'doc-id', 'replica:1', tests.simple_doc, False) - self.assertTransactionLog(['doc-id'], self.db) - last_trans_id = self.getLastTransId(self.db) - self.assertEqual(([], 1, last_trans_id), - (self.other_changes, new_gen, last_trans_id)) - sync_info = yield self.st.get_sync_info('replica') - self.assertEqual(10, sync_info[3]) - - @defer.inlineCallbacks - def test_sync_exchange_push_many(self): - """ - Modified to account for possibly receiving encrypted documents from - sever-side. - """ - docs_by_gen = [ - (self.make_document( - 'doc-id', 'replica:1', tests.simple_doc), 10, 'T-1'), - (self.make_document('doc-id2', 'replica:1', tests.nested_doc), 11, - 'T-2')] - new_gen, trans_id = yield self.st.sync_exchange( - docs_by_gen, 'replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertGetEncryptedDoc( - self.db, 'doc-id', 'replica:1', tests.simple_doc, False) - self.assertGetEncryptedDoc( - self.db, 'doc-id2', 'replica:1', tests.nested_doc, False) - self.assertTransactionLog(['doc-id', 'doc-id2'], self.db) - last_trans_id = self.getLastTransId(self.db) - self.assertEqual(([], 2, last_trans_id), - (self.other_changes, new_gen, trans_id)) - sync_info = yield self.st.get_sync_info('replica') - self.assertEqual(11, sync_info[3]) - - @defer.inlineCallbacks - def test_sync_exchange_returns_many_new_docs(self): - """ - Modified to account for JSON serialization differences. - """ - doc = self.db.create_doc_from_json(tests.simple_doc) - doc2 = self.db.create_doc_from_json(tests.nested_doc) - self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db) - new_gen, _ = yield self.st.sync_exchange( - [], 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db) - self.assertEqual(2, new_gen) - self.assertEqual( - [(doc.doc_id, doc.rev, 1), - (doc2.doc_id, doc2.rev, 2)], - [c[:2] + c[3:4] for c in self.other_changes]) - self.assertEqual( - json.dumps(tests.simple_doc), - json.dumps(self.other_changes[0][2])) - self.assertEqual( - json.loads(tests.nested_doc), - json.loads(self.other_changes[1][2])) - if self.whitebox: - self.assertEqual( - self.db._last_exchange_log['return'], - {'last_gen': 2, 'docs': - [(doc.doc_id, doc.rev), (doc2.doc_id, doc2.rev)]}) - - @defer.inlineCallbacks - def test_sync_exchange_deleted(self): - doc = self.db.create_doc_from_json('{}') - edit_rev = 'replica:1|' + doc.rev - docs_by_gen = [ - (self.make_document(doc.doc_id, edit_rev, None), 10, 'T-sid')] - new_gen, trans_id = yield self.st.sync_exchange( - docs_by_gen, 'replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertGetDocIncludeDeleted( - self.db, doc.doc_id, edit_rev, None, False) - self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db) - last_trans_id = self.getLastTransId(self.db) - self.assertEqual(([], 2, last_trans_id), - (self.other_changes, new_gen, trans_id)) - sync_info = yield self.st.get_sync_info('replica') - self.assertEqual(10, sync_info[3]) - - @defer.inlineCallbacks - def test_sync_exchange_refuses_conflicts(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - 
self.assertTransactionLog([doc.doc_id], self.db) - new_doc = '{"key": "altval"}' - docs_by_gen = [ - (self.make_document(doc.doc_id, 'replica:1', new_doc), 10, - 'T-sid')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id], self.db) - self.assertEqual( - (doc.doc_id, doc.rev, tests.simple_doc, 1), - self.other_changes[0][:-1]) - self.assertEqual(1, new_gen) - if self.whitebox: - self.assertEqual(self.db._last_exchange_log['return'], - {'last_gen': 1, 'docs': [(doc.doc_id, doc.rev)]}) - - @defer.inlineCallbacks - def test_sync_exchange_ignores_convergence(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], self.db) - gen, txid = self.db._get_generation_info() - docs_by_gen = [ - (self.make_document( - doc.doc_id, doc.rev, tests.simple_doc), 10, 'T-sid')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'replica', last_known_generation=gen, - last_known_trans_id=txid, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id], self.db) - self.assertEqual(([], 1), (self.other_changes, new_gen)) - - @defer.inlineCallbacks - def test_sync_exchange_returns_new_docs(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], self.db) - new_gen, _ = yield self.st.sync_exchange( - [], 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id], self.db) - self.assertEqual( - (doc.doc_id, doc.rev, tests.simple_doc, 1), - self.other_changes[0][:-1]) - self.assertEqual(1, new_gen) - if self.whitebox: - self.assertEqual(self.db._last_exchange_log['return'], - {'last_gen': 1, 'docs': [(doc.doc_id, doc.rev)]}) - - @defer.inlineCallbacks - def test_sync_exchange_returns_deleted_docs(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - self.db.delete_doc(doc) - self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db) - new_gen, _ = yield self.st.sync_exchange( - [], 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db) - self.assertEqual( - (doc.doc_id, doc.rev, None, 2), self.other_changes[0][:-1]) - self.assertEqual(2, new_gen) - if self.whitebox: - self.assertEqual(self.db._last_exchange_log['return'], - {'last_gen': 2, 'docs': [(doc.doc_id, doc.rev)]}) - - @defer.inlineCallbacks - def test_sync_exchange_getting_newer_docs(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], self.db) - new_doc = '{"key": "altval"}' - docs_by_gen = [ - (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10, - 'T-sid')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db) - self.assertEqual(([], 2), (self.other_changes, new_gen)) - - @defer.inlineCallbacks - def test_sync_exchange_with_concurrent_updates_of_synced_doc(self): - expected = [] - - def before_whatschanged_cb(state): - if state != 'before whats_changed': - return - cont = '{"key": "cuncurrent"}' - conc_rev = self.db.put_doc( - self.make_document(doc.doc_id, 'test:1|z:2', cont)) - expected.append((doc.doc_id, conc_rev, cont, 3)) - - 
self.set_trace_hook(before_whatschanged_cb) - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], self.db) - new_doc = '{"key": "altval"}' - docs_by_gen = [ - (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10, - 'T-sid')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertEqual(expected, [c[:-1] for c in self.other_changes]) - self.assertEqual(3, new_gen) - - @defer.inlineCallbacks - def test_sync_exchange_with_concurrent_updates(self): - - def after_whatschanged_cb(state): - if state != 'after whats_changed': - return - self.db.create_doc_from_json('{"new": "doc"}') - - self.set_trace_hook(after_whatschanged_cb) - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], self.db) - new_doc = '{"key": "altval"}' - docs_by_gen = [ - (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10, - 'T-sid')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertEqual(([], 2), (self.other_changes, new_gen)) - - @defer.inlineCallbacks - def test_sync_exchange_converged_handling(self): - doc = self.db.create_doc_from_json(tests.simple_doc) - docs_by_gen = [ - (self.make_document('new', 'other:1', '{}'), 4, 'T-foo'), - (self.make_document(doc.doc_id, doc.rev, doc.get_json()), 5, - 'T-bar')] - new_gen, _ = yield self.st.sync_exchange( - docs_by_gen, 'other-replica', last_known_generation=0, - last_known_trans_id=None, insert_doc_cb=self.receive_doc) - self.assertEqual(([], 2), (self.other_changes, new_gen)) - - @defer.inlineCallbacks - def test_sync_exchange_detect_incomplete_exchange(self): - def before_get_docs_explode(state): - if state != 'before get_docs': - return - raise errors.U1DBError("fail") - self.set_trace_hook(before_get_docs_explode) - # suppress traceback printing in the wsgiref server - # self.patch(simple_server.ServerHandler, - # 'log_exception', lambda h, exc_info: None) - doc = self.db.create_doc_from_json(tests.simple_doc) - self.assertTransactionLog([doc.doc_id], self.db) - with self.assertRaises((errors.U1DBError, errors.BrokenSyncStream)): - yield self.st.sync_exchange( - [], 'other-replica', - last_known_generation=0, last_known_trans_id=None, - insert_doc_cb=self.receive_doc) - - @defer.inlineCallbacks - def test_sync_exchange_doc_ids(self): - sync_exchange_doc_ids = getattr(self.st, 'sync_exchange_doc_ids', None) - if sync_exchange_doc_ids is None: - self.skipTest("sync_exchange_doc_ids not implemented") - db2 = self.create_database('test2') - doc = db2.create_doc_from_json(tests.simple_doc) - new_gen, trans_id = sync_exchange_doc_ids( - db2, [(doc.doc_id, 10, 'T-sid')], 0, None, - insert_doc_cb=self.receive_doc) - self.assertGetDoc(self.db, doc.doc_id, doc.rev, - tests.simple_doc, False) - self.assertTransactionLog([doc.doc_id], self.db) - last_trans_id = self.getLastTransId(self.db) - self.assertEqual(([], 1, last_trans_id), - (self.other_changes, new_gen, trans_id)) - self.assertEqual(10, self.st.get_sync_info(db2._replica_uid)[3]) - - @defer.inlineCallbacks - def test__set_trace_hook(self): - called = [] - - def cb(state): - called.append(state) - - self.set_trace_hook(cb) - yield self.st.sync_exchange([], 'replica', 0, None, self.receive_doc) - yield self.st.record_sync_info('replica', 0, 'T-sid') - self.assertEqual(['before whats_changed', - 
'after whats_changed', - 'before get_docs', - 'record_sync_info', - ], - called) - - @defer.inlineCallbacks - def test__set_trace_hook_shallow(self): - if (self.st._set_trace_hook_shallow == self.st._set_trace_hook or - self.st._set_trace_hook_shallow.im_func == - SoledadHTTPSyncTarget._set_trace_hook_shallow.im_func): - # shallow same as full - expected = ['before whats_changed', - 'after whats_changed', - 'before get_docs', - 'record_sync_info', - ] - else: - expected = ['sync_exchange', 'record_sync_info'] - - called = [] - - def cb(state): - called.append(state) - - self.set_trace_hook(cb, shallow=True) - self.st.sync_exchange([], 'replica', 0, None, self.receive_doc) - self.st.record_sync_info('replica', 0, 'T-sid') - self.assertEqual(expected, called) -- cgit v1.2.3 From 197ad8b7c6a5d180b798f4dfcbe7f6d5fdd18298 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Sun, 13 Sep 2015 02:41:00 -0300 Subject: [tests] change big tearDown for simple addCleanup Big tearDown logic can be replaced by a simple addCleanup. Also remove unused imports and fix a small typo on a database cleanup check. --- .../soledad/common/tests/test_sqlcipher_sync.py | 25 +--------------------- common/src/leap/soledad/common/tests/util.py | 2 +- 2 files changed, 2 insertions(+), 25 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py index 15148f3f..2e703023 100644 --- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py +++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py @@ -19,7 +19,6 @@ Test sqlcipher backend sync. """ -import json import os from u1db import sync @@ -28,15 +27,10 @@ from u1db import errors from uuid import uuid4 from testscenarios import TestWithScenarios -from urlparse import urljoin -from twisted.internet import defer - -from leap.soledad.common import couch from leap.soledad.common.crypto import ENC_SCHEME_KEY from leap.soledad.client.http_target import SoledadHTTPSyncTarget from leap.soledad.client.crypto import decrypt_doc_dict -from leap.soledad.client.sqlcipher import SQLCipherDatabase from leap.soledad.common.tests import u1db_tests as tests from leap.soledad.common.tests.test_sqlcipher import SQLCIPHER_SCENARIOS @@ -44,7 +38,6 @@ from leap.soledad.common.tests.util import make_soledad_app from leap.soledad.common.tests.test_sync_target import SoledadDatabaseSyncTargetTests from leap.soledad.common.tests.util import soledad_sync_target from leap.soledad.common.tests.util import BaseSoledadTest -from leap.soledad.common.tests.util import SoledadWithCouchServerMixin # ----------------------------------------------------------------------------- @@ -100,23 +93,6 @@ class SQLCipherDatabaseSyncTests( self._use_tracking = {} super(tests.DatabaseBaseTests, self).setUp() - def tearDown(self): - super(tests.DatabaseBaseTests, self).tearDown() - if hasattr(self, 'db1') and isinstance(self.db1, SQLCipherDatabase): - self.db1.close() - if hasattr(self, 'db1_copy') \ - and isinstance(self.db1_copy, SQLCipherDatabase): - self.db1_copy.close() - if hasattr(self, 'db2') \ - and isinstance(self.db2, SQLCipherDatabase): - self.db2.close() - if hasattr(self, 'db2_copy') \ - and isinstance(self.db2_copy, SQLCipherDatabase): - self.db2_copy.close() - if hasattr(self, 'db3') \ - and isinstance(self.db3, SQLCipherDatabase): - self.db3.close() - def create_database(self, replica_uid, sync_role=None): if replica_uid == 'test' and sync_role is None: # created up the chain 
by base class but unused @@ -124,6 +100,7 @@ class SQLCipherDatabaseSyncTests( db = self.create_database_for_role(replica_uid, sync_role) if sync_role: self._use_tracking[db] = (replica_uid, sync_role) + self.addCleanup(db.close) return db def create_database_for_role(self, replica_uid, sync_role): diff --git a/common/src/leap/soledad/common/tests/util.py b/common/src/leap/soledad/common/tests/util.py index 3187472f..617cccef 100644 --- a/common/src/leap/soledad/common/tests/util.py +++ b/common/src/leap/soledad/common/tests/util.py @@ -356,7 +356,7 @@ class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest): """ current_dbs = set([db for db in self.couch_server]) remaining_dbs = current_dbs - self.previous_dbs - if remaining_dbs and False: + if remaining_dbs: raise Exception("tests created %s and didn't clean up!", remaining_dbs) def delete_db(self, name): -- cgit v1.2.3 From 3f5003afee45c02bb9668c150506447cf0d2f52c Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Sun, 13 Sep 2015 03:27:12 -0300 Subject: [tests] test_couch does not need a server Removing unused code as this test case does not need a server. --- common/src/leap/soledad/common/tests/test_couch.py | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index 28f90e5d..ae7933c3 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -43,7 +43,6 @@ from leap.soledad.common.tests.util import sync_via_synchronizer from leap.soledad.common.tests.u1db_tests import test_backends from leap.soledad.common.tests.u1db_tests import DatabaseBaseTests -from leap.soledad.common.tests.u1db_tests import TestCaseWithServer from u1db.backends.inmemory import InMemoryIndex @@ -208,7 +207,6 @@ nested_doc = tests.nested_doc class CouchDatabaseSyncTargetTests( TestWithScenarios, DatabaseBaseTests, - TestCaseWithServer, CouchDBTestCase): # TODO: implement _set_trace_hook(_shallow) in CouchSyncTarget so @@ -231,20 +229,11 @@ class CouchDatabaseSyncTargetTests( def setUp(self): CouchDBTestCase.setUp(self) - # from TestCaseWithServer.setUp - self.server = self.server_thread = self.port = None # other stuff self.db, self.st = self.create_db_and_target(self) self.other_changes = [] def tearDown(self): - # from TestCaseWithServer.tearDown - if self.server is not None: - self.server.shutdown() - self.server_thread.join() - self.server.server_close() - if self.port: - self.port.stopListening() self.db.close() CouchDBTestCase.tearDown(self) -- cgit v1.2.3 From cd4c4f868e1c10e44f825efc0e870edf9fe8e2c1 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 14 Sep 2015 11:21:24 -0300 Subject: [style] pep8 fixes --- common/src/leap/soledad/common/tests/test_couch.py | 6 ++++-- common/src/leap/soledad/common/tests/test_sqlcipher_sync.py | 6 ++++-- common/src/leap/soledad/common/tests/test_sync_target.py | 7 ++++--- common/src/leap/soledad/common/tests/util.py | 3 ++- 4 files changed, 14 insertions(+), 8 deletions(-) (limited to 'common/src') diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index ae7933c3..845f1602 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -1285,7 +1285,8 @@ class CouchDatabaseSyncTests( self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2') 
self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3') self.assertRaises( - u1db_errors.InvalidTransactionId, self.sync, self.db1_copy, self.db2) + u1db_errors.InvalidTransactionId, self.sync, + self.db1_copy, self.db2) def test_sync_detects_rollback_and_divergence_in_target(self): self.db1 = self.create_database('test1', 'source') @@ -1299,7 +1300,8 @@ class CouchDatabaseSyncTests( self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2') self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3') self.assertRaises( - u1db_errors.InvalidTransactionId, self.sync, self.db1, self.db2_copy) + u1db_errors.InvalidTransactionId, self.sync, + self.db1, self.db2_copy) def test_optional_sync_preserve_json(self): self.db1 = self.create_database('test1', 'source') diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py index 2e703023..439fc070 100644 --- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py +++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py @@ -35,7 +35,8 @@ from leap.soledad.client.crypto import decrypt_doc_dict from leap.soledad.common.tests import u1db_tests as tests from leap.soledad.common.tests.test_sqlcipher import SQLCIPHER_SCENARIOS from leap.soledad.common.tests.util import make_soledad_app -from leap.soledad.common.tests.test_sync_target import SoledadDatabaseSyncTargetTests +from leap.soledad.common.tests.test_sync_target import \ + SoledadDatabaseSyncTargetTests from leap.soledad.common.tests.util import soledad_sync_target from leap.soledad.common.tests.util import BaseSoledadTest @@ -713,7 +714,8 @@ def make_local_db_and_soledad_target( test, path='test', source_replica_uid=uuid4().hex): test.startTwistedServer() - db = test.request_state._create_database(replica_uid=os.path.basename(path)) + replica_uid = os.path.basename(path) + db = test.request_state._create_database(replica_uid) sync_db = test._soledad._sync_db sync_enc_pool = test._soledad._sync_enc_pool st = soledad_sync_target( diff --git a/common/src/leap/soledad/common/tests/test_sync_target.py b/common/src/leap/soledad/common/tests/test_sync_target.py index ba556ea4..c0987e90 100644 --- a/common/src/leap/soledad/common/tests/test_sync_target.py +++ b/common/src/leap/soledad/common/tests/test_sync_target.py @@ -150,7 +150,8 @@ def make_local_db_and_soledad_target( test, path='test', source_replica_uid=uuid4().hex): test.startTwistedServer() - db = test.request_state._create_database(replica_uid=os.path.basename(path)) + replica_uid = os.path.basename(path) + db = test.request_state._create_database(replica_uid) sync_db = test._soledad._sync_db sync_enc_pool = test._soledad._sync_enc_pool st = soledad_sync_target( @@ -360,8 +361,8 @@ class TestSoledadSyncTarget( remote_target = self.getSyncTarget( source_replica_uid='other-id') yield remote_target.record_sync_info('other-id', 2, 'T-transid') - self.assertEqual( - (2, 'T-transid'), self.db2._get_replica_gen_and_trans_id('other-id')) + self.assertEqual((2, 'T-transid'), + self.db2._get_replica_gen_and_trans_id('other-id')) @defer.inlineCallbacks def test_sync_exchange_receive(self): diff --git a/common/src/leap/soledad/common/tests/util.py b/common/src/leap/soledad/common/tests/util.py index 617cccef..41307eb7 100644 --- a/common/src/leap/soledad/common/tests/util.py +++ b/common/src/leap/soledad/common/tests/util.py @@ -357,7 +357,8 @@ class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest): current_dbs = set([db for db in 
         remaining_dbs = current_dbs - self.previous_dbs
         if remaining_dbs:
-            raise Exception("tests created %s and didn't clean up!", remaining_dbs)
+            raise Exception("tests created %s and didn't clean up!",
+                            remaining_dbs)
 
     def delete_db(self, name):
         try:
-- cgit v1.2.3


From 223f26eb2df3378ce275da2774a7ae923519afa1 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Mon, 14 Sep 2015 17:35:35 -0300
Subject: [tests] remove remaining dbs check

This was used during db isolation to make sure that everything created was
destroyed, but it fails with -j (multiprocess). Removing it allows
parallelism.
---
 common/src/leap/soledad/common/tests/util.py | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)
(limited to 'common/src')

diff --git a/common/src/leap/soledad/common/tests/util.py b/common/src/leap/soledad/common/tests/util.py
index 41307eb7..1c7adb91 100644
--- a/common/src/leap/soledad/common/tests/util.py
+++ b/common/src/leap/soledad/common/tests/util.py
@@ -345,20 +345,9 @@ class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest):
         """
         Make sure we have a CouchDB instance for a test.
         """
-        server = self.couch_server = couchdb.Server()
-        self.previous_dbs = set([db for db in server])
         self.couch_port = 5984
         self.couch_url = 'http://localhost:%d' % self.couch_port
-
-    def tearDown(self):
-        """
-        Stop CouchDB instance for test.
-        """
-        current_dbs = set([db for db in self.couch_server])
-        remaining_dbs = current_dbs - self.previous_dbs
-        if remaining_dbs:
-            raise Exception("tests created %s and didn't clean up!",
-                            remaining_dbs)
+        self.couch_server = couchdb.Server(self.couch_url)
 
     def delete_db(self, name):
         try:
-- cgit v1.2.3


From 8acc8c9058ff5983a78d6df4f66d19de1762cf4d Mon Sep 17 00:00:00 2001
From: Ruben Pollan
Date: Wed, 16 Sep 2015 12:38:52 +0200
Subject: [test] fix tests with the new emit_async

---
 .../src/leap/soledad/common/tests/test_soledad.py | 60 +++++++++++-----------
 1 file changed, 30 insertions(+), 30 deletions(-)
(limited to 'common/src')

diff --git a/common/src/leap/soledad/common/tests/test_soledad.py b/common/src/leap/soledad/common/tests/test_soledad.py
index bd356858..85d6734e 100644
--- a/common/src/leap/soledad/common/tests/test_soledad.py
+++ b/common/src/leap/soledad/common/tests/test_soledad.py
@@ -223,7 +223,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
     def setUp(self):
         # mock signaling
         soledad.client.signal = Mock()
-        soledad.client.secrets.events.emit = Mock()
+        soledad.client.secrets.events.emit_async = Mock()
         # run parent's setUp
         BaseSoledadTest.setUp(self)
@@ -245,57 +245,57 @@ class SoledadSignalingTestCase(BaseSoledadTest):
         - downloading keys / done downloading keys.
         - uploading keys / done uploading keys.
         """
-        soledad.client.secrets.events.emit.reset_mock()
+        soledad.client.secrets.events.emit_async.reset_mock()
         # get a fresh instance so it emits all bootstrap signals
         sol = self._soledad_instance(
             secrets_path='alternative_stage3.json',
             local_db_path='alternative_stage3.u1db')
         # reverse call order so we can verify in the order the signals were
         # expected
-        soledad.client.secrets.events.emit.mock_calls.reverse()
-        soledad.client.secrets.events.emit.call_args = \
-            soledad.client.secrets.events.emit.call_args_list[0]
-        soledad.client.secrets.events.emit.call_args_list.reverse()
+        soledad.client.secrets.events.emit_async.mock_calls.reverse()
+        soledad.client.secrets.events.emit_async.call_args = \
+            soledad.client.secrets.events.emit_async.call_args_list[0]
+        soledad.client.secrets.events.emit_async.call_args_list.reverse()
         # downloading keys signals
-        soledad.client.secrets.events.emit.assert_called_with(
+        soledad.client.secrets.events.emit_async.assert_called_with(
             catalog.SOLEDAD_DOWNLOADING_KEYS,
             ADDRESS,
         )
-        self._pop_mock_call(soledad.client.secrets.events.emit)
-        soledad.client.secrets.events.emit.assert_called_with(
+        self._pop_mock_call(soledad.client.secrets.events.emit_async)
+        soledad.client.secrets.events.emit_async.assert_called_with(
             catalog.SOLEDAD_DONE_DOWNLOADING_KEYS,
             ADDRESS,
         )
         # creating keys signals
-        self._pop_mock_call(soledad.client.secrets.events.emit)
-        soledad.client.secrets.events.emit.assert_called_with(
+        self._pop_mock_call(soledad.client.secrets.events.emit_async)
+        soledad.client.secrets.events.emit_async.assert_called_with(
             catalog.SOLEDAD_CREATING_KEYS,
             ADDRESS,
         )
-        self._pop_mock_call(soledad.client.secrets.events.emit)
-        soledad.client.secrets.events.emit.assert_called_with(
+        self._pop_mock_call(soledad.client.secrets.events.emit_async)
+        soledad.client.secrets.events.emit_async.assert_called_with(
             catalog.SOLEDAD_DONE_CREATING_KEYS,
             ADDRESS,
         )
         # downloading once more (inside _put_keys_in_shared_db)
-        self._pop_mock_call(soledad.client.secrets.events.emit)
-        soledad.client.secrets.events.emit.assert_called_with(
+        self._pop_mock_call(soledad.client.secrets.events.emit_async)
+        soledad.client.secrets.events.emit_async.assert_called_with(
             catalog.SOLEDAD_DOWNLOADING_KEYS,
             ADDRESS,
         )
-        self._pop_mock_call(soledad.client.secrets.events.emit)
-        soledad.client.secrets.events.emit.assert_called_with(
+        self._pop_mock_call(soledad.client.secrets.events.emit_async)
+        soledad.client.secrets.events.emit_async.assert_called_with(
            catalog.SOLEDAD_DONE_DOWNLOADING_KEYS,
            ADDRESS,
        )
         # uploading keys signals
-        self._pop_mock_call(soledad.client.secrets.events.emit)
-        soledad.client.secrets.events.emit.assert_called_with(
+        self._pop_mock_call(soledad.client.secrets.events.emit_async)
+        soledad.client.secrets.events.emit_async.assert_called_with(
             catalog.SOLEDAD_UPLOADING_KEYS,
             ADDRESS,
         )
-        self._pop_mock_call(soledad.client.secrets.events.emit)
-        soledad.client.secrets.events.emit.assert_called_with(
+        self._pop_mock_call(soledad.client.secrets.events.emit_async)
+        soledad.client.secrets.events.emit_async.assert_called_with(
             catalog.SOLEDAD_DONE_UPLOADING_KEYS,
             ADDRESS,
         )
@@ -316,7 +316,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
         doc.content = sol.secrets._export_recovery_document()
         sol.close()
         # reset mock
-        soledad.client.secrets.events.emit.reset_mock()
+        soledad.client.secrets.events.emit_async.reset_mock()
         # get a fresh instance so it emits all bootstrap signals
         shared_db = self.get_default_shared_mock(get_doc_return_value=doc)
         sol = self._soledad_instance(
@@ -325,17 +325,17 @@ class SoledadSignalingTestCase(BaseSoledadTest):
             shared_db_class=shared_db)
         # reverse call order so we can verify in the order the signals were
         # expected
-        soledad.client.secrets.events.emit.mock_calls.reverse()
-        soledad.client.secrets.events.emit.call_args = \
-            soledad.client.secrets.events.emit.call_args_list[0]
-        soledad.client.secrets.events.emit.call_args_list.reverse()
+        soledad.client.secrets.events.emit_async.mock_calls.reverse()
+        soledad.client.secrets.events.emit_async.call_args = \
+            soledad.client.secrets.events.emit_async.call_args_list[0]
+        soledad.client.secrets.events.emit_async.call_args_list.reverse()
         # assert download keys signals
-        soledad.client.secrets.events.emit.assert_called_with(
+        soledad.client.secrets.events.emit_async.assert_called_with(
             catalog.SOLEDAD_DOWNLOADING_KEYS,
             ADDRESS,
         )
-        self._pop_mock_call(soledad.client.secrets.events.emit)
-        soledad.client.secrets.events.emit.assert_called_with(
+        self._pop_mock_call(soledad.client.secrets.events.emit_async)
+        soledad.client.secrets.events.emit_async.assert_called_with(
             catalog.SOLEDAD_DONE_DOWNLOADING_KEYS,
             ADDRESS,
         )
@@ -369,7 +369,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
         yield sol.sync()
 
         # assert the signal has been emitted
-        soledad.client.events.emit.assert_called_with(
+        soledad.client.events.emit_async.assert_called_with(
             catalog.SOLEDAD_DONE_DATA_SYNC,
             ADDRESS,
         )
-- cgit v1.2.3


From ca7f6737e03143ae80add83d41860246f5459eae Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Thu, 17 Sep 2015 19:30:15 -0300
Subject: [bug] ensure needs to check into all design docs

This code only checks for 'docs' presence, while we have 3 design
documents. If one of them is missing, but 'docs' is not, then it will not
ensure the others. This is needed to properly ensure ddocs on create
command line script.
---
 common/src/leap/soledad/common/couch.py | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)
(limited to 'common/src')

diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py
index 82b5aca8..1c762036 100644
--- a/common/src/leap/soledad/common/couch.py
+++ b/common/src/leap/soledad/common/couch.py
@@ -485,13 +485,10 @@ class CouchDatabase(CommonBackend):
         Ensure that the design documents used by the backend exist on the
         couch database.
         """
-        # we check for existence of one of the files, and put all of them if
-        # that one does not exist
-        try:
-            self._database['_design/docs']
-            return
-        except ResourceNotFound:
-            for ddoc_name in ['docs', 'syncs', 'transactions']:
+        for ddoc_name in ['docs', 'syncs', 'transactions']:
+            try:
+                self._database.info(ddoc_name)
+            except ResourceNotFound:
                 ddoc = json.loads(
                     binascii.a2b_base64(
                         getattr(ddocs, ddoc_name)))
-- cgit v1.2.3


From 5b0e854096f35655037f4da07031741087155a7d Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Thu, 17 Sep 2015 19:40:49 -0300
Subject: [tests] test for ensure ddocs independently

This tests the previous fix on ensuring a db that is missing a doc other
than 'docs'.
---
 common/src/leap/soledad/common/tests/test_couch.py | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)
(limited to 'common/src')

diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py
index 845f1602..a08ffd16 100644
--- a/common/src/leap/soledad/common/tests/test_couch.py
+++ b/common/src/leap/soledad/common/tests/test_couch.py
@@ -1320,8 +1320,9 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
     def setUp(self):
         CouchDBTestCase.setUp(self)
 
-    def create_db(self, ensure=True):
-        dbname = ('test-%s' % uuid4().hex)
+    def create_db(self, ensure=True, dbname=None):
+        if not dbname:
+            dbname = ('test-%s' % uuid4().hex)
         self.db = couch.CouchDatabase.open_database(
             urljoin('http://127.0.0.1:%d' % self.couch_port, dbname),
             create=True,
@@ -1492,3 +1493,16 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
         self.assertRaises(
             errors.MissingDesignDocDeletedError,
             self.db._do_set_replica_gen_and_trans_id, 1, 2, 3)
+
+    def test_ensure_ddoc_independently(self):
+        """
+        Test that a missing ddocs other than _design/docs will be ensured
+        even if _design/docs is there.
+        """
+        self.create_db(ensure=True)
+        del self.db._database['_design/transactions']
+        self.assertRaises(
+            errors.MissingDesignDocDeletedError,
+            self.db._get_transaction_log)
+        self.create_db(ensure=True, dbname=self.db._dbname)
+        self.db._get_transaction_log()
-- cgit v1.2.3
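
The last two patches above change ensure_ddocs to verify each of the three design documents ('docs', 'syncs' and 'transactions') independently instead of returning early when '_design/docs' exists. Below is a minimal standalone sketch of the same kind of check done from outside the backend; it is not part of the patched code, and it assumes the python-couchdb package, a CouchDB server reachable at the default local URL, and a placeholder database name.

# Sketch: report which of Soledad's expected design documents are missing
# from a CouchDB database. COUCH_URL and DB_NAME are assumptions for the
# example, not values taken from the patches above.
import couchdb

COUCH_URL = 'http://127.0.0.1:5984'   # assumed local CouchDB instance
DB_NAME = 'user-db-example'           # hypothetical database name


def missing_design_docs(db):
    """Return the names of expected design docs that are not in `db`."""
    expected = ['docs', 'syncs', 'transactions']
    return [name for name in expected
            if db.get('_design/%s' % name) is None]


if __name__ == '__main__':
    server = couchdb.Server(COUCH_URL)
    db = server[DB_NAME]
    missing = missing_design_docs(db)
    if missing:
        print('missing design docs: %s' % ', '.join(missing))
    else:
        print('all design docs present')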