author     Kali Kaneko <kali@leap.se>  2015-09-24 15:23:34 -0400
committer  Kali Kaneko <kali@leap.se>  2015-09-24 15:23:34 -0400
commit     67d341b062640ace095fae835107ec677e9d7cae (patch)
tree       0eaa17436ea5990dcae0737119050fa0db8f471a /common/src
parent     363d960c3feddb93a0f660075d9b4b33f3713882 (diff)
parent     4be6f05d91891122e83f74d21c83c5f8fcd3a618 (diff)
Merge tag '0.7.3' into debian/experimental
Tag leap.soledad version 0.7.3
Diffstat (limited to 'common/src')
-rw-r--r--  common/src/leap/soledad/common/couch.py                                354
-rw-r--r--  common/src/leap/soledad/common/tests/test_couch.py                     208
-rw-r--r--  common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py  10
-rw-r--r--  common/src/leap/soledad/common/tests/test_server.py                      26
-rw-r--r--  common/src/leap/soledad/common/tests/test_soledad.py                     60
-rw-r--r--  common/src/leap/soledad/common/tests/test_sqlcipher_sync.py             420
-rw-r--r--  common/src/leap/soledad/common/tests/test_sync.py                        28
-rw-r--r--  common/src/leap/soledad/common/tests/test_sync_deferred.py               24
-rw-r--r--  common/src/leap/soledad/common/tests/test_sync_mutex.py                   8
-rw-r--r--  common/src/leap/soledad/common/tests/test_sync_target.py                 56
-rw-r--r--  common/src/leap/soledad/common/tests/util.py                             169
11 files changed, 353 insertions, 1010 deletions
diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py
index 6c28e0be..1c762036 100644
--- a/common/src/leap/soledad/common/couch.py
+++ b/common/src/leap/soledad/common/couch.py
@@ -87,8 +87,7 @@ class CouchDocument(SoledadDocument):
atomic and consistent update of the database.
"""
- def __init__(self, doc_id=None, rev=None, json='{}', has_conflicts=False,
- syncable=True):
+ def __init__(self, doc_id=None, rev=None, json='{}', has_conflicts=False):
"""
Container for handling a document that is stored in couch backend.
@@ -100,27 +99,10 @@ class CouchDocument(SoledadDocument):
:type json: str
:param has_conflicts: Boolean indicating if this document has conflicts
:type has_conflicts: bool
- :param syncable: Should this document be synced with remote replicas?
- :type syncable: bool
"""
SoledadDocument.__init__(self, doc_id, rev, json, has_conflicts)
- self._couch_rev = None
- self._conflicts = None
- self._transactions = None
-
- def _ensure_fetch_conflicts(self, get_conflicts_fun):
- """
- Ensure conflict data has been fetched from the server.
-
- :param get_conflicts_fun: A function which, given the document id and
- the couch revision, return the conflicted
- versions of the current document.
- :type get_conflicts_fun: function
- """
- if self._conflicts is None:
- self._conflicts = get_conflicts_fun(self.doc_id,
- couch_rev=self.couch_rev)
- self.has_conflicts = len(self._conflicts) > 0
+ self.couch_rev = None
+ self.transactions = None
def get_conflicts(self):
"""
@@ -149,7 +131,7 @@ class CouchDocument(SoledadDocument):
:type doc: CouchDocument
"""
if self._conflicts is None:
- raise Exception("Run self._ensure_fetch_conflicts first!")
+ raise Exception("Fetch conflicts first!")
self._conflicts.append(doc)
self.has_conflicts = len(self._conflicts) > 0
@@ -161,27 +143,48 @@ class CouchDocument(SoledadDocument):
:type conflict_revs: [str]
"""
if self._conflicts is None:
- raise Exception("Run self._ensure_fetch_conflicts first!")
+ raise Exception("Fetch conflicts first!")
self._conflicts = filter(
lambda doc: doc.rev not in conflict_revs,
self._conflicts)
self.has_conflicts = len(self._conflicts) > 0
- def _get_couch_rev(self):
- return self._couch_rev
-
- def _set_couch_rev(self, rev):
- self._couch_rev = rev
-
- couch_rev = property(_get_couch_rev, _set_couch_rev)
-
- def _get_transactions(self):
- return self._transactions
+ def update(self, new_doc):
+ # update info
+ self.rev = new_doc.rev
+ if new_doc.is_tombstone():
+ self.make_tombstone()
+ else:
+ self.content = new_doc.content
+ self.has_conflicts = new_doc.has_conflicts
- def _set_transactions(self, rev):
- self._transactions = rev
+ def prune_conflicts(self, doc_vcr, autoresolved_increment):
+ """
+ Prune conflicts that are older than the current document's revision, or
+ whose content matches the current document's content.
+ Originally in u1db.CommonBackend.
- transactions = property(_get_transactions, _set_transactions)
+ :param doc_vcr: A vector clock representing the current document's
+ revision.
+ :type doc_vcr: u1db.vectorclock.VectorClock
+ :param autoresolved_increment: The replica uid with which to increment
+ the vector clock upon autoresolution.
+ :type autoresolved_increment: str
+ """
+ if self.has_conflicts:
+ autoresolved = False
+ c_revs_to_prune = []
+ for c_doc in self._conflicts:
+ c_vcr = vectorclock.VectorClockRev(c_doc.rev)
+ if doc_vcr.is_newer(c_vcr):
+ c_revs_to_prune.append(c_doc.rev)
+ elif self.same_content_as(c_doc):
+ c_revs_to_prune.append(c_doc.rev)
+ doc_vcr.maximize(c_vcr)
+ autoresolved = True
+ if autoresolved:
+ doc_vcr.increment(autoresolved_increment)
+ self.rev = doc_vcr.as_str()
+ self.delete_conflicts(c_revs_to_prune)
# monkey-patch the u1db http app to use CouchDocument
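The two methods added above move document update and conflict pruning from CouchDatabase onto CouchDocument itself. A minimal usage sketch of prune_conflicts, assuming the package is importable and that set_conflicts (defined elsewhere in this file) also refreshes has_conflicts; the document values are illustrative:

    from u1db import vectorclock
    from leap.soledad.common.couch import CouchDocument

    # an incoming revision that supersedes a stale conflict
    doc = CouchDocument(doc_id='the-doc', rev='replica:2', json='{"a": 1}')
    doc.set_conflicts(
        [CouchDocument(doc_id='the-doc', rev='replica:1', json='{"a": 0}')])

    # conflicts older than the doc's vector clock are dropped; equal-content
    # conflicts are autoresolved, bumping the revision for 'replica'
    doc.prune_conflicts(vectorclock.VectorClockRev(doc.rev), 'replica')
    assert not doc.has_conflicts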
@@ -482,13 +485,10 @@ class CouchDatabase(CommonBackend):
Ensure that the design documents used by the backend exist on the
couch database.
"""
- # we check for existence of one of the files, and put all of them if
- # that one does not exist
- try:
- self._database['_design/docs']
- return
- except ResourceNotFound:
- for ddoc_name in ['docs', 'syncs', 'transactions']:
+ for ddoc_name in ['docs', 'syncs', 'transactions']:
+ try:
+ self._database.info(ddoc_name)
+ except ResourceNotFound:
ddoc = json.loads(
binascii.a2b_base64(
getattr(ddocs, ddoc_name)))
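The design documents are now ensured one by one: each of 'docs', 'syncs' and 'transactions' is probed with a _design/<name>/_info request and recreated only if missing, instead of assuming that the presence of _design/docs implies the other two exist. A condensed restatement of the pattern, with ddoc_sources standing in for the base64-encoded design documents the real code decodes:

    from couchdb.http import ResourceNotFound

    def ensure_ddocs(database, ddoc_sources):
        # probe each design document independently; recreate missing ones
        for name in ['docs', 'syncs', 'transactions']:
            try:
                database.info(name)  # GET /<db>/_design/<name>/_info
            except ResourceNotFound:
                database['_design/%s' % name] = ddoc_sources[name]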
@@ -750,7 +750,6 @@ class CouchDatabase(CommonBackend):
if check_for_conflicts \
and '_attachments' in result \
and 'u1db_conflicts' in result['_attachments']:
- doc.has_conflicts = True
doc.set_conflicts(
self._build_conflicts(
doc.doc_id,
@@ -1044,7 +1043,7 @@ class CouchDatabase(CommonBackend):
conflicts.append(doc)
return conflicts
- def _get_conflicts(self, doc_id, couch_rev=None):
+ def get_doc_conflicts(self, doc_id, couch_rev=None):
"""
Get the conflicted versions of a document.
@@ -1059,32 +1058,21 @@ class CouchDatabase(CommonBackend):
"""
# request conflicts attachment from server
params = {}
+ conflicts = []
if couch_rev is not None:
params['rev'] = couch_rev  # restrict to document's couch revision
+ else:
+ # TODO: move into resource logic!
+ first_entry = self._get_doc(doc_id, check_for_conflicts=True)
+ conflicts.append(first_entry)
resource = self._database.resource(doc_id, 'u1db_conflicts')
try:
response = resource.get_json(**params)
- return self._build_conflicts(
+ return conflicts + self._build_conflicts(
doc_id, json.loads(response[2].read()))
except ResourceNotFound:
return []
- def get_doc_conflicts(self, doc_id):
- """
- Get the list of conflicts for the given document.
-
- The order of the conflicts is such that the first entry is the value
- that would be returned by "get_doc".
-
- :return: A list of the document entries that are conflicted.
- :rtype: [CouchDocument]
- """
- conflict_docs = self._get_conflicts(doc_id)
- if len(conflict_docs) == 0:
- return []
- this_doc = self._get_doc(doc_id, check_for_conflicts=True)
- return [this_doc] + conflict_docs
-
def _get_replica_gen_and_trans_id(self, other_replica_uid):
"""
Return the last known generation and transaction id for the other db
@@ -1140,9 +1128,11 @@ class CouchDatabase(CommonBackend):
:param sync_id: The id of the current sync session.
:type sync_id: str
"""
- self._do_set_replica_gen_and_trans_id(
- other_replica_uid, other_generation, other_transaction_id,
- number_of_docs=number_of_docs, doc_idx=doc_idx, sync_id=sync_id)
+ if other_replica_uid is not None and other_generation is not None:
+ self._do_set_replica_gen_and_trans_id(
+ other_replica_uid, other_generation, other_transaction_id,
+ number_of_docs=number_of_docs, doc_idx=doc_idx,
+ sync_id=sync_id)
def _do_set_replica_gen_and_trans_id(
self, other_replica_uid, other_generation, other_transaction_id,
@@ -1206,70 +1196,6 @@ class CouchDatabase(CommonBackend):
except ResourceNotFound as e:
raise_missing_design_doc_error(e, ddoc_path)
- def _add_conflict(self, doc, my_doc_rev, my_content):
- """
- Add a conflict to the document.
-
- Note that this method does not actually update the backend; rather, it
- updates the CouchDocument object which will provide the conflict data
- when the atomic document update is made.
-
- :param doc: The document to have conflicts added to.
- :type doc: CouchDocument
- :param my_doc_rev: The revision of the conflicted document.
- :type my_doc_rev: str
- :param my_content: The content of the conflicted document as a JSON
- serialized string.
- :type my_content: str
- """
- doc._ensure_fetch_conflicts(self._get_conflicts)
- doc.add_conflict(
- self._factory(doc_id=doc.doc_id, rev=my_doc_rev,
- json=my_content))
-
- def _delete_conflicts(self, doc, conflict_revs):
- """
- Delete the conflicted revisions from the list of conflicts of C{doc}.
-
- Note that this method does not actually update the backend; rather, it
- updates the CouchDocument object which will provide the conflict data
- when the atomic document update is made.
-
- :param doc: The document to have conflicts deleted.
- :type doc: CouchDocument
- :param conflict_revs: A list of the revisions to be deleted.
- :param conflict_revs: [str]
- """
- doc._ensure_fetch_conflicts(self._get_conflicts)
- doc.delete_conflicts(conflict_revs)
-
- def _prune_conflicts(self, doc, doc_vcr):
- """
- Prune conflicts that are older then the current document's revision, or
- whose content match to the current document's content.
-
- :param doc: The document to have conflicts pruned.
- :type doc: CouchDocument
- :param doc_vcr: A vector clock representing the current document's
- revision.
- :type doc_vcr: u1db.vectorclock.VectorClock
- """
- if doc.has_conflicts is True:
- autoresolved = False
- c_revs_to_prune = []
- for c_doc in doc.get_conflicts():
- c_vcr = vectorclock.VectorClockRev(c_doc.rev)
- if doc_vcr.is_newer(c_vcr):
- c_revs_to_prune.append(c_doc.rev)
- elif doc.same_content_as(c_doc):
- c_revs_to_prune.append(c_doc.rev)
- doc_vcr.maximize(c_vcr)
- autoresolved = True
- if autoresolved:
- doc_vcr.increment(self._replica_uid)
- doc.rev = doc_vcr.as_str()
- self._delete_conflicts(doc, c_revs_to_prune)
-
def _force_doc_sync_conflict(self, doc):
"""
Add a conflict and force a document put.
@@ -1278,9 +1204,9 @@ class CouchDatabase(CommonBackend):
:type doc: CouchDocument
"""
my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True)
- self._prune_conflicts(doc, vectorclock.VectorClockRev(doc.rev))
- self._add_conflict(doc, my_doc.rev, my_doc.get_json())
- doc.has_conflicts = True
+ doc.prune_conflicts(
+ vectorclock.VectorClockRev(doc.rev), self._replica_uid)
+ doc.add_conflict(my_doc)
self._put_doc(my_doc, doc)
def resolve_doc(self, doc, conflicted_doc_revs):
@@ -1325,14 +1251,14 @@ class CouchDatabase(CommonBackend):
# the newer doc version will supersede the one in the database, so
# we copy conflicts before updating the backend.
doc.set_conflicts(cur_doc.get_conflicts()) # copy conflicts over.
- self._delete_conflicts(doc, superseded_revs)
+ doc.delete_conflicts(superseded_revs)
self._put_doc(cur_doc, doc)
else:
# the newer doc version does not supersede the one in the
# database, so we will add a conflict to the database and copy
# those over to the document the user has in her hands.
- self._add_conflict(cur_doc, new_rev, doc.get_json())
- self._delete_conflicts(cur_doc, superseded_revs)
+ cur_doc.add_conflict(doc)
+ cur_doc.delete_conflicts(superseded_revs)
self._put_doc(cur_doc, cur_doc) # just update conflicts
# backend has been updated with current conflicts, now copy them
# to the current document.
@@ -1392,65 +1318,33 @@ class CouchDatabase(CommonBackend):
'converged', at_gen is the insertion/current generation.
:rtype: (str, int)
"""
- cur_doc = self._get_doc(doc.doc_id, check_for_conflicts=True)
- # at this point, `doc` has arrived from the other syncing party, and
- # we will decide what to do with it.
- # First, we prepare the arriving doc to update couch database.
- old_doc = doc
- doc = self._factory(doc.doc_id, doc.rev, doc.get_json())
- if cur_doc is not None:
- doc.couch_rev = cur_doc.couch_rev
- # fetch conflicts because we will eventually manipulate them
- doc._ensure_fetch_conflicts(self._get_conflicts)
- # from now on, it works just like u1db sqlite backend
- doc_vcr = vectorclock.VectorClockRev(doc.rev)
- if cur_doc is None:
- cur_vcr = vectorclock.VectorClockRev(None)
- else:
- cur_vcr = vectorclock.VectorClockRev(cur_doc.rev)
- self._validate_source(replica_uid, replica_gen, replica_trans_id)
- if doc_vcr.is_newer(cur_vcr):
- rev = doc.rev
- self._prune_conflicts(doc, doc_vcr)
- if doc.rev != rev:
- # conflicts have been autoresolved
- state = 'superseded'
- else:
- state = 'inserted'
- self._put_doc(cur_doc, doc)
- elif doc.rev == cur_doc.rev:
- # magical convergence
- state = 'converged'
- elif cur_vcr.is_newer(doc_vcr):
- # Don't add this to seen_ids, because we have something newer,
- # so we should send it back, and we should not generate a
- # conflict
- state = 'superseded'
- elif cur_doc.same_content_as(doc):
- # the documents have been edited to the same thing at both ends
- doc_vcr.maximize(cur_vcr)
- doc_vcr.increment(self._replica_uid)
- doc.rev = doc_vcr.as_str()
- self._put_doc(cur_doc, doc)
- state = 'superseded'
- else:
- state = 'conflicted'
- if save_conflict:
- self._force_doc_sync_conflict(doc)
- if replica_uid is not None and replica_gen is not None:
- self._set_replica_gen_and_trans_id(
- replica_uid, replica_gen, replica_trans_id,
- number_of_docs=number_of_docs, doc_idx=doc_idx,
- sync_id=sync_id)
- # update info
- old_doc.rev = doc.rev
- if doc.is_tombstone():
- old_doc.is_tombstone()
- else:
- old_doc.content = doc.content
- old_doc.has_conflicts = doc.has_conflicts
+ if not isinstance(doc, CouchDocument):
+ doc = self._factory(doc.doc_id, doc.rev, doc.get_json())
+ self._save_source_info(replica_uid, replica_gen,
+ replica_trans_id, number_of_docs,
+ doc_idx, sync_id)
+ my_doc = self._get_doc(doc.doc_id, check_for_conflicts=True)
+ if my_doc is not None:
+ my_doc.set_conflicts(
+ self.get_doc_conflicts(my_doc.doc_id, my_doc.couch_rev))
+ state, save_doc = _process_incoming_doc(
+ my_doc, doc, save_conflict, self.replica_uid)
+ if save_doc:
+ self._put_doc(my_doc, save_doc)
+ doc.update(save_doc)
return state, self._get_generation()
+ def _save_source_info(self, replica_uid, replica_gen, replica_trans_id,
+ number_of_docs, doc_idx, sync_id):
+ """
+ Validate and save source information.
+ """
+ self._validate_source(replica_uid, replica_gen, replica_trans_id)
+ self._set_replica_gen_and_trans_id(
+ replica_uid, replica_gen, replica_trans_id,
+ number_of_docs=number_of_docs, doc_idx=doc_idx,
+ sync_id=sync_id)
+
def get_docs(self, doc_ids, check_for_conflicts=True,
include_deleted=False):
"""
@@ -1495,6 +1389,13 @@ class CouchDatabase(CommonBackend):
continue
yield t._doc
+ def _prune_conflicts(self, doc, doc_vcr):
+ """
+ Overrides the parent's method; the pruning logic now lives in
+ CouchDocument.prune_conflicts for simplicity.
+ """
+ doc.prune_conflicts(doc_vcr, self._replica_uid)
+
def _new_resource(self, *path):
"""
Return a new resource for accessing a couch database.
@@ -1546,7 +1447,7 @@ class CouchServerState(ServerState):
:param couch_url: The URL for the couch database.
:type couch_url: str
"""
- self._couch_url = couch_url
+ self.couch_url = couch_url
def open_database(self, dbname):
"""
@@ -1559,7 +1460,7 @@ class CouchServerState(ServerState):
:rtype: CouchDatabase
"""
return CouchDatabase(
- self._couch_url,
+ self.couch_url,
dbname,
ensure_ddocs=False)
@@ -1594,21 +1495,52 @@ class CouchServerState(ServerState):
"""
raise Unauthorized()
- def _set_couch_url(self, url):
- """
- Set the couchdb URL
-
- :param url: CouchDB URL
- :type url: str
- """
- self._couch_url = url
-
- def _get_couch_url(self):
- """
- Return CouchDB URL
- :rtype: str
- """
- return self._couch_url
-
- couch_url = property(_get_couch_url, _set_couch_url, doc='CouchDB URL')
+def _process_incoming_doc(my_doc, other_doc, save_conflict, replica_uid):
+ """
+ Check the incoming document against the current one, decide the sync
+ state, and return it along with the document to save (or None).
+ """
+ # at this point, `other_doc` has arrived from the other syncing party,
+ # and we will decide what to do with it.
+ # First, we prepare the arriving doc to update the couch database.
+ new_doc = CouchDocument(
+ other_doc.doc_id, other_doc.rev, other_doc.get_json())
+ if my_doc is None:
+ return 'inserted', new_doc
+ new_doc.couch_rev = my_doc.couch_rev
+ new_doc.set_conflicts(my_doc.get_conflicts())
+ # conflicts were fetched above because we may manipulate them;
+ # from now on, it works just like the u1db sqlite backend
+ doc_vcr = vectorclock.VectorClockRev(new_doc.rev)
+ cur_vcr = vectorclock.VectorClockRev(my_doc.rev)
+ if doc_vcr.is_newer(cur_vcr):
+ rev = new_doc.rev
+ new_doc.prune_conflicts(doc_vcr, replica_uid)
+ if new_doc.rev != rev:
+ # conflicts have been autoresolved
+ return 'superseded', new_doc
+ else:
+ return 'inserted', new_doc
+ elif new_doc.rev == my_doc.rev:
+ # magical convergence
+ return 'converged', None
+ elif cur_vcr.is_newer(doc_vcr):
+ # Don't add this to seen_ids, because we have something newer,
+ # so we should send it back, and we should not generate a
+ # conflict
+ other_doc.update(new_doc)
+ return 'superseded', None
+ elif my_doc.same_content_as(new_doc):
+ # the documents have been edited to the same thing at both ends
+ doc_vcr.maximize(cur_vcr)
+ doc_vcr.increment(replica_uid)
+ new_doc.rev = doc_vcr.as_str()
+ return 'superseded', new_doc
+ else:
+ if save_conflict:
+ new_doc.prune_conflicts(
+ vectorclock.VectorClockRev(new_doc.rev), replica_uid)
+ new_doc.add_conflict(my_doc)
+ return 'conflicted', new_doc
+ other_doc.update(new_doc)
+ return 'conflicted', None
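The extracted _process_incoming_doc turns the sync decision into a pure comparison of vector clocks plus a content check. A self-contained toy model of the four outcomes (plain dicts rather than u1db's VectorClockRev, and ignoring the conflict-pruning and save_conflict branches):

    def vclock(rev):
        # 'replica:2|other:1' -> {'replica': 2, 'other': 1}
        return dict((key, int(val)) for key, val in
                    (part.split(':') for part in rev.split('|')))

    def is_newer(a, b):
        # a strictly dominates b: >= on every replica, > on at least one
        keys = set(a) | set(b)
        return (all(a.get(k, 0) >= b.get(k, 0) for k in keys) and
                any(a.get(k, 0) > b.get(k, 0) for k in keys))

    def state(incoming_rev, current_rev, same_content=False):
        inc, cur = vclock(incoming_rev), vclock(current_rev)
        if is_newer(inc, cur):
            return 'inserted'    # may become 'superseded' after pruning
        if inc == cur:
            return 'converged'
        if is_newer(cur, inc):
            return 'superseded'
        return 'superseded' if same_content else 'conflicted'

    assert state('a:2', 'a:1') == 'inserted'
    assert state('a:1', 'a:1') == 'converged'
    assert state('a:1', 'a:2') == 'superseded'
    assert state('a:1|b:1', 'a:2') == 'conflicted'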
diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py
index 468ad8d8..a08ffd16 100644
--- a/common/src/leap/soledad/common/tests/test_couch.py
+++ b/common/src/leap/soledad/common/tests/test_couch.py
@@ -25,6 +25,7 @@ import json
from urlparse import urljoin
from couchdb.client import Server
+from uuid import uuid4
from testscenarios import TestWithScenarios
@@ -42,7 +43,6 @@ from leap.soledad.common.tests.util import sync_via_synchronizer
from leap.soledad.common.tests.u1db_tests import test_backends
from leap.soledad.common.tests.u1db_tests import DatabaseBaseTests
-from leap.soledad.common.tests.u1db_tests import TestCaseWithServer
from u1db.backends.inmemory import InMemoryIndex
@@ -56,8 +56,8 @@ class TestCouchBackendImpl(CouchDBTestCase):
def test__allocate_doc_id(self):
db = couch.CouchDatabase.open_database(
urljoin(
- 'http://localhost:' + str(self.wrapper.port),
- 'u1db_tests'
+ 'http://localhost:' + str(self.couch_port),
+ ('test-%s' % uuid4().hex)
),
create=True,
ensure_ddocs=True)
@@ -66,6 +66,7 @@ class TestCouchBackendImpl(CouchDBTestCase):
self.assertEqual(34, len(doc_id1))
int(doc_id1[len('D-'):], 16)
self.assertNotEqual(doc_id1, db._allocate_doc_id())
+ self.delete_db(db._dbname)
# -----------------------------------------------------------------------------
@@ -73,25 +74,28 @@ class TestCouchBackendImpl(CouchDBTestCase):
# -----------------------------------------------------------------------------
def make_couch_database_for_test(test, replica_uid):
- port = str(test.wrapper.port)
- return couch.CouchDatabase.open_database(
- urljoin('http://localhost:' + port, replica_uid),
+ port = str(test.couch_port)
+ dbname = ('test-%s' % uuid4().hex)
+ db = couch.CouchDatabase.open_database(
+ urljoin('http://localhost:' + port, dbname),
create=True,
replica_uid=replica_uid or 'test',
ensure_ddocs=True)
+ test.addCleanup(test.delete_db, dbname)
+ return db
def copy_couch_database_for_test(test, db):
- port = str(test.wrapper.port)
+ port = str(test.couch_port)
couch_url = 'http://localhost:' + port
- new_dbname = db._replica_uid + '_copy'
+ new_dbname = db._dbname + '_copy'
new_db = couch.CouchDatabase.open_database(
urljoin(couch_url, new_dbname),
create=True,
replica_uid=db._replica_uid or 'test')
# copy all docs
session = couch.Session()
- old_couch_db = Server(couch_url, session=session)[db._replica_uid]
+ old_couch_db = Server(couch_url, session=session)[db._dbname]
new_couch_db = Server(couch_url, session=session)[new_dbname]
for doc_id in old_couch_db:
doc = old_couch_db.get(doc_id)
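The test helpers above switch from fixed database names to a unique name per test, registering deletion via addCleanup as soon as the database exists. The pattern in isolation, with a dict standing in for the couch server so the snippet runs on its own:

    import unittest
    from uuid import uuid4

    SERVER = {}  # stands in for the couch server

    def create_database(name):
        return SERVER.setdefault(name, {})

    def delete_database(name):
        del SERVER[name]

    class UniqueDbTest(unittest.TestCase):

        def make_db(self):
            # a unique name per test avoids cross-test interference
            dbname = 'test-%s' % uuid4().hex
            db = create_database(dbname)
            # addCleanup fires even when the test fails midway
            self.addCleanup(delete_database, dbname)
            return db

        def test_isolated(self):
            db = self.make_db()
            db['doc-1'] = {'a': 1}
            self.assertEqual(1, len(db))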
@@ -143,24 +147,6 @@ class CouchTests(
scenarios = COUCH_SCENARIOS
- def setUp(self):
- test_backends.AllDatabaseTests.setUp(self)
- # save db info because of test_close
- self._url = self.db._url
- self._dbname = self.db._dbname
-
- def tearDown(self):
- # if current test is `test_close` we have to use saved objects to
- # delete the database because the close() method will have removed the
- # references needed to do it using the CouchDatabase.
- if self.id().endswith('test_couch.CouchTests.test_close(couch)'):
- session = couch.Session()
- server = Server(url=self._url, session=session)
- del(server[self._dbname])
- else:
- self.db.delete_database()
- test_backends.AllDatabaseTests.tearDown(self)
-
class CouchDatabaseTests(
TestWithScenarios,
@@ -169,10 +155,6 @@ class CouchDatabaseTests(
scenarios = COUCH_SCENARIOS
- def tearDown(self):
- self.db.delete_database()
- test_backends.LocalDatabaseTests.tearDown(self)
-
class CouchValidateGenNTransIdTests(
TestWithScenarios,
@@ -181,10 +163,6 @@ class CouchValidateGenNTransIdTests(
scenarios = COUCH_SCENARIOS
- def tearDown(self):
- self.db.delete_database()
- test_backends.LocalDatabaseValidateGenNTransIdTests.tearDown(self)
-
class CouchValidateSourceGenTests(
TestWithScenarios,
@@ -193,10 +171,6 @@ class CouchValidateSourceGenTests(
scenarios = COUCH_SCENARIOS
- def tearDown(self):
- self.db.delete_database()
- test_backends.LocalDatabaseValidateSourceGenTests.tearDown(self)
-
class CouchWithConflictsTests(
TestWithScenarios,
@@ -205,10 +179,6 @@ class CouchWithConflictsTests(
scenarios = COUCH_SCENARIOS
- def tearDown(self):
- self.db.delete_database()
- test_backends.LocalDatabaseWithConflictsTests.tearDown(self)
-
# Notice: the CouchDB backend does not have indexing capabilities, so we do
# not test indexing now.
@@ -237,7 +207,6 @@ nested_doc = tests.nested_doc
class CouchDatabaseSyncTargetTests(
TestWithScenarios,
DatabaseBaseTests,
- TestCaseWithServer,
CouchDBTestCase):
# TODO: implement _set_trace_hook(_shallow) in CouchSyncTarget so
@@ -260,26 +229,13 @@ class CouchDatabaseSyncTargetTests(
def setUp(self):
CouchDBTestCase.setUp(self)
- # from DatabaseBaseTests.setUp
- self.db = self.create_database('test')
- # from TestCaseWithServer.setUp
- self.server = self.server_thread = self.port = None
# other stuff
self.db, self.st = self.create_db_and_target(self)
self.other_changes = []
def tearDown(self):
+ self.db.close()
CouchDBTestCase.tearDown(self)
- # from TestCaseWithServer.tearDown
- if self.server is not None:
- self.server.shutdown()
- self.server_thread.join()
- self.server.server_close()
- if self.port:
- self.port.stopListening()
- # from DatabaseBaseTests.tearDown
- if hasattr(self, 'db') and self.db is not None:
- self.db.close()
def receive_doc(self, doc, gen, trans_id):
self.other_changes.append(
@@ -724,17 +680,8 @@ class CouchDatabaseSyncTests(
self.db3, self.db1_copy, self.db2_copy
]:
if db is not None:
- db.delete_database()
+ self.delete_db(db._dbname)
db.close()
- for replica_uid, dbname in [
- ('test1_copy', 'source'),
- ('test2_copy', 'target'),
- ('test3', 'target')
- ]:
- db = self.create_database(replica_uid, dbname)
- db.delete_database()
- # cleanup connections to avoid leaking of file descriptors
- db.close()
DatabaseBaseTests.tearDown(self)
def assertLastExchangeLog(self, db, expected):
@@ -1203,7 +1150,7 @@ class CouchDatabaseSyncTests(
self.db1 = self.create_database('test1', 'both')
self.db2 = self.create_database('test2', 'both')
doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc')
- db3 = self.create_database('test3', 'both')
+ self.db3 = self.create_database('test3', 'both')
self.sync(self.db2, self.db1)
self.assertEqual(
self.db1._get_generation_info(),
@@ -1211,20 +1158,20 @@ class CouchDatabaseSyncTests(
self.assertEqual(
self.db2._get_generation_info(),
self.db1._get_replica_gen_and_trans_id(self.db2._replica_uid))
- self.sync(db3, self.db1)
+ self.sync(self.db3, self.db1)
# update on 2
doc2 = self.make_document('the-doc', doc1.rev, '{"a": 2}')
self.db2.put_doc(doc2)
- self.sync(self.db2, db3)
- self.assertEqual(db3.get_doc('the-doc').rev, doc2.rev)
+ self.sync(self.db2, self.db3)
+ self.assertEqual(self.db3.get_doc('the-doc').rev, doc2.rev)
# update on 1
doc1.set_json('{"a": 3}')
self.db1.put_doc(doc1)
# conflicts
self.sync(self.db2, self.db1)
- self.sync(db3, self.db1)
+ self.sync(self.db3, self.db1)
self.assertTrue(self.db2.get_doc('the-doc').has_conflicts)
- self.assertTrue(db3.get_doc('the-doc').has_conflicts)
+ self.assertTrue(self.db3.get_doc('the-doc').has_conflicts)
# resolve
conflicts = self.db2.get_doc_conflicts('the-doc')
doc4 = self.make_document('the-doc', None, '{"a": 4}')
@@ -1233,38 +1180,38 @@ class CouchDatabaseSyncTests(
doc2 = self.db2.get_doc('the-doc')
self.assertEqual(doc4.get_json(), doc2.get_json())
self.assertFalse(doc2.has_conflicts)
- self.sync(self.db2, db3)
- doc3 = db3.get_doc('the-doc')
+ self.sync(self.db2, self.db3)
+ doc3 = self.db3.get_doc('the-doc')
self.assertEqual(doc4.get_json(), doc3.get_json())
self.assertFalse(doc3.has_conflicts)
def test_sync_supersedes_conflicts(self):
self.db1 = self.create_database('test1', 'both')
self.db2 = self.create_database('test2', 'target')
- db3 = self.create_database('test3', 'both')
+ self.db3 = self.create_database('test3', 'both')
doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc')
self.db2.create_doc_from_json('{"b": 1}', doc_id='the-doc')
- db3.create_doc_from_json('{"c": 1}', doc_id='the-doc')
- self.sync(db3, self.db1)
+ self.db3.create_doc_from_json('{"c": 1}', doc_id='the-doc')
+ self.sync(self.db3, self.db1)
self.assertEqual(
self.db1._get_generation_info(),
- db3._get_replica_gen_and_trans_id(self.db1._replica_uid))
+ self.db3._get_replica_gen_and_trans_id(self.db1._replica_uid))
self.assertEqual(
- db3._get_generation_info(),
- self.db1._get_replica_gen_and_trans_id(db3._replica_uid))
- self.sync(db3, self.db2)
+ self.db3._get_generation_info(),
+ self.db1._get_replica_gen_and_trans_id(self.db3._replica_uid))
+ self.sync(self.db3, self.db2)
self.assertEqual(
self.db2._get_generation_info(),
- db3._get_replica_gen_and_trans_id(self.db2._replica_uid))
+ self.db3._get_replica_gen_and_trans_id(self.db2._replica_uid))
self.assertEqual(
- db3._get_generation_info(),
- self.db2._get_replica_gen_and_trans_id(db3._replica_uid))
- self.assertEqual(3, len(db3.get_doc_conflicts('the-doc')))
+ self.db3._get_generation_info(),
+ self.db2._get_replica_gen_and_trans_id(self.db3._replica_uid))
+ self.assertEqual(3, len(self.db3.get_doc_conflicts('the-doc')))
doc1.set_json('{"a": 2}')
self.db1.put_doc(doc1)
- self.sync(db3, self.db1)
+ self.sync(self.db3, self.db1)
# original doc1 should have been removed from conflicts
- self.assertEqual(3, len(db3.get_doc_conflicts('the-doc')))
+ self.assertEqual(3, len(self.db3.get_doc_conflicts('the-doc')))
def test_sync_stops_after_get_sync_info(self):
self.db1 = self.create_database('test1', 'source')
@@ -1283,79 +1230,78 @@ class CouchDatabaseSyncTests(
self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1')
self.assertRaises(
u1db_errors.InvalidReplicaUID, self.sync, self.db1, self.db2)
- # remove the reference to db2 to avoid double deleting on tearDown
- self.db2.close()
- self.db2 = None
def test_sync_detects_rollback_in_source(self):
self.db1 = self.create_database('test1', 'source')
self.db2 = self.create_database('test2', 'target')
self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1')
self.sync(self.db1, self.db2)
- db1_copy = self.copy_database(self.db1)
+ self.db1_copy = self.copy_database(self.db1)
self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2')
self.sync(self.db1, self.db2)
self.assertRaises(
- u1db_errors.InvalidGeneration, self.sync, db1_copy, self.db2)
+ u1db_errors.InvalidGeneration, self.sync, self.db1_copy, self.db2)
def test_sync_detects_rollback_in_target(self):
self.db1 = self.create_database('test1', 'source')
self.db2 = self.create_database('test2', 'target')
self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
self.sync(self.db1, self.db2)
- db2_copy = self.copy_database(self.db2)
+ self.db2_copy = self.copy_database(self.db2)
self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2')
self.sync(self.db1, self.db2)
self.assertRaises(
- u1db_errors.InvalidGeneration, self.sync, self.db1, db2_copy)
+ u1db_errors.InvalidGeneration, self.sync, self.db1, self.db2_copy)
def test_sync_detects_diverged_source(self):
self.db1 = self.create_database('test1', 'source')
self.db2 = self.create_database('test2', 'target')
- db3 = self.copy_database(self.db1)
+ self.db3 = self.copy_database(self.db1)
self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
- db3.create_doc_from_json(tests.simple_doc, doc_id="divergent")
+ self.db3.create_doc_from_json(tests.simple_doc, doc_id="divergent")
self.sync(self.db1, self.db2)
self.assertRaises(
- u1db_errors.InvalidTransactionId, self.sync, db3, self.db2)
+ u1db_errors.InvalidTransactionId, self.sync, self.db3, self.db2)
def test_sync_detects_diverged_target(self):
self.db1 = self.create_database('test1', 'source')
self.db2 = self.create_database('test2', 'target')
- db3 = self.copy_database(self.db2)
- db3.create_doc_from_json(tests.nested_doc, doc_id="divergent")
+ self.db3 = self.copy_database(self.db2)
+ self.db3.create_doc_from_json(tests.nested_doc, doc_id="divergent")
self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
self.sync(self.db1, self.db2)
self.assertRaises(
- u1db_errors.InvalidTransactionId, self.sync, self.db1, db3)
+ u1db_errors.InvalidTransactionId, self.sync, self.db1, self.db3)
def test_sync_detects_rollback_and_divergence_in_source(self):
self.db1 = self.create_database('test1', 'source')
self.db2 = self.create_database('test2', 'target')
self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1')
self.sync(self.db1, self.db2)
- db1_copy = self.copy_database(self.db1)
+ self.db1_copy = self.copy_database(self.db1)
self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2')
self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc3')
self.sync(self.db1, self.db2)
- db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
+ self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
+ self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
self.assertRaises(
- u1db_errors.InvalidTransactionId, self.sync, db1_copy, self.db2)
+ u1db_errors.InvalidTransactionId, self.sync,
+ self.db1_copy, self.db2)
def test_sync_detects_rollback_and_divergence_in_target(self):
self.db1 = self.create_database('test1', 'source')
self.db2 = self.create_database('test2', 'target')
self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
self.sync(self.db1, self.db2)
- db2_copy = self.copy_database(self.db2)
+ self.db2_copy = self.copy_database(self.db2)
self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2')
self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc3')
self.sync(self.db1, self.db2)
- db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
+ self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
+ self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
self.assertRaises(
- u1db_errors.InvalidTransactionId, self.sync, self.db1, db2_copy)
+ u1db_errors.InvalidTransactionId, self.sync,
+ self.db1, self.db2_copy)
def test_optional_sync_preserve_json(self):
self.db1 = self.create_database('test1', 'source')
@@ -1373,10 +1319,14 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
def setUp(self):
CouchDBTestCase.setUp(self)
+
+ def create_db(self, ensure=True, dbname=None):
+ if not dbname:
+ dbname = ('test-%s' % uuid4().hex)
self.db = couch.CouchDatabase.open_database(
- urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'),
+ urljoin('http://127.0.0.1:%d' % self.couch_port, dbname),
create=True,
- ensure_ddocs=False) # note that we don't enforce ddocs here
+ ensure_ddocs=ensure)
def tearDown(self):
self.db.delete_database()
@@ -1388,6 +1338,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
Test that all methods that access design documents will raise if the
design docs are not present.
"""
+ self.create_db(ensure=False)
# _get_generation()
self.assertRaises(
errors.MissingDesignDocError,
@@ -1418,10 +1369,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
Test that all methods that access design documents list functions
will raise if the functions are not present.
"""
- self.db = couch.CouchDatabase.open_database(
- urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'),
- create=True,
- ensure_ddocs=True)
+ self.create_db(ensure=True)
# erase views from _design/transactions
transactions = self.db._database['_design/transactions']
transactions['lists'] = {}
@@ -1448,10 +1396,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
Test that all methods that access design documents list functions
will raise if the functions are not present.
"""
- self.db = couch.CouchDatabase.open_database(
- urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'),
- create=True,
- ensure_ddocs=True)
+ self.create_db(ensure=True)
# erase views from _design/transactions
transactions = self.db._database['_design/transactions']
del transactions['lists']
@@ -1478,10 +1423,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
Test that all methods that access design documents' named views will
raise if the views are not present.
"""
- self.db = couch.CouchDatabase.open_database(
- urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'),
- create=True,
- ensure_ddocs=True)
+ self.create_db(ensure=True)
# erase views from _design/docs
docs = self.db._database['_design/docs']
del docs['views']
@@ -1520,10 +1462,7 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
Test that all methods that access design documents will raise if the
design docs are not present.
"""
- self.db = couch.CouchDatabase.open_database(
- urljoin('http://127.0.0.1:%d' % self.wrapper.port, 'test'),
- create=True,
- ensure_ddocs=True)
+ self.create_db(ensure=True)
# delete _design/docs
del self.db._database['_design/docs']
# delete _design/syncs
@@ -1554,3 +1493,16 @@ class CouchDatabaseExceptionsTests(CouchDBTestCase):
self.assertRaises(
errors.MissingDesignDocDeletedError,
self.db._do_set_replica_gen_and_trans_id, 1, 2, 3)
+
+ def test_ensure_ddoc_independently(self):
+ """
+ Test that missing ddocs other than _design/docs will be ensured,
+ even if _design/docs is present.
+ """
+ self.create_db(ensure=True)
+ del self.db._database['_design/transactions']
+ self.assertRaises(
+ errors.MissingDesignDocDeletedError,
+ self.db._get_transaction_log)
+ self.create_db(ensure=True, dbname=self.db._dbname)
+ self.db._get_transaction_log()
diff --git a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py
index c488822e..25f709ca 100644
--- a/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py
+++ b/common/src/leap/soledad/common/tests/test_couch_operations_atomicity.py
@@ -23,6 +23,7 @@ import threading
from urlparse import urljoin
from twisted.internet import defer
+from uuid import uuid4
from leap.soledad.client import Soledad
from leap.soledad.common.couch import CouchDatabase, CouchServerState
@@ -55,7 +56,7 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer):
sync_target = soledad_sync_target
- def _soledad_instance(self, user='user-uuid', passphrase=u'123',
+ def _soledad_instance(self, user=None, passphrase=u'123',
prefix='',
secrets_path='secrets.json',
local_db_path='soledad.u1db', server_url='',
@@ -63,6 +64,7 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer):
"""
Instantiate Soledad.
"""
+ user = user or self.user
# this callback ensures we save a document which is sent to the shared
# db.
@@ -83,15 +85,15 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer):
return soledad
def make_app(self):
- self.request_state = CouchServerState(self._couch_url)
+ self.request_state = CouchServerState(self.couch_url)
return self.make_app_after_state(self.request_state)
def setUp(self):
TestCaseWithServer.setUp(self)
CouchDBTestCase.setUp(self)
- self._couch_url = 'http://localhost:' + str(self.wrapper.port)
+ self.user = ('user-%s' % uuid4().hex)
self.db = CouchDatabase.open_database(
- urljoin(self._couch_url, 'user-user-uuid'),
+ urljoin(self.couch_url, 'user-' + self.user),
create=True,
replica_uid='replica',
ensure_ddocs=True)
diff --git a/common/src/leap/soledad/common/tests/test_server.py b/common/src/leap/soledad/common/tests/test_server.py
index 5ffa2a63..f512d6c1 100644
--- a/common/src/leap/soledad/common/tests/test_server.py
+++ b/common/src/leap/soledad/common/tests/test_server.py
@@ -50,7 +50,7 @@ from leap.soledad.server.auth import URLToAuthorization
def _couch_ensure_database(self, dbname):
db = CouchDatabase.open_database(
- self._couch_url + '/' + dbname,
+ self.couch_url + '/' + dbname,
create=True,
ensure_ddocs=True)
return db, db._replica_uid
@@ -325,7 +325,7 @@ class EncryptedSyncTestCase(
shared_db=self.get_default_shared_mock(_put_doc_side_effect))
def make_app(self):
- self.request_state = CouchServerState(self._couch_url)
+ self.request_state = CouchServerState(self.couch_url)
return self.make_app_with_state(self.request_state)
def setUp(self):
@@ -333,7 +333,6 @@ class EncryptedSyncTestCase(
# dependencies.
# XXX explain better
CouchDBTestCase.setUp(self)
- self._couch_url = 'http://localhost:' + str(self.wrapper.port)
self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
TestCaseWithServer.setUp(self)
@@ -368,7 +367,7 @@ class EncryptedSyncTestCase(
# ensure remote db exists before syncing
db = CouchDatabase.open_database(
- urljoin(self._couch_url, 'user-' + user),
+ urljoin(self.couch_url, 'user-' + user),
create=True,
ensure_ddocs=True)
@@ -494,27 +493,18 @@ class LockResourceTestCase(
# dependencies.
# XXX explain better
CouchDBTestCase.setUp(self)
- self._couch_url = 'http://localhost:' + str(self.wrapper.port)
self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
TestCaseWithServer.setUp(self)
# create the databases
- CouchDatabase.open_database(
- urljoin(self._couch_url, 'shared'),
- create=True,
- ensure_ddocs=True)
- CouchDatabase.open_database(
- urljoin(self._couch_url, 'tokens'),
+ db = CouchDatabase.open_database(
+ urljoin(self.couch_url, ('shared-%s' % (uuid4().hex))),
create=True,
ensure_ddocs=True)
- self._state = CouchServerState(self._couch_url)
+ self.addCleanup(db.delete_database)
+ self._state = CouchServerState(self.couch_url)
+ self._state.open_database = mock.Mock(return_value=db)
def tearDown(self):
- # delete remote database
- db = CouchDatabase.open_database(
- urljoin(self._couch_url, 'shared'),
- create=True,
- ensure_ddocs=True)
- db.delete_database()
CouchDBTestCase.tearDown(self)
TestCaseWithServer.tearDown(self)
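Rather than creating and later deleting fixed 'shared' and 'tokens' databases, the lock tests now stub CouchServerState.open_database with a mock that always hands back the per-test database. A minimal illustration of that stubbing idea (mock only, no couch server involved):

    import mock  # Python 3: from unittest import mock

    class ServerState(object):
        def open_database(self, dbname):
            raise NotImplementedError  # would hit the real server

    state = ServerState()
    test_db = object()  # stands in for the per-test CouchDatabase
    state.open_database = mock.Mock(return_value=test_db)

    # every lookup now resolves to the test database
    assert state.open_database('shared') is test_db
    state.open_database.assert_called_with('shared')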
diff --git a/common/src/leap/soledad/common/tests/test_soledad.py b/common/src/leap/soledad/common/tests/test_soledad.py
index bd356858..85d6734e 100644
--- a/common/src/leap/soledad/common/tests/test_soledad.py
+++ b/common/src/leap/soledad/common/tests/test_soledad.py
@@ -223,7 +223,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
def setUp(self):
# mock signaling
soledad.client.signal = Mock()
- soledad.client.secrets.events.emit = Mock()
+ soledad.client.secrets.events.emit_async = Mock()
# run parent's setUp
BaseSoledadTest.setUp(self)
@@ -245,57 +245,57 @@ class SoledadSignalingTestCase(BaseSoledadTest):
- downloading keys / done downloading keys.
- uploading keys / done uploading keys.
"""
- soledad.client.secrets.events.emit.reset_mock()
+ soledad.client.secrets.events.emit_async.reset_mock()
# get a fresh instance so it emits all bootstrap signals
sol = self._soledad_instance(
secrets_path='alternative_stage3.json',
local_db_path='alternative_stage3.u1db')
# reverse call order so we can verify in the order the signals were
# expected
- soledad.client.secrets.events.emit.mock_calls.reverse()
- soledad.client.secrets.events.emit.call_args = \
- soledad.client.secrets.events.emit.call_args_list[0]
- soledad.client.secrets.events.emit.call_args_list.reverse()
+ soledad.client.secrets.events.emit_async.mock_calls.reverse()
+ soledad.client.secrets.events.emit_async.call_args = \
+ soledad.client.secrets.events.emit_async.call_args_list[0]
+ soledad.client.secrets.events.emit_async.call_args_list.reverse()
# downloading keys signals
- soledad.client.secrets.events.emit.assert_called_with(
+ soledad.client.secrets.events.emit_async.assert_called_with(
catalog.SOLEDAD_DOWNLOADING_KEYS,
ADDRESS,
)
- self._pop_mock_call(soledad.client.secrets.events.emit)
- soledad.client.secrets.events.emit.assert_called_with(
+ self._pop_mock_call(soledad.client.secrets.events.emit_async)
+ soledad.client.secrets.events.emit_async.assert_called_with(
catalog.SOLEDAD_DONE_DOWNLOADING_KEYS,
ADDRESS,
)
# creating keys signals
- self._pop_mock_call(soledad.client.secrets.events.emit)
- soledad.client.secrets.events.emit.assert_called_with(
+ self._pop_mock_call(soledad.client.secrets.events.emit_async)
+ soledad.client.secrets.events.emit_async.assert_called_with(
catalog.SOLEDAD_CREATING_KEYS,
ADDRESS,
)
- self._pop_mock_call(soledad.client.secrets.events.emit)
- soledad.client.secrets.events.emit.assert_called_with(
+ self._pop_mock_call(soledad.client.secrets.events.emit_async)
+ soledad.client.secrets.events.emit_async.assert_called_with(
catalog.SOLEDAD_DONE_CREATING_KEYS,
ADDRESS,
)
# downloading once more (inside _put_keys_in_shared_db)
- self._pop_mock_call(soledad.client.secrets.events.emit)
- soledad.client.secrets.events.emit.assert_called_with(
+ self._pop_mock_call(soledad.client.secrets.events.emit_async)
+ soledad.client.secrets.events.emit_async.assert_called_with(
catalog.SOLEDAD_DOWNLOADING_KEYS,
ADDRESS,
)
- self._pop_mock_call(soledad.client.secrets.events.emit)
- soledad.client.secrets.events.emit.assert_called_with(
+ self._pop_mock_call(soledad.client.secrets.events.emit_async)
+ soledad.client.secrets.events.emit_async.assert_called_with(
catalog.SOLEDAD_DONE_DOWNLOADING_KEYS,
ADDRESS,
)
# uploading keys signals
- self._pop_mock_call(soledad.client.secrets.events.emit)
- soledad.client.secrets.events.emit.assert_called_with(
+ self._pop_mock_call(soledad.client.secrets.events.emit_async)
+ soledad.client.secrets.events.emit_async.assert_called_with(
catalog.SOLEDAD_UPLOADING_KEYS,
ADDRESS,
)
- self._pop_mock_call(soledad.client.secrets.events.emit)
- soledad.client.secrets.events.emit.assert_called_with(
+ self._pop_mock_call(soledad.client.secrets.events.emit_async)
+ soledad.client.secrets.events.emit_async.assert_called_with(
catalog.SOLEDAD_DONE_UPLOADING_KEYS,
ADDRESS,
)
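The assertions above check signal order by reversing the mock's recorded calls and popping them one at a time. mock's assert_has_calls expresses the same ordered expectation more directly; a self-contained illustration with generic event names, not the Soledad catalog:

    import mock  # Python 3: from unittest import mock

    emit_async = mock.Mock()
    emit_async('DOWNLOADING_KEYS', 'user@leap.se')
    emit_async('DONE_DOWNLOADING_KEYS', 'user@leap.se')

    # verifies the two emissions happened in this order
    emit_async.assert_has_calls([
        mock.call('DOWNLOADING_KEYS', 'user@leap.se'),
        mock.call('DONE_DOWNLOADING_KEYS', 'user@leap.se'),
    ])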
@@ -316,7 +316,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
doc.content = sol.secrets._export_recovery_document()
sol.close()
# reset mock
- soledad.client.secrets.events.emit.reset_mock()
+ soledad.client.secrets.events.emit_async.reset_mock()
# get a fresh instance so it emits all bootstrap signals
shared_db = self.get_default_shared_mock(get_doc_return_value=doc)
sol = self._soledad_instance(
@@ -325,17 +325,17 @@ class SoledadSignalingTestCase(BaseSoledadTest):
shared_db_class=shared_db)
# reverse call order so we can verify in the order the signals were
# expected
- soledad.client.secrets.events.emit.mock_calls.reverse()
- soledad.client.secrets.events.emit.call_args = \
- soledad.client.secrets.events.emit.call_args_list[0]
- soledad.client.secrets.events.emit.call_args_list.reverse()
+ soledad.client.secrets.events.emit_async.mock_calls.reverse()
+ soledad.client.secrets.events.emit_async.call_args = \
+ soledad.client.secrets.events.emit_async.call_args_list[0]
+ soledad.client.secrets.events.emit_async.call_args_list.reverse()
# assert download keys signals
- soledad.client.secrets.events.emit.assert_called_with(
+ soledad.client.secrets.events.emit_async.assert_called_with(
catalog.SOLEDAD_DOWNLOADING_KEYS,
ADDRESS,
)
- self._pop_mock_call(soledad.client.secrets.events.emit)
- soledad.client.secrets.events.emit.assert_called_with(
+ self._pop_mock_call(soledad.client.secrets.events.emit_async)
+ soledad.client.secrets.events.emit_async.assert_called_with(
catalog.SOLEDAD_DONE_DOWNLOADING_KEYS,
ADDRESS,
)
@@ -369,7 +369,7 @@ class SoledadSignalingTestCase(BaseSoledadTest):
yield sol.sync()
# assert the signal has been emitted
- soledad.client.events.emit.assert_called_with(
+ soledad.client.events.emit_async.assert_called_with(
catalog.SOLEDAD_DONE_DATA_SYNC,
ADDRESS,
)
diff --git a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py
index c57d6f61..439fc070 100644
--- a/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py
+++ b/common/src/leap/soledad/common/tests/test_sqlcipher_sync.py
@@ -19,29 +19,26 @@ Test sqlcipher backend sync.
"""
-import json
+import os
from u1db import sync
from u1db import vectorclock
from u1db import errors
+from uuid import uuid4
from testscenarios import TestWithScenarios
-from urlparse import urljoin
-from twisted.internet import defer
-
-from leap.soledad.common import couch
from leap.soledad.common.crypto import ENC_SCHEME_KEY
from leap.soledad.client.http_target import SoledadHTTPSyncTarget
from leap.soledad.client.crypto import decrypt_doc_dict
-from leap.soledad.client.sqlcipher import SQLCipherDatabase
from leap.soledad.common.tests import u1db_tests as tests
from leap.soledad.common.tests.test_sqlcipher import SQLCIPHER_SCENARIOS
from leap.soledad.common.tests.util import make_soledad_app
+from leap.soledad.common.tests.test_sync_target import \
+ SoledadDatabaseSyncTargetTests
from leap.soledad.common.tests.util import soledad_sync_target
from leap.soledad.common.tests.util import BaseSoledadTest
-from leap.soledad.common.tests.util import SoledadWithCouchServerMixin
# -----------------------------------------------------------------------------
@@ -97,23 +94,6 @@ class SQLCipherDatabaseSyncTests(
self._use_tracking = {}
super(tests.DatabaseBaseTests, self).setUp()
- def tearDown(self):
- super(tests.DatabaseBaseTests, self).tearDown()
- if hasattr(self, 'db1') and isinstance(self.db1, SQLCipherDatabase):
- self.db1.close()
- if hasattr(self, 'db1_copy') \
- and isinstance(self.db1_copy, SQLCipherDatabase):
- self.db1_copy.close()
- if hasattr(self, 'db2') \
- and isinstance(self.db2, SQLCipherDatabase):
- self.db2.close()
- if hasattr(self, 'db2_copy') \
- and isinstance(self.db2_copy, SQLCipherDatabase):
- self.db2_copy.close()
- if hasattr(self, 'db3') \
- and isinstance(self.db3, SQLCipherDatabase):
- self.db3.close()
-
def create_database(self, replica_uid, sync_role=None):
if replica_uid == 'test' and sync_role is None:
# created up the chain by base class but unused
@@ -121,6 +101,7 @@ class SQLCipherDatabaseSyncTests(
db = self.create_database_for_role(replica_uid, sync_role)
if sync_role:
self._use_tracking[db] = (replica_uid, sync_role)
+ self.addCleanup(db.close)
return db
def create_database_for_role(self, replica_uid, sync_role):
@@ -729,38 +710,30 @@ class SQLCipherDatabaseSyncTests(
errors.InvalidTransactionId, self.sync, self.db1, self.db2_copy)
-def _make_local_db_and_token_http_target(test, path='test'):
+def make_local_db_and_soledad_target(
+ test, path='test',
+ source_replica_uid=uuid4().hex):
test.startTwistedServer()
- # ensure remote db exists before syncing
- db = couch.CouchDatabase.open_database(
- urljoin(test._couch_url, 'test'),
- create=True,
- replica_uid='test',
- ensure_ddocs=True)
-
- replica_uid = test._soledad._dbpool.replica_uid
+ replica_uid = os.path.basename(path)
+ db = test.request_state._create_database(replica_uid)
sync_db = test._soledad._sync_db
sync_enc_pool = test._soledad._sync_enc_pool
st = soledad_sync_target(
- test, path,
- source_replica_uid=replica_uid,
+ test, db._dbname,
+ source_replica_uid=source_replica_uid,
sync_db=sync_db,
sync_enc_pool=sync_enc_pool)
return db, st
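One caveat in the signature above: source_replica_uid=uuid4().hex is evaluated once, at definition time, so every call relying on the default shares the same uid. If a fresh uid per call were intended, the usual sentinel-default idiom applies; a sketch:

    from uuid import uuid4

    def make_target(test, path='test', source_replica_uid=None):
        # evaluate inside the body so each call gets its own uid
        if source_replica_uid is None:
            source_replica_uid = uuid4().hex
        return path, source_replica_uid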
target_scenarios = [
('leap', {
- 'create_db_and_target': _make_local_db_and_token_http_target,
+ 'create_db_and_target': make_local_db_and_soledad_target,
'make_app_with_state': make_soledad_app,
'do_sync': sync_via_synchronizer_and_soledad}),
]
-class SQLCipherSyncTargetTests(
- TestWithScenarios,
- tests.DatabaseBaseTests,
- tests.TestCaseWithServer,
- SoledadWithCouchServerMixin):
+class SQLCipherSyncTargetTests(SoledadDatabaseSyncTargetTests):
# TODO: implement _set_trace_hook(_shallow) in SoledadHTTPSyncTarget so
# skipped tests can be successfully executed.
@@ -769,368 +742,3 @@ class SQLCipherSyncTargetTests(
target_scenarios))
whitebox = False
-
- def setUp(self):
- super(tests.DatabaseBaseTests, self).setUp()
- self.db, self.st = self.create_db_and_target(self)
- self.addCleanup(self.st.close)
- self.other_changes = []
-
- def tearDown(self):
- super(tests.DatabaseBaseTests, self).tearDown()
-
- def assertLastExchangeLog(self, db, expected):
- log = getattr(db, '_last_exchange_log', None)
- if log is None:
- return
- self.assertEqual(expected, log)
-
- def receive_doc(self, doc, gen, trans_id):
- self.other_changes.append(
- (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id))
-
- def make_app(self):
- self.request_state = couch.CouchServerState(self._couch_url)
- return self.make_app_with_state(self.request_state)
-
- def set_trace_hook(self, callback, shallow=False):
- setter = (self.st._set_trace_hook if not shallow else
- self.st._set_trace_hook_shallow)
- try:
- setter(callback)
- except NotImplementedError:
- self.skipTest("%s does not implement _set_trace_hook"
- % (self.st.__class__.__name__,))
-
- def test_get_sync_target(self):
- self.assertIsNot(None, self.st)
-
- @defer.inlineCallbacks
- def test_get_sync_info(self):
- sync_info = yield self.st.get_sync_info('other')
- self.assertEqual(
- ('test', 0, '', 0, ''), sync_info)
-
- @defer.inlineCallbacks
- def test_create_doc_updates_sync_info(self):
- sync_info = yield self.st.get_sync_info('other')
- self.assertEqual(
- ('test', 0, '', 0, ''), sync_info)
- self.db.create_doc_from_json(tests.simple_doc)
- sync_info = yield self.st.get_sync_info('other')
- self.assertEqual(1, sync_info[1])
-
- @defer.inlineCallbacks
- def test_record_sync_info(self):
- yield self.st.record_sync_info('replica', 10, 'T-transid')
- sync_info = yield self.st.get_sync_info('other')
- self.assertEqual(
- ('test', 0, '', 10, 'T-transid'), sync_info)
-
- @defer.inlineCallbacks
- def test_sync_exchange(self):
- """
- Modified to account for possibly receiving encrypted documents from
- sever-side.
- """
-
- docs_by_gen = [
- (self.make_document('doc-id', 'replica:1', tests.simple_doc), 10,
- 'T-sid')]
- new_gen, trans_id = yield self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, insert_doc_cb=self.receive_doc)
- self.assertGetEncryptedDoc(
- self.db, 'doc-id', 'replica:1', tests.simple_doc, False)
- self.assertTransactionLog(['doc-id'], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 1, last_trans_id),
- (self.other_changes, new_gen, last_trans_id))
- sync_info = yield self.st.get_sync_info('replica')
- self.assertEqual(10, sync_info[3])
-
- @defer.inlineCallbacks
- def test_sync_exchange_push_many(self):
- """
- Modified to account for possibly receiving encrypted documents from
- sever-side.
- """
- docs_by_gen = [
- (self.make_document(
- 'doc-id', 'replica:1', tests.simple_doc), 10, 'T-1'),
- (self.make_document('doc-id2', 'replica:1', tests.nested_doc), 11,
- 'T-2')]
- new_gen, trans_id = yield self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, insert_doc_cb=self.receive_doc)
- self.assertGetEncryptedDoc(
- self.db, 'doc-id', 'replica:1', tests.simple_doc, False)
- self.assertGetEncryptedDoc(
- self.db, 'doc-id2', 'replica:1', tests.nested_doc, False)
- self.assertTransactionLog(['doc-id', 'doc-id2'], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 2, last_trans_id),
- (self.other_changes, new_gen, trans_id))
- sync_info = yield self.st.get_sync_info('replica')
- self.assertEqual(11, sync_info[3])
-
- @defer.inlineCallbacks
- def test_sync_exchange_returns_many_new_docs(self):
- """
- Modified to account for JSON serialization differences.
- """
- doc = self.db.create_doc_from_json(tests.simple_doc)
- doc2 = self.db.create_doc_from_json(tests.nested_doc)
- self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db)
- new_gen, _ = yield self.st.sync_exchange(
- [], 'other-replica', last_known_generation=0,
- last_known_trans_id=None, insert_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db)
- self.assertEqual(2, new_gen)
- self.assertEqual(
- [(doc.doc_id, doc.rev, 1),
- (doc2.doc_id, doc2.rev, 2)],
- [c[:2] + c[3:4] for c in self.other_changes])
- self.assertEqual(
- json.dumps(tests.simple_doc),
- json.dumps(self.other_changes[0][2]))
- self.assertEqual(
- json.loads(tests.nested_doc),
- json.loads(self.other_changes[1][2]))
- if self.whitebox:
- self.assertEqual(
- self.db._last_exchange_log['return'],
- {'last_gen': 2, 'docs':
- [(doc.doc_id, doc.rev), (doc2.doc_id, doc2.rev)]})
-
- @defer.inlineCallbacks
- def test_sync_exchange_deleted(self):
- doc = self.db.create_doc_from_json('{}')
- edit_rev = 'replica:1|' + doc.rev
- docs_by_gen = [
- (self.make_document(doc.doc_id, edit_rev, None), 10, 'T-sid')]
- new_gen, trans_id = yield self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, insert_doc_cb=self.receive_doc)
- self.assertGetDocIncludeDeleted(
- self.db, doc.doc_id, edit_rev, None, False)
- self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 2, last_trans_id),
- (self.other_changes, new_gen, trans_id))
- sync_info = yield self.st.get_sync_info('replica')
- self.assertEqual(10, sync_info[3])
-
- @defer.inlineCallbacks
- def test_sync_exchange_refuses_conflicts(self):
- doc = self.db.create_doc_from_json(tests.simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- new_doc = '{"key": "altval"}'
- docs_by_gen = [
- (self.make_document(doc.doc_id, 'replica:1', new_doc), 10,
- 'T-sid')]
- new_gen, _ = yield self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, insert_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- self.assertEqual(
- (doc.doc_id, doc.rev, tests.simple_doc, 1),
- self.other_changes[0][:-1])
- self.assertEqual(1, new_gen)
- if self.whitebox:
- self.assertEqual(self.db._last_exchange_log['return'],
- {'last_gen': 1, 'docs': [(doc.doc_id, doc.rev)]})
-
- @defer.inlineCallbacks
- def test_sync_exchange_ignores_convergence(self):
- doc = self.db.create_doc_from_json(tests.simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- gen, txid = self.db._get_generation_info()
- docs_by_gen = [
- (self.make_document(
- doc.doc_id, doc.rev, tests.simple_doc), 10, 'T-sid')]
- new_gen, _ = yield self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=gen,
- last_known_trans_id=txid, insert_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- self.assertEqual(([], 1), (self.other_changes, new_gen))
-
- @defer.inlineCallbacks
- def test_sync_exchange_returns_new_docs(self):
- doc = self.db.create_doc_from_json(tests.simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- new_gen, _ = yield self.st.sync_exchange(
- [], 'other-replica', last_known_generation=0,
- last_known_trans_id=None, insert_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- self.assertEqual(
- (doc.doc_id, doc.rev, tests.simple_doc, 1),
- self.other_changes[0][:-1])
- self.assertEqual(1, new_gen)
- if self.whitebox:
- self.assertEqual(self.db._last_exchange_log['return'],
- {'last_gen': 1, 'docs': [(doc.doc_id, doc.rev)]})
-
- @defer.inlineCallbacks
- def test_sync_exchange_returns_deleted_docs(self):
- doc = self.db.create_doc_from_json(tests.simple_doc)
- self.db.delete_doc(doc)
- self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db)
- new_gen, _ = yield self.st.sync_exchange(
- [], 'other-replica', last_known_generation=0,
- last_known_trans_id=None, insert_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db)
- self.assertEqual(
- (doc.doc_id, doc.rev, None, 2), self.other_changes[0][:-1])
- self.assertEqual(2, new_gen)
- if self.whitebox:
- self.assertEqual(self.db._last_exchange_log['return'],
- {'last_gen': 2, 'docs': [(doc.doc_id, doc.rev)]})
-
- @defer.inlineCallbacks
- def test_sync_exchange_getting_newer_docs(self):
- doc = self.db.create_doc_from_json(tests.simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- new_doc = '{"key": "altval"}'
- docs_by_gen = [
- (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10,
- 'T-sid')]
- new_gen, _ = yield self.st.sync_exchange(
- docs_by_gen, 'other-replica', last_known_generation=0,
- last_known_trans_id=None, insert_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db)
- self.assertEqual(([], 2), (self.other_changes, new_gen))
-
- @defer.inlineCallbacks
- def test_sync_exchange_with_concurrent_updates_of_synced_doc(self):
- expected = []
-
- def before_whatschanged_cb(state):
- if state != 'before whats_changed':
- return
- cont = '{"key": "cuncurrent"}'
- conc_rev = self.db.put_doc(
- self.make_document(doc.doc_id, 'test:1|z:2', cont))
- expected.append((doc.doc_id, conc_rev, cont, 3))
-
- self.set_trace_hook(before_whatschanged_cb)
- doc = self.db.create_doc_from_json(tests.simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- new_doc = '{"key": "altval"}'
- docs_by_gen = [
- (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10,
- 'T-sid')]
- new_gen, _ = yield self.st.sync_exchange(
- docs_by_gen, 'other-replica', last_known_generation=0,
- last_known_trans_id=None, insert_doc_cb=self.receive_doc)
- self.assertEqual(expected, [c[:-1] for c in self.other_changes])
- self.assertEqual(3, new_gen)
-
- @defer.inlineCallbacks
- def test_sync_exchange_with_concurrent_updates(self):
-
- def after_whatschanged_cb(state):
- if state != 'after whats_changed':
- return
- self.db.create_doc_from_json('{"new": "doc"}')
-
- self.set_trace_hook(after_whatschanged_cb)
- doc = self.db.create_doc_from_json(tests.simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- new_doc = '{"key": "altval"}'
- docs_by_gen = [
- (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10,
- 'T-sid')]
- new_gen, _ = yield self.st.sync_exchange(
- docs_by_gen, 'other-replica', last_known_generation=0,
- last_known_trans_id=None, insert_doc_cb=self.receive_doc)
- self.assertEqual(([], 2), (self.other_changes, new_gen))
-
- @defer.inlineCallbacks
- def test_sync_exchange_converged_handling(self):
- doc = self.db.create_doc_from_json(tests.simple_doc)
- docs_by_gen = [
- (self.make_document('new', 'other:1', '{}'), 4, 'T-foo'),
- (self.make_document(doc.doc_id, doc.rev, doc.get_json()), 5,
- 'T-bar')]
- new_gen, _ = yield self.st.sync_exchange(
- docs_by_gen, 'other-replica', last_known_generation=0,
- last_known_trans_id=None, insert_doc_cb=self.receive_doc)
- self.assertEqual(([], 2), (self.other_changes, new_gen))
-
- @defer.inlineCallbacks
- def test_sync_exchange_detect_incomplete_exchange(self):
- def before_get_docs_explode(state):
- if state != 'before get_docs':
- return
- raise errors.U1DBError("fail")
- self.set_trace_hook(before_get_docs_explode)
- # suppress traceback printing in the wsgiref server
- # self.patch(simple_server.ServerHandler,
- # 'log_exception', lambda h, exc_info: None)
- doc = self.db.create_doc_from_json(tests.simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- with self.assertRaises((errors.U1DBError, errors.BrokenSyncStream)):
- yield self.st.sync_exchange(
- [], 'other-replica',
- last_known_generation=0, last_known_trans_id=None,
- insert_doc_cb=self.receive_doc)
-
- @defer.inlineCallbacks
- def test_sync_exchange_doc_ids(self):
- sync_exchange_doc_ids = getattr(self.st, 'sync_exchange_doc_ids', None)
- if sync_exchange_doc_ids is None:
- self.skipTest("sync_exchange_doc_ids not implemented")
- db2 = self.create_database('test2')
- doc = db2.create_doc_from_json(tests.simple_doc)
- new_gen, trans_id = sync_exchange_doc_ids(
- db2, [(doc.doc_id, 10, 'T-sid')], 0, None,
- insert_doc_cb=self.receive_doc)
- self.assertGetDoc(self.db, doc.doc_id, doc.rev,
- tests.simple_doc, False)
- self.assertTransactionLog([doc.doc_id], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 1, last_trans_id),
- (self.other_changes, new_gen, trans_id))
- self.assertEqual(10, self.st.get_sync_info(db2._replica_uid)[3])
-
- @defer.inlineCallbacks
- def test__set_trace_hook(self):
- called = []
-
- def cb(state):
- called.append(state)
-
- self.set_trace_hook(cb)
- yield self.st.sync_exchange([], 'replica', 0, None, self.receive_doc)
- yield self.st.record_sync_info('replica', 0, 'T-sid')
- self.assertEqual(['before whats_changed',
- 'after whats_changed',
- 'before get_docs',
- 'record_sync_info',
- ],
- called)
-
- @defer.inlineCallbacks
- def test__set_trace_hook_shallow(self):
- if (self.st._set_trace_hook_shallow == self.st._set_trace_hook or
- self.st._set_trace_hook_shallow.im_func ==
- SoledadHTTPSyncTarget._set_trace_hook_shallow.im_func):
- # shallow same as full
- expected = ['before whats_changed',
- 'after whats_changed',
- 'before get_docs',
- 'record_sync_info',
- ]
- else:
- expected = ['sync_exchange', 'record_sync_info']
-
- called = []
-
- def cb(state):
- called.append(state)
-
- self.set_trace_hook(cb, shallow=True)
- self.st.sync_exchange([], 'replica', 0, None, self.receive_doc)
- self.st.record_sync_info('replica', 0, 'T-sid')
- self.assertEqual(expected, called)
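
Note: the tests removed above all drive the same sync_exchange contract: the caller sends a list of (document, source generation, source transaction id) tuples, receives any documents it is missing through insert_doc_cb, and gets back the target's new generation and transaction id. A call-shape sketch (not standalone: st and make_document stand for the fixtures used in those tests):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def exchange_once(st, make_document):
        received = []

        def receive_doc(doc, gen, trans_id):
            # invoked once per document the target sends back
            received.append((doc.doc_id, doc.rev, doc.get_json(), gen))

        docs_by_gen = [
            # (document, source generation, source transaction id)
            (make_document('doc-id', 'replica:1', '{"key": "val"}'),
             10, 'T-sid'),
        ]
        new_gen, trans_id = yield st.sync_exchange(
            docs_by_gen, 'replica', last_known_generation=0,
            last_known_trans_id=None, insert_doc_cb=receive_doc)
        defer.returnValue((new_gen, trans_id, received))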
diff --git a/common/src/leap/soledad/common/tests/test_sync.py b/common/src/leap/soledad/common/tests/test_sync.py
index 14152370..1041367b 100644
--- a/common/src/leap/soledad/common/tests/test_sync.py
+++ b/common/src/leap/soledad/common/tests/test_sync.py
@@ -56,14 +56,13 @@ class InterruptableSyncTestCase(
sync_target = soledad_sync_target
def make_app(self):
- self.request_state = couch.CouchServerState(self._couch_url)
+ self.request_state = couch.CouchServerState(self.couch_url)
return self.make_app_with_state(self.request_state)
def setUp(self):
TestCaseWithServer.setUp(self)
CouchDBTestCase.setUp(self)
self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
- self._couch_url = 'http://localhost:' + str(self.wrapper.port)
def tearDown(self):
CouchDBTestCase.tearDown(self)
@@ -103,7 +102,7 @@ class InterruptableSyncTestCase(
# ensure remote db exists before syncing
db = couch.CouchDatabase.open_database(
- urljoin(self._couch_url, 'user-user-uuid'),
+ urljoin(self.couch_url, 'user-user-uuid'),
create=True,
ensure_ddocs=True)
@@ -148,8 +147,8 @@ class InterruptableSyncTestCase(
class TestSoledadDbSync(
TestWithScenarios,
- tests.TestCaseWithServer,
- SoledadWithCouchServerMixin):
+ SoledadWithCouchServerMixin,
+ tests.TestCaseWithServer):
"""
Test db.sync remote sync shortcut
@@ -166,10 +165,6 @@ class TestSoledadDbSync(
oauth = False
token = False
- def make_app(self):
- self.request_state = couch.CouchServerState(self._couch_url)
- return self.make_app_with_state(self.request_state)
-
def setUp(self):
"""
Need to explicitly invoke initialization on all bases.
@@ -177,29 +172,22 @@ class TestSoledadDbSync(
SoledadWithCouchServerMixin.setUp(self)
self.startTwistedServer()
self.db = self.make_database_for_test(self, 'test1')
- self.db2 = couch.CouchDatabase.open_database(
- urljoin(
- 'http://localhost:' + str(self.wrapper.port),
- 'test'
- ),
- create=True,
- ensure_ddocs=True)
+ self.db2 = self.request_state._create_database(replica_uid='test')
def tearDown(self):
"""
Need to explicitly invoke destruction on all bases.
"""
- self.db2.delete_database()
SoledadWithCouchServerMixin.tearDown(self)
# tests.TestCaseWithServer.tearDown(self)
- def do_sync(self, target_name):
+ def do_sync(self):
"""
Perform sync using SoledadSynchronizer, SoledadSyncTarget
and Token auth.
"""
target = soledad_sync_target(
- self, target_name,
+ self, self.db2._dbname,
source_replica_uid=self._soledad._dbpool.replica_uid)
self.addCleanup(target.close)
return sync.SoledadSynchronizer(
@@ -217,7 +205,7 @@ class TestSoledadDbSync(
doc1 = self.db.create_doc_from_json(tests.simple_doc)
doc2 = self.db2.create_doc_from_json(tests.nested_doc)
- local_gen_before_sync = yield self.do_sync('test')
+ local_gen_before_sync = yield self.do_sync()
gen, _, changes = self.db.whats_changed(local_gen_before_sync)
self.assertEqual(1, len(changes))
self.assertEqual(doc2.doc_id, changes[0][0])
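
Note: with CouchServerStateForTests generating database names, the target name can no longer be hardcoded to 'test'; it has to be read back from the created database. The resulting pattern, using the fixture names from this file:

    # db2 carries a generated name, so the sync target is derived from it:
    self.db2 = self.request_state._create_database(replica_uid='test')
    target = soledad_sync_target(
        self, self.db2._dbname,
        source_replica_uid=self._soledad._dbpool.replica_uid)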
diff --git a/common/src/leap/soledad/common/tests/test_sync_deferred.py b/common/src/leap/soledad/common/tests/test_sync_deferred.py
index ffb8a4ae..90b00670 100644
--- a/common/src/leap/soledad/common/tests/test_sync_deferred.py
+++ b/common/src/leap/soledad/common/tests/test_sync_deferred.py
@@ -59,6 +59,7 @@ class BaseSoledadDeferredEncTest(SoledadWithCouchServerMixin):
def setUp(self):
SoledadWithCouchServerMixin.setUp(self)
+ self.startTwistedServer()
# config info
self.db1_file = os.path.join(self.tempdir, "db1.u1db")
os.unlink(self.db1_file)
@@ -85,13 +86,7 @@ class BaseSoledadDeferredEncTest(SoledadWithCouchServerMixin):
defer_encryption=True, sync_db_key=sync_db_key)
self.db1 = SQLCipherDatabase(self.opts)
- self.db2 = couch.CouchDatabase.open_database(
- urljoin(
- 'http://localhost:' + str(self.wrapper.port),
- 'test'
- ),
- create=True,
- ensure_ddocs=True)
+ self.db2 = self.request_state._create_database('test')
def tearDown(self):
# XXX should not access "private" attrs
@@ -109,8 +104,8 @@ class SyncTimeoutError(Exception):
class TestSoledadDbSyncDeferredEncDecr(
TestWithScenarios,
- tests.TestCaseWithServer,
- BaseSoledadDeferredEncTest):
+ BaseSoledadDeferredEncTest,
+ tests.TestCaseWithServer):
"""
Test db.sync remote sync shortcut.
@@ -128,17 +123,12 @@ class TestSoledadDbSyncDeferredEncDecr(
oauth = False
token = True
- def make_app(self):
- self.request_state = couch.CouchServerState(self._couch_url)
- return self.make_app_with_state(self.request_state)
-
def setUp(self):
"""
Need to explicitly invoke initialization on all bases.
"""
BaseSoledadDeferredEncTest.setUp(self)
self.server = self.server_thread = None
- self.startTwistedServer()
self.syncer = None
def tearDown(self):
@@ -150,7 +140,7 @@ class TestSoledadDbSyncDeferredEncDecr(
dbsyncer.close()
BaseSoledadDeferredEncTest.tearDown(self)
- def do_sync(self, target_name):
+ def do_sync(self):
"""
Perform sync using SoledadSynchronizer, SoledadSyncTarget
and Token auth.
@@ -159,7 +149,7 @@ class TestSoledadDbSyncDeferredEncDecr(
sync_db = self._soledad._sync_db
sync_enc_pool = self._soledad._sync_enc_pool
target = soledad_sync_target(
- self, target_name,
+ self, self.db2._dbname,
source_replica_uid=replica_uid,
sync_db=sync_db,
sync_enc_pool=sync_enc_pool)
@@ -190,7 +180,7 @@ class TestSoledadDbSyncDeferredEncDecr(
"""
doc1 = self.db1.create_doc_from_json(tests.simple_doc)
doc2 = self.db2.create_doc_from_json(tests.nested_doc)
- local_gen_before_sync = yield self.do_sync('test')
+ local_gen_before_sync = yield self.do_sync()
gen, _, changes = self.db1.whats_changed(local_gen_before_sync)
self.assertEqual(1, len(changes))
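
Note: the deferred-encryption variant wires two extra fixtures into the target: the local staging database and the encryption worker pool. A sketch of do_sync's resulting shape (names as bound earlier in do_sync; the final synchronizer call mirrors TestSoledadDbSync in test_sync.py, and its exact keyword arguments may differ):

    target = soledad_sync_target(
        self, self.db2._dbname,
        source_replica_uid=replica_uid,
        sync_db=sync_db,              # local staging db for outgoing docs
        sync_enc_pool=sync_enc_pool)  # pool performing deferred encryption
    self.addCleanup(target.close)
    return sync.SoledadSynchronizer(self.db1, target).sync()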
diff --git a/common/src/leap/soledad/common/tests/test_sync_mutex.py b/common/src/leap/soledad/common/tests/test_sync_mutex.py
index a904a940..2e2123a7 100644
--- a/common/src/leap/soledad/common/tests/test_sync_mutex.py
+++ b/common/src/leap/soledad/common/tests/test_sync_mutex.py
@@ -84,14 +84,14 @@ class TestSyncMutex(
sync_target = soledad_sync_target
def make_app(self):
- self.request_state = couch.CouchServerState(self._couch_url)
+ self.request_state = couch.CouchServerState(self.couch_url)
return self.make_app_with_state(self.request_state)
def setUp(self):
TestCaseWithServer.setUp(self)
CouchDBTestCase.setUp(self)
self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
- self._couch_url = 'http://localhost:' + str(self.wrapper.port)
+ self.user = ('user-%s' % uuid.uuid4().hex)
def tearDown(self):
CouchDBTestCase.tearDown(self)
@@ -103,12 +103,12 @@ class TestSyncMutex(
# ensure remote db exists before syncing
db = couch.CouchDatabase.open_database(
- urljoin(self._couch_url, 'user-user-uuid'),
+ urljoin(self.couch_url, 'user-' + self.user),
create=True,
ensure_ddocs=True)
sol = self._soledad_instance(
- user='user-uuid', server_url=self.getURL())
+ user=self.user, server_url=self.getURL())
d1 = sol.sync()
d2 = sol.sync()
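
Note: naming the user with a fresh uuid per run keeps repeated test runs from colliding in a shared, long-lived CouchDB. For instance:

    from uuid import uuid4

    # unique per run, so leftover dbs from earlier runs never clash
    user = 'user-%s' % uuid4().hex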
diff --git a/common/src/leap/soledad/common/tests/test_sync_target.py b/common/src/leap/soledad/common/tests/test_sync_target.py
index d855fb52..c0987e90 100644
--- a/common/src/leap/soledad/common/tests/test_sync_target.py
+++ b/common/src/leap/soledad/common/tests/test_sync_target.py
@@ -63,13 +63,12 @@ class TestSoledadParseReceivedDocResponse(SoledadWithCouchServerMixin):
def setUp(self):
SoledadWithCouchServerMixin.setUp(self)
- self._couch_url = 'http://localhost:' + str(self.wrapper.port)
creds = {'token': {
'uuid': 'user-uuid',
'token': 'auth-token',
}}
self.target = target.SoledadHTTPSyncTarget(
- self._couch_url,
+ self.couch_url,
uuid4().hex,
creds,
self._soledad._crypto,
@@ -151,11 +150,12 @@ def make_local_db_and_soledad_target(
test, path='test',
source_replica_uid=uuid4().hex):
test.startTwistedServer()
- db = test.request_state._create_database(os.path.basename(path))
+ replica_uid = os.path.basename(path)
+ db = test.request_state._create_database(replica_uid)
sync_db = test._soledad._sync_db
sync_enc_pool = test._soledad._sync_enc_pool
st = soledad_sync_target(
- test, path,
+ test, db._dbname,
source_replica_uid=source_replica_uid,
sync_db=sync_db,
sync_enc_pool=sync_enc_pool)
@@ -191,6 +191,8 @@ class TestSoledadSyncTarget(
self.startTwistedServer()
sync_db = self._soledad._sync_db
sync_enc_pool = self._soledad._sync_enc_pool
+ if path is None:
+ path = self.db2._dbname
target = self.sync_target(
self, path,
source_replica_uid=source_replica_uid,
@@ -204,11 +206,11 @@ class TestSoledadSyncTarget(
SoledadWithCouchServerMixin.setUp(self)
self.startTwistedServer()
self.db1 = make_sqlcipher_database_for_test(self, 'test1')
- self.db2 = self.request_state._create_database('test2')
+ self.db2 = self.request_state._create_database('test')
def tearDown(self):
# db2, _ = self.request_state.ensure_database('test2')
- self.db2.delete_database()
+ self.delete_db(self.db2._dbname)
self.db1.close()
SoledadWithCouchServerMixin.tearDown(self)
TestWithScenarios.tearDown(self)
@@ -220,8 +222,8 @@ class TestSoledadSyncTarget(
This test was adapted to decrypt remote content before assert.
"""
- db = self.request_state._create_database('test')
- remote_target = self.getSyncTarget('test')
+ db = self.db2
+ remote_target = self.getSyncTarget()
other_docs = []
def receive_doc(doc, gen, trans_id):
@@ -247,7 +249,7 @@ class TestSoledadSyncTarget(
def blackhole_getstderr(inst):
return cStringIO.StringIO()
- db = self.request_state._create_database('test')
+ db = self.db2
_put_doc_if_newer = db._put_doc_if_newer
trigger_ids = ['doc-here2']
@@ -267,7 +269,6 @@ class TestSoledadSyncTarget(
self.patch(
IndexedCouchDatabase, '_put_doc_if_newer', bomb_put_doc_if_newer)
remote_target = self.getSyncTarget(
- 'test',
source_replica_uid='replica')
other_changes = []
@@ -317,7 +318,7 @@ class TestSoledadSyncTarget(
This test was adapted to decrypt remote content before assert.
"""
- remote_target = self.getSyncTarget('test')
+ remote_target = self.getSyncTarget()
other_docs = []
replica_uid_box = []
@@ -333,7 +334,7 @@ class TestSoledadSyncTarget(
last_known_trans_id=None, insert_doc_cb=receive_doc,
ensure_callback=ensure_cb, defer_decryption=False)
self.assertEqual(1, new_gen)
- db = self.request_state.open_database('test')
+ db = self.db2
self.assertEqual(1, len(replica_uid_box))
self.assertEqual(db._replica_uid, replica_uid_box[0])
self.assertGetEncryptedDoc(
@@ -346,10 +347,9 @@ class TestSoledadSyncTarget(
@defer.inlineCallbacks
def test_get_sync_info(self):
- db = self.request_state._create_database('test')
+ db = self.db2
db._set_replica_gen_and_trans_id('other-id', 1, 'T-transid')
remote_target = self.getSyncTarget(
- 'test',
source_replica_uid='other-id')
sync_info = yield remote_target.get_sync_info('other-id')
self.assertEqual(
@@ -358,19 +358,17 @@ class TestSoledadSyncTarget(
@defer.inlineCallbacks
def test_record_sync_info(self):
- db = self.request_state._create_database('test')
remote_target = self.getSyncTarget(
- 'test',
source_replica_uid='other-id')
yield remote_target.record_sync_info('other-id', 2, 'T-transid')
- self.assertEqual(
- (2, 'T-transid'), db._get_replica_gen_and_trans_id('other-id'))
+ self.assertEqual((2, 'T-transid'),
+ self.db2._get_replica_gen_and_trans_id('other-id'))
@defer.inlineCallbacks
def test_sync_exchange_receive(self):
- db = self.request_state._create_database('test')
+ db = self.db2
doc = db.create_doc_from_json('{"value": "there"}')
- remote_target = self.getSyncTarget('test')
+ remote_target = self.getSyncTarget()
other_changes = []
def receive_doc(doc, gen, trans_id):
@@ -423,10 +421,10 @@ class SoledadDatabaseSyncTargetTests(
self.db, self.st = make_local_db_and_soledad_target(self)
def tearDown(self):
- tests.TestCaseWithServer.tearDown(self)
- SoledadWithCouchServerMixin.tearDown(self)
self.db.close()
self.st.close()
+ tests.TestCaseWithServer.tearDown(self)
+ SoledadWithCouchServerMixin.tearDown(self)
def set_trace_hook(self, callback, shallow=False):
setter = (self.st._set_trace_hook if not shallow else
@@ -818,10 +816,6 @@ class TestSoledadDbSync(
oauth = False
token = False
- def make_app(self):
- self.request_state = couch.CouchServerState(self._couch_url)
- return self.make_app_with_state(self.request_state)
-
def setUp(self):
"""
Need to explicitly invoke initialization on all bases.
@@ -857,13 +851,7 @@ class TestSoledadDbSync(
defer_encryption=True, sync_db_key=sync_db_key)
self.db1 = SQLCipherDatabase(self.opts)
- self.db2 = couch.CouchDatabase.open_database(
- urljoin(
- 'http://localhost:' + str(self.wrapper.port),
- 'test'
- ),
- create=True,
- ensure_ddocs=True)
+ self.db2 = self.request_state._create_database(replica_uid='test')
def tearDown(self):
"""
@@ -890,7 +878,7 @@ class TestSoledadDbSync(
'uuid': 'user-uuid',
'token': 'auth-token',
}}
- target_url = self.getURL(target_name)
+ target_url = self.getURL(self.db2._dbname)
# get a u1db syncer
crypto = self._soledad._crypto
diff --git a/common/src/leap/soledad/common/tests/util.py b/common/src/leap/soledad/common/tests/util.py
index daa9c558..1c7adb91 100644
--- a/common/src/leap/soledad/common/tests/util.py
+++ b/common/src/leap/soledad/common/tests/util.py
@@ -27,10 +27,8 @@ import shutil
import random
import string
import u1db
-import subprocess
-import time
-import re
import traceback
+import couchdb
from uuid import uuid4
from mock import Mock
@@ -337,119 +335,6 @@ class BaseSoledadTest(BaseLeapTest, MockedSharedDBTest):
self.assertEqual(exp_doc.content, doc.content)
-# -----------------------------------------------------------------------------
-# A wrapper for running couchdb locally.
-# -----------------------------------------------------------------------------
-
-# from: https://github.com/smcq/paisley/blob/master/paisley/test/util.py
-# TODO: include license of above project.
-class CouchDBWrapper(object):
-
- """
- Wrapper for external CouchDB instance which is started and stopped for
- testing.
- """
- BOOT_TIMEOUT_SECONDS = 5
- RETRY_LIMIT = 3
-
- def start(self):
- tries = 0
- while tries < self.RETRY_LIMIT and not hasattr(self, 'port'):
- try:
- self._try_start()
- return
- except Exception, e:
- print traceback.format_exc()
- self.stop()
- tries += 1
- raise Exception(
- "Check your couchdb: Tried to start 3 times and failed badly")
-
- def _try_start(self):
- """
- Start a CouchDB instance for a test.
- """
- self.tempdir = tempfile.mkdtemp(suffix='.couch.test')
-
- path = os.path.join(os.path.dirname(__file__),
- 'couchdb.ini.template')
- handle = open(path)
- conf = handle.read() % {
- 'tempdir': self.tempdir,
- }
- handle.close()
-
- shutil.copy('/etc/couchdb/default.ini', self.tempdir)
- defaultConfPath = os.path.join(self.tempdir, 'default.ini')
-
- confPath = os.path.join(self.tempdir, 'test.ini')
- handle = open(confPath, 'w')
- handle.write(conf)
- handle.close()
-
- # create the dirs from the template
- mkdir_p(os.path.join(self.tempdir, 'lib'))
- mkdir_p(os.path.join(self.tempdir, 'log'))
- args = ['/usr/bin/couchdb', '-n',
- '-a', defaultConfPath, '-a', confPath]
- null = open('/dev/null', 'w')
-
- self.process = subprocess.Popen(
- args, env=None, stdout=null.fileno(), stderr=null.fileno(),
- close_fds=True)
- boot_time = time.time()
- # find port
- logPath = os.path.join(self.tempdir, 'log', 'couch.log')
- while not os.path.exists(logPath):
- if self.process.poll() is not None:
- got_stdout, got_stderr = "", ""
- if self.process.stdout is not None:
- got_stdout = self.process.stdout.read()
-
- if self.process.stderr is not None:
- got_stderr = self.process.stderr.read()
- raise Exception("""
-couchdb exited with code %d.
-stdout:
-%s
-stderr:
-%s""" % (
- self.process.returncode, got_stdout, got_stderr))
- time.sleep(0.01)
- if (time.time() - boot_time) > self.BOOT_TIMEOUT_SECONDS:
- self.stop()
- raise Exception("Timeout starting couch")
- while os.stat(logPath).st_size == 0:
- time.sleep(0.01)
- if (time.time() - boot_time) > self.BOOT_TIMEOUT_SECONDS:
- self.stop()
- raise Exception("Timeout starting couch")
- PORT_RE = re.compile(
- 'Apache CouchDB has started on http://127.0.0.1:(?P<port>\d+)')
-
- handle = open(logPath)
- line = handle.read()
- handle.close()
- m = PORT_RE.search(line)
- if not m:
- self.stop()
- raise Exception("Cannot find port in line %s" % line)
- self.port = int(m.group('port'))
-
- def stop(self):
- """
- Terminate the CouchDB instance.
- """
- try:
- self.process.terminate()
- self.process.communicate()
- except:
- # just to clean up
- # if it can't, the process wasn't created anyway
- pass
- shutil.rmtree(self.tempdir)
-
-
class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest):
"""
@@ -460,15 +345,16 @@ class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest):
"""
Make sure we have a CouchDB instance for a test.
"""
- self.wrapper = CouchDBWrapper()
- self.wrapper.start()
- # self.db = self.wrapper.db
+ self.couch_port = 5984
+ self.couch_url = 'http://localhost:%d' % self.couch_port
+ self.couch_server = couchdb.Server(self.couch_url)
- def tearDown(self):
- """
- Stop CouchDB instance for test.
- """
- self.wrapper.stop()
+ def delete_db(self, name):
+ try:
+ self.couch_server.delete(name)
+ except:
+ # ignore if already missing
+ pass
class CouchServerStateForTests(CouchServerState):
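
Note: CouchDBTestCase above now assumes a CouchDB instance is already listening on localhost:5984 rather than booting a private one per test. A minimal sketch of the couchdb-python calls it leans on (the database name is a placeholder):

    import couchdb

    server = couchdb.Server('http://localhost:5984')
    server.create('test-scratch')          # create a database
    try:
        server.delete('test-scratch')      # drop it again
    except couchdb.http.ResourceNotFound:
        pass                               # already gone; nothing to do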
@@ -484,15 +370,25 @@ class CouchServerStateForTests(CouchServerState):
which is less pleasant than allowing the db to be automatically created.
"""
- def _create_database(self, dbname):
- return CouchDatabase.open_database(
- urljoin(self._couch_url, dbname),
+ def __init__(self, *args, **kwargs):
+ self.dbs = []
+ super(CouchServerStateForTests, self).__init__(*args, **kwargs)
+
+ def _create_database(self, replica_uid=None, dbname=None):
+ """
+ Create a db and append it to a list, so the test can delete it later.
+ """
+ dbname = dbname or ('test-%s' % uuid4().hex)
+ db = CouchDatabase.open_database(
+ urljoin(self.couch_url, dbname),
True,
- replica_uid=dbname,
+ replica_uid=replica_uid or 'test',
ensure_ddocs=True)
+ self.dbs.append(db)
+ return db
def ensure_database(self, dbname):
- db = self._create_database(dbname)
+ db = self._create_database(dbname=dbname)
return db, db.replica_uid
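
Note: when no dbname is passed, _create_database picks a fresh name, so parallel tests get isolated databases. Roughly:

    from uuid import uuid4
    from urlparse import urljoin  # Python 2, matching this codebase

    couch_url = 'http://localhost:5984'
    dbname = 'test-%s' % uuid4().hex     # e.g. 'test-9f1c...'
    db_url = urljoin(couch_url, dbname)  # 'http://localhost:5984/test-9f1c...'
    # CouchDatabase.open_database(db_url, True, replica_uid='test',
    #                             ensure_ddocs=True) then creates and opens it.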
@@ -506,23 +402,20 @@ class SoledadWithCouchServerMixin(
main_test_class = getattr(self, 'main_test_class', None)
if main_test_class is not None:
main_test_class.setUp(self)
- self._couch_url = 'http://localhost:%d' % self.wrapper.port
def tearDown(self):
main_test_class = getattr(self, 'main_test_class', None)
if main_test_class is not None:
main_test_class.tearDown(self)
# delete the test database
- try:
- db = CouchDatabase(self._couch_url, 'test')
- db.delete_database()
- except DatabaseDoesNotExist:
- pass
BaseSoledadTest.tearDown(self)
CouchDBTestCase.tearDown(self)
def make_app(self):
- couch_url = urljoin(
- 'http://localhost:' + str(self.wrapper.port), 'tests')
- self.request_state = CouchServerStateForTests(couch_url)
+ self.request_state = CouchServerStateForTests(self.couch_url)
+ self.addCleanup(self.delete_dbs)
return self.make_app_with_state(self.request_state)
+
+ def delete_dbs(self):
+ for db in self.request_state.dbs:
+ self.delete_db(db._dbname)
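
Note: make_app hands delete_dbs to addCleanup, so every database recorded by CouchServerStateForTests is dropped even when a test fails (cleanups run after tearDown, newest first). A self-contained illustration of the mechanism:

    import unittest

    class CleanupDemo(unittest.TestCase):

        def setUp(self):
            self.dbs = []                     # stands in for request_state.dbs
            self.addCleanup(self.delete_dbs)  # runs even if the test fails

        def delete_dbs(self):
            # the real mixin calls self.delete_db(db._dbname) per database
            del self.dbs[:]

        def test_creates_dbs(self):
            self.dbs.append('test-db')
            self.assertEqual(1, len(self.dbs))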