From 36f3daf7ae3a76711cdc319a97a835047cd22605 Mon Sep 17 00:00:00 2001
From: Christoph Kluenter
Date: Thu, 7 Jul 2016 09:15:52 +0200
Subject: [pkg] remove pixelated from requirements-latest.pip

Modifying the original PR [0] by Christoph to account for the recent
vendoring of l2db code, which means we no longer depend on u1db/dirspec.

I expect the whole mess about the venv setup to be further simplified
pretty soon, since we are going to merge most of the leap.* packages
into a couple of repos.

[0] https://github.com/leapcode/soledad/pull/327
---
 client/pkg/requirements-latest.pip | 2 +-
 common/pkg/requirements-latest.pip | 2 +-
 server/pkg/requirements-latest.pip | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/client/pkg/requirements-latest.pip b/client/pkg/requirements-latest.pip
index 46a7ccba..d32e1ffa 100644
--- a/client/pkg/requirements-latest.pip
+++ b/client/pkg/requirements-latest.pip
@@ -1,5 +1,5 @@
 --index-url https://pypi.python.org/simple/
 
--e 'git+https://github.com/pixelated-project/leap_pycommon.git@develop#egg=leap.common'
+-e 'git+https://github.com/leapcode/leap_pycommon.git@develop#egg=leap.common'
 -e '../common'
 -e .

diff --git a/common/pkg/requirements-latest.pip b/common/pkg/requirements-latest.pip
index 396d77f1..852f2433 100644
--- a/common/pkg/requirements-latest.pip
+++ b/common/pkg/requirements-latest.pip
@@ -1,4 +1,4 @@
 --index-url https://pypi.python.org/simple/
 
--e 'git+https://github.com/pixelated-project/leap_pycommon.git@develop#egg=leap.common'
+-e 'git+https://github.com/leapcode/leap_pycommon.git@develop#egg=leap.common'
 -e .

diff --git a/server/pkg/requirements-latest.pip b/server/pkg/requirements-latest.pip
index 46a7ccba..d32e1ffa 100644
--- a/server/pkg/requirements-latest.pip
+++ b/server/pkg/requirements-latest.pip
@@ -1,5 +1,5 @@
 --index-url https://pypi.python.org/simple/
 
--e 'git+https://github.com/pixelated-project/leap_pycommon.git@develop#egg=leap.common'
+-e 'git+https://github.com/leapcode/leap_pycommon.git@develop#egg=leap.common'
 -e '../common'
 -e .
--
cgit v1.2.3


From 0514978eeed0e4db41fe13b3352ab55ccf299ef1 Mon Sep 17 00:00:00 2001
From: drebs
Date: Fri, 22 Jul 2016 20:14:26 +0200
Subject: [test] fail gracefully on cert delete

---
 scripts/docker/files/bin/setup-test-env.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/scripts/docker/files/bin/setup-test-env.py b/scripts/docker/files/bin/setup-test-env.py
index 0f3ea6f4..c0487e8f 100755
--- a/scripts/docker/files/bin/setup-test-env.py
+++ b/scripts/docker/files/bin/setup-test-env.py
@@ -389,8 +389,11 @@ def cert_create(args):
 def cert_delete(args):
     private_key = os.path.join(args.basedir, args.private_key)
     cert_key = os.path.join(args.basedir, args.cert_key)
-    os.unlink(private_key)
-    os.unlink(cert_key)
+    try:
+        os.unlink(private_key)
+        os.unlink(cert_key)
+    except OSError:
+        pass
 
 
 #
--
cgit v1.2.3


From c2849c2f13adfc7c1388de50f41cd234868113ec Mon Sep 17 00:00:00 2001
From: drebs
Date: Fri, 22 Jul 2016 20:15:35 +0200
Subject: [test] update docker readme and todo

---
 scripts/docker/README.md | 9 +++++++++
 scripts/docker/TODO      | 4 ++++
 2 files changed, 13 insertions(+)

diff --git a/scripts/docker/README.md b/scripts/docker/README.md
index c4d7ac94..fda1c04a 100644
--- a/scripts/docker/README.md
+++ b/scripts/docker/README.md
@@ -14,6 +14,15 @@ Check the `Makefile` for the rules for running containers.
 
 Check the `helper/` directory for scripts that help running tests.
 
+Installation
+------------
+
+0. update and install
+1. Install docker for your system: https://docs.docker.com/
+2. Build the image by running `make`
+3. Use one of the scripts in the `helper/` directory
+
+
 Environment variables for docker containers
 -------------------------------------------
 
diff --git a/scripts/docker/TODO b/scripts/docker/TODO
index 5185d754..90597637 100644
--- a/scripts/docker/TODO
+++ b/scripts/docker/TODO
@@ -1 +1,5 @@
 - limit resources of containers (mem and cpu)
+- allow running couchdb on another container
+- use a config file to get defaults for running tests
+- use the /builds directory as base of git repo
+- save the test state to a directory to make it reproducible
--
cgit v1.2.3


From 23fe0be50c7c9408eab47e8286e19b16a77f66ad Mon Sep 17 00:00:00 2001
From: drebs
Date: Mon, 25 Jul 2016 08:06:39 -0300
Subject: [test] remove ddocs param from docker setup script

---
 scripts/docker/files/bin/setup-test-env.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/scripts/docker/files/bin/setup-test-env.py b/scripts/docker/files/bin/setup-test-env.py
index c0487e8f..4868fd56 100755
--- a/scripts/docker/files/bin/setup-test-env.py
+++ b/scripts/docker/files/bin/setup-test-env.py
@@ -194,12 +194,12 @@ def user_db_create(args):
     url = 'http://localhost:%d/user-%s' % (args.port, args.uuid)
     try:
         CouchDatabase.open_database(
-            url=url, create=False, replica_uid=None, ensure_ddocs=True)
+            url=url, create=False, replica_uid=None)
         print '[*] error: database "user-%s" already exists' % args.uuid
         exit(1)
     except DatabaseDoesNotExist:
         CouchDatabase.open_database(
-            url=url, create=True, replica_uid=None, ensure_ddocs=True)
+            url=url, create=True, replica_uid=None)
         print '[+] database created: user-%s' % args.uuid
 
 
@@ -372,7 +372,10 @@ CERT_CONFIG_FILE = os.path.join(
 def cert_create(args):
     private_key = os.path.join(args.basedir, args.private_key)
     cert_key = os.path.join(args.basedir, args.cert_key)
-    os.mkdir(args.basedir)
+    try:
+        os.mkdir(args.basedir)
+    except OSError:
+        pass
     call([
         'openssl',
         'req',
--
cgit v1.2.3


From fda2acf0c8aaf123359470ced37f56e8223a3286 Mon Sep 17 00:00:00 2001
From: drebs
Date: Thu, 21 Jul 2016 13:51:06 +0200
Subject: [feat] use _local couch docs for metadata storage

---
 common/src/leap/soledad/common/couch/__init__.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py
index 523a50a0..21ffd036 100644
--- a/common/src/leap/soledad/common/couch/__init__.py
+++ b/common/src/leap/soledad/common/couch/__init__.py
@@ -110,6 +110,9 @@ class CouchDatabase(object):
     CouchDB details from backend code.
     """
 
+    CONFIG_DOC_ID = '_local/config'
+    SYNC_DOC_ID_PREFIX = '_local/sync_'
+
     @classmethod
     def open_database(cls, url, create, ensure_ddocs=False, replica_uid=None,
                       database_security=None):
@@ -261,12 +264,12 @@ class CouchDatabase(object):
         """
         try:
             # set on existent config document
-            doc = self._database['u1db_config']
+            doc = self._database[self.CONFIG_DOC_ID]
            doc['replica_uid'] = replica_uid
         except ResourceNotFound:
             # or create the config document
             doc = {
-                '_id': 'u1db_config',
+                '_id': self.CONFIG_DOC_ID,
                 'replica_uid': replica_uid,
             }
         self._database.save(doc)
@@ -280,7 +283,7 @@ class CouchDatabase(object):
         """
         try:
             # grab replica_uid from server
-            doc = self._database['u1db_config']
+            doc = self._database[self.CONFIG_DOC_ID]
             replica_uid = doc['replica_uid']
             return replica_uid
         except ResourceNotFound:
@@ -499,7 +502,7 @@ class CouchDatabase(object):
             synchronized with the replica, this is (0, '').
         :rtype: (int, str)
         """
-        doc_id = 'u1db_sync_%s' % other_replica_uid
+        doc_id = '%s%s' % (self.SYNC_DOC_ID_PREFIX, other_replica_uid)
         try:
             doc = self._database[doc_id]
         except ResourceNotFound:
@@ -562,7 +565,7 @@ class CouchDatabase(object):
             generation.
         :type other_transaction_id: str
         """
-        doc_id = 'u1db_sync_%s' % other_replica_uid
+        doc_id = '%s%s' % (self.SYNC_DOC_ID_PREFIX, other_replica_uid)
         try:
             doc = self._database[doc_id]
         except ResourceNotFound:
--
cgit v1.2.3


From 239273e765ffc2cf3f74dc2bdd6b7ac8a8acdccd Mon Sep 17 00:00:00 2001
From: drebs
Date: Fri, 22 Jul 2016 19:53:21 +0200
Subject: [feat] do not use couch views for sync metadata

When compared to a plain couch document get, the use of the simplest view
function takes around double the time, while the use of the simplest list
function can take more than 8 times as long:

  get 100 docs:
    total: 0.440337 secs
    mean: 0.004403

  query 100 views:
    total: 0.911425 secs
    mean: 0.009114

  query 100 lists:
    total: 3.711537 secs
    mean: 0.037115

Besides that, the current implementation of sync metadata storage over
couch is dependent on timestamps of document puts, which can lead to
metadata corruption if the clock of the system is changed for any reason.

For these reasons, we seek to change the implementation of database
metadata. This commit implements the storage of transaction log data on
couch documents with special ids, in the form "gen-xxxxxxxxxx", where the
x's are replaced by the generation index.

Each generation document holds a dictionary containing the generation,
doc_id and transaction_id for the changed document. For each modified
document, a generation document is inserted holding the transaction
metadata.
---
 common/src/leap/soledad/common/couch/__init__.py | 121 ++++++++++++-----------
 1 file changed, 65 insertions(+), 56 deletions(-)

diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py
index 21ffd036..6cad2b19 100644
--- a/common/src/leap/soledad/common/couch/__init__.py
+++ b/common/src/leap/soledad/common/couch/__init__.py
@@ -103,6 +103,10 @@ def couch_server(url):
 THREAD_POOL = ThreadPool(20)
 
 
+def _get_gen_doc_id(gen):
+    return 'gen-%s' % str(gen).zfill(10)
+
+
 class CouchDatabase(object):
     """
     Holds CouchDB related code.
@@ -213,7 +217,7 @@ class CouchDatabase(object):
         Ensure that the design documents used by the backend exist on the
         couch database.
         """
-        for ddoc_name in ['docs', 'syncs', 'transactions']:
+        for ddoc_name in ['docs', 'syncs']:
             try:
                 self.json_from_resource(['_design'] +
                                         ddoc_name.split('/') + ['_info'],
@@ -437,8 +441,6 @@ class CouchDatabase(object):
                     result['_attachments']['u1db_conflicts']['data']))))
         # store couch revision
         doc.couch_rev = result['_rev']
-        # store transactions
-        doc.transactions = result['u1db_transactions']
         return doc
 
     def _build_conflicts(self, doc_id, attached_conflicts):
@@ -474,14 +476,11 @@ class CouchDatabase(object):
         """
         if generation == 0:
             return ''
-        # query a couch list function
-        ddoc_path = [
-            '_design', 'transactions', '_list', 'trans_id_for_gen', 'log'
-        ]
-        response = self.json_from_resource(ddoc_path, gen=generation)
-        if response == {}:
+        log = self._get_transaction_log(start=generation, end=generation)
+        if not log:
             raise InvalidGeneration
-        return response['transaction_id']
+        _, _, trans_id = log[0]
+        return trans_id
 
     def get_replica_gen_and_trans_id(self, other_replica_uid):
         """
@@ -512,8 +511,8 @@ class CouchDatabase(object):
                 'transaction_id': '',
             }
             self._database.save(doc)
-        result = doc['generation'], doc['transaction_id']
-        return result
+        gen, trans_id = doc['generation'], doc['transaction_id']
+        return gen, trans_id
 
     def get_doc_conflicts(self, doc_id, couch_rev=None):
         """
@@ -581,12 +580,32 @@ class CouchDatabase(object):
         :return: The complete transaction log.
         :rtype: [(str, str)]
         """
-        # query a couch view
-        ddoc_path = ['_design', 'transactions', '_view', 'log']
-        response = self.json_from_resource(ddoc_path)
-        return map(
-            lambda row: (row['id'], row['value']),
-            response['rows'])
+        log = self._get_transaction_log()
+        return map(lambda i: (i[1], i[2]), log)
+
+    def _get_gen_docs(
+            self, start=0, end=9999999999, descending=None, limit=None):
+        params = {}
+        if descending:
+            params['descending'] = 'true'
+            # honor couch way of traversing the view tree in reverse order
+            start, end = end, start
+        params['startkey'] = _get_gen_doc_id(start)
+        params['endkey'] = _get_gen_doc_id(end)
+        params['include_docs'] = 'true'
+        if limit:
+            params['limit'] = limit
+        view = self._database.view("_all_docs", **params)
+        return view.rows
+
+    def _get_transaction_log(self, start=0, end=9999999999):
+        # get current gen and trans_id
+        rows = self._get_gen_docs(start=start, end=end)
+        log = []
+        for row in rows:
+            doc = row['doc']
+            log.append((doc['gen'], doc['doc_id'], doc['trans_id']))
+        return log
 
     def whats_changed(self, old_generation=0):
         """
@@ -605,32 +624,18 @@ class CouchDatabase(object):
             changes first)
         :rtype: (int, str, [(str, int, str)])
         """
-        # query a couch list function
-        ddoc_path = [
-            '_design', 'transactions', '_list', 'whats_changed', 'log'
-        ]
-        response = self.json_from_resource(ddoc_path, old_gen=old_generation)
-        results = map(
-            lambda row:
-            (row['generation'], row['doc_id'], row['transaction_id']),
-            response['transactions'])
-        results.reverse()
-        cur_gen = old_generation
-        seen = set()
         changes = []
-        newest_trans_id = ''
-        for generation, doc_id, trans_id in results:
+        cur_generation, last_trans_id = self.get_generation_info()
+        relevant_tail = self._get_transaction_log(start=old_generation + 1)
+        seen = set()
+        generation = cur_generation
+        for _, doc_id, trans_id in reversed(relevant_tail):
             if doc_id not in seen:
                 changes.append((doc_id, generation, trans_id))
                 seen.add(doc_id)
-        if changes:
-            cur_gen = changes[0][1]  # max generation
-            newest_trans_id = changes[0][2]
-            changes.reverse()
-        else:
-            cur_gen, newest_trans_id = self.get_generation_info()
-
-        return cur_gen, newest_trans_id, changes
+            generation -= 1
+        changes.reverse()
+        return (cur_generation, last_trans_id, changes)
 
     def get_generation_info(self):
         """
@@ -641,10 +646,11 @@ class CouchDatabase(object):
         """
         if self.batching and self.batch_generation:
            return self.batch_generation
-        # query a couch list function
-        ddoc_path = ['_design', 'transactions', '_list', 'generation', 'log']
-        info = self.json_from_resource(ddoc_path)
-        return (info['generation'], info['transaction_id'])
+        rows = self._get_gen_docs(descending=True, limit=1)
+        if not rows:
+            return 0, ''
+        gen_doc = rows.pop()['doc']
+        return gen_doc['gen'], gen_doc['trans_id']
 
     def json_from_resource(self, ddoc_path, check_missing_ddoc=True,
                            **kwargs):
@@ -740,21 +746,25 @@ class CouchDatabase(object):
             'length': len(conflicts),
         }
         parts.append(conflicts)
-        # store old transactions, if any
-        transactions = old_doc.transactions[:] if old_doc is not None else []
-        # create a new transaction id and timestamp it so the transaction log
-        # is consistent when querying the database.
-        transactions.append(
-            # here we store milliseconds to keep consistent with javascript
-            # Date.prototype.getTime() which was used before inside a couchdb
-            # update handler.
-            (int(time.time() * 1000),
-             transaction_id))
+        # add the gen document
+        while True:  # TODO: add a lock, remove this while
+            try:
+                gen, _ = self.get_generation_info()
+                new_gen = gen + 1
+                gen_doc = {
+                    '_id': _get_gen_doc_id(new_gen),
+                    'gen': new_gen,
+                    'doc_id': doc.doc_id,
+                    'trans_id': transaction_id,
+                }
+                self._database.save(gen_doc)
+                break
+            except ResourceConflict:
+                pass
         # build the couch document
         couch_doc = {
             '_id': doc.doc_id,
             'u1db_rev': doc.rev,
-            'u1db_transactions': transactions,
             '_attachments': attachments,
         }
         # if we are updating a doc we have to add the couch doc revision
@@ -786,7 +796,6 @@ class CouchDatabase(object):
             self.batch_docs[doc.doc_id] = couch_doc
             last_gen, last_trans_id = self.batch_generation
             self.batch_generation = (last_gen + 1, transaction_id)
-        return transactions[-1][1]
 
     def _new_resource(self, *path):
         """
--
cgit v1.2.3


From 2a57f749672580591b07065adde334029ddfb460 Mon Sep 17 00:00:00 2001
From: drebs
Date: Sat, 23 Jul 2016 17:11:14 +0200
Subject: [test] adapt couch tests to use new generation/transaction storage
 scheme

---
 testing/tests/couch/common.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/testing/tests/couch/common.py b/testing/tests/couch/common.py
index b08e1fa3..263ac94c 100644
--- a/testing/tests/couch/common.py
+++ b/testing/tests/couch/common.py
@@ -49,7 +49,6 @@ def copy_couch_database_for_test(test, db):
         elif 'u1db_rev' in doc:
             new_doc = {
                 '_id': doc['_id'],
-                'u1db_transactions': doc['u1db_transactions'],
                 'u1db_rev': doc['u1db_rev']
             }
             attachments = []
@@ -65,6 +64,8 @@ def copy_couch_database_for_test(test, db):
                 if (att is not None):
                     new_couch_db.put_attachment(new_doc, att,
                                                 filename=att_name)
+        elif doc_id.startswith('gen-'):
+            new_couch_db.save(doc)
     # cleanup connections to prevent file descriptor leaking
     return new_db
--
cgit v1.2.3


From c3e0f52080041e2a01cfa483efe73f8503a10f31 Mon Sep 17 00:00:00 2001
From: drebs
Date: Fri, 22 Jul 2016 20:33:06 +0200
Subject: [feat] remove usage of design documents in couch

Design documents are slow and we already have alternatives to all uses we
used to make of them, so this commit completely removes all usage of
design documents.
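
For reference, the kind of view-free query that replaces the old view and
list functions is a plain range query over couch's built-in _all_docs
index. A minimal sketch using python-couchdb, which this backend already
depends on (the server URL and database name are illustrative, not part
of this changeset):

    from couchdb import Server

    def gen_doc_id(gen):
        # generation doc ids are zero-padded so that they sort
        # correctly in the _all_docs key order
        return 'gen-%s' % str(gen).zfill(10)

    db = Server('http://127.0.0.1:5984/')['user-db-name']  # illustrative

    # fetch the transaction log for generations 1..42, no design doc used
    rows = db.view('_all_docs',
                   startkey=gen_doc_id(1),
                   endkey=gen_doc_id(42),
                   include_docs=True)
    log = [(row['doc']['gen'], row['doc']['doc_id'], row['doc']['trans_id'])
           for row in rows]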
--- common/src/leap/soledad/common/.gitignore | 1 - common/src/leap/soledad/common/README.txt | 9 -- common/src/leap/soledad/common/couch/__init__.py | 78 ++--------- common/src/leap/soledad/common/couch/errors.py | 144 -------------------- common/src/leap/soledad/common/ddocs/README.txt | 34 ----- .../soledad/common/ddocs/docs/views/get/map.js | 20 --- .../soledad/common/ddocs/syncs/updates/state.js | 105 --------------- .../ddocs/syncs/views/changes_to_return/map.js | 20 --- .../common/ddocs/syncs/views/seen_ids/map.js | 11 -- .../soledad/common/ddocs/syncs/views/state/map.js | 17 --- .../common/ddocs/transactions/lists/generation.js | 20 --- .../ddocs/transactions/lists/trans_id_for_gen.js | 19 --- .../ddocs/transactions/lists/whats_changed.js | 22 --- .../common/ddocs/transactions/views/log/map.js | 7 - testing/tests/couch/common.py | 4 - testing/tests/couch/test_ddocs.py | 149 +-------------------- 16 files changed, 10 insertions(+), 650 deletions(-) delete mode 100644 common/src/leap/soledad/common/.gitignore delete mode 100644 common/src/leap/soledad/common/couch/errors.py delete mode 100644 common/src/leap/soledad/common/ddocs/README.txt delete mode 100644 common/src/leap/soledad/common/ddocs/docs/views/get/map.js delete mode 100644 common/src/leap/soledad/common/ddocs/syncs/updates/state.js delete mode 100644 common/src/leap/soledad/common/ddocs/syncs/views/changes_to_return/map.js delete mode 100644 common/src/leap/soledad/common/ddocs/syncs/views/seen_ids/map.js delete mode 100644 common/src/leap/soledad/common/ddocs/syncs/views/state/map.js delete mode 100644 common/src/leap/soledad/common/ddocs/transactions/lists/generation.js delete mode 100644 common/src/leap/soledad/common/ddocs/transactions/lists/trans_id_for_gen.js delete mode 100644 common/src/leap/soledad/common/ddocs/transactions/lists/whats_changed.js delete mode 100644 common/src/leap/soledad/common/ddocs/transactions/views/log/map.js diff --git a/common/src/leap/soledad/common/.gitignore b/common/src/leap/soledad/common/.gitignore deleted file mode 100644 index 3378c78a..00000000 --- a/common/src/leap/soledad/common/.gitignore +++ /dev/null @@ -1 +0,0 @@ -ddocs.py diff --git a/common/src/leap/soledad/common/README.txt b/common/src/leap/soledad/common/README.txt index 38b9858e..0a252650 100644 --- a/common/src/leap/soledad/common/README.txt +++ b/common/src/leap/soledad/common/README.txt @@ -60,15 +60,6 @@ implemented in a way that all changes will be pushed with just one operation. * delete_index * create_index -Couch views and update functions are used in order to achieve atomicity on the -Couch backend. Transactions are stored in the `u1db_transactions` field of the -couch document. Document's content and conflicted versions are stored as couch -document attachments with names, respectivelly, `u1db_content` and -`u1db_conflicts`. - -A map of methods and couch query URI can be found on the `./ddocs/README.txt` -document. 
- Notes: * Currently, the couch backend does not implement indexing, so what is diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py index 6cad2b19..ca0c2855 100644 --- a/common/src/leap/soledad/common/couch/__init__.py +++ b/common/src/leap/soledad/common/couch/__init__.py @@ -37,7 +37,6 @@ from couchdb.client import Server, Database from couchdb.http import ( ResourceConflict, ResourceNotFound, - ServerError, Session, urljoin as couch_urljoin, Resource, @@ -50,9 +49,6 @@ from leap.soledad.common.l2db.errors import ( from leap.soledad.common.l2db.remote import http_app -from leap.soledad.common import ddocs -from .errors import raise_server_error -from .errors import raise_missing_design_doc_error from .support import MultipartWriter from leap.soledad.common.errors import InvalidURLError from leap.soledad.common.document import ServerDocument @@ -177,7 +173,6 @@ class CouchDatabase(object): self.batch_generation = None self.batch_docs = {} if ensure_ddocs: - self.ensure_ddocs_on_db() self.ensure_security_ddoc(database_security) def batch_start(self): @@ -212,22 +207,6 @@ class CouchDatabase(object): except ResourceNotFound: raise DatabaseDoesNotExist() - def ensure_ddocs_on_db(self): - """ - Ensure that the design documents used by the backend exist on the - couch database. - """ - for ddoc_name in ['docs', 'syncs']: - try: - self.json_from_resource(['_design'] + - ddoc_name.split('/') + ['_info'], - check_missing_ddoc=False) - except ResourceNotFound: - ddoc = json.loads( - binascii.a2b_base64( - getattr(ddocs, ddoc_name))) - self._database.save(ddoc) - def ensure_security_ddoc(self, security_config=None): """ Make sure that only soledad user is able to access this database as @@ -539,7 +518,6 @@ class CouchDatabase(object): try: response = self.json_from_resource([doc_id, 'u1db_conflicts'], - check_missing_ddoc=False, **params) return conflicts + self._build_conflicts( doc_id, json.loads(response.read())) @@ -652,48 +630,23 @@ class CouchDatabase(object): gen_doc = rows.pop()['doc'] return gen_doc['gen'], gen_doc['trans_id'] - def json_from_resource(self, ddoc_path, check_missing_ddoc=True, - **kwargs): + def json_from_resource(self, doc_path, **kwargs): """ Get a resource from it's path and gets a doc's JSON using provided - parameters, also checking for missing design docs by default. + parameters. - :param ddoc_path: The path to resource. - :type ddoc_path: [str] - :param check_missing_ddoc: Raises info on what design doc is missing. - :type check_missin_ddoc: bool + :param doc_path: The path to resource. + :type doc_path: [str] :return: The request's data parsed from JSON to a dict. :rtype: dict - - :raise MissingDesignDocError: Raised when tried to access a missing - design document. - :raise MissingDesignDocListFunctionError: Raised when trying to access - a missing list function on a - design document. - :raise MissingDesignDocNamedViewError: Raised when trying to access a - missing named view on a design - document. - :raise MissingDesignDocDeletedError: Raised when trying to access a - deleted design document. - :raise MissingDesignDocUnknownError: Raised when failed to access a - design document for an yet - unknown reason. 
- """ - if ddoc_path is not None: - resource = self._database.resource(*ddoc_path) + """ + if doc_path is not None: + resource = self._database.resource(*doc_path) else: resource = self._database.resource() - try: - _, _, data = resource.get_json(**kwargs) - return data - except ResourceNotFound as e: - if check_missing_ddoc: - raise_missing_design_doc_error(e, ddoc_path) - else: - raise e - except ServerError as e: - raise_server_error(e, ddoc_path) + _, _, data = resource.get_json(**kwargs) + return data def save_document(self, old_doc, doc, transaction_id): """ @@ -710,19 +663,6 @@ class CouchDatabase(object): :raise RevisionConflict: Raised when trying to update a document but couch revisions mismatch. - :raise MissingDesignDocError: Raised when tried to access a missing - design document. - :raise MissingDesignDocListFunctionError: Raised when trying to access - a missing list function on a - design document. - :raise MissingDesignDocNamedViewError: Raised when trying to access a - missing named view on a design - document. - :raise MissingDesignDocDeletedError: Raised when trying to access a - deleted design document. - :raise MissingDesignDocUnknownError: Raised when failed to access a - design document for an yet - unknown reason. """ attachments = {} # we save content and conflicts as attachments parts = [] # and we put it using couch's multipart PUT diff --git a/common/src/leap/soledad/common/couch/errors.py b/common/src/leap/soledad/common/couch/errors.py deleted file mode 100644 index 9b287c76..00000000 --- a/common/src/leap/soledad/common/couch/errors.py +++ /dev/null @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- -# errors.py -# Copyright (C) 2015 LEAP -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -from leap.soledad.common.errors import SoledadError, BackendNotReadyError -from leap.soledad.common.errors import register_exception - -""" -Specific errors that can be raised by CouchDatabase. -""" - - -@register_exception -class MissingDesignDocError(BackendNotReadyError): - - """ - Raised when trying to access a missing couch design document. - """ - - wire_description = "missing design document" - status = 500 - - -@register_exception -class MissingDesignDocNamedViewError(SoledadError): - - """ - Raised when trying to access a missing named view on a couch design - document. - """ - - wire_description = "missing design document named function" - status = 500 - - -@register_exception -class MissingDesignDocListFunctionError(SoledadError): - - """ - Raised when trying to access a missing list function on a couch design - document. - """ - - wire_description = "missing design document list function" - status = 500 - - -@register_exception -class MissingDesignDocDeletedError(SoledadError): - - """ - Raised when trying to access a deleted couch design document. 
- """ - - wire_description = "design document was deleted" - status = 500 - - -@register_exception -class DesignDocUnknownError(SoledadError): - - """ - Raised when trying to access a couch design document and getting an - unknown error. - """ - - wire_description = "missing design document unknown error" - status = 500 - - -def raise_missing_design_doc_error(exc, ddoc_path): - """ - Raise an appropriate exception when catching a ResourceNotFound when - accessing a design document. - - :param exc: The exception cought. - :type exc: ResourceNotFound - :param ddoc_path: A list representing the requested path. - :type ddoc_path: list - - :raise MissingDesignDocError: Raised when tried to access a missing design - document. - :raise MissingDesignDocListFunctionError: Raised when trying to access a - missing list function on a - design document. - :raise MissingDesignDocNamedViewError: Raised when trying to access a - missing named view on a design - document. - :raise MissingDesignDocDeletedError: Raised when trying to access a - deleted design document. - :raise MissingDesignDocUnknownError: Raised when failed to access a design - document for an yet unknown reason. - """ - path = "".join(ddoc_path) - if exc.message[1] == 'missing': - raise MissingDesignDocError(path) - elif exc.message[1] == 'missing function' or \ - exc.message[1].startswith('missing lists function'): - raise MissingDesignDocListFunctionError(path) - elif exc.message[1] == 'missing_named_view': - raise MissingDesignDocNamedViewError(path) - elif exc.message[1] == 'deleted': - raise MissingDesignDocDeletedError(path) - # other errors are unknown for now - raise DesignDocUnknownError("%s: %s" % (path, str(exc.message))) - - -def raise_server_error(exc, ddoc_path): - """ - Raise an appropriate exception when catching a ServerError when - accessing a design document. - - :param exc: The exception cought. - :type exc: ResourceNotFound - :param ddoc_path: A list representing the requested path. - :type ddoc_path: list - - :raise MissingDesignDocListFunctionError: Raised when trying to access a - missing list function on a - design document. - :raise MissingDesignDocUnknownError: Raised when failed to access a design - document for an yet unknown reason. - """ - path = "".join(ddoc_path) - msg = exc.message[1][0] - if msg == 'unnamed_error': - raise MissingDesignDocListFunctionError(path) - elif msg == 'TypeError': - if 'point is undefined' in exc.message[1][1]: - raise MissingDesignDocListFunctionError - # other errors are unknown for now - raise DesignDocUnknownError("%s: %s" % (path, str(exc.message))) diff --git a/common/src/leap/soledad/common/ddocs/README.txt b/common/src/leap/soledad/common/ddocs/README.txt deleted file mode 100644 index 5569d929..00000000 --- a/common/src/leap/soledad/common/ddocs/README.txt +++ /dev/null @@ -1,34 +0,0 @@ -This directory holds a folder structure containing javascript files that -represent the design documents needed by the CouchDB U1DB backend. These files -are compiled into the `../ddocs.py` file by setuptools when creating the -source distribution. - -The following table depicts the U1DB CouchDB backend method and the URI that -is queried to obtain/update data from/to the server. 
- - +----------------------------------+------------------------------------------------------------------+ - | u1db backend method | URI | - |----------------------------------+------------------------------------------------------------------| - | _get_generation | _design/transactions/_list/generation/log | - | _get_generation_info | _design/transactions/_list/generation/log | - | _get_trans_id_for_gen | _design/transactions/_list/trans_id_for_gen/log | - | _get_transaction_log | _design/transactions/_view/log | - | _get_doc (*) | _design/docs/_view/get?key= | - | _has_conflicts | _design/docs/_view/get?key= | - | get_all_docs | _design/docs/_view/get | - | _put_doc | _design/docs/_update/put/ | - | _whats_changed | _design/transactions/_list/whats_changed/log?old_gen= | - | _get_conflicts (*) | _design/docs/_view/conflicts?key= | - | _get_replica_gen_and_trans_id | _design/syncs/_view/log?other_replica_uid= | - | _do_set_replica_gen_and_trans_id | _design/syncs/_update/put/u1db_sync_log | - | _add_conflict | _design/docs/_update/add_conflict/ | - | _delete_conflicts | _design/docs/_update/delete_conflicts/?doc_rev= | - | list_indexes | not implemented | - | _get_index_definition | not implemented | - | delete_index | not implemented | - | _get_indexed_fields | not implemented | - | _put_and_update_indexes | not implemented | - +----------------------------------+------------------------------------------------------------------+ - -(*) These methods also request CouchDB document attachments that store U1DB - document contents. diff --git a/common/src/leap/soledad/common/ddocs/docs/views/get/map.js b/common/src/leap/soledad/common/ddocs/docs/views/get/map.js deleted file mode 100644 index ae08d9e9..00000000 --- a/common/src/leap/soledad/common/ddocs/docs/views/get/map.js +++ /dev/null @@ -1,20 +0,0 @@ -function(doc) { - if (doc.u1db_rev) { - var is_tombstone = true; - var has_conflicts = false; - if (doc._attachments) { - if (doc._attachments.u1db_content) - is_tombstone = false; - if (doc._attachments.u1db_conflicts) - has_conflicts = true; - } - emit(doc._id, - { - "couch_rev": doc._rev, - "u1db_rev": doc.u1db_rev, - "is_tombstone": is_tombstone, - "has_conflicts": has_conflicts, - } - ); - } -} diff --git a/common/src/leap/soledad/common/ddocs/syncs/updates/state.js b/common/src/leap/soledad/common/ddocs/syncs/updates/state.js deleted file mode 100644 index d62aeb40..00000000 --- a/common/src/leap/soledad/common/ddocs/syncs/updates/state.js +++ /dev/null @@ -1,105 +0,0 @@ -/** - * This update handler stores information about ongoing synchronization - * attempts from distinct source replicas. - * - * Normally, u1db synchronization occurs during one POST request. In order to - * split that into many serial POST requests, we store the state of each sync - * in the server, using a document with id 'u1db_sync_state'. To identify - * each sync attempt, we use a sync_id sent by the client. If we ever receive - * a new sync_id, we trash current data for that source replica and start - * over. - * - * We expect the following in the document body: - * - * { - * 'source_replica_uid': '', - * 'sync_id': '', - * 'seen_ids': [['', ], ...], // optional - * 'changes_to_return': [ // optional - * 'gen': , - * 'trans_id': '', - * 'changes_to_return': [[', , ''], ...] 
- * ], - * } - * - * The format of the final document stored on server is: - * - * { - * '_id': '', - * '_rev' '', - * 'ongoing_syncs': { - * '': { - * 'sync_id': '', - * 'seen_ids': [['', [, ...], - * 'changes_to_return': { - * 'gen': , - * 'trans_id': '', - * 'changes_to_return': [ - * ['', , ''], - * ..., - * ], - * }, - * }, - * ... // info about other source replicas here - * } - * } - */ -function(doc, req) { - - // prevent updates to alien documents - if (doc != null && doc['_id'] != 'u1db_sync_state') - return [null, 'invalid data']; - - // create the document if it doesn't exist - if (!doc) - doc = { - '_id': 'u1db_sync_state', - 'ongoing_syncs': {}, - }; - - // parse and validate incoming data - var body = JSON.parse(req.body); - if (body['source_replica_uid'] == null) - return [null, 'invalid data']; - var source_replica_uid = body['source_replica_uid']; - - if (body['sync_id'] == null) - return [null, 'invalid data']; - var sync_id = body['sync_id']; - - // trash outdated sync data for that replica if that exists - if (doc['ongoing_syncs'][source_replica_uid] != null && - doc['ongoing_syncs'][source_replica_uid]['sync_id'] != sync_id) - delete doc['ongoing_syncs'][source_replica_uid]; - - // create an entry for that source replica - if (doc['ongoing_syncs'][source_replica_uid] == null) - doc['ongoing_syncs'][source_replica_uid] = { - 'sync_id': sync_id, - 'seen_ids': {}, - 'changes_to_return': null, - }; - - // incoming meta-data values should be exclusive, so we count how many - // arrived and deny to accomplish the transaction if the count is high. - var incoming_values = 0; - var info = doc['ongoing_syncs'][source_replica_uid] - - // add incoming seen id - if ('seen_id' in body) { - info['seen_ids'][body['seen_id'][0]] = body['seen_id'][1]; - incoming_values += 1; - } - - // add incoming changes_to_return - if ('changes_to_return' in body) { - info['changes_to_return'] = body['changes_to_return']; - incoming_values += 1; - } - - if (incoming_values != 1) - return [null, 'invalid data']; - - return [doc, 'ok']; -} - diff --git a/common/src/leap/soledad/common/ddocs/syncs/views/changes_to_return/map.js b/common/src/leap/soledad/common/ddocs/syncs/views/changes_to_return/map.js deleted file mode 100644 index 94b7e767..00000000 --- a/common/src/leap/soledad/common/ddocs/syncs/views/changes_to_return/map.js +++ /dev/null @@ -1,20 +0,0 @@ -function(doc) { - if (doc['_id'] == 'u1db_sync_state' && doc['ongoing_syncs'] != null) - for (var source_replica_uid in doc['ongoing_syncs']) { - var changes = doc['ongoing_syncs'][source_replica_uid]['changes_to_return']; - var sync_id = doc['ongoing_syncs'][source_replica_uid]['sync_id']; - if (changes == null) - emit([source_replica_uid, sync_id, 0], null); - else if (changes.length == 0) - emit([source_replica_uid, sync_id, 0], []); - else - for (var i = 0; i < changes['changes_to_return'].length; i++) - emit( - [source_replica_uid, sync_id, i], - { - 'gen': changes['gen'], - 'trans_id': changes['trans_id'], - 'next_change_to_return': changes['changes_to_return'][i], - }); - } -} diff --git a/common/src/leap/soledad/common/ddocs/syncs/views/seen_ids/map.js b/common/src/leap/soledad/common/ddocs/syncs/views/seen_ids/map.js deleted file mode 100644 index 16118e88..00000000 --- a/common/src/leap/soledad/common/ddocs/syncs/views/seen_ids/map.js +++ /dev/null @@ -1,11 +0,0 @@ -function(doc) { - if (doc['_id'] == 'u1db_sync_state' && doc['ongoing_syncs'] != null) - for (var source_replica_uid in doc['ongoing_syncs']) { - var sync_id = 
doc['ongoing_syncs'][source_replica_uid]['sync_id']; - emit( - [source_replica_uid, sync_id], - { - 'seen_ids': doc['ongoing_syncs'][source_replica_uid]['seen_ids'], - }); - } -} diff --git a/common/src/leap/soledad/common/ddocs/syncs/views/state/map.js b/common/src/leap/soledad/common/ddocs/syncs/views/state/map.js deleted file mode 100644 index e88c6ebb..00000000 --- a/common/src/leap/soledad/common/ddocs/syncs/views/state/map.js +++ /dev/null @@ -1,17 +0,0 @@ -function(doc) { - if (doc['_id'] == 'u1db_sync_state' && doc['ongoing_syncs'] != null) - for (var source_replica_uid in doc['ongoing_syncs']) { - var changes = doc['ongoing_syncs'][source_replica_uid]['changes_to_return']; - var sync_id = doc['ongoing_syncs'][source_replica_uid]['sync_id']; - if (changes == null) - emit([source_replica_uid, sync_id], null); - else - emit( - [source_replica_uid, sync_id], - { - 'gen': changes['gen'], - 'trans_id': changes['trans_id'], - 'number_of_changes': changes['changes_to_return'].length - }); - } -} diff --git a/common/src/leap/soledad/common/ddocs/transactions/lists/generation.js b/common/src/leap/soledad/common/ddocs/transactions/lists/generation.js deleted file mode 100644 index dbdfff0d..00000000 --- a/common/src/leap/soledad/common/ddocs/transactions/lists/generation.js +++ /dev/null @@ -1,20 +0,0 @@ -function(head, req) { - var row; - var rows=[]; - // fetch all rows - while(row = getRow()) { - rows.push(row); - } - if (rows.length > 0) - send(JSON.stringify({ - "generation": rows.length, - "doc_id": rows[rows.length-1]['id'], - "transaction_id": rows[rows.length-1]['value'] - })); - else - send(JSON.stringify({ - "generation": 0, - "doc_id": "", - "transaction_id": "", - })); -} diff --git a/common/src/leap/soledad/common/ddocs/transactions/lists/trans_id_for_gen.js b/common/src/leap/soledad/common/ddocs/transactions/lists/trans_id_for_gen.js deleted file mode 100644 index 2ec91794..00000000 --- a/common/src/leap/soledad/common/ddocs/transactions/lists/trans_id_for_gen.js +++ /dev/null @@ -1,19 +0,0 @@ -function(head, req) { - var row; - var rows=[]; - var i = 1; - var gen = 1; - if (req.query.gen) - gen = parseInt(req.query['gen']); - // fetch all rows - while(row = getRow()) - rows.push(row); - if (gen <= rows.length) - send(JSON.stringify({ - "generation": gen, - "doc_id": rows[gen-1]['id'], - "transaction_id": rows[gen-1]['value'], - })); - else - send('{}'); -} diff --git a/common/src/leap/soledad/common/ddocs/transactions/lists/whats_changed.js b/common/src/leap/soledad/common/ddocs/transactions/lists/whats_changed.js deleted file mode 100644 index b35cdf51..00000000 --- a/common/src/leap/soledad/common/ddocs/transactions/lists/whats_changed.js +++ /dev/null @@ -1,22 +0,0 @@ -function(head, req) { - var row; - var gen = 1; - var old_gen = 0; - if (req.query.old_gen) - old_gen = parseInt(req.query['old_gen']); - send('{"transactions":[\n'); - // fetch all rows - while(row = getRow()) { - if (gen > old_gen) { - if (gen > old_gen+1) - send(',\n'); - send(JSON.stringify({ - "generation": gen, - "doc_id": row["id"], - "transaction_id": row["value"] - })); - } - gen++; - } - send('\n]}'); -} diff --git a/common/src/leap/soledad/common/ddocs/transactions/views/log/map.js b/common/src/leap/soledad/common/ddocs/transactions/views/log/map.js deleted file mode 100644 index 94ef63ca..00000000 --- a/common/src/leap/soledad/common/ddocs/transactions/views/log/map.js +++ /dev/null @@ -1,7 +0,0 @@ -function(doc) { - if (doc.u1db_transactions) - doc.u1db_transactions.forEach(function(t) { - 
emit(t[0], // use timestamp as key so the results are ordered - t[1]); // value is the transaction_id - }); -} diff --git a/testing/tests/couch/common.py b/testing/tests/couch/common.py index 263ac94c..45cf8d7f 100644 --- a/testing/tests/couch/common.py +++ b/testing/tests/couch/common.py @@ -41,10 +41,6 @@ def copy_couch_database_for_test(test, db): # bypass u1db_config document if doc_id == 'u1db_config': pass - # copy design docs - elif doc_id.startswith('_design'): - del doc['_rev'] - new_couch_db.save(doc) # copy u1db docs elif 'u1db_rev' in doc: new_doc = { diff --git a/testing/tests/couch/test_ddocs.py b/testing/tests/couch/test_ddocs.py index 9ff32633..2060e27d 100644 --- a/testing/tests/couch/test_ddocs.py +++ b/testing/tests/couch/test_ddocs.py @@ -1,6 +1,5 @@ from uuid import uuid4 -from leap.soledad.common.couch import errors from leap.soledad.common import couch from test_soledad.util import CouchDBTestCase @@ -17,7 +16,7 @@ class CouchDesignDocsTests(CouchDBTestCase): if dbname not in self.couch_server: self.couch_server.create(dbname) self.db = couch.CouchDatabase( - ('http://127.0.0.1:%d' % self.couch_port), + (self.couch_url), dbname, ensure_ddocs=ensure) @@ -26,152 +25,6 @@ class CouchDesignDocsTests(CouchDBTestCase): self.db.close() CouchDBTestCase.tearDown(self) - def test_missing_design_doc_raises(self): - """ - Test that all methods that access design documents will raise if the - design docs are not present. - """ - self.create_db(ensure=False) - # get_generation_info() - self.assertRaises( - errors.MissingDesignDocError, - self.db.get_generation_info) - # get_trans_id_for_gen() - self.assertRaises( - errors.MissingDesignDocError, - self.db.get_trans_id_for_gen, 1) - # get_transaction_log() - self.assertRaises( - errors.MissingDesignDocError, - self.db.get_transaction_log) - # whats_changed() - self.assertRaises( - errors.MissingDesignDocError, - self.db.whats_changed) - - def test_missing_design_doc_functions_raises(self): - """ - Test that all methods that access design documents list functions - will raise if the functions are not present. - """ - self.create_db(ensure=True) - # erase views from _design/transactions - transactions = self.db._database['_design/transactions'] - transactions['lists'] = {} - self.db._database.save(transactions) - # get_generation_info() - self.assertRaises( - errors.MissingDesignDocListFunctionError, - self.db.get_generation_info) - # get_trans_id_for_gen() - self.assertRaises( - errors.MissingDesignDocListFunctionError, - self.db.get_trans_id_for_gen, 1) - # whats_changed() - self.assertRaises( - errors.MissingDesignDocListFunctionError, - self.db.whats_changed) - - def test_absent_design_doc_functions_raises(self): - """ - Test that all methods that access design documents list functions - will raise if the functions are not present. 
- """ - self.create_db(ensure=True) - # erase views from _design/transactions - transactions = self.db._database['_design/transactions'] - del transactions['lists'] - self.db._database.save(transactions) - # get_generation_info() - self.assertRaises( - errors.MissingDesignDocListFunctionError, - self.db.get_generation_info) - # _get_trans_id_for_gen() - self.assertRaises( - errors.MissingDesignDocListFunctionError, - self.db.get_trans_id_for_gen, 1) - # whats_changed() - self.assertRaises( - errors.MissingDesignDocListFunctionError, - self.db.whats_changed) - - def test_missing_design_doc_named_views_raises(self): - """ - Test that all methods that access design documents' named views will - raise if the views are not present. - """ - self.create_db(ensure=True) - # erase views from _design/docs - docs = self.db._database['_design/docs'] - del docs['views'] - self.db._database.save(docs) - # erase views from _design/syncs - syncs = self.db._database['_design/syncs'] - del syncs['views'] - self.db._database.save(syncs) - # erase views from _design/transactions - transactions = self.db._database['_design/transactions'] - del transactions['views'] - self.db._database.save(transactions) - # get_generation_info() - self.assertRaises( - errors.MissingDesignDocNamedViewError, - self.db.get_generation_info) - # _get_trans_id_for_gen() - self.assertRaises( - errors.MissingDesignDocNamedViewError, - self.db.get_trans_id_for_gen, 1) - # _get_transaction_log() - self.assertRaises( - errors.MissingDesignDocNamedViewError, - self.db.get_transaction_log) - # whats_changed() - self.assertRaises( - errors.MissingDesignDocNamedViewError, - self.db.whats_changed) - - def test_deleted_design_doc_raises(self): - """ - Test that all methods that access design documents will raise if the - design docs are not present. - """ - self.create_db(ensure=True) - # delete _design/docs - del self.db._database['_design/docs'] - # delete _design/syncs - del self.db._database['_design/syncs'] - # delete _design/transactions - del self.db._database['_design/transactions'] - # get_generation_info() - self.assertRaises( - errors.MissingDesignDocDeletedError, - self.db.get_generation_info) - # get_trans_id_for_gen() - self.assertRaises( - errors.MissingDesignDocDeletedError, - self.db.get_trans_id_for_gen, 1) - # get_transaction_log() - self.assertRaises( - errors.MissingDesignDocDeletedError, - self.db.get_transaction_log) - # whats_changed() - self.assertRaises( - errors.MissingDesignDocDeletedError, - self.db.whats_changed) - - def test_ensure_ddoc_independently(self): - """ - Test that a missing ddocs other than _design/docs will be ensured - even if _design/docs is there. 
- """ - self.create_db(ensure=True) - del self.db._database['_design/transactions'] - self.assertRaises( - errors.MissingDesignDocDeletedError, - self.db.get_transaction_log) - self.create_db(ensure=True, dbname=self.db._dbname) - self.db.get_transaction_log() - def test_ensure_security_doc(self): """ Ensure_security creates a _security ddoc to ensure that only soledad -- cgit v1.2.3 From c7b464077215425759ab402fb2314f4e8f9acd7e Mon Sep 17 00:00:00 2001 From: drebs Date: Sat, 23 Jul 2016 17:11:53 +0200 Subject: [test] remove traces of design docs from couch tests --- server/pkg/create-user-db | 2 +- testing/test_soledad/util.py | 3 +-- testing/tests/couch/test_atomicity.py | 3 +-- testing/tests/couch/test_backend.py | 3 +-- testing/tests/server/test_server.py | 3 +-- testing/tests/sync/test_sync.py | 3 +-- testing/tests/sync/test_sync_mutex.py | 3 +-- 7 files changed, 7 insertions(+), 13 deletions(-) diff --git a/server/pkg/create-user-db b/server/pkg/create-user-db index 5e48d4de..b955b4c3 100755 --- a/server/pkg/create-user-db +++ b/server/pkg/create-user-db @@ -80,7 +80,7 @@ def ensure_database(dbname): url = url_for_db(dbname) db_security = CONF['database-security'] db = CouchDatabase.open_database(url=url, create=True, - replica_uid=None, ensure_ddocs=True, + replica_uid=None, database_security=db_security) print ('success! Ensured that database %s exists, with replica_uid: %s' % (db._dbname, db.replica_uid)) diff --git a/testing/test_soledad/util.py b/testing/test_soledad/util.py index 033a55df..02f3859b 100644 --- a/testing/test_soledad/util.py +++ b/testing/test_soledad/util.py @@ -391,8 +391,7 @@ class CouchServerStateForTests(CouchServerState): db = CouchDatabase.open_database( urljoin(self.couch_url, dbname), True, - replica_uid=replica_uid or 'test', - ensure_ddocs=True) + replica_uid=replica_uid or 'test') self.dbs.append(db) return db diff --git a/testing/tests/couch/test_atomicity.py b/testing/tests/couch/test_atomicity.py index aec9c6cf..3badfb19 100644 --- a/testing/tests/couch/test_atomicity.py +++ b/testing/tests/couch/test_atomicity.py @@ -90,8 +90,7 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer): self.db = CouchDatabase.open_database( urljoin(self.couch_url, 'user-' + self.user), create=True, - replica_uid='replica', - ensure_ddocs=True) + replica_uid='replica') self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") self.startTwistedServer() diff --git a/testing/tests/couch/test_backend.py b/testing/tests/couch/test_backend.py index f178e8a5..c399338e 100644 --- a/testing/tests/couch/test_backend.py +++ b/testing/tests/couch/test_backend.py @@ -43,8 +43,7 @@ class TestCouchBackendImpl(CouchDBTestCase): 'http://localhost:' + str(self.couch_port), ('test-%s' % uuid4().hex) ), - create=True, - ensure_ddocs=True) + create=True) doc_id1 = db._allocate_doc_id() self.assertTrue(doc_id1.startswith('D-')) self.assertEqual(34, len(doc_id1)) diff --git a/testing/tests/server/test_server.py b/testing/tests/server/test_server.py index b99d1939..49d25ed0 100644 --- a/testing/tests/server/test_server.py +++ b/testing/tests/server/test_server.py @@ -391,8 +391,7 @@ class EncryptedSyncTestCase( # ensure remote db exists before syncing db = CouchDatabase.open_database( urljoin(self.couch_url, 'user-' + user), - create=True, - ensure_ddocs=True) + create=True) def _db1AssertEmptyDocList(results): _, doclist = results diff --git a/testing/tests/sync/test_sync.py b/testing/tests/sync/test_sync.py index 095884ce..5540b7cb 100644 --- a/testing/tests/sync/test_sync.py +++ 
b/testing/tests/sync/test_sync.py @@ -101,8 +101,7 @@ class InterruptableSyncTestCase( # ensure remote db exists before syncing db = couch.CouchDatabase.open_database( urljoin(self.couch_url, 'user-user-uuid'), - create=True, - ensure_ddocs=True) + create=True) # create interruptor thread t = _SyncInterruptor(sol, db) diff --git a/testing/tests/sync/test_sync_mutex.py b/testing/tests/sync/test_sync_mutex.py index 787cfee8..261c6485 100644 --- a/testing/tests/sync/test_sync_mutex.py +++ b/testing/tests/sync/test_sync_mutex.py @@ -105,8 +105,7 @@ class TestSyncMutex( # ensure remote db exists before syncing db = CouchDatabase.open_database( urljoin(self.couch_url, 'user-' + self.user), - create=True, - ensure_ddocs=True) + create=True) sol = self._soledad_instance( user=self.user, server_url=self.getURL()) -- cgit v1.2.3 From 8596668c4bb38251da8891e8fd7a23bcf972c21a Mon Sep 17 00:00:00 2001 From: drebs Date: Sat, 23 Jul 2016 17:21:58 +0200 Subject: [bug] fix order of multipart serialization when writing to couch The couch backend makes use of attachments and multipart structure for writing the document to the couch database. For that to work, the order in which attachments are described must match the actual order in which attachments are written to the couch http stream. This was not being properly taken care of, and eventually the json serializer was arbitrarilly ordering the attachments description in a way that it didn't match the actual order of attachments writing. This commit fixes that by using json.dumps() sort_keys parameter and making sure conflicts are always written before content. --- common/src/leap/soledad/common/couch/__init__.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py index ca0c2855..ba3192a4 100644 --- a/common/src/leap/soledad/common/couch/__init__.py +++ b/common/src/leap/soledad/common/couch/__init__.py @@ -714,7 +714,18 @@ class CouchDatabase(object): if not self.batching: buf = StringIO() envelope = MultipartWriter(buf) - envelope.add('application/json', json.dumps(couch_doc)) + # the order in which attachments are described inside the + # serialization of the couch document must match the order in which + # they are actually written in the multipart structure. Because of + # that, we use `sorted_keys=True` in the json serialization (so + # "u1db_conflicts" comes before "u1db_content" on the couch + # document attachments description), and also reverse the order of + # the parts before writing them, so the "conflict" part is written + # before the "content" part. 
+ envelope.add( + 'application/json', + json.dumps(couch_doc, sort_keys=True)) + parts.reverse() for part in parts: envelope.add('application/octet-stream', part) envelope.close() -- cgit v1.2.3 From 1893611393a3ec69d8099a8601fb21262e5f36f4 Mon Sep 17 00:00:00 2001 From: drebs Date: Sun, 24 Jul 2016 07:02:39 -0300 Subject: [feat] use a lock for updating couch gen data --- common/src/leap/soledad/common/couch/__init__.py | 38 +++++++++++++++--------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py index ba3192a4..effb2be2 100644 --- a/common/src/leap/soledad/common/couch/__init__.py +++ b/common/src/leap/soledad/common/couch/__init__.py @@ -27,10 +27,12 @@ import time import functools +from collections import defaultdict from StringIO import StringIO from urlparse import urljoin from contextlib import contextmanager from multiprocessing.pool import ThreadPool +from threading import Lock from couchdb.client import Server, Database @@ -113,6 +115,8 @@ class CouchDatabase(object): CONFIG_DOC_ID = '_local/config' SYNC_DOC_ID_PREFIX = '_local/sync_' + _put_doc_lock = defaultdict(Lock) + @classmethod def open_database(cls, url, create, ensure_ddocs=False, replica_uid=None, database_security=None): @@ -675,6 +679,7 @@ class CouchDatabase(object): 'length': len(content), } parts.append(content) + # save conflicts as attachment if doc.has_conflicts is True: conflicts = json.dumps( @@ -686,21 +691,26 @@ class CouchDatabase(object): 'length': len(conflicts), } parts.append(conflicts) + # add the gen document - while True: # TODO: add a lock, remove this while - try: - gen, _ = self.get_generation_info() - new_gen = gen + 1 - gen_doc = { - '_id': _get_gen_doc_id(new_gen), - 'gen': new_gen, - 'doc_id': doc.doc_id, - 'trans_id': transaction_id, - } - self._database.save(gen_doc) - break - except ResourceConflict: - pass + + # TODO: in u1db protocol, the increment of database generation should + # be made in the same atomic transaction as the actual document save, + # otherwise the same document might be concurrently updated by + # concurrent syncs from other replicas. A simple lock based on the uuid + # and doc_id would be enough to prevent that, if all entry points to + # database update are made through the soledad api. 
+ with self._put_doc_lock[self._database.name]: + gen, _ = self.get_generation_info() + new_gen = gen + 1 + gen_doc = { + '_id': _get_gen_doc_id(new_gen), + 'gen': new_gen, + 'doc_id': doc.doc_id, + 'trans_id': transaction_id, + } + self._database.save(gen_doc) + # build the couch document couch_doc = { '_id': doc.doc_id, -- cgit v1.2.3 From 793180533e4f19b364145c61939d6cad07dd851a Mon Sep 17 00:00:00 2001 From: drebs Date: Sun, 24 Jul 2016 07:56:52 -0300 Subject: [test] add pytest initial setup for performance tests --- testing/tests/perf/assets/cert_default.conf | 15 +++ testing/tests/perf/conftest.py | 143 ++++++++++++++++++++++++++++ testing/tests/perf/pytest.ini | 2 + testing/tests/perf/test_sync.py | 43 +++++++++ testing/tox.ini | 3 + 5 files changed, 206 insertions(+) create mode 100644 testing/tests/perf/assets/cert_default.conf create mode 100644 testing/tests/perf/conftest.py create mode 100644 testing/tests/perf/pytest.ini create mode 100644 testing/tests/perf/test_sync.py diff --git a/testing/tests/perf/assets/cert_default.conf b/testing/tests/perf/assets/cert_default.conf new file mode 100644 index 00000000..8043cea3 --- /dev/null +++ b/testing/tests/perf/assets/cert_default.conf @@ -0,0 +1,15 @@ +[ req ] +default_bits = 1024 +default_keyfile = keyfile.pem +distinguished_name = req_distinguished_name +prompt = no +output_password = mypass + +[ req_distinguished_name ] +C = GB +ST = Test State or Province +L = Test Locality +O = Organization Name +OU = Organizational Unit Name +CN = localhost +emailAddress = test@email.address diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py new file mode 100644 index 00000000..c66f2863 --- /dev/null +++ b/testing/tests/perf/conftest.py @@ -0,0 +1,143 @@ +import json +import os +import pytest +import requests +import signal +import time + +from hashlib import sha512 +from subprocess import call + +from leap.soledad.client import Soledad +from leap.soledad.common.couch import CouchDatabase + +from leap.common.events import server +server.ensure_server() + + +DEFAULT_UUID = '0' +DEFAULT_PASSPHRASE = '123' + +DEFAULT_URL = 'http://127.0.0.1:2424' +DEFAULT_PRIVKEY = 'soledad_privkey.pem' +DEFAULT_CERTKEY = 'soledad_certkey.pem' +DEFAULT_TOKEN = 'an-auth-token' + + +@pytest.fixture +def certificate(tmpdir): + privkey = os.path.join(tmpdir.strpath, 'privkey.pem') + certkey = os.path.join(tmpdir.strpath, 'certkey.pem') + call([ + 'openssl', + 'req', + '-x509', + '-sha256', + '-nodes', + '-days', '365', + '-newkey', 'rsa:2048', + '-config', './assets/cert_default.conf', # TODO: fix basedir + '-keyout', privkey, + '-out', certkey]) + return privkey, certkey + + +def get_pid(pidfile): + if not os.path.isfile(pidfile): + return 0 + try: + with open(pidfile) as f: + return int(f.read()) + except IOError: + return 0 + + +class CouchUserDatabase(object): + + def __init__(self): + url = 'http://127.0.0.1:5984/' + self._user_db_url = url + 'user-%s' % DEFAULT_UUID + self._token_db_url = url + _token_dbname() + self._shared_db_url = url + 'shared' + + def setup(self): + CouchDatabase.open_database( + url=self._user_db_url, create=True, replica_uid=None) + requests.put(self._token_db_url) + requests.put(self._shared_db_url) + self._add_token() + + def _add_token(self): + token = sha512(DEFAULT_TOKEN).hexdigest() + content = {'type': 'Token', 'user_id': DEFAULT_UUID} + requests.put( + self._token_db_url + '/' + token, data=json.dumps(content)) + + def teardown(self): + requests.delete(self._user_db_url) + 
requests.delete(self._token_db_url) + requests.delete(self._shared_db_url) + + +@pytest.fixture(scope='function') +def couchdb_user_db(request): + db = CouchUserDatabase() + db.setup() + request.addfinalizer(db.teardown) + return db + + +def _token_dbname(): + dbname = 'tokens_' + \ + str(int(time.time() / (30 * 24 * 3600))) + return dbname + + +class SoledadServer(object): + + def __init__(self, tmpdir): + self._pidfile = os.path.join(tmpdir.strpath, 'soledad-server.pid') + self._logfile = os.path.join(tmpdir.strpath, 'soledad-server.log') + + def start(self): + call([ + 'twistd', + '--logfile=%s' % self._logfile, + '--pidfile=%s' % self._pidfile, + 'web', + '--wsgi=leap.soledad.server.application', + '--port=2424' + ]) + + def stop(self): + pid = get_pid(self._pidfile) + os.kill(pid, signal.SIGKILL) + + +@pytest.fixture +def soledad_server(tmpdir, couchdb_user_db, request): + server = SoledadServer(tmpdir) + server.start() + request.addfinalizer(server.stop) + return server + + +@pytest.fixture() +def soledad_client(tmpdir, soledad_server): + uuid = DEFAULT_UUID + passphrase = DEFAULT_PASSPHRASE + secrets_path = os.path.join(tmpdir.strpath, '%s.secret' % uuid) + local_db_path = os.path.join(tmpdir.strpath, '%s.db' % uuid) + server_url = DEFAULT_URL + token = DEFAULT_TOKEN + + # get a soledad instance + return Soledad( + uuid, + unicode(passphrase), + secrets_path=secrets_path, + local_db_path=local_db_path, + server_url=server_url, + cert_file=None, + auth_token=token, + defer_encryption=True) diff --git a/testing/tests/perf/pytest.ini b/testing/tests/perf/pytest.ini new file mode 100644 index 00000000..7a0508ce --- /dev/null +++ b/testing/tests/perf/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +twisted = yes diff --git a/testing/tests/perf/test_sync.py b/testing/tests/perf/test_sync.py new file mode 100644 index 00000000..1e29a86a --- /dev/null +++ b/testing/tests/perf/test_sync.py @@ -0,0 +1,43 @@ +import pytest + +from twisted.internet.defer import gatherResults + +from leap.soledad.common.couch import CouchDatabase +from leap.soledad.common.document import ServerDocument + + +@pytest.inlineCallbacks +def test_upload(soledad_client): + # create a bunch of local documents + uploads = 100 + deferreds = [] + for i in xrange(uploads): + d = soledad_client.create_doc({'upload': True}) + deferreds.append(d) + yield gatherResults(deferreds) + + # synchronize + yield soledad_client.sync() + + # check that documents reached the remote database + remote = CouchDatabase('http://127.0.0.1:5984', 'user-0') + remote_count, _ = remote.get_all_docs() + assert remote_count == uploads + + +@pytest.inlineCallbacks +def test_download(soledad_client): + # create a bunch of remote documents + downloads = 100 + remote = CouchDatabase('http://127.0.0.1:5984', 'user-0') + for i in xrange(downloads): + doc = ServerDocument('doc-%d' % i, 'replica:1') + doc.content = {'download': True} + remote.save_document(None, doc, i) + + # synchronize + yield soledad_client.sync() + + # check that documents reached the local database + local_count, docs = yield soledad_client.get_all_docs() + assert local_count == downloads diff --git a/testing/tox.ini b/testing/tox.ini index 3663eef3..caeb52c1 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -8,16 +8,19 @@ deps = pytest pytest-flake8 pytest-pep8 + pytest-twisted mock testscenarios setuptools-trial pep8 pdbpp couchdb + requests # install soledad local packages -e../common -e../client -e../server setenv = HOME=/tmp + TERM=xterm install_command = pip install {opts} {packages} -- 
cgit v1.2.3 From 4073d6a8542121504ef83b9cc02ecff94e041e32 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 05:44:20 -0300 Subject: [test] use pytest fixture scopes to provide per module soledad server for perf tests --- testing/tests/perf/conftest.py | 110 +++++++++++++++++++++++++---------------- 1 file changed, 68 insertions(+), 42 deletions(-) diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py index c66f2863..05f91a45 100644 --- a/testing/tests/perf/conftest.py +++ b/testing/tests/perf/conftest.py @@ -11,6 +11,9 @@ from subprocess import call from leap.soledad.client import Soledad from leap.soledad.common.couch import CouchDatabase +# we have to manually setup the events server in order to be able to signal +# events. This is usually done by the enclosing application using soledad +# client (i.e. bitmask client). from leap.common.events import server server.ensure_server() @@ -24,48 +27,31 @@ DEFAULT_CERTKEY = 'soledad_certkey.pem' DEFAULT_TOKEN = 'an-auth-token' -@pytest.fixture -def certificate(tmpdir): - privkey = os.path.join(tmpdir.strpath, 'privkey.pem') - certkey = os.path.join(tmpdir.strpath, 'certkey.pem') - call([ - 'openssl', - 'req', - '-x509', - '-sha256', - '-nodes', - '-days', '365', - '-newkey', 'rsa:2048', - '-config', './assets/cert_default.conf', # TODO: fix basedir - '-keyout', privkey, - '-out', certkey]) - return privkey, certkey +# +# soledad_dbs fixture: provides all databases needed by soledad server in a per +# module scope (same databases for all tests in this module). +# - -def get_pid(pidfile): - if not os.path.isfile(pidfile): - return 0 - try: - with open(pidfile) as f: - return int(f.read()) - except IOError: - return 0 +def _token_dbname(): + dbname = 'tokens_' + \ + str(int(time.time() / (30 * 24 * 3600))) + return dbname -class CouchUserDatabase(object): +class SoledadDatabases(object): def __init__(self): url = 'http://127.0.0.1:5984/' - self._user_db_url = url + 'user-%s' % DEFAULT_UUID self._token_db_url = url + _token_dbname() self._shared_db_url = url + 'shared' def setup(self): - CouchDatabase.open_database( - url=self._user_db_url, create=True, replica_uid=None) + self._create_dbs() + self._add_token() + + def _create_dbs(self): requests.put(self._token_db_url) requests.put(self._shared_db_url) - self._add_token() def _add_token(self): token = sha512(DEFAULT_TOKEN).hexdigest() @@ -74,28 +60,64 @@ class CouchUserDatabase(object): self._token_db_url + '/' + token, data=json.dumps(content)) def teardown(self): - requests.delete(self._user_db_url) requests.delete(self._token_db_url) requests.delete(self._shared_db_url) +@pytest.fixture(scope='module') +def soledad_dbs(request): + db = SoledadDatabases() + db.setup() + request.addfinalizer(db.teardown) + return db + + +# +# user_db fixture: provides an empty database for a given user in a per +# function scope. 
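+# A finalizer deletes the database after each test function runs, so every
+# test starts from an empty user database.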
+# + +class UserDatabase(object): + + def __init__(self): + url = 'http://127.0.0.1:5984/' + self._user_db_url = url + 'user-%s' % DEFAULT_UUID + + def setup(self): + CouchDatabase.open_database( + url=self._user_db_url, create=True, replica_uid=None) + + def teardown(self): + requests.delete(self._user_db_url) + + @pytest.fixture(scope='function') -def couchdb_user_db(request): - db = CouchUserDatabase() +def user_db(request): + db = UserDatabase() db.setup() request.addfinalizer(db.teardown) return db -def _token_dbname(): - dbname = 'tokens_' + \ - str(int(time.time() / (30 * 24 * 3600))) - return dbname +def get_pid(pidfile): + if not os.path.isfile(pidfile): + return 0 + try: + with open(pidfile) as f: + return int(f.read()) + except IOError: + return 0 + +# +# soledad_server fixture: provides a running soledad server in a per module +# context (same soledad server for all tests in this module). +# class SoledadServer(object): - def __init__(self, tmpdir): + def __init__(self, tmpdir_factory): + tmpdir = tmpdir_factory.mktemp('soledad-server') self._pidfile = os.path.join(tmpdir.strpath, 'soledad-server.pid') self._logfile = os.path.join(tmpdir.strpath, 'soledad-server.log') @@ -114,16 +136,20 @@ class SoledadServer(object): os.kill(pid, signal.SIGKILL) -@pytest.fixture -def soledad_server(tmpdir, couchdb_user_db, request): - server = SoledadServer(tmpdir) +@pytest.fixture(scope='module') +def soledad_server(tmpdir_factory, request): + server = SoledadServer(tmpdir_factory) server.start() request.addfinalizer(server.stop) return server +# +# soledad_client fixture: provides a clean soledad client for a test function. +# + @pytest.fixture() -def soledad_client(tmpdir, soledad_server): +def soledad_client(tmpdir, soledad_server, user_db, soledad_dbs): uuid = DEFAULT_UUID passphrase = DEFAULT_PASSPHRASE secrets_path = os.path.join(tmpdir.strpath, '%s.secret' % uuid) -- cgit v1.2.3 From c40a2bf488e03bef14d440ab1a847afab6f5fb76 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 05:44:47 -0300 Subject: [test] add some payload to perf sync tests --- testing/tests/perf/test_sync.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/testing/tests/perf/test_sync.py b/testing/tests/perf/test_sync.py index 1e29a86a..fbe3b877 100644 --- a/testing/tests/perf/test_sync.py +++ b/testing/tests/perf/test_sync.py @@ -6,13 +6,16 @@ from leap.soledad.common.couch import CouchDatabase from leap.soledad.common.document import ServerDocument +content = ' ' * 10000 + + @pytest.inlineCallbacks def test_upload(soledad_client): # create a bunch of local documents uploads = 100 deferreds = [] for i in xrange(uploads): - d = soledad_client.create_doc({'upload': True}) + d = soledad_client.create_doc({'upload': True, 'content': content}) deferreds.append(d) yield gatherResults(deferreds) @@ -32,7 +35,7 @@ def test_download(soledad_client): remote = CouchDatabase('http://127.0.0.1:5984', 'user-0') for i in xrange(downloads): doc = ServerDocument('doc-%d' % i, 'replica:1') - doc.content = {'download': True} + doc.content = {'download': True, 'content': content} remote.save_document(None, doc, i) # synchronize -- cgit v1.2.3 From fa2c50b9c6cc037a8ab348b5a746b2e728f3068a Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 07:48:41 -0300 Subject: [test] allow custom couch url for couch tests --- testing/test_soledad/util.py | 4 ++-- testing/tests/conftest.py | 13 +++++++++++++ testing/tests/couch/common.py | 6 ++---- testing/tests/couch/couchdb.ini.template | 22 
---------------------- testing/tests/couch/test_backend.py | 5 +---- 5 files changed, 18 insertions(+), 32 deletions(-) create mode 100644 testing/tests/conftest.py delete mode 100644 testing/tests/couch/couchdb.ini.template diff --git a/testing/test_soledad/util.py b/testing/test_soledad/util.py index 02f3859b..e23d185e 100644 --- a/testing/test_soledad/util.py +++ b/testing/test_soledad/util.py @@ -27,6 +27,7 @@ import shutil import random import string import couchdb +import pytest from uuid import uuid4 from mock import Mock @@ -344,6 +345,7 @@ class BaseSoledadTest(BaseLeapTest, MockedSharedDBTest): self.assertEqual(exp_doc.content, doc.content) +@pytest.mark.usefixtures("couch_url") class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest): """ @@ -354,8 +356,6 @@ class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest): """ Make sure we have a CouchDB instance for a test. """ - self.couch_port = 5984 - self.couch_url = 'http://localhost:%d' % self.couch_port self.couch_server = couchdb.Server(self.couch_url) def delete_db(self, name): diff --git a/testing/tests/conftest.py b/testing/tests/conftest.py new file mode 100644 index 00000000..3be9ba2a --- /dev/null +++ b/testing/tests/conftest.py @@ -0,0 +1,13 @@ +import pytest + + +def pytest_addoption(parser): + parser.addoption( + "--couch-url", type="string", default="http://127.0.0.1:5984", + help="the url for the couch server to be used during tests") + + +@pytest.fixture +def couch_url(request): + url = request.config.getoption('--couch-url') + request.cls.couch_url = url diff --git a/testing/tests/couch/common.py b/testing/tests/couch/common.py index 45cf8d7f..48d30168 100644 --- a/testing/tests/couch/common.py +++ b/testing/tests/couch/common.py @@ -13,10 +13,9 @@ nested_doc = tests.nested_doc def make_couch_database_for_test(test, replica_uid): - port = str(test.couch_port) dbname = ('test-%s' % uuid4().hex) db = couch.CouchDatabase.open_database( - urljoin('http://localhost:' + port, dbname), + urljoin(test.couch_url, dbname), create=True, replica_uid=replica_uid or 'test', ensure_ddocs=True) @@ -25,8 +24,7 @@ def make_couch_database_for_test(test, replica_uid): def copy_couch_database_for_test(test, db): - port = str(test.couch_port) - couch_url = 'http://localhost:' + port + couch_url = test.couch_url new_dbname = db._dbname + '_copy' new_db = couch.CouchDatabase.open_database( urljoin(couch_url, new_dbname), diff --git a/testing/tests/couch/couchdb.ini.template b/testing/tests/couch/couchdb.ini.template deleted file mode 100644 index 174d9d86..00000000 --- a/testing/tests/couch/couchdb.ini.template +++ /dev/null @@ -1,22 +0,0 @@ -; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure. - -; Upgrading CouchDB will overwrite this file. - -[couchdb] -database_dir = %(tempdir)s/lib -view_index_dir = %(tempdir)s/lib -max_document_size = 4294967296 ; 4 GB -os_process_timeout = 120000 ; 120 seconds. for view and external servers. 
-max_dbs_open = 100 -delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned -uri_file = %(tempdir)s/lib/couch.uri -file_compression = snappy - -[log] -file = %(tempdir)s/log/couch.log -level = info -include_sasl = true - -[httpd] -port = 0 -bind_address = 127.0.0.1 diff --git a/testing/tests/couch/test_backend.py b/testing/tests/couch/test_backend.py index c399338e..4fad11cf 100644 --- a/testing/tests/couch/test_backend.py +++ b/testing/tests/couch/test_backend.py @@ -39,10 +39,7 @@ class TestCouchBackendImpl(CouchDBTestCase): def test__allocate_doc_id(self): db = couch.CouchDatabase.open_database( - urljoin( - 'http://localhost:' + str(self.couch_port), - ('test-%s' % uuid4().hex) - ), + urljoin(self.couch_url, 'test-%s' % uuid4().hex), create=True) doc_id1 = db._allocate_doc_id() self.assertTrue(doc_id1.startswith('D-')) -- cgit v1.2.3 From 1dc48b603869db1bfcd9c0f86ae3973b715f9222 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 06:58:26 -0300 Subject: [test] allow custom couch url for perf tests --- testing/tests/perf/conftest.py | 47 +++++++++++++++++++++++++++++++---------- testing/tests/perf/test_sync.py | 10 +++++---- 2 files changed, 42 insertions(+), 15 deletions(-) diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py index 05f91a45..85a48059 100644 --- a/testing/tests/perf/conftest.py +++ b/testing/tests/perf/conftest.py @@ -7,10 +7,12 @@ import time from hashlib import sha512 from subprocess import call +from urlparse import urljoin from leap.soledad.client import Soledad from leap.soledad.common.couch import CouchDatabase + # we have to manually setup the events server in order to be able to signal # events. This is usually done by the enclosing application using soledad # client (i.e. bitmask client). 
@@ -18,6 +20,16 @@ from leap.common.events import server server.ensure_server() +def pytest_addoption(parser): + parser.addoption( + "--couch-url", type="string", default="http://127.0.0.1:5984", + help="the url for the couch server to be used during tests") + + +# +# default options for all tests +# + DEFAULT_UUID = '0' DEFAULT_PASSPHRASE = '123' @@ -40,10 +52,9 @@ def _token_dbname(): class SoledadDatabases(object): - def __init__(self): - url = 'http://127.0.0.1:5984/' - self._token_db_url = url + _token_dbname() - self._shared_db_url = url + 'shared' + def __init__(self, url): + self._token_db_url = urljoin(url, _token_dbname()) + self._shared_db_url = urljoin(url, 'shared') def setup(self): self._create_dbs() @@ -66,7 +77,8 @@ class SoledadDatabases(object): @pytest.fixture(scope='module') def soledad_dbs(request): - db = SoledadDatabases() + couch_url = request.config.option.couch_url + db = SoledadDatabases(couch_url) db.setup() request.addfinalizer(db.teardown) return db @@ -79,9 +91,8 @@ def soledad_dbs(request): class UserDatabase(object): - def __init__(self): - url = 'http://127.0.0.1:5984/' - self._user_db_url = url + 'user-%s' % DEFAULT_UUID + def __init__(self, url): + self._user_db_url = urljoin(url, 'user-%s' % DEFAULT_UUID) def setup(self): CouchDatabase.open_database( @@ -93,7 +104,8 @@ class UserDatabase(object): @pytest.fixture(scope='function') def user_db(request): - db = UserDatabase() + couch_url = request.config.option.couch_url + db = UserDatabase(couch_url) db.setup() request.addfinalizer(db.teardown) return db @@ -116,12 +128,15 @@ def get_pid(pidfile): class SoledadServer(object): - def __init__(self, tmpdir_factory): + def __init__(self, tmpdir_factory, couch_url): tmpdir = tmpdir_factory.mktemp('soledad-server') self._pidfile = os.path.join(tmpdir.strpath, 'soledad-server.pid') self._logfile = os.path.join(tmpdir.strpath, 'soledad-server.log') + self._couch_url = couch_url def start(self): + self._create_conf_file() + # start the server call([ 'twistd', '--logfile=%s' % self._logfile, @@ -131,6 +146,15 @@ class SoledadServer(object): '--port=2424' ]) + def _create_conf_file(self): + if not os.access('/etc', os.W_OK): + return + if not os.path.isdir('/etc/soledad'): + os.mkdir('/etc/soledad') + with open('/etc/soledad/soledad-server.conf', 'w') as f: + content = '[soledad-server]\ncouch_url = %s' % self._couch_url + f.write(content) + def stop(self): pid = get_pid(self._pidfile) os.kill(pid, signal.SIGKILL) @@ -138,7 +162,8 @@ class SoledadServer(object): @pytest.fixture(scope='module') def soledad_server(tmpdir_factory, request): - server = SoledadServer(tmpdir_factory) + couch_url = request.config.option.couch_url + server = SoledadServer(tmpdir_factory, couch_url) server.start() request.addfinalizer(server.stop) return server diff --git a/testing/tests/perf/test_sync.py b/testing/tests/perf/test_sync.py index fbe3b877..9de733fb 100644 --- a/testing/tests/perf/test_sync.py +++ b/testing/tests/perf/test_sync.py @@ -10,7 +10,7 @@ content = ' ' * 10000 @pytest.inlineCallbacks -def test_upload(soledad_client): +def test_upload(soledad_client, request): # create a bunch of local documents uploads = 100 deferreds = [] @@ -23,16 +23,18 @@ def test_upload(soledad_client): yield soledad_client.sync() # check that documents reached the remote database - remote = CouchDatabase('http://127.0.0.1:5984', 'user-0') + url = request.config.getoption('--couch-url') + remote = CouchDatabase(url, 'user-0') remote_count, _ = remote.get_all_docs() assert remote_count == uploads 
@pytest.inlineCallbacks -def test_download(soledad_client): +def test_download(soledad_client, request): # create a bunch of remote documents downloads = 100 - remote = CouchDatabase('http://127.0.0.1:5984', 'user-0') + url = request.config.getoption('--couch-url') + remote = CouchDatabase(url, 'user-0') for i in xrange(downloads): doc = ServerDocument('doc-%d' % i, 'replica:1') doc.content = {'download': True, 'content': content} -- cgit v1.2.3 From 8d4f8ced79bfc4da12dac38d5337b48c042a2183 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 06:58:44 -0300 Subject: [test] avoid perf tests to be run on normal tox calls Currently the perf tests use pytest-twisted plugin, and this has some implications in the old tests adapted from u1db that now use trial classes. Because of that, we exclude perf tests from usual tox calls, but you can still run them by explicitelly calling `tox perf`. --- testing/tests/pytest.ini | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 testing/tests/pytest.ini diff --git a/testing/tests/pytest.ini b/testing/tests/pytest.ini new file mode 100644 index 00000000..3d785ca7 --- /dev/null +++ b/testing/tests/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +norecursedirs = perf -- cgit v1.2.3 From 259b5fa06bb83c8d9cbb53e43efd18ac9a9730f4 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 09:37:02 -0300 Subject: [test] remove pep8 from tox config --- testing/tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/tox.ini b/testing/tox.ini index caeb52c1..c3126cf2 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -2,7 +2,7 @@ envlist = py27 [testenv] -commands = py.test --pep8 {posargs} +commands = py.test {posargs} changedir = tests deps = pytest -- cgit v1.2.3 From bf9355077c2f190c82d660ad9b7059a1c3f32a8d Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 11:37:47 -0300 Subject: [test] use tox to create docker image --- scripts/docker/Dockerfile | 46 ++++++++++------------------------------------ 1 file changed, 10 insertions(+), 36 deletions(-) diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile index 915508ea..2ec310a9 100644 --- a/scripts/docker/Dockerfile +++ b/scripts/docker/Dockerfile @@ -1,51 +1,25 @@ # start with a fresh debian image FROM debian -# expose soledad server port in case we want to run a server container -EXPOSE 2424 - -# install dependencies from debian repos -COPY files/apt/leap.list /etc/apt/sources.list.d/ - -RUN apt-get update -RUN apt-get -y --force-yes install leap-archive-keyring - RUN apt-get update RUN apt-get -y install git -RUN apt-get -y install vim -RUN apt-get -y install python-ipdb - -# install python deps +# needed to build python twisted module RUN apt-get -y install libpython2.7-dev -RUN apt-get -y install libffi-dev +# needed to build python cryptography module RUN apt-get -y install libssl-dev -RUN apt-get -y install libzmq3-dev -RUN apt-get -y install python-pip -RUN apt-get -y install couchdb -RUN apt-get -y install python-srp -RUN apt-get -y install python-scrypt -RUN apt-get -y install leap-keymanager -RUN apt-get -y install python-tz +# install pip and tox +RUN apt-get -y install python-pip RUN pip install -U pip -RUN pip install psutil - -# install soledad-perf deps -RUN pip install klein -RUN apt-get -y install curl -RUN apt-get -y install httperf +RUN pip install tox # clone repositories -ENV BASEURL "https://github.com/leapcode" -ENV VARDIR "/var/local" -ENV REPOS "soledad leap_pycommon soledad-perf" -RUN for repo in ${REPOS}; do git clone 
${BASEURL}/${repo}.git /var/local/${repo}; done +RUN mkdir -p /builds/leap +RUN git clone -b develop https://0xacab.org/leap/soledad.git /builds/leap/soledad -# copy over files to help setup the environment and run soledad -RUN mkdir -p /usr/local/soledad - -COPY files/build/install-deps-from-repos.sh /usr/local/soledad/ -RUN /usr/local/soledad/install-deps-from-repos.sh +# use tox to install everything needed to run tests +RUN cd /builds/leap/soledad/testing && tox -v -r --notest +RUN mkdir -p /usr/local/soledad COPY files/bin/ /usr/local/soledad/ -- cgit v1.2.3 From 8d08016b6e5985569ca5d04ef3e2690e78809f54 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 11:38:19 -0300 Subject: [test] use tox and couchdb image to run tests --- scripts/docker/Makefile | 17 +++++++++++------ scripts/docker/files/bin/run-tox.sh | 14 ++++++++++++++ 2 files changed, 25 insertions(+), 6 deletions(-) create mode 100755 scripts/docker/files/bin/run-tox.sh diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile index 4fa2e264..6f30a341 100644 --- a/scripts/docker/Makefile +++ b/scripts/docker/Makefile @@ -27,11 +27,14 @@ MEMORY ?= 512m # Docker image generation (main make target) # ############################################## -all: image +all: soledad-image couchdb-image -image: +soledad-image: docker build -t $(IMAGE_NAME) . +couchdb-image: + docker pull couchdb + ################################################## # Run a Soledad Server inside a docker container # ################################################## @@ -69,16 +72,18 @@ run-client-bootstrap: /usr/local/soledad/run-client-bootstrap.sh ################################################# -# Run all trial tests inside a docker container # +# Run all tests inside a docker container # ################################################# -run-trial: +run-tox: + docker run -d --name couchdb couchdb docker run -t -i \ --memory="$(MEMORY)" \ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ + --link couchdb \ $(IMAGE_NAME) \ - /usr/local/soledad/run-trial.sh + /usr/local/soledad/run-tox.sh ############################################ # Performance tests and graphic generation # @@ -123,7 +128,7 @@ cp-perf-result: # Other helper targets # ######################## -run-shell: image +run-shell: soledad-image docker run -t -i \ --memory="$(MEMORY)" \ $(IMAGE_NAME) \ diff --git a/scripts/docker/files/bin/run-tox.sh b/scripts/docker/files/bin/run-tox.sh new file mode 100755 index 00000000..793ce6e1 --- /dev/null +++ b/scripts/docker/files/bin/run-tox.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +REPO=/builds/leap/soledad/testing + +if [ ! -z "${SOLEDAD_REMOTE}" ]; then + git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} + git -C ${REPO} fetch origin +fi + +if [ ! 
-z "${SOLEDAD_BRANCH}" ]; then + git -C ${REPO} checkout ${SOLEDAD_BRANCH} +fi + +(cd ${REPO}; tox -- -v --durations 0 --couch-url http://couchdb:5984) -- cgit v1.2.3 From 1a8ea1fde14eb5b2d2c4d4165c4bc21031512d06 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 11:39:38 -0300 Subject: [test] use docker image with couchdb service to run tests --- .gitlab-ci.yml | 7 +++++-- scripts/docker/Dockerfile | 3 +++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 647cc43c..2835e5cf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,3 +1,6 @@ -trial: +tests: + image: leap/soledad:1.0 + services: + - couchdb script: - - cd testing; tox + - cd testing; tox -- --couch-url http://couchdb:5984 diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile index 2ec310a9..1e46fda3 100644 --- a/scripts/docker/Dockerfile +++ b/scripts/docker/Dockerfile @@ -4,13 +4,16 @@ FROM debian RUN apt-get update RUN apt-get -y install git + # needed to build python twisted module RUN apt-get -y install libpython2.7-dev # needed to build python cryptography module RUN apt-get -y install libssl-dev +RUN apt-get -y install libffi-dev # install pip and tox RUN apt-get -y install python-pip + RUN pip install -U pip RUN pip install tox -- cgit v1.2.3 From 92813593b93d9788fd978acdeeba59d32c311d48 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 21:25:46 -0300 Subject: [test] use pip download cache for tests and docker image --- scripts/docker/Dockerfile | 1 - testing/tox.ini | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile index 1e46fda3..8c6bfdb3 100644 --- a/scripts/docker/Dockerfile +++ b/scripts/docker/Dockerfile @@ -13,7 +13,6 @@ RUN apt-get -y install libffi-dev # install pip and tox RUN apt-get -y install python-pip - RUN pip install -U pip RUN pip install tox diff --git a/testing/tox.ini b/testing/tox.ini index c3126cf2..5f401a35 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -23,4 +23,5 @@ deps = setenv = HOME=/tmp TERM=xterm + PIP_DOWNLOAD_CACHE=/var/cache/pip install_command = pip install {opts} {packages} -- cgit v1.2.3 From 6f98a8d61c33a4fc3619f998eeea0075d51c739b Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 21:31:46 -0300 Subject: [test] add rules to run perf test on docker with separate couchdb server container --- scripts/docker/Makefile | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile index 6f30a341..6ad4cced 100644 --- a/scripts/docker/Makefile +++ b/scripts/docker/Makefile @@ -89,8 +89,15 @@ run-tox: # Performance tests and graphic generation # ############################################ -run-perf-test: - helper/run-test.sh perf +run-perf: + docker run -d --name couchdb couchdb + docker run -t -i \ + --memory="$(MEMORY)" \ + --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ + --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ + --link couchdb \ + $(IMAGE_NAME) \ + /usr/local/soledad/run-tox-perf.sh run-client-perf: @if [ -z "$(CONTAINER_ID_FILE)" ]; then \ -- cgit v1.2.3 From 76acb8f39a32b6b61f00af571bae9bd48c0a5d62 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 21:51:45 -0300 Subject: [test] use random name for couchdb container in docker perf test --- scripts/docker/Makefile | 15 ++++++++++----- scripts/docker/files/bin/run-perf.sh | 20 ++++++++++++++++++++ scripts/docker/files/bin/run-tox.sh | 5 ++++- 3 files changed, 34 insertions(+), 6 deletions(-) create 
mode 100755 scripts/docker/files/bin/run-perf.sh diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile index 6ad4cced..4b4d4496 100644 --- a/scripts/docker/Makefile +++ b/scripts/docker/Makefile @@ -76,12 +76,14 @@ run-client-bootstrap: ################################################# run-tox: - docker run -d --name couchdb couchdb + name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \ + docker run -d --name $${name} couchdb; \ docker run -t -i \ --memory="$(MEMORY)" \ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ - --link couchdb \ + --env="COUCH_URL=http://$${name}:5984" \ + --link $${name} \ $(IMAGE_NAME) \ /usr/local/soledad/run-tox.sh @@ -90,14 +92,17 @@ run-tox: ############################################ run-perf: - docker run -d --name couchdb couchdb + name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \ + docker run -d --name $${name} couchdb; \ docker run -t -i \ --memory="$(MEMORY)" \ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ - --link couchdb \ + --env="SOLEDAD_PRELOAD_NUM=$(SOLEDAD_PRELOAD_NUM)" \ + --env="COUCH_URL=http://$${name}:5984" \ + --link $${name} \ $(IMAGE_NAME) \ - /usr/local/soledad/run-tox-perf.sh + /usr/local/soledad/run-perf.sh run-client-perf: @if [ -z "$(CONTAINER_ID_FILE)" ]; then \ diff --git a/scripts/docker/files/bin/run-perf.sh b/scripts/docker/files/bin/run-perf.sh new file mode 100755 index 00000000..35c7f006 --- /dev/null +++ b/scripts/docker/files/bin/run-perf.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +REPO=/builds/leap/soledad/testing +COUCH_URL="${COUCH_URL:-http://127.0.0.1:5984}" + +if [ ! -z "${SOLEDAD_REMOTE}" ]; then + git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} + git -C ${REPO} fetch origin +fi + +if [ ! -z "${SOLEDAD_BRANCH}" ]; then + git -C ${REPO} checkout ${SOLEDAD_BRANCH} +fi + +cd ${REPO} + +tox perf -- \ + --durations 0 \ + --couch-url ${COUCH_URL} \ + --twisted diff --git a/scripts/docker/files/bin/run-tox.sh b/scripts/docker/files/bin/run-tox.sh index 793ce6e1..74fde182 100755 --- a/scripts/docker/files/bin/run-tox.sh +++ b/scripts/docker/files/bin/run-tox.sh @@ -1,6 +1,7 @@ #!/bin/bash REPO=/builds/leap/soledad/testing +COUCH_URL="${COUCH_URL:-http://127.0.0.1:5984}" if [ ! -z "${SOLEDAD_REMOTE}" ]; then git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} @@ -11,4 +12,6 @@ if [ ! -z "${SOLEDAD_BRANCH}" ]; then git -C ${REPO} checkout ${SOLEDAD_BRANCH} fi -(cd ${REPO}; tox -- -v --durations 0 --couch-url http://couchdb:5984) +cd ${REPO} + +tox -- --couch-url ${COUCH_URL} -- cgit v1.2.3 From de5cd462cc3f04275e22d9267ecb8e6c2b23dfda Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 21:34:23 -0300 Subject: [test] allow passing number of docs on command line on perf tests --- scripts/docker/files/bin/run-perf.sh | 4 +++- testing/tests/perf/conftest.py | 6 ++++++ testing/tests/perf/test_sync.py | 4 ++-- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/scripts/docker/files/bin/run-perf.sh b/scripts/docker/files/bin/run-perf.sh index 35c7f006..72060230 100755 --- a/scripts/docker/files/bin/run-perf.sh +++ b/scripts/docker/files/bin/run-perf.sh @@ -2,6 +2,7 @@ REPO=/builds/leap/soledad/testing COUCH_URL="${COUCH_URL:-http://127.0.0.1:5984}" +SOLEDAD_PRELOAD_NUM="${SOLEDAD_PRELOAD_NUM:-100}" if [ ! 
-z "${SOLEDAD_REMOTE}" ]; then git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} @@ -17,4 +18,5 @@ cd ${REPO} tox perf -- \ --durations 0 \ --couch-url ${COUCH_URL} \ - --twisted + --twisted \ + --num-docs ${SOLEDAD_PRELOAD_NUM} diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py index 85a48059..463c791a 100644 --- a/testing/tests/perf/conftest.py +++ b/testing/tests/perf/conftest.py @@ -26,6 +26,12 @@ def pytest_addoption(parser): help="the url for the couch server to be used during tests") +def pytest_addoption(parser): + parser.addoption( + "--num-docs", type="int", default=100, + help="the number of documents to use in performance tests") + + # # default options for all tests # diff --git a/testing/tests/perf/test_sync.py b/testing/tests/perf/test_sync.py index 9de733fb..45af9a91 100644 --- a/testing/tests/perf/test_sync.py +++ b/testing/tests/perf/test_sync.py @@ -12,7 +12,7 @@ content = ' ' * 10000 @pytest.inlineCallbacks def test_upload(soledad_client, request): # create a bunch of local documents - uploads = 100 + uploads = request.config.option.num_docs deferreds = [] for i in xrange(uploads): d = soledad_client.create_doc({'upload': True, 'content': content}) @@ -32,7 +32,7 @@ def test_upload(soledad_client, request): @pytest.inlineCallbacks def test_download(soledad_client, request): # create a bunch of remote documents - downloads = 100 + downloads = request.config.option.num_docs url = request.config.getoption('--couch-url') remote = CouchDatabase(url, 'user-0') for i in xrange(downloads): -- cgit v1.2.3 From 682aab0b30e479ea4e826f0636340bb100b36c0a Mon Sep 17 00:00:00 2001 From: drebs Date: Tue, 26 Jul 2016 13:44:50 -0300 Subject: [test] add custom couchdb docker image --- scripts/docker/Makefile | 6 +++--- scripts/docker/README.md | 14 +++++++++----- scripts/docker/couchdb/Dockerfile | 3 +++ scripts/docker/couchdb/Makefile | 4 ++++ scripts/docker/couchdb/README.rst | 12 ++++++++++++ scripts/docker/couchdb/local.ini | 2 ++ 6 files changed, 33 insertions(+), 8 deletions(-) create mode 100644 scripts/docker/couchdb/Dockerfile create mode 100644 scripts/docker/couchdb/Makefile create mode 100644 scripts/docker/couchdb/README.rst create mode 100644 scripts/docker/couchdb/local.ini diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile index 4b4d4496..1bb57757 100644 --- a/scripts/docker/Makefile +++ b/scripts/docker/Makefile @@ -33,7 +33,7 @@ soledad-image: docker build -t $(IMAGE_NAME) . couchdb-image: - docker pull couchdb + (cd couchdb/ && make) ################################################## # Run a Soledad Server inside a docker container # @@ -77,7 +77,7 @@ run-client-bootstrap: run-tox: name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \ - docker run -d --name $${name} couchdb; \ + docker run -d --name $${name} leap/couchdb; \ docker run -t -i \ --memory="$(MEMORY)" \ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ @@ -93,7 +93,7 @@ run-tox: run-perf: name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \ - docker run -d --name $${name} couchdb; \ + docker run -d --name $${name} leap/couchdb; \ docker run -t -i \ --memory="$(MEMORY)" \ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ diff --git a/scripts/docker/README.md b/scripts/docker/README.md index fda1c04a..97b39f87 100644 --- a/scripts/docker/README.md +++ b/scripts/docker/README.md @@ -11,16 +11,20 @@ Check the `Dockerfile` for the steps for creating the docker image. Check the `Makefile` for the rules for running containers. 
-Check the `helper/` directory for scripts that help running tests. - Installation ------------ -0. update and install 1. Install docker for your system: https://docs.docker.com/ -2. Build the image by running `make` -3. Use one of the scripts in the `helper/` directory +2. Build images by running `make` +3. Execute `make run-tox` and `make run-perf` to run tox tests and perf tests, + respectivelly. +4. You may want to pass some variables to the `make` command to control + parameters of execution, for example: + + make run-perf SOLEDAD_PRELOAD_NUM=500 + + See more variables below. Environment variables for docker containers diff --git a/scripts/docker/couchdb/Dockerfile b/scripts/docker/couchdb/Dockerfile new file mode 100644 index 00000000..03448da5 --- /dev/null +++ b/scripts/docker/couchdb/Dockerfile @@ -0,0 +1,3 @@ +FROM couchdb:latest + +COPY local.ini /usr/local/etc/couchdb/ diff --git a/scripts/docker/couchdb/Makefile b/scripts/docker/couchdb/Makefile new file mode 100644 index 00000000..cf3ac966 --- /dev/null +++ b/scripts/docker/couchdb/Makefile @@ -0,0 +1,4 @@ +IMAGE_NAME ?= leap/couchdb + +image: + docker build -t $(IMAGE_NAME) . diff --git a/scripts/docker/couchdb/README.rst b/scripts/docker/couchdb/README.rst new file mode 100644 index 00000000..31a791a8 --- /dev/null +++ b/scripts/docker/couchdb/README.rst @@ -0,0 +1,12 @@ +Couchdb Docker image +==================== + +This directory contains rules to build a custom couchdb docker image to be +provided as backend to soledad server. + +Type `make` to build the image. + +Differences between this image and the official one: + + - add the "nodelay" socket option on the httpd section of the config file + (see: https://leap.se/code/issues/8264). diff --git a/scripts/docker/couchdb/local.ini b/scripts/docker/couchdb/local.ini new file mode 100644 index 00000000..3650e0ed --- /dev/null +++ b/scripts/docker/couchdb/local.ini @@ -0,0 +1,2 @@ +[httpd] +socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}] -- cgit v1.2.3 From c889ba67158850763394fc6087b0837716866cd1 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Wed, 27 Jul 2016 18:13:01 -0300 Subject: [test] remove duplicated function declaration `pytest_addoption` was declared twice making the second declaration replace the first, thus removing couch url parameter. --- testing/tests/perf/conftest.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py index 463c791a..5ec047e4 100644 --- a/testing/tests/perf/conftest.py +++ b/testing/tests/perf/conftest.py @@ -24,9 +24,6 @@ def pytest_addoption(parser): parser.addoption( "--couch-url", type="string", default="http://127.0.0.1:5984", help="the url for the couch server to be used during tests") - - -def pytest_addoption(parser): parser.addoption( "--num-docs", type="int", default=100, help="the number of documents to use in performance tests") -- cgit v1.2.3 From 72ee56e3863729b148cf8cc16e4004dc7b52acdd Mon Sep 17 00:00:00 2001 From: drebs Date: Thu, 28 Jul 2016 12:38:10 -0300 Subject: [feat] standardize metadata storage in couch backend. 
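
Move the metadata key names and the special document ids to module-level
constants, and record a schema_version in the config document. A sketch of
the resulting metadata documents (field values are illustrative only; the
key names come from the constants introduced in the diff below):

    # _local/config
    {'_id': '_local/config', 'replica_uid': '<uid>', 'schema_version': 1}

    # _local/sync_<other_replica_uid>
    {'_id': '_local/sync_A', 'gen': 0, 'trans_id': '', 'replica_uid': 'A'}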
--- common/src/leap/soledad/common/couch/__init__.py | 55 +++++++++++++++--------- 1 file changed, 34 insertions(+), 21 deletions(-) diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py index effb2be2..032f230b 100644 --- a/common/src/leap/soledad/common/couch/__init__.py +++ b/common/src/leap/soledad/common/couch/__init__.py @@ -105,6 +105,17 @@ def _get_gen_doc_id(gen): return 'gen-%s' % str(gen).zfill(10) +GENERATION_KEY = 'gen' +TRANSACTION_ID_KEY = 'trans_id' +REPLICA_UID_KEY = 'replica_uid' +DOC_ID_KEY = 'doc_id' +SCHEMA_VERSION_KEY = 'schema_version' + +CONFIG_DOC_ID = '_local/config' +SYNC_DOC_ID_PREFIX = '_local/sync_' +SCHEMA_VERSION = 1 + + class CouchDatabase(object): """ Holds CouchDB related code. @@ -112,9 +123,6 @@ class CouchDatabase(object): CouchDB details from backend code. """ - CONFIG_DOC_ID = '_local/config' - SYNC_DOC_ID_PREFIX = '_local/sync_' - _put_doc_lock = defaultdict(Lock) @classmethod @@ -251,13 +259,14 @@ class CouchDatabase(object): """ try: # set on existent config document - doc = self._database[self.CONFIG_DOC_ID] - doc['replica_uid'] = replica_uid + doc = self._database[CONFIG_DOC_ID] + doc[REPLICA_UID_KEY] = replica_uid except ResourceNotFound: # or create the config document doc = { - '_id': self.CONFIG_DOC_ID, - 'replica_uid': replica_uid, + '_id': CONFIG_DOC_ID, + REPLICA_UID_KEY: replica_uid, + SCHEMA_VERSION_KEY: SCHEMA_VERSION, } self._database.save(doc) @@ -270,8 +279,8 @@ class CouchDatabase(object): """ try: # grab replica_uid from server - doc = self._database[self.CONFIG_DOC_ID] - replica_uid = doc['replica_uid'] + doc = self._database[CONFIG_DOC_ID] + replica_uid = doc[REPLICA_UID_KEY] return replica_uid except ResourceNotFound: # create a unique replica_uid @@ -484,17 +493,18 @@ class CouchDatabase(object): synchronized with the replica, this is (0, ''). :rtype: (int, str) """ - doc_id = '%s%s' % (self.SYNC_DOC_ID_PREFIX, other_replica_uid) + doc_id = '%s%s' % (SYNC_DOC_ID_PREFIX, other_replica_uid) try: doc = self._database[doc_id] except ResourceNotFound: doc = { '_id': doc_id, - 'generation': 0, - 'transaction_id': '', + GENERATION_KEY: 0, + REPLICA_UID_KEY: str(other_replica_uid), + TRANSACTION_ID_KEY: '', } self._database.save(doc) - gen, trans_id = doc['generation'], doc['transaction_id'] + gen, trans_id = doc[GENERATION_KEY], doc[TRANSACTION_ID_KEY] return gen, trans_id def get_doc_conflicts(self, doc_id, couch_rev=None): @@ -546,13 +556,13 @@ class CouchDatabase(object): generation. 
:type other_transaction_id: str """ - doc_id = '%s%s' % (self.SYNC_DOC_ID_PREFIX, other_replica_uid) + doc_id = '%s%s' % (SYNC_DOC_ID_PREFIX, other_replica_uid) try: doc = self._database[doc_id] except ResourceNotFound: doc = {'_id': doc_id} - doc['generation'] = other_generation - doc['transaction_id'] = other_transaction_id + doc[GENERATION_KEY] = other_generation + doc[TRANSACTION_ID_KEY] = other_transaction_id self._database.save(doc) def get_transaction_log(self): @@ -586,7 +596,10 @@ class CouchDatabase(object): log = [] for row in rows: doc = row['doc'] - log.append((doc['gen'], doc['doc_id'], doc['trans_id'])) + log.append(( + doc[GENERATION_KEY], + doc[DOC_ID_KEY], + doc[TRANSACTION_ID_KEY])) return log def whats_changed(self, old_generation=0): @@ -632,7 +645,7 @@ class CouchDatabase(object): if not rows: return 0, '' gen_doc = rows.pop()['doc'] - return gen_doc['gen'], gen_doc['trans_id'] + return gen_doc[GENERATION_KEY], gen_doc[TRANSACTION_ID_KEY] def json_from_resource(self, doc_path, **kwargs): """ @@ -705,9 +718,9 @@ class CouchDatabase(object): new_gen = gen + 1 gen_doc = { '_id': _get_gen_doc_id(new_gen), - 'gen': new_gen, - 'doc_id': doc.doc_id, - 'trans_id': transaction_id, + GENERATION_KEY: new_gen, + DOC_ID_KEY: doc.doc_id, + TRANSACTION_ID_KEY: transaction_id, } self._database.save(gen_doc) -- cgit v1.2.3 From bc28ea6e652418791dcf63fadcc81db9c50e2d45 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 29 Jul 2016 10:01:46 -0300 Subject: [pkg] add couch schema migration script --- scripts/migration/0.8.2/README.md | 73 +++++++++++ scripts/migration/0.8.2/log/.empty | 0 scripts/migration/0.8.2/migrate.py | 77 +++++++++++ .../0.8.2/migrate_couch_schema/__init__.py | 142 +++++++++++++++++++++ scripts/migration/0.8.2/setup.py | 8 ++ scripts/migration/0.8.2/tests/conftest.py | 46 +++++++ scripts/migration/0.8.2/tests/test_migrate.py | 67 ++++++++++ scripts/migration/0.8.2/tox.ini | 13 ++ 8 files changed, 426 insertions(+) create mode 100644 scripts/migration/0.8.2/README.md create mode 100644 scripts/migration/0.8.2/log/.empty create mode 100755 scripts/migration/0.8.2/migrate.py create mode 100644 scripts/migration/0.8.2/migrate_couch_schema/__init__.py create mode 100644 scripts/migration/0.8.2/setup.py create mode 100644 scripts/migration/0.8.2/tests/conftest.py create mode 100644 scripts/migration/0.8.2/tests/test_migrate.py create mode 100644 scripts/migration/0.8.2/tox.ini diff --git a/scripts/migration/0.8.2/README.md b/scripts/migration/0.8.2/README.md new file mode 100644 index 00000000..919a5235 --- /dev/null +++ b/scripts/migration/0.8.2/README.md @@ -0,0 +1,73 @@ +CouchDB schema migration to Soledad 0.8.2 +========================================= + +Migrate couch database schema from <= 0.8.1 version to 0.8.2 version. + + +ATTENTION! +---------- + + - This script does not backup your data for you. Make sure you have a backup + copy of your databases before running this script! + + - Make sure you turn off any service that might be writing to the couch + database before running this script. + + +Usage +----- + +To see what the script would do, run: + + ./migrate.py + +To actually run the migration, add the --do-migrate command line option: + + ./migrate.py --do-migrate + +See command line options: + + ./migrate.py --help + + +Log +--- + +If you don't pass a --log-file command line option, a log will be written to +the `log/` folder. 
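+
+When the default is used, the file name combines the script name, the target
+version and a timestamp, so successive runs do not overwrite each other's
+logs.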
+ + +Differences between old and new couch schema +-------------------------------------------- + +The differences between old and new schemas are: + + - Transaction metadata was previously stored inside each document, and we + used design doc view/list functions to retrieve that information. Now, + transaction metadata is stored in documents with special ids + (gen-0000000001 to gen-9999999999). + + - Database replica config metadata was stored in a document called + "u1db_config", and now we store it in the "_local/config" document. + + - Sync metadata was previously stored in documents with id + "u1db_sync_", and now are stored in + "_local/sync_". + + - The new schema doesn't make use of any design documents. + + +What does this script do +------------------------ + +- List all databases starting with "user-". +- For each one, do: + - Check if it contains the old "u1db_config" document. + - If it doesn't, skip this db. + - Get the transaction log using the usual design doc view/list functions. + - Write a new "gen-X" document for each line on the transaction log. + - Get the "u1db_config" document, create a new one in "_local/config", + Delete the old one. + - List all "u1db_sync_X" documents, create new ones in "_local/sync_X", + delete the old ones. + - Delete unused design documents. diff --git a/scripts/migration/0.8.2/log/.empty b/scripts/migration/0.8.2/log/.empty new file mode 100644 index 00000000..e69de29b diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py new file mode 100755 index 00000000..159905ef --- /dev/null +++ b/scripts/migration/0.8.2/migrate.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# migrate.py + +""" +Migrate CouchDB schema to Soledad 0.8.2 schema. + +****************************************************************************** + ATTENTION! + + - This script does not backup your data for you. Make sure you have a backup + copy of your databases before running this script! + + - Make sure you turn off any service that might be writing to the couch + database before running this script. + +****************************************************************************** + +Run this script with the --help option to see command line options. + +See the README.md file for more information. 
+""" + +import datetime +import logging +import os + +from argparse import ArgumentParser + +from migrate_couch_schema import migrate + + +TARGET_VERSION = '0.8.2' +DEFAULT_COUCH_URL = 'http://127.0.0.1:5984' + + +# +# command line args and execution +# + +def _configure_logger(log_file): + if not log_file: + fname, _ = os.path.basename(__file__).split('.') + timestr = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + filename = 'soledad_%s_%s_%s.log' \ + % (TARGET_VERSION, fname, timestr) + dirname = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'log') + log_file = os.path.join(dirname, filename) + logging.basicConfig( + filename=log_file, + filemode='a', + format='%(asctime)s,%(msecs)d %(levelname)s %(message)s', + datefmt='%H:%M:%S', + level=logging.DEBUG) + + +def _parse_args(): + parser = ArgumentParser() + parser.add_argument( + '--couch_url', + help='the url for the couch database', + default=DEFAULT_COUCH_URL) + parser.add_argument( + '--do-migrate', + help='actually perform the migration (otherwise ' + 'just print what would be done)', + action='store_true') + parser.add_argument( + '--log-file', + help='the log file to use') + return parser.parse_args() + + +if __name__ == '__main__': + args = _parse_args() + _configure_logger(args.log_file) + migrate(args, TARGET_VERSION) diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py new file mode 100644 index 00000000..37e5a525 --- /dev/null +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -0,0 +1,142 @@ +# __init__.py +""" +Support functions for migration script. +""" + +import logging + +from couchdb import Server + +from leap.soledad.common.couch import GENERATION_KEY +from leap.soledad.common.couch import TRANSACTION_ID_KEY +from leap.soledad.common.couch import REPLICA_UID_KEY +from leap.soledad.common.couch import DOC_ID_KEY +from leap.soledad.common.couch import SCHEMA_VERSION_KEY +from leap.soledad.common.couch import CONFIG_DOC_ID +from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX +from leap.soledad.common.couch import SCHEMA_VERSION + + +logger = logging.getLogger(__name__) + + +# +# support functions +# + +def _get_couch_server(couch_url): + return Server(couch_url) + + +def _is_migrateable(db): + config_doc = db.get('u1db_config') + if config_doc is None: + return False + return True + + +def _get_transaction_log(db): + ddoc_path = ['_design', 'transactions', '_view', 'log'] + resource = db.resource(*ddoc_path) + _, _, data = resource.get_json() + rows = data['rows'] + transaction_log = [] + gen = 1 + for row in rows: + transaction_log.append((gen, row['id'], row['value'])) + gen += 1 + return transaction_log + + +def _get_user_dbs(server): + user_dbs = filter(lambda dbname: dbname.startswith('user-'), server) + return user_dbs + + +# +# migration main functions +# + +def migrate(args, target_version): + server = _get_couch_server(args.couch_url) + logger.info('starting couch schema migration to %s...' 
% target_version) + if not args.do_migrate: + logger.warning('dry-run: no changes will be made to databases') + user_dbs = _get_user_dbs(server) + for dbname in user_dbs: + db = server[dbname] + if not _is_migrateable(db): + logger.warning("skipping user db: %s" % dbname) + continue + logger.info("starting migration of user db: %s" % dbname) + _migrate_user_db(db, args.do_migrate) + logger.info("finished migration of user db: %s" % dbname) + logger.info('finished couch schema migration to %s' % target_version) + + +def _migrate_user_db(db, do_migrate): + _migrate_transaction_log(db, do_migrate) + _migrate_config_doc(db, do_migrate) + _migrate_sync_docs(db, do_migrate) + _delete_design_docs(db, do_migrate) + + +def _migrate_transaction_log(db, do_migrate): + transaction_log = _get_transaction_log(db) + for gen, doc_id, trans_id in transaction_log: + gen_doc_id = 'gen-%s' % str(gen).zfill(10) + doc = { + '_id': gen_doc_id, + GENERATION_KEY: gen, + DOC_ID_KEY: doc_id, + TRANSACTION_ID_KEY: trans_id, + } + logger.info('creating gen doc: %s' % (gen_doc_id)) + if do_migrate: + db.save(doc) + + +def _migrate_config_doc(db, do_migrate): + old_doc = db['u1db_config'] + new_doc = { + '_id': CONFIG_DOC_ID, + REPLICA_UID_KEY: old_doc[REPLICA_UID_KEY], + SCHEMA_VERSION_KEY: SCHEMA_VERSION, + } + logger.info("moving config doc: %s -> %s" + % (old_doc['_id'], new_doc['_id'])) + if do_migrate: + db.save(new_doc) + db.delete(old_doc) + + +def _migrate_sync_docs(db, do_migrate): + view = db.view( + '_all_docs', + startkey='u1db_sync', + endkey='u1db_synd', + include_docs='true') + for row in view.rows: + old_doc = row['doc'] + old_id = old_doc['_id'] + replica_uid = old_id.replace('u1db_sync_', '') + new_id = "%s%s" % (SYNC_DOC_ID_PREFIX, replica_uid) + new_doc = { + '_id': new_id, + GENERATION_KEY: old_doc['generation'], + TRANSACTION_ID_KEY: old_doc['transaction_id'], + REPLICA_UID_KEY: replica_uid, + } + logger.info("moving sync doc: %s -> %s" % (old_id, new_id)) + if do_migrate: + db.save(new_doc) + db.delete(old_doc) + + +def _delete_design_docs(db, do_migrate): + for ddoc in ['docs', 'syncs', 'transactions']: + doc_id = '_design/%s' % ddoc + doc = db.get(doc_id) + logger.info("deleting design doc: %s" % doc_id) + if do_migrate: + db.delete(doc) diff --git a/scripts/migration/0.8.2/setup.py b/scripts/migration/0.8.2/setup.py new file mode 100644 index 00000000..0467e932 --- /dev/null +++ b/scripts/migration/0.8.2/setup.py @@ -0,0 +1,8 @@ +from setuptools import setup +from setuptools import find_packages + + +setup( + name='migrate_couch_schema', + packages=find_packages('.'), +) diff --git a/scripts/migration/0.8.2/tests/conftest.py b/scripts/migration/0.8.2/tests/conftest.py new file mode 100644 index 00000000..92d1e17e --- /dev/null +++ b/scripts/migration/0.8.2/tests/conftest.py @@ -0,0 +1,46 @@ +# conftest.py + +""" +Provide a couch database with content stored in old schema. 
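+
+The initial documents below exercise every step of the migration: the old
+u1db_config document, two u1db_sync_* documents, two documents carrying
+u1db_transactions metadata, and the old design documents.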
+""" + +import couchdb +import pytest +import uuid + + +COUCH_URL = 'http://127.0.0.1:5984' + +transaction_map = """ +function(doc) { + if (doc.u1db_transactions) + doc.u1db_transactions.forEach(function(t) { + emit(t[0], // use timestamp as key so the results are ordered + t[1]); // value is the transaction_id + }); +} +""" + +initial_docs = [ + {'_id': 'u1db_config', 'replica_uid': 'an-uid'}, + {'_id': 'u1db_sync_A', 'generation': 0, 'replica_uid': 'A', + 'transaction_id': ''}, + {'_id': 'u1db_sync_B', 'generation': 2, 'replica_uid': 'B', + 'transaction_id': 'X'}, + {'_id': 'doc1', 'u1db_transactions': [(1, 'trans-1'), (3, 'trans-3')]}, + {'_id': 'doc2', 'u1db_transactions': [(2, 'trans-2'), (4, 'trans-4')]}, + {'_id': '_design/docs'}, + {'_id': '_design/syncs'}, + {'_id': '_design/transactions', 'views': {'log': {'map': transaction_map}}} +] + + +@pytest.fixture(scope='function') +def db(request): + server = couchdb.Server(COUCH_URL) + dbname = "user-" + uuid.uuid4().hex + db = server.create(dbname) + for doc in initial_docs: + db.save(doc) + request.addfinalizer(lambda: server.delete(dbname)) + return db diff --git a/scripts/migration/0.8.2/tests/test_migrate.py b/scripts/migration/0.8.2/tests/test_migrate.py new file mode 100644 index 00000000..10c8b906 --- /dev/null +++ b/scripts/migration/0.8.2/tests/test_migrate.py @@ -0,0 +1,67 @@ +# test_migrate.py + +""" +Ensure that the migration script works! +""" + +from migrate_couch_schema import _migrate_user_db + +from leap.soledad.common.couch import GENERATION_KEY +from leap.soledad.common.couch import TRANSACTION_ID_KEY +from leap.soledad.common.couch import REPLICA_UID_KEY +from leap.soledad.common.couch import DOC_ID_KEY +from leap.soledad.common.couch import SCHEMA_VERSION_KEY +from leap.soledad.common.couch import CONFIG_DOC_ID +from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX +from leap.soledad.common.couch import SCHEMA_VERSION + + +def test__migrate_user_db(db): + _migrate_user_db(db, True) + + # we should find exactly 6 documents: 2 normal documents and 4 generation + # documents + view = db.view('_all_docs') + assert len(view.rows) == 6 + + # ensure that the ids of the documents we found on the database are correct + doc_ids = map(lambda doc: doc.id, view.rows) + assert 'doc1' in doc_ids + assert 'doc2' in doc_ids + assert 'gen-0000000001' in doc_ids + assert 'gen-0000000002' in doc_ids + assert 'gen-0000000003' in doc_ids + assert 'gen-0000000004' in doc_ids + + # assert config doc contents + config_doc = db.get(CONFIG_DOC_ID) + assert config_doc[REPLICA_UID_KEY] == 'an-uid' + assert config_doc[SCHEMA_VERSION_KEY] == SCHEMA_VERSION + + # assert sync docs contents + sync_doc_A = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'A')) + assert sync_doc_A[GENERATION_KEY] == 0 + assert sync_doc_A[REPLICA_UID_KEY] == 'A' + assert sync_doc_A[TRANSACTION_ID_KEY] == '' + sync_doc_B = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'B')) + assert sync_doc_B[GENERATION_KEY] == 2 + assert sync_doc_B[REPLICA_UID_KEY] == 'B' + assert sync_doc_B[TRANSACTION_ID_KEY] == 'X' + + # assert gen docs contents + gen_1 = db.get('gen-0000000001') + assert gen_1[DOC_ID_KEY] == 'doc1' + assert gen_1[GENERATION_KEY] == 1 + assert gen_1[TRANSACTION_ID_KEY] == 'trans-1' + gen_2 = db.get('gen-0000000002') + assert gen_2[DOC_ID_KEY] == 'doc2' + assert gen_2[GENERATION_KEY] == 2 + assert gen_2[TRANSACTION_ID_KEY] == 'trans-2' + gen_3 = db.get('gen-0000000003') + assert gen_3[DOC_ID_KEY] == 'doc1' + assert gen_3[GENERATION_KEY] == 3 + assert gen_3[TRANSACTION_ID_KEY] == 
'trans-3' + gen_4 = db.get('gen-0000000004') + assert gen_4[DOC_ID_KEY] == 'doc2' + assert gen_4[GENERATION_KEY] == 4 + assert gen_4[TRANSACTION_ID_KEY] == 'trans-4' diff --git a/scripts/migration/0.8.2/tox.ini b/scripts/migration/0.8.2/tox.ini new file mode 100644 index 00000000..2bb6be4c --- /dev/null +++ b/scripts/migration/0.8.2/tox.ini @@ -0,0 +1,13 @@ +[tox] +envlist = py27 + +[testenv] +commands = py.test {posargs} +changedir = tests +deps = + pytest + couchdb + pdbpp + -e../../../common +setenv = + TERM=xterm -- cgit v1.2.3 From 06d0157728f5f8f273e4c83d42bf5b92784734c7 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 29 Jul 2016 20:14:28 -0300 Subject: [test] remove pip download cache Recent versions of pip will ignore that option and use a cache anyway. --- testing/tox.ini | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/tox.ini b/testing/tox.ini index 5f401a35..c3126cf2 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -23,5 +23,4 @@ deps = setenv = HOME=/tmp TERM=xterm - PIP_DOWNLOAD_CACHE=/var/cache/pip install_command = pip install {opts} {packages} -- cgit v1.2.3 From 1106d871e9a7e09cedac436a0488fc87af177b67 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 29 Jul 2016 20:34:19 -0300 Subject: [bug] use couch lock to atomize saving of document --- common/src/leap/soledad/common/couch/__init__.py | 98 ++++++++++++------------ 1 file changed, 50 insertions(+), 48 deletions(-) diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py index 032f230b..2d57635a 100644 --- a/common/src/leap/soledad/common/couch/__init__.py +++ b/common/src/leap/soledad/common/couch/__init__.py @@ -705,8 +705,6 @@ class CouchDatabase(object): } parts.append(conflicts) - # add the gen document - # TODO: in u1db protocol, the increment of database generation should # be made in the same atomic transaction as the actual document save, # otherwise the same document might be concurrently updated by @@ -714,6 +712,8 @@ class CouchDatabase(object): # and doc_id would be enough to prevent that, if all entry points to # database update are made through the soledad api. with self._put_doc_lock[self._database.name]: + + # add the gen document gen, _ = self.get_generation_info() new_gen = gen + 1 gen_doc = { @@ -724,52 +724,54 @@ class CouchDatabase(object): } self._database.save(gen_doc) - # build the couch document - couch_doc = { - '_id': doc.doc_id, - 'u1db_rev': doc.rev, - '_attachments': attachments, - } - # if we are updating a doc we have to add the couch doc revision - if old_doc is not None and hasattr(old_doc, 'couch_rev'): - couch_doc['_rev'] = old_doc.couch_rev - # prepare the multipart PUT - if not self.batching: - buf = StringIO() - envelope = MultipartWriter(buf) - # the order in which attachments are described inside the - # serialization of the couch document must match the order in which - # they are actually written in the multipart structure. Because of - # that, we use `sorted_keys=True` in the json serialization (so - # "u1db_conflicts" comes before "u1db_content" on the couch - # document attachments description), and also reverse the order of - # the parts before writing them, so the "conflict" part is written - # before the "content" part. 
- envelope.add( - 'application/json', - json.dumps(couch_doc, sort_keys=True)) - parts.reverse() - for part in parts: - envelope.add('application/octet-stream', part) - envelope.close() - # try to save and fail if there's a revision conflict - try: - resource = self._new_resource() - resource.put_json( - doc.doc_id, body=str(buf.getvalue()), - headers=envelope.headers) - except ResourceConflict: - raise RevisionConflict() - else: - for name, attachment in attachments.items(): - del attachment['follows'] - del attachment['length'] - index = 0 if name is 'u1db_content' else 1 - attachment['data'] = binascii.b2a_base64(parts[index]).strip() - couch_doc['_attachments'] = attachments - self.batch_docs[doc.doc_id] = couch_doc - last_gen, last_trans_id = self.batch_generation - self.batch_generation = (last_gen + 1, transaction_id) + # build the couch document + couch_doc = { + '_id': doc.doc_id, + 'u1db_rev': doc.rev, + '_attachments': attachments, + } + # if we are updating a doc we have to add the couch doc revision + if old_doc is not None and hasattr(old_doc, 'couch_rev'): + couch_doc['_rev'] = old_doc.couch_rev + # prepare the multipart PUT + if not self.batching: + buf = StringIO() + envelope = MultipartWriter(buf) + # the order in which attachments are described inside the + # serialization of the couch document must match the order in + # which they are actually written in the multipart structure. + # Because of that, we use `sorted_keys=True` in the json + # serialization (so "u1db_conflicts" comes before + # "u1db_content" on the couch document attachments + # description), and also reverse the order of the parts before + # writing them, so the "conflict" part is written before the + # "content" part. + envelope.add( + 'application/json', + json.dumps(couch_doc, sort_keys=True)) + parts.reverse() + for part in parts: + envelope.add('application/octet-stream', part) + envelope.close() + # try to save and fail if there's a revision conflict + try: + resource = self._new_resource() + resource.put_json( + doc.doc_id, body=str(buf.getvalue()), + headers=envelope.headers) + except ResourceConflict: + raise RevisionConflict() + else: + for name, attachment in attachments.items(): + del attachment['follows'] + del attachment['length'] + index = 0 if name is 'u1db_content' else 1 + attachment['data'] = binascii.b2a_base64( + parts[index]).strip() + couch_doc['_attachments'] = attachments + self.batch_docs[doc.doc_id] = couch_doc + last_gen, last_trans_id = self.batch_generation + self.batch_generation = (last_gen + 1, transaction_id) def _new_resource(self, *path): """ -- cgit v1.2.3 From 027d0b5f40944973807e1a4fc497c496e78b3eeb Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 29 Jul 2016 20:36:59 -0300 Subject: [refactor] simplify couch whats_changed calculation --- common/src/leap/soledad/common/couch/__init__.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py index 2d57635a..9edbe380 100644 --- a/common/src/leap/soledad/common/couch/__init__.py +++ b/common/src/leap/soledad/common/couch/__init__.py @@ -623,12 +623,10 @@ class CouchDatabase(object): cur_generation, last_trans_id = self.get_generation_info() relevant_tail = self._get_transaction_log(start=old_generation + 1) seen = set() - generation = cur_generation - for _, doc_id, trans_id in reversed(relevant_tail): + for generation, doc_id, trans_id in reversed(relevant_tail): if doc_id not in seen: 
changes.append((doc_id, generation, trans_id))
                 seen.add(doc_id)
-                generation -= 1
         changes.reverse()
         return (cur_generation, last_trans_id, changes)
 
-- 
cgit v1.2.3


From 3b237bb46743a93feed4bb6f3c839d72fc28df48 Mon Sep 17 00:00:00 2001
From: drebs
Date: Sun, 31 Jul 2016 10:37:35 -0300
Subject: [feat] use couch _all_docs for get_docs() and get_all_docs()

The previous solution would make use of concurrent gets to the couch
backend in a pool of threads to implement the get_docs() and get_all_docs()
CouchDatabase backend methods.

This commit replaces those with a simpler implementation that uses the
`_all_docs` couchdb view api. It passes all needed IDs to the view and
retrieves all documents with content in the same request.

A comparison between both implementations shows an improvement of at least
15 times for large numbers of documents.

The table below shows the time of the threads implementation versus the
_all_docs implementation of get_all_docs() for different numbers of
documents:

+-------+-----------------+------------------+-------------+
|       | threads         | _all_docs        | improvement |
+-------+-----------------+------------------+-------------+
| 10    | 0.0728030204773 | 0.00782012939453 | 9.3         |
| 100   | 0.609349966049  | 0.0377721786499  | 16.1        |
| 1000  | 5.86522197723   | 0.370730876923   | 15.8        |
| 10000 | 66.1713931561   | 3.61764383316    | 18.3        |
+-------+-----------------+------------------+-------------+
---
 common/src/leap/soledad/common/couch/__init__.py | 59 +++++++++++++-----------
 1 file changed, 33 insertions(+), 26 deletions(-)

diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py
index 9edbe380..d0c1a7ba 100644
--- a/common/src/leap/soledad/common/couch/__init__.py
+++ b/common/src/leap/soledad/common/couch/__init__.py
@@ -23,15 +23,12 @@ import json
 import re
 import uuid
 import binascii
-import time
-import functools
 
 from collections import defaultdict
 from StringIO import StringIO
 from urlparse import urljoin
 from contextlib import contextmanager
-from multiprocessing.pool import ThreadPool
 from threading import Lock
 
 
@@ -98,9 +95,6 @@ def couch_server(url):
         yield server
 
 
-THREAD_POOL = ThreadPool(20)
-
-
 def _get_gen_doc_id(gen):
     return 'gen-%s' % str(gen).zfill(10)
 
@@ -307,8 +301,8 @@ class CouchDatabase(object):
         """
         generation, _ = self.get_generation_info()
-        results = list(self.get_docs(self._database,
-                                     include_deleted=include_deleted))
+        results = list(
+            self._get_docs(None, True, include_deleted))
         return (generation, results)
 
     def get_docs(self, doc_ids, check_for_conflicts=True,
@@ -329,24 +323,37 @@ class CouchDatabase(object):
             in matching doc_ids order.
         :rtype: iterable
         """
-        # Workaround for:
-        #
-        # http://bugs.python.org/issue7980
-        # https://leap.se/code/issues/5449
-        #
-        # python-couchdb uses time.strptime, which is not thread safe. In
-        # order to avoid the problem described on the issues above, we preload
-        # strptime here by evaluating the conversion of an arbitrary date.
-        # This will not be needed when/if we switch from python-couchdb to
-        # paisley.
- time.strptime('Mar 8 1917', '%b %d %Y') - get_one = functools.partial( - self.get_doc, check_for_conflicts=check_for_conflicts) - docs = [THREAD_POOL.apply_async(get_one, [doc_id]) - for doc_id in doc_ids] - for doc in docs: - doc = doc.get() - if not doc or not include_deleted and doc.is_tombstone(): + return self._get_docs(doc_ids, check_for_conflicts, include_deleted) + + def _get_docs(self, doc_ids, check_for_conflicts, include_deleted): + """ + Use couch's `_all_docs` view to get the documents indicated in + `doc_ids`, + + :param doc_ids: A list of document identifiers or None for all. + :type doc_ids: list + :param check_for_conflicts: If set to False, then the conflict check + will be skipped, and 'None' will be + returned instead of True/False. + :type check_for_conflicts: bool + :param include_deleted: If set to True, deleted documents will be + returned with empty content. Otherwise deleted + documents will not be included in the results. + + :return: iterable giving the Document object for each document id + in matching doc_ids order. + :rtype: iterable + """ + params = {'include_docs': 'true', 'attachments': 'true'} + if doc_ids is not None: + params['keys'] = doc_ids + view = self._database.view("_all_docs", **params) + for row in view.rows: + result = row['doc'] + doc = self.__parse_doc_from_couch( + result, result['_id'], check_for_conflicts=check_for_conflicts) + # filter out non-u1db or deleted documents + if not doc or (not include_deleted and doc.is_tombstone()): continue yield doc -- cgit v1.2.3 From 49cd07b909f2185b116bda5b30cfcfe0095291e0 Mon Sep 17 00:00:00 2001 From: drebs Date: Sun, 31 Jul 2016 18:25:48 -0300 Subject: [bug] retry allocation of gen instead of using a lock The use of a lock to allocate the next generation of a change in couch backend suffers from at least 2 problems: 1. all modification to the couch database would have to be made through a soledad server entrypoint, otherwise the lock would have no effect. 2. introducing a lock makes code uglier, harder to debug, and prone to undesired blocks. The solution implemented by this commit is not so elegant, but works for what we need right now. Now, concurrent threads updating the couch database will race for the allocation of a new generation, and retry when they fail to do so. There's no high risk of getting blocked for too much time in the while loop because (1) there's always one thread that wins (what makes the expected number of retries to be N/2 if N is the number of concurrent threads), and (2) the number of concurrent attempts to update the user database is limited by the number of devices syncing at the same time. --- common/src/leap/soledad/common/couch/__init__.py | 166 +++++++++++++---------- 1 file changed, 95 insertions(+), 71 deletions(-) diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py index d0c1a7ba..06c94c27 100644 --- a/common/src/leap/soledad/common/couch/__init__.py +++ b/common/src/leap/soledad/common/couch/__init__.py @@ -25,11 +25,9 @@ import uuid import binascii -from collections import defaultdict from StringIO import StringIO from urlparse import urljoin from contextlib import contextmanager -from threading import Lock from couchdb.client import Server, Database @@ -117,8 +115,6 @@ class CouchDatabase(object): CouchDB details from backend code. 
""" - _put_doc_lock = defaultdict(Lock) - @classmethod def open_database(cls, url, create, ensure_ddocs=False, replica_uid=None, database_security=None): @@ -670,6 +666,51 @@ class CouchDatabase(object): _, _, data = resource.get_json(**kwargs) return data + def _allocate_new_generation(self, doc_id, transaction_id): + """ + Allocate a new generation number for a document modification. + + We need to allocate a new generation to this document modification by + creating a new gen doc. In order to avoid concurrent database updates + from allocating the same new generation, we will try to create the + document until we succeed, meaning that no other piece of code holds + the same generation number as ours. + + The loop below would only be executed more than once if: + + 1. there's more than one thread trying to modify the user's database, + and + + 2. the execution of getting the current generation and saving the gen + doc different threads get interleaved (one of them will succeed + and the others will fail and try again). + + Number 1 only happens when more than one user device is syncing at the + same time. Number 2 depends on not-so-frequent coincidence of + code execution. + + Also, in the race between threads for a generation number there's + always one thread that wins. so if there are N threads in the race, the + expected number of repetitions of the loop for each thread would be + N/2. If N is equal to the number of devices that the user has, the + number of possible repetitions of the loop should always be low. + """ + while True: + try: + # add the gen document + gen, _ = self.get_generation_info() + new_gen = gen + 1 + gen_doc = { + '_id': _get_gen_doc_id(new_gen), + GENERATION_KEY: new_gen, + DOC_ID_KEY: doc_id, + TRANSACTION_ID_KEY: transaction_id, + } + self._database.save(gen_doc) + break # succeeded allocating a new generation, proceed + except ResourceConflict: + pass # try again! + def save_document(self, old_doc, doc, transaction_id): """ Put the document in the Couch backend database. @@ -710,73 +751,56 @@ class CouchDatabase(object): } parts.append(conflicts) - # TODO: in u1db protocol, the increment of database generation should - # be made in the same atomic transaction as the actual document save, - # otherwise the same document might be concurrently updated by - # concurrent syncs from other replicas. A simple lock based on the uuid - # and doc_id would be enough to prevent that, if all entry points to - # database update are made through the soledad api. - with self._put_doc_lock[self._database.name]: - - # add the gen document - gen, _ = self.get_generation_info() - new_gen = gen + 1 - gen_doc = { - '_id': _get_gen_doc_id(new_gen), - GENERATION_KEY: new_gen, - DOC_ID_KEY: doc.doc_id, - TRANSACTION_ID_KEY: transaction_id, - } - self._database.save(gen_doc) - - # build the couch document - couch_doc = { - '_id': doc.doc_id, - 'u1db_rev': doc.rev, - '_attachments': attachments, - } - # if we are updating a doc we have to add the couch doc revision - if old_doc is not None and hasattr(old_doc, 'couch_rev'): - couch_doc['_rev'] = old_doc.couch_rev - # prepare the multipart PUT - if not self.batching: - buf = StringIO() - envelope = MultipartWriter(buf) - # the order in which attachments are described inside the - # serialization of the couch document must match the order in - # which they are actually written in the multipart structure. 
- # Because of that, we use `sorted_keys=True` in the json - # serialization (so "u1db_conflicts" comes before - # "u1db_content" on the couch document attachments - # description), and also reverse the order of the parts before - # writing them, so the "conflict" part is written before the - # "content" part. - envelope.add( - 'application/json', - json.dumps(couch_doc, sort_keys=True)) - parts.reverse() - for part in parts: - envelope.add('application/octet-stream', part) - envelope.close() - # try to save and fail if there's a revision conflict - try: - resource = self._new_resource() - resource.put_json( - doc.doc_id, body=str(buf.getvalue()), - headers=envelope.headers) - except ResourceConflict: - raise RevisionConflict() - else: - for name, attachment in attachments.items(): - del attachment['follows'] - del attachment['length'] - index = 0 if name is 'u1db_content' else 1 - attachment['data'] = binascii.b2a_base64( - parts[index]).strip() - couch_doc['_attachments'] = attachments - self.batch_docs[doc.doc_id] = couch_doc - last_gen, last_trans_id = self.batch_generation - self.batch_generation = (last_gen + 1, transaction_id) + self._allocate_new_generation(doc.doc_id, transaction_id) + + # build the couch document + couch_doc = { + '_id': doc.doc_id, + 'u1db_rev': doc.rev, + '_attachments': attachments, + } + # if we are updating a doc we have to add the couch doc revision + if old_doc is not None and hasattr(old_doc, 'couch_rev'): + couch_doc['_rev'] = old_doc.couch_rev + # prepare the multipart PUT + if not self.batching: + buf = StringIO() + envelope = MultipartWriter(buf) + # the order in which attachments are described inside the + # serialization of the couch document must match the order in + # which they are actually written in the multipart structure. + # Because of that, we use `sorted_keys=True` in the json + # serialization (so "u1db_conflicts" comes before + # "u1db_content" on the couch document attachments + # description), and also reverse the order of the parts before + # writing them, so the "conflict" part is written before the + # "content" part. 
+ envelope.add( + 'application/json', + json.dumps(couch_doc, sort_keys=True)) + parts.reverse() + for part in parts: + envelope.add('application/octet-stream', part) + envelope.close() + # try to save and fail if there's a revision conflict + try: + resource = self._new_resource() + resource.put_json( + doc.doc_id, body=str(buf.getvalue()), + headers=envelope.headers) + except ResourceConflict: + raise RevisionConflict() + else: + for name, attachment in attachments.items(): + del attachment['follows'] + del attachment['length'] + index = 0 if name is 'u1db_content' else 1 + attachment['data'] = binascii.b2a_base64( + parts[index]).strip() + couch_doc['_attachments'] = attachments + self.batch_docs[doc.doc_id] = couch_doc + last_gen, last_trans_id = self.batch_generation + self.batch_generation = (last_gen + 1, transaction_id) def _new_resource(self, *path): """ -- cgit v1.2.3 From 2ce01514d42e9fcd4bf97a9a06655ceebca5c394 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 1 Aug 2016 19:58:49 -0300 Subject: [refactor] remove unused design docs compilation code --- common/setup.py | 125 +---------------------- scripts/ddocs/update_design_docs.py | 170 ------------------------------- scripts/packaging/compile_design_docs.py | 112 -------------------- 3 files changed, 1 insertion(+), 406 deletions(-) delete mode 100644 scripts/ddocs/update_design_docs.py delete mode 100644 scripts/packaging/compile_design_docs.py diff --git a/common/setup.py b/common/setup.py index 7191fa00..c8a543ac 100644 --- a/common/setup.py +++ b/common/setup.py @@ -17,13 +17,8 @@ """ setup file for leap.soledad.common """ -import binascii -import json -from os import listdir -from os.path import realpath, dirname, isdir, join, isfile, basename import re -from distutils.command.build import build as _build from setuptools import setup from setuptools import find_packages from setuptools import Command @@ -110,117 +105,6 @@ def get_versions(): with open(versioneer_cfg.versionfile_source, 'w') as f: f.write(subst_template) -cmdclass = versioneer.get_cmdclass() - -# -# Couch backend design docs file generation. -# - -old_cmd_sdist = cmdclass["sdist"] - - -def build_ddocs_py(basedir=None, with_src=True): - """ - Build `ddocs.py` file. - - For ease of development, couch backend design documents are stored as - `.js` files in subdirectories of `src/leap/soledad/common/ddocs`. This - function scans that directory for javascript files, builds the design - documents structure, and encode those structures in the `ddocs.py` file. - - This function is used when installing in develop mode, building or - generating source distributions (see the next classes and the `cmdclass` - setuptools parameter. - - This funciton uses the following conventions to generate design documents: - - - Design documents are represented by directories in the form - `/`, there prefix is the `src/leap/soledad/common/ddocs` - directory. - - Design document directories might contain `views`, `lists` and - `updates` subdirectories. - - Views subdirectories must contain a `map.js` file and may contain a - `reduce.js` file. - - List and updates subdirectories may contain any number of javascript - files (i.e. ending in `.js`) whose names will be mapped to the - corresponding list or update function name. 
- """ - cur_pwd = dirname(realpath(__file__)) - common_path = ('src', 'leap', 'soledad', 'common') - dest_common_path = common_path - if not with_src: - dest_common_path = common_path[1:] - prefix = join(cur_pwd, *common_path) - - dest_prefix = prefix - if basedir is not None: - # we're bulding a sdist - dest_prefix = join(basedir, *dest_common_path) - - ddocs_prefix = join(prefix, 'ddocs') - - if not isdir(ddocs_prefix): - print "No ddocs/ folder, bailing out..." - return - - ddocs = {} - - # design docs are represented by subdirectories of `ddocs_prefix` - for ddoc in [f for f in listdir(ddocs_prefix) - if isdir(join(ddocs_prefix, f))]: - - ddocs[ddoc] = {'_id': '_design/%s' % ddoc} - - for t in ['views', 'lists', 'updates']: - tdir = join(ddocs_prefix, ddoc, t) - if isdir(tdir): - - ddocs[ddoc][t] = {} - - if t == 'views': # handle views (with map/reduce functions) - for view in [f for f in listdir(tdir) - if isdir(join(tdir, f))]: - # look for map.js and reduce.js - mapfile = join(tdir, view, 'map.js') - reducefile = join(tdir, view, 'reduce.js') - mapfun = None - reducefun = None - try: - with open(mapfile) as f: - mapfun = f.read() - except IOError: - pass - try: - with open(reducefile) as f: - reducefun = f.read() - except IOError: - pass - ddocs[ddoc]['views'][view] = {} - - if mapfun is not None: - ddocs[ddoc]['views'][view]['map'] = mapfun - if reducefun is not None: - ddocs[ddoc]['views'][view]['reduce'] = reducefun - - else: # handle lists, updates, etc - for fun in [f for f in listdir(tdir) - if isfile(join(tdir, f))]: - funfile = join(tdir, fun) - funname = basename(funfile).replace('.js', '') - try: - with open(funfile) as f: - ddocs[ddoc][t][funname] = f.read() - except IOError: - pass - # write file containing design docs strings - ddoc_filename = "ddocs.py" - with open(join(dest_prefix, ddoc_filename), 'w') as f: - for ddoc in ddocs: - f.write( - "%s = '%s'\n" % - (ddoc, binascii.b2a_base64(json.dumps(ddocs[ddoc]))[:-1])) - print "Wrote design docs in %s" % (dest_prefix + '/' + ddoc_filename,) - class cmd_develop(_cmd_develop): def run(self): @@ -230,17 +114,10 @@ class cmd_develop(_cmd_develop): # unless we update this, the command will keep using the old version self.distribution.metadata.version = versions["version"] _cmd_develop.run(self) - build_ddocs_py() - - -class cmd_build(_build): - def run(self): - _build.run(self) - build_ddocs_py(basedir=self.build_lib, with_src=False) +cmdclass = versioneer.get_cmdclass() cmdclass["freeze_debianver"] = freeze_debianver -cmdclass["build"] = cmd_build cmdclass["develop"] = cmd_develop diff --git a/scripts/ddocs/update_design_docs.py b/scripts/ddocs/update_design_docs.py deleted file mode 100644 index 281482b8..00000000 --- a/scripts/ddocs/update_design_docs.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/python - -# This script updates Soledad's design documents in the session database and -# all user databases with contents from the installed leap.soledad.common -# package. 
- -import json -import logging -import argparse -import re -import threading -import binascii - -from urlparse import urlparse -from getpass import getpass -from ConfigParser import ConfigParser - -from couchdb.client import Server -from couchdb.http import Resource -from couchdb.http import Session -from couchdb.http import ResourceNotFound - -from leap.soledad.common import ddocs - - -MAX_THREADS = 20 -DESIGN_DOCS = { - '_design/docs': json.loads(binascii.a2b_base64(ddocs.docs)), - '_design/syncs': json.loads(binascii.a2b_base64(ddocs.syncs)), - '_design/transactions': json.loads( - binascii.a2b_base64(ddocs.transactions)), -} - - -# create a logger -logger = logging.getLogger(__name__) -LOG_FORMAT = '%(asctime)s %(message)s' -logging.basicConfig(format=LOG_FORMAT, level=logging.INFO) - - -def _parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument('-u', dest='uuid', default=None, type=str, - help='the UUID of the user') - parser.add_argument('-t', dest='threads', default=MAX_THREADS, type=int, - help='the number of parallel threads') - return parser.parse_args() - - -def _get_url(): - # get couch url - cp = ConfigParser() - cp.read('/etc/soledad/soledad-server.conf') - url = urlparse(cp.get('soledad-server', 'couch_url')) - # get admin password - netloc = re.sub('^.*@', '', url.netloc) - url = url._replace(netloc=netloc) - password = getpass("Admin password for %s: " % url.geturl()) - return url._replace(netloc='admin:%s@%s' % (password, netloc)) - - -def _get_server(url): - resource = Resource( - url.geturl(), Session(retry_delays=[1, 2, 4, 8], timeout=10)) - return Server(url=resource) - - -def _confirm(url): - hidden_url = re.sub( - 'http://(.*):.*@', - 'http://\\1:xxxxx@', - url.geturl()) - - print """ - ========== - ATTENTION! - ========== - - This script will modify Soledad's shared and user databases in: - - %s - - This script does not make a backup of the couch db data, so make sure you - have a copy or you may loose data. - """ % hidden_url - confirm = raw_input("Proceed (type uppercase YES)? ") - - if confirm != "YES": - exit(1) - - -# -# Thread -# - -class DBWorkerThread(threading.Thread): - - def __init__(self, server, dbname, db_idx, db_len, release_fun): - threading.Thread.__init__(self) - self._dbname = dbname - self._cdb = server[self._dbname] - self._db_idx = db_idx - self._db_len = db_len - self._release_fun = release_fun - - def run(self): - - logger.info( - "(%d/%d) Updating db %s." - % (self._db_idx, self._db_len, self._dbname)) - - for doc_id in DESIGN_DOCS: - try: - doc = self._cdb[doc_id] - except ResourceNotFound: - doc = {'_id': doc_id} - for key in ['lists', 'views', 'updates']: - if key in DESIGN_DOCS[doc_id]: - doc[key] = DESIGN_DOCS[doc_id][key] - self._cdb.save(doc) - - # release the semaphore - self._release_fun() - - -def _launch_update_design_docs_thread( - server, dbname, db_idx, db_len, semaphore_pool): - semaphore_pool.acquire() # wait for an available working slot - thread = DBWorkerThread( - server, dbname, db_idx, db_len, semaphore_pool.release) - thread.daemon = True - thread.start() - return thread - - -def _update_design_docs(args, server): - - # find the actual databases to be updated - dbs = [] - if args.uuid: - dbs.append('user-%s' % args.uuid) - else: - for dbname in server: - if dbname.startswith('user-') or dbname == 'shared': - dbs.append(dbname) - else: - logger.info("Skipping db %s." 
% dbname) - - db_idx = 0 - db_len = len(dbs) - semaphore_pool = threading.BoundedSemaphore(value=args.threads) - threads = [] - - # launch the update - for db in dbs: - db_idx += 1 - threads.append( - _launch_update_design_docs_thread( - server, db, db_idx, db_len, semaphore_pool)) - - # wait for all threads to finish - map(lambda thread: thread.join(), threads) - - -if __name__ == "__main__": - args = _parse_args() - url = _get_url() - _confirm(url) - server = _get_server(url) - _update_design_docs(args, server) diff --git a/scripts/packaging/compile_design_docs.py b/scripts/packaging/compile_design_docs.py deleted file mode 100644 index b2b5729a..00000000 --- a/scripts/packaging/compile_design_docs.py +++ /dev/null @@ -1,112 +0,0 @@ -#!/usr/bin/python - - -# This script builds files for the design documents represented in the -# ../common/src/soledad/common/ddocs directory structure (relative to the -# current location of the script) into a target directory. - - -import argparse -from os import listdir -from os.path import realpath, dirname, isdir, join, isfile, basename -import json - -DDOCS_REL_PATH = ('..', 'common', 'src', 'leap', 'soledad', 'common', 'ddocs') - - -def build_ddocs(): - """ - Build design documents. - - For ease of development, couch backend design documents are stored as - `.js` files in subdirectories of - `../common/src/leap/soledad/common/ddocs`. This function scans that - directory for javascript files, and builds the design documents structure. - - This funciton uses the following conventions to generate design documents: - - - Design documents are represented by directories in the form - `/`, there prefix is the `src/leap/soledad/common/ddocs` - directory. - - Design document directories might contain `views`, `lists` and - `updates` subdirectories. - - Views subdirectories must contain a `map.js` file and may contain a - `reduce.js` file. - - List and updates subdirectories may contain any number of javascript - files (i.e. ending in `.js`) whose names will be mapped to the - corresponding list or update function name. 
- """ - ddocs = {} - - # design docs are represented by subdirectories of `DDOCS_REL_PATH` - cur_pwd = dirname(realpath(__file__)) - ddocs_path = join(cur_pwd, *DDOCS_REL_PATH) - for ddoc in [f for f in listdir(ddocs_path) - if isdir(join(ddocs_path, f))]: - - ddocs[ddoc] = {'_id': '_design/%s' % ddoc} - - for t in ['views', 'lists', 'updates']: - tdir = join(ddocs_path, ddoc, t) - if isdir(tdir): - - ddocs[ddoc][t] = {} - - if t == 'views': # handle views (with map/reduce functions) - for view in [f for f in listdir(tdir) - if isdir(join(tdir, f))]: - # look for map.js and reduce.js - mapfile = join(tdir, view, 'map.js') - reducefile = join(tdir, view, 'reduce.js') - mapfun = None - reducefun = None - try: - with open(mapfile) as f: - mapfun = f.read() - except IOError: - pass - try: - with open(reducefile) as f: - reducefun = f.read() - except IOError: - pass - ddocs[ddoc]['views'][view] = {} - - if mapfun is not None: - ddocs[ddoc]['views'][view]['map'] = mapfun - if reducefun is not None: - ddocs[ddoc]['views'][view]['reduce'] = reducefun - - else: # handle lists, updates, etc - for fun in [f for f in listdir(tdir) - if isfile(join(tdir, f))]: - funfile = join(tdir, fun) - funname = basename(funfile).replace('.js', '') - try: - with open(funfile) as f: - ddocs[ddoc][t][funname] = f.read() - except IOError: - pass - return ddocs - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument( - 'target', type=str, - help='the target dir where to store design documents') - args = parser.parse_args() - - # check if given target is a directory - if not isdir(args.target): - print 'Error: %s is not a directory.' % args.target - exit(1) - - # write desifgn docs files - ddocs = build_ddocs() - for ddoc in ddocs: - ddoc_filename = "%s.json" % ddoc - with open(join(args.target, ddoc_filename), 'w') as f: - f.write("%s" % json.dumps(ddocs[ddoc], indent=3)) - print "Wrote _design/%s content in %s" \ - % (ddoc, join(args.target, ddoc_filename,)) -- cgit v1.2.3 From c11478c65856a2a607ed538b6dbb9873a2b0963c Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 1 Aug 2016 19:00:45 -0300 Subject: [test] adds pep8 as a tox env "tox -e pep8" runs it standalone and "tox" includes the pep8 env. --- testing/tox.ini | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/testing/tox.ini b/testing/tox.ini index c3126cf2..246f63fa 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -1,13 +1,11 @@ [tox] -envlist = py27 +envlist = py27,pep8 [testenv] commands = py.test {posargs} changedir = tests deps = pytest - pytest-flake8 - pytest-pep8 pytest-twisted mock testscenarios @@ -24,3 +22,8 @@ setenv = HOME=/tmp TERM=xterm install_command = pip install {opts} {packages} + +[testenv:pep8] +changedir = .. +deps = pep8 +commands = pep8 {posargs} client server common -- cgit v1.2.3 From 4c2be2af0c328d4d413e8f26a9056d2102e94ea9 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 1 Aug 2016 19:42:49 -0300 Subject: [test] adds optional parallel env for local dev --- testing/tox.ini | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/testing/tox.ini b/testing/tox.ini index 246f63fa..83793cc7 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -27,3 +27,10 @@ install_command = pip install {opts} {packages} changedir = .. 
deps = pep8 commands = pep8 {posargs} client server common + +[testenv:parallel] +deps = + {[testenv]deps} + pytest-xdist +install_command = pip install {opts} {packages} +commands = py.test {posargs} -n 4 -- cgit v1.2.3 From e315847bd06bb575505720bd7e882e07406b1fab Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 1 Aug 2016 21:12:58 -0300 Subject: [test] do not run pep8 tox env by default --- testing/tox.ini | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testing/tox.ini b/testing/tox.ini index 83793cc7..a25bc1e6 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py27,pep8 +envlist = py27 [testenv] commands = py.test {posargs} @@ -10,7 +10,6 @@ deps = mock testscenarios setuptools-trial - pep8 pdbpp couchdb requests -- cgit v1.2.3 From 1c8b39b808a5d5b56f5463d29ad1a7e901bf84d5 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Tue, 2 Aug 2016 00:07:13 -0300 Subject: [test] avoid race condition on test_processing_order test_processing_order aims to check that unordered docs wont be processed, but if we let the pool start and advance Twisted LoopingCall clock right before calling the processing method manually, the process method will run concurrently and cause a race condition issue. --- client/src/leap/soledad/client/encdecpool.py | 5 ++++- testing/tests/sync/test_encdecpool.py | 9 +++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/client/src/leap/soledad/client/encdecpool.py b/client/src/leap/soledad/client/encdecpool.py index a6d49b21..2cf5da6e 100644 --- a/client/src/leap/soledad/client/encdecpool.py +++ b/client/src/leap/soledad/client/encdecpool.py @@ -344,6 +344,9 @@ class SyncDecrypterPool(SyncEncryptDecryptPool): self._loop = LoopingCall(self._decrypt_and_recurse) + def _start_pool(self, period): + self._loop.start(period) + def start(self, docs_to_process): """ Set the number of documents we expect to process. @@ -360,7 +363,7 @@ class SyncDecrypterPool(SyncEncryptDecryptPool): self._docs_to_process = docs_to_process self._deferred = defer.Deferred() d = self._init_db() - d.addCallback(lambda _: self._loop.start(self.DECRYPT_LOOP_PERIOD)) + d.addCallback(lambda _: self._start_pool(self.DECRYPT_LOOP_PERIOD)) return d def stop(self): diff --git a/testing/tests/sync/test_encdecpool.py b/testing/tests/sync/test_encdecpool.py index 82e99a47..0aa17682 100644 --- a/testing/tests/sync/test_encdecpool.py +++ b/testing/tests/sync/test_encdecpool.py @@ -29,7 +29,6 @@ from leap.soledad.client.encdecpool import SyncDecrypterPool from leap.soledad.common.document import SoledadDocument from test_soledad.util import BaseSoledadTest from twisted.internet import defer -from twisted.test.proto_helpers import MemoryReactorClock DOC_ID = "mydoc" DOC_REV = "rev" @@ -219,9 +218,6 @@ class TestSyncDecrypterPool(BaseSoledadTest): This test ensures that processing of documents only occur if there is a sequence in place. 
""" - reactor_clock = MemoryReactorClock() - self._pool._loop.clock = reactor_clock - crypto = self._soledad._crypto docs = [] @@ -234,18 +230,19 @@ class TestSyncDecrypterPool(BaseSoledadTest): docs.append((doc, encrypted_content)) # insert the encrypted document in the pool - self._pool.start(10) # pool is expecting to process 10 docs + yield self._pool.start(10) # pool is expecting to process 10 docs + self._pool._loop.stop() # we are processing manually # first three arrives, forming a sequence for i, (doc, encrypted_content) in enumerate(docs[:3]): gen = idx = i + 1 yield self._pool.insert_encrypted_received_doc( doc.doc_id, doc.rev, encrypted_content, gen, "trans_id", idx) + # last one arrives alone, so it can't be processed doc, encrypted_content = docs[-1] yield self._pool.insert_encrypted_received_doc( doc.doc_id, doc.rev, encrypted_content, 10, "trans_id", 10) - reactor_clock.advance(self._pool.DECRYPT_LOOP_PERIOD) yield self._pool._decrypt_and_recurse() self.assertEqual(3, self._pool._processed_docs) -- cgit v1.2.3 From 9084597674130682d84cd1884c8dbd24b866096e Mon Sep 17 00:00:00 2001 From: drebs Date: Wed, 3 Aug 2016 21:30:08 -0300 Subject: [pkg] support netrc couch access in migrate script --- scripts/migration/0.8.2/migrate.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py index 159905ef..adc0f7d9 100755 --- a/scripts/migration/0.8.2/migrate.py +++ b/scripts/migration/0.8.2/migrate.py @@ -22,15 +22,20 @@ See the README.md file for more information. import datetime import logging +import netrc import os from argparse import ArgumentParser +from leap.soledad.server import load_configuration + from migrate_couch_schema import migrate TARGET_VERSION = '0.8.2' DEFAULT_COUCH_URL = 'http://127.0.0.1:5984' +CONF = load_configuration('/etc/soledad/soledad-server.conf') +NETRC_PATH = CONF['soledad-server']['admin_netrc'] # @@ -54,12 +59,24 @@ def _configure_logger(log_file): level=logging.DEBUG) +def _default_couch_url(): + if not os.path.exists(NETRC_PATH): + return DEFAULT_COUCH_URL + parsed_netrc = netrc.netrc(NETRC_PATH) + host, (login, _, password) = parsed_netrc.hosts.items()[0] + url = ('http://%(login)s:%(password)s@%(host)s:5984' % { + 'login': login, + 'password': password, + 'host': host}) + return url + + def _parse_args(): parser = ArgumentParser() parser.add_argument( '--couch_url', help='the url for the couch database', - default=DEFAULT_COUCH_URL) + default=_default_couch_url()) parser.add_argument( '--do-migrate', help='actually perform the migration (otherwise ' -- cgit v1.2.3 From 18e4ffe5562be61efe9cd206494b9853e063a897 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 5 Aug 2016 12:34:13 -0300 Subject: [bug] create gen document after saving the actual document in couch If we create the gen document before saving the actual document in couch, we may run into problems if more than one client is syncing and trying to save documents with the same id at the same time. By moving the gen document creation to after the actual document save in couch, we rely on couch/u1db resolution of conflicts before actually allocating a new generation, and the problem above doesn't occur. 
--- common/src/leap/soledad/common/couch/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py index 06c94c27..be30210c 100644 --- a/common/src/leap/soledad/common/couch/__init__.py +++ b/common/src/leap/soledad/common/couch/__init__.py @@ -751,8 +751,6 @@ class CouchDatabase(object): } parts.append(conflicts) - self._allocate_new_generation(doc.doc_id, transaction_id) - # build the couch document couch_doc = { '_id': doc.doc_id, @@ -802,6 +800,8 @@ class CouchDatabase(object): last_gen, last_trans_id = self.batch_generation self.batch_generation = (last_gen + 1, transaction_id) + self._allocate_new_generation(doc.doc_id, transaction_id) + def _new_resource(self, *path): """ Return a new resource for accessing a couch database. -- cgit v1.2.3 From c8709722223f6c813ad49df09185c316746f9edb Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 8 Aug 2016 13:50:38 -0300 Subject: [pkg] remove version pinning for couchdb and beaker We will not maintain support for older versions of debian as that introduces some unneeded complexity for now. Also, the version pinned for couchdb python library has a bug that makes some requests slow. Because of those, we remove the pinning for now. --- server/pkg/requirements.pip | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/server/pkg/requirements.pip b/server/pkg/requirements.pip index 2d845f24..e92dfde6 100644 --- a/server/pkg/requirements.pip +++ b/server/pkg/requirements.pip @@ -1,6 +1,5 @@ configparser PyOpenSSL twisted>=12.3.0 -#pinned for wheezy compatibility -Beaker==1.6.3 #wheezy -couchdb==0.8 #wheezy +Beaker +couchdb -- cgit v1.2.3 From 7d2e485d37c2928b7ed773df7f6a5aa98e3396c0 Mon Sep 17 00:00:00 2001 From: drebs Date: Tue, 9 Aug 2016 15:25:50 -0300 Subject: [feat] log to syslog (#8286) --- server/pkg/soledad-server | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/pkg/soledad-server b/server/pkg/soledad-server index 74ed122e..9dada6a0 100644 --- a/server/pkg/soledad-server +++ b/server/pkg/soledad-server @@ -12,7 +12,6 @@ PATH=/sbin:/bin:/usr/sbin:/usr/bin PIDFILE=/var/run/soledad.pid OBJ=leap.soledad.server.application -LOGFILE=/var/log/soledad.log HTTPS_PORT=2424 CONFDIR=/etc/soledad CERT_PATH="${CONFDIR}/soledad-server.pem" @@ -37,7 +36,8 @@ case "${1}" in --exec ${TWISTD_PATH} -- \ --uid=${USER} --gid=${GROUP} \ --pidfile=${PIDFILE} \ - --logfile=${LOGFILE} \ + --syslog \ + --prefix=soledad-server \ web \ --wsgi=${OBJ} \ --port=ssl:${HTTPS_PORT}:privateKey=${PRIVKEY_PATH}:certKey=${CERT_PATH}:sslmethod=${SSL_METHOD} -- cgit v1.2.3 From 6fd0062c4c2199e610d7832bbfbd57a07abab9e1 Mon Sep 17 00:00:00 2001 From: drebs Date: Tue, 9 Aug 2016 15:38:39 -0300 Subject: [test] use "leapcode" in docker image name "leapcode" is the LEAP docker hub organisation varac could squat (https://hub.docker.com/r/leap/ was already taken). 
---
 .gitlab-ci.yml          | 2 +-
 scripts/docker/Makefile | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 2835e5cf..820dbd2a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,5 +1,5 @@
 tests:
-  image: leap/soledad:1.0
+  image: leapcode/soledad:1.0
   services:
     - couchdb
   script:
diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile
index 1bb57757..0fdc93fa 100644
--- a/scripts/docker/Makefile
+++ b/scripts/docker/Makefile
@@ -16,7 +16,7 @@
 # Some configurations you might override when calling this makefile #
 #####################################################################
 
-IMAGE_NAME           ?= leap/soledad:1.0
+IMAGE_NAME           ?= leapcode/soledad:1.0
 SOLEDAD_REMOTE       ?= https://0xacab.org/leap/soledad.git
 SOLEDAD_BRANCH       ?= develop
 SOLEDAD_PRELOAD_NUM  ?= 100
-- 
cgit v1.2.3


From 29f528927322bb8e3ca326399367c33225b492b5 Mon Sep 17 00:00:00 2001
From: drebs
Date: Mon, 15 Aug 2016 20:39:06 -0300
Subject: [test] allow for shell autocomplete for test names

tox was configured to change to the testing/tests directory before executing
pytest, by using tox's "changedir" configuration option. The reason why this
was the case is that we wanted to discover tests inside the testing/tests
directory only.

The problem with that approach is that if we wanted to point to a specific
test file, for example "tests/perf/test_sync.py", we would have to omit the
"tests" part and write "tox perf/test_sync.py", because the argument would
be understood as relative to the changed dir. That is not practical, as it
doesn't allow use of shell autocompletion, and it is also not the only way
to achieve what we want.

Actually, pytest has a configuration option called "testpaths" where you can
indicate where it should discover tests. This commit replaces one approach
with the other and allows shell autocompletion of test names, for ease of
testing during development.
---
 testing/pytest.ini       | 3 +++
 testing/tests/pytest.ini | 2 --
 testing/tox.ini          | 1 -
 3 files changed, 3 insertions(+), 3 deletions(-)
 create mode 100644 testing/pytest.ini
 delete mode 100644 testing/tests/pytest.ini

diff --git a/testing/pytest.ini b/testing/pytest.ini
new file mode 100644
index 00000000..2d34c607
--- /dev/null
+++ b/testing/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+testpaths = tests
+norecursedirs = tests/perf
diff --git a/testing/tests/pytest.ini b/testing/tests/pytest.ini
deleted file mode 100644
index 3d785ca7..00000000
--- a/testing/tests/pytest.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[pytest]
-norecursedirs = perf
diff --git a/testing/tox.ini b/testing/tox.ini
index a25bc1e6..a7d62189 100644
--- a/testing/tox.ini
+++ b/testing/tox.ini
@@ -3,7 +3,6 @@ envlist = py27
 
 [testenv]
 commands = py.test {posargs}
-changedir = tests
 deps =
     pytest
     pytest-twisted
-- 
cgit v1.2.3


From dc0bae8b6025a060297b55520674cd7238f0186b Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Wed, 17 Aug 2016 23:00:34 -0300
Subject: [bug] remove misleading ensure_ddoc

ensure_ddoc doesn't make sense anymore, as we don't have any ddoc other than
_security, which has its own method for setting.

'ensure_security' is explicit and is set internally when a user database is
being created; otherwise it will be False, as it's only used during
creation. This isn't exposed externally (outside of the couch module) to
avoid confusion.

This confusion was making create-user-db fail to create a security ddoc, as
it wasn't passing ensure_ddocs=True.
-- Resolves: #8388 --- common/src/leap/soledad/common/couch/__init__.py | 15 ++++++--------- common/src/leap/soledad/common/couch/state.py | 2 +- scripts/profiling/mail/couchdb_server.py | 5 ++--- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py index be30210c..0f4102db 100644 --- a/common/src/leap/soledad/common/couch/__init__.py +++ b/common/src/leap/soledad/common/couch/__init__.py @@ -116,7 +116,7 @@ class CouchDatabase(object): """ @classmethod - def open_database(cls, url, create, ensure_ddocs=False, replica_uid=None, + def open_database(cls, url, create, replica_uid=None, database_security=None): """ Open a U1DB database using CouchDB as backend. @@ -127,8 +127,6 @@ class CouchDatabase(object): :type create: bool :param replica_uid: an optional unique replica identifier :type replica_uid: str - :param ensure_ddocs: Ensure that the design docs exist on server. - :type ensure_ddocs: bool :param database_security: security rules as CouchDB security doc :type database_security: dict @@ -149,21 +147,20 @@ class CouchDatabase(object): server.create(dbname) else: raise DatabaseDoesNotExist() - db = cls(url, - dbname, ensure_ddocs=ensure_ddocs, + db = cls(url, dbname, ensure_security=create, database_security=database_security) return SoledadBackend( db, replica_uid=replica_uid) - def __init__(self, url, dbname, ensure_ddocs=True, + def __init__(self, url, dbname, ensure_security=False, database_security=None): """ :param url: Couch server URL with necessary credentials :type url: string :param dbname: Couch database name :type dbname: string - :param ensure_ddocs: Ensure that the design docs exist on server. - :type ensure_ddocs: bool + :param ensure_security: will PUT a _security ddoc if set + :type ensure_security: bool :param database_security: security rules as CouchDB security doc :type database_security: dict """ @@ -174,7 +171,7 @@ class CouchDatabase(object): self.batching = False self.batch_generation = None self.batch_docs = {} - if ensure_ddocs: + if ensure_security: self.ensure_security_ddoc(database_security) def batch_start(self): diff --git a/common/src/leap/soledad/common/couch/state.py b/common/src/leap/soledad/common/couch/state.py index 9b40a264..9ff9fe55 100644 --- a/common/src/leap/soledad/common/couch/state.py +++ b/common/src/leap/soledad/common/couch/state.py @@ -80,7 +80,7 @@ class CouchServerState(ServerState): :rtype: SoledadBackend """ url = urljoin(self.couch_url, dbname) - db = CouchDatabase.open_database(url, create=False, ensure_ddocs=False) + db = CouchDatabase.open_database(url, create=False) return db def ensure_database(self, dbname): diff --git a/scripts/profiling/mail/couchdb_server.py b/scripts/profiling/mail/couchdb_server.py index 2cf0a3fd..452f8ec2 100644 --- a/scripts/profiling/mail/couchdb_server.py +++ b/scripts/profiling/mail/couchdb_server.py @@ -18,8 +18,7 @@ def start_couchdb_wrapper(): def get_u1db_database(dbname, port): return CouchDatabase.open_database( 'http://127.0.0.1:%d/%s' % (port, dbname), - True, - ensure_ddocs=True) + True) def create_tokens_database(port, uuid, token_value): @@ -38,5 +37,5 @@ def get_couchdb_wrapper_and_u1db(uuid, token_value): couchdb_u1db = get_u1db_database('user-%s' % uuid, couchdb_wrapper.port) get_u1db_database('shared', couchdb_wrapper.port) create_tokens_database(couchdb_wrapper.port, uuid, token_value) - + return couchdb_wrapper, couchdb_u1db -- cgit v1.2.3 From 
e8747096045933754d3f8ac2608cce844f6b0fee Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Wed, 17 Aug 2016 23:07:04 -0300 Subject: [tests] Adapt tests for ensure_ddocs death --- testing/tests/couch/common.py | 3 +-- testing/tests/couch/test_ddocs.py | 8 +++----- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/testing/tests/couch/common.py b/testing/tests/couch/common.py index 48d30168..84790059 100644 --- a/testing/tests/couch/common.py +++ b/testing/tests/couch/common.py @@ -17,8 +17,7 @@ def make_couch_database_for_test(test, replica_uid): db = couch.CouchDatabase.open_database( urljoin(test.couch_url, dbname), create=True, - replica_uid=replica_uid or 'test', - ensure_ddocs=True) + replica_uid=replica_uid or 'test') test.addCleanup(test.delete_db, dbname) return db diff --git a/testing/tests/couch/test_ddocs.py b/testing/tests/couch/test_ddocs.py index 2060e27d..3937f2de 100644 --- a/testing/tests/couch/test_ddocs.py +++ b/testing/tests/couch/test_ddocs.py @@ -9,16 +9,16 @@ class CouchDesignDocsTests(CouchDBTestCase): def setUp(self): CouchDBTestCase.setUp(self) + self.create_db() - def create_db(self, ensure=True, dbname=None): + def create_db(self, dbname=None): if not dbname: dbname = ('test-%s' % uuid4().hex) if dbname not in self.couch_server: self.couch_server.create(dbname) self.db = couch.CouchDatabase( (self.couch_url), - dbname, - ensure_ddocs=ensure) + dbname) def tearDown(self): self.db.delete_database() @@ -30,7 +30,6 @@ class CouchDesignDocsTests(CouchDBTestCase): Ensure_security creates a _security ddoc to ensure that only soledad will have the lowest privileged access to an user db. """ - self.create_db(ensure=False) self.assertFalse(self.db._database.resource.get_json('_security')[2]) self.db.ensure_security_ddoc() security_ddoc = self.db._database.resource.get_json('_security')[2] @@ -43,7 +42,6 @@ class CouchDesignDocsTests(CouchDBTestCase): """ Given a configuration, follow it to create the security document """ - self.create_db(ensure=False) configuration = {'members': ['user1', 'user2'], 'members_roles': ['role1', 'role2'], 'admins': ['admin'], -- cgit v1.2.3 From f0f3e0358a01708eb048d8eaf463361e682be466 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Tue, 9 Aug 2016 21:08:13 -0300 Subject: [test] Adds pytest-benchmark adapted to Twisted Adapted pytest-benchmark to Twisted as it's synchronous and added fixtures for benchmarking. 
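
The core of the adaptation is to run pytest-benchmark's synchronous runner
in a worker thread and drive each Deferred-returning callable on the
reactor with blockingCallFromThread. As a sketch (not part of this patch),
a test using the new txbenchmark fixture would look like:

    @pytest.inlineCallbacks
    def test_create_doc(soledad_client, txbenchmark):
        client = soledad_client()
        # create_doc() returns a Deferred; txbenchmark times it by
        # blocking on the reactor from the benchmark's worker thread.
        yield txbenchmark(client.create_doc, {'content': 'x' * 1000})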
--- testing/tests/perf/conftest.py | 99 +++++++++++++++++++++----------- testing/tests/perf/test_sync.py | 121 +++++++++++++++++++++++++++++----------- testing/tox.ini | 1 + 3 files changed, 155 insertions(+), 66 deletions(-) diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py index 5ec047e4..8a75d0ae 100644 --- a/testing/tests/perf/conftest.py +++ b/testing/tests/perf/conftest.py @@ -6,8 +6,10 @@ import signal import time from hashlib import sha512 +from uuid import uuid4 from subprocess import call from urlparse import urljoin +from twisted.internet import threads, reactor from leap.soledad.client import Soledad from leap.soledad.common.couch import CouchDatabase @@ -59,17 +61,17 @@ class SoledadDatabases(object): self._token_db_url = urljoin(url, _token_dbname()) self._shared_db_url = urljoin(url, 'shared') - def setup(self): + def setup(self, uuid): self._create_dbs() - self._add_token() + self._add_token(uuid) def _create_dbs(self): requests.put(self._token_db_url) requests.put(self._shared_db_url) - def _add_token(self): + def _add_token(self, uuid): token = sha512(DEFAULT_TOKEN).hexdigest() - content = {'type': 'Token', 'user_id': DEFAULT_UUID} + content = {'type': 'Token', 'user_id': uuid} requests.put( self._token_db_url + '/' + token, data=json.dumps(content)) @@ -81,37 +83,41 @@ class SoledadDatabases(object): @pytest.fixture(scope='module') def soledad_dbs(request): couch_url = request.config.option.couch_url - db = SoledadDatabases(couch_url) - db.setup() - request.addfinalizer(db.teardown) - return db + + def create(uuid=DEFAULT_UUID): + db = SoledadDatabases(couch_url) + request.addfinalizer(db.teardown) + return db.setup(uuid) + return create # -# user_db fixture: provides an empty database for a given user in a per +# remote_db fixture: provides an empty database for a given user in a per # function scope. 
# class UserDatabase(object): - def __init__(self, url): - self._user_db_url = urljoin(url, 'user-%s' % DEFAULT_UUID) + def __init__(self, url, uuid): + self._remote_db_url = urljoin(url, 'user-%s' % uuid) def setup(self): - CouchDatabase.open_database( - url=self._user_db_url, create=True, replica_uid=None) + return CouchDatabase.open_database( + url=self._remote_db_url, create=True, replica_uid=None) def teardown(self): - requests.delete(self._user_db_url) + requests.delete(self._remote_db_url) @pytest.fixture(scope='function') -def user_db(request): +def remote_db(request): couch_url = request.config.option.couch_url - db = UserDatabase(couch_url) - db.setup() - request.addfinalizer(db.teardown) - return db + + def create(uuid=DEFAULT_UUID): + db = UserDatabase(couch_url, uuid) + request.addfinalizer(db.teardown) + return db.setup() + return create def get_pid(pidfile): @@ -172,26 +178,55 @@ def soledad_server(tmpdir_factory, request): return server +@pytest.fixture(scope='function') +def txbenchmark(benchmark): + def blockOnThread(*args, **kwargs): + return threads.deferToThread( + benchmark, threads.blockingCallFromThread, + reactor, *args, **kwargs) + return blockOnThread + + +@pytest.fixture(scope='function') +def txbenchmark_with_setup(benchmark): + def blockOnThreadWithSetup(setup, f): + def blocking_runner(*args, **kwargs): + return threads.blockingCallFromThread(reactor, f, *args, **kwargs) + + def blocking_setup(): + return threads.blockingCallFromThread(reactor, setup) + + def bench(): + return benchmark.pedantic(blocking_runner, setup=blocking_setup, + rounds=4, warmup_rounds=1) + return threads.deferToThread(bench) + return blockOnThreadWithSetup + + # # soledad_client fixture: provides a clean soledad client for a test function. # @pytest.fixture() -def soledad_client(tmpdir, soledad_server, user_db, soledad_dbs): - uuid = DEFAULT_UUID +def soledad_client(tmpdir, soledad_server, remote_db, soledad_dbs): passphrase = DEFAULT_PASSPHRASE - secrets_path = os.path.join(tmpdir.strpath, '%s.secret' % uuid) - local_db_path = os.path.join(tmpdir.strpath, '%s.db' % uuid) server_url = DEFAULT_URL token = DEFAULT_TOKEN # get a soledad instance - return Soledad( - uuid, - unicode(passphrase), - secrets_path=secrets_path, - local_db_path=local_db_path, - server_url=server_url, - cert_file=None, - auth_token=token, - defer_encryption=True) + def create(new=False): + uuid = uuid4().hex if new else DEFAULT_UUID + secrets_path = os.path.join(tmpdir.strpath, '%s.secret' % uuid4().hex) + local_db_path = os.path.join(tmpdir.strpath, '%s.db' % uuid4().hex) + remote_db(uuid) + soledad_dbs(uuid) + return Soledad( + uuid, + unicode(passphrase), + secrets_path=secrets_path, + local_db_path=local_db_path, + server_url=server_url, + cert_file=None, + auth_token=token, + defer_encryption=True) + return create diff --git a/testing/tests/perf/test_sync.py b/testing/tests/perf/test_sync.py index 45af9a91..ea109d05 100644 --- a/testing/tests/perf/test_sync.py +++ b/testing/tests/perf/test_sync.py @@ -2,47 +2,100 @@ import pytest from twisted.internet.defer import gatherResults -from leap.soledad.common.couch import CouchDatabase -from leap.soledad.common.document import ServerDocument + +def load_up(client, amount, size): + content = 'x'*size + deferreds = [] + # create a bunch of local documents + for i in xrange(amount): + d = client.create_doc({'content': content}) + deferreds.append(d) + d = gatherResults(deferreds) + d.addCallback(lambda _: None) + return d -content = ' ' * 10000 
+@pytest.inlineCallbacks +@pytest.mark.benchmark(group="test_upload") +def test_upload_20_500k(soledad_client, txbenchmark_with_setup): + uploads, size, client = 20, 500*1000, soledad_client() + + def setup(): + return load_up(client, uploads, size) + + yield txbenchmark_with_setup(setup, client.sync) @pytest.inlineCallbacks -def test_upload(soledad_client, request): - # create a bunch of local documents - uploads = request.config.option.num_docs - deferreds = [] - for i in xrange(uploads): - d = soledad_client.create_doc({'upload': True, 'content': content}) - deferreds.append(d) - yield gatherResults(deferreds) +@pytest.mark.benchmark(group="test_upload") +def test_upload_100_100k(soledad_client, txbenchmark_with_setup): + uploads, size, client = 100, 100*1000, soledad_client() + + def setup(): + return load_up(client, uploads, size) + + yield txbenchmark_with_setup(setup, client.sync) + + +@pytest.inlineCallbacks +@pytest.mark.benchmark(group="test_upload") +def test_upload_1000_10k(soledad_client, txbenchmark_with_setup): + uploads, size, client = 1000, 10*1000, soledad_client() - # synchronize - yield soledad_client.sync() + def setup(): + return load_up(client, uploads, size) - # check that documents reached the remote database - url = request.config.getoption('--couch-url') - remote = CouchDatabase(url, 'user-0') - remote_count, _ = remote.get_all_docs() - assert remote_count == uploads + yield txbenchmark_with_setup(setup, client.sync) @pytest.inlineCallbacks -def test_download(soledad_client, request): - # create a bunch of remote documents - downloads = request.config.option.num_docs - url = request.config.getoption('--couch-url') - remote = CouchDatabase(url, 'user-0') - for i in xrange(downloads): - doc = ServerDocument('doc-%d' % i, 'replica:1') - doc.content = {'download': True, 'content': content} - remote.save_document(None, doc, i) - - # synchronize - yield soledad_client.sync() - - # check that documents reached the local database - local_count, docs = yield soledad_client.get_all_docs() - assert local_count == downloads +@pytest.mark.benchmark(group="test_download") +def test_download_20_500k(soledad_client, txbenchmark_with_setup): + downloads, size, client = 20, 500*1000, soledad_client() + + yield load_up(client, downloads, size) + yield client.sync() + + def setup(): + clean_client = soledad_client() + return (clean_client,), {} + + def sync(clean_client): + return clean_client.sync() + yield txbenchmark_with_setup(setup, sync) + + +@pytest.inlineCallbacks +@pytest.mark.benchmark(group="test_download") +def test_download_100_100k(soledad_client, txbenchmark_with_setup): + downloads, size, client = 100, 100*1000, soledad_client() + + yield load_up(client, downloads, size) + yield client.sync() + # We could create them directly on remote db, but sending them + # ensures we are dealing with properly encrypted docs + + def setup(): + clean_client = soledad_client() + return (clean_client,), {} + + def sync(clean_client): + return clean_client.sync() + yield txbenchmark_with_setup(setup, sync) + + +@pytest.inlineCallbacks +@pytest.mark.benchmark(group="test_download") +def test_download_1000_10k(soledad_client, txbenchmark_with_setup): + downloads, size, client = 1000, 10*1000, soledad_client() + + yield load_up(client, downloads, size) + yield client.sync() + + def setup(): + clean_client = soledad_client() + return (clean_client,), {} + + def sync(clean_client): + return clean_client.sync() + yield txbenchmark_with_setup(setup, sync) diff --git a/testing/tox.ini 
b/testing/tox.ini index a7d62189..c1d7ddb7 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -6,6 +6,7 @@ commands = py.test {posargs} deps = pytest pytest-twisted + pytest-benchmark mock testscenarios setuptools-trial -- cgit v1.2.3 From 6639cf0d00fa5bdfc0f43d4dea5c5055776130b8 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Tue, 9 Aug 2016 23:54:33 -0300 Subject: [test] use a nested func to simplify scenarios If we have many scenarios (like 20/500k, 100/100k, 1000,10k) then making a nested function to generate tests based on scenario parameters simplifies the code a lot. --- testing/tests/perf/conftest.py | 6 ++- testing/tests/perf/test_sync.py | 104 ++++++++++++---------------------------- 2 files changed, 35 insertions(+), 75 deletions(-) diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py index 8a75d0ae..4b2186db 100644 --- a/testing/tests/perf/conftest.py +++ b/testing/tests/perf/conftest.py @@ -208,7 +208,7 @@ def txbenchmark_with_setup(benchmark): # @pytest.fixture() -def soledad_client(tmpdir, soledad_server, remote_db, soledad_dbs): +def soledad_client(tmpdir, soledad_server, remote_db, soledad_dbs, request): passphrase = DEFAULT_PASSPHRASE server_url = DEFAULT_URL token = DEFAULT_TOKEN @@ -220,7 +220,7 @@ def soledad_client(tmpdir, soledad_server, remote_db, soledad_dbs): local_db_path = os.path.join(tmpdir.strpath, '%s.db' % uuid4().hex) remote_db(uuid) soledad_dbs(uuid) - return Soledad( + soledad_client = Soledad( uuid, unicode(passphrase), secrets_path=secrets_path, @@ -229,4 +229,6 @@ def soledad_client(tmpdir, soledad_server, remote_db, soledad_dbs): cert_file=None, auth_token=token, defer_encryption=True) + request.addfinalizer(soledad_client.close) + return soledad_client return create diff --git a/testing/tests/perf/test_sync.py b/testing/tests/perf/test_sync.py index ea109d05..146f1394 100644 --- a/testing/tests/perf/test_sync.py +++ b/testing/tests/perf/test_sync.py @@ -15,87 +15,45 @@ def load_up(client, amount, size): return d -@pytest.inlineCallbacks -@pytest.mark.benchmark(group="test_upload") -def test_upload_20_500k(soledad_client, txbenchmark_with_setup): - uploads, size, client = 20, 500*1000, soledad_client() +def create_upload(uploads, size): + @pytest.inlineCallbacks + @pytest.mark.benchmark(group="test_upload") + def test(soledad_client, txbenchmark_with_setup): + client = soledad_client() - def setup(): - return load_up(client, uploads, size) + def setup(): + return load_up(client, uploads, size) - yield txbenchmark_with_setup(setup, client.sync) + yield txbenchmark_with_setup(setup, client.sync) + return test -@pytest.inlineCallbacks -@pytest.mark.benchmark(group="test_upload") -def test_upload_100_100k(soledad_client, txbenchmark_with_setup): - uploads, size, client = 100, 100*1000, soledad_client() +test_upload_20_500k = create_upload(20, 500*1000) +test_upload_100_100k = create_upload(100, 100*1000) +test_upload_1000_10k = create_upload(1000, 10*1000) - def setup(): - return load_up(client, uploads, size) - yield txbenchmark_with_setup(setup, client.sync) +def create_download(downloads, size): + @pytest.inlineCallbacks + @pytest.mark.benchmark(group="test_download") + def test(soledad_client, txbenchmark_with_setup): + client = soledad_client() + yield load_up(client, downloads, size) + yield client.sync() + # We could create them directly on couch, but sending them + # ensures we are dealing with properly encrypted docs -@pytest.inlineCallbacks -@pytest.mark.benchmark(group="test_upload") -def 
test_upload_1000_10k(soledad_client, txbenchmark_with_setup): - uploads, size, client = 1000, 10*1000, soledad_client() + def setup(): + clean_client = soledad_client() + return (clean_client,), {} - def setup(): - return load_up(client, uploads, size) + def sync(clean_client): + return clean_client.sync() + yield txbenchmark_with_setup(setup, sync) + return test - yield txbenchmark_with_setup(setup, client.sync) - -@pytest.inlineCallbacks -@pytest.mark.benchmark(group="test_download") -def test_download_20_500k(soledad_client, txbenchmark_with_setup): - downloads, size, client = 20, 500*1000, soledad_client() - - yield load_up(client, downloads, size) - yield client.sync() - - def setup(): - clean_client = soledad_client() - return (clean_client,), {} - - def sync(clean_client): - return clean_client.sync() - yield txbenchmark_with_setup(setup, sync) - - -@pytest.inlineCallbacks -@pytest.mark.benchmark(group="test_download") -def test_download_100_100k(soledad_client, txbenchmark_with_setup): - downloads, size, client = 100, 100*1000, soledad_client() - - yield load_up(client, downloads, size) - yield client.sync() - # We could create them directly on remote db, but sending them - # ensures we are dealing with properly encrypted docs - - def setup(): - clean_client = soledad_client() - return (clean_client,), {} - - def sync(clean_client): - return clean_client.sync() - yield txbenchmark_with_setup(setup, sync) - - -@pytest.inlineCallbacks -@pytest.mark.benchmark(group="test_download") -def test_download_1000_10k(soledad_client, txbenchmark_with_setup): - downloads, size, client = 1000, 10*1000, soledad_client() - - yield load_up(client, downloads, size) - yield client.sync() - - def setup(): - clean_client = soledad_client() - return (clean_client,), {} - - def sync(clean_client): - return clean_client.sync() - yield txbenchmark_with_setup(setup, sync) +test_download_20_500k = create_download(20, 500*1000) +test_download_100_100k = create_download(100, 100*1000) +test_download_1000_10k = create_download(1000, 10*1000) -- cgit v1.2.3 From 7c811131771af33370aa04b33dc70f6ed2cc637a Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Tue, 9 Aug 2016 23:57:18 -0300 Subject: [test] adds sqlcipher create tests Creating 20/500k, 100/100k and 1000/10k. 
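The scenarios below reuse the nested-function trick from the previous commit:
pytest collects any module-level name starting with "test_", so binding the
closures returned by a factory is enough to register one benchmark per
documents/size pair (the 20/500k naming means 20 documents of 500k bytes
each). A minimal sketch of the pattern, with do_work standing in as a
hypothetical placeholder for the measured operation:

    def build_test(amount, size):
        def test(benchmark):
            # pytest-benchmark times the wrapped callable over many rounds
            benchmark(do_work, amount, size)
        return test

    test_20_500k = build_test(20, 500 * 1000)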
--- testing/tests/perf/test_sqlcipher.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 testing/tests/perf/test_sqlcipher.py diff --git a/testing/tests/perf/test_sqlcipher.py b/testing/tests/perf/test_sqlcipher.py new file mode 100644 index 00000000..8e82d172 --- /dev/null +++ b/testing/tests/perf/test_sqlcipher.py @@ -0,0 +1,32 @@ +''' +Tests SoledadClient/SQLCipher interaction +''' +import pytest + +from twisted.internet.defer import gatherResults + + +def load_up(client, amount, size): + content = 'x'*size + deferreds = [] + # create a bunch of local documents + for i in xrange(amount): + d = client.create_doc({'content': content}) + deferreds.append(d) + d = gatherResults(deferreds) + d.addCallback(lambda _: None) + return d + + +def build_test_sqlcipher_create(amount, size): + @pytest.inlineCallbacks + @pytest.mark.benchmark(group="test_sqlcipher_create") + def test(soledad_client, txbenchmark): + client = soledad_client() + yield txbenchmark(load_up, client, amount, size) + return test + + +test_create_20_500k = build_test_sqlcipher_create(20, 500*1000) +test_create_100_100k = build_test_sqlcipher_create(100, 100*1000) +test_create_1000_10k = build_test_sqlcipher_create(1000, 10*1000) -- cgit v1.2.3 From 481fa255bf3cb53fd932bd984cd40d097ca7bb61 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 15 Aug 2016 15:42:23 -0300 Subject: [test] adds sqlcipher synchronous tests --- testing/tests/perf/test_sqlcipher.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/testing/tests/perf/test_sqlcipher.py b/testing/tests/perf/test_sqlcipher.py index 8e82d172..511080a1 100644 --- a/testing/tests/perf/test_sqlcipher.py +++ b/testing/tests/perf/test_sqlcipher.py @@ -6,27 +6,40 @@ import pytest from twisted.internet.defer import gatherResults -def load_up(client, amount, size): +def load_up(client, amount, size, defer=True): content = 'x'*size deferreds = [] # create a bunch of local documents for i in xrange(amount): d = client.create_doc({'content': content}) deferreds.append(d) - d = gatherResults(deferreds) - d.addCallback(lambda _: None) - return d + if defer: + d = gatherResults(deferreds) + d.addCallback(lambda _: None) + return d -def build_test_sqlcipher_create(amount, size): +def build_test_sqlcipher_async_create(amount, size): @pytest.inlineCallbacks - @pytest.mark.benchmark(group="test_sqlcipher_create") + @pytest.mark.benchmark(group="test_sqlcipher_async_create") def test(soledad_client, txbenchmark): client = soledad_client() yield txbenchmark(load_up, client, amount, size) return test +def build_test_sqlcipher_create(amount, size): + @pytest.mark.benchmark(group="test_sqlcipher_create") + def test(soledad_client, benchmark): + client = soledad_client()._dbsyncer + benchmark(load_up, client, amount, size, defer=False) + return test + + +test_async_create_20_500k = build_test_sqlcipher_async_create(20, 500*1000) +test_async_create_100_100k = build_test_sqlcipher_async_create(100, 100*1000) +test_async_create_1000_10k = build_test_sqlcipher_async_create(1000, 10*1000) +# synchronous test_create_20_500k = build_test_sqlcipher_create(20, 500*1000) test_create_100_100k = build_test_sqlcipher_create(100, 100*1000) test_create_1000_10k = build_test_sqlcipher_create(1000, 10*1000) -- cgit v1.2.3 From 6f5bc4d7301147c35a9651cf423804c6de252647 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 19 Aug 2016 17:04:27 -0300 Subject: [test] make all nested fixture function-scoped function is the default 
scope, so there is no need to pass this parameter. Previously, one of the scopes was 'module', but it is a nested function that fires on demand, so it should clean up itself from test to test in order to avoid conflict while putting. --- testing/tests/perf/conftest.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py index 4b2186db..0b66263a 100644 --- a/testing/tests/perf/conftest.py +++ b/testing/tests/perf/conftest.py @@ -80,7 +80,7 @@ class SoledadDatabases(object): requests.delete(self._shared_db_url) -@pytest.fixture(scope='module') +@pytest.fixture() def soledad_dbs(request): couch_url = request.config.option.couch_url @@ -109,7 +109,7 @@ class UserDatabase(object): requests.delete(self._remote_db_url) -@pytest.fixture(scope='function') +@pytest.fixture() def remote_db(request): couch_url = request.config.option.couch_url @@ -178,7 +178,7 @@ def soledad_server(tmpdir_factory, request): return server -@pytest.fixture(scope='function') +@pytest.fixture() def txbenchmark(benchmark): def blockOnThread(*args, **kwargs): return threads.deferToThread( @@ -187,7 +187,7 @@ def txbenchmark(benchmark): return blockOnThread -@pytest.fixture(scope='function') +@pytest.fixture() def txbenchmark_with_setup(benchmark): def blockOnThreadWithSetup(setup, f): def blocking_runner(*args, **kwargs): -- cgit v1.2.3 From 8e3d85950bb54b7818f0356387bba81769787f13 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 19 Aug 2016 17:06:25 -0300 Subject: [test] remove fixed default uuid Use a new one to avoid reusing the same database. --- testing/tests/perf/conftest.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py index 0b66263a..fca9c58d 100644 --- a/testing/tests/perf/conftest.py +++ b/testing/tests/perf/conftest.py @@ -35,7 +35,6 @@ def pytest_addoption(parser): # default options for all tests # -DEFAULT_UUID = '0' DEFAULT_PASSPHRASE = '123' DEFAULT_URL = 'http://127.0.0.1:2424' @@ -84,7 +83,7 @@ class SoledadDatabases(object): def soledad_dbs(request): couch_url = request.config.option.couch_url - def create(uuid=DEFAULT_UUID): + def create(uuid): db = SoledadDatabases(couch_url) request.addfinalizer(db.teardown) return db.setup(uuid) @@ -113,7 +112,7 @@ class UserDatabase(object): def remote_db(request): couch_url = request.config.option.couch_url - def create(uuid=DEFAULT_UUID): + def create(uuid): db = UserDatabase(couch_url, uuid) request.addfinalizer(db.teardown) return db.setup() @@ -212,14 +211,20 @@ def soledad_client(tmpdir, soledad_server, remote_db, soledad_dbs, request): passphrase = DEFAULT_PASSPHRASE server_url = DEFAULT_URL token = DEFAULT_TOKEN + default_uuid = uuid4().hex + remote_db(default_uuid) + soledad_dbs(default_uuid) # get a soledad instance def create(new=False): - uuid = uuid4().hex if new else DEFAULT_UUID secrets_path = os.path.join(tmpdir.strpath, '%s.secret' % uuid4().hex) local_db_path = os.path.join(tmpdir.strpath, '%s.db' % uuid4().hex) - remote_db(uuid) - soledad_dbs(uuid) + if new: + uuid = uuid4().hex + remote_db(uuid) + soledad_dbs(uuid) + else: + uuid = default_uuid soledad_client = Soledad( uuid, unicode(passphrase), -- cgit v1.2.3 From c8c82b6663d122b0933a8459c4710c914edf7f84 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 19 Aug 2016 17:07:04 -0300 Subject: [test] sync without changes Syncing without any changes was reported as slow. This benchmark will help measure it. 
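Note that a sync with nothing to send or receive still runs the full u1db
exchange (client and server still compare generations and transaction ids),
so this isolates the fixed per-sync overhead from payload transfer. One way
to double-check that the measured sync really is a no-op (a sketch, assuming
sync() fires back the unchanged local generation):

    @pytest.inlineCallbacks
    def test_sync_is_noop(soledad_client):
        client = soledad_client()
        gen_before = yield client.sync()
        gen_after = yield client.sync()
        # no documents were created in between, so no new generation
        assert gen_before == gen_after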
---
 testing/tests/perf/test_sync.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/testing/tests/perf/test_sync.py b/testing/tests/perf/test_sync.py
index 146f1394..668ceae7 100644
--- a/testing/tests/perf/test_sync.py
+++ b/testing/tests/perf/test_sync.py
@@ -57,3 +57,15 @@ def create_download(downloads, size):
 test_download_20_500k = create_download(20, 500*1000)
 test_download_100_100k = create_download(100, 100*1000)
 test_download_1000_10k = create_download(1000, 10*1000)
+
+
+@pytest.inlineCallbacks
+@pytest.mark.benchmark(group="test_nothing_to_sync")
+def test_nothing_to_sync(soledad_client, txbenchmark_with_setup):
+    def setup():
+        clean_client = soledad_client()
+        return (clean_client,), {}
+
+    def sync(clean_client):
+        return clean_client.sync()
+    yield txbenchmark_with_setup(setup, sync)
-- cgit v1.2.3


From 1dc2c18f02e62644da00cb0e3326f357953f5c84 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Fri, 19 Aug 2016 17:08:20 -0300
Subject: [test] test soledad instantiation time

It involves heavy scrypt hashing, with room for improvement.
---
 testing/tests/perf/test_misc.py | 6 ++++++
 1 file changed, 6 insertions(+)
 create mode 100644 testing/tests/perf/test_misc.py

diff --git a/testing/tests/perf/test_misc.py b/testing/tests/perf/test_misc.py
new file mode 100644
index 00000000..c2f8e3b1
--- /dev/null
+++ b/testing/tests/perf/test_misc.py
@@ -0,0 +1,6 @@
+import pytest
+
+
+@pytest.mark.benchmark(group="test_instance")
+def test_instance(soledad_client, benchmark):
+    benchmark(soledad_client)
-- cgit v1.2.3


From b75165567539dcd59873395049ce2210776aa166 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Sat, 20 Aug 2016 00:42:34 -0300
Subject: [test] adds encdecpool tests

Most of them are commented out, as memory usage is going out of control
for now.
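The pools are started and stopped explicitly, so the tests below tie
pool.stop to the fixture finalizer; without that, a failing round would leak
the pool's background processing into later tests. The lifecycle, as used in
the setup functions:

    pool = SyncEncrypterPool(client._crypto, client._sync_db)
    pool.start()                     # begin processing queued documents
    request.addfinalizer(pool.stop)  # runs on teardown, pass or fail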
---
 testing/tests/perf/test_encdecpool.py | 84 +++++++++++++++++++++++++++++++++++
 1 file changed, 84 insertions(+)
 create mode 100644 testing/tests/perf/test_encdecpool.py

diff --git a/testing/tests/perf/test_encdecpool.py b/testing/tests/perf/test_encdecpool.py
new file mode 100644
index 00000000..dbbbea89
--- /dev/null
+++ b/testing/tests/perf/test_encdecpool.py
@@ -0,0 +1,84 @@
+import pytest
+import json
+from uuid import uuid4
+from twisted.internet.defer import gatherResults
+from leap.soledad.client.encdecpool import SyncEncrypterPool
+from leap.soledad.client.encdecpool import SyncDecrypterPool
+from leap.soledad.common.document import SoledadDocument
+
+
+def create_encrypt(amount, size):
+    @pytest.mark.benchmark(group="test_pool_encrypt")
+    @pytest.inlineCallbacks
+    def test(soledad_client, txbenchmark_with_setup, request):
+        DOC_CONTENT = {'payload': 'x'*size}
+
+        def setup():
+            client = soledad_client()
+            pool = SyncEncrypterPool(client._crypto, client._sync_db)
+            pool.start()
+            request.addfinalizer(pool.stop)
+            return (pool,), {}
+
+        @pytest.inlineCallbacks
+        def put_and_wait(pool):
+            doc_ids = []
+            deferreds = []
+            for _ in xrange(amount):
+                doc = SoledadDocument(
+                    doc_id=uuid4().hex, rev='rev',
+                    json=json.dumps(DOC_CONTENT))
+                deferreds.append(pool.encrypt_doc(doc))
+                doc_ids.append(doc.doc_id)
+            yield gatherResults(deferreds)
+
+        yield txbenchmark_with_setup(setup, put_and_wait)
+    return test
+
+test_encrypt_1000_10k = create_encrypt(1000, 10*1000)
+# test_encrypt_1000_500k = create_encrypt(1000, 500*1000)
+# test_encrypt_1000_1M = create_encrypt(1000, 1000*1000)
+# test_encrypt_1000_10M = create_encrypt(1000, 10*1000*1000)
+
+
+def create_decrypt(amount, size):
+    @pytest.mark.benchmark(group="test_pool_decrypt")
+    @pytest.inlineCallbacks
+    def test(soledad_client, txbenchmark_with_setup, request):
+        DOC_CONTENT = {'payload': 'x'*size}
+        client = soledad_client()
+
+        def setup():
+            pool = SyncDecrypterPool(
+                client._crypto,
+                client._sync_db,
+                source_replica_uid=client._dbpool.replica_uid,
+                insert_doc_cb=lambda x, y, z: False)  # ignored
+            pool.start(amount)
+            request.addfinalizer(pool.stop)
+            crypto = client._crypto
+            docs = []
+            for _ in xrange(amount):
+                doc = SoledadDocument(
+                    doc_id=uuid4().hex, rev='rev',
+                    json=json.dumps(DOC_CONTENT))
+                encrypted_content = json.loads(crypto.encrypt_doc(doc))
+                docs.append((doc.doc_id, encrypted_content))
+            return (pool, docs), {}
+
+        def put_and_wait(pool, docs):
+            deferreds = []  # fires on completion
+            for idx, (doc_id, content) in enumerate(docs, 1):
+                deferreds.append(pool.insert_encrypted_received_doc(
+                    doc_id, 'rev', content, idx, "trans_id", idx))
+            return gatherResults(deferreds)
+
+        yield txbenchmark_with_setup(setup, put_and_wait)
+    return test
+
+test_decrypt_1000_10k = create_decrypt(1000, 10*1000)
+test_decrypt_1000_100k = create_decrypt(1000, 10*1000)
+# memory issues ahead
+# test_decrypt_1000_500k = create_decrypt(1000, 500*1000)
+# test_decrypt_1000_1M = create_decrypt(1000, 1000*1000)
+# test_decrypt_1000_10M = create_decrypt(1000, 10*1000*1000)
-- cgit v1.2.3


From 062d781b734db60d0ae317eaf5b86c7d75abacd9 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Sat, 20 Aug 2016 00:44:02 -0300
Subject: [bug] limit pool consumption to 900 docs

This was discovered during load tests: trying to process more than 999
docs triggers an error on SQLite, since a select query cannot take more
than 999 bound values.
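The cap comes from SQLite's bound-parameter limit: one statement may use at
most SQLITE_MAX_VARIABLE_NUMBER variables, 999 by default, so a
SELECT ... IN (?, ?, ...) built with 1000 placeholders is rejected. A
minimal reproduction with the stdlib sqlite3 module:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE docs (gen INTEGER)')
    params = range(1000)
    placeholders = ','.join('?' * len(params))
    # raises sqlite3.OperationalError: too many SQL variables
    conn.execute('SELECT * FROM docs WHERE gen IN (%s)' % placeholders, params)

Stopping at 900 leaves some headroom below the 999 default.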
--- client/src/leap/soledad/client/encdecpool.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/client/src/leap/soledad/client/encdecpool.py b/client/src/leap/soledad/client/encdecpool.py index 2cf5da6e..7be6030e 100644 --- a/client/src/leap/soledad/client/encdecpool.py +++ b/client/src/leap/soledad/client/encdecpool.py @@ -556,6 +556,8 @@ class SyncDecrypterPool(SyncEncryptDecryptPool): while next_index in self._decrypted_docs_indexes: sequence.append(str(next_index)) next_index += 1 + if len(sequence) > 900: + break # SQLITE_LIMIT_VARIABLE_NUMBER # Then fetch all the ones ready for insertion. if sequence: insertable_docs = yield self._get_docs(encrypted=False, -- cgit v1.2.3 From 585d6b2461869594210639548549aa6be336e752 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Sat, 20 Aug 2016 01:27:47 -0300 Subject: [test] adds test for SoledadCrypto 10k, 100k, 500k, 1m, 10m and 50m for encryption and decryption of a whole document. --- testing/tests/perf/test_crypto.py | 48 +++++++++++++++++++++++++++++++++++ testing/tests/perf/test_encdecpool.py | 18 ++++++------- 2 files changed, 57 insertions(+), 9 deletions(-) create mode 100644 testing/tests/perf/test_crypto.py diff --git a/testing/tests/perf/test_crypto.py b/testing/tests/perf/test_crypto.py new file mode 100644 index 00000000..84c89a0d --- /dev/null +++ b/testing/tests/perf/test_crypto.py @@ -0,0 +1,48 @@ +import pytest +import json +from uuid import uuid4 +from leap.soledad.common.document import SoledadDocument + + +def create_doc_encryption(size): + @pytest.mark.benchmark(group="test_crypto_encrypt_doc") + def test_doc_encryption(soledad_client, benchmark): + crypto = soledad_client()._crypto + + DOC_CONTENT = {'payload': 'x'*size} + doc = SoledadDocument( + doc_id=uuid4().hex, rev='rev', + json=json.dumps(DOC_CONTENT)) + + benchmark(crypto.encrypt_doc, doc) + return test_doc_encryption + + +def create_doc_decryption(size): + @pytest.mark.benchmark(group="test_crypto_decrypt_doc") + def test_doc_decryption(soledad_client, benchmark): + crypto = soledad_client()._crypto + + DOC_CONTENT = {'payload': 'x'*size} + doc = SoledadDocument( + doc_id=uuid4().hex, rev='rev', + json=json.dumps(DOC_CONTENT)) + encrypted_doc = crypto.encrypt_doc(doc) + doc.set_json(encrypted_doc) + + benchmark(crypto.decrypt_doc, doc) + return test_doc_decryption + + +test_encrypt_doc_10k = create_doc_encryption(10*1000) +test_encrypt_doc_100k = create_doc_encryption(100*1000) +test_encrypt_doc_500k = create_doc_encryption(500*1000) +test_encrypt_doc_1M = create_doc_encryption(1000*1000) +test_encrypt_doc_10M = create_doc_encryption(10*1000*1000) +test_encrypt_doc_50M = create_doc_encryption(50*1000*1000) +test_decrypt_doc_10k = create_doc_decryption(10*1000) +test_decrypt_doc_100k = create_doc_decryption(100*1000) +test_decrypt_doc_500k = create_doc_decryption(500*1000) +test_decrypt_doc_1M = create_doc_decryption(1000*1000) +test_decrypt_doc_10M = create_doc_decryption(10*1000*1000) +test_decrypt_doc_50M = create_doc_decryption(50*1000*1000) diff --git a/testing/tests/perf/test_encdecpool.py b/testing/tests/perf/test_encdecpool.py index dbbbea89..abc58e35 100644 --- a/testing/tests/perf/test_encdecpool.py +++ b/testing/tests/perf/test_encdecpool.py @@ -35,10 +35,10 @@ def create_encrypt(amount, size): yield txbenchmark_with_setup(setup, put_and_wait) return test -test_encrypt_1000_10k = create_encrypt(1000, 10*1000) -# test_encrypt_1000_500k = create_encrypt(1000, 500*1000) -# test_encrypt_1000_1M = create_encrypt(1000, 1000*1000) -# test_encrypt_1000_10M 
= create_encrypt(1000, 10*1000*1000) +test_encdecpool_encrypt_1000_10k = create_encrypt(1000, 10*1000) +# test_encdecpool_encrypt_1000_500k = create_encrypt(1000, 500*1000) +# test_encdecpool_encrypt_1000_1M = create_encrypt(1000, 1000*1000) +# test_encdecpool_encrypt_1000_10M = create_encrypt(1000, 10*1000*1000) def create_decrypt(amount, size): @@ -76,9 +76,9 @@ def create_decrypt(amount, size): yield txbenchmark_with_setup(setup, put_and_wait) return test -test_decrypt_1000_10k = create_decrypt(1000, 10*1000) -test_decrypt_1000_100k = create_decrypt(1000, 10*1000) +test_encdecpool_decrypt_1000_10k = create_decrypt(1000, 10*1000) +test_encdecpool_decrypt_1000_100k = create_decrypt(1000, 10*1000) # memory issues ahead -# test_decrypt_1000_500k = create_decrypt(1000, 500*1000) -# test_decrypt_1000_1M = create_decrypt(1000, 1000*1000) -# test_decrypt_1000_10M = create_decrypt(1000, 10*1000*1000) +# test_encdecpool_decrypt_1000_500k = create_decrypt(1000, 500*1000) +# test_encdecpool_decrypt_1000_1M = create_decrypt(1000, 1000*1000) +# test_encdecpool_decrypt_1000_10M = create_decrypt(1000, 10*1000*1000) -- cgit v1.2.3 From fa4bf209bef0ca4fd6145c8d518c3d99f770cb65 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Sat, 20 Aug 2016 01:42:13 -0300 Subject: [test] adds tests for raw encryption Hypothesis: raw vs doc Added the same sizes set (10k, 100k, 500k, 1M, 10M, 50M) as the document crypto test, so we can compare how close to raw the higher level operation is. --- testing/tests/perf/test_crypto.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/testing/tests/perf/test_crypto.py b/testing/tests/perf/test_crypto.py index 84c89a0d..a32ef593 100644 --- a/testing/tests/perf/test_crypto.py +++ b/testing/tests/perf/test_crypto.py @@ -2,6 +2,8 @@ import pytest import json from uuid import uuid4 from leap.soledad.common.document import SoledadDocument +from leap.soledad.client.crypto import encrypt_sym +from leap.soledad.client.crypto import decrypt_sym def create_doc_encryption(size): @@ -46,3 +48,34 @@ test_decrypt_doc_500k = create_doc_decryption(500*1000) test_decrypt_doc_1M = create_doc_decryption(1000*1000) test_decrypt_doc_10M = create_doc_decryption(10*1000*1000) test_decrypt_doc_50M = create_doc_decryption(50*1000*1000) + +KEY = 'x'*32 + + +def create_raw_encryption(size): + @pytest.mark.benchmark(group="test_crypto_raw_encrypt") + def test_raw_encrypt(benchmark): + benchmark(encrypt_sym, 'x'*size, KEY) + return test_raw_encrypt + + +def create_raw_decryption(size): + @pytest.mark.benchmark(group="test_crypto_raw_decrypt") + def test_raw_decrypt(benchmark): + iv, ciphertext = encrypt_sym('x'*size, KEY) + benchmark(decrypt_sym, ciphertext, KEY, iv) + return test_raw_decrypt + + +test_encrypt_raw_10k = create_raw_encryption(10*1000) +test_encrypt_raw_100k = create_raw_encryption(100*1000) +test_encrypt_raw_500k = create_raw_encryption(500*1000) +test_encrypt_raw_1M = create_raw_encryption(1000*1000) +test_encrypt_raw_10M = create_raw_encryption(10*1000*1000) +test_encrypt_raw_50M = create_raw_encryption(50*1000*1000) +test_decrypt_raw_10k = create_raw_decryption(10*1000) +test_decrypt_raw_100k = create_raw_decryption(100*1000) +test_decrypt_raw_500k = create_raw_decryption(500*1000) +test_decrypt_raw_1M = create_raw_decryption(1000*1000) +test_decrypt_raw_10M = create_raw_decryption(10*1000*1000) +test_decrypt_raw_50M = create_raw_decryption(50*1000*1000) -- cgit v1.2.3 From cf91133809bab11ee43f20178944f91b1466bfd5 Mon Sep 17 00:00:00 2001 From: Victor 
Shyba
Date: Sat, 20 Aug 2016 02:09:50 -0300
Subject: [test] calibrate encdecpool bench for memory

1000 docs at 100k~500k are exploding memory (4Gb+4Gb swap). Changed to
100 docs in order to be able to get measurements on higher loads. Now
it's 10k, 100k and 500k.
---
 testing/tests/perf/test_encdecpool.py | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/testing/tests/perf/test_encdecpool.py b/testing/tests/perf/test_encdecpool.py
index abc58e35..681b909a 100644
--- a/testing/tests/perf/test_encdecpool.py
+++ b/testing/tests/perf/test_encdecpool.py
@@ -35,10 +35,9 @@ def create_encrypt(amount, size):
         yield txbenchmark_with_setup(setup, put_and_wait)
     return test
 
-test_encdecpool_encrypt_1000_10k = create_encrypt(1000, 10*1000)
-# test_encdecpool_encrypt_1000_500k = create_encrypt(1000, 500*1000)
-# test_encdecpool_encrypt_1000_1M = create_encrypt(1000, 1000*1000)
-# test_encdecpool_encrypt_1000_10M = create_encrypt(1000, 10*1000*1000)
+test_encdecpool_encrypt_100_10k = create_encrypt(100, 10*1000)
+test_encdecpool_encrypt_100_100k = create_encrypt(100, 100*1000)
+test_encdecpool_encrypt_100_500k = create_encrypt(100, 500*1000)
 
 
 def create_decrypt(amount, size):
@@ -76,9 +75,6 @@ def create_decrypt(amount, size):
         yield txbenchmark_with_setup(setup, put_and_wait)
     return test
 
-test_encdecpool_decrypt_1000_10k = create_decrypt(1000, 10*1000)
-test_encdecpool_decrypt_1000_100k = create_decrypt(1000, 10*1000)
-# memory issues ahead
-# test_encdecpool_decrypt_1000_500k = create_decrypt(1000, 500*1000)
-# test_encdecpool_decrypt_1000_1M = create_decrypt(1000, 1000*1000)
-# test_encdecpool_decrypt_1000_10M = create_decrypt(1000, 10*1000*1000)
+test_encdecpool_decrypt_100_10k = create_decrypt(100, 10*1000)
+test_encdecpool_decrypt_100_100k = create_decrypt(100, 100*1000)
+test_encdecpool_decrypt_100_500k = create_decrypt(100, 500*1000)
-- cgit v1.2.3


From 4f5ecb4c719a3a842d852fbaab549d2881d6528f Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Mon, 22 Aug 2016 18:00:52 -0300
Subject: [test] make txbench ignore kwargs for readability

They aren't used so far, and using empty dicts to make them work is
ugly. Removing them leaves the setup functions' return code clean and
readable.
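For context, pytest-benchmark's pedantic mode expects its setup hook to
return an (args, kwargs) pair. The change below normalizes whatever the
benchmark's own setup returns into that shape, so call sites can simply
write:

    def setup():
        return pool, docs  # tuple: splatted into f(pool, docs)
        # return client    # single object: passed as f(client)
        # return None      # no arguments: f()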
--- testing/tests/perf/conftest.py | 6 +++++- testing/tests/perf/test_encdecpool.py | 4 ++-- testing/tests/perf/test_sync.py | 6 ++---- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py index fca9c58d..68e0fb38 100644 --- a/testing/tests/perf/conftest.py +++ b/testing/tests/perf/conftest.py @@ -193,7 +193,11 @@ def txbenchmark_with_setup(benchmark): return threads.blockingCallFromThread(reactor, f, *args, **kwargs) def blocking_setup(): - return threads.blockingCallFromThread(reactor, setup) + args = threads.blockingCallFromThread(reactor, setup) + try: + return tuple(arg for arg in args), {} + except TypeError: + return ((args,), {}) if args else None def bench(): return benchmark.pedantic(blocking_runner, setup=blocking_setup, diff --git a/testing/tests/perf/test_encdecpool.py b/testing/tests/perf/test_encdecpool.py index 681b909a..4eb990a8 100644 --- a/testing/tests/perf/test_encdecpool.py +++ b/testing/tests/perf/test_encdecpool.py @@ -18,7 +18,7 @@ def create_encrypt(amount, size): pool = SyncEncrypterPool(client._crypto, client._sync_db) pool.start() request.addfinalizer(pool.stop) - return (pool,), {} + return pool @pytest.inlineCallbacks def put_and_wait(pool): @@ -63,7 +63,7 @@ def create_decrypt(amount, size): json=json.dumps(DOC_CONTENT)) encrypted_content = json.loads(crypto.encrypt_doc(doc)) docs.append((doc.doc_id, encrypted_content)) - return (pool, docs), {} + return pool, docs def put_and_wait(pool, docs): deferreds = [] # fires on completion diff --git a/testing/tests/perf/test_sync.py b/testing/tests/perf/test_sync.py index 668ceae7..0be9d12f 100644 --- a/testing/tests/perf/test_sync.py +++ b/testing/tests/perf/test_sync.py @@ -45,8 +45,7 @@ def create_download(downloads, size): # ensures we are dealing with properly encrypted docs def setup(): - clean_client = soledad_client() - return (clean_client,), {} + return soledad_client() def sync(clean_client): return clean_client.sync() @@ -63,8 +62,7 @@ test_download_1000_10k = create_download(1000, 10*1000) @pytest.mark.benchmark(group="test_nothing_to_sync") def test_nothing_to_sync(soledad_client, txbenchmark_with_setup): def setup(): - clean_client = soledad_client() - return (clean_client,), {} + return soledad_client() def sync(clean_client): return clean_client.sync() -- cgit v1.2.3 From 71b13eda4ec5a577df1ccf4b07e26cbd87eaaa3e Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 22 Aug 2016 18:52:51 -0300 Subject: [tests] remove unused new=False logic --- testing/tests/perf/conftest.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py index 68e0fb38..9abd0c54 100644 --- a/testing/tests/perf/conftest.py +++ b/testing/tests/perf/conftest.py @@ -220,17 +220,11 @@ def soledad_client(tmpdir, soledad_server, remote_db, soledad_dbs, request): soledad_dbs(default_uuid) # get a soledad instance - def create(new=False): + def create(): secrets_path = os.path.join(tmpdir.strpath, '%s.secret' % uuid4().hex) local_db_path = os.path.join(tmpdir.strpath, '%s.db' % uuid4().hex) - if new: - uuid = uuid4().hex - remote_db(uuid) - soledad_dbs(uuid) - else: - uuid = default_uuid soledad_client = Soledad( - uuid, + default_uuid, unicode(passphrase), secrets_path=secrets_path, local_db_path=local_db_path, -- cgit v1.2.3 From bad25ba9a5e7e5296b79544f50cafc47599a76b9 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 22 Aug 2016 18:53:49 -0300 Subject: [tests] move doc 
creation to setup

Otherwise it will add unrelated overhead to the results.
---
 testing/tests/perf/test_encdecpool.py | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/testing/tests/perf/test_encdecpool.py b/testing/tests/perf/test_encdecpool.py
index 4eb990a8..919fdaa7 100644
--- a/testing/tests/perf/test_encdecpool.py
+++ b/testing/tests/perf/test_encdecpool.py
@@ -18,19 +18,16 @@ def create_encrypt(amount, size):
             pool = SyncEncrypterPool(client._crypto, client._sync_db)
             pool.start()
             request.addfinalizer(pool.stop)
-            return pool
+            docs = [
+                SoledadDocument(doc_id=uuid4().hex, rev='rev',
+                                json=json.dumps(DOC_CONTENT))
+                for _ in xrange(amount)
+            ]
+            return pool, docs
 
         @pytest.inlineCallbacks
-        def put_and_wait(pool):
-            doc_ids = []
-            deferreds = []
-            for _ in xrange(amount):
-                doc = SoledadDocument(
-                    doc_id=uuid4().hex, rev='rev',
-                    json=json.dumps(DOC_CONTENT))
-                deferreds.append(pool.encrypt_doc(doc))
-                doc_ids.append(doc.doc_id)
-            yield gatherResults(deferreds)
+        def put_and_wait(pool, docs):
+            yield gatherResults([pool.encrypt_doc(doc) for doc in docs])
 
         yield txbenchmark_with_setup(setup, put_and_wait)
     return test
-- cgit v1.2.3


From a64095ad5e6c6a36221444a34d8d56c0ae1c6150 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Mon, 22 Aug 2016 19:22:15 -0300
Subject: [test] refactor load_up for readability

The defer parameter wasn't clear.
---
 testing/tests/perf/test_sqlcipher.py | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/testing/tests/perf/test_sqlcipher.py b/testing/tests/perf/test_sqlcipher.py
index 511080a1..1fce1c3e 100644
--- a/testing/tests/perf/test_sqlcipher.py
+++ b/testing/tests/perf/test_sqlcipher.py
@@ -8,15 +8,9 @@ import pytest
 from twisted.internet.defer import gatherResults
 
 
 def load_up(client, amount, size, defer=True):
     content = 'x'*size
-    deferreds = []
-    # create a bunch of local documents
-    for i in xrange(amount):
-        d = client.create_doc({'content': content})
-        deferreds.append(d)
+    results = [client.create_doc({'content': content}) for _ in xrange(amount)]
     if defer:
-        d = gatherResults(deferreds)
-        d.addCallback(lambda _: None)
-        return d
+        return gatherResults(results)
 
-- cgit v1.2.3


From b847ffe282fe0e08783447359cbbf745a2c8f376 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Tue, 23 Aug 2016 15:40:08 -0300
Subject: [test] point issue #7370 as reason for low values

We are using lower values on test_encdecpool due to high memory usage,
described in #7370. Added a comment to explain it.
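As a rough back-of-the-envelope check: 1000 documents of 500k is about
500MB of raw payload, and each document is held in several forms at once
(the JSON string, the encrypted copy in the sync db, the decrypted copy
waiting for ordered insertion), so peak usage can multiply to a few
gigabytes, consistent with the 4Gb+4Gb swap reported in the calibration
commit above.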
---
 testing/tests/perf/test_encdecpool.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/testing/tests/perf/test_encdecpool.py b/testing/tests/perf/test_encdecpool.py
index 919fdaa7..3c501084 100644
--- a/testing/tests/perf/test_encdecpool.py
+++ b/testing/tests/perf/test_encdecpool.py
@@ -5,6 +5,7 @@ from twisted.internet.defer import gatherResults
 from leap.soledad.client.encdecpool import SyncEncrypterPool
 from leap.soledad.client.encdecpool import SyncDecrypterPool
 from leap.soledad.common.document import SoledadDocument
+# FIXME: test load is low due issue #7370, higher values will get out of memory
 
 
 def create_encrypt(amount, size):
-- cgit v1.2.3


From eaf8907696a7b5ae0b1411f0770f59c0ed5f9fc4 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Tue, 23 Aug 2016 15:48:55 -0300
Subject: [doc] improve SQLITE_MAX_VARIABLE_NUMBER comments

---
 client/src/leap/soledad/client/encdecpool.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/client/src/leap/soledad/client/encdecpool.py b/client/src/leap/soledad/client/encdecpool.py
index 7be6030e..74a40931 100644
--- a/client/src/leap/soledad/client/encdecpool.py
+++ b/client/src/leap/soledad/client/encdecpool.py
@@ -557,7 +557,11 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
             sequence.append(str(next_index))
             next_index += 1
             if len(sequence) > 900:
-                break  # SQLITE_LIMIT_VARIABLE_NUMBER
+                # 999 is the default value of SQLITE_MAX_VARIABLE_NUMBER
+                # if we try to query more, SQLite will refuse
+                # we need to find a way to improve this
+                # being researched in #7669
+                break
         # Then fetch all the ones ready for insertion.
         if sequence:
             insertable_docs = yield self._get_docs(encrypted=False,
-- cgit v1.2.3


From bebbaad4988c4f4ec26d37791f7738ea27719fca Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Tue, 23 Aug 2016 15:53:04 -0300
Subject: [test] test_instance -> test_initialization

This isn't a test, but a benchmark. "Initialization" sounds more like
an operation, while "instance" is just a thing.
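For clarity, the renamed benchmark times the fixture's create() closure, so
each round measures one full client construction, dominated by the
scrypt-based secrets bootstrap noted in the earlier instantiation commit:

    @pytest.mark.benchmark(group="test_instance")
    def test_initialization(soledad_client, benchmark):
        # soledad_client is a factory: each call runs Soledad(...),
        # including secrets derivation and local database setup
        benchmark(soledad_client)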
---
 testing/tests/perf/test_misc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testing/tests/perf/test_misc.py b/testing/tests/perf/test_misc.py
index c2f8e3b1..ead48adf 100644
--- a/testing/tests/perf/test_misc.py
+++ b/testing/tests/perf/test_misc.py
@@ -2,5 +2,5 @@ import pytest
 
 
 @pytest.mark.benchmark(group="test_instance")
-def test_instance(soledad_client, benchmark):
+def test_initialization(soledad_client, benchmark):
     benchmark(soledad_client)
-- cgit v1.2.3


From 46bb2b65e6fe642b07dee1de6c628c6f2cd303fd Mon Sep 17 00:00:00 2001
From: drebs
Date: Fri, 19 Aug 2016 11:50:33 -0300
Subject: [pkg] add --pdb option to migration script

---
 scripts/migration/0.8.2/migrate.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py
index adc0f7d9..fe612221 100755
--- a/scripts/migration/0.8.2/migrate.py
+++ b/scripts/migration/0.8.2/migrate.py
@@ -85,10 +85,22 @@ def _parse_args():
     parser.add_argument(
         '--log-file',
         help='the log file to use')
+    parser.add_argument(
+        '--pdb', action='store_true',
+        help='escape to pdb shell in case of exception')
     return parser.parse_args()
 
 
+def _enable_pdb():
+    import sys
+    from IPython.core import ultratb
+    sys.excepthook = ultratb.FormattedTB(
+        mode='Verbose', color_scheme='Linux', call_pdb=1)
+
+
 if __name__ == '__main__':
     args = _parse_args()
+    if args.pdb:
+        _enable_pdb()
     _configure_logger(args.log_file)
     migrate(args, TARGET_VERSION)
-- cgit v1.2.3


From 4ed6bb54f1cc96ee0a8b98914c98b94edc1d1b1c Mon Sep 17 00:00:00 2001
From: drebs
Date: Fri, 19 Aug 2016 12:14:40 -0300
Subject: [pkg] add leftovers deletion to couch schema migration script

Previous versions of the couchdb schema used the documents
"u1db_sync_log" and "u1db_sync_state" to store sync metadata. At some
point this was changed, but the documents might have stayed behind as
leftovers. This commit adds the deletion of such documents to the
migration script.
---
 scripts/migration/0.8.2/migrate_couch_schema/__init__.py | 15 +++++++++++++++
 scripts/migration/0.8.2/tests/conftest.py                |  6 +++++-
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py
index 37e5a525..60214aae 100644
--- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py
+++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py
@@ -119,6 +119,21 @@ def _migrate_sync_docs(db, do_migrate):
     for row in view.rows:
         old_doc = row['doc']
         old_id = old_doc['_id']
+
+        # older schemas used different documents with ids starting with
+        # "u1db_sync" to store sync-related data:
+        #
+        # - u1db_sync_log: was used to store the whole sync log.
+        # - u1db_sync_state: was used to store the sync state.
+        #
+        # if any of these documents exist in the current db, they are leftover
+        # from previous migrations, and should just be removed.
+ if old_id in ['u1db_sync_log', 'u1db_sync_state']: + logger.info('removing leftover "u1db_sync_log" document...') + if do_migrate: + db.delete(old_doc) + continue + replica_uid = old_id.replace('u1db_sync_', '') new_id = "%s%s" % (SYNC_DOC_ID_PREFIX, replica_uid) new_doc = { diff --git a/scripts/migration/0.8.2/tests/conftest.py b/scripts/migration/0.8.2/tests/conftest.py index 92d1e17e..8e49891c 100644 --- a/scripts/migration/0.8.2/tests/conftest.py +++ b/scripts/migration/0.8.2/tests/conftest.py @@ -31,7 +31,11 @@ initial_docs = [ {'_id': 'doc2', 'u1db_transactions': [(2, 'trans-2'), (4, 'trans-4')]}, {'_id': '_design/docs'}, {'_id': '_design/syncs'}, - {'_id': '_design/transactions', 'views': {'log': {'map': transaction_map}}} + {'_id': '_design/transactions', + 'views': {'log': {'map': transaction_map}}}, + # the following should be removed if found in the dbs + {'_id': 'u1db_sync_log'}, + {'_id': 'u1db_sync_state'}, ] -- cgit v1.2.3 From 099f2b7453ee6486ccc23c0766f613709aacbde0 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 12:17:52 -0300 Subject: [pkg] move config doc as last action of couch schema migration script If the moving of the config document is the last action of the couch schema migration script, then we can test for successful migration of a certain db by checking if the config document was already moved. This commit just changes the order of migration actions to enforce this situation. --- scripts/migration/0.8.2/migrate_couch_schema/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index 60214aae..d0dd41e3 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -76,9 +76,9 @@ def migrate(args, target_version): def _migrate_user_db(db, do_migrate): _migrate_transaction_log(db, do_migrate) - _migrate_config_doc(db, do_migrate) _migrate_sync_docs(db, do_migrate) _delete_design_docs(db, do_migrate) + _migrate_config_doc(db, do_migrate) def _migrate_transaction_log(db, do_migrate): -- cgit v1.2.3 From 5f7395ebe9a8419de51c43ad189ca30af4b371f0 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 13:36:43 -0300 Subject: [pkg] fail gracefully for missing design doc on couch schema migration script --- .../migration/0.8.2/migrate_couch_schema/__init__.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index d0dd41e3..456eadf0 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -6,6 +6,7 @@ Support functions for migration script. 
import logging from couchdb import Server +from couchdb import ResourceNotFound from leap.soledad.common.couch import GENERATION_KEY from leap.soledad.common.couch import TRANSACTION_ID_KEY @@ -38,7 +39,13 @@ def _is_migrateable(db): def _get_transaction_log(db): ddoc_path = ['_design', 'transactions', '_view', 'log'] resource = db.resource(*ddoc_path) - _, _, data = resource.get_json() + try: + _, _, data = resource.get_json() + except ResourceNotFound: + logger.warning( + 'Missing transactions design document, ' + 'can\'t get transaction log.') + return [] rows = data['rows'] transaction_log = [] gen = 1 @@ -152,6 +159,9 @@ def _delete_design_docs(db, do_migrate): for ddoc in ['docs', 'syncs', 'transactions']: doc_id = '_design/%s' % ddoc doc = db.get(doc_id) - logger.info("deleting design doc: %s" % doc_id) - if do_migrate: - db.delete(doc) + if doc: + logger.info("deleting design doc: %s" % doc_id) + if do_migrate: + db.delete(doc) + else: + logger.warning("design doc not found: %s" % doc_id) -- cgit v1.2.3 From 9ae70f3cd0eaad378c73416a0cc18f62199082b0 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 13:38:40 -0300 Subject: [pkg] improve log message for skipped dbs on couch schema migration script --- scripts/migration/0.8.2/migrate_couch_schema/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index 456eadf0..66ae960b 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -73,7 +73,7 @@ def migrate(args, target_version): for dbname in user_dbs: db = server[dbname] if not _is_migrateable(db): - logger.warning("skipping user db: %s" % dbname) + logger.warning("skipping not migrateable user db: %s" % dbname) continue logger.info("starting migration of user db: %s" % dbname) _migrate_user_db(db, args.do_migrate) -- cgit v1.2.3 From 2025916a1c4d4518e714086e2144be0e83c95d9e Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 14:31:52 -0300 Subject: [pkg] ignore existing correct gen docs in couch schema migrate script --- scripts/migration/0.8.2/migrate_couch_schema/__init__.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index 66ae960b..c3eb9c3d 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -7,6 +7,7 @@ import logging from couchdb import Server from couchdb import ResourceNotFound +from couchdb import ResourceConflict from leap.soledad.common.couch import GENERATION_KEY from leap.soledad.common.couch import TRANSACTION_ID_KEY @@ -100,7 +101,15 @@ def _migrate_transaction_log(db, do_migrate): } logger.info('creating gen doc: %s' % (gen_doc_id)) if do_migrate: - db.save(doc) + try: + db.save(doc) + except ResourceConflict: + # this gen document already exists. if documents are the same, + # continue with migration. 
+ existing_doc = db.get(gen_doc_id) + for key in [GENERATION_KEY, DOC_ID_KEY, TRANSACTION_ID_KEY]: + if existing_doc[key] != doc[key]: + raise def _migrate_config_doc(db, do_migrate): -- cgit v1.2.3 From 3f74c450c37046cdd04c515e0797084a01426a80 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 14:33:50 -0300 Subject: [pkg] log any errors in couch schema migration script --- scripts/migration/0.8.2/migrate.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py index fe612221..c9c8a9a0 100755 --- a/scripts/migration/0.8.2/migrate.py +++ b/scripts/migration/0.8.2/migrate.py @@ -103,4 +103,9 @@ if __name__ == '__main__': if args.pdb: _enable_pdb() _configure_logger(args.log_file) - migrate(args, TARGET_VERSION) + logger = logging.getLogger(__name__) + try: + migrate(args, TARGET_VERSION) + except: + logger.exception('Fatal error on migrate script!') + raise -- cgit v1.2.3 From a601f8ddd7b8cd3a9cecbdb7fb16788becadb667 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 14:51:58 -0300 Subject: [pkg] log errors and continue with next db in couch schema migration script --- scripts/migration/0.8.2/migrate_couch_schema/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index c3eb9c3d..c9ec4910 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -77,8 +77,12 @@ def migrate(args, target_version): logger.warning("skipping not migrateable user db: %s" % dbname) continue logger.info("starting migration of user db: %s" % dbname) - _migrate_user_db(db, args.do_migrate) - logger.info("finished migration of user db: %s" % dbname) + try: + _migrate_user_db(db, args.do_migrate) + logger.info("finished migration of user db: %s" % dbname) + except: + logger.exception('Error migrating user db: %s' % dbname) + logger.error('Continuing with next database.') logger.info('finished couch schema migration to %s' % target_version) -- cgit v1.2.3 From 9b178bfc632ea9dbd584029af05bb688f801b0e3 Mon Sep 17 00:00:00 2001 From: drebs Date: Sun, 21 Aug 2016 10:53:51 -0300 Subject: [pkg] improve logging of couch schema migration script --- scripts/migration/0.8.2/migrate.py | 12 ++++++-- .../0.8.2/migrate_couch_schema/__init__.py | 33 ++++++++++++---------- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py index c9c8a9a0..6ad5bc2d 100755 --- a/scripts/migration/0.8.2/migrate.py +++ b/scripts/migration/0.8.2/migrate.py @@ -42,7 +42,7 @@ NETRC_PATH = CONF['soledad-server']['admin_netrc'] # command line args and execution # -def _configure_logger(log_file): +def _configure_logger(log_file, level=logging.INFO): if not log_file: fname, _ = os.path.basename(__file__).split('.') timestr = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') @@ -56,7 +56,7 @@ def _configure_logger(log_file): filemode='a', format='%(asctime)s,%(msecs)d %(levelname)s %(message)s', datefmt='%H:%M:%S', - level=logging.DEBUG) + level=level) def _default_couch_url(): @@ -88,6 +88,10 @@ def _parse_args(): parser.add_argument( '--pdb', action='store_true', help='escape to pdb shell in case of exception') + parser.add_argument( + '--verbose', action='store_true', + help='output detailed information about the migration ' + '(i.e. 
include debug messages)') return parser.parse_args() @@ -102,7 +106,9 @@ if __name__ == '__main__': args = _parse_args() if args.pdb: _enable_pdb() - _configure_logger(args.log_file) + _configure_logger( + args.log_file, + level=logging.DEBUG if args.verbose else logging.INFO) logger = logging.getLogger(__name__) try: migrate(args, TARGET_VERSION) diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index c9ec4910..edf671ae 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -44,8 +44,8 @@ def _get_transaction_log(db): _, _, data = resource.get_json() except ResourceNotFound: logger.warning( - 'Missing transactions design document, ' - 'can\'t get transaction log.') + '[%s] missing transactions design document, ' + 'can\'t get transaction log.' % db.name) return [] rows = data['rows'] transaction_log = [] @@ -67,22 +67,22 @@ def _get_user_dbs(server): def migrate(args, target_version): server = _get_couch_server(args.couch_url) - logger.info('starting couch schema migration to %s...' % target_version) + logger.info('starting couch schema migration to %s' % target_version) if not args.do_migrate: logger.warning('dry-run: no changes will be made to databases') user_dbs = _get_user_dbs(server) for dbname in user_dbs: db = server[dbname] if not _is_migrateable(db): - logger.warning("skipping not migrateable user db: %s" % dbname) + logger.warning("[%s] skipping not migrateable user db" % dbname) continue - logger.info("starting migration of user db: %s" % dbname) + logger.info("[%s] starting migration of user db" % dbname) try: _migrate_user_db(db, args.do_migrate) - logger.info("finished migration of user db: %s" % dbname) + logger.info("[%s] finished migration of user db" % dbname) except: - logger.exception('Error migrating user db: %s' % dbname) - logger.error('Continuing with next database.') + logger.exception('[%s] error migrating user db' % dbname) + logger.error('continuing with next database.') logger.info('finished couch schema migration to %s' % target_version) @@ -103,7 +103,7 @@ def _migrate_transaction_log(db, do_migrate): DOC_ID_KEY: doc_id, TRANSACTION_ID_KEY: trans_id, } - logger.info('creating gen doc: %s' % (gen_doc_id)) + logger.debug('[%s] creating gen doc: %s' % (db.name, gen_doc_id)) if do_migrate: try: db.save(doc) @@ -123,14 +123,15 @@ def _migrate_config_doc(db, do_migrate): REPLICA_UID_KEY: old_doc[REPLICA_UID_KEY], SCHEMA_VERSION_KEY: SCHEMA_VERSION, } - logger.info("moving config doc: %s -> %s" - % (old_doc['_id'], new_doc['_id'])) + logger.info("[%s] moving config doc: %s -> %s" + % (db.name, old_doc['_id'], new_doc['_id'])) if do_migrate: db.save(new_doc) db.delete(old_doc) def _migrate_sync_docs(db, do_migrate): + logger.info('[%s] moving sync docs' % db.name) view = db.view( '_all_docs', startkey='u1db_sync', @@ -149,7 +150,8 @@ def _migrate_sync_docs(db, do_migrate): # if any of these documents exist in the current db, they are leftover # from previous migrations, and should just be removed. 
if old_id in ['u1db_sync_log', 'u1db_sync_state']: - logger.info('removing leftover "u1db_sync_log" document...') + logger.info('[%s] removing leftover document: %s' + % (db.name, old_id)) if do_migrate: db.delete(old_doc) continue @@ -162,7 +164,8 @@ def _migrate_sync_docs(db, do_migrate): TRANSACTION_ID_KEY: old_doc['transaction_id'], REPLICA_UID_KEY: replica_uid, } - logger.info("moving sync doc: %s -> %s" % (old_id, new_id)) + logger.debug("[%s] moving sync doc: %s -> %s" + % (db.name, old_id, new_id)) if do_migrate: db.save(new_doc) db.delete(old_doc) @@ -173,8 +176,8 @@ def _delete_design_docs(db, do_migrate): doc_id = '_design/%s' % ddoc doc = db.get(doc_id) if doc: - logger.info("deleting design doc: %s" % doc_id) + logger.info("[%s] deleting design doc: %s" % (db.name, doc_id)) if do_migrate: db.delete(doc) else: - logger.warning("design doc not found: %s" % doc_id) + logger.warning("[%s] design doc not found: %s" % (db.name, doc_id)) -- cgit v1.2.3 From 8e87fecc4f9262ee290c0a148cdbfb214cc0417d Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 22 Aug 2016 20:56:49 -0300 Subject: [test] avoid failing on interrupted couch schema migrations --- .../migration/0.8.2/migrate_couch_schema/__init__.py | 17 +++++++++++++---- scripts/migration/0.8.2/tests/conftest.py | 4 ++++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index edf671ae..f0b456e4 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -32,9 +32,7 @@ def _get_couch_server(couch_url): def _is_migrateable(db): config_doc = db.get('u1db_config') - if config_doc is None: - return False - return True + return bool(config_doc) def _get_transaction_log(db): @@ -126,6 +124,8 @@ def _migrate_config_doc(db, do_migrate): logger.info("[%s] moving config doc: %s -> %s" % (db.name, old_doc['_id'], new_doc['_id'])) if do_migrate: + # the config doc must not exist, otherwise we would have skipped this + # database. db.save(new_doc) db.delete(old_doc) @@ -167,7 +167,16 @@ def _migrate_sync_docs(db, do_migrate): logger.debug("[%s] moving sync doc: %s -> %s" % (db.name, old_id, new_id)) if do_migrate: - db.save(new_doc) + try: + db.save(new_doc) + except ResourceConflict: + # this sync document already exists. if documents are the same, + # continue with migration. 
+ existing_doc = db.get(new_id) + for key in [GENERATION_KEY, TRANSACTION_ID_KEY, + REPLICA_UID_KEY]: + if existing_doc[key] != new_doc[key]: + raise db.delete(old_doc) diff --git a/scripts/migration/0.8.2/tests/conftest.py b/scripts/migration/0.8.2/tests/conftest.py index 8e49891c..61f6c7ee 100644 --- a/scripts/migration/0.8.2/tests/conftest.py +++ b/scripts/migration/0.8.2/tests/conftest.py @@ -33,6 +33,10 @@ initial_docs = [ {'_id': '_design/syncs'}, {'_id': '_design/transactions', 'views': {'log': {'map': transaction_map}}}, + # add some data from previous interrupted migration + {'_id': '_local/sync_A', 'gen': 0, 'trans_id': '', 'replica_uid': 'A'}, + {'_id': 'gen-0000000002', + 'gen': 2, 'trans_id': 'trans-2', 'doc_id': 'doc2'}, # the following should be removed if found in the dbs {'_id': 'u1db_sync_log'}, {'_id': 'u1db_sync_state'}, -- cgit v1.2.3 From 49d4013819733966c05178254725e6a89f1909fe Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Thu, 25 Aug 2016 19:04:18 -0300 Subject: [test] stop trying to hit me and hit me TestSyncEncrypterPool.test_encrypt_doc_and_get_it_back was trying to do an operation and asserting the number of attempts. This test is about putting a doc on encrypter pool and getting it encrypted. If we dont wait for the encryption operation to succeed, then complex trial-and-error happens, but if we just ask twisted to wait for one operation before going to the other, this is not needed. -- Resolves: #8398 --- testing/tests/sync/test_encdecpool.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/testing/tests/sync/test_encdecpool.py b/testing/tests/sync/test_encdecpool.py index 0aa17682..4a32885e 100644 --- a/testing/tests/sync/test_encdecpool.py +++ b/testing/tests/sync/test_encdecpool.py @@ -64,17 +64,11 @@ class TestSyncEncrypterPool(BaseSoledadTest): """ doc = SoledadDocument( doc_id=DOC_ID, rev=DOC_REV, json=json.dumps(DOC_CONTENT)) - self._pool.encrypt_doc(doc) - # exhaustivelly attempt to get the encrypted document - encrypted = None - attempts = 0 - while encrypted is None and attempts < 10: - encrypted = yield self._pool.get_encrypted_doc(DOC_ID, DOC_REV) - attempts += 1 + yield self._pool.encrypt_doc(doc) + encrypted = yield self._pool.get_encrypted_doc(DOC_ID, DOC_REV) self.assertIsNotNone(encrypted) - self.assertTrue(attempts < 10) class TestSyncDecrypterPool(BaseSoledadTest): -- cgit v1.2.3 From d86831e4cd3e77f340618168528e62cf4dafb5d7 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Mon, 29 Aug 2016 00:05:45 -0300 Subject: [test] randomize payload We were using 'x'*size as payload, but on real usage the payload will be random. This commit randomizes the payload using a predefined seed, so the random payload will be the same across benchmarks. Using random payloads also improves accuracy of compression or encoding impacts and we will be evaluating those changes for resouce usage issues. Also note that base64 is used on payload. That was needed for utf8 safety, but overhead was removed to leave payloads as defined by benchmarks. Base64 was chosen also due its popular usage on MIME encoding, which is used on mail attachments (our current scenario). 
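Because the generator reseeds with the same value on every call, repeated
calls yield identical bytes, which keeps runs comparable across benchmark
sessions. A condensed sketch of the fixture's behavior:

    import base64
    import random

    def payload(size):
        random.seed(1337)  # fixed seed: same pseudo-random bytes every call
        data = bytearray(random.getrandbits(8) for _ in xrange(size))
        # base64 for utf8 safety; trim back to the requested size
        return base64.b64encode(data)[:size]

    assert payload(1000) == payload(1000)  # deterministic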
--- testing/tests/perf/conftest.py | 12 ++++++++++++ testing/tests/perf/test_crypto.py | 22 +++++++++++----------- testing/tests/perf/test_encdecpool.py | 8 ++++---- testing/tests/perf/test_sqlcipher.py | 13 ++++++------- testing/tests/perf/test_sync.py | 13 ++++++------- 5 files changed, 39 insertions(+), 29 deletions(-) diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py index 9abd0c54..3681025f 100644 --- a/testing/tests/perf/conftest.py +++ b/testing/tests/perf/conftest.py @@ -2,6 +2,8 @@ import json import os import pytest import requests +import random +import base64 import signal import time @@ -43,6 +45,16 @@ DEFAULT_CERTKEY = 'soledad_certkey.pem' DEFAULT_TOKEN = 'an-auth-token' +@pytest.fixture() +def payload(): + def generate(size): + random.seed(1337) # same seed to avoid different bench results + payload_bytes = bytearray(random.getrandbits(8) for _ in xrange(size)) + # encode as base64 to avoid ascii encode/decode errors + return base64.b64encode(payload_bytes)[:size] # remove b64 overhead + return generate + + # # soledad_dbs fixture: provides all databases needed by soledad server in a per # module scope (same databases for all tests in this module). diff --git a/testing/tests/perf/test_crypto.py b/testing/tests/perf/test_crypto.py index a32ef593..be00560b 100644 --- a/testing/tests/perf/test_crypto.py +++ b/testing/tests/perf/test_crypto.py @@ -8,10 +8,10 @@ from leap.soledad.client.crypto import decrypt_sym def create_doc_encryption(size): @pytest.mark.benchmark(group="test_crypto_encrypt_doc") - def test_doc_encryption(soledad_client, benchmark): + def test_doc_encryption(soledad_client, benchmark, payload): crypto = soledad_client()._crypto - DOC_CONTENT = {'payload': 'x'*size} + DOC_CONTENT = {'payload': payload(size)} doc = SoledadDocument( doc_id=uuid4().hex, rev='rev', json=json.dumps(DOC_CONTENT)) @@ -22,10 +22,10 @@ def create_doc_encryption(size): def create_doc_decryption(size): @pytest.mark.benchmark(group="test_crypto_decrypt_doc") - def test_doc_decryption(soledad_client, benchmark): + def test_doc_decryption(soledad_client, benchmark, payload): crypto = soledad_client()._crypto - DOC_CONTENT = {'payload': 'x'*size} + DOC_CONTENT = {'payload': payload(size)} doc = SoledadDocument( doc_id=uuid4().hex, rev='rev', json=json.dumps(DOC_CONTENT)) @@ -49,21 +49,21 @@ test_decrypt_doc_1M = create_doc_decryption(1000*1000) test_decrypt_doc_10M = create_doc_decryption(10*1000*1000) test_decrypt_doc_50M = create_doc_decryption(50*1000*1000) -KEY = 'x'*32 - def create_raw_encryption(size): @pytest.mark.benchmark(group="test_crypto_raw_encrypt") - def test_raw_encrypt(benchmark): - benchmark(encrypt_sym, 'x'*size, KEY) + def test_raw_encrypt(benchmark, payload): + key = payload(32) + benchmark(encrypt_sym, payload(size), key) return test_raw_encrypt def create_raw_decryption(size): @pytest.mark.benchmark(group="test_crypto_raw_decrypt") - def test_raw_decrypt(benchmark): - iv, ciphertext = encrypt_sym('x'*size, KEY) - benchmark(decrypt_sym, ciphertext, KEY, iv) + def test_raw_decrypt(benchmark, payload): + key = payload(32) + iv, ciphertext = encrypt_sym(payload(size), key) + benchmark(decrypt_sym, ciphertext, key, iv) return test_raw_decrypt diff --git a/testing/tests/perf/test_encdecpool.py b/testing/tests/perf/test_encdecpool.py index 3c501084..77091a41 100644 --- a/testing/tests/perf/test_encdecpool.py +++ b/testing/tests/perf/test_encdecpool.py @@ -11,8 +11,8 @@ from leap.soledad.common.document import SoledadDocument def 
create_encrypt(amount, size): @pytest.mark.benchmark(group="test_pool_encrypt") @pytest.inlineCallbacks - def test(soledad_client, txbenchmark_with_setup, request): - DOC_CONTENT = {'payload': 'x'*size} + def test(soledad_client, txbenchmark_with_setup, request, payload): + DOC_CONTENT = {'payload': payload(size)} def setup(): client = soledad_client() @@ -41,8 +41,8 @@ test_encdecpool_encrypt_100_500k = create_encrypt(100, 500*1000) def create_decrypt(amount, size): @pytest.mark.benchmark(group="test_pool_decrypt") @pytest.inlineCallbacks - def test(soledad_client, txbenchmark_with_setup, request): - DOC_CONTENT = {'payload': 'x'*size} + def test(soledad_client, txbenchmark_with_setup, request, payload): + DOC_CONTENT = {'payload': payload(size)} client = soledad_client() def setup(): diff --git a/testing/tests/perf/test_sqlcipher.py b/testing/tests/perf/test_sqlcipher.py index 1fce1c3e..e7a54228 100644 --- a/testing/tests/perf/test_sqlcipher.py +++ b/testing/tests/perf/test_sqlcipher.py @@ -6,9 +6,8 @@ import pytest from twisted.internet.defer import gatherResults -def load_up(client, amount, size, defer=True): - content = 'x'*size - results = [client.create_doc({'content': content}) for _ in xrange(amount)] +def load_up(client, amount, payload, defer=True): + results = [client.create_doc({'content': payload}) for _ in xrange(amount)] if defer: return gatherResults(results) @@ -16,17 +15,17 @@ def load_up(client, amount, size, defer=True): def build_test_sqlcipher_async_create(amount, size): @pytest.inlineCallbacks @pytest.mark.benchmark(group="test_sqlcipher_async_create") - def test(soledad_client, txbenchmark): + def test(soledad_client, txbenchmark, payload): client = soledad_client() - yield txbenchmark(load_up, client, amount, size) + yield txbenchmark(load_up, client, amount, payload(size)) return test def build_test_sqlcipher_create(amount, size): @pytest.mark.benchmark(group="test_sqlcipher_create") - def test(soledad_client, benchmark): + def test(soledad_client, benchmark, payload): client = soledad_client()._dbsyncer - benchmark(load_up, client, amount, size, defer=False) + benchmark(load_up, client, amount, payload(size), defer=False) return test diff --git a/testing/tests/perf/test_sync.py b/testing/tests/perf/test_sync.py index 0be9d12f..0b48a0b9 100644 --- a/testing/tests/perf/test_sync.py +++ b/testing/tests/perf/test_sync.py @@ -3,12 +3,11 @@ import pytest from twisted.internet.defer import gatherResults -def load_up(client, amount, size): - content = 'x'*size +def load_up(client, amount, payload): deferreds = [] # create a bunch of local documents for i in xrange(amount): - d = client.create_doc({'content': content}) + d = client.create_doc({'content': payload}) deferreds.append(d) d = gatherResults(deferreds) d.addCallback(lambda _: None) @@ -18,11 +17,11 @@ def load_up(client, amount, size): def create_upload(uploads, size): @pytest.inlineCallbacks @pytest.mark.benchmark(group="test_upload") - def test(soledad_client, txbenchmark_with_setup): + def test(soledad_client, txbenchmark_with_setup, payload): client = soledad_client() def setup(): - return load_up(client, uploads, size) + return load_up(client, uploads, payload(size)) yield txbenchmark_with_setup(setup, client.sync) return test @@ -36,10 +35,10 @@ test_upload_1000_10k = create_upload(1000, 10*1000) def create_download(downloads, size): @pytest.inlineCallbacks @pytest.mark.benchmark(group="test_download") - def test(soledad_client, txbenchmark_with_setup): + def test(soledad_client, txbenchmark_with_setup, 
payload): client = soledad_client() - yield load_up(client, downloads, size) + yield load_up(client, downloads, payload(size)) yield client.sync() # We could create them directly on couch, but sending them # ensures we are dealing with properly encrypted docs -- cgit v1.2.3 From 0690eb5338953dadc68c53b8d4010ca40adc3b0b Mon Sep 17 00:00:00 2001 From: drebs Date: Thu, 25 Aug 2016 20:25:30 -0300 Subject: [pkg] remove leftover simplejson imports from l2db --- common/src/leap/soledad/common/l2db/__init__.py | 5 +---- common/src/leap/soledad/common/l2db/backends/__init__.py | 5 +---- common/src/leap/soledad/common/l2db/backends/inmemory.py | 5 +---- common/src/leap/soledad/common/l2db/backends/sqlite_backend.py | 9 +++------ .../src/leap/soledad/common/l2db/remote/basic_auth_middleware.py | 6 ++---- common/src/leap/soledad/common/l2db/remote/http_app.py | 5 +---- common/src/leap/soledad/common/l2db/remote/http_client.py | 5 +---- common/src/leap/soledad/common/l2db/remote/http_database.py | 5 +---- common/src/leap/soledad/common/l2db/remote/http_target.py | 5 +---- 9 files changed, 12 insertions(+), 38 deletions(-) diff --git a/common/src/leap/soledad/common/l2db/__init__.py b/common/src/leap/soledad/common/l2db/__init__.py index c0bd15fe..568897c4 100644 --- a/common/src/leap/soledad/common/l2db/__init__.py +++ b/common/src/leap/soledad/common/l2db/__init__.py @@ -16,10 +16,7 @@ """L2DB""" -try: - import simplejson as json -except ImportError: - import json # noqa +import json from leap.soledad.common.l2db.errors import InvalidJSON, InvalidContent diff --git a/common/src/leap/soledad/common/l2db/backends/__init__.py b/common/src/leap/soledad/common/l2db/backends/__init__.py index 922daafd..c731c3d3 100644 --- a/common/src/leap/soledad/common/l2db/backends/__init__.py +++ b/common/src/leap/soledad/common/l2db/backends/__init__.py @@ -17,10 +17,7 @@ """Abstract classes and common implementations for the backends.""" import re -try: - import simplejson as json -except ImportError: - import json # noqa +import json import uuid from leap.soledad.common import l2db diff --git a/common/src/leap/soledad/common/l2db/backends/inmemory.py b/common/src/leap/soledad/common/l2db/backends/inmemory.py index 06a934a6..6fd251af 100644 --- a/common/src/leap/soledad/common/l2db/backends/inmemory.py +++ b/common/src/leap/soledad/common/l2db/backends/inmemory.py @@ -16,10 +16,7 @@ """The in-memory Database class for U1DB.""" -try: - import simplejson as json -except ImportError: - import json # noqa +import json from leap.soledad.common.l2db import ( Document, errors, diff --git a/common/src/leap/soledad/common/l2db/backends/sqlite_backend.py b/common/src/leap/soledad/common/l2db/backends/sqlite_backend.py index ba273039..d73c0d16 100644 --- a/common/src/leap/soledad/common/l2db/backends/sqlite_backend.py +++ b/common/src/leap/soledad/common/l2db/backends/sqlite_backend.py @@ -21,17 +21,14 @@ A L2DB implementation that uses SQLite as its persistence layer. 
import errno import os -try: - import simplejson as json -except ImportError: - import json # noqa -from sqlite3 import dbapi2 +import json import sys import time import uuid - import pkg_resources +from sqlite3 import dbapi2 + from leap.soledad.common.l2db.backends import CommonBackend, CommonSyncTarget from leap.soledad.common.l2db import ( Document, errors, diff --git a/common/src/leap/soledad/common/l2db/remote/basic_auth_middleware.py b/common/src/leap/soledad/common/l2db/remote/basic_auth_middleware.py index a2cbff62..96d0d872 100644 --- a/common/src/leap/soledad/common/l2db/remote/basic_auth_middleware.py +++ b/common/src/leap/soledad/common/l2db/remote/basic_auth_middleware.py @@ -15,10 +15,8 @@ # along with u1db. If not, see . """U1DB Basic Auth authorisation WSGI middleware.""" import httplib -try: - import simplejson as json -except ImportError: - import json # noqa +import json + from wsgiref.util import shift_path_info diff --git a/common/src/leap/soledad/common/l2db/remote/http_app.py b/common/src/leap/soledad/common/l2db/remote/http_app.py index 65277bd1..5cf6645e 100644 --- a/common/src/leap/soledad/common/l2db/remote/http_app.py +++ b/common/src/leap/soledad/common/l2db/remote/http_app.py @@ -23,10 +23,7 @@ HTTP Application exposing U1DB. import functools import httplib import inspect -try: - import simplejson as json -except ImportError: - import json # noqa +import json import sys import urlparse diff --git a/common/src/leap/soledad/common/l2db/remote/http_client.py b/common/src/leap/soledad/common/l2db/remote/http_client.py index a65264b6..53363c0a 100644 --- a/common/src/leap/soledad/common/l2db/remote/http_client.py +++ b/common/src/leap/soledad/common/l2db/remote/http_client.py @@ -17,10 +17,7 @@ """Base class to make requests to a remote HTTP server.""" import httplib -try: - import simplejson as json -except ImportError: - import json # noqa +import json import socket import ssl import sys diff --git a/common/src/leap/soledad/common/l2db/remote/http_database.py b/common/src/leap/soledad/common/l2db/remote/http_database.py index b2b48dee..7512379f 100644 --- a/common/src/leap/soledad/common/l2db/remote/http_database.py +++ b/common/src/leap/soledad/common/l2db/remote/http_database.py @@ -16,10 +16,7 @@ """HTTPDatabase to access a remote db over the HTTP API.""" -try: - import simplejson as json -except ImportError: - import json # noqa +import json import uuid from leap.soledad.common.l2db import ( diff --git a/common/src/leap/soledad/common/l2db/remote/http_target.py b/common/src/leap/soledad/common/l2db/remote/http_target.py index 7e7f366f..38804f01 100644 --- a/common/src/leap/soledad/common/l2db/remote/http_target.py +++ b/common/src/leap/soledad/common/l2db/remote/http_target.py @@ -16,10 +16,7 @@ """SyncTarget API implementation to a remote HTTP server.""" -try: - import simplejson as json -except ImportError: - import json # noqa +import json from leap.soledad.common.l2db import Document, SyncTarget from leap.soledad.common.l2db.errors import BrokenSyncStream -- cgit v1.2.3 From 0f4bc628a4677978a9330f510fb20dfc7c8fced2 Mon Sep 17 00:00:00 2001 From: drebs Date: Thu, 25 Aug 2016 21:33:34 -0300 Subject: [pkg] remove deprecated requirements-testing.pip --- client/pkg/requirements-testing.pip | 1 - common/pkg/requirements-testing.pip | 14 -------------- common/setup.py | 2 -- 3 files changed, 17 deletions(-) delete mode 100644 client/pkg/requirements-testing.pip delete mode 100644 common/pkg/requirements-testing.pip diff --git 
a/client/pkg/requirements-testing.pip b/client/pkg/requirements-testing.pip deleted file mode 100644 index 94ab6e8e..00000000 --- a/client/pkg/requirements-testing.pip +++ /dev/null @@ -1 +0,0 @@ -pep8 diff --git a/common/pkg/requirements-testing.pip b/common/pkg/requirements-testing.pip deleted file mode 100644 index 526b7101..00000000 --- a/common/pkg/requirements-testing.pip +++ /dev/null @@ -1,14 +0,0 @@ -mock -testscenarios -setuptools-trial -pep8 - -#---------------------------------------------------------------------- -#Right now, common tests also depend on having the other soledad -#modules installed. Commenting to avoid versioning problem, you should -#know what you are testing against :) -#---------------------------------------------------------------------- - -#leap.common>=0.4.0 -#leap.soledad.server>=0.7.0 -#leap.soledad.client>=0.7.0 diff --git a/common/setup.py b/common/setup.py index c8a543ac..7800828c 100644 --- a/common/setup.py +++ b/common/setup.py @@ -164,6 +164,4 @@ setup( package_data={'': ["*.sql"]}, test_suite='leap.soledad.common.tests', install_requires=requirements, - tests_require=utils.parse_requirements( - reqfiles=['pkg/requirements-testing.pip']), ) -- cgit v1.2.3 From 6b283761de3a845d09de20fcc2b6a4a63130e45f Mon Sep 17 00:00:00 2001 From: Kali Kaneko Date: Thu, 1 Sep 2016 00:55:26 -0400 Subject: [bug] fail gracefully if dbsyncer has not been initialized this is needed for some mail tests. --- client/src/leap/soledad/client/api.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/client/src/leap/soledad/client/api.py b/client/src/leap/soledad/client/api.py index 1bfbed8a..fbf605a9 100644 --- a/client/src/leap/soledad/client/api.py +++ b/client/src/leap/soledad/client/api.py @@ -736,6 +736,8 @@ class Soledad(object): :rtype: twisted.internet.defer.Deferred """ sync_url = urlparse.urljoin(self._server_url, 'user-%s' % self.uuid) + if not self._dbsyncer: + return d = self._dbsyncer.sync( sync_url, creds=self._creds, -- cgit v1.2.3 From 60fb1e6554775d1b22685f5b1eea04795c84a542 Mon Sep 17 00:00:00 2001 From: Kali Kaneko Date: Fri, 2 Sep 2016 16:12:43 -0400 Subject: [tests] add pep8 in main tox run from within the gitlab script add coverage reports too. (hereby we swear not to write stupid tests just because it feels good to have an increased coverage metric). 
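For reference, the combined run that the CI script now performs can be reproduced locally along these lines (the localhost couch URL is an assumption for a local setup; the CI job uses its own couchdb service host instead):

    cd testing
    tox -- --couch-url http://127.0.0.1:5984 && tox -e pep8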
- Resolves: #8416 --- .gitignore | 3 +++ .gitlab-ci.yml | 2 +- testing/tox.ini | 7 ++++++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 6c3e413e..1f278cbf 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,6 @@ MANIFEST _trial_temp .DS_Store scripts/profiling/sync/profiles + +testing/htmlcov +testing/.coverage diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 820dbd2a..11c482b5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -3,4 +3,4 @@ tests: services: - couchdb script: - - cd testing; tox -- --couch-url http://couchdb:5984 + - cd testing; tox -- --couch-url http://couchdb:5984 && tox -e pep8 diff --git a/testing/tox.ini b/testing/tox.ini index c1d7ddb7..fecd4c5b 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -2,9 +2,14 @@ envlist = py27 [testenv] -commands = py.test {posargs} +commands = py.test --cov-report=html \ + --cov-report=term \ + --cov=leap.soledad \ + {posargs} deps = + coverage pytest + pytest-cov pytest-twisted pytest-benchmark mock -- cgit v1.2.3 From 47fa0a4554ddf5a81d5da895b00ed8c33526694b Mon Sep 17 00:00:00 2001 From: Kali Kaneko Date: Tue, 6 Sep 2016 10:06:18 -0400 Subject: [style] pep8 --- client/setup.py | 14 +++++++------- common/setup.py | 14 +++++++------- server/setup.py | 14 +++++++------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/client/setup.py b/client/setup.py index 90986dde..235e731c 100644 --- a/client/setup.py +++ b/client/setup.py @@ -114,13 +114,13 @@ requirements = utils.parse_requirements() if utils.is_develop_mode(): print - print ("[WARNING] Skipping leap-specific dependencies " - "because development mode is detected.") - print ("[WARNING] You can install " - "the latest published versions with " - "'pip install -r pkg/requirements-leap.pip'") - print ("[WARNING] Or you can instead do 'python setup.py develop' " - "from the parent folder of each one of them.") + print("[WARNING] Skipping leap-specific dependencies " + "because development mode is detected.") + print("[WARNING] You can install " + "the latest published versions with " + "'pip install -r pkg/requirements-leap.pip'") + print("[WARNING] Or you can instead do 'python setup.py develop' " + "from the parent folder of each one of them.") print else: requirements += utils.parse_requirements( diff --git a/common/setup.py b/common/setup.py index 7800828c..bb70d587 100644 --- a/common/setup.py +++ b/common/setup.py @@ -127,13 +127,13 @@ requirements = utils.parse_requirements() if utils.is_develop_mode(): print - print ("[WARNING] Skipping leap-specific dependencies " - "because development mode is detected.") - print ("[WARNING] You can install " - "the latest published versions with " - "'pip install -r pkg/requirements-leap.pip'") - print ("[WARNING] Or you can instead do 'python setup.py develop' " - "from the parent folder of each one of them.") + print("[WARNING] Skipping leap-specific dependencies " + "because development mode is detected.") + print("[WARNING] You can install " + "the latest published versions with " + "'pip install -r pkg/requirements-leap.pip'") + print("[WARNING] Or you can instead do 'python setup.py develop' " + "from the parent folder of each one of them.") print else: requirements += utils.parse_requirements( diff --git a/server/setup.py b/server/setup.py index b3b26010..a18d0b2d 100644 --- a/server/setup.py +++ b/server/setup.py @@ -122,13 +122,13 @@ requirements = utils.parse_requirements() if utils.is_develop_mode(): print - print ("[WARNING] Skipping leap-specific 
dependencies " - "because development mode is detected.") - print ("[WARNING] You can install " - "the latest published versions with " - "'pip install -r pkg/requirements-leap.pip'") - print ("[WARNING] Or you can instead do 'python setup.py develop' " - "from the parent folder of each one of them.") + print("[WARNING] Skipping leap-specific dependencies " + "because development mode is detected.") + print("[WARNING] You can install " + "the latest published versions with " + "'pip install -r pkg/requirements-leap.pip'") + print("[WARNING] Or you can instead do 'python setup.py develop' " + "from the parent folder of each one of them.") print else: requirements += utils.parse_requirements( -- cgit v1.2.3 From bf21811b18dad6f562357037ae3d335ac59b5746 Mon Sep 17 00:00:00 2001 From: drebs Date: Tue, 6 Sep 2016 12:11:21 -0300 Subject: [test] add tox env for perf tests --- testing/tox.ini | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/testing/tox.ini b/testing/tox.ini index fecd4c5b..88ca9fcc 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -11,7 +11,6 @@ deps = pytest pytest-cov pytest-twisted - pytest-benchmark mock testscenarios setuptools-trial @@ -27,6 +26,12 @@ setenv = TERM=xterm install_command = pip install {opts} {packages} +[testenv:perf] +deps = + {[testenv]deps} + pytest-benchmark +commands = py.test tests/perf {posargs} + [testenv:pep8] changedir = .. deps = pep8 -- cgit v1.2.3 From a7ffb49e1169c93af3b728f5db1a5e16e6231590 Mon Sep 17 00:00:00 2001 From: drebs Date: Thu, 8 Sep 2016 12:44:53 -0300 Subject: [test] add tagged perf job for gitlab-ci In order to configure performance tests to run in a specific machine we need to add a tagged job to .gitlab-ci.yml file. That job will only execute the perf tests, and then we can have runners that will only run those jobs. 
--- .gitlab-ci.yml | 29 +++++++++++++++++++++++++++-- testing/tox.ini | 1 + 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 11c482b5..76d22b3c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,6 +1,31 @@ +stages: + - syntax + - tests + - benchmark + +syntax: + stage: syntax + script: + - cd testing + - tox -e pep8 + tests: + stage: tests + image: leapcode/soledad:1.0 + services: + - couchdb + script: + - cd testing + - tox -- --couch-url http://couchdb:5984 + +benchmark: + stage: benchmark image: leapcode/soledad:1.0 services: - - couchdb + - couchdb script: - - cd testing; tox -- --couch-url http://couchdb:5984 && tox -e pep8 + - cd testing + - tox -e perf -- --couch-url http://couchdb:5984 + tags: + - docker + - benchmark diff --git a/testing/tox.ini b/testing/tox.ini index 88ca9fcc..231b2a9a 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -30,6 +30,7 @@ install_command = pip install {opts} {packages} deps = {[testenv]deps} pytest-benchmark +basepython = python2.7 commands = py.test tests/perf {posargs} [testenv:pep8] -- cgit v1.2.3 From b7340a962bfeae9af28c4b514d0eb077f41dd832 Mon Sep 17 00:00:00 2001 From: drebs Date: Thu, 22 Sep 2016 14:44:46 -0300 Subject: [feat] centralize logging and use twisted.logger by default --- client/src/leap/soledad/client/adbapi.py | 19 +++------ client/src/leap/soledad/client/api.py | 4 +- client/src/leap/soledad/client/crypto.py | 6 +-- client/src/leap/soledad/client/encdecpool.py | 7 ++-- .../leap/soledad/client/http_target/__init__.py | 4 +- client/src/leap/soledad/client/http_target/api.py | 1 - .../src/leap/soledad/client/http_target/fetch.py | 4 +- client/src/leap/soledad/client/http_target/send.py | 5 +-- client/src/leap/soledad/client/pragmas.py | 4 +- client/src/leap/soledad/client/secrets.py | 8 ++-- client/src/leap/soledad/client/sqlcipher.py | 8 ++-- client/src/leap/soledad/client/sync.py | 5 +-- common/src/leap/soledad/common/couch/state.py | 4 +- common/src/leap/soledad/common/log.py | 45 ++++++++++++++++++++++ server/src/leap/soledad/server/auth.py | 7 +++- 15 files changed, 84 insertions(+), 47 deletions(-) create mode 100644 common/src/leap/soledad/common/log.py diff --git a/client/src/leap/soledad/client/adbapi.py b/client/src/leap/soledad/client/adbapi.py index ef0f9066..ce9bec05 100644 --- a/client/src/leap/soledad/client/adbapi.py +++ b/client/src/leap/soledad/client/adbapi.py @@ -19,31 +19,25 @@ An asyncrhonous interface to soledad using sqlcipher backend. It uses twisted.enterprise.adbapi. """ import re -import os import sys -import logging from functools import partial from twisted.enterprise import adbapi from twisted.internet.defer import DeferredSemaphore -from twisted.python import log from zope.proxy import ProxyBase, setProxiedObject from pysqlcipher import dbapi2 +from leap.soledad.common.log import getLogger from leap.soledad.common.errors import DatabaseAccessError from leap.soledad.client import sqlcipher as soledad_sqlcipher from leap.soledad.client.pragmas import set_init_pragmas -logger = logging.getLogger(name=__name__) +logger = getLogger(__name__) -DEBUG_SQL = os.environ.get("LEAP_DEBUG_SQL") -if DEBUG_SQL: - log.startLogging(sys.stdout) - """ How long the SQLCipher connection should wait for the lock to go away until raising an exception. 
@@ -221,13 +215,12 @@ class U1DBConnectionPool(adbapi.ConnectionPool): def _errback(failure): failure.trap(dbapi2.OperationalError) if failure.getErrorMessage() == "database is locked": - logger.warning("Database operation timed out.") + logger.warn("database operation timed out") should_retry = semaphore.acquire() if should_retry: - logger.warning( - "Database operation timed out while waiting for " - "lock, trying again...") + logger.warn("trying again...") return _run_interaction() + logger.warn("giving up!") return failure d = _run_interaction() @@ -286,7 +279,7 @@ class U1DBConnectionPool(adbapi.ConnectionPool): try: conn.rollback() except: - log.err(None, "Rollback failed") + logger.error("Rollback failed") raise excType, excValue, excTraceback def finalClose(self): diff --git a/client/src/leap/soledad/client/api.py b/client/src/leap/soledad/client/api.py index fbf605a9..f620f1bc 100644 --- a/client/src/leap/soledad/client/api.py +++ b/client/src/leap/soledad/client/api.py @@ -28,7 +28,6 @@ remote storage in the server side. import binascii import errno import httplib -import logging import os import socket import ssl @@ -49,6 +48,7 @@ from leap.common.plugins import collect_plugins from leap.soledad.common import SHARED_DB_NAME from leap.soledad.common import soledad_assert from leap.soledad.common import soledad_assert_type +from leap.soledad.common.log import getLogger from leap.soledad.common.l2db.remote import http_client from leap.soledad.common.l2db.remote.ssl_match_hostname import match_hostname from leap.soledad.common.errors import DatabaseAccessError @@ -62,7 +62,7 @@ from leap.soledad.client.shared_db import SoledadSharedDatabase from leap.soledad.client import sqlcipher from leap.soledad.client import encdecpool -logger = logging.getLogger(name=__name__) +logger = getLogger(__name__) # we may want to collect statistics from the sync process diff --git a/client/src/leap/soledad/client/crypto.py b/client/src/leap/soledad/client/crypto.py index f7d92372..9f5fe28e 100644 --- a/client/src/leap/soledad/client/crypto.py +++ b/client/src/leap/soledad/client/crypto.py @@ -22,7 +22,6 @@ import binascii import hmac import hashlib import json -import logging from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.backends.multibackend import MultiBackend @@ -32,9 +31,10 @@ from cryptography.hazmat.backends.openssl.backend \ from leap.soledad.common import soledad_assert from leap.soledad.common import soledad_assert_type from leap.soledad.common import crypto +from leap.soledad.common.log import getLogger -logger = logging.getLogger(__name__) +logger = getLogger(__name__) MAC_KEY_LENGTH = 64 @@ -356,7 +356,7 @@ def _verify_doc_mac(doc_id, doc_rev, ciphertext, enc_scheme, enc_method, calculated_mac_hash = hashlib.sha256(calculated_mac).digest() if doc_mac_hash != calculated_mac_hash: - logger.warning("Wrong MAC while decrypting doc...") + logger.warn("Wrong MAC while decrypting doc...") raise crypto.WrongMacError("Could not authenticate document's " "contents.") diff --git a/client/src/leap/soledad/client/encdecpool.py b/client/src/leap/soledad/client/encdecpool.py index 74a40931..c1a7f651 100644 --- a/client/src/leap/soledad/client/encdecpool.py +++ b/client/src/leap/soledad/client/encdecpool.py @@ -23,22 +23,21 @@ during synchronization. 
import json -import logging from uuid import uuid4 from twisted.internet.task import LoopingCall from twisted.internet import threads from twisted.internet import defer -from twisted.python import log from leap.soledad.common.document import SoledadDocument from leap.soledad.common import soledad_assert +from leap.soledad.common.log import getLogger from leap.soledad.client.crypto import encrypt_docstr from leap.soledad.client.crypto import decrypt_doc_dict -logger = logging.getLogger(__name__) +logger = getLogger(__name__) # @@ -393,7 +392,7 @@ class SyncDecrypterPool(SyncEncryptDecryptPool): return d def _errback(self, failure): - log.err(failure) + logger.error(failure) self._deferred.errback(failure) self._processed_docs = 0 self._last_inserted_idx = 0 diff --git a/client/src/leap/soledad/client/http_target/__init__.py b/client/src/leap/soledad/client/http_target/__init__.py index b7e54aa4..62e8bcf0 100644 --- a/client/src/leap/soledad/client/http_target/__init__.py +++ b/client/src/leap/soledad/client/http_target/__init__.py @@ -23,15 +23,15 @@ after receiving. import os -import logging +from leap.soledad.common.log import getLogger from leap.common.http import HTTPClient from leap.soledad.client.http_target.send import HTTPDocSender from leap.soledad.client.http_target.api import SyncTargetAPI from leap.soledad.client.http_target.fetch import HTTPDocFetcher -logger = logging.getLogger(__name__) +logger = getLogger(__name__) # we may want to collect statistics from the sync process diff --git a/client/src/leap/soledad/client/http_target/api.py b/client/src/leap/soledad/client/http_target/api.py index f8de9a15..3c8e3764 100644 --- a/client/src/leap/soledad/client/http_target/api.py +++ b/client/src/leap/soledad/client/http_target/api.py @@ -15,7 +15,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . import os -import time import json import base64 diff --git a/client/src/leap/soledad/client/http_target/fetch.py b/client/src/leap/soledad/client/http_target/fetch.py index a3f70b02..184c5883 100644 --- a/client/src/leap/soledad/client/http_target/fetch.py +++ b/client/src/leap/soledad/client/http_target/fetch.py @@ -14,7 +14,6 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import logging import json from twisted.internet import defer @@ -24,11 +23,12 @@ from leap.soledad.client.events import emit_async from leap.soledad.client.crypto import is_symmetrically_encrypted from leap.soledad.client.encdecpool import SyncDecrypterPool from leap.soledad.client.http_target.support import RequestBody +from leap.soledad.common.log import getLogger from leap.soledad.common.document import SoledadDocument from leap.soledad.common.l2db import errors from leap.soledad.common.l2db.remote import utils -logger = logging.getLogger(__name__) +logger = getLogger(__name__) class HTTPDocFetcher(object): diff --git a/client/src/leap/soledad/client/http_target/send.py b/client/src/leap/soledad/client/http_target/send.py index 13218acf..c7bd057e 100644 --- a/client/src/leap/soledad/client/http_target/send.py +++ b/client/src/leap/soledad/client/http_target/send.py @@ -15,15 +15,15 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
import json -import logging from twisted.internet import defer +from leap.soledad.common.log import getLogger from leap.soledad.client.events import emit_async from leap.soledad.client.events import SOLEDAD_SYNC_SEND_STATUS from leap.soledad.client.http_target.support import RequestBody -logger = logging.getLogger(__name__) +logger = getLogger(__name__) class HTTPDocSender(object): @@ -82,7 +82,6 @@ class HTTPDocSender(object): if self._defer_encryption: self._delete_sent(sent) - user_data = {'uuid': self.uuid, 'userid': self.userid} _emit_send_status(self.uuid, body.consumed, total) defer.returnValue(result) diff --git a/client/src/leap/soledad/client/pragmas.py b/client/src/leap/soledad/client/pragmas.py index 55397d10..3c3e7aab 100644 --- a/client/src/leap/soledad/client/pragmas.py +++ b/client/src/leap/soledad/client/pragmas.py @@ -17,15 +17,15 @@ """ Different pragmas used in the initialization of the SQLCipher database. """ -import logging import string import threading import os from leap.soledad.common import soledad_assert +from leap.soledad.common.log import getLogger -logger = logging.getLogger(__name__) +logger = getLogger(__name__) _db_init_lock = threading.Lock() diff --git a/client/src/leap/soledad/client/secrets.py b/client/src/leap/soledad/client/secrets.py index 3547a711..b7bcdd0a 100644 --- a/client/src/leap/soledad/client/secrets.py +++ b/client/src/leap/soledad/client/secrets.py @@ -23,7 +23,6 @@ Soledad secrets handling. import os import scrypt -import logging import binascii import errno import json @@ -33,11 +32,12 @@ from hashlib import sha256 from leap.soledad.common import soledad_assert from leap.soledad.common import soledad_assert_type from leap.soledad.common import document +from leap.soledad.common.log import getLogger from leap.soledad.client import events from leap.soledad.client.crypto import encrypt_sym, decrypt_sym -logger = logging.getLogger(name=__name__) +logger = getLogger(__name__) # @@ -461,7 +461,7 @@ class SoledadSecrets(object): events.emit_async(events.SOLEDAD_DOWNLOADING_KEYS, user_data) db = self._shared_db if not db: - logger.warning('No shared db found') + logger.warn('no shared db found') return doc = db.get_doc(self._shared_db_doc_id()) user_data = {'userid': self._userid, 'uuid': self._uuid} @@ -492,7 +492,7 @@ class SoledadSecrets(object): events.emit_async(events.SOLEDAD_UPLOADING_KEYS, user_data) db = self._shared_db if not db: - logger.warning('No shared db found') + logger.warn('no shared db found') return db.put_doc(doc) events.emit_async(events.SOLEDAD_DONE_UPLOADING_KEYS, user_data) diff --git a/client/src/leap/soledad/client/sqlcipher.py b/client/src/leap/soledad/client/sqlcipher.py index 166c0783..14d6f5ae 100644 --- a/client/src/leap/soledad/client/sqlcipher.py +++ b/client/src/leap/soledad/client/sqlcipher.py @@ -41,7 +41,6 @@ So, as the statements above were introduced for backwards compatibility with SQLCipher 1.1 databases, we do not implement them as all SQLCipher databases handled by Soledad should be created by SQLCipher >= 2.0. 
""" -import logging import os import json @@ -55,8 +54,9 @@ from twisted.internet import defer from twisted.enterprise import adbapi from leap.soledad.common.document import SoledadDocument -from leap.soledad.common import l2db +from leap.soledad.common.log import getLogger from leap.soledad.common.l2db import errors as u1db_errors +from leap.soledad.common.l2db import Document from leap.soledad.common.l2db.backends import sqlite_backend from leap.soledad.common.errors import DatabaseAccessError @@ -65,7 +65,7 @@ from leap.soledad.client.sync import SoledadSynchronizer from leap.soledad.client import pragmas -logger = logging.getLogger(__name__) +logger = getLogger(__name__) # Monkey-patch u1db.backends.sqlite_backend with pysqlcipher.dbapi2 @@ -595,7 +595,7 @@ class U1DBSQLiteBackend(sqlite_backend.SQLitePartialExpandDatabase): self._db_handle = conn self._real_replica_uid = None self._ensure_schema() - self._factory = l2db.Document + self._factory = Document class SoledadSQLCipherWrapper(SQLCipherDatabase): diff --git a/client/src/leap/soledad/client/sync.py b/client/src/leap/soledad/client/sync.py index 2656a150..335daaef 100644 --- a/client/src/leap/soledad/client/sync.py +++ b/client/src/leap/soledad/client/sync.py @@ -18,17 +18,16 @@ Soledad synchronization utilities. """ import os -import time -import logging from twisted.internet import defer +from leap.soledad.common.log import getLogger from leap.soledad.common.l2db import errors from leap.soledad.common.l2db.sync import Synchronizer from leap.soledad.common.errors import BackendNotReadyError -logger = logging.getLogger(__name__) +logger = getLogger(__name__) # we may want to collect statistics from the sync process diff --git a/common/src/leap/soledad/common/couch/state.py b/common/src/leap/soledad/common/couch/state.py index 9ff9fe55..e3cd1a24 100644 --- a/common/src/leap/soledad/common/couch/state.py +++ b/common/src/leap/soledad/common/couch/state.py @@ -17,12 +17,12 @@ """ Server state using CouchDatabase as backend. """ -import logging import re import time from urlparse import urljoin from hashlib import sha512 +from leap.soledad.common.log import getLogger from leap.soledad.common.couch import CouchDatabase from leap.soledad.common.couch import couch_server from leap.soledad.common.command import exec_validated_cmd @@ -30,7 +30,7 @@ from leap.soledad.common.l2db.remote.server_state import ServerState from leap.soledad.common.l2db.errors import Unauthorized -logger = logging.getLogger(__name__) +logger = getLogger(__name__) def is_db_name_valid(name): diff --git a/common/src/leap/soledad/common/log.py b/common/src/leap/soledad/common/log.py new file mode 100644 index 00000000..3f026045 --- /dev/null +++ b/common/src/leap/soledad/common/log.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# log.py +# Copyright (C) 2016 LEAP +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ + +""" +This module centralizes logging facilities and allows for different behaviours, +as using the python logging module instead of twisted logger, and to print logs +to stdout, mainly for development purposes. +""" + + +import os +import sys + +from twisted.logger import Logger +from twisted.logger import textFileLogObserver + + +def getLogger(*args, **kwargs): + + if os.environ.get('SOLEDAD_USE_PYTHON_LOGGING'): + import logging + return logging.getLogger(__name__) + + if os.environ.get('SOLEDAD_LOG_TO_STDOUT'): + kwargs({'observer': textFileLogObserver(sys.stdout)}) + + return Logger(*args, **kwargs) + + +__all__ = ['getLogger'] diff --git a/server/src/leap/soledad/server/auth.py b/server/src/leap/soledad/server/auth.py index ecee2d5d..b7186b3b 100644 --- a/server/src/leap/soledad/server/auth.py +++ b/server/src/leap/soledad/server/auth.py @@ -22,13 +22,16 @@ import json from abc import ABCMeta, abstractmethod from routes.mapper import Mapper -from twisted.python import log +from leap.soledad.common.log import getLogger from leap.soledad.common.l2db import DBNAME_CONSTRAINTS, errors as u1db_errors from leap.soledad.common import SHARED_DB_NAME from leap.soledad.common import USER_DB_PREFIX +logger = getLogger(__name__) + + class URLToAuthorization(object): """ Verify if actions can be performed by a user. @@ -378,7 +381,7 @@ class SoledadTokenAuthMiddleware(SoledadAuthMiddleware): try: return self._state.verify_token(uuid, token) except Exception as e: - log.err(e) + logger.error(e) return False def _get_auth_error_string(self): -- cgit v1.2.3 From b1a7b538ab851c9e1aab3be5ba31411d658a4773 Mon Sep 17 00:00:00 2001 From: drebs Date: Thu, 22 Sep 2016 15:32:34 -0300 Subject: [style] standardize log messages --- client/src/leap/soledad/client/api.py | 6 ++--- client/src/leap/soledad/client/crypto.py | 4 +-- client/src/leap/soledad/client/encdecpool.py | 10 ++++---- client/src/leap/soledad/client/pragmas.py | 8 +++--- client/src/leap/soledad/client/secrets.py | 20 +++++++-------- client/src/leap/soledad/client/sync.py | 37 +++++++++++----------------- 6 files changed, 38 insertions(+), 47 deletions(-) diff --git a/client/src/leap/soledad/client/api.py b/client/src/leap/soledad/client/api.py index f620f1bc..6870d5ba 100644 --- a/client/src/leap/soledad/client/api.py +++ b/client/src/leap/soledad/client/api.py @@ -337,7 +337,7 @@ class Soledad(object): """ Close underlying U1DB database. """ - logger.debug("Closing soledad") + logger.debug("closing soledad") self._dbpool.close() if getattr(self, '_dbsyncer', None): self._dbsyncer.close() @@ -763,7 +763,7 @@ class Soledad(object): def _sync_errback(failure): s = StringIO() failure.printDetailedTraceback(file=s) - msg = "Soledad exception when syncing!\n" + s.getvalue() + msg = "got exception when syncing!\n" + s.getvalue() logger.error(msg) return failure @@ -1005,7 +1005,7 @@ class Soledad(object): def create_path_if_not_exists(path): try: if not os.path.isdir(path): - logger.info('Creating directory: %s.' % path) + logger.info('creating directory: %s.' 
% path) os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): diff --git a/client/src/leap/soledad/client/crypto.py b/client/src/leap/soledad/client/crypto.py index 9f5fe28e..d81c883b 100644 --- a/client/src/leap/soledad/client/crypto.py +++ b/client/src/leap/soledad/client/crypto.py @@ -300,7 +300,7 @@ def encrypt_docstr(docstr, doc_id, doc_rev, key, secret): # convert binary data to hexadecimal representation so the JSON # serialization does not complain about what it tries to serialize. hex_ciphertext = binascii.b2a_hex(ciphertext) - logger.debug("Encrypting doc: %s" % doc_id) + logger.debug("encrypting doc: %s" % doc_id) return json.dumps({ crypto.ENC_JSON_KEY: hex_ciphertext, crypto.ENC_SCHEME_KEY: enc_scheme, @@ -356,7 +356,7 @@ def _verify_doc_mac(doc_id, doc_rev, ciphertext, enc_scheme, enc_method, calculated_mac_hash = hashlib.sha256(calculated_mac).digest() if doc_mac_hash != calculated_mac_hash: - logger.warn("Wrong MAC while decrypting doc...") + logger.warn("wrong MAC while decrypting doc...") raise crypto.WrongMacError("Could not authenticate document's " "contents.") diff --git a/client/src/leap/soledad/client/encdecpool.py b/client/src/leap/soledad/client/encdecpool.py index c1a7f651..056b012f 100644 --- a/client/src/leap/soledad/client/encdecpool.py +++ b/client/src/leap/soledad/client/encdecpool.py @@ -154,7 +154,7 @@ class SyncEncrypterPool(SyncEncryptDecryptPool): Start the encrypter pool. """ SyncEncryptDecryptPool.start(self) - logger.debug("Starting the encryption loop...") + logger.debug("starting the encryption loop...") def stop(self): """ @@ -229,10 +229,10 @@ class SyncEncrypterPool(SyncEncryptDecryptPool): % self.TABLE_NAME result = yield self._runQuery(query, (doc_id, doc_rev)) if result: - logger.debug("Found doc on sync db: %s" % doc_id) + logger.debug("found doc on sync db: %s" % doc_id) val = result.pop() defer.returnValue(val[0]) - logger.debug("Did not find doc on sync db: %s" % doc_id) + logger.debug("did not find doc on sync db: %s" % doc_id) defer.returnValue(None) def delete_encrypted_doc(self, doc_id, doc_rev): @@ -505,7 +505,7 @@ class SyncDecrypterPool(SyncEncryptDecryptPool): :rtype: twisted.internet.defer.Deferred """ doc_id, rev, content, gen, trans_id, idx = result - logger.debug("Sync decrypter pool: decrypted doc %s: %s %s %s" + logger.debug("sync decrypter pool: decrypted doc %s: %s %s %s" % (doc_id, rev, gen, trans_id)) return self.insert_received_doc( doc_id, rev, content, gen, trans_id, idx) @@ -610,7 +610,7 @@ class SyncDecrypterPool(SyncEncryptDecryptPool): :type trans_id: str """ # could pass source_replica in params for callback chain - logger.debug("Sync decrypter pool: inserting doc in local db: " + logger.debug("sync decrypter pool: inserting doc in local db: " "%s:%s %s" % (doc_id, doc_rev, gen)) # convert deleted documents to avoid error on document creation diff --git a/client/src/leap/soledad/client/pragmas.py b/client/src/leap/soledad/client/pragmas.py index 3c3e7aab..870ed63e 100644 --- a/client/src/leap/soledad/client/pragmas.py +++ b/client/src/leap/soledad/client/pragmas.py @@ -321,7 +321,7 @@ def set_synchronous_off(db_handle): """ Change the setting of the "synchronous" flag to OFF. """ - logger.debug("SQLCIPHER: SETTING SYNCHRONOUS OFF") + logger.debug("sqlcipher: setting synchronous off") db_handle.cursor().execute('PRAGMA synchronous=OFF') @@ -329,7 +329,7 @@ def set_synchronous_normal(db_handle): """ Change the setting of the "synchronous" flag to NORMAL. 
""" - logger.debug("SQLCIPHER: SETTING SYNCHRONOUS NORMAL") + logger.debug("sqlcipher: setting synchronous normal") db_handle.cursor().execute('PRAGMA synchronous=NORMAL') @@ -337,7 +337,7 @@ def set_mem_temp_store(db_handle): """ Use a in-memory store for temporary tables. """ - logger.debug("SQLCIPHER: SETTING TEMP_STORE MEMORY") + logger.debug("sqlcipher: setting temp_store memory") db_handle.cursor().execute('PRAGMA temp_store=MEMORY') @@ -362,7 +362,7 @@ def set_write_ahead_logging(db_handle): requirements of the application. The default strategy is to run a checkpoint once the WAL reaches 1000 pages" """ - logger.debug("SQLCIPHER: SETTING WRITE-AHEAD LOGGING") + logger.debug("sqlcipher: setting write-ahead logging") db_handle.cursor().execute('PRAGMA journal_mode=WAL') # The optimum value can still use a little bit of tuning, but we favor diff --git a/client/src/leap/soledad/client/secrets.py b/client/src/leap/soledad/client/secrets.py index b7bcdd0a..1eb6f31d 100644 --- a/client/src/leap/soledad/client/secrets.py +++ b/client/src/leap/soledad/client/secrets.py @@ -193,42 +193,42 @@ class SoledadSecrets(object): """ # STAGE 1 - verify if secrets exist locally try: - logger.info("Trying to load secrets from local storage...") + logger.info("trying to load secrets from local storage...") version = self._load_secrets_from_local_file() # eventually migrate local and remote stored documents from old # format version if version < self.RECOVERY_DOC_VERSION: self._store_secrets() self._upload_crypto_secrets() - logger.info("Found secrets in local storage.") + logger.info("found secrets in local storage") return except NoStorageSecret: - logger.info("Could not find secrets in local storage.") + logger.info("could not find secrets in local storage") # STAGE 2 - there are no secrets in local storage and this is the # first time we are running soledad with the specified # secrets_path. Try to fetch encrypted secrets from # server. try: - logger.info('Trying to fetch secrets from remote storage...') + logger.info('trying to fetch secrets from remote storage...') version = self._download_crypto_secrets() self._store_secrets() # eventually migrate remote stored document from old format # version if version < self.RECOVERY_DOC_VERSION: self._upload_crypto_secrets() - logger.info('Found secrets in remote storage.') + logger.info('found secrets in remote storage.') return except NoStorageSecret: - logger.info("Could not find secrets in remote storage.") + logger.info("could not find secrets in remote storage.") # STAGE 3 - there are no secrets in server also, so we want to # generate the secrets and store them in the remote # db. - logger.info("Generating secrets...") + logger.info("generating secrets...") self._gen_crypto_secrets() - logger.info("Uploading secrets...") + logger.info("uploading secrets...") self._upload_crypto_secrets() def _has_secret(self): @@ -298,7 +298,7 @@ class SoledadSecrets(object): """ Generate the crypto secrets. 
""" - logger.info('No cryptographic secrets found, creating new secrets...') + logger.info('no cryptographic secrets found, creating new secrets...') secret_id = self._gen_secret() self.set_secret_id(secret_id) @@ -445,7 +445,7 @@ class SoledadSecrets(object): encrypted_secret) secret_count += 1 except SecretsException as e: - logger.error("Failed to decrypt storage secret: %s" + logger.error("failed to decrypt storage secret: %s" % str(e)) return secret_count, active_secret diff --git a/client/src/leap/soledad/client/sync.py b/client/src/leap/soledad/client/sync.py index 335daaef..0c68d100 100644 --- a/client/src/leap/soledad/client/sync.py +++ b/client/src/leap/soledad/client/sync.py @@ -101,16 +101,12 @@ class SoledadSynchronizer(Synchronizer): target_gen, target_trans_id = 0, '' target_my_gen, target_my_trans_id = 0, '' - logger.debug( - "Soledad target sync info:\n" - " target replica uid: %s\n" - " target generation: %d\n" - " target trans id: %s\n" - " target my gen: %d\n" - " target my trans_id: %s\n" - " source replica_uid: %s\n" - % (self.target_replica_uid, target_gen, target_trans_id, - target_my_gen, target_my_trans_id, self.source._replica_uid)) + logger.debug("target replica uid: %s" % self.target_replica_uid) + logger.debug("target generation: %d" % target_gen) + logger.debug("target trans id: %s" % target_trans_id) + logger.debug("target my gen: %d" % target_my_gen) + logger.debug("target my trans_id: %s" % target_my_trans_id) + logger.debug("source replica_uid: %s" % self.source._replica_uid) # make sure we'll have access to target replica uid once it exists if self.target_replica_uid is None: @@ -133,8 +129,7 @@ class SoledadSynchronizer(Synchronizer): # what's changed since that generation and this current gen my_gen, _, changes = self.source.whats_changed(target_my_gen) - logger.debug("Soledad sync: there are %d documents to send." - % len(changes)) + logger.debug("there are %d documents to send" % len(changes)) # get source last-seen database generation for the target if self.target_replica_uid is None: @@ -143,11 +138,10 @@ class SoledadSynchronizer(Synchronizer): target_last_known_gen, target_last_known_trans_id = \ self.source._get_replica_gen_and_trans_id( self.target_replica_uid) - logger.debug( - "Soledad source sync info:\n" - " last target gen known to source: %d\n" - " last target trans_id known to source: %s" - % (target_last_known_gen, target_last_known_trans_id)) + logger.debug( + "last known target gen: %d" % target_last_known_gen) + logger.debug( + "last known target trans_id: %s" % target_last_known_trans_id) # validate transaction ids if not changes and target_last_known_gen == target_gen: @@ -180,11 +174,8 @@ class SoledadSynchronizer(Synchronizer): target_last_known_gen, target_last_known_trans_id, self._insert_doc_from_target, ensure_callback=ensure_callback, defer_decryption=defer_decryption) - logger.debug( - "Soledad source sync info after sync exchange:\n" - " source known target gen: %d\n" - " source known target trans_id: %s" - % (new_gen, new_trans_id)) + logger.debug("target gen after sync: %d" % new_gen) + logger.debug("target trans_id after sync: %s" % new_trans_id) info = { "target_replica_uid": self.target_replica_uid, "new_gen": new_gen, @@ -223,7 +214,7 @@ class SoledadSynchronizer(Synchronizer): :return: A deferred which will fire when the sync has been completed. 
:rtype: twisted.internet.defer.Deferred """ - logger.debug("Completing deferred last step in SYNC...") + logger.debug("completing deferred last step in sync...") # record target synced-up-to generation including applying what we # sent -- cgit v1.2.3 From d5bbe37495ee733be5f78de72364f3ec48ed7a0d Mon Sep 17 00:00:00 2001 From: drebs Date: Sat, 24 Sep 2016 11:32:13 -0300 Subject: [test] use pytest tmpdir fixture in all tests Tests that were imported from u1db or created on top of that structure were leaving temporary directories behind. This could cause problems on test servers, either by filling the partition or by exceeding the maximum number of files in a directory. This commit replaces all usages of temporary directories in the old test structure with the pytest tmpdir fixture, which properly takes care of removing temporary directories. --- testing/setup.py | 2 +- testing/test_soledad/u1db_tests/test_open.py | 15 +++---- testing/test_soledad/util.py | 13 +------ testing/tests/client/test_app.py | 8 ++++ testing/tests/client/test_doc.py | 4 ++ testing/tests/client/test_https.py | 4 +- testing/tests/conftest.py | 5 +++ testing/tests/couch/test_atomicity.py | 4 +- testing/tests/couch/test_command.py | 8 ++-- testing/tests/server/test_server.py | 7 +--- testing/tests/sqlcipher/test_backend.py | 58 +++++++--------------------- testing/tests/sync/test_sync.py | 2 - testing/tests/sync/test_sync_mutex.py | 4 -- 13 files changed, 50 insertions(+), 84 deletions(-) diff --git a/testing/setup.py b/testing/setup.py index 059b2489..c1204c9a 100644 --- a/testing/setup.py +++ b/testing/setup.py @@ -5,5 +5,5 @@ from setuptools import find_packages setup( name='test_soledad', packages=find_packages('.'), - package_data={'': ['*.conf']} + package_data={'': ['*.conf', 'u1db_tests/testing-certs/*']} ) diff --git a/testing/test_soledad/u1db_tests/test_open.py b/testing/test_soledad/u1db_tests/test_open.py index 30d4de00..b572fba0 100644 --- a/testing/test_soledad/u1db_tests/test_open.py +++ b/testing/test_soledad/u1db_tests/test_open.py @@ -18,24 +18,25 @@ """Test u1db.open""" import os +import pytest + from unittest import skip -from leap.soledad.common.l2db import ( - errors, open as u1db_open, -) from test_soledad import u1db_tests as tests +from test_soledad.u1db_tests.test_backends import TestAlternativeDocument + +from leap.soledad.common.l2db import errors +from leap.soledad.common.l2db import open as u1db_open from leap.soledad.common.l2db.backends import sqlite_backend -from test_soledad.u1db_tests.test_backends \ import TestAlternativeDocument @skip("Skiping tests imported from U1DB.") +@pytest.mark.usefixtures('method_tmpdir') class TestU1DBOpen(tests.TestCase): def setUp(self): super(TestU1DBOpen, self).setUp() - tmpdir = self.createTempDir() - self.db_path = tmpdir + '/test.db' + self.db_path = self.tempdir + '/test.db' def test_open_no_create(self): self.assertRaises(errors.DatabaseDoesNotExist, diff --git a/testing/test_soledad/util.py b/testing/test_soledad/util.py index e23d185e..d53f6cda 100644 --- a/testing/test_soledad/util.py +++ b/testing/test_soledad/util.py @@ -22,8 +22,6 @@ Utilities used by multiple test suites. 
import os -import tempfile -import shutil import random import string import couchdb @@ -43,7 +41,6 @@ from leap.soledad.common import l2db from leap.soledad.common.l2db import sync from leap.soledad.common.l2db.remote import http_database -from leap.soledad.common import soledad_assert from leap.soledad.common.document import SoledadDocument from leap.soledad.common.couch import CouchDatabase from leap.soledad.common.couch.state import CouchServerState @@ -226,6 +223,7 @@ class BaseSoledadTest(BaseLeapTest, MockedSharedDBTest): """ defer_sync_encryption = False + @pytest.mark.usefixtures("method_tmpdir") def setUp(self): # The following snippet comes from BaseLeapTest.setUpClass, but we # repeat it here because twisted.trial does not work with @@ -233,7 +231,6 @@ class BaseSoledadTest(BaseLeapTest, MockedSharedDBTest): self.old_path = os.environ['PATH'] self.old_home = os.environ['HOME'] - self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") self.home = self.tempdir bin_tdir = os.path.join( self.tempdir, @@ -276,14 +273,6 @@ class BaseSoledadTest(BaseLeapTest, MockedSharedDBTest): self._soledad.secrets.secrets_path]: if os.path.isfile(f): os.unlink(f) - # The following snippet comes from BaseLeapTest.setUpClass, but we - # repeat it here because twisted.trial does not work with - # setUpClass/tearDownClass. - soledad_assert( - self.tempdir.startswith('/tmp/leap_tests-'), - "beware! tried to remove a dir which does not " - "live in temporal folder!") - shutil.rmtree(self.tempdir) from twisted.internet import reactor reactor.addSystemEventTrigger( diff --git a/testing/tests/client/test_app.py b/testing/tests/client/test_app.py index fef2f371..6867473e 100644 --- a/testing/tests/client/test_app.py +++ b/testing/tests/client/test_app.py @@ -17,6 +17,8 @@ """ Test ObjectStore and Couch backend bits. """ +import pytest + from testscenarios import TestWithScenarios from test_soledad.util import BaseSoledadTest @@ -31,9 +33,15 @@ from test_soledad.u1db_tests import test_backends # The following tests come from `u1db.tests.test_backends`. # ----------------------------------------------------------------------------- +@pytest.mark.usefixtures('method_tmpdir') class SoledadTests( TestWithScenarios, test_backends.AllDatabaseTests, BaseSoledadTest): + def setUp(self): + TestWithScenarios.setUp(self) + test_backends.AllDatabaseTests.setUp(self) + BaseSoledadTest.setUp(self) + scenarios = [ ('token_http', { 'make_database_for_test': make_token_http_database_for_test, diff --git a/testing/tests/client/test_doc.py b/testing/tests/client/test_doc.py index e158d768..36479e90 100644 --- a/testing/tests/client/test_doc.py +++ b/testing/tests/client/test_doc.py @@ -17,6 +17,8 @@ """ Test Leap backend bits: soledad docs """ +import pytest + from testscenarios import TestWithScenarios from test_soledad.u1db_tests import test_document @@ -28,6 +30,7 @@ from test_soledad.util import make_soledad_document_for_test # The following tests come from `u1db.tests.test_document`. 
# ----------------------------------------------------------------------------- +@pytest.mark.usefixtures('method_tmpdir') class TestSoledadDocument( TestWithScenarios, test_document.TestDocument, BaseSoledadTest): @@ -37,6 +40,7 @@ class TestSoledadDocument( 'make_document_for_test': make_soledad_document_for_test})]) +@pytest.mark.usefixtures('method_tmpdir') class TestSoledadPyDocument( TestWithScenarios, test_document.TestPyDocument, BaseSoledadTest): diff --git a/testing/tests/client/test_https.py b/testing/tests/client/test_https.py index caac16da..1b6caed6 100644 --- a/testing/tests/client/test_https.py +++ b/testing/tests/client/test_https.py @@ -17,7 +17,7 @@ """ Test Leap backend bits: https """ -from unittest import skip +import pytest from testscenarios import TestWithScenarios @@ -62,7 +62,7 @@ def token_leap_https_sync_target(test, host, path, cert_file=None): return st -@skip("Skiping tests imported from U1DB.") +@pytest.mark.skip class TestSoledadHTTPSyncTargetHttpsSupport( TestWithScenarios, # test_https.TestHttpSyncTargetHttpsSupport, diff --git a/testing/tests/conftest.py b/testing/tests/conftest.py index 3be9ba2a..9e4319ac 100644 --- a/testing/tests/conftest.py +++ b/testing/tests/conftest.py @@ -11,3 +11,8 @@ def pytest_addoption(parser): def couch_url(request): url = request.config.getoption('--couch-url') request.cls.couch_url = url + + +@pytest.fixture +def method_tmpdir(request, tmpdir): + request.instance.tempdir = tmpdir.strpath diff --git a/testing/tests/couch/test_atomicity.py b/testing/tests/couch/test_atomicity.py index 3badfb19..a3ae0314 100644 --- a/testing/tests/couch/test_atomicity.py +++ b/testing/tests/couch/test_atomicity.py @@ -18,7 +18,7 @@ Test atomicity of couch operations. """ import os -import tempfile +import pytest import threading from urlparse import urljoin @@ -41,6 +41,7 @@ from test_soledad.u1db_tests import TestCaseWithServer REPEAT_TIMES = 20 +@pytest.mark.usefixtures('method_tmpdir') class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer): @staticmethod @@ -91,7 +92,6 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer): urljoin(self.couch_url, 'user-' + self.user), create=True, replica_uid='replica') - self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") self.startTwistedServer() def tearDown(self): diff --git a/testing/tests/couch/test_command.py b/testing/tests/couch/test_command.py index f61e118d..6a96ebf9 100644 --- a/testing/tests/couch/test_command.py +++ b/testing/tests/couch/test_command.py @@ -1,6 +1,6 @@ from twisted.trial import unittest -from leap.soledad.common import couch +from leap.soledad.common.couch import state as couch_state from leap.soledad.common.l2db import errors as u1db_errors from mock import Mock @@ -9,7 +9,7 @@ from mock import Mock class CommandBasedDBCreationTest(unittest.TestCase): def test_ensure_db_using_custom_command(self): - state = couch.state.CouchServerState("url", create_cmd="echo") + state = couch_state.CouchServerState("url", create_cmd="/bin/echo") mock_db = Mock() mock_db.replica_uid = 'replica_uid' state.open_database = Mock(return_value=mock_db) @@ -18,11 +18,11 @@ class CommandBasedDBCreationTest(unittest.TestCase): self.assertEquals(mock_db.replica_uid, replica_uid) def test_raises_unauthorized_on_failure(self): - state = couch.state.CouchServerState("url", create_cmd="inexistent") + state = couch_state.CouchServerState("url", create_cmd="inexistent") self.assertRaises(u1db_errors.Unauthorized, state.ensure_database, "user-1337") def 
test_raises_unauthorized_by_default(self): - state = couch.state.CouchServerState("url") + state = couch_state.CouchServerState("url") self.assertRaises(u1db_errors.Unauthorized, state.ensure_database, "user-1337") diff --git a/testing/tests/server/test_server.py b/testing/tests/server/test_server.py index 49d25ed0..18f92d88 100644 --- a/testing/tests/server/test_server.py +++ b/testing/tests/server/test_server.py @@ -20,7 +20,7 @@ Tests for server-related functionality. import binascii import mock import os -import tempfile +import pytest from hashlib import sha512 from pkg_resources import resource_filename @@ -287,6 +287,7 @@ class ServerAuthorizationTestCase(BaseSoledadTest): self._make_environ('/%s/sync-from/x' % dbname, 'POST'))) +@pytest.mark.usefixtures("method_tmpdir") class EncryptedSyncTestCase( CouchDBTestCase, TestCaseWithServer): @@ -349,11 +350,7 @@ class EncryptedSyncTestCase( return self.make_app_with_state(self.request_state) def setUp(self): - # the order of the following initializations is crucial because of - # dependencies. - # XXX explain better CouchDBTestCase.setUp(self) - self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") TestCaseWithServer.setUp(self) def tearDown(self): diff --git a/testing/tests/sqlcipher/test_backend.py b/testing/tests/sqlcipher/test_backend.py index 11472d46..caacba0d 100644 --- a/testing/tests/sqlcipher/test_backend.py +++ b/testing/tests/sqlcipher/test_backend.py @@ -18,10 +18,9 @@ Test sqlcipher backend internals. """ import os +import pytest import time import threading -import tempfile -import shutil from pysqlcipher import dbapi2 from testscenarios import TestWithScenarios @@ -33,7 +32,6 @@ from leap.soledad.common.l2db.backends.sqlite_backend \ import SQLitePartialExpandDatabase # soledad stuff. -from leap.soledad.common import soledad_assert from leap.soledad.common.document import SoledadDocument from leap.soledad.client.sqlcipher import SQLCipherDatabase from leap.soledad.client.sqlcipher import SQLCipherOptions @@ -109,6 +107,7 @@ class SQLCipherIndexTests( # The following tests come from `u1db.tests.test_sqlite_backend`. # ----------------------------------------------------------------------------- +@pytest.mark.usefixtures('method_tmpdir') class TestSQLCipherDatabase(tests.TestCase): """ Tests from u1db.tests.test_sqlite_backend.TestSQLiteDatabase. @@ -117,8 +116,7 @@ class TestSQLCipherDatabase(tests.TestCase): def test_atomic_initialize(self): # This test was modified to ensure that db2.close() is called within # the thread that created the database. - tmpdir = self.createTempDir() - dbname = os.path.join(tmpdir, 'atomic.db') + dbname = os.path.join(self.tempdir, 'atomic.db') t2 = None # will be a thread @@ -164,6 +162,7 @@ class TestSQLCipherDatabase(tests.TestCase): db1.close() +@pytest.mark.usefixtures('method_tmpdir') class TestSQLCipherPartialExpandDatabase(tests.TestCase): """ Tests from u1db.tests.test_sqlite_backend.TestSQLitePartialExpandDatabase. 
@@ -226,8 +225,7 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase): pass def test__open_database_non_existent(self): - temp_dir = self.createTempDir(prefix='u1db-test-') - path = temp_dir + '/non-existent.sqlite' + path = self.tempdir + '/non-existent.sqlite' self.assertRaises(errors.DatabaseDoesNotExist, sqlcipher_open, path, PASSWORD, create=False) @@ -243,8 +241,7 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase): # This test was modified to ensure that an empty database file will # raise a DatabaseIsNotEncrypted exception instead of a # dbapi2.OperationalError exception. - temp_dir = self.createTempDir(prefix='u1db-test-') - path1 = temp_dir + '/invalid1.db' + path1 = self.tempdir + '/invalid1.db' with open(path1, 'wb') as f: f.write("") self.assertRaises(DatabaseIsNotEncrypted, @@ -270,8 +267,7 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase): def test_open_database_create(self): # SQLCipherDatabas has no open_database() method, so we just test for # the actual database constructor effects. - temp_dir = self.createTempDir(prefix='u1db-test-') - path = temp_dir + '/new.sqlite' + path = self.tempdir + '/new.sqlite' db1 = sqlcipher_open(path, PASSWORD, create=True) db2 = sqlcipher_open(path, PASSWORD, create=False) self.assertIsInstance(db2, SQLCipherDatabase) @@ -395,8 +391,7 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase): c.fetchall()) def test__ensure_schema_rollback(self): - temp_dir = self.createTempDir(prefix='u1db-test-') - path = temp_dir + '/rollback.db' + path = self.tempdir + '/rollback.db' class SQLitePartialExpandDbTesting(SQLCipherDatabase): @@ -414,15 +409,13 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase): db._initialize(db._db_handle.cursor()) def test_open_database_non_existent(self): - temp_dir = self.createTempDir(prefix='u1db-test-') - path = temp_dir + '/non-existent.sqlite' + path = self.tempdir + '/non-existent.sqlite' self.assertRaises(errors.DatabaseDoesNotExist, sqlcipher_open, path, "123", create=False) def test_delete_database_existent(self): - temp_dir = self.createTempDir(prefix='u1db-test-') - path = temp_dir + '/new.sqlite' + path = self.tempdir + '/new.sqlite' db = sqlcipher_open(path, "123", create=True) db.close() SQLCipherDatabase.delete_database(path) @@ -431,8 +424,7 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase): create=False) def test_delete_database_nonexistent(self): - temp_dir = self.createTempDir(prefix='u1db-test-') - path = temp_dir + '/non-existent.sqlite' + path = self.tempdir + '/non-existent.sqlite' self.assertRaises(errors.DatabaseDoesNotExist, SQLCipherDatabase.delete_database, path) @@ -630,37 +622,13 @@ class SQLCipherEncryptionTests(BaseSoledadTest): os.unlink(dbfile) def setUp(self): - # the following come from BaseLeapTest.setUpClass, because - # twisted.trial doesn't support such class methods for setting up - # test classes. - self.old_path = os.environ['PATH'] - self.old_home = os.environ['HOME'] - self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") - self.home = self.tempdir - bin_tdir = os.path.join( - self.tempdir, - 'bin') - os.environ["PATH"] = bin_tdir - os.environ["HOME"] = self.tempdir - # this is our own stuff + BaseSoledadTest.setUp(self) self.DB_FILE = os.path.join(self.tempdir, 'test.db') self._delete_dbfiles() def tearDown(self): self._delete_dbfiles() - # the following come from BaseLeapTest.tearDownClass, because - # twisted.trial doesn't support such class methods for tearing down - # test classes. 
- os.environ["PATH"] = self.old_path - os.environ["HOME"] = self.old_home - # safety check! please do not wipe my home... - # XXX needs to adapt to non-linuces - soledad_assert( - self.tempdir.startswith('/tmp/leap_tests-') or - self.tempdir.startswith('/var/folder'), - "beware! tried to remove a dir which does not " - "live in temporal folder!") - shutil.rmtree(self.tempdir) + BaseSoledadTest.tearDown(self) def test_try_to_open_encrypted_db_with_sqlite_backend(self): """ diff --git a/testing/tests/sync/test_sync.py b/testing/tests/sync/test_sync.py index 5540b7cb..5290003e 100644 --- a/testing/tests/sync/test_sync.py +++ b/testing/tests/sync/test_sync.py @@ -15,7 +15,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . import json -import tempfile import threading import time @@ -60,7 +59,6 @@ class InterruptableSyncTestCase( def setUp(self): TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) - self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") def tearDown(self): CouchDBTestCase.tearDown(self) diff --git a/testing/tests/sync/test_sync_mutex.py b/testing/tests/sync/test_sync_mutex.py index 261c6485..2626ab2a 100644 --- a/testing/tests/sync/test_sync_mutex.py +++ b/testing/tests/sync/test_sync_mutex.py @@ -24,8 +24,6 @@ be two concurrent synchronization processes at the same time. import time import uuid -import tempfile -import shutil from urlparse import urljoin @@ -91,13 +89,11 @@ class TestSyncMutex( def setUp(self): TestCaseWithServer.setUp(self) CouchDBTestCase.setUp(self) - self.tempdir = tempfile.mkdtemp(prefix="leap_tests-") self.user = ('user-%s' % uuid.uuid4().hex) def tearDown(self): CouchDBTestCase.tearDown(self) TestCaseWithServer.tearDown(self) - shutil.rmtree(self.tempdir) def test_two_concurrent_syncs_do_not_overlap_no_docs(self): self.startServer() -- cgit v1.2.3 From 887fd917a19654aa6a7c6c54be3f22c3b6c79b92 Mon Sep 17 00:00:00 2001 From: drebs Date: Sat, 24 Sep 2016 15:57:54 -0300 Subject: [test] add flake8 code check and generalize name of tox env --- .gitlab-ci.yml | 8 ++++---- common/src/leap/soledad/common/l2db/remote/server_state.py | 2 -- common/src/leap/soledad/common/l2db/sync.py | 4 ++-- testing/tox.ini | 10 +++++++--- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 76d22b3c..bdb5505f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,13 +1,13 @@ stages: - - syntax + - code-check - tests - benchmark -syntax: - stage: syntax +code-check: + stage: code-check script: - cd testing - - tox -e pep8 + - tox -e code-check tests: stage: tests diff --git a/common/src/leap/soledad/common/l2db/remote/server_state.py b/common/src/leap/soledad/common/l2db/remote/server_state.py index f131e09e..e20b4679 100644 --- a/common/src/leap/soledad/common/l2db/remote/server_state.py +++ b/common/src/leap/soledad/common/l2db/remote/server_state.py @@ -15,8 +15,6 @@ # along with u1db. If not, see . 
"""State for servers exposing a set of U1DB databases.""" -import os -import errno class ServerState(object): diff --git a/common/src/leap/soledad/common/l2db/sync.py b/common/src/leap/soledad/common/l2db/sync.py index c612629f..5e9b22f4 100644 --- a/common/src/leap/soledad/common/l2db/sync.py +++ b/common/src/leap/soledad/common/l2db/sync.py @@ -126,8 +126,8 @@ class Synchronizer(object): target_last_known_gen, target_last_known_trans_id = 0, '' else: target_last_known_gen, target_last_known_trans_id = ( - self.source._get_replica_gen_and_trans_id( # nopep8 - self.target_replica_uid)) + self.source._get_replica_gen_and_trans_id( # nopep8 + self.target_replica_uid)) if not changes and target_last_known_gen == target_gen: if target_trans_id != target_last_known_trans_id: raise errors.InvalidTransactionId diff --git a/testing/tox.ini b/testing/tox.ini index 231b2a9a..820d958c 100644 --- a/testing/tox.ini +++ b/testing/tox.ini @@ -33,10 +33,14 @@ deps = basepython = python2.7 commands = py.test tests/perf {posargs} -[testenv:pep8] +[testenv:code-check] changedir = .. -deps = pep8 -commands = pep8 {posargs} client server common +deps = + pep8 + flake8 +commands = + pep8 client server common + flake8 --ignore=F812,E731 client server common [testenv:parallel] deps = -- cgit v1.2.3 From 0c89333460953413033154e60da2ddb9cc1aed55 Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Fri, 30 Sep 2016 15:40:54 -0300 Subject: [bug] use % for formatting Otherwise it will put the exception as an additional parameter. --- client/src/leap/soledad/client/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/src/leap/soledad/client/sync.py b/client/src/leap/soledad/client/sync.py index 0c68d100..7ed5f693 100644 --- a/client/src/leap/soledad/client/sync.py +++ b/client/src/leap/soledad/client/sync.py @@ -96,7 +96,7 @@ class SoledadSynchronizer(Synchronizer): sync_target.get_sync_info(self.source._replica_uid) except (errors.DatabaseDoesNotExist, BackendNotReadyError) as e: logger.debug("Database isn't ready on server. Will be created.") - logger.debug("Reason: %s", e.__class__) + logger.debug("Reason: %s" % e.__class__) self.target_replica_uid = None target_gen, target_trans_id = 0, '' target_my_gen, target_my_trans_id = 0, '' -- cgit v1.2.3 From e13aefd14e82794622613802733713c6226e1d59 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 3 Oct 2016 17:05:36 -0300 Subject: [refactor] move configuration loading to its own module --- server/src/leap/soledad/server/__init__.py | 88 +++++++----------------------- server/src/leap/soledad/server/config.py | 67 +++++++++++++++++++++++ 2 files changed, 86 insertions(+), 69 deletions(-) create mode 100644 server/src/leap/soledad/server/config.py diff --git a/server/src/leap/soledad/server/__init__.py b/server/src/leap/soledad/server/__init__.py index 34570b52..97bcf888 100644 --- a/server/src/leap/soledad/server/__init__.py +++ b/server/src/leap/soledad/server/__init__.py @@ -80,7 +80,6 @@ documents on the shared database is handled by `leap.soledad.server.auth` module. 
""" -import configparser import urlparse import sys @@ -88,11 +87,10 @@ from leap.soledad.common.l2db.remote import http_app, utils from leap.soledad.server.auth import SoledadTokenAuthMiddleware from leap.soledad.server.gzip_middleware import GzipMiddleware -from leap.soledad.server.sync import ( - SyncResource, - MAX_REQUEST_SIZE, - MAX_ENTRY_SIZE, -) +from leap.soledad.server.sync import SyncResource +from leap.soledad.server.sync import MAX_REQUEST_SIZE +from leap.soledad.server.sync import MAX_ENTRY_SIZE +from soledad.server.config import load_configuration from leap.soledad.common import SHARED_DB_NAME from leap.soledad.common.backend import SoledadBackend @@ -100,6 +98,14 @@ from leap.soledad.common.couch.state import CouchServerState from ._version import get_versions + +__all__ = [ + 'SoledadApp', + 'application', + '__version__', +] + + # ---------------------------------------------------------------------------- # Soledad WSGI application # ---------------------------------------------------------------------------- @@ -249,57 +255,6 @@ class HTTPInvocationByMethodWithBody( http_app.HTTPInvocationByMethodWithBody = HTTPInvocationByMethodWithBody -# ---------------------------------------------------------------------------- -# Auxiliary functions -# ---------------------------------------------------------------------------- -CONFIG_DEFAULTS = { - 'soledad-server': { - 'couch_url': 'http://localhost:5984', - 'create_cmd': None, - 'admin_netrc': '/etc/couchdb/couchdb-admin.netrc', - 'batching': False - }, - 'database-security': { - 'members': ['soledad'], - 'members_roles': [], - 'admins': [], - 'admins_roles': [] - } -} - - -def load_configuration(file_path): - """ - Load server configuration from file. - - @param file_path: The path to the configuration file. - @type file_path: str - - @return: A dictionary with the configuration. - @rtype: dict - """ - defaults = dict(CONFIG_DEFAULTS) - config = configparser.SafeConfigParser() - config.read(file_path) - for section in defaults: - if not config.has_section(section): - continue - for key, value in defaults[section].items(): - if not config.has_option(section, key): - continue - elif type(value) == bool: - defaults[section][key] = config.getboolean(section, key) - elif type(value) == list: - values = config.get(section, key).split(',') - values = [v.strip() for v in values] - defaults[section][key] = values - else: - defaults[section][key] = config.get(section, key) - # TODO: implement basic parsing/sanitization of options comming from - # config file. 
- return defaults - - # ---------------------------------------------------------------------------- # Run as Twisted WSGI Resource # ---------------------------------------------------------------------------- @@ -317,20 +272,15 @@ def _get_couch_state(): return state -def application(environ, start_response): - """return WSGI application that may be used by `twistd -web`""" - state = _get_couch_state() - application = GzipMiddleware( - SoledadTokenAuthMiddleware(SoledadApp(state))) - return application(environ, start_response) +_couch_state = _get_couch_state() +# a WSGI application that may be used by `twistd -web` +application = GzipMiddleware( + SoledadTokenAuthMiddleware(SoledadApp(_couch_state))) -def debug_local_application_do_not_use(environ, start_response): - """in where we bypass token auth middleware for ease of mind while - debugging in your local environment""" - state = _get_couch_state() - application = SoledadApp(state) - return application(environ, start_response) +# another WSGI application in which we bypass token auth middleware for ease of +# mind while debugging in your local environment +# debug_local_application_do_not_use = SoledadApp(_couch_state) __version__ = get_versions()['version'] diff --git a/server/src/leap/soledad/server/config.py b/server/src/leap/soledad/server/config.py new file mode 100644 index 00000000..4a791cbe --- /dev/null +++ b/server/src/leap/soledad/server/config.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# config.py +# Copyright (C) 2016 LEAP +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +import configparser + + +CONFIG_DEFAULTS = { + 'soledad-server': { + 'couch_url': 'http://localhost:5984', + 'create_cmd': None, + 'admin_netrc': '/etc/couchdb/couchdb-admin.netrc', + 'batching': False + }, + 'database-security': { + 'members': ['soledad'], + 'members_roles': [], + 'admins': [], + 'admins_roles': [] + } +} + + +def load_configuration(file_path): + """ + Load server configuration from file. + + @param file_path: The path to the configuration file. + @type file_path: str + + @return: A dictionary with the configuration. + @rtype: dict + """ + defaults = dict(CONFIG_DEFAULTS) + config = configparser.SafeConfigParser() + config.read(file_path) + for section in defaults: + if not config.has_section(section): + continue + for key, value in defaults[section].items(): + if not config.has_option(section, key): + continue + elif type(value) == bool: + defaults[section][key] = config.getboolean(section, key) + elif type(value) == list: + values = config.get(section, key).split(',') + values = [v.strip() for v in values] + defaults[section][key] = values + else: + defaults[section][key] = config.get(section, key) + # TODO: implement basic parsing/sanitization of options comming from + # config file. 
+ return defaults -- cgit v1.2.3 From 4e06eb370b99f2d343e96f774a3ad9b8b77c9548 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 3 Oct 2016 19:27:42 -0300 Subject: [feature] check for user dbs couch schema versions --- common/src/leap/soledad/common/couch/state.py | 34 ++++++++++++++++++++++++++- common/src/leap/soledad/common/errors.py | 13 +++++++++- server/src/leap/soledad/server/__init__.py | 2 +- testing/tests/couch/conftest.py | 31 ++++++++++++++++++++++++ testing/tests/couch/test_command.py | 8 ++++--- testing/tests/couch/test_state.py | 23 ++++++++++++++++++ testing/tests/perf/conftest.py | 4 ++-- testing/tests/server/test_server.py | 4 ++-- 8 files changed, 109 insertions(+), 10 deletions(-) create mode 100644 testing/tests/couch/conftest.py create mode 100644 testing/tests/couch/test_state.py diff --git a/common/src/leap/soledad/common/couch/state.py b/common/src/leap/soledad/common/couch/state.py index e3cd1a24..1d045a9d 100644 --- a/common/src/leap/soledad/common/couch/state.py +++ b/common/src/leap/soledad/common/couch/state.py @@ -17,6 +17,7 @@ """ Server state using CouchDatabase as backend. """ +import couchdb import re import time from urlparse import urljoin @@ -25,9 +26,14 @@ from hashlib import sha512 from leap.soledad.common.log import getLogger from leap.soledad.common.couch import CouchDatabase from leap.soledad.common.couch import couch_server +from leap.soledad.common.couch import CONFIG_DOC_ID +from leap.soledad.common.couch import SCHEMA_VERSION +from leap.soledad.common.couch import SCHEMA_VERSION_KEY from leap.soledad.common.command import exec_validated_cmd from leap.soledad.common.l2db.remote.server_state import ServerState from leap.soledad.common.l2db.errors import Unauthorized +from leap.soledad.common.errors import WrongCouchSchemaVersionError +from leap.soledad.common.errors import MissingCouchConfigDocumentError logger = getLogger(__name__) @@ -59,15 +65,41 @@ class CouchServerState(ServerState): TOKENS_TYPE_DEF = "Token" TOKENS_USER_ID_KEY = "user_id" - def __init__(self, couch_url, create_cmd=None): + def __init__(self, couch_url, create_cmd=None, + check_schema_versions=True): """ Initialize the couch server state. :param couch_url: The URL for the couch database. :type couch_url: str + :param check_schema_versions: Whether to check couch schema version of + user dbs. + :type check_schema_versions: bool """ self.couch_url = couch_url self.create_cmd = create_cmd + if check_schema_versions: + self._check_schema_versions() + + def _check_schema_versions(self): + """ + Check that all user databases use the correct couch schema. + """ + server = couchdb.client.Server(self.couch_url) + for dbname in server: + if not dbname.startswith('user-'): + continue + db = server[dbname] + + # if there are documents, ensure that a config doc exists + config_doc = db.get(CONFIG_DOC_ID) + if config_doc: + if config_doc[SCHEMA_VERSION_KEY] != SCHEMA_VERSION: + raise WrongCouchSchemaVersionError(dbname) + else: + result = db.view('_all_docs', limit=1) + if result.total_rows != 0: + raise MissingCouchConfigDocumentError(dbname) def open_database(self, dbname): """ diff --git a/common/src/leap/soledad/common/errors.py b/common/src/leap/soledad/common/errors.py index dec871c9..d543a3de 100644 --- a/common/src/leap/soledad/common/errors.py +++ b/common/src/leap/soledad/common/errors.py @@ -77,7 +77,6 @@ http_errors.ERROR_STATUSES = set( class InvalidURLError(Exception): - """ Exception raised when Soledad encounters a malformed URL. 
""" @@ -90,3 +89,15 @@ class BackendNotReadyError(SoledadError): """ wire_description = "backend not ready" status = 500 + + +class WrongCouchSchemaVersionError(SoledadError): + """ + Raised in case there is a user database with wrong couch schema version. + """ + + +class MissingCouchConfigDocumentError(SoledadError): + """ + Raised if a database has documents but lacks the couch config document. + """ diff --git a/server/src/leap/soledad/server/__init__.py b/server/src/leap/soledad/server/__init__.py index 97bcf888..2e1a453a 100644 --- a/server/src/leap/soledad/server/__init__.py +++ b/server/src/leap/soledad/server/__init__.py @@ -90,7 +90,7 @@ from leap.soledad.server.gzip_middleware import GzipMiddleware from leap.soledad.server.sync import SyncResource from leap.soledad.server.sync import MAX_REQUEST_SIZE from leap.soledad.server.sync import MAX_ENTRY_SIZE -from soledad.server.config import load_configuration +from leap.soledad.server.config import load_configuration from leap.soledad.common import SHARED_DB_NAME from leap.soledad.common.backend import SoledadBackend diff --git a/testing/tests/couch/conftest.py b/testing/tests/couch/conftest.py new file mode 100644 index 00000000..1074f091 --- /dev/null +++ b/testing/tests/couch/conftest.py @@ -0,0 +1,31 @@ +import couchdb +import pytest +import random +import string + + +@pytest.fixture +def random_name(): + return 'user-' + ''.join( + random.choice( + string.ascii_lowercase) for _ in range(10)) + + +class RandomDatabase(object): + + def __init__(self, couch_url, name): + self.couch_url = couch_url + self.name = name + self.server = couchdb.client.Server(couch_url) + self.database = self.server.create(name) + + def teardown(self): + self.server.delete(self.name) + + +@pytest.fixture +def db(random_name, request): + couch_url = request.config.getoption('--couch-url') + db = RandomDatabase(couch_url, random_name) + request.addfinalizer(db.teardown) + return db diff --git a/testing/tests/couch/test_command.py b/testing/tests/couch/test_command.py index 6a96ebf9..68097fb1 100644 --- a/testing/tests/couch/test_command.py +++ b/testing/tests/couch/test_command.py @@ -9,7 +9,8 @@ from mock import Mock class CommandBasedDBCreationTest(unittest.TestCase): def test_ensure_db_using_custom_command(self): - state = couch_state.CouchServerState("url", create_cmd="/bin/echo") + state = couch_state.CouchServerState( + "url", create_cmd="/bin/echo", check_schema_versions=False) mock_db = Mock() mock_db.replica_uid = 'replica_uid' state.open_database = Mock(return_value=mock_db) @@ -18,11 +19,12 @@ class CommandBasedDBCreationTest(unittest.TestCase): self.assertEquals(mock_db.replica_uid, replica_uid) def test_raises_unauthorized_on_failure(self): - state = couch_state.CouchServerState("url", create_cmd="inexistent") + state = couch_state.CouchServerState( + "url", create_cmd="inexistent", check_schema_versions=False) self.assertRaises(u1db_errors.Unauthorized, state.ensure_database, "user-1337") def test_raises_unauthorized_by_default(self): - state = couch_state.CouchServerState("url") + state = couch_state.CouchServerState("url", check_schema_versions=False) self.assertRaises(u1db_errors.Unauthorized, state.ensure_database, "user-1337") diff --git a/testing/tests/couch/test_state.py b/testing/tests/couch/test_state.py new file mode 100644 index 00000000..a53ba076 --- /dev/null +++ b/testing/tests/couch/test_state.py @@ -0,0 +1,23 @@ +import pytest + +from leap.soledad.common.couch import CONFIG_DOC_ID +from leap.soledad.common.couch import 
SCHEMA_VERSION
+from leap.soledad.common.couch import SCHEMA_VERSION_KEY
+from leap.soledad.common.couch.state import CouchServerState
+
+from leap.soledad.common.errors import WrongCouchSchemaVersionError
+from leap.soledad.common.errors import MissingCouchConfigDocumentError
+
+
+def test_wrong_couch_version_raises(db):
+    wrong_schema_version = SCHEMA_VERSION + 1
+    db.database.create(
+        {'_id': CONFIG_DOC_ID, SCHEMA_VERSION_KEY: wrong_schema_version})
+    with pytest.raises(WrongCouchSchemaVersionError):
+        CouchServerState(db.couch_url, create_cmd='/bin/echo')
+
+
+def test_missing_config_doc_raises(db):
+    db.database.create({})
+    with pytest.raises(MissingCouchConfigDocumentError):
+        CouchServerState(db.couch_url, create_cmd='/bin/echo')
diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py
index 3681025f..5ac1f3c0 100644
--- a/testing/tests/perf/conftest.py
+++ b/testing/tests/perf/conftest.py
@@ -193,8 +193,8 @@ def soledad_server(tmpdir_factory, request):
 def txbenchmark(benchmark):
     def blockOnThread(*args, **kwargs):
         return threads.deferToThread(
-            benchmark, threads.blockingCallFromThread,
-            reactor, *args, **kwargs)
+            benchmark, threads.blockingCallFromThread,
+            reactor, *args, **kwargs)
     return blockOnThread
 
 
diff --git a/testing/tests/server/test_server.py b/testing/tests/server/test_server.py
index 18f92d88..6bbcf002 100644
--- a/testing/tests/server/test_server.py
+++ b/testing/tests/server/test_server.py
@@ -43,8 +43,8 @@ from test_soledad.util import (
 
 from leap.soledad.common import crypto
 from leap.soledad.client import Soledad
-from leap.soledad.server import load_configuration
-from leap.soledad.server import CONFIG_DEFAULTS
+from leap.soledad.server.config import load_configuration
+from leap.soledad.server.config import CONFIG_DEFAULTS
 
 from leap.soledad.server.auth import URLToAuthorization
 from leap.soledad.server.auth import SoledadTokenAuthMiddleware
-- cgit v1.2.3 

From 2b6a0e8d7168b20f86d585ebc4e57b61b1bb9cf9 Mon Sep 17 00:00:00 2001
From: Tulio Casagrande
Date: Tue, 4 Oct 2016 18:40:33 -0300
Subject: [bug] remove finalClose from SQLCipherU1DBSync

We discovered that this class was registering a `finalClose` to be executed
on reactor shutdown. In the multiuser scenario, a logout destroys Soledad
and should properly terminate everything related to it. The
SQLCipherU1DBSync instance, however, was being held by the reactor even
after logout, so that it could call `finalClose` on shutdown.

Since `finalClose` only set `running` to False and stored a `shutdownID`
that was not used anywhere else, we removed it and moved the setting of
`running` to False into the `close` method. That way we preserve the
functionality but let the instance be properly garbage collected on logout.
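For illustration only (not part of this patch, and using a hypothetical
`Holder` class in place of SQLCipherU1DBSync): a minimal sketch of how a
system event trigger pins an instance in memory — the reactor keeps a
reference to the bound method, and therefore to the instance, until shutdown
or explicit removal.

    # Illustration only: `Holder` is a hypothetical stand-in for
    # SQLCipherU1DBSync; the reactor's trigger table references the
    # bound method, and therefore the instance itself.
    from twisted.internet import reactor

    class Holder(object):
        def finalClose(self):
            self.running = False

    h = Holder()
    trigger_id = reactor.addSystemEventTrigger(
        'during', 'shutdown', h.finalClose)
    del h  # instance still reachable through the reactor's trigger table
    reactor.removeSystemEventTrigger(trigger_id)  # now it can be collected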
---
 client/src/leap/soledad/client/sqlcipher.py | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/client/src/leap/soledad/client/sqlcipher.py b/client/src/leap/soledad/client/sqlcipher.py
index 14d6f5ae..3921c323 100644
--- a/client/src/leap/soledad/client/sqlcipher.py
+++ b/client/src/leap/soledad/client/sqlcipher.py
@@ -448,7 +448,6 @@ class SQLCipherU1DBSync(SQLCipherDatabase):
 
         self.received_docs = []
         self.running = False
-        self.shutdownID = None
         self._db_handle = None
 
         # initialize the main db before scheduling a start
@@ -465,8 +464,6 @@ class SQLCipherU1DBSync(SQLCipherDatabase):
 
     def _start(self):
         if not self.running:
-            self.shutdownID = self._reactor.addSystemEventTrigger(
-                'during', 'shutdown', self.finalClose)
             self.running = True
 
     def _initialize_main_db(self):
@@ -561,13 +558,6 @@ class SQLCipherU1DBSync(SQLCipherDatabase):
         # XXX this SHOULD BE a callback
         return self._get_generation()
 
-    def finalClose(self):
-        """
-        This should only be called by the shutdown trigger.
-        """
-        self.shutdownID = None
-        self.running = False
-
     def close(self):
         """
         Close the syncer and syncdb orderly
@@ -578,6 +568,7 @@ class SQLCipherU1DBSync(SQLCipherDatabase):
             _, syncer = self._syncers[url]
             syncer.close()
             del self._syncers[url]
+        self.running = False
 
 
 class U1DBSQLiteBackend(sqlite_backend.SQLitePartialExpandDatabase):
-- cgit v1.2.3 

From c326dae7b824366208220da94ca730788bb50a18 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Wed, 5 Oct 2016 01:15:50 -0300
Subject: [bug] adds libsqlcipher to docker

The current docker image is broken due to the missing libsqlcipher. This
commit adds it, switching to jessie-backports because the package needs it.

Resolves: #8508
---
 scripts/docker/Dockerfile | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
index 8c6bfdb3..26d5f782 100644
--- a/scripts/docker/Dockerfile
+++ b/scripts/docker/Dockerfile
@@ -1,5 +1,5 @@
 # start with a fresh debian image
-FROM debian
+FROM debian:jessie-backports
 
 RUN apt-get update
 
@@ -10,6 +10,8 @@ RUN apt-get -y install libpython2.7-dev
 # needed to build python cryptography module
 RUN apt-get -y install libssl-dev
 RUN apt-get -y install libffi-dev
+# needed to build pysqlcipher
+RUN apt-get -y install libsqlcipher-dev
 
 # install pip and tox
 RUN apt-get -y install python-pip
-- cgit v1.2.3 

From 308384eba7da58fbfdc17ead35a98216880206b5 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Wed, 5 Oct 2016 01:19:51 -0300
Subject: [feature] use latest image

Use the latest image instead of hardcoding a version. This should give us
the flexibility of changing images without changing code.
---
 .gitlab-ci.yml          | 4 ++--
 scripts/docker/Makefile | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index bdb5505f..7419cdaa 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -11,7 +11,7 @@ code-check:
 
 tests:
   stage: tests
-  image: leapcode/soledad:1.0
+  image: leapcode/soledad:latest
   services:
     - couchdb
   script:
@@ -20,7 +20,7 @@ tests:
 
 benchmark:
   stage: benchmark
-  image: leapcode/soledad:1.0
+  image: leapcode/soledad:latest
   services:
     - couchdb
   script:
diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile
index 0fdc93fa..7050526a 100644
--- a/scripts/docker/Makefile
+++ b/scripts/docker/Makefile
@@ -16,7 +16,7 @@
 # Some configurations you might override when calling this makefile #
 #####################################################################
 
-IMAGE_NAME           ?= leapcode/soledad:1.0
+IMAGE_NAME           ?= leapcode/soledad:latest
 SOLEDAD_REMOTE       ?= https://0xacab.org/leap/soledad.git
 SOLEDAD_BRANCH       ?= develop
 SOLEDAD_PRELOAD_NUM  ?= 100
-- cgit v1.2.3 

From 28eb55c8388fa0dd713471d1c3334ef4ccb49ae4 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Mon, 3 Oct 2016 15:23:19 -0300
Subject: [tests] specify basepython on root env

code-check was randomly running with py3 on CI; this commit pins it to
python2.7.
---
 testing/tox.ini | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testing/tox.ini b/testing/tox.ini
index 820d958c..31cb8a4f 100644
--- a/testing/tox.ini
+++ b/testing/tox.ini
@@ -2,6 +2,7 @@
 envlist = py27
 
 [testenv]
+basepython = python2.7
 commands = py.test --cov-report=html \
            --cov-report=term \
            --cov=leap.soledad \
@@ -30,7 +31,6 @@ install_command = pip install {opts} {packages}
 deps =
     {[testenv]deps}
     pytest-benchmark
-basepython = python2.7
 commands = py.test tests/perf {posargs}
 
 [testenv:code-check]
-- cgit v1.2.3 

From 09a62dd1d6b076fcc7ac001d0b998ebb119feaad Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Wed, 5 Oct 2016 19:52:58 -0300
Subject: [tests] make check_schema_versions default to False

CouchServerState is spread across the test codebase, and this option is
intended to be used only on server startup. This commit makes it default to
False and explicitly sets it to True where necessary.
---
 common/src/leap/soledad/common/couch/state.py | 2 +-
 server/src/leap/soledad/server/__init__.py    | 3 ++-
 testing/tests/couch/test_state.py             | 6 ++++--
 3 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/common/src/leap/soledad/common/couch/state.py b/common/src/leap/soledad/common/couch/state.py
index 1d045a9d..70c5fa36 100644
--- a/common/src/leap/soledad/common/couch/state.py
+++ b/common/src/leap/soledad/common/couch/state.py
@@ -66,7 +66,7 @@ class CouchServerState(ServerState):
     TOKENS_USER_ID_KEY = "user_id"
 
     def __init__(self, couch_url, create_cmd=None,
-                 check_schema_versions=True):
+                 check_schema_versions=False):
         """
         Initialize the couch server state.
diff --git a/server/src/leap/soledad/server/__init__.py b/server/src/leap/soledad/server/__init__.py
index 2e1a453a..e4fa4aa7 100644
--- a/server/src/leap/soledad/server/__init__.py
+++ b/server/src/leap/soledad/server/__init__.py
@@ -267,7 +267,8 @@ def _load_config():
 
 def _get_couch_state():
     conf = _load_config()
-    state = CouchServerState(conf['couch_url'], create_cmd=conf['create_cmd'])
+    state = CouchServerState(conf['couch_url'], create_cmd=conf['create_cmd'],
+                             check_schema_versions=True)
     SoledadBackend.BATCH_SUPPORT = conf.get('batching', False)
     return state
 
diff --git a/testing/tests/couch/test_state.py b/testing/tests/couch/test_state.py
index a53ba076..e293b5b8 100644
--- a/testing/tests/couch/test_state.py
+++ b/testing/tests/couch/test_state.py
@@ -14,10 +14,12 @@ def test_wrong_couch_version_raises(db):
     db.database.create(
         {'_id': CONFIG_DOC_ID, SCHEMA_VERSION_KEY: wrong_schema_version})
     with pytest.raises(WrongCouchSchemaVersionError):
-        CouchServerState(db.couch_url, create_cmd='/bin/echo')
+        CouchServerState(db.couch_url, create_cmd='/bin/echo',
+                         check_schema_versions=True)
 
 
 def test_missing_config_doc_raises(db):
     db.database.create({})
     with pytest.raises(MissingCouchConfigDocumentError):
-        CouchServerState(db.couch_url, create_cmd='/bin/echo')
+        CouchServerState(db.couch_url, create_cmd='/bin/echo',
+                         check_schema_versions=True)
-- cgit v1.2.3 

From ee4dc679c8ae1a87a9b5ef3b2757a3113218e4c6 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Thu, 6 Oct 2016 18:55:20 -0300
Subject: [docs] explain CouchServerState parameters

create_cmd lacked an explanation and check_schema_versions lacked reasoning
on why it defaults to False.
---
 common/src/leap/soledad/common/couch/state.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/common/src/leap/soledad/common/couch/state.py b/common/src/leap/soledad/common/couch/state.py
index 70c5fa36..523ac0b0 100644
--- a/common/src/leap/soledad/common/couch/state.py
+++ b/common/src/leap/soledad/common/couch/state.py
@@ -72,8 +72,14 @@ class CouchServerState(ServerState):
 
         :param couch_url: The URL for the couch database.
         :type couch_url: str
+        :param create_cmd: Command to be executed for user db creation. It will
+                           receive a properly sanitized parameter with user db
+                           name and should access CouchDB with necessary
+                           privileges, which server lacks for security reasons.
+        :type create_cmd: str
        :param check_schema_versions: Whether to check couch schema version of
-                                      user dbs.
+                                      user dbs. Set to False as this is only
+                                      intended to run once during start-up.
         :type check_schema_versions: bool
-- cgit v1.2.3 

From e121a92161d3a18cebc8796d43b98c05b6916088 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Thu, 6 Oct 2016 18:56:25 -0300
Subject: [feature] adds libsqlite3-dev on docker image

This is necessary for keymanager; since this image is shared, we add it here
with a comment explaining why. The commit also explains why jessie-backports
is used.
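As a rough smoke test (not part of this patch; the snippet and its in-memory
database are illustrative), one could verify inside the built image that the
new native libraries are actually usable from Python, since pysqlcipher only
builds when libsqlcipher-dev is present:

    # Hypothetical sanity check for the docker image.
    from pysqlcipher import dbapi2

    conn = dbapi2.connect(':memory:')
    conn.execute('PRAGMA key = "smoke-test";')  # honored only by SQLCipher
    conn.execute('CREATE TABLE t (v TEXT)')
    conn.close()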
---
 scripts/docker/Dockerfile | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
index 26d5f782..21764d84 100644
--- a/scripts/docker/Dockerfile
+++ b/scripts/docker/Dockerfile
@@ -1,4 +1,5 @@
 # start with a fresh debian image
+# we use backports because of libsqlcipher-dev
 FROM debian:jessie-backports
 
 RUN apt-get update
@@ -12,6 +13,8 @@ RUN apt-get -y install libssl-dev
 RUN apt-get -y install libffi-dev
 # needed to build pysqlcipher
 RUN apt-get -y install libsqlcipher-dev
+# needed to support keymanager
+RUN apt-get -y install libsqlite3-dev
 
 # install pip and tox
 RUN apt-get -y install python-pip
-- cgit v1.2.3 

From a3836cd316c31a7256b2e110776e93c21cc772cc Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Fri, 14 Oct 2016 04:06:14 -0300
Subject: [tests] fix server import

When importing the server module, couch_state loads itself against the couch
url configured on the server. This fails when running on Docker, as couchdb
is on another node.
---
 server/src/leap/soledad/server/__init__.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/server/src/leap/soledad/server/__init__.py b/server/src/leap/soledad/server/__init__.py
index e4fa4aa7..d154e3fe 100644
--- a/server/src/leap/soledad/server/__init__.py
+++ b/server/src/leap/soledad/server/__init__.py
@@ -272,12 +272,14 @@ def _get_couch_state():
     SoledadBackend.BATCH_SUPPORT = conf.get('batching', False)
     return state
 
+try:
+    _couch_state = _get_couch_state()
+    # a WSGI application that may be used by `twistd -web`
+    application = GzipMiddleware(
+        SoledadTokenAuthMiddleware(SoledadApp(_couch_state)))
+except:
+    pass
 
-_couch_state = _get_couch_state()
-
-# a WSGI application that may be used by `twistd -web`
-application = GzipMiddleware(
-    SoledadTokenAuthMiddleware(SoledadApp(_couch_state)))
 
 # another WSGI application in which we bypass token auth middleware for ease of
 # mind while debugging in your local environment
-- cgit v1.2.3 

From d40023b42053af0971e90ec33dbf9339c2e5f834 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Fri, 14 Oct 2016 04:39:31 -0300
Subject: [tests] cache tox folder

This should avoid tox virtualenv recreation.
--- .gitlab-ci.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7419cdaa..dd4e4605 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -3,6 +3,11 @@ stages: - tests - benchmark +# Cache tox envs between builds +cache: + paths: + - testing/.tox/ + code-check: stage: code-check script: -- cgit v1.2.3 From 18b4cb0aa61a4f935362cf268afc543280461dda Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 30 Sep 2016 09:20:21 -0300 Subject: [pkg] use correct folder name for migrate script --- scripts/migration/0.8.2/README.md | 73 -------- scripts/migration/0.8.2/log/.empty | 0 scripts/migration/0.8.2/migrate.py | 117 ------------- .../0.8.2/migrate_couch_schema/__init__.py | 192 --------------------- scripts/migration/0.8.2/setup.py | 8 - scripts/migration/0.8.2/tests/conftest.py | 54 ------ scripts/migration/0.8.2/tests/test_migrate.py | 67 ------- scripts/migration/0.8.2/tox.ini | 13 -- scripts/migration/0.9.0/.gitignore | 1 + scripts/migration/0.9.0/README.md | 73 ++++++++ scripts/migration/0.9.0/log/.empty | 0 scripts/migration/0.9.0/migrate.py | 117 +++++++++++++ .../0.9.0/migrate_couch_schema/__init__.py | 192 +++++++++++++++++++++ scripts/migration/0.9.0/requirements.pip | 3 + scripts/migration/0.9.0/setup.py | 8 + scripts/migration/0.9.0/tests/conftest.py | 54 ++++++ scripts/migration/0.9.0/tests/test_migrate.py | 67 +++++++ scripts/migration/0.9.0/tox.ini | 13 ++ 18 files changed, 528 insertions(+), 524 deletions(-) delete mode 100644 scripts/migration/0.8.2/README.md delete mode 100644 scripts/migration/0.8.2/log/.empty delete mode 100755 scripts/migration/0.8.2/migrate.py delete mode 100644 scripts/migration/0.8.2/migrate_couch_schema/__init__.py delete mode 100644 scripts/migration/0.8.2/setup.py delete mode 100644 scripts/migration/0.8.2/tests/conftest.py delete mode 100644 scripts/migration/0.8.2/tests/test_migrate.py delete mode 100644 scripts/migration/0.8.2/tox.ini create mode 100644 scripts/migration/0.9.0/.gitignore create mode 100644 scripts/migration/0.9.0/README.md create mode 100644 scripts/migration/0.9.0/log/.empty create mode 100755 scripts/migration/0.9.0/migrate.py create mode 100644 scripts/migration/0.9.0/migrate_couch_schema/__init__.py create mode 100644 scripts/migration/0.9.0/requirements.pip create mode 100644 scripts/migration/0.9.0/setup.py create mode 100644 scripts/migration/0.9.0/tests/conftest.py create mode 100644 scripts/migration/0.9.0/tests/test_migrate.py create mode 100644 scripts/migration/0.9.0/tox.ini diff --git a/scripts/migration/0.8.2/README.md b/scripts/migration/0.8.2/README.md deleted file mode 100644 index 919a5235..00000000 --- a/scripts/migration/0.8.2/README.md +++ /dev/null @@ -1,73 +0,0 @@ -CouchDB schema migration to Soledad 0.8.2 -========================================= - -Migrate couch database schema from <= 0.8.1 version to 0.8.2 version. - - -ATTENTION! ----------- - - - This script does not backup your data for you. Make sure you have a backup - copy of your databases before running this script! - - - Make sure you turn off any service that might be writing to the couch - database before running this script. - - -Usage ------ - -To see what the script would do, run: - - ./migrate.py - -To actually run the migration, add the --do-migrate command line option: - - ./migrate.py --do-migrate - -See command line options: - - ./migrate.py --help - - -Log ---- - -If you don't pass a --log-file command line option, a log will be written to -the `log/` folder. 
- - -Differences between old and new couch schema --------------------------------------------- - -The differences between old and new schemas are: - - - Transaction metadata was previously stored inside each document, and we - used design doc view/list functions to retrieve that information. Now, - transaction metadata is stored in documents with special ids - (gen-0000000001 to gen-9999999999). - - - Database replica config metadata was stored in a document called - "u1db_config", and now we store it in the "_local/config" document. - - - Sync metadata was previously stored in documents with id - "u1db_sync_", and now are stored in - "_local/sync_". - - - The new schema doesn't make use of any design documents. - - -What does this script do ------------------------- - -- List all databases starting with "user-". -- For each one, do: - - Check if it contains the old "u1db_config" document. - - If it doesn't, skip this db. - - Get the transaction log using the usual design doc view/list functions. - - Write a new "gen-X" document for each line on the transaction log. - - Get the "u1db_config" document, create a new one in "_local/config", - Delete the old one. - - List all "u1db_sync_X" documents, create new ones in "_local/sync_X", - delete the old ones. - - Delete unused design documents. diff --git a/scripts/migration/0.8.2/log/.empty b/scripts/migration/0.8.2/log/.empty deleted file mode 100644 index e69de29b..00000000 diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py deleted file mode 100755 index 6ad5bc2d..00000000 --- a/scripts/migration/0.8.2/migrate.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python -# migrate.py - -""" -Migrate CouchDB schema to Soledad 0.8.2 schema. - -****************************************************************************** - ATTENTION! - - - This script does not backup your data for you. Make sure you have a backup - copy of your databases before running this script! - - - Make sure you turn off any service that might be writing to the couch - database before running this script. - -****************************************************************************** - -Run this script with the --help option to see command line options. - -See the README.md file for more information. 
-""" - -import datetime -import logging -import netrc -import os - -from argparse import ArgumentParser - -from leap.soledad.server import load_configuration - -from migrate_couch_schema import migrate - - -TARGET_VERSION = '0.8.2' -DEFAULT_COUCH_URL = 'http://127.0.0.1:5984' -CONF = load_configuration('/etc/soledad/soledad-server.conf') -NETRC_PATH = CONF['soledad-server']['admin_netrc'] - - -# -# command line args and execution -# - -def _configure_logger(log_file, level=logging.INFO): - if not log_file: - fname, _ = os.path.basename(__file__).split('.') - timestr = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') - filename = 'soledad_%s_%s_%s.log' \ - % (TARGET_VERSION, fname, timestr) - dirname = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'log') - log_file = os.path.join(dirname, filename) - logging.basicConfig( - filename=log_file, - filemode='a', - format='%(asctime)s,%(msecs)d %(levelname)s %(message)s', - datefmt='%H:%M:%S', - level=level) - - -def _default_couch_url(): - if not os.path.exists(NETRC_PATH): - return DEFAULT_COUCH_URL - parsed_netrc = netrc.netrc(NETRC_PATH) - host, (login, _, password) = parsed_netrc.hosts.items()[0] - url = ('http://%(login)s:%(password)s@%(host)s:5984' % { - 'login': login, - 'password': password, - 'host': host}) - return url - - -def _parse_args(): - parser = ArgumentParser() - parser.add_argument( - '--couch_url', - help='the url for the couch database', - default=_default_couch_url()) - parser.add_argument( - '--do-migrate', - help='actually perform the migration (otherwise ' - 'just print what would be done)', - action='store_true') - parser.add_argument( - '--log-file', - help='the log file to use') - parser.add_argument( - '--pdb', action='store_true', - help='escape to pdb shell in case of exception') - parser.add_argument( - '--verbose', action='store_true', - help='output detailed information about the migration ' - '(i.e. include debug messages)') - return parser.parse_args() - - -def _enable_pdb(): - import sys - from IPython.core import ultratb - sys.excepthook = ultratb.FormattedTB( - mode='Verbose', color_scheme='Linux', call_pdb=1) - - -if __name__ == '__main__': - args = _parse_args() - if args.pdb: - _enable_pdb() - _configure_logger( - args.log_file, - level=logging.DEBUG if args.verbose else logging.INFO) - logger = logging.getLogger(__name__) - try: - migrate(args, TARGET_VERSION) - except: - logger.exception('Fatal error on migrate script!') - raise diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py deleted file mode 100644 index f0b456e4..00000000 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ /dev/null @@ -1,192 +0,0 @@ -# __init__.py -""" -Support functions for migration script. 
-""" - -import logging - -from couchdb import Server -from couchdb import ResourceNotFound -from couchdb import ResourceConflict - -from leap.soledad.common.couch import GENERATION_KEY -from leap.soledad.common.couch import TRANSACTION_ID_KEY -from leap.soledad.common.couch import REPLICA_UID_KEY -from leap.soledad.common.couch import DOC_ID_KEY -from leap.soledad.common.couch import SCHEMA_VERSION_KEY -from leap.soledad.common.couch import CONFIG_DOC_ID -from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX -from leap.soledad.common.couch import SCHEMA_VERSION - - -logger = logging.getLogger(__name__) - - -# -# support functions -# - -def _get_couch_server(couch_url): - return Server(couch_url) - - -def _is_migrateable(db): - config_doc = db.get('u1db_config') - return bool(config_doc) - - -def _get_transaction_log(db): - ddoc_path = ['_design', 'transactions', '_view', 'log'] - resource = db.resource(*ddoc_path) - try: - _, _, data = resource.get_json() - except ResourceNotFound: - logger.warning( - '[%s] missing transactions design document, ' - 'can\'t get transaction log.' % db.name) - return [] - rows = data['rows'] - transaction_log = [] - gen = 1 - for row in rows: - transaction_log.append((gen, row['id'], row['value'])) - gen += 1 - return transaction_log - - -def _get_user_dbs(server): - user_dbs = filter(lambda dbname: dbname.startswith('user-'), server) - return user_dbs - - -# -# migration main functions -# - -def migrate(args, target_version): - server = _get_couch_server(args.couch_url) - logger.info('starting couch schema migration to %s' % target_version) - if not args.do_migrate: - logger.warning('dry-run: no changes will be made to databases') - user_dbs = _get_user_dbs(server) - for dbname in user_dbs: - db = server[dbname] - if not _is_migrateable(db): - logger.warning("[%s] skipping not migrateable user db" % dbname) - continue - logger.info("[%s] starting migration of user db" % dbname) - try: - _migrate_user_db(db, args.do_migrate) - logger.info("[%s] finished migration of user db" % dbname) - except: - logger.exception('[%s] error migrating user db' % dbname) - logger.error('continuing with next database.') - logger.info('finished couch schema migration to %s' % target_version) - - -def _migrate_user_db(db, do_migrate): - _migrate_transaction_log(db, do_migrate) - _migrate_sync_docs(db, do_migrate) - _delete_design_docs(db, do_migrate) - _migrate_config_doc(db, do_migrate) - - -def _migrate_transaction_log(db, do_migrate): - transaction_log = _get_transaction_log(db) - for gen, doc_id, trans_id in transaction_log: - gen_doc_id = 'gen-%s' % str(gen).zfill(10) - doc = { - '_id': gen_doc_id, - GENERATION_KEY: gen, - DOC_ID_KEY: doc_id, - TRANSACTION_ID_KEY: trans_id, - } - logger.debug('[%s] creating gen doc: %s' % (db.name, gen_doc_id)) - if do_migrate: - try: - db.save(doc) - except ResourceConflict: - # this gen document already exists. if documents are the same, - # continue with migration. - existing_doc = db.get(gen_doc_id) - for key in [GENERATION_KEY, DOC_ID_KEY, TRANSACTION_ID_KEY]: - if existing_doc[key] != doc[key]: - raise - - -def _migrate_config_doc(db, do_migrate): - old_doc = db['u1db_config'] - new_doc = { - '_id': CONFIG_DOC_ID, - REPLICA_UID_KEY: old_doc[REPLICA_UID_KEY], - SCHEMA_VERSION_KEY: SCHEMA_VERSION, - } - logger.info("[%s] moving config doc: %s -> %s" - % (db.name, old_doc['_id'], new_doc['_id'])) - if do_migrate: - # the config doc must not exist, otherwise we would have skipped this - # database. 
- db.save(new_doc) - db.delete(old_doc) - - -def _migrate_sync_docs(db, do_migrate): - logger.info('[%s] moving sync docs' % db.name) - view = db.view( - '_all_docs', - startkey='u1db_sync', - endkey='u1db_synd', - include_docs='true') - for row in view.rows: - old_doc = row['doc'] - old_id = old_doc['_id'] - - # older schemas used different documents with ids starting with - # "u1db_sync" to store sync-related data: - # - # - u1db_sync_log: was used to store the whole sync log. - # - u1db_sync_state: was used to store the sync state. - # - # if any of these documents exist in the current db, they are leftover - # from previous migrations, and should just be removed. - if old_id in ['u1db_sync_log', 'u1db_sync_state']: - logger.info('[%s] removing leftover document: %s' - % (db.name, old_id)) - if do_migrate: - db.delete(old_doc) - continue - - replica_uid = old_id.replace('u1db_sync_', '') - new_id = "%s%s" % (SYNC_DOC_ID_PREFIX, replica_uid) - new_doc = { - '_id': new_id, - GENERATION_KEY: old_doc['generation'], - TRANSACTION_ID_KEY: old_doc['transaction_id'], - REPLICA_UID_KEY: replica_uid, - } - logger.debug("[%s] moving sync doc: %s -> %s" - % (db.name, old_id, new_id)) - if do_migrate: - try: - db.save(new_doc) - except ResourceConflict: - # this sync document already exists. if documents are the same, - # continue with migration. - existing_doc = db.get(new_id) - for key in [GENERATION_KEY, TRANSACTION_ID_KEY, - REPLICA_UID_KEY]: - if existing_doc[key] != new_doc[key]: - raise - db.delete(old_doc) - - -def _delete_design_docs(db, do_migrate): - for ddoc in ['docs', 'syncs', 'transactions']: - doc_id = '_design/%s' % ddoc - doc = db.get(doc_id) - if doc: - logger.info("[%s] deleting design doc: %s" % (db.name, doc_id)) - if do_migrate: - db.delete(doc) - else: - logger.warning("[%s] design doc not found: %s" % (db.name, doc_id)) diff --git a/scripts/migration/0.8.2/setup.py b/scripts/migration/0.8.2/setup.py deleted file mode 100644 index 0467e932..00000000 --- a/scripts/migration/0.8.2/setup.py +++ /dev/null @@ -1,8 +0,0 @@ -from setuptools import setup -from setuptools import find_packages - - -setup( - name='migrate_couch_schema', - packages=find_packages('.'), -) diff --git a/scripts/migration/0.8.2/tests/conftest.py b/scripts/migration/0.8.2/tests/conftest.py deleted file mode 100644 index 61f6c7ee..00000000 --- a/scripts/migration/0.8.2/tests/conftest.py +++ /dev/null @@ -1,54 +0,0 @@ -# conftest.py - -""" -Provide a couch database with content stored in old schema. 
-""" - -import couchdb -import pytest -import uuid - - -COUCH_URL = 'http://127.0.0.1:5984' - -transaction_map = """ -function(doc) { - if (doc.u1db_transactions) - doc.u1db_transactions.forEach(function(t) { - emit(t[0], // use timestamp as key so the results are ordered - t[1]); // value is the transaction_id - }); -} -""" - -initial_docs = [ - {'_id': 'u1db_config', 'replica_uid': 'an-uid'}, - {'_id': 'u1db_sync_A', 'generation': 0, 'replica_uid': 'A', - 'transaction_id': ''}, - {'_id': 'u1db_sync_B', 'generation': 2, 'replica_uid': 'B', - 'transaction_id': 'X'}, - {'_id': 'doc1', 'u1db_transactions': [(1, 'trans-1'), (3, 'trans-3')]}, - {'_id': 'doc2', 'u1db_transactions': [(2, 'trans-2'), (4, 'trans-4')]}, - {'_id': '_design/docs'}, - {'_id': '_design/syncs'}, - {'_id': '_design/transactions', - 'views': {'log': {'map': transaction_map}}}, - # add some data from previous interrupted migration - {'_id': '_local/sync_A', 'gen': 0, 'trans_id': '', 'replica_uid': 'A'}, - {'_id': 'gen-0000000002', - 'gen': 2, 'trans_id': 'trans-2', 'doc_id': 'doc2'}, - # the following should be removed if found in the dbs - {'_id': 'u1db_sync_log'}, - {'_id': 'u1db_sync_state'}, -] - - -@pytest.fixture(scope='function') -def db(request): - server = couchdb.Server(COUCH_URL) - dbname = "user-" + uuid.uuid4().hex - db = server.create(dbname) - for doc in initial_docs: - db.save(doc) - request.addfinalizer(lambda: server.delete(dbname)) - return db diff --git a/scripts/migration/0.8.2/tests/test_migrate.py b/scripts/migration/0.8.2/tests/test_migrate.py deleted file mode 100644 index 10c8b906..00000000 --- a/scripts/migration/0.8.2/tests/test_migrate.py +++ /dev/null @@ -1,67 +0,0 @@ -# test_migrate.py - -""" -Ensure that the migration script works! -""" - -from migrate_couch_schema import _migrate_user_db - -from leap.soledad.common.couch import GENERATION_KEY -from leap.soledad.common.couch import TRANSACTION_ID_KEY -from leap.soledad.common.couch import REPLICA_UID_KEY -from leap.soledad.common.couch import DOC_ID_KEY -from leap.soledad.common.couch import SCHEMA_VERSION_KEY -from leap.soledad.common.couch import CONFIG_DOC_ID -from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX -from leap.soledad.common.couch import SCHEMA_VERSION - - -def test__migrate_user_db(db): - _migrate_user_db(db, True) - - # we should find exactly 6 documents: 2 normal documents and 4 generation - # documents - view = db.view('_all_docs') - assert len(view.rows) == 6 - - # ensure that the ids of the documents we found on the database are correct - doc_ids = map(lambda doc: doc.id, view.rows) - assert 'doc1' in doc_ids - assert 'doc2' in doc_ids - assert 'gen-0000000001' in doc_ids - assert 'gen-0000000002' in doc_ids - assert 'gen-0000000003' in doc_ids - assert 'gen-0000000004' in doc_ids - - # assert config doc contents - config_doc = db.get(CONFIG_DOC_ID) - assert config_doc[REPLICA_UID_KEY] == 'an-uid' - assert config_doc[SCHEMA_VERSION_KEY] == SCHEMA_VERSION - - # assert sync docs contents - sync_doc_A = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'A')) - assert sync_doc_A[GENERATION_KEY] == 0 - assert sync_doc_A[REPLICA_UID_KEY] == 'A' - assert sync_doc_A[TRANSACTION_ID_KEY] == '' - sync_doc_B = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'B')) - assert sync_doc_B[GENERATION_KEY] == 2 - assert sync_doc_B[REPLICA_UID_KEY] == 'B' - assert sync_doc_B[TRANSACTION_ID_KEY] == 'X' - - # assert gen docs contents - gen_1 = db.get('gen-0000000001') - assert gen_1[DOC_ID_KEY] == 'doc1' - assert gen_1[GENERATION_KEY] == 1 - assert 
gen_1[TRANSACTION_ID_KEY] == 'trans-1'
-    gen_2 = db.get('gen-0000000002')
-    assert gen_2[DOC_ID_KEY] == 'doc2'
-    assert gen_2[GENERATION_KEY] == 2
-    assert gen_2[TRANSACTION_ID_KEY] == 'trans-2'
-    gen_3 = db.get('gen-0000000003')
-    assert gen_3[DOC_ID_KEY] == 'doc1'
-    assert gen_3[GENERATION_KEY] == 3
-    assert gen_3[TRANSACTION_ID_KEY] == 'trans-3'
-    gen_4 = db.get('gen-0000000004')
-    assert gen_4[DOC_ID_KEY] == 'doc2'
-    assert gen_4[GENERATION_KEY] == 4
-    assert gen_4[TRANSACTION_ID_KEY] == 'trans-4'
diff --git a/scripts/migration/0.8.2/tox.ini b/scripts/migration/0.8.2/tox.ini
deleted file mode 100644
index 2bb6be4c..00000000
--- a/scripts/migration/0.8.2/tox.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-[tox]
-envlist = py27
-
-[testenv]
-commands = py.test {posargs}
-changedir = tests
-deps =
-    pytest
-    couchdb
-    pdbpp
-    -e../../../common
-setenv =
-    TERM=xterm
diff --git a/scripts/migration/0.9.0/.gitignore b/scripts/migration/0.9.0/.gitignore
new file mode 100644
index 00000000..6115c109
--- /dev/null
+++ b/scripts/migration/0.9.0/.gitignore
@@ -0,0 +1 @@
+log/*
diff --git a/scripts/migration/0.9.0/README.md b/scripts/migration/0.9.0/README.md
new file mode 100644
index 00000000..919a5235
--- /dev/null
+++ b/scripts/migration/0.9.0/README.md
@@ -0,0 +1,73 @@
+CouchDB schema migration to Soledad 0.8.2
+=========================================
+
+Migrate the couch database schema from versions <= 0.8.1 to the 0.8.2 version.
+
+
+ATTENTION!
+----------
+
+  - This script does not back up your data for you. Make sure you have a backup
+    copy of your databases before running this script!
+
+  - Make sure you turn off any service that might be writing to the couch
+    database before running this script.
+
+
+Usage
+-----
+
+To see what the script would do, run:
+
+    ./migrate.py
+
+To actually run the migration, add the --do-migrate command line option:
+
+    ./migrate.py --do-migrate
+
+To see all command line options, run:
+
+    ./migrate.py --help
+
+
+Log
+---
+
+If you don't pass a --log-file command line option, a log will be written to
+the `log/` folder.
+
+
+Differences between old and new couch schema
+--------------------------------------------
+
+The differences between the old and new schemas are:
+
+  - Transaction metadata was previously stored inside each document, and we
+    used design doc view/list functions to retrieve that information. Now,
+    transaction metadata is stored in documents with special ids
+    (gen-0000000001 to gen-9999999999; see the sketch after this file).
+
+  - Database replica config metadata was stored in a document called
+    "u1db_config", and now we store it in the "_local/config" document.
+
+  - Sync metadata was previously stored in documents with id
+    "u1db_sync_<replica-uid>", and is now stored in
+    "_local/sync_<replica-uid>".
+
+  - The new schema doesn't make use of any design documents.
+
+
+What does this script do
+------------------------
+
+- List all databases starting with "user-".
+- For each one, do:
+  - Check if it contains the old "u1db_config" document.
+  - If it doesn't, skip this db.
+  - Get the transaction log using the usual design doc view/list functions.
+  - Write a new "gen-X" document for each line on the transaction log.
+  - Get the "u1db_config" document, create a new one in "_local/config", and
+    delete the old one.
+  - List all "u1db_sync_X" documents, create new ones in "_local/sync_X", and
+    delete the old ones.
+  - Delete unused design documents.
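For illustration, the id conventions described in the README above can be
sketched in a few lines of Python. The helper names here are hypothetical and
are not part of the migration script itself, which follows below:

    # Sketch of the new-schema document ids (assumption: ids are derived as
    # described in the README above; these helpers do not exist in the script).

    def gen_doc_id(gen):
        # generation documents range from gen-0000000001 to gen-9999999999
        return 'gen-%s' % str(gen).zfill(10)

    def sync_doc_id(replica_uid):
        # sync metadata lives in one _local document per source replica
        return '_local/sync_%s' % replica_uid

    CONFIG_DOC_ID = '_local/config'  # replica config metadata

    assert gen_doc_id(2) == 'gen-0000000002'
    assert sync_doc_id('A') == '_local/sync_A'

This mirrors the `str(gen).zfill(10)` padding and the `_local/sync_` prefix
used by the migration code later in this series.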
diff --git a/scripts/migration/0.9.0/log/.empty b/scripts/migration/0.9.0/log/.empty new file mode 100644 index 00000000..e69de29b diff --git a/scripts/migration/0.9.0/migrate.py b/scripts/migration/0.9.0/migrate.py new file mode 100755 index 00000000..6ad5bc2d --- /dev/null +++ b/scripts/migration/0.9.0/migrate.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# migrate.py + +""" +Migrate CouchDB schema to Soledad 0.8.2 schema. + +****************************************************************************** + ATTENTION! + + - This script does not backup your data for you. Make sure you have a backup + copy of your databases before running this script! + + - Make sure you turn off any service that might be writing to the couch + database before running this script. + +****************************************************************************** + +Run this script with the --help option to see command line options. + +See the README.md file for more information. +""" + +import datetime +import logging +import netrc +import os + +from argparse import ArgumentParser + +from leap.soledad.server import load_configuration + +from migrate_couch_schema import migrate + + +TARGET_VERSION = '0.8.2' +DEFAULT_COUCH_URL = 'http://127.0.0.1:5984' +CONF = load_configuration('/etc/soledad/soledad-server.conf') +NETRC_PATH = CONF['soledad-server']['admin_netrc'] + + +# +# command line args and execution +# + +def _configure_logger(log_file, level=logging.INFO): + if not log_file: + fname, _ = os.path.basename(__file__).split('.') + timestr = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + filename = 'soledad_%s_%s_%s.log' \ + % (TARGET_VERSION, fname, timestr) + dirname = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'log') + log_file = os.path.join(dirname, filename) + logging.basicConfig( + filename=log_file, + filemode='a', + format='%(asctime)s,%(msecs)d %(levelname)s %(message)s', + datefmt='%H:%M:%S', + level=level) + + +def _default_couch_url(): + if not os.path.exists(NETRC_PATH): + return DEFAULT_COUCH_URL + parsed_netrc = netrc.netrc(NETRC_PATH) + host, (login, _, password) = parsed_netrc.hosts.items()[0] + url = ('http://%(login)s:%(password)s@%(host)s:5984' % { + 'login': login, + 'password': password, + 'host': host}) + return url + + +def _parse_args(): + parser = ArgumentParser() + parser.add_argument( + '--couch_url', + help='the url for the couch database', + default=_default_couch_url()) + parser.add_argument( + '--do-migrate', + help='actually perform the migration (otherwise ' + 'just print what would be done)', + action='store_true') + parser.add_argument( + '--log-file', + help='the log file to use') + parser.add_argument( + '--pdb', action='store_true', + help='escape to pdb shell in case of exception') + parser.add_argument( + '--verbose', action='store_true', + help='output detailed information about the migration ' + '(i.e. 
include debug messages)') + return parser.parse_args() + + +def _enable_pdb(): + import sys + from IPython.core import ultratb + sys.excepthook = ultratb.FormattedTB( + mode='Verbose', color_scheme='Linux', call_pdb=1) + + +if __name__ == '__main__': + args = _parse_args() + if args.pdb: + _enable_pdb() + _configure_logger( + args.log_file, + level=logging.DEBUG if args.verbose else logging.INFO) + logger = logging.getLogger(__name__) + try: + migrate(args, TARGET_VERSION) + except: + logger.exception('Fatal error on migrate script!') + raise diff --git a/scripts/migration/0.9.0/migrate_couch_schema/__init__.py b/scripts/migration/0.9.0/migrate_couch_schema/__init__.py new file mode 100644 index 00000000..f0b456e4 --- /dev/null +++ b/scripts/migration/0.9.0/migrate_couch_schema/__init__.py @@ -0,0 +1,192 @@ +# __init__.py +""" +Support functions for migration script. +""" + +import logging + +from couchdb import Server +from couchdb import ResourceNotFound +from couchdb import ResourceConflict + +from leap.soledad.common.couch import GENERATION_KEY +from leap.soledad.common.couch import TRANSACTION_ID_KEY +from leap.soledad.common.couch import REPLICA_UID_KEY +from leap.soledad.common.couch import DOC_ID_KEY +from leap.soledad.common.couch import SCHEMA_VERSION_KEY +from leap.soledad.common.couch import CONFIG_DOC_ID +from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX +from leap.soledad.common.couch import SCHEMA_VERSION + + +logger = logging.getLogger(__name__) + + +# +# support functions +# + +def _get_couch_server(couch_url): + return Server(couch_url) + + +def _is_migrateable(db): + config_doc = db.get('u1db_config') + return bool(config_doc) + + +def _get_transaction_log(db): + ddoc_path = ['_design', 'transactions', '_view', 'log'] + resource = db.resource(*ddoc_path) + try: + _, _, data = resource.get_json() + except ResourceNotFound: + logger.warning( + '[%s] missing transactions design document, ' + 'can\'t get transaction log.' 
% db.name) + return [] + rows = data['rows'] + transaction_log = [] + gen = 1 + for row in rows: + transaction_log.append((gen, row['id'], row['value'])) + gen += 1 + return transaction_log + + +def _get_user_dbs(server): + user_dbs = filter(lambda dbname: dbname.startswith('user-'), server) + return user_dbs + + +# +# migration main functions +# + +def migrate(args, target_version): + server = _get_couch_server(args.couch_url) + logger.info('starting couch schema migration to %s' % target_version) + if not args.do_migrate: + logger.warning('dry-run: no changes will be made to databases') + user_dbs = _get_user_dbs(server) + for dbname in user_dbs: + db = server[dbname] + if not _is_migrateable(db): + logger.warning("[%s] skipping not migrateable user db" % dbname) + continue + logger.info("[%s] starting migration of user db" % dbname) + try: + _migrate_user_db(db, args.do_migrate) + logger.info("[%s] finished migration of user db" % dbname) + except: + logger.exception('[%s] error migrating user db' % dbname) + logger.error('continuing with next database.') + logger.info('finished couch schema migration to %s' % target_version) + + +def _migrate_user_db(db, do_migrate): + _migrate_transaction_log(db, do_migrate) + _migrate_sync_docs(db, do_migrate) + _delete_design_docs(db, do_migrate) + _migrate_config_doc(db, do_migrate) + + +def _migrate_transaction_log(db, do_migrate): + transaction_log = _get_transaction_log(db) + for gen, doc_id, trans_id in transaction_log: + gen_doc_id = 'gen-%s' % str(gen).zfill(10) + doc = { + '_id': gen_doc_id, + GENERATION_KEY: gen, + DOC_ID_KEY: doc_id, + TRANSACTION_ID_KEY: trans_id, + } + logger.debug('[%s] creating gen doc: %s' % (db.name, gen_doc_id)) + if do_migrate: + try: + db.save(doc) + except ResourceConflict: + # this gen document already exists. if documents are the same, + # continue with migration. + existing_doc = db.get(gen_doc_id) + for key in [GENERATION_KEY, DOC_ID_KEY, TRANSACTION_ID_KEY]: + if existing_doc[key] != doc[key]: + raise + + +def _migrate_config_doc(db, do_migrate): + old_doc = db['u1db_config'] + new_doc = { + '_id': CONFIG_DOC_ID, + REPLICA_UID_KEY: old_doc[REPLICA_UID_KEY], + SCHEMA_VERSION_KEY: SCHEMA_VERSION, + } + logger.info("[%s] moving config doc: %s -> %s" + % (db.name, old_doc['_id'], new_doc['_id'])) + if do_migrate: + # the config doc must not exist, otherwise we would have skipped this + # database. + db.save(new_doc) + db.delete(old_doc) + + +def _migrate_sync_docs(db, do_migrate): + logger.info('[%s] moving sync docs' % db.name) + view = db.view( + '_all_docs', + startkey='u1db_sync', + endkey='u1db_synd', + include_docs='true') + for row in view.rows: + old_doc = row['doc'] + old_id = old_doc['_id'] + + # older schemas used different documents with ids starting with + # "u1db_sync" to store sync-related data: + # + # - u1db_sync_log: was used to store the whole sync log. + # - u1db_sync_state: was used to store the sync state. + # + # if any of these documents exist in the current db, they are leftover + # from previous migrations, and should just be removed. 
+ if old_id in ['u1db_sync_log', 'u1db_sync_state']: + logger.info('[%s] removing leftover document: %s' + % (db.name, old_id)) + if do_migrate: + db.delete(old_doc) + continue + + replica_uid = old_id.replace('u1db_sync_', '') + new_id = "%s%s" % (SYNC_DOC_ID_PREFIX, replica_uid) + new_doc = { + '_id': new_id, + GENERATION_KEY: old_doc['generation'], + TRANSACTION_ID_KEY: old_doc['transaction_id'], + REPLICA_UID_KEY: replica_uid, + } + logger.debug("[%s] moving sync doc: %s -> %s" + % (db.name, old_id, new_id)) + if do_migrate: + try: + db.save(new_doc) + except ResourceConflict: + # this sync document already exists. if documents are the same, + # continue with migration. + existing_doc = db.get(new_id) + for key in [GENERATION_KEY, TRANSACTION_ID_KEY, + REPLICA_UID_KEY]: + if existing_doc[key] != new_doc[key]: + raise + db.delete(old_doc) + + +def _delete_design_docs(db, do_migrate): + for ddoc in ['docs', 'syncs', 'transactions']: + doc_id = '_design/%s' % ddoc + doc = db.get(doc_id) + if doc: + logger.info("[%s] deleting design doc: %s" % (db.name, doc_id)) + if do_migrate: + db.delete(doc) + else: + logger.warning("[%s] design doc not found: %s" % (db.name, doc_id)) diff --git a/scripts/migration/0.9.0/requirements.pip b/scripts/migration/0.9.0/requirements.pip new file mode 100644 index 00000000..ea22a1a4 --- /dev/null +++ b/scripts/migration/0.9.0/requirements.pip @@ -0,0 +1,3 @@ +couchdb +leap.soledad.common==0.9.0 +leap.soledad.server==0.9.0 diff --git a/scripts/migration/0.9.0/setup.py b/scripts/migration/0.9.0/setup.py new file mode 100644 index 00000000..0467e932 --- /dev/null +++ b/scripts/migration/0.9.0/setup.py @@ -0,0 +1,8 @@ +from setuptools import setup +from setuptools import find_packages + + +setup( + name='migrate_couch_schema', + packages=find_packages('.'), +) diff --git a/scripts/migration/0.9.0/tests/conftest.py b/scripts/migration/0.9.0/tests/conftest.py new file mode 100644 index 00000000..61f6c7ee --- /dev/null +++ b/scripts/migration/0.9.0/tests/conftest.py @@ -0,0 +1,54 @@ +# conftest.py + +""" +Provide a couch database with content stored in old schema. 
+""" + +import couchdb +import pytest +import uuid + + +COUCH_URL = 'http://127.0.0.1:5984' + +transaction_map = """ +function(doc) { + if (doc.u1db_transactions) + doc.u1db_transactions.forEach(function(t) { + emit(t[0], // use timestamp as key so the results are ordered + t[1]); // value is the transaction_id + }); +} +""" + +initial_docs = [ + {'_id': 'u1db_config', 'replica_uid': 'an-uid'}, + {'_id': 'u1db_sync_A', 'generation': 0, 'replica_uid': 'A', + 'transaction_id': ''}, + {'_id': 'u1db_sync_B', 'generation': 2, 'replica_uid': 'B', + 'transaction_id': 'X'}, + {'_id': 'doc1', 'u1db_transactions': [(1, 'trans-1'), (3, 'trans-3')]}, + {'_id': 'doc2', 'u1db_transactions': [(2, 'trans-2'), (4, 'trans-4')]}, + {'_id': '_design/docs'}, + {'_id': '_design/syncs'}, + {'_id': '_design/transactions', + 'views': {'log': {'map': transaction_map}}}, + # add some data from previous interrupted migration + {'_id': '_local/sync_A', 'gen': 0, 'trans_id': '', 'replica_uid': 'A'}, + {'_id': 'gen-0000000002', + 'gen': 2, 'trans_id': 'trans-2', 'doc_id': 'doc2'}, + # the following should be removed if found in the dbs + {'_id': 'u1db_sync_log'}, + {'_id': 'u1db_sync_state'}, +] + + +@pytest.fixture(scope='function') +def db(request): + server = couchdb.Server(COUCH_URL) + dbname = "user-" + uuid.uuid4().hex + db = server.create(dbname) + for doc in initial_docs: + db.save(doc) + request.addfinalizer(lambda: server.delete(dbname)) + return db diff --git a/scripts/migration/0.9.0/tests/test_migrate.py b/scripts/migration/0.9.0/tests/test_migrate.py new file mode 100644 index 00000000..10c8b906 --- /dev/null +++ b/scripts/migration/0.9.0/tests/test_migrate.py @@ -0,0 +1,67 @@ +# test_migrate.py + +""" +Ensure that the migration script works! +""" + +from migrate_couch_schema import _migrate_user_db + +from leap.soledad.common.couch import GENERATION_KEY +from leap.soledad.common.couch import TRANSACTION_ID_KEY +from leap.soledad.common.couch import REPLICA_UID_KEY +from leap.soledad.common.couch import DOC_ID_KEY +from leap.soledad.common.couch import SCHEMA_VERSION_KEY +from leap.soledad.common.couch import CONFIG_DOC_ID +from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX +from leap.soledad.common.couch import SCHEMA_VERSION + + +def test__migrate_user_db(db): + _migrate_user_db(db, True) + + # we should find exactly 6 documents: 2 normal documents and 4 generation + # documents + view = db.view('_all_docs') + assert len(view.rows) == 6 + + # ensure that the ids of the documents we found on the database are correct + doc_ids = map(lambda doc: doc.id, view.rows) + assert 'doc1' in doc_ids + assert 'doc2' in doc_ids + assert 'gen-0000000001' in doc_ids + assert 'gen-0000000002' in doc_ids + assert 'gen-0000000003' in doc_ids + assert 'gen-0000000004' in doc_ids + + # assert config doc contents + config_doc = db.get(CONFIG_DOC_ID) + assert config_doc[REPLICA_UID_KEY] == 'an-uid' + assert config_doc[SCHEMA_VERSION_KEY] == SCHEMA_VERSION + + # assert sync docs contents + sync_doc_A = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'A')) + assert sync_doc_A[GENERATION_KEY] == 0 + assert sync_doc_A[REPLICA_UID_KEY] == 'A' + assert sync_doc_A[TRANSACTION_ID_KEY] == '' + sync_doc_B = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'B')) + assert sync_doc_B[GENERATION_KEY] == 2 + assert sync_doc_B[REPLICA_UID_KEY] == 'B' + assert sync_doc_B[TRANSACTION_ID_KEY] == 'X' + + # assert gen docs contents + gen_1 = db.get('gen-0000000001') + assert gen_1[DOC_ID_KEY] == 'doc1' + assert gen_1[GENERATION_KEY] == 1 + assert 
gen_1[TRANSACTION_ID_KEY] == 'trans-1'
+    gen_2 = db.get('gen-0000000002')
+    assert gen_2[DOC_ID_KEY] == 'doc2'
+    assert gen_2[GENERATION_KEY] == 2
+    assert gen_2[TRANSACTION_ID_KEY] == 'trans-2'
+    gen_3 = db.get('gen-0000000003')
+    assert gen_3[DOC_ID_KEY] == 'doc1'
+    assert gen_3[GENERATION_KEY] == 3
+    assert gen_3[TRANSACTION_ID_KEY] == 'trans-3'
+    gen_4 = db.get('gen-0000000004')
+    assert gen_4[DOC_ID_KEY] == 'doc2'
+    assert gen_4[GENERATION_KEY] == 4
+    assert gen_4[TRANSACTION_ID_KEY] == 'trans-4'
diff --git a/scripts/migration/0.9.0/tox.ini b/scripts/migration/0.9.0/tox.ini
new file mode 100644
index 00000000..2bb6be4c
--- /dev/null
+++ b/scripts/migration/0.9.0/tox.ini
@@ -0,0 +1,13 @@
+[tox]
+envlist = py27
+
+[testenv]
+commands = py.test {posargs}
+changedir = tests
+deps =
+    pytest
+    couchdb
+    pdbpp
+    -e../../../common
+setenv =
+    TERM=xterm
--
cgit v1.2.3


From b3f62da1bc79f42f1a1bc15ef3164b2569712984 Mon Sep 17 00:00:00 2001
From: drebs
Date: Fri, 30 Sep 2016 09:19:26 -0300
Subject: [pkg] update changelog to 0.9.0

---
 CHANGELOG.rst | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 24c20641..ded2cac9 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,3 +1,41 @@
+0.9.0 - 11 November, 2016
++++++++++++++++++++++++++
+
+Main features
+~~~~~~~~~~~~~
+
+- Server-side changes in couch backend schema.
+- Use of tox and pytest to run tests.
+- Performance tests.
+
+Server
+======
+
+*** Attention: Migration needed! ***
+
+This version of Soledad uses a different database schema in the server couch
+backend. The difference from the old schema is that the use of design documents
+for storing and accessing Soledad db metadata was removed, because it incurred
+too much memory and time overhead when passing data to the JavaScript
+interpreter.
+
+Because of that, you need to run a migration script on your database. Check the
+`scripts/migration/0.9.0/` directory for instructions on how to run the
+migration script on your database. Don't forget to back up before running the
+script!
+
+Bugfixes
+~~~~~~~~
+- Fix order of multipart serialization when writing to couch.
+
+Features
+~~~~~~~~
+- Log to syslog.
+- Remove usage of design documents in couch backend.
+- Use _local couch docs for metadata storage.
+- Other small improvements in couch backend.
+
+
 0.8.1 - 14 July, 2016
 +++++++++++++++++++++
--
cgit v1.2.3


From 0fd7e9f018b02161a844c11332ffced56b256010 Mon Sep 17 00:00:00 2001
From: drebs
Date: Wed, 9 Nov 2016 11:20:12 -0200
Subject: [pkg] update leap requirements files

---
 client/pkg/requirements-leap.pip | 2 +-
 server/pkg/requirements-leap.pip | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/client/pkg/requirements-leap.pip b/client/pkg/requirements-leap.pip
index 52d1263b..920d4123 100644
--- a/client/pkg/requirements-leap.pip
+++ b/client/pkg/requirements-leap.pip
@@ -1,2 +1,2 @@
 leap.common>=0.4.3
-leap.soledad.common>=0.7.0
+leap.soledad.common>=0.9.0
diff --git a/server/pkg/requirements-leap.pip b/server/pkg/requirements-leap.pip
index aaad340c..93b447e5 100644
--- a/server/pkg/requirements-leap.pip
+++ b/server/pkg/requirements-leap.pip
@@ -1 +1 @@
-leap.soledad.common>=0.6.5
+leap.soledad.common>=0.9.0
--
cgit v1.2.3
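After deploying the version bumps above, a quick sanity check can confirm that
the installed packages satisfy the new pins. This is a hypothetical snippet,
not part of the patch series; it assumes only the standard setuptools
pkg_resources API:

    # Verify that installed packages satisfy the pins from the
    # requirements-leap.pip files above (Python 2, like the rest of the code).
    import pkg_resources

    for req in ['leap.common>=0.4.3', 'leap.soledad.common>=0.9.0']:
        try:
            pkg_resources.require(req)
            print '[+] satisfied: %s' % req
        except (pkg_resources.DistributionNotFound,
                pkg_resources.VersionConflict) as e:
            print '[*] unsatisfied: %s (%s)' % (req, e)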